Compare commits
46 Commits
qdev-array
...
multifd-fi
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
981624a51b | ||
|
|
a401debe32 | ||
|
|
1bdcab53b0 | ||
|
|
4439e376a2 | ||
|
|
27ec0aea2b | ||
|
|
216f1c0799 | ||
|
|
7bfc72b82c | ||
|
|
296b174351 | ||
|
|
bd006aa37a | ||
|
|
3d66673424 | ||
|
|
4c84a5b854 | ||
|
|
ac0f5a5c61 | ||
|
|
7960c99f7d | ||
|
|
e017872d55 | ||
|
|
1ed6e0fea1 | ||
|
|
5c256aa813 | ||
|
|
a7c11aaf8f | ||
|
|
6b7fc46f44 | ||
|
|
2e9b519644 | ||
|
|
3f8359b3aa | ||
|
|
733c2311f9 | ||
|
|
8b424732bd | ||
|
|
aeab71c90c | ||
|
|
61e1dc63de | ||
|
|
f9e7e60b36 | ||
|
|
082acdd3b8 | ||
|
|
bdd2f44ca6 | ||
|
|
7d246857f2 | ||
|
|
f31d26ed67 | ||
|
|
1a69ed8fb1 | ||
|
|
322a7e0b68 | ||
|
|
d04ea45102 | ||
|
|
02b502d253 | ||
|
|
89767de55c | ||
|
|
61d4f74ecb | ||
|
|
68eff73a4f | ||
|
|
e2c4395aa5 | ||
|
|
905af99677 | ||
|
|
b1d2461984 | ||
|
|
948cb60563 | ||
|
|
b00a856b9f | ||
|
|
e82c306e54 | ||
|
|
83ba1324df | ||
|
|
0f2a1a3c30 | ||
|
|
be29a05eb0 | ||
|
|
a39011878e |
@@ -30,7 +30,6 @@ avocado-system-alpine:
|
||||
variables:
|
||||
IMAGE: alpine
|
||||
MAKE_CHECK_ARGS: check-avocado
|
||||
AVOCADO_TAGS: arch:avr arch:loongarch64 arch:mips64 arch:mipsel
|
||||
|
||||
build-system-ubuntu:
|
||||
extends:
|
||||
@@ -41,7 +40,8 @@ build-system-ubuntu:
|
||||
variables:
|
||||
IMAGE: ubuntu2204
|
||||
CONFIGURE_ARGS: --enable-docs
|
||||
TARGETS: alpha-softmmu microblazeel-softmmu mips64el-softmmu
|
||||
TARGETS: alpha-softmmu cris-softmmu hppa-softmmu
|
||||
microblazeel-softmmu mips64el-softmmu
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
|
||||
check-system-ubuntu:
|
||||
@@ -61,7 +61,6 @@ avocado-system-ubuntu:
|
||||
variables:
|
||||
IMAGE: ubuntu2204
|
||||
MAKE_CHECK_ARGS: check-avocado
|
||||
AVOCADO_TAGS: arch:alpha arch:microblaze arch:mips64el
|
||||
|
||||
build-system-debian:
|
||||
extends:
|
||||
@@ -73,7 +72,7 @@ build-system-debian:
|
||||
IMAGE: debian-amd64
|
||||
CONFIGURE_ARGS: --with-coroutine=sigaltstack
|
||||
TARGETS: arm-softmmu i386-softmmu riscv64-softmmu sh4eb-softmmu
|
||||
sparc-softmmu xtensa-softmmu
|
||||
sparc-softmmu xtensaeb-softmmu
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
|
||||
check-system-debian:
|
||||
@@ -93,7 +92,6 @@ avocado-system-debian:
|
||||
variables:
|
||||
IMAGE: debian-amd64
|
||||
MAKE_CHECK_ARGS: check-avocado
|
||||
AVOCADO_TAGS: arch:arm arch:i386 arch:riscv64 arch:sh4 arch:sparc arch:xtensa
|
||||
|
||||
crash-test-debian:
|
||||
extends: .native_test_job_template
|
||||
@@ -116,7 +114,7 @@ build-system-fedora:
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
CONFIGURE_ARGS: --disable-gcrypt --enable-nettle --enable-docs
|
||||
TARGETS: microblaze-softmmu mips-softmmu
|
||||
TARGETS: tricore-softmmu microblaze-softmmu mips-softmmu
|
||||
xtensa-softmmu m68k-softmmu riscv32-softmmu ppc-softmmu sparc64-softmmu
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
|
||||
@@ -137,8 +135,6 @@ avocado-system-fedora:
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
MAKE_CHECK_ARGS: check-avocado
|
||||
AVOCADO_TAGS: arch:microblaze arch:mips arch:xtensa arch:m68k
|
||||
arch:riscv32 arch:ppc arch:sparc64
|
||||
|
||||
crash-test-fedora:
|
||||
extends: .native_test_job_template
|
||||
@@ -184,8 +180,6 @@ avocado-system-centos:
|
||||
variables:
|
||||
IMAGE: centos8
|
||||
MAKE_CHECK_ARGS: check-avocado
|
||||
AVOCADO_TAGS: arch:ppc64 arch:or1k arch:390x arch:x86_64 arch:rx
|
||||
arch:sh4 arch:nios2
|
||||
|
||||
build-system-opensuse:
|
||||
extends:
|
||||
@@ -215,7 +209,6 @@ avocado-system-opensuse:
|
||||
variables:
|
||||
IMAGE: opensuse-leap
|
||||
MAKE_CHECK_ARGS: check-avocado
|
||||
AVOCADO_TAGS: arch:s390x arch:x86_64 arch:aarch64
|
||||
|
||||
|
||||
# This jobs explicitly disable TCG (--disable-tcg), KVM is detected by
|
||||
@@ -256,7 +249,6 @@ build-user:
|
||||
variables:
|
||||
IMAGE: debian-all-test-cross
|
||||
CONFIGURE_ARGS: --disable-tools --disable-system
|
||||
--target-list-exclude=alpha-linux-user,sh4-linux-user
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
build-user-static:
|
||||
@@ -266,18 +258,6 @@ build-user-static:
|
||||
variables:
|
||||
IMAGE: debian-all-test-cross
|
||||
CONFIGURE_ARGS: --disable-tools --disable-system --static
|
||||
--target-list-exclude=alpha-linux-user,sh4-linux-user
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
# targets stuck on older compilers
|
||||
build-legacy:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
job: amd64-debian-legacy-cross-container
|
||||
variables:
|
||||
IMAGE: debian-legacy-test-cross
|
||||
TARGETS: alpha-linux-user alpha-softmmu sh4-linux-user
|
||||
CONFIGURE_ARGS: --disable-tools
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
build-user-hexagon:
|
||||
@@ -290,9 +270,7 @@ build-user-hexagon:
|
||||
CONFIGURE_ARGS: --disable-tools --disable-docs --enable-debug-tcg
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
# Build the softmmu targets we have check-tcg tests and compilers in
|
||||
# our omnibus all-test-cross container. Those targets that haven't got
|
||||
# Debian cross compiler support need to use special containers.
|
||||
# Only build the softmmu targets we have check-tcg tests for
|
||||
build-some-softmmu:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
@@ -300,18 +278,7 @@ build-some-softmmu:
|
||||
variables:
|
||||
IMAGE: debian-all-test-cross
|
||||
CONFIGURE_ARGS: --disable-tools --enable-debug
|
||||
TARGETS: arm-softmmu aarch64-softmmu i386-softmmu riscv64-softmmu
|
||||
s390x-softmmu x86_64-softmmu
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
build-loongarch64:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
job: loongarch-debian-cross-container
|
||||
variables:
|
||||
IMAGE: debian-loongarch-cross
|
||||
CONFIGURE_ARGS: --disable-tools --enable-debug
|
||||
TARGETS: loongarch64-linux-user loongarch64-softmmu
|
||||
TARGETS: xtensa-softmmu arm-softmmu aarch64-softmmu alpha-softmmu
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
# We build tricore in a very minimal tricore only container
|
||||
@@ -344,7 +311,7 @@ clang-user:
|
||||
variables:
|
||||
IMAGE: debian-all-test-cross
|
||||
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --disable-system
|
||||
--target-list-exclude=alpha-linux-user,microblazeel-linux-user,aarch64_be-linux-user,i386-linux-user,m68k-linux-user,mipsn32el-linux-user,xtensaeb-linux-user
|
||||
--target-list-exclude=microblazeel-linux-user,aarch64_be-linux-user,i386-linux-user,m68k-linux-user,mipsn32el-linux-user,xtensaeb-linux-user
|
||||
--extra-cflags=-fsanitize=undefined --extra-cflags=-fno-sanitize-recover=undefined
|
||||
MAKE_CHECK_ARGS: check-unit check-tcg
|
||||
|
||||
@@ -531,7 +498,7 @@ build-tci:
|
||||
variables:
|
||||
IMAGE: debian-all-test-cross
|
||||
script:
|
||||
- TARGETS="aarch64 arm hppa m68k microblaze ppc64 s390x x86_64"
|
||||
- TARGETS="aarch64 alpha arm hppa m68k microblaze ppc64 s390x x86_64"
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --enable-tcg-interpreter --disable-docs --disable-gtk --disable-vnc
|
||||
|
||||
@@ -11,6 +11,6 @@ MAKE='/opt/homebrew/bin/gmake'
|
||||
NINJA='/opt/homebrew/bin/ninja'
|
||||
PACKAGING_COMMAND='brew'
|
||||
PIP3='/opt/homebrew/bin/pip3'
|
||||
PKGS='bash bc bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo json-c libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson mtools ncurses nettle ninja pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy socat sparse spice-protocol swtpm tesseract usbredir vde vte3 xorriso zlib zstd'
|
||||
PKGS='bash bc bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo json-c libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson mtools ncurses nettle ninja pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy socat sparse spice-protocol tesseract usbredir vde vte3 xorriso zlib zstd'
|
||||
PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme tomli'
|
||||
PYTHON='/opt/homebrew/bin/python3'
|
||||
|
||||
@@ -1,3 +1,9 @@
|
||||
alpha-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-alpha-cross
|
||||
|
||||
amd64-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
@@ -10,12 +16,6 @@ amd64-debian-user-cross-container:
|
||||
variables:
|
||||
NAME: debian-all-test-cross
|
||||
|
||||
amd64-debian-legacy-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-legacy-test-cross
|
||||
|
||||
arm64-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
@@ -40,11 +40,23 @@ hexagon-cross-container:
|
||||
variables:
|
||||
NAME: debian-hexagon-cross
|
||||
|
||||
loongarch-debian-cross-container:
|
||||
hppa-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-loongarch-cross
|
||||
NAME: debian-hppa-cross
|
||||
|
||||
m68k-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-m68k-cross
|
||||
|
||||
mips64-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-mips64-cross
|
||||
|
||||
mips64el-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
@@ -52,12 +64,24 @@ mips64el-debian-cross-container:
|
||||
variables:
|
||||
NAME: debian-mips64el-cross
|
||||
|
||||
mips-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-mips-cross
|
||||
|
||||
mipsel-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-mipsel-cross
|
||||
|
||||
powerpc-test-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-powerpc-test-cross
|
||||
|
||||
ppc64el-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
@@ -71,7 +95,13 @@ riscv64-debian-cross-container:
|
||||
allow_failure: true
|
||||
variables:
|
||||
NAME: debian-riscv64-cross
|
||||
QEMU_JOB_OPTIONAL: 1
|
||||
|
||||
# we can however build TCG tests using a non-sid base
|
||||
riscv64-debian-test-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-riscv64-test-cross
|
||||
|
||||
s390x-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
@@ -79,6 +109,18 @@ s390x-debian-cross-container:
|
||||
variables:
|
||||
NAME: debian-s390x-cross
|
||||
|
||||
sh4-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-sh4-cross
|
||||
|
||||
sparc64-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-sparc64-cross
|
||||
|
||||
tricore-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
|
||||
@@ -165,7 +165,7 @@ cross-win32-system:
|
||||
job: win32-fedora-cross-container
|
||||
variables:
|
||||
IMAGE: fedora-win32-cross
|
||||
EXTRA_CONFIGURE_OPTS: --enable-fdt=internal --disable-plugins
|
||||
EXTRA_CONFIGURE_OPTS: --enable-fdt=internal
|
||||
CROSS_SKIP_TARGETS: alpha-softmmu avr-softmmu hppa-softmmu m68k-softmmu
|
||||
microblazeel-softmmu mips64el-softmmu nios2-softmmu
|
||||
artifacts:
|
||||
@@ -179,7 +179,7 @@ cross-win64-system:
|
||||
job: win64-fedora-cross-container
|
||||
variables:
|
||||
IMAGE: fedora-win64-cross
|
||||
EXTRA_CONFIGURE_OPTS: --enable-fdt=internal --disable-plugins
|
||||
EXTRA_CONFIGURE_OPTS: --enable-fdt=internal
|
||||
CROSS_SKIP_TARGETS: alpha-softmmu avr-softmmu hppa-softmmu
|
||||
m68k-softmmu microblazeel-softmmu nios2-softmmu
|
||||
or1k-softmmu rx-softmmu sh4eb-softmmu sparc64-softmmu
|
||||
|
||||
@@ -72,7 +72,6 @@
|
||||
- .\msys64\usr\bin\bash -lc "pacman -Sy --noconfirm --needed
|
||||
bison diffutils flex
|
||||
git grep make sed
|
||||
$MINGW_TARGET-binutils
|
||||
$MINGW_TARGET-capstone
|
||||
$MINGW_TARGET-ccache
|
||||
$MINGW_TARGET-curl
|
||||
|
||||
5
.mailmap
5
.mailmap
@@ -30,12 +30,10 @@ malc <av1474@comtv.ru> malc <malc@c046a42c-6fe2-441c-8c8c-71466251a162>
|
||||
# Corrupted Author fields
|
||||
Aaron Larson <alarson@ddci.com> alarson@ddci.com
|
||||
Andreas Färber <andreas.faerber@web.de> Andreas Färber <andreas.faerber>
|
||||
fanwenjie <fanwj@mail.ustc.edu.cn> fanwj@mail.ustc.edu.cn <fanwj@mail.ustc.edu.cn>
|
||||
Jason Wang <jasowang@redhat.com> Jason Wang <jasowang>
|
||||
Marek Dolata <mkdolata@us.ibm.com> mkdolata@us.ibm.com <mkdolata@us.ibm.com>
|
||||
Michael Ellerman <mpe@ellerman.id.au> michael@ozlabs.org <michael@ozlabs.org>
|
||||
Nick Hudson <hnick@vmware.com> hnick@vmware.com <hnick@vmware.com>
|
||||
Timothée Cocault <timothee.cocault@gmail.com> timothee.cocault@gmail.com <timothee.cocault@gmail.com>
|
||||
|
||||
# There is also a:
|
||||
# (no author) <(no author)@c046a42c-6fe2-441c-8c8c-71466251a162>
|
||||
@@ -83,9 +81,6 @@ Huacai Chen <chenhuacai@kernel.org> <chenhuacai@loongson.cn>
|
||||
James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
|
||||
Leif Lindholm <quic_llindhol@quicinc.com> <leif.lindholm@linaro.org>
|
||||
Leif Lindholm <quic_llindhol@quicinc.com> <leif@nuviainc.com>
|
||||
Luc Michel <luc@lmichel.fr> <luc.michel@git.antfield.fr>
|
||||
Luc Michel <luc@lmichel.fr> <luc.michel@greensocs.com>
|
||||
Luc Michel <luc@lmichel.fr> <lmichel@kalray.eu>
|
||||
Radoslaw Biernacki <rad@semihalf.com> <radoslaw.biernacki@linaro.org>
|
||||
Paul Brook <paul@nowt.org> <paul@codesourcery.com>
|
||||
Paul Burton <paulburton@kernel.org> <paul.burton@mips.com>
|
||||
|
||||
@@ -11,9 +11,6 @@ config OPENGL
|
||||
config X11
|
||||
bool
|
||||
|
||||
config PIXMAN
|
||||
bool
|
||||
|
||||
config SPICE
|
||||
bool
|
||||
|
||||
@@ -49,6 +46,3 @@ config FUZZ
|
||||
config VFIO_USER_SERVER_ALLOWED
|
||||
bool
|
||||
imply VFIO_USER_SERVER
|
||||
|
||||
config HV_BALLOON_POSSIBLE
|
||||
bool
|
||||
|
||||
170
MAINTAINERS
170
MAINTAINERS
@@ -245,10 +245,10 @@ M: Richard Henderson <richard.henderson@linaro.org>
|
||||
S: Maintained
|
||||
F: target/hppa/
|
||||
F: disas/hppa.c
|
||||
F: tests/tcg/hppa/
|
||||
|
||||
LoongArch TCG CPUs
|
||||
M: Song Gao <gaosong@loongson.cn>
|
||||
M: Xiaojuan Yang <yangxiaojuan@loongson.cn>
|
||||
S: Maintained
|
||||
F: target/loongarch/
|
||||
F: tests/tcg/loongarch64/
|
||||
@@ -259,7 +259,6 @@ M: Laurent Vivier <laurent@vivier.eu>
|
||||
S: Maintained
|
||||
F: target/m68k/
|
||||
F: disas/m68k.c
|
||||
F: tests/tcg/m68k/
|
||||
|
||||
MicroBlaze TCG CPUs
|
||||
M: Edgar E. Iglesias <edgar.iglesias@gmail.com>
|
||||
@@ -286,9 +285,7 @@ R: Marek Vasut <marex@denx.de>
|
||||
S: Orphan
|
||||
F: target/nios2/
|
||||
F: hw/nios2/
|
||||
F: hw/intc/nios2_vic.c
|
||||
F: disas/nios2.c
|
||||
F: include/hw/intc/nios2_vic.h
|
||||
F: configs/devices/nios2-softmmu/default.mak
|
||||
F: tests/docker/dockerfiles/debian-nios2-cross.d/build-toolchain.sh
|
||||
F: tests/tcg/nios2/
|
||||
@@ -299,7 +296,6 @@ S: Odd Fixes
|
||||
F: docs/system/openrisc/cpu-features.rst
|
||||
F: target/openrisc/
|
||||
F: hw/openrisc/
|
||||
F: include/hw/openrisc/
|
||||
F: tests/tcg/openrisc/
|
||||
|
||||
PowerPC TCG CPUs
|
||||
@@ -312,31 +308,21 @@ F: target/ppc/
|
||||
F: hw/ppc/ppc.c
|
||||
F: hw/ppc/ppc_booke.c
|
||||
F: include/hw/ppc/ppc.h
|
||||
F: hw/ppc/meson.build
|
||||
F: hw/ppc/trace*
|
||||
F: configs/devices/ppc*
|
||||
F: docs/system/ppc/embedded.rst
|
||||
F: docs/system/target-ppc.rst
|
||||
F: tests/tcg/ppc*/*
|
||||
|
||||
RISC-V TCG CPUs
|
||||
M: Palmer Dabbelt <palmer@dabbelt.com>
|
||||
M: Alistair Francis <alistair.francis@wdc.com>
|
||||
M: Bin Meng <bin.meng@windriver.com>
|
||||
R: Weiwei Li <liwei1518@gmail.com>
|
||||
R: Weiwei Li <liweiwei@iscas.ac.cn>
|
||||
R: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
|
||||
R: Liu Zhiwei <zhiwei_liu@linux.alibaba.com>
|
||||
L: qemu-riscv@nongnu.org
|
||||
S: Supported
|
||||
F: configs/targets/riscv*
|
||||
F: docs/system/target-riscv.rst
|
||||
F: target/riscv/
|
||||
F: hw/riscv/
|
||||
F: hw/intc/riscv*
|
||||
F: include/hw/riscv/
|
||||
F: linux-user/host/riscv32/
|
||||
F: linux-user/host/riscv64/
|
||||
F: tests/tcg/riscv64/
|
||||
|
||||
RISC-V XThead* extensions
|
||||
M: Christoph Muellner <christoph.muellner@vrull.eu>
|
||||
@@ -345,7 +331,6 @@ L: qemu-riscv@nongnu.org
|
||||
S: Supported
|
||||
F: target/riscv/insn_trans/trans_xthead.c.inc
|
||||
F: target/riscv/xthead*.decode
|
||||
F: disas/riscv-xthead*
|
||||
|
||||
RISC-V XVentanaCondOps extension
|
||||
M: Philipp Tomsich <philipp.tomsich@vrull.eu>
|
||||
@@ -353,7 +338,6 @@ L: qemu-riscv@nongnu.org
|
||||
S: Maintained
|
||||
F: target/riscv/XVentanaCondOps.decode
|
||||
F: target/riscv/insn_trans/trans_xventanacondops.c.inc
|
||||
F: disas/riscv-xventana*
|
||||
|
||||
RENESAS RX CPUs
|
||||
R: Yoshinori Sato <ysato@users.sourceforge.jp>
|
||||
@@ -378,7 +362,6 @@ F: target/sh4/
|
||||
F: hw/sh4/
|
||||
F: disas/sh4.c
|
||||
F: include/hw/sh4/
|
||||
F: tests/tcg/sh4/
|
||||
|
||||
SPARC TCG CPUs
|
||||
M: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
|
||||
@@ -389,7 +372,6 @@ F: hw/sparc/
|
||||
F: hw/sparc64/
|
||||
F: include/hw/sparc/sparc64.h
|
||||
F: disas/sparc.c
|
||||
F: tests/tcg/sparc64/
|
||||
|
||||
X86 TCG CPUs
|
||||
M: Paolo Bonzini <pbonzini@redhat.com>
|
||||
@@ -490,7 +472,7 @@ S: Supported
|
||||
F: include/sysemu/kvm_xen.h
|
||||
F: target/i386/kvm/xen*
|
||||
F: hw/i386/kvm/xen*
|
||||
F: tests/avocado/kvm_xen_guest.py
|
||||
F: tests/avocado/xen_guest.py
|
||||
|
||||
Guest CPU Cores (other accelerators)
|
||||
------------------------------------
|
||||
@@ -575,7 +557,6 @@ M: Cornelia Huck <cohuck@redhat.com>
|
||||
M: Paolo Bonzini <pbonzini@redhat.com>
|
||||
S: Maintained
|
||||
F: linux-headers/
|
||||
F: include/standard-headers/
|
||||
F: scripts/update-linux-headers.sh
|
||||
|
||||
POSIX
|
||||
@@ -687,7 +668,7 @@ M: Peter Maydell <peter.maydell@linaro.org>
|
||||
L: qemu-arm@nongnu.org
|
||||
S: Maintained
|
||||
F: hw/intc/arm*
|
||||
F: hw/intc/gic*_internal.h
|
||||
F: hw/intc/gic_internal.h
|
||||
F: hw/misc/a9scu.c
|
||||
F: hw/misc/arm11scu.c
|
||||
F: hw/misc/arm_l2x0.c
|
||||
@@ -859,10 +840,8 @@ M: Hao Wu <wuhaotsh@google.com>
|
||||
L: qemu-arm@nongnu.org
|
||||
S: Supported
|
||||
F: hw/*/npcm*
|
||||
F: hw/sensor/adm1266.c
|
||||
F: include/hw/*/npcm*
|
||||
F: tests/qtest/npcm*
|
||||
F: tests/qtest/adm1266-test.c
|
||||
F: pc-bios/npcm7xx_bootrom.bin
|
||||
F: roms/vbootrom
|
||||
F: docs/system/arm/nuvoton.rst
|
||||
@@ -901,7 +880,7 @@ S: Odd Fixes
|
||||
F: hw/arm/raspi.c
|
||||
F: hw/arm/raspi_platform.h
|
||||
F: hw/*/bcm283*
|
||||
F: include/hw/arm/rasp*
|
||||
F: include/hw/arm/raspi*
|
||||
F: include/hw/*/bcm283*
|
||||
F: docs/system/arm/raspi.rst
|
||||
|
||||
@@ -960,9 +939,6 @@ R: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
|
||||
L: qemu-arm@nongnu.org
|
||||
S: Maintained
|
||||
F: hw/arm/sbsa-ref.c
|
||||
F: hw/misc/sbsa_ec.c
|
||||
F: hw/watchdog/sbsa_gwdt.c
|
||||
F: include/hw/watchdog/sbsa_gwdt.h
|
||||
F: docs/system/arm/sbsa.rst
|
||||
F: tests/avocado/machine_aarch64_sbsaref.py
|
||||
|
||||
@@ -1133,7 +1109,7 @@ F: docs/system/arm/emcraft-sf2.rst
|
||||
ASPEED BMCs
|
||||
M: Cédric Le Goater <clg@kaod.org>
|
||||
M: Peter Maydell <peter.maydell@linaro.org>
|
||||
R: Andrew Jeffery <andrew@codeconstruct.com.au>
|
||||
R: Andrew Jeffery <andrew@aj.id.au>
|
||||
R: Joel Stanley <joel@jms.id.au>
|
||||
L: qemu-arm@nongnu.org
|
||||
S: Maintained
|
||||
@@ -1189,29 +1165,24 @@ F: hw/*/etraxfs_*.c
|
||||
|
||||
HP-PARISC Machines
|
||||
------------------
|
||||
HP B160L, HP C3700
|
||||
HP B160L
|
||||
M: Richard Henderson <richard.henderson@linaro.org>
|
||||
R: Helge Deller <deller@gmx.de>
|
||||
S: Odd Fixes
|
||||
F: configs/devices/hppa-softmmu/default.mak
|
||||
F: hw/display/artist.c
|
||||
F: hw/hppa/
|
||||
F: hw/input/lasips2.c
|
||||
F: hw/net/*i82596*
|
||||
F: hw/misc/lasi.c
|
||||
F: hw/pci-host/astro.c
|
||||
F: hw/pci-host/dino.c
|
||||
F: include/hw/input/lasips2.h
|
||||
F: include/hw/misc/lasi.h
|
||||
F: include/hw/net/lasi_82596.h
|
||||
F: include/hw/pci-host/astro.h
|
||||
F: include/hw/pci-host/dino.h
|
||||
F: pc-bios/hppa-firmware.img
|
||||
F: roms/seabios-hppa/
|
||||
|
||||
LoongArch Machines
|
||||
------------------
|
||||
Virt
|
||||
M: Xiaojuan Yang <yangxiaojuan@loongson.cn>
|
||||
M: Song Gao <gaosong@loongson.cn>
|
||||
S: Maintained
|
||||
F: docs/system/loongarch/virt.rst
|
||||
@@ -1286,7 +1257,6 @@ F: include/hw/char/goldfish_tty.h
|
||||
F: include/hw/intc/goldfish_pic.h
|
||||
F: include/hw/intc/m68k_irqc.h
|
||||
F: include/hw/misc/virt_ctrl.h
|
||||
F: docs/specs/virt-ctlr.rst
|
||||
|
||||
MicroBlaze Machines
|
||||
-------------------
|
||||
@@ -1316,16 +1286,14 @@ M: Hervé Poussineau <hpoussin@reactos.org>
|
||||
R: Aleksandar Rikalo <aleksandar.rikalo@syrmia.com>
|
||||
S: Maintained
|
||||
F: hw/mips/jazz.c
|
||||
F: hw/display/g364fb.c
|
||||
F: hw/display/jazz_led.c
|
||||
F: hw/dma/rc4030.c
|
||||
F: hw/nvram/ds1225y.c
|
||||
|
||||
Malta
|
||||
M: Philippe Mathieu-Daudé <philmd@linaro.org>
|
||||
R: Aurelien Jarno <aurelien@aurel32.net>
|
||||
S: Odd Fixes
|
||||
F: hw/isa/piix.c
|
||||
F: hw/isa/piix4.c
|
||||
F: hw/acpi/piix4.c
|
||||
F: hw/mips/malta.c
|
||||
F: hw/pci-host/gt64120.c
|
||||
@@ -1345,7 +1313,10 @@ M: Philippe Mathieu-Daudé <philmd@linaro.org>
|
||||
R: Jiaxun Yang <jiaxun.yang@flygoat.com>
|
||||
S: Odd Fixes
|
||||
F: hw/mips/fuloong2e.c
|
||||
F: hw/isa/vt82c686.c
|
||||
F: hw/pci-host/bonito.c
|
||||
F: hw/usb/vt82c686-uhci-pci.c
|
||||
F: include/hw/isa/vt82c686.h
|
||||
F: include/hw/pci-host/bonito.h
|
||||
F: tests/avocado/machine_mips_fuloong2e.py
|
||||
|
||||
@@ -1357,7 +1328,6 @@ F: hw/intc/loongson_liointc.c
|
||||
F: hw/mips/loongson3_bootp.c
|
||||
F: hw/mips/loongson3_bootp.h
|
||||
F: hw/mips/loongson3_virt.c
|
||||
F: include/hw/intc/loongson_liointc.h
|
||||
F: tests/avocado/machine_mips_loongson3v.py
|
||||
|
||||
Boston
|
||||
@@ -1375,7 +1345,6 @@ or1k-sim
|
||||
M: Jia Liu <proljc@gmail.com>
|
||||
S: Maintained
|
||||
F: docs/system/openrisc/or1k-sim.rst
|
||||
F: hw/intc/ompic.c
|
||||
F: hw/openrisc/openrisc_sim.c
|
||||
|
||||
PowerPC Machines
|
||||
@@ -1383,8 +1352,7 @@ PowerPC Machines
|
||||
405 (ref405ep)
|
||||
L: qemu-ppc@nongnu.org
|
||||
S: Orphan
|
||||
F: hw/ppc/ppc405*
|
||||
F: tests/avocado/ppc_405.py
|
||||
F: hw/ppc/ppc405_boards.c
|
||||
|
||||
Bamboo
|
||||
L: qemu-ppc@nongnu.org
|
||||
@@ -1396,7 +1364,6 @@ e500
|
||||
L: qemu-ppc@nongnu.org
|
||||
S: Orphan
|
||||
F: hw/ppc/e500*
|
||||
F: hw/ppc/ppce500_spin.c
|
||||
F: hw/gpio/mpc8xxx.c
|
||||
F: hw/i2c/mpc_i2c.c
|
||||
F: hw/net/fsl_etsec/
|
||||
@@ -1404,9 +1371,8 @@ F: hw/pci-host/ppce500.c
|
||||
F: include/hw/ppc/ppc_e500.h
|
||||
F: include/hw/pci-host/ppce500.h
|
||||
F: pc-bios/u-boot.e500
|
||||
F: hw/intc/openpic_kvm.c
|
||||
F: hw/intc/openpic_kvm.h
|
||||
F: include/hw/ppc/openpic_kvm.h
|
||||
F: docs/system/ppc/ppce500.rst
|
||||
|
||||
mpc8544ds
|
||||
L: qemu-ppc@nongnu.org
|
||||
@@ -1426,7 +1392,6 @@ F: hw/pci-bridge/dec.[hc]
|
||||
F: hw/misc/macio/
|
||||
F: hw/misc/mos6522.c
|
||||
F: hw/nvram/mac_nvram.c
|
||||
F: hw/ppc/fw_cfg.c
|
||||
F: hw/input/adb*
|
||||
F: include/hw/misc/macio/
|
||||
F: include/hw/misc/mos6522.h
|
||||
@@ -1480,10 +1445,6 @@ F: hw/*/spapr*
|
||||
F: include/hw/*/spapr*
|
||||
F: hw/*/xics*
|
||||
F: include/hw/*/xics*
|
||||
F: include/hw/ppc/fdt.h
|
||||
F: hw/ppc/fdt.c
|
||||
F: include/hw/ppc/pef.h
|
||||
F: hw/ppc/pef.c
|
||||
F: pc-bios/slof.bin
|
||||
F: docs/system/ppc/pseries.rst
|
||||
F: docs/specs/ppc-spapr-*
|
||||
@@ -1521,7 +1482,6 @@ M: BALATON Zoltan <balaton@eik.bme.hu>
|
||||
L: qemu-ppc@nongnu.org
|
||||
S: Maintained
|
||||
F: hw/ppc/sam460ex.c
|
||||
F: hw/ppc/ppc440_uc.c
|
||||
F: hw/ppc/ppc440_pcix.c
|
||||
F: hw/display/sm501*
|
||||
F: hw/ide/sii3112.c
|
||||
@@ -1539,14 +1499,6 @@ F: hw/pci-host/mv64361.c
|
||||
F: hw/pci-host/mv643xx.h
|
||||
F: include/hw/pci-host/mv64361.h
|
||||
|
||||
amigaone
|
||||
M: BALATON Zoltan <balaton@eik.bme.hu>
|
||||
L: qemu-ppc@nongnu.org
|
||||
S: Maintained
|
||||
F: hw/ppc/amigaone.c
|
||||
F: hw/pci-host/articia.c
|
||||
F: include/hw/pci-host/articia.h
|
||||
|
||||
Virtual Open Firmware (VOF)
|
||||
M: Alexey Kardashevskiy <aik@ozlabs.ru>
|
||||
R: David Gibson <david@gibson.dropbear.id.au>
|
||||
@@ -1573,7 +1525,6 @@ Microchip PolarFire SoC Icicle Kit
|
||||
M: Bin Meng <bin.meng@windriver.com>
|
||||
L: qemu-riscv@nongnu.org
|
||||
S: Supported
|
||||
F: docs/system/riscv/microchip-icicle-kit.rst
|
||||
F: hw/riscv/microchip_pfsoc.c
|
||||
F: hw/char/mchp_pfsoc_mmuart.c
|
||||
F: hw/misc/mchp_pfsoc_dmc.c
|
||||
@@ -1589,7 +1540,6 @@ Shakti C class SoC
|
||||
M: Vijai Kumar K <vijai@behindbytes.com>
|
||||
L: qemu-riscv@nongnu.org
|
||||
S: Supported
|
||||
F: docs/system/riscv/shakti-c.rst
|
||||
F: hw/riscv/shakti_c.c
|
||||
F: hw/char/shakti_uart.c
|
||||
F: include/hw/riscv/shakti_c.h
|
||||
@@ -1601,7 +1551,6 @@ M: Bin Meng <bin.meng@windriver.com>
|
||||
M: Palmer Dabbelt <palmer@dabbelt.com>
|
||||
L: qemu-riscv@nongnu.org
|
||||
S: Supported
|
||||
F: docs/system/riscv/sifive_u.rst
|
||||
F: hw/*/*sifive*.c
|
||||
F: include/hw/*/*sifive*.h
|
||||
|
||||
@@ -1626,7 +1575,6 @@ F: hw/intc/sh_intc.c
|
||||
F: hw/pci-host/sh_pci.c
|
||||
F: hw/timer/sh_timer.c
|
||||
F: include/hw/sh4/sh_intc.h
|
||||
F: include/hw/timer/tmu012.h
|
||||
|
||||
Shix
|
||||
R: Yoshinori Sato <ysato@users.sourceforge.jp>
|
||||
@@ -1750,16 +1698,6 @@ F: hw/s390x/event-facility.c
|
||||
F: hw/s390x/sclp*.c
|
||||
L: qemu-s390x@nongnu.org
|
||||
|
||||
S390 CPU topology
|
||||
M: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
|
||||
S: Supported
|
||||
F: include/hw/s390x/cpu-topology.h
|
||||
F: hw/s390x/cpu-topology.c
|
||||
F: target/s390x/kvm/stsi-topology.c
|
||||
F: docs/devel/s390-cpu-topology.rst
|
||||
F: docs/system/s390x/cpu-topology.rst
|
||||
F: tests/avocado/s390_topology.py
|
||||
|
||||
X86 Machines
|
||||
------------
|
||||
PC
|
||||
@@ -1774,7 +1712,7 @@ F: hw/pci-host/pam.c
|
||||
F: include/hw/pci-host/i440fx.h
|
||||
F: include/hw/pci-host/q35.h
|
||||
F: include/hw/pci-host/pam.h
|
||||
F: hw/isa/piix.c
|
||||
F: hw/isa/piix3.c
|
||||
F: hw/isa/lpc_ich9.c
|
||||
F: hw/i2c/smbus_ich9.c
|
||||
F: hw/acpi/piix4.c
|
||||
@@ -1784,7 +1722,7 @@ F: include/hw/southbridge/ich9.h
|
||||
F: include/hw/southbridge/piix.h
|
||||
F: hw/isa/apm.c
|
||||
F: include/hw/isa/apm.h
|
||||
F: tests/unit/test-x86-topo.c
|
||||
F: tests/unit/test-x86-cpuid.c
|
||||
F: tests/qtest/test-x86-cpuid-compat.c
|
||||
|
||||
PC Chipset
|
||||
@@ -1814,7 +1752,6 @@ F: include/hw/dma/i8257.h
|
||||
F: include/hw/i2c/pm_smbus.h
|
||||
F: include/hw/input/i8042.h
|
||||
F: include/hw/intc/ioapic*
|
||||
F: include/hw/intc/i8259.h
|
||||
F: include/hw/isa/i8259_internal.h
|
||||
F: include/hw/isa/superio.h
|
||||
F: include/hw/timer/hpet.h
|
||||
@@ -1844,7 +1781,6 @@ F: hw/core/null-machine.c
|
||||
F: hw/core/numa.c
|
||||
F: hw/cpu/cluster.c
|
||||
F: qapi/machine.json
|
||||
F: qapi/machine-common.json
|
||||
F: qapi/machine-target.json
|
||||
F: include/hw/boards.h
|
||||
F: include/hw/core/cpu.h
|
||||
@@ -1870,7 +1806,6 @@ M: Max Filippov <jcmvbkbc@gmail.com>
|
||||
S: Maintained
|
||||
F: hw/xtensa/xtfpga.c
|
||||
F: hw/net/opencores_eth.c
|
||||
F: include/hw/xtensa/mx_pic.h
|
||||
|
||||
Devices
|
||||
-------
|
||||
@@ -1896,7 +1831,6 @@ EDU
|
||||
M: Jiri Slaby <jslaby@suse.cz>
|
||||
S: Maintained
|
||||
F: hw/misc/edu.c
|
||||
F: docs/specs/edu.rst
|
||||
|
||||
IDE
|
||||
M: John Snow <jsnow@redhat.com>
|
||||
@@ -2032,9 +1966,7 @@ F: docs/specs/acpi_hest_ghes.rst
|
||||
ppc4xx
|
||||
L: qemu-ppc@nongnu.org
|
||||
S: Orphan
|
||||
F: hw/ppc/ppc4xx*.c
|
||||
F: hw/ppc/ppc440_uc.c
|
||||
F: hw/ppc/ppc440.h
|
||||
F: hw/ppc/ppc4*.c
|
||||
F: hw/i2c/ppc4xx_i2c.c
|
||||
F: include/hw/ppc/ppc4xx.h
|
||||
F: include/hw/i2c/ppc4xx_i2c.h
|
||||
@@ -2046,7 +1978,6 @@ M: Marc-André Lureau <marcandre.lureau@redhat.com>
|
||||
R: Paolo Bonzini <pbonzini@redhat.com>
|
||||
S: Odd Fixes
|
||||
F: hw/char/
|
||||
F: include/hw/char/
|
||||
|
||||
Network devices
|
||||
M: Jason Wang <jasowang@redhat.com>
|
||||
@@ -2323,15 +2254,6 @@ F: hw/virtio/virtio-mem-pci.h
|
||||
F: hw/virtio/virtio-mem-pci.c
|
||||
F: include/hw/virtio/virtio-mem.h
|
||||
|
||||
virtio-snd
|
||||
M: Gerd Hoffmann <kraxel@redhat.com>
|
||||
R: Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
|
||||
S: Supported
|
||||
F: hw/audio/virtio-snd.c
|
||||
F: hw/audio/virtio-snd-pci.c
|
||||
F: include/hw/audio/virtio-snd.h
|
||||
F: docs/system/devices/virtio-snd.rst
|
||||
|
||||
nvme
|
||||
M: Keith Busch <kbusch@kernel.org>
|
||||
M: Klaus Jensen <its@irrelevant.dk>
|
||||
@@ -2374,7 +2296,6 @@ S: Maintained
|
||||
F: hw/net/vmxnet*
|
||||
F: hw/scsi/vmw_pvscsi*
|
||||
F: tests/qtest/vmxnet3-test.c
|
||||
F: docs/specs/vwm_pvscsi-spec.rst
|
||||
|
||||
Rocker
|
||||
M: Jiri Pirko <jiri@resnulli.us>
|
||||
@@ -2459,7 +2380,7 @@ S: Orphan
|
||||
R: Ani Sinha <ani@anisinha.ca>
|
||||
F: hw/acpi/vmgenid.c
|
||||
F: include/hw/acpi/vmgenid.h
|
||||
F: docs/specs/vmgenid.rst
|
||||
F: docs/specs/vmgenid.txt
|
||||
F: tests/qtest/vmgenid-test.c
|
||||
|
||||
LED
|
||||
@@ -2491,7 +2412,6 @@ F: hw/display/vga*
|
||||
F: hw/display/bochs-display.c
|
||||
F: include/hw/display/vga.h
|
||||
F: include/hw/display/bochs-vbe.h
|
||||
F: docs/specs/standard-vga.rst
|
||||
|
||||
ramfb
|
||||
M: Gerd Hoffmann <kraxel@redhat.com>
|
||||
@@ -2505,7 +2425,6 @@ S: Odd Fixes
|
||||
F: hw/display/virtio-gpu*
|
||||
F: hw/display/virtio-vga.*
|
||||
F: include/hw/virtio/virtio-gpu.h
|
||||
F: docs/system/devices/virtio-gpu.rst
|
||||
|
||||
vhost-user-blk
|
||||
M: Raphael Norwitz <raphael.norwitz@nutanix.com>
|
||||
@@ -2546,18 +2465,9 @@ PIIX4 South Bridge (i82371AB)
|
||||
M: Hervé Poussineau <hpoussin@reactos.org>
|
||||
M: Philippe Mathieu-Daudé <philmd@linaro.org>
|
||||
S: Maintained
|
||||
F: hw/isa/piix.c
|
||||
F: hw/isa/piix4.c
|
||||
F: include/hw/southbridge/piix.h
|
||||
|
||||
VIA South Bridges (VT82C686B, VT8231)
|
||||
M: BALATON Zoltan <balaton@eik.bme.hu>
|
||||
M: Philippe Mathieu-Daudé <philmd@linaro.org>
|
||||
R: Jiaxun Yang <jiaxun.yang@flygoat.com>
|
||||
S: Maintained
|
||||
F: hw/isa/vt82c686.c
|
||||
F: hw/usb/vt82c686-uhci-pci.c
|
||||
F: include/hw/isa/vt82c686.h
|
||||
|
||||
Firmware configuration (fw_cfg)
|
||||
M: Philippe Mathieu-Daudé <philmd@linaro.org>
|
||||
R: Gerd Hoffmann <kraxel@redhat.com>
|
||||
@@ -2608,7 +2518,6 @@ W: https://canbus.pages.fel.cvut.cz/
|
||||
F: net/can/*
|
||||
F: hw/net/can/*
|
||||
F: include/net/can_*.h
|
||||
F: docs/system/devices/can.rst
|
||||
|
||||
OpenPIC interrupt controller
|
||||
M: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
|
||||
@@ -2652,7 +2561,7 @@ M: Halil Pasic <pasic@linux.ibm.com>
|
||||
M: Christian Borntraeger <borntraeger@linux.ibm.com>
|
||||
S: Supported
|
||||
F: hw/s390x/storage-keys.h
|
||||
F: hw/s390x/s390-skeys*.c
|
||||
F: hw/390x/s390-skeys*.c
|
||||
L: qemu-s390x@nongnu.org
|
||||
|
||||
S390 storage attribute device
|
||||
@@ -2660,7 +2569,7 @@ M: Halil Pasic <pasic@linux.ibm.com>
|
||||
M: Christian Borntraeger <borntraeger@linux.ibm.com>
|
||||
S: Supported
|
||||
F: hw/s390x/storage-attributes.h
|
||||
F: hw/s390x/s390-stattrib*.c
|
||||
F: hw/s390/s390-stattrib*.c
|
||||
L: qemu-s390x@nongnu.org
|
||||
|
||||
S390 floating interrupt controller
|
||||
@@ -2680,14 +2589,6 @@ F: hw/usb/canokey.c
|
||||
F: hw/usb/canokey.h
|
||||
F: docs/system/devices/canokey.rst
|
||||
|
||||
Hyper-V Dynamic Memory Protocol
|
||||
M: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
|
||||
S: Supported
|
||||
F: hw/hyperv/hv-balloon*.c
|
||||
F: hw/hyperv/hv-balloon*.h
|
||||
F: include/hw/hyperv/dynmem-proto.h
|
||||
F: include/hw/hyperv/hv-balloon.h
|
||||
|
||||
Subsystems
|
||||
----------
|
||||
Overall Audio backends
|
||||
@@ -2791,13 +2692,12 @@ S: Supported
|
||||
F: util/async.c
|
||||
F: util/aio-*.c
|
||||
F: util/aio-*.h
|
||||
F: util/defer-call.c
|
||||
F: util/fdmon-*.c
|
||||
F: block/io.c
|
||||
F: block/plug.c
|
||||
F: migration/block*
|
||||
F: include/block/aio.h
|
||||
F: include/block/aio-wait.h
|
||||
F: include/qemu/defer-call.h
|
||||
F: scripts/qemugdb/aio.py
|
||||
F: tests/unit/test-fdmon-epoll.c
|
||||
T: git https://github.com/stefanha/qemu.git block
|
||||
@@ -2916,7 +2816,6 @@ F: include/sysemu/dump.h
|
||||
F: qapi/dump.json
|
||||
F: scripts/dump-guest-memory.py
|
||||
F: stubs/dump.c
|
||||
F: docs/specs/vmcoreinfo.rst
|
||||
|
||||
Error reporting
|
||||
M: Markus Armbruster <armbru@redhat.com>
|
||||
@@ -2942,8 +2841,8 @@ F: gdbstub/*
|
||||
F: include/exec/gdbstub.h
|
||||
F: include/gdbstub/*
|
||||
F: gdb-xml/
|
||||
F: tests/tcg/multiarch/gdbstub/*
|
||||
F: scripts/feature_to_c.py
|
||||
F: tests/tcg/multiarch/gdbstub/
|
||||
F: scripts/feature_to_c.sh
|
||||
F: scripts/probe-gdb-support.py
|
||||
|
||||
Memory API
|
||||
@@ -2977,7 +2876,6 @@ F: hw/mem/pc-dimm.c
|
||||
F: include/hw/mem/memory-device.h
|
||||
F: include/hw/mem/nvdimm.h
|
||||
F: include/hw/mem/pc-dimm.h
|
||||
F: stubs/memory_device.c
|
||||
F: docs/nvdimm.txt
|
||||
|
||||
SPICE
|
||||
@@ -3015,7 +2913,7 @@ F: include/qemu/main-loop.h
|
||||
F: include/sysemu/runstate.h
|
||||
F: include/sysemu/runstate-action.h
|
||||
F: util/main-loop.c
|
||||
F: util/qemu-timer*.c
|
||||
F: util/qemu-timer.c
|
||||
F: system/vl.c
|
||||
F: system/main.c
|
||||
F: system/cpus.c
|
||||
@@ -3164,11 +3062,10 @@ M: Michael Roth <michael.roth@amd.com>
|
||||
M: Konstantin Kostiuk <kkostiuk@redhat.com>
|
||||
S: Maintained
|
||||
F: qga/
|
||||
F: contrib/systemd/qemu-guest-agent.service
|
||||
F: docs/interop/qemu-ga.rst
|
||||
F: docs/interop/qemu-ga-ref.rst
|
||||
F: scripts/qemu-guest-agent/
|
||||
F: tests/*/test-qga*
|
||||
F: tests/unit/test-qga.c
|
||||
T: git https://github.com/mdroth/qemu.git qga
|
||||
|
||||
QEMU Guest Agent Win32
|
||||
@@ -3231,7 +3128,6 @@ M: Laurent Vivier <lvivier@redhat.com>
|
||||
R: Paolo Bonzini <pbonzini@redhat.com>
|
||||
S: Maintained
|
||||
F: system/qtest.c
|
||||
F: include/sysemu/qtest.h
|
||||
F: accel/qtest/
|
||||
F: tests/qtest/
|
||||
F: docs/devel/qgraph.rst
|
||||
@@ -3510,12 +3406,6 @@ M: Viktor Prutyanov <viktor.prutyanov@phystech.edu>
|
||||
S: Maintained
|
||||
F: contrib/elf2dmp/
|
||||
|
||||
Overall sensors
|
||||
M: Philippe Mathieu-Daudé <philmd@linaro.org>
|
||||
S: Odd Fixes
|
||||
F: hw/sensor
|
||||
F: include/hw/sensor
|
||||
|
||||
I2C and SMBus
|
||||
M: Corey Minyard <cminyard@mvista.com>
|
||||
S: Maintained
|
||||
@@ -3681,7 +3571,7 @@ M: Alistair Francis <Alistair.Francis@wdc.com>
|
||||
L: qemu-riscv@nongnu.org
|
||||
S: Maintained
|
||||
F: tcg/riscv/
|
||||
F: disas/riscv.[ch]
|
||||
F: disas/riscv.c
|
||||
|
||||
S390 TCG target
|
||||
M: Richard Henderson <richard.henderson@linaro.org>
|
||||
@@ -3953,7 +3843,7 @@ F: docs/block-replication.txt
|
||||
PVRDMA
|
||||
M: Yuval Shaia <yuval.shaia.ml@gmail.com>
|
||||
M: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
|
||||
S: Odd Fixes
|
||||
S: Maintained
|
||||
F: hw/rdma/*
|
||||
F: hw/rdma/vmw/*
|
||||
F: docs/pvrdma.txt
|
||||
@@ -4001,7 +3891,6 @@ M: Jason Wang <jasowang@redhat.com>
|
||||
R: Andrew Melnychenko <andrew@daynix.com>
|
||||
R: Yuri Benditovich <yuri.benditovich@daynix.com>
|
||||
S: Maintained
|
||||
F: docs/devel/ebpf_rss.rst
|
||||
F: ebpf/*
|
||||
F: tools/ebpf/*
|
||||
|
||||
@@ -4018,7 +3907,6 @@ F: .github/workflows/lockdown.yml
|
||||
F: .gitlab-ci.yml
|
||||
F: .gitlab-ci.d/
|
||||
F: .travis.yml
|
||||
F: docs/devel/ci*
|
||||
F: scripts/ci/
|
||||
F: tests/docker/
|
||||
F: tests/vm/
|
||||
@@ -4078,7 +3966,7 @@ F: gitdm.config
|
||||
F: contrib/gitdm/*
|
||||
|
||||
Incompatible changes
|
||||
R: devel@lists.libvirt.org
|
||||
R: libvir-list@redhat.com
|
||||
F: docs/about/deprecated.rst
|
||||
|
||||
Build System
|
||||
|
||||
10
Makefile
10
Makefile
@@ -283,13 +283,6 @@ include $(SRC_PATH)/tests/vm/Makefile.include
|
||||
print-help-run = printf " %-30s - %s\\n" "$1" "$2"
|
||||
print-help = @$(call print-help-run,$1,$2)
|
||||
|
||||
.PHONY: update-linux-vdso
|
||||
update-linux-vdso:
|
||||
@for m in $(SRC_PATH)/linux-user/*/Makefile.vdso; do \
|
||||
$(MAKE) $(SUBDIR_MAKEFLAGS) -C $$(dirname $$m) -f Makefile.vdso \
|
||||
SRC_PATH=$(SRC_PATH) BUILD_DIR=$(BUILD_DIR); \
|
||||
done
|
||||
|
||||
.PHONY: help
|
||||
help:
|
||||
@echo 'Generic targets:'
|
||||
@@ -310,9 +303,6 @@ endif
|
||||
$(call print-help,distclean,Remove all generated files)
|
||||
$(call print-help,dist,Build a distributable tarball)
|
||||
@echo ''
|
||||
@echo 'Linux-user targets:'
|
||||
$(call print-help,update-linux-vdso,Build linux-user vdso images)
|
||||
@echo ''
|
||||
@echo 'Test targets:'
|
||||
$(call print-help,check,Run all tests (check-help for details))
|
||||
$(call print-help,bench,Run all benchmarks)
|
||||
|
||||
@@ -90,6 +90,8 @@ bool kvm_kernel_irqchip;
|
||||
bool kvm_split_irqchip;
|
||||
bool kvm_async_interrupts_allowed;
|
||||
bool kvm_halt_in_kernel_allowed;
|
||||
bool kvm_eventfds_allowed;
|
||||
bool kvm_irqfds_allowed;
|
||||
bool kvm_resamplefds_allowed;
|
||||
bool kvm_msi_via_irqfd_allowed;
|
||||
bool kvm_gsi_routing_allowed;
|
||||
@@ -97,6 +99,8 @@ bool kvm_gsi_direct_mapping;
|
||||
bool kvm_allowed;
|
||||
bool kvm_readonly_mem_allowed;
|
||||
bool kvm_vm_attributes_allowed;
|
||||
bool kvm_direct_msi_allowed;
|
||||
bool kvm_ioeventfd_any_length_allowed;
|
||||
bool kvm_msi_use_devid;
|
||||
bool kvm_has_guest_debug;
|
||||
static int kvm_sstep_flags;
|
||||
@@ -107,9 +111,6 @@ static const KVMCapabilityInfo kvm_required_capabilites[] = {
|
||||
KVM_CAP_INFO(USER_MEMORY),
|
||||
KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
|
||||
KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
|
||||
KVM_CAP_INFO(INTERNAL_ERROR_DATA),
|
||||
KVM_CAP_INFO(IOEVENTFD),
|
||||
KVM_CAP_INFO(IOEVENTFD_ANY_LENGTH),
|
||||
KVM_CAP_LAST_INFO
|
||||
};
|
||||
|
||||
@@ -173,31 +174,13 @@ void kvm_resample_fd_notify(int gsi)
|
||||
}
|
||||
}
|
||||
|
||||
unsigned int kvm_get_max_memslots(void)
|
||||
int kvm_get_max_memslots(void)
|
||||
{
|
||||
KVMState *s = KVM_STATE(current_accel());
|
||||
|
||||
return s->nr_slots;
|
||||
}
|
||||
|
||||
unsigned int kvm_get_free_memslots(void)
|
||||
{
|
||||
unsigned int used_slots = 0;
|
||||
KVMState *s = kvm_state;
|
||||
int i;
|
||||
|
||||
kvm_slots_lock();
|
||||
for (i = 0; i < s->nr_as; i++) {
|
||||
if (!s->as[i].ml) {
|
||||
continue;
|
||||
}
|
||||
used_slots = MAX(used_slots, s->as[i].ml->nr_used_slots);
|
||||
}
|
||||
kvm_slots_unlock();
|
||||
|
||||
return s->nr_slots - used_slots;
|
||||
}
|
||||
|
||||
/* Called with KVMMemoryListener.slots_lock held */
|
||||
static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
|
||||
{
|
||||
@@ -213,6 +196,19 @@ static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
bool kvm_has_free_slot(MachineState *ms)
|
||||
{
|
||||
KVMState *s = KVM_STATE(ms->accelerator);
|
||||
bool result;
|
||||
KVMMemoryListener *kml = &s->memory_listener;
|
||||
|
||||
kvm_slots_lock();
|
||||
result = !!kvm_get_free_slot(kml);
|
||||
kvm_slots_unlock();
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/* Called with KVMMemoryListener.slots_lock held */
|
||||
static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
|
||||
{
|
||||
@@ -1105,6 +1101,13 @@ static void kvm_coalesce_pio_del(MemoryListener *listener,
|
||||
}
|
||||
}
|
||||
|
||||
static MemoryListener kvm_coalesced_pio_listener = {
|
||||
.name = "kvm-coalesced-pio",
|
||||
.coalesced_io_add = kvm_coalesce_pio_add,
|
||||
.coalesced_io_del = kvm_coalesce_pio_del,
|
||||
.priority = MEMORY_LISTENER_PRIORITY_MIN,
|
||||
};
|
||||
|
||||
int kvm_check_extension(KVMState *s, unsigned int extension)
|
||||
{
|
||||
int ret;
|
||||
@@ -1246,6 +1249,43 @@ static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
|
||||
}
|
||||
|
||||
|
||||
static int kvm_check_many_ioeventfds(void)
|
||||
{
|
||||
/* Userspace can use ioeventfd for io notification. This requires a host
|
||||
* that supports eventfd(2) and an I/O thread; since eventfd does not
|
||||
* support SIGIO it cannot interrupt the vcpu.
|
||||
*
|
||||
* Older kernels have a 6 device limit on the KVM io bus. Find out so we
|
||||
* can avoid creating too many ioeventfds.
|
||||
*/
|
||||
#if defined(CONFIG_EVENTFD)
|
||||
int ioeventfds[7];
|
||||
int i, ret = 0;
|
||||
for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
|
||||
ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
|
||||
if (ioeventfds[i] < 0) {
|
||||
break;
|
||||
}
|
||||
ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true);
|
||||
if (ret < 0) {
|
||||
close(ioeventfds[i]);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* Decide whether many devices are supported or not */
|
||||
ret = i == ARRAY_SIZE(ioeventfds);
|
||||
|
||||
while (i-- > 0) {
|
||||
kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true);
|
||||
close(ioeventfds[i]);
|
||||
}
|
||||
return ret;
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
static const KVMCapabilityInfo *
|
||||
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
|
||||
{
|
||||
@@ -1347,7 +1387,6 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
|
||||
}
|
||||
start_addr += slot_size;
|
||||
size -= slot_size;
|
||||
kml->nr_used_slots--;
|
||||
} while (size);
|
||||
return;
|
||||
}
|
||||
@@ -1373,7 +1412,6 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
|
||||
ram_start_offset += slot_size;
|
||||
ram += slot_size;
|
||||
size -= slot_size;
|
||||
kml->nr_used_slots++;
|
||||
} while (size);
|
||||
}
|
||||
|
||||
@@ -1761,8 +1799,6 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
|
||||
|
||||
static MemoryListener kvm_io_listener = {
|
||||
.name = "kvm-io",
|
||||
.coalesced_io_add = kvm_coalesce_pio_add,
|
||||
.coalesced_io_del = kvm_coalesce_pio_del,
|
||||
.eventfd_add = kvm_io_ioeventfd_add,
|
||||
.eventfd_del = kvm_io_ioeventfd_del,
|
||||
.priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND,
|
||||
@@ -1804,7 +1840,7 @@ static void clear_gsi(KVMState *s, unsigned int gsi)
|
||||
|
||||
void kvm_init_irq_routing(KVMState *s)
|
||||
{
|
||||
int gsi_count;
|
||||
int gsi_count, i;
|
||||
|
||||
gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
|
||||
if (gsi_count > 0) {
|
||||
@@ -1816,6 +1852,12 @@ void kvm_init_irq_routing(KVMState *s)
|
||||
s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
|
||||
s->nr_allocated_irq_routes = 0;
|
||||
|
||||
if (!kvm_direct_msi_allowed) {
|
||||
for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
|
||||
QTAILQ_INIT(&s->msi_hashtab[i]);
|
||||
}
|
||||
}
|
||||
|
||||
kvm_arch_init_irq_routing(s);
|
||||
}
|
||||
|
||||
@@ -1935,10 +1977,41 @@ void kvm_irqchip_change_notify(void)
|
||||
notifier_list_notify(&kvm_irqchip_change_notifiers, NULL);
|
||||
}
|
||||
|
||||
static unsigned int kvm_hash_msi(uint32_t data)
|
||||
{
|
||||
/* This is optimized for IA32 MSI layout. However, no other arch shall
|
||||
* repeat the mistake of not providing a direct MSI injection API. */
|
||||
return data & 0xff;
|
||||
}
|
||||
|
||||
static void kvm_flush_dynamic_msi_routes(KVMState *s)
|
||||
{
|
||||
KVMMSIRoute *route, *next;
|
||||
unsigned int hash;
|
||||
|
||||
for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
|
||||
QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
|
||||
kvm_irqchip_release_virq(s, route->kroute.gsi);
|
||||
QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
|
||||
g_free(route);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int kvm_irqchip_get_virq(KVMState *s)
|
||||
{
|
||||
int next_virq;
|
||||
|
||||
/*
|
||||
* PIC and IOAPIC share the first 16 GSI numbers, thus the available
|
||||
* GSI numbers are more than the number of IRQ route. Allocating a GSI
|
||||
* number can succeed even though a new route entry cannot be added.
|
||||
* When this happens, flush dynamic MSI entries to free IRQ route entries.
|
||||
*/
|
||||
if (!kvm_direct_msi_allowed && s->irq_routes->nr == s->gsi_count) {
|
||||
kvm_flush_dynamic_msi_routes(s);
|
||||
}
|
||||
|
||||
/* Return the lowest unused GSI in the bitmap */
|
||||
next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
|
||||
if (next_virq >= s->gsi_count) {
|
||||
@@ -1948,17 +2021,63 @@ static int kvm_irqchip_get_virq(KVMState *s)
|
||||
}
|
||||
}
|
||||
|
||||
static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
|
||||
{
|
||||
unsigned int hash = kvm_hash_msi(msg.data);
|
||||
KVMMSIRoute *route;
|
||||
|
||||
QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) {
|
||||
if (route->kroute.u.msi.address_lo == (uint32_t)msg.address &&
|
||||
route->kroute.u.msi.address_hi == (msg.address >> 32) &&
|
||||
route->kroute.u.msi.data == le32_to_cpu(msg.data)) {
|
||||
return route;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
|
||||
{
|
||||
struct kvm_msi msi;
|
||||
KVMMSIRoute *route;
|
||||
|
||||
msi.address_lo = (uint32_t)msg.address;
|
||||
msi.address_hi = msg.address >> 32;
|
||||
msi.data = le32_to_cpu(msg.data);
|
||||
msi.flags = 0;
|
||||
memset(msi.pad, 0, sizeof(msi.pad));
|
||||
if (kvm_direct_msi_allowed) {
|
||||
msi.address_lo = (uint32_t)msg.address;
|
||||
msi.address_hi = msg.address >> 32;
|
||||
msi.data = le32_to_cpu(msg.data);
|
||||
msi.flags = 0;
|
||||
memset(msi.pad, 0, sizeof(msi.pad));
|
||||
|
||||
return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
|
||||
return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
|
||||
}
|
||||
|
||||
route = kvm_lookup_msi_route(s, msg);
|
||||
if (!route) {
|
||||
int virq;
|
||||
|
||||
virq = kvm_irqchip_get_virq(s);
|
||||
if (virq < 0) {
|
||||
return virq;
|
||||
}
|
||||
|
||||
route = g_new0(KVMMSIRoute, 1);
|
||||
route->kroute.gsi = virq;
|
||||
route->kroute.type = KVM_IRQ_ROUTING_MSI;
|
||||
route->kroute.flags = 0;
|
||||
route->kroute.u.msi.address_lo = (uint32_t)msg.address;
|
||||
route->kroute.u.msi.address_hi = msg.address >> 32;
|
||||
route->kroute.u.msi.data = le32_to_cpu(msg.data);
|
||||
|
||||
kvm_add_routing_entry(s, &route->kroute);
|
||||
kvm_irqchip_commit_routes(s);
|
||||
|
||||
QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
|
||||
entry);
|
||||
}
|
||||
|
||||
assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);
|
||||
|
||||
return kvm_set_irq(s, route->kroute.gsi, 1);
|
||||
}
|
||||
|
||||
int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
|
||||
@@ -2085,6 +2204,10 @@ static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
|
||||
}
|
||||
}
|
||||
|
||||
if (!kvm_irqfds_enabled()) {
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
|
||||
}
|
||||
|
||||
@@ -2245,11 +2368,6 @@ static void kvm_irqchip_create(KVMState *s)
|
||||
return;
|
||||
}
|
||||
|
||||
if (kvm_check_extension(s, KVM_CAP_IRQFD) <= 0) {
|
||||
fprintf(stderr, "kvm: irqfd not implemented\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* First probe and see if there's a arch-specific hook to create the
|
||||
* in-kernel irqchip for us */
|
||||
ret = kvm_arch_irqchip_create(s);
|
||||
@@ -2524,8 +2642,22 @@ static int kvm_init(MachineState *ms)
|
||||
#ifdef KVM_CAP_VCPU_EVENTS
|
||||
s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
|
||||
#endif
|
||||
|
||||
s->robust_singlestep =
|
||||
kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);
|
||||
|
||||
#ifdef KVM_CAP_DEBUGREGS
|
||||
s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
|
||||
#endif
|
||||
|
||||
s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE);
|
||||
|
||||
#ifdef KVM_CAP_IRQ_ROUTING
|
||||
kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
|
||||
#endif
|
||||
|
||||
s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);
|
||||
|
||||
s->irq_set_ioctl = KVM_IRQ_LINE;
|
||||
if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
|
||||
s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
|
||||
@@ -2534,12 +2666,21 @@ static int kvm_init(MachineState *ms)
|
||||
kvm_readonly_mem_allowed =
|
||||
(kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
|
||||
|
||||
kvm_eventfds_allowed =
|
||||
(kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);
|
||||
|
||||
kvm_irqfds_allowed =
|
||||
(kvm_check_extension(s, KVM_CAP_IRQFD) > 0);
|
||||
|
||||
kvm_resamplefds_allowed =
|
||||
(kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);
|
||||
|
||||
kvm_vm_attributes_allowed =
|
||||
(kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);
|
||||
|
||||
kvm_ioeventfd_any_length_allowed =
|
||||
(kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0);
|
||||
|
||||
#ifdef KVM_CAP_SET_GUEST_DEBUG
|
||||
kvm_has_guest_debug =
|
||||
(kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG) > 0);
|
||||
@@ -2576,16 +2717,24 @@ static int kvm_init(MachineState *ms)
|
||||
kvm_irqchip_create(s);
|
||||
}
|
||||
|
||||
s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
|
||||
s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
|
||||
if (kvm_eventfds_allowed) {
|
||||
s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
|
||||
s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
|
||||
}
|
||||
s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
|
||||
s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;
|
||||
|
||||
kvm_memory_listener_register(s, &s->memory_listener,
|
||||
&address_space_memory, 0, "kvm-memory");
|
||||
memory_listener_register(&kvm_io_listener,
|
||||
if (kvm_eventfds_allowed) {
|
||||
memory_listener_register(&kvm_io_listener,
|
||||
&address_space_io);
|
||||
}
|
||||
memory_listener_register(&kvm_coalesced_pio_listener,
|
||||
&address_space_io);
|
||||
|
||||
s->many_ioeventfds = kvm_check_many_ioeventfds();
|
||||
|
||||
s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
|
||||
if (!s->sync_mmu) {
|
||||
ret = ram_block_discard_disable(true);
|
||||
@@ -2638,14 +2787,16 @@ static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direc
|
||||
|
||||
static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
|
||||
{
|
||||
int i;
|
||||
|
||||
fprintf(stderr, "KVM internal error. Suberror: %d\n",
|
||||
run->internal.suberror);
|
||||
|
||||
for (i = 0; i < run->internal.ndata; ++i) {
|
||||
fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n",
|
||||
i, (uint64_t)run->internal.data[i]);
|
||||
if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
|
||||
int i;
|
||||
|
||||
for (i = 0; i < run->internal.ndata; ++i) {
|
||||
fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n",
|
||||
i, (uint64_t)run->internal.data[i]);
|
||||
}
|
||||
}
|
||||
if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
|
||||
fprintf(stderr, "emulation failure\n");
|
||||
@@ -3139,11 +3290,29 @@ int kvm_has_vcpu_events(void)
|
||||
return kvm_state->vcpu_events;
|
||||
}
|
||||
|
||||
int kvm_has_robust_singlestep(void)
|
||||
{
|
||||
return kvm_state->robust_singlestep;
|
||||
}
|
||||
|
||||
int kvm_has_debugregs(void)
|
||||
{
|
||||
return kvm_state->debugregs;
|
||||
}
|
||||
|
||||
int kvm_max_nested_state_length(void)
|
||||
{
|
||||
return kvm_state->max_nested_state_len;
|
||||
}
|
||||
|
||||
int kvm_has_many_ioeventfds(void)
|
||||
{
|
||||
if (!kvm_enabled()) {
|
||||
return 0;
|
||||
}
|
||||
return kvm_state->many_ioeventfds;
|
||||
}
|
||||
|
||||
int kvm_has_gsi_routing(void)
|
||||
{
|
||||
#ifdef KVM_CAP_IRQ_ROUTING
|
||||
@@ -3153,6 +3322,11 @@ int kvm_has_gsi_routing(void)
|
||||
#endif
|
||||
}
|
||||
|
||||
int kvm_has_intx_set_mask(void)
|
||||
{
|
||||
return kvm_state->intx_set_mask;
|
||||
}
|
||||
|
||||
bool kvm_arm_supports_user_irq(void)
|
||||
{
|
||||
return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
|
||||
|
||||
@@ -17,13 +17,17 @@
|
||||
KVMState *kvm_state;
|
||||
bool kvm_kernel_irqchip;
|
||||
bool kvm_async_interrupts_allowed;
|
||||
bool kvm_eventfds_allowed;
|
||||
bool kvm_irqfds_allowed;
|
||||
bool kvm_resamplefds_allowed;
|
||||
bool kvm_msi_via_irqfd_allowed;
|
||||
bool kvm_gsi_routing_allowed;
|
||||
bool kvm_gsi_direct_mapping;
|
||||
bool kvm_allowed;
|
||||
bool kvm_readonly_mem_allowed;
|
||||
bool kvm_ioeventfd_any_length_allowed;
|
||||
bool kvm_msi_use_devid;
|
||||
bool kvm_direct_msi_allowed;
|
||||
|
||||
void kvm_flush_coalesced_mmio_buffer(void)
|
||||
{
|
||||
@@ -38,6 +42,11 @@ bool kvm_has_sync_mmu(void)
|
||||
return false;
|
||||
}
|
||||
|
||||
int kvm_has_many_ioeventfds(void)
|
||||
{
|
||||
return 0;
|
}

int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
return 1;
@@ -83,6 +92,11 @@ void kvm_irqchip_change_notify(void)
{
}

int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
{
return -ENOSYS;
}

int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
EventNotifier *rn, int virq)
{
@@ -95,14 +109,9 @@ int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
return -ENOSYS;
}

unsigned int kvm_get_max_memslots(void)
bool kvm_has_free_slot(MachineState *ms)
{
return 0;
}

unsigned int kvm_get_free_memslots(void)
{
return 0;
return false;
}

void kvm_init_cpu_signals(CPUState *cpu)

@@ -22,6 +22,10 @@ void tlb_set_dirty(CPUState *cpu, vaddr vaddr)
{
}

void tcg_flush_jmp_cache(CPUState *cpu)
{
}

int probe_access_flags(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr)

@@ -24,7 +24,6 @@
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/tb-flush.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
@@ -322,6 +321,21 @@ static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
}
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
CPUState *cpu;
size_t full = 0, part = 0, elide = 0;

CPU_FOREACH(cpu) {
full += qatomic_read(&cpu->neg.tlb.c.full_flush_count);
part += qatomic_read(&cpu->neg.tlb.c.part_flush_count);
elide += qatomic_read(&cpu->neg.tlb.c.elide_flush_count);
}
*pfull = full;
*ppart = part;
*pelide = elide;
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
uint16_t asked = data.host_int;
@@ -2692,7 +2706,7 @@ static uint64_t do_st16_leN(CPUState *cpu, MMULookupPageData *p,

case MO_ATOM_WITHIN16_PAIR:
/* Since size > 8, this is the half that must be atomic. */
if (!HAVE_CMPXCHG128) {
if (!HAVE_ATOMIC128_RW) {
cpu_loop_exit_atomic(cpu, ra);
}
return store_whole_le16(p->haddr, p->size, val_le);

@@ -14,6 +14,8 @@
extern int64_t max_delay;
extern int64_t max_advance;

void dump_exec_info(GString *buf);

/*
* Return true if CS is not running in parallel with other cpus, either
* because there are no other cpus or we are within an exclusive context.

@@ -825,7 +825,7 @@ static uint64_t store_whole_le16(void *pv, int size, Int128 val_le)
int sh = o * 8;
Int128 m, v;

qemu_build_assert(HAVE_CMPXCHG128);
qemu_build_assert(HAVE_ATOMIC128_RW);

/* Like MAKE_64BIT_MASK(0, sz), but larger. */
if (sz <= 64) {
@@ -887,7 +887,7 @@ static void store_atom_2(CPUState *cpu, uintptr_t ra,
return;
}
} else if ((pi & 15) == 7) {
if (HAVE_CMPXCHG128) {
if (HAVE_ATOMIC128_RW) {
Int128 v = int128_lshift(int128_make64(val), 56);
Int128 m = int128_lshift(int128_make64(0xffff), 56);
store_atom_insert_al16(pv - 7, v, m);
@@ -956,7 +956,7 @@ static void store_atom_4(CPUState *cpu, uintptr_t ra,
return;
}
} else {
if (HAVE_CMPXCHG128) {
if (HAVE_ATOMIC128_RW) {
store_whole_le16(pv, 4, int128_make64(cpu_to_le32(val)));
return;
}
@@ -1021,7 +1021,7 @@ static void store_atom_8(CPUState *cpu, uintptr_t ra,
}
break;
case MO_64:
if (HAVE_CMPXCHG128) {
if (HAVE_ATOMIC128_RW) {
store_whole_le16(pv, 8, int128_make64(cpu_to_le64(val)));
return;
}
@@ -1076,7 +1076,7 @@ static void store_atom_16(CPUState *cpu, uintptr_t ra,
}
break;
case -MO_64:
if (HAVE_CMPXCHG128) {
if (HAVE_ATOMIC128_RW) {
uint64_t val_le;
int s2 = pi & 15;
int s1 = 16 - s2;
@@ -1103,6 +1103,10 @@ static void store_atom_16(CPUState *cpu, uintptr_t ra,
}
break;
case MO_128:
if (HAVE_ATOMIC128_RW) {
atomic16_set(pv, val);
return;
}
break;
default:
g_assert_not_reached();

@@ -8,7 +8,6 @@

#include "qemu/osdep.h"
#include "qemu/accel.h"
#include "qemu/qht.h"
#include "qapi/error.h"
#include "qapi/type-helpers.h"
#include "qapi/qapi-commands-machine.h"
@@ -18,7 +17,6 @@
#include "sysemu/tcg.h"
#include "tcg/tcg.h"
#include "internal-common.h"
#include "tb-context.h"


static void dump_drift_info(GString *buf)
@@ -52,153 +50,6 @@ static void dump_accel_info(GString *buf)
one_insn_per_tb ? "on" : "off");
}

static void print_qht_statistics(struct qht_stats hst, GString *buf)
{
uint32_t hgram_opts;
size_t hgram_bins;
char *hgram;

if (!hst.head_buckets) {
return;
}
g_string_append_printf(buf, "TB hash buckets %zu/%zu "
"(%0.2f%% head buckets used)\n",
hst.used_head_buckets, hst.head_buckets,
(double)hst.used_head_buckets /
hst.head_buckets * 100);

hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
hgram_opts |= QDIST_PR_NODECIMAL;
}
hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
g_string_append_printf(buf, "TB hash occupancy %0.2f%% avg chain occ. "
"Histogram: %s\n",
qdist_avg(&hst.occupancy) * 100, hgram);
g_free(hgram);

hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
if (hgram_bins > 10) {
hgram_bins = 10;
} else {
hgram_bins = 0;
hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
}
hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
g_string_append_printf(buf, "TB hash avg chain %0.3f buckets. "
"Histogram: %s\n",
qdist_avg(&hst.chain), hgram);
g_free(hgram);
}

struct tb_tree_stats {
size_t nb_tbs;
size_t host_size;
size_t target_size;
size_t max_target_size;
size_t direct_jmp_count;
size_t direct_jmp2_count;
size_t cross_page;
};

static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
{
const TranslationBlock *tb = value;
struct tb_tree_stats *tst = data;

tst->nb_tbs++;
tst->host_size += tb->tc.size;
tst->target_size += tb->size;
if (tb->size > tst->max_target_size) {
tst->max_target_size = tb->size;
}
if (tb->page_addr[1] != -1) {
tst->cross_page++;
}
if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {
tst->direct_jmp_count++;
if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
tst->direct_jmp2_count++;
}
}
return false;
}

static void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
CPUState *cpu;
size_t full = 0, part = 0, elide = 0;

CPU_FOREACH(cpu) {
full += qatomic_read(&cpu->neg.tlb.c.full_flush_count);
part += qatomic_read(&cpu->neg.tlb.c.part_flush_count);
elide += qatomic_read(&cpu->neg.tlb.c.elide_flush_count);
}
*pfull = full;
*ppart = part;
*pelide = elide;
}

static void tcg_dump_info(GString *buf)
{
g_string_append_printf(buf, "[TCG profiler not compiled]\n");
}

static void dump_exec_info(GString *buf)
{
struct tb_tree_stats tst = {};
struct qht_stats hst;
size_t nb_tbs, flush_full, flush_part, flush_elide;

tcg_tb_foreach(tb_tree_stats_iter, &tst);
nb_tbs = tst.nb_tbs;
/* XXX: avoid using doubles ? */
g_string_append_printf(buf, "Translation buffer state:\n");
/*
* Report total code size including the padding and TB structs;
* otherwise users might think "-accel tcg,tb-size" is not honoured.
* For avg host size we use the precise numbers from tb_tree_stats though.
*/
g_string_append_printf(buf, "gen code size %zu/%zu\n",
tcg_code_size(), tcg_code_capacity());
g_string_append_printf(buf, "TB count %zu\n", nb_tbs);
g_string_append_printf(buf, "TB avg target size %zu max=%zu bytes\n",
nb_tbs ? tst.target_size / nb_tbs : 0,
tst.max_target_size);
g_string_append_printf(buf, "TB avg host size %zu bytes "
"(expansion ratio: %0.1f)\n",
nb_tbs ? tst.host_size / nb_tbs : 0,
tst.target_size ?
(double)tst.host_size / tst.target_size : 0);
g_string_append_printf(buf, "cross page TB count %zu (%zu%%)\n",
tst.cross_page,
nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
g_string_append_printf(buf, "direct jump count %zu (%zu%%) "
"(2 jumps=%zu %zu%%)\n",
tst.direct_jmp_count,
nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
tst.direct_jmp2_count,
nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);

qht_statistics_init(&tb_ctx.htable, &hst);
print_qht_statistics(hst, buf);
qht_statistics_destroy(&hst);

g_string_append_printf(buf, "\nStatistics:\n");
g_string_append_printf(buf, "TB flush count %u\n",
qatomic_read(&tb_ctx.tb_flush_count));
g_string_append_printf(buf, "TB invalidate count %u\n",
qatomic_read(&tb_ctx.tb_phys_invalidate_count));

tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
g_string_append_printf(buf, "TLB full flushes %zu\n", flush_full);
g_string_append_printf(buf, "TLB partial flushes %zu\n", flush_part);
g_string_append_printf(buf, "TLB elided flushes %zu\n", flush_elide);
tcg_dump_info(buf);
}

HumanReadableText *qmp_x_query_jit(Error **errp)
{
g_autoptr(GString) buf = g_string_new("");
@@ -215,11 +66,6 @@ HumanReadableText *qmp_x_query_jit(Error **errp)
return human_readable_text_from_str(buf);
}

static void tcg_dump_op_count(GString *buf)
{
g_string_append_printf(buf, "[TCG profiler not compiled]\n");
}

HumanReadableText *qmp_x_query_opcount(Error **errp)
{
g_autoptr(GString) buf = g_string_new("");

@@ -327,7 +327,8 @@ static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op)
return op;
}

static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *func, int *cb_idx)
static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *empty_func,
void *func, int *cb_idx)
{
TCGOp *old_op;
int func_idx;
@@ -371,7 +372,8 @@ static TCGOp *append_udata_cb(const struct qemu_plugin_dyn_cb *cb,
}

/* call */
op = copy_call(&begin_op, op, cb->f.vcpu_udata, cb_idx);
op = copy_call(&begin_op, op, HELPER(plugin_vcpu_udata_cb),
cb->f.vcpu_udata, cb_idx);

return op;
}
@@ -418,7 +420,8 @@ static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb,

if (type == PLUGIN_GEN_CB_MEM) {
/* call */
op = copy_call(&begin_op, op, cb->f.vcpu_udata, cb_idx);
op = copy_call(&begin_op, op, HELPER(plugin_vcpu_mem_cb),
cb->f.vcpu_udata, cb_idx);
}

return op;
@@ -863,14 +866,10 @@ void plugin_gen_insn_end(void)
* do any clean-up here and make sure things are reset in
* plugin_gen_tb_start.
*/
void plugin_gen_tb_end(CPUState *cpu, size_t num_insns)
void plugin_gen_tb_end(CPUState *cpu)
{
struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;

/* translator may have removed instructions, update final count */
g_assert(num_insns <= ptb->n);
ptb->n = num_insns;

/* collect instrumentation requests */
qemu_plugin_tb_trans_cb(cpu, ptb);


@@ -34,7 +34,6 @@
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/hwaddr.h"
#include "exec/tb-flush.h"
#include "exec/gdbstub.h"

#include "tcg-accel-ops.h"
@@ -78,13 +77,6 @@ int tcg_cpus_exec(CPUState *cpu)
return ret;
}

static void tcg_cpu_reset_hold(CPUState *cpu)
{
tcg_flush_jmp_cache(cpu);

tlb_flush(cpu);
}

/* mask must never be zero, except for A20 change call */
void tcg_handle_interrupt(CPUState *cpu, int mask)
{
@@ -213,7 +205,6 @@ static void tcg_accel_ops_init(AccelOpsClass *ops)
}
}

ops->cpu_reset_hold = tcg_cpu_reset_hold;
ops->supports_guest_debug = tcg_supports_guest_debug;
ops->insert_breakpoint = tcg_insert_breakpoint;
ops->remove_breakpoint = tcg_remove_breakpoint;

@@ -645,6 +645,133 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
cpu_loop_exit_noexc(cpu);
}

static void print_qht_statistics(struct qht_stats hst, GString *buf)
{
uint32_t hgram_opts;
size_t hgram_bins;
char *hgram;

if (!hst.head_buckets) {
return;
}
g_string_append_printf(buf, "TB hash buckets %zu/%zu "
"(%0.2f%% head buckets used)\n",
hst.used_head_buckets, hst.head_buckets,
(double)hst.used_head_buckets /
hst.head_buckets * 100);

hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
hgram_opts |= QDIST_PR_NODECIMAL;
}
hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
g_string_append_printf(buf, "TB hash occupancy %0.2f%% avg chain occ. "
"Histogram: %s\n",
qdist_avg(&hst.occupancy) * 100, hgram);
g_free(hgram);

hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
if (hgram_bins > 10) {
hgram_bins = 10;
} else {
hgram_bins = 0;
hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
}
hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
g_string_append_printf(buf, "TB hash avg chain %0.3f buckets. "
"Histogram: %s\n",
qdist_avg(&hst.chain), hgram);
g_free(hgram);
}

struct tb_tree_stats {
size_t nb_tbs;
size_t host_size;
size_t target_size;
size_t max_target_size;
size_t direct_jmp_count;
size_t direct_jmp2_count;
size_t cross_page;
};

static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
{
const TranslationBlock *tb = value;
struct tb_tree_stats *tst = data;

tst->nb_tbs++;
tst->host_size += tb->tc.size;
tst->target_size += tb->size;
if (tb->size > tst->max_target_size) {
tst->max_target_size = tb->size;
}
if (tb_page_addr1(tb) != -1) {
tst->cross_page++;
}
if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {
tst->direct_jmp_count++;
if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
tst->direct_jmp2_count++;
}
}
return false;
}

void dump_exec_info(GString *buf)
{
struct tb_tree_stats tst = {};
struct qht_stats hst;
size_t nb_tbs, flush_full, flush_part, flush_elide;

tcg_tb_foreach(tb_tree_stats_iter, &tst);
nb_tbs = tst.nb_tbs;
/* XXX: avoid using doubles ? */
g_string_append_printf(buf, "Translation buffer state:\n");
/*
* Report total code size including the padding and TB structs;
* otherwise users might think "-accel tcg,tb-size" is not honoured.
* For avg host size we use the precise numbers from tb_tree_stats though.
*/
g_string_append_printf(buf, "gen code size %zu/%zu\n",
tcg_code_size(), tcg_code_capacity());
g_string_append_printf(buf, "TB count %zu\n", nb_tbs);
g_string_append_printf(buf, "TB avg target size %zu max=%zu bytes\n",
nb_tbs ? tst.target_size / nb_tbs : 0,
tst.max_target_size);
g_string_append_printf(buf, "TB avg host size %zu bytes "
"(expansion ratio: %0.1f)\n",
nb_tbs ? tst.host_size / nb_tbs : 0,
tst.target_size ?
(double)tst.host_size / tst.target_size : 0);
g_string_append_printf(buf, "cross page TB count %zu (%zu%%)\n",
tst.cross_page,
nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
g_string_append_printf(buf, "direct jump count %zu (%zu%%) "
"(2 jumps=%zu %zu%%)\n",
tst.direct_jmp_count,
nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
tst.direct_jmp2_count,
nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);

qht_statistics_init(&tb_ctx.htable, &hst);
print_qht_statistics(hst, buf);
qht_statistics_destroy(&hst);

g_string_append_printf(buf, "\nStatistics:\n");
g_string_append_printf(buf, "TB flush count %u\n",
qatomic_read(&tb_ctx.tb_flush_count));
g_string_append_printf(buf, "TB invalidate count %u\n",
qatomic_read(&tb_ctx.tb_phys_invalidate_count));

tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
g_string_append_printf(buf, "TLB full flushes %zu\n", flush_full);
g_string_append_printf(buf, "TLB partial flushes %zu\n", flush_part);
g_string_append_printf(buf, "TLB elided flushes %zu\n", flush_elide);
tcg_dump_info(buf);
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
@@ -673,3 +800,11 @@ void tcg_flush_jmp_cache(CPUState *cpu)
qatomic_set(&jc->array[i].tb, NULL);
}
}

/* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
void tcg_flush_softmmu_tlb(CPUState *cs)
{
#ifdef CONFIG_SOFTMMU
tlb_flush(cs);
#endif
}

@@ -158,7 +158,6 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
} else {
plugin_enabled = plugin_gen_tb_start(cpu, db, false);
}
db->plugin_enabled = plugin_enabled;

while (true) {
*max_insns = ++db->num_insns;
@@ -210,7 +209,7 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
gen_tb_end(tb, cflags, icount_start_insn, db->num_insns);

if (plugin_enabled) {
plugin_gen_tb_end(cpu, db->num_insns);
plugin_gen_tb_end(cpu);
}

/* The disas_log hook may use these values rather than recompute. */

@@ -14,10 +14,6 @@ void qemu_init_vcpu(CPUState *cpu)
{
}

void cpu_exec_reset_hold(CPUState *cpu)
{
}

/* User mode emulation does not support record/replay yet. */

bool replay_exception(void)

@@ -1781,7 +1781,7 @@ static AudioState *audio_init(Audiodev *dev, Error **errp)

QTAILQ_INSERT_TAIL(&audio_states, s, list);
QLIST_INIT (&s->card_head);
vmstate_register_any(NULL, &vmstate_audio, s);
vmstate_register (NULL, 0, &vmstate_audio, s);
return s;

out:

@@ -97,10 +97,6 @@ static int wav_init_out(HWVoiceOut *hw, struct audsettings *as,
dolog ("WAVE files can not handle 32bit formats\n");
return -1;

case AUDIO_FORMAT_F32:
dolog("WAVE files can not handle float formats\n");
return -1;

default:
abort();
}

@@ -426,7 +426,8 @@ dbus_vmstate_complete(UserCreatable *uc, Error **errp)
return;
}

if (vmstate_register_any(VMSTATE_IF(self), &dbus_vmstate, self) < 0) {
if (vmstate_register(VMSTATE_IF(self), VMSTATE_INSTANCE_ID_ANY,
&dbus_vmstate, self) < 0) {
error_setg(errp, "Failed to register vmstate");
}
}

@@ -534,8 +534,11 @@ static int tpm_emulator_block_migration(TPMEmulator *tpm_emu)
error_setg(&tpm_emu->migration_blocker,
"Migration disabled: TPM emulator does not support "
"migration");
if (migrate_add_blocker(&tpm_emu->migration_blocker, &err) < 0) {
if (migrate_add_blocker(tpm_emu->migration_blocker, &err) < 0) {
error_report_err(err);
error_free(tpm_emu->migration_blocker);
tpm_emu->migration_blocker = NULL;

return -1;
}
}
@@ -975,7 +978,8 @@ static void tpm_emulator_inst_init(Object *obj)
qemu_add_vm_change_state_handler(tpm_emulator_vm_state_change,
tpm_emu);

vmstate_register_any(NULL, &vmstate_tpm_emulator, obj);
vmstate_register(NULL, VMSTATE_INSTANCE_ID_ANY,
&vmstate_tpm_emulator, obj);
}

/*
@@ -1012,7 +1016,10 @@ static void tpm_emulator_inst_finalize(Object *obj)

qapi_free_TPMEmulatorOptions(tpm_emu->options);

migrate_del_blocker(&tpm_emu->migration_blocker);
if (tpm_emu->migration_blocker) {
migrate_del_blocker(tpm_emu->migration_blocker);
error_free(tpm_emu->migration_blocker);
}

tpm_sized_buffer_reset(&state_blobs->volatil);
tpm_sized_buffer_reset(&state_blobs->permanent);

294 block.c
@@ -279,9 +279,8 @@ bool bdrv_is_read_only(BlockDriverState *bs)
return !(bs->open_flags & BDRV_O_RDWR);
}

static int GRAPH_RDLOCK
bdrv_can_set_read_only(BlockDriverState *bs, bool read_only,
bool ignore_allow_rdw, Error **errp)
static int bdrv_can_set_read_only(BlockDriverState *bs, bool read_only,
bool ignore_allow_rdw, Error **errp)
{
IO_CODE();

@@ -372,9 +371,8 @@ char *bdrv_get_full_backing_filename_from_filename(const char *backed,
* setting @errp. In all other cases, NULL will only be returned with
* @errp set.
*/
static char * GRAPH_RDLOCK
bdrv_make_absolute_filename(BlockDriverState *relative_to,
const char *filename, Error **errp)
static char *bdrv_make_absolute_filename(BlockDriverState *relative_to,
const char *filename, Error **errp)
{
char *dir, *full_name;

@@ -820,17 +818,12 @@ int bdrv_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo)
{
BlockDriver *drv = bs->drv;
BlockDriverState *filtered;

BlockDriverState *filtered = bdrv_filter_bs(bs);
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

if (drv && drv->bdrv_probe_geometry) {
return drv->bdrv_probe_geometry(bs, geo);
}

filtered = bdrv_filter_bs(bs);
if (filtered) {
} else if (filtered) {
return bdrv_probe_geometry(filtered, geo);
}

@@ -1199,19 +1192,19 @@ static char *bdrv_child_get_parent_desc(BdrvChild *c)
return g_strdup_printf("node '%s'", bdrv_get_node_name(parent));
}

static void GRAPH_RDLOCK bdrv_child_cb_drained_begin(BdrvChild *child)
static void bdrv_child_cb_drained_begin(BdrvChild *child)
{
BlockDriverState *bs = child->opaque;
bdrv_do_drained_begin_quiesce(bs, NULL);
}

static bool GRAPH_RDLOCK bdrv_child_cb_drained_poll(BdrvChild *child)
static bool bdrv_child_cb_drained_poll(BdrvChild *child)
{
BlockDriverState *bs = child->opaque;
return bdrv_drain_poll(bs, NULL, false);
}

static void GRAPH_RDLOCK bdrv_child_cb_drained_end(BdrvChild *child)
static void bdrv_child_cb_drained_end(BdrvChild *child)
{
BlockDriverState *bs = child->opaque;
bdrv_drained_end(bs);
@@ -1257,7 +1250,7 @@ static void bdrv_temp_snapshot_options(int *child_flags, QDict *child_options,
*child_flags &= ~BDRV_O_NATIVE_AIO;
}

static void GRAPH_WRLOCK bdrv_backing_attach(BdrvChild *c)
static void bdrv_backing_attach(BdrvChild *c)
{
BlockDriverState *parent = c->opaque;
BlockDriverState *backing_hd = c->bs;
@@ -1707,14 +1700,12 @@ bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv, const char *node_name,
return 0;
open_failed:
bs->drv = NULL;

bdrv_graph_wrlock(NULL);
if (bs->file != NULL) {
bdrv_graph_wrlock(NULL);
bdrv_unref_child(bs, bs->file);
bdrv_graph_wrunlock();
assert(!bs->file);
}
bdrv_graph_wrunlock();

g_free(bs->opaque);
bs->opaque = NULL;
return ret;
@@ -1856,12 +1847,9 @@ static int bdrv_open_common(BlockDriverState *bs, BlockBackend *file,
Error *local_err = NULL;
bool ro;

GLOBAL_STATE_CODE();

bdrv_graph_rdlock_main_loop();
assert(bs->file == NULL);
assert(options != NULL && bs->options != options);
bdrv_graph_rdunlock_main_loop();
GLOBAL_STATE_CODE();

opts = qemu_opts_create(&bdrv_runtime_opts, NULL, 0, &error_abort);
if (!qemu_opts_absorb_qdict(opts, options, errp)) {
@@ -1886,10 +1874,7 @@ static int bdrv_open_common(BlockDriverState *bs, BlockBackend *file,
}

if (file != NULL) {
bdrv_graph_rdlock_main_loop();
bdrv_refresh_filename(blk_bs(file));
bdrv_graph_rdunlock_main_loop();

filename = blk_bs(file)->filename;
} else {
/*
@@ -1916,9 +1901,7 @@ static int bdrv_open_common(BlockDriverState *bs, BlockBackend *file,

if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, ro)) {
if (!ro && bdrv_is_whitelisted(drv, true)) {
bdrv_graph_rdlock_main_loop();
ret = bdrv_apply_auto_read_only(bs, NULL, NULL);
bdrv_graph_rdunlock_main_loop();
} else {
ret = -ENOTSUP;
}
@@ -2983,8 +2966,6 @@ static void bdrv_child_free(BdrvChild *child)
{
assert(!child->bs);
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

assert(!child->next.le_prev); /* not in children list */

g_free(child->name);
@@ -3219,6 +3200,8 @@ BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,

GLOBAL_STATE_CODE();

bdrv_graph_wrlock(child_bs);

child = bdrv_attach_child_common(child_bs, child_name, child_class,
child_role, perm, shared_perm, opaque,
tran, errp);
@@ -3231,8 +3214,9 @@ BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,

out:
tran_finalize(tran, ret);
bdrv_graph_wrunlock();

bdrv_schedule_unref(child_bs);
bdrv_unref(child_bs);

return ret < 0 ? NULL : child;
}
@@ -3537,7 +3521,19 @@ out:
*
* If a backing child is already present (i.e. we're detaching a node), that
* child node must be drained.
*
* After calling this function, the transaction @tran may only be completed
* while holding a writer lock for the graph.
*/
static int GRAPH_WRLOCK
bdrv_set_backing_noperm(BlockDriverState *bs,
BlockDriverState *backing_hd,
Transaction *tran, Error **errp)
{
GLOBAL_STATE_CODE();
return bdrv_set_file_or_backing_noperm(bs, backing_hd, true, tran, errp);
}

int bdrv_set_backing_hd_drained(BlockDriverState *bs,
BlockDriverState *backing_hd,
Error **errp)
@@ -3550,8 +3546,9 @@ int bdrv_set_backing_hd_drained(BlockDriverState *bs,
if (bs->backing) {
assert(bs->backing->bs->quiesce_counter > 0);
}
bdrv_graph_wrlock(backing_hd);

ret = bdrv_set_file_or_backing_noperm(bs, backing_hd, true, tran, errp);
ret = bdrv_set_backing_noperm(bs, backing_hd, tran, errp);
if (ret < 0) {
goto out;
}
@@ -3559,25 +3556,20 @@ int bdrv_set_backing_hd_drained(BlockDriverState *bs,
ret = bdrv_refresh_perms(bs, tran, errp);
out:
tran_finalize(tran, ret);
bdrv_graph_wrunlock();
return ret;
}

int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd,
Error **errp)
{
BlockDriverState *drain_bs;
BlockDriverState *drain_bs = bs->backing ? bs->backing->bs : bs;
int ret;
GLOBAL_STATE_CODE();

bdrv_graph_rdlock_main_loop();
drain_bs = bs->backing ? bs->backing->bs : bs;
bdrv_graph_rdunlock_main_loop();

bdrv_ref(drain_bs);
bdrv_drained_begin(drain_bs);
bdrv_graph_wrlock(backing_hd);
ret = bdrv_set_backing_hd_drained(bs, backing_hd, errp);
bdrv_graph_wrunlock();
bdrv_drained_end(drain_bs);
bdrv_unref(drain_bs);

@@ -3611,7 +3603,6 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
Error *local_err = NULL;

GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

if (bs->backing != NULL) {
goto free_exit;
@@ -4323,8 +4314,8 @@ static int bdrv_reset_options_allowed(BlockDriverState *bs,
/*
* Returns true if @child can be reached recursively from @bs
*/
static bool GRAPH_RDLOCK
bdrv_recurse_has_child(BlockDriverState *bs, BlockDriverState *child)
static bool bdrv_recurse_has_child(BlockDriverState *bs,
BlockDriverState *child)
{
BdrvChild *c;

@@ -4365,12 +4356,15 @@ bdrv_recurse_has_child(BlockDriverState *bs, BlockDriverState *child)
*
* To be called with bs->aio_context locked.
*/
static BlockReopenQueue * GRAPH_RDLOCK
bdrv_reopen_queue_child(BlockReopenQueue *bs_queue, BlockDriverState *bs,
QDict *options, const BdrvChildClass *klass,
BdrvChildRole role, bool parent_is_format,
QDict *parent_options, int parent_flags,
bool keep_old_opts)
static BlockReopenQueue *bdrv_reopen_queue_child(BlockReopenQueue *bs_queue,
BlockDriverState *bs,
QDict *options,
const BdrvChildClass *klass,
BdrvChildRole role,
bool parent_is_format,
QDict *parent_options,
int parent_flags,
bool keep_old_opts)
{
assert(bs != NULL);

@@ -4382,11 +4376,6 @@ bdrv_reopen_queue_child(BlockReopenQueue *bs_queue, BlockDriverState *bs,

GLOBAL_STATE_CODE();

/*
* Strictly speaking, draining is illegal under GRAPH_RDLOCK. We know that
* we've been called with bdrv_graph_rdlock_main_loop(), though, so it's ok
* in practice.
*/
bdrv_drained_begin(bs);

if (bs_queue == NULL) {
@@ -4528,7 +4517,6 @@ BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
QDict *options, bool keep_old_opts)
{
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

return bdrv_reopen_queue_child(bs_queue, bs, options, NULL, 0, false,
NULL, 0, keep_old_opts);
@@ -4748,20 +4736,18 @@ int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only,
* Callers must make sure that their AioContext locking is still correct after
* this.
*/
static int GRAPH_UNLOCKED
bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
bool is_backing, Transaction *tran,
Error **errp)
static int bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
bool is_backing, Transaction *tran,
Error **errp)
{
BlockDriverState *bs = reopen_state->bs;
BlockDriverState *new_child_bs;
BlockDriverState *old_child_bs;

BlockDriverState *old_child_bs = is_backing ? child_bs(bs->backing) :
child_bs(bs->file);
const char *child_name = is_backing ? "backing" : "file";
QObject *value;
const char *str;
AioContext *ctx, *old_ctx;
bool has_child;
int ret;

GLOBAL_STATE_CODE();
@@ -4771,8 +4757,6 @@ bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
return 0;
}

bdrv_graph_rdlock_main_loop();

switch (qobject_type(value)) {
case QTYPE_QNULL:
assert(is_backing); /* The 'file' option does not allow a null value */
@@ -4782,16 +4766,11 @@ bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
str = qstring_get_str(qobject_to(QString, value));
new_child_bs = bdrv_lookup_bs(NULL, str, errp);
if (new_child_bs == NULL) {
ret = -EINVAL;
goto out_rdlock;
}

has_child = bdrv_recurse_has_child(new_child_bs, bs);
if (has_child) {
return -EINVAL;
} else if (bdrv_recurse_has_child(new_child_bs, bs)) {
error_setg(errp, "Making '%s' a %s child of '%s' would create a "
"cycle", str, child_name, bs->node_name);
ret = -EINVAL;
goto out_rdlock;
return -EINVAL;
}
break;
default:
@@ -4802,23 +4781,19 @@ bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
g_assert_not_reached();
}

old_child_bs = is_backing ? child_bs(bs->backing) : child_bs(bs->file);
if (old_child_bs == new_child_bs) {
ret = 0;
goto out_rdlock;
return 0;
}

if (old_child_bs) {
if (bdrv_skip_implicit_filters(old_child_bs) == new_child_bs) {
ret = 0;
goto out_rdlock;
return 0;
}

if (old_child_bs->implicit) {
error_setg(errp, "Cannot replace implicit %s child of %s",
child_name, bs->node_name);
ret = -EPERM;
goto out_rdlock;
return -EPERM;
}
}

@@ -4829,8 +4804,7 @@ bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
*/
error_setg(errp, "'%s' is a %s filter node that does not support a "
"%s child", bs->node_name, bs->drv->format_name, child_name);
ret = -EINVAL;
goto out_rdlock;
return -EINVAL;
}

if (is_backing) {
@@ -4851,7 +4825,6 @@ bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
aio_context_acquire(ctx);
}

bdrv_graph_rdunlock_main_loop();
bdrv_graph_wrlock(new_child_bs);

ret = bdrv_set_file_or_backing_noperm(bs, new_child_bs, is_backing,
@@ -4870,10 +4843,6 @@ bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
}

return ret;

out_rdlock:
bdrv_graph_rdunlock_main_loop();
return ret;
}

/*
@@ -4897,9 +4866,9 @@ out_rdlock:
* After calling this function, the transaction @change_child_tran may only be
* completed while holding a writer lock for the graph.
*/
static int GRAPH_UNLOCKED
bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
Transaction *change_child_tran, Error **errp)
static int bdrv_reopen_prepare(BDRVReopenState *reopen_state,
BlockReopenQueue *queue,
Transaction *change_child_tran, Error **errp)
{
int ret = -1;
int old_flags;
@@ -4961,10 +4930,7 @@ bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
* to r/w. Attempting to set to r/w may fail if either BDRV_O_ALLOW_RDWR is
* not set, or if the BDS still has copy_on_read enabled */
read_only = !(reopen_state->flags & BDRV_O_RDWR);

bdrv_graph_rdlock_main_loop();
ret = bdrv_can_set_read_only(reopen_state->bs, read_only, true, &local_err);
bdrv_graph_rdunlock_main_loop();
if (local_err) {
error_propagate(errp, local_err);
goto error;
@@ -4987,9 +4953,7 @@ bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
if (local_err != NULL) {
error_propagate(errp, local_err);
} else {
bdrv_graph_rdlock_main_loop();
bdrv_refresh_filename(reopen_state->bs);
bdrv_graph_rdunlock_main_loop();
error_setg(errp, "failed while preparing to reopen image '%s'",
reopen_state->bs->filename);
}
@@ -4998,11 +4962,9 @@ bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
} else {
/* It is currently mandatory to have a bdrv_reopen_prepare()
* handler for each supported drv. */
bdrv_graph_rdlock_main_loop();
error_setg(errp, "Block format '%s' used by node '%s' "
"does not support reopening files", drv->format_name,
bdrv_get_device_or_node_name(reopen_state->bs));
bdrv_graph_rdunlock_main_loop();
ret = -1;
goto error;
}
@@ -5014,16 +4976,13 @@ bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
* file or if the image file has a backing file name as part of
* its metadata. Otherwise the 'backing' option can be omitted.
*/
bdrv_graph_rdlock_main_loop();
if (drv->supports_backing && reopen_state->backing_missing &&
(reopen_state->bs->backing || reopen_state->bs->backing_file[0])) {
error_setg(errp, "backing is missing for '%s'",
reopen_state->bs->node_name);
bdrv_graph_rdunlock_main_loop();
ret = -EINVAL;
goto error;
}
bdrv_graph_rdunlock_main_loop();

/*
* Allow changing the 'backing' option. The new value can be
@@ -5051,8 +5010,6 @@ bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
if (qdict_size(reopen_state->options)) {
const QDictEntry *entry = qdict_first(reopen_state->options);

GRAPH_RDLOCK_GUARD_MAINLOOP();

do {
QObject *new = entry->value;
QObject *old = qdict_get(reopen_state->bs->options, entry->key);
@@ -5126,7 +5083,7 @@ error:
* makes them final by swapping the staging BlockDriverState contents into
* the active BlockDriverState contents.
*/
static void GRAPH_UNLOCKED bdrv_reopen_commit(BDRVReopenState *reopen_state)
static void bdrv_reopen_commit(BDRVReopenState *reopen_state)
{
BlockDriver *drv;
BlockDriverState *bs;
@@ -5143,8 +5100,6 @@ static void GRAPH_UNLOCKED bdrv_reopen_commit(BDRVReopenState *reopen_state)
drv->bdrv_reopen_commit(reopen_state);
}

GRAPH_RDLOCK_GUARD_MAINLOOP();

/* set BDS specific flags now */
qobject_unref(bs->explicit_options);
qobject_unref(bs->options);
@@ -5166,7 +5121,9 @@ static void GRAPH_UNLOCKED bdrv_reopen_commit(BDRVReopenState *reopen_state)
qdict_del(bs->explicit_options, "backing");
qdict_del(bs->options, "backing");

bdrv_graph_rdlock_main_loop();
bdrv_refresh_limits(bs, NULL, NULL);
bdrv_graph_rdunlock_main_loop();
bdrv_refresh_total_sectors(bs, bs->total_sectors);
}

@@ -5174,7 +5131,7 @@ static void GRAPH_UNLOCKED bdrv_reopen_commit(BDRVReopenState *reopen_state)
* Abort the reopen, and delete and free the staged changes in
* reopen_state
*/
static void GRAPH_UNLOCKED bdrv_reopen_abort(BDRVReopenState *reopen_state)
static void bdrv_reopen_abort(BDRVReopenState *reopen_state)
{
BlockDriver *drv;

@@ -5209,15 +5166,14 @@ static void bdrv_close(BlockDriverState *bs)
bs->drv = NULL;
}

bdrv_graph_wrlock(bs);
bdrv_graph_wrlock(NULL);
QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
bdrv_unref_child(bs, child);
}
bdrv_graph_wrunlock();

assert(!bs->backing);
assert(!bs->file);
bdrv_graph_wrunlock();

g_free(bs->opaque);
bs->opaque = NULL;
qatomic_set(&bs->copy_on_read, 0);
@@ -5422,9 +5378,6 @@ bdrv_replace_node_noperm(BlockDriverState *from,
}

/*
* Switch all parents of @from to point to @to instead. @from and @to must be in
* the same AioContext and both must be drained.
*
* With auto_skip=true bdrv_replace_node_common skips updating from parents
* if it creates a parent-child relation loop or if parent is block-job.
*
@@ -5434,9 +5387,10 @@ bdrv_replace_node_noperm(BlockDriverState *from,
* With @detach_subchain=true @to must be in a backing chain of @from. In this
* case backing link of the cow-parent of @to is removed.
*/
static int GRAPH_WRLOCK
bdrv_replace_node_common(BlockDriverState *from, BlockDriverState *to,
bool auto_skip, bool detach_subchain, Error **errp)
static int bdrv_replace_node_common(BlockDriverState *from,
BlockDriverState *to,
bool auto_skip, bool detach_subchain,
Error **errp)
{
Transaction *tran = tran_new();
g_autoptr(GSList) refresh_list = NULL;
@@ -5445,10 +5399,6 @@ bdrv_replace_node_common(BlockDriverState *from, BlockDriverState *to,

GLOBAL_STATE_CODE();

assert(from->quiesce_counter);
assert(to->quiesce_counter);
assert(bdrv_get_aio_context(from) == bdrv_get_aio_context(to));

if (detach_subchain) {
assert(bdrv_chain_contains(from, to));
assert(from != to);
@@ -5460,6 +5410,17 @@ bdrv_replace_node_common(BlockDriverState *from, BlockDriverState *to,
}
}

/* Make sure that @from doesn't go away until we have successfully attached
* all of its parents to @to. */
bdrv_ref(from);

assert(qemu_get_current_aio_context() == qemu_get_aio_context());
assert(bdrv_get_aio_context(from) == bdrv_get_aio_context(to));
bdrv_drained_begin(from);
bdrv_drained_begin(to);

bdrv_graph_wrlock(to);

/*
* Do the replacement without permission update.
* Replacement may influence the permissions, we should calculate new
@@ -5488,33 +5449,29 @@ bdrv_replace_node_common(BlockDriverState *from, BlockDriverState *to,

out:
tran_finalize(tran, ret);
bdrv_graph_wrunlock();

bdrv_drained_end(to);
bdrv_drained_end(from);
bdrv_unref(from);

return ret;
}

int bdrv_replace_node(BlockDriverState *from, BlockDriverState *to,
Error **errp)
{
GLOBAL_STATE_CODE();

return bdrv_replace_node_common(from, to, true, false, errp);
}

int bdrv_drop_filter(BlockDriverState *bs, Error **errp)
{
BlockDriverState *child_bs;
int ret;

GLOBAL_STATE_CODE();

bdrv_graph_rdlock_main_loop();
child_bs = bdrv_filter_or_cow_bs(bs);
bdrv_graph_rdunlock_main_loop();

bdrv_drained_begin(child_bs);
bdrv_graph_wrlock(bs);
ret = bdrv_replace_node_common(bs, child_bs, true, true, errp);
bdrv_graph_wrunlock();
bdrv_drained_end(child_bs);

return ret;
return bdrv_replace_node_common(bs, bdrv_filter_or_cow_bs(bs), true, true,
errp);
}

/*
@@ -5541,9 +5498,7 @@ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top,

GLOBAL_STATE_CODE();

bdrv_graph_rdlock_main_loop();
assert(!bs_new->backing);
bdrv_graph_rdunlock_main_loop();

old_context = bdrv_get_aio_context(bs_top);
bdrv_drained_begin(bs_top);
@@ -5711,19 +5666,9 @@ BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *options,
goto fail;
}

/*
* Make sure that @bs doesn't go away until we have successfully attached
* all of its parents to @new_node_bs and undrained it again.
*/
bdrv_ref(bs);
bdrv_drained_begin(bs);
bdrv_drained_begin(new_node_bs);
bdrv_graph_wrlock(new_node_bs);
ret = bdrv_replace_node(bs, new_node_bs, errp);
bdrv_graph_wrunlock();
bdrv_drained_end(new_node_bs);
bdrv_drained_end(bs);
bdrv_unref(bs);

if (ret < 0) {
error_prepend(errp, "Could not replace node: ");
@@ -5769,14 +5714,13 @@ int coroutine_fn bdrv_co_check(BlockDriverState *bs,
* image file header
* -ENOTSUP - format driver doesn't support changing the backing file
*/
int coroutine_fn
bdrv_co_change_backing_file(BlockDriverState *bs, const char *backing_file,
const char *backing_fmt, bool require)
int bdrv_change_backing_file(BlockDriverState *bs, const char *backing_file,
const char *backing_fmt, bool require)
{
BlockDriver *drv = bs->drv;
int ret;

IO_CODE();
GLOBAL_STATE_CODE();

if (!drv) {
return -ENOMEDIUM;
@@ -5791,8 +5735,8 @@ bdrv_co_change_backing_file(BlockDriverState *bs, const char *backing_file,
return -EINVAL;
}

if (drv->bdrv_co_change_backing_file != NULL) {
ret = drv->bdrv_co_change_backing_file(bs, backing_file, backing_fmt);
if (drv->bdrv_change_backing_file != NULL) {
ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
} else {
ret = -ENOTSUP;
}
@@ -5849,9 +5793,8 @@ BlockDriverState *bdrv_find_base(BlockDriverState *bs)
* between @bs and @base is frozen. @errp is set if that's the case.
* @base must be reachable from @bs, or NULL.
*/
static bool GRAPH_RDLOCK
bdrv_is_backing_chain_frozen(BlockDriverState *bs, BlockDriverState *base,
Error **errp)
bool bdrv_is_backing_chain_frozen(BlockDriverState *bs, BlockDriverState *base,
Error **errp)
{
BlockDriverState *i;
BdrvChild *child;
@@ -5975,15 +5918,14 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,

bdrv_ref(top);
bdrv_drained_begin(base);
bdrv_graph_wrlock(base);

if (!top->drv || !base->drv) {
goto exit_wrlock;
goto exit;
}

/* Make sure that base is in the backing chain of top */
if (!bdrv_chain_contains(top, base)) {
goto exit_wrlock;
goto exit;
}

/* If 'base' recursively inherits from 'top' then we should set
@@ -6000,9 +5942,11 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
backing_file_str = base->filename;
}

bdrv_graph_rdlock_main_loop();
QLIST_FOREACH(c, &top->parents, next_parent) {
updated_children = g_slist_prepend(updated_children, c);
}
bdrv_graph_rdunlock_main_loop();

/*
* It seems correct to pass detach_subchain=true here, but it triggers
@@ -6015,8 +5959,6 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
* That's a FIXME.
*/
bdrv_replace_node_common(top, base, false, false, &local_err);
bdrv_graph_wrunlock();

if (local_err) {
error_report_err(local_err);
goto exit;
@@ -6049,10 +5991,6 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
}

ret = 0;
goto exit;

exit_wrlock:
bdrv_graph_wrunlock();
exit:
bdrv_drained_end(base);
bdrv_unref(top);
@@ -6344,7 +6282,6 @@ BlockDeviceInfoList *bdrv_named_nodes_list(bool flat,
BlockDriverState *bs;

GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

list = NULL;
QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
@@ -6615,7 +6552,7 @@ int bdrv_has_zero_init_1(BlockDriverState *bs)
return 1;
}

int coroutine_mixed_fn bdrv_has_zero_init(BlockDriverState *bs)
int bdrv_has_zero_init(BlockDriverState *bs)
{
BlockDriverState *filtered;
GLOBAL_STATE_CODE();
@@ -6730,8 +6667,7 @@ void coroutine_fn bdrv_co_debug_event(BlockDriverState *bs, BlkdebugEvent event)
bs->drv->bdrv_co_debug_event(bs, event);
}

static BlockDriverState * GRAPH_RDLOCK
bdrv_find_debug_node(BlockDriverState *bs)
static BlockDriverState *bdrv_find_debug_node(BlockDriverState *bs)
{
GLOBAL_STATE_CODE();
while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
@@ -6750,8 +6686,6 @@ int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
const char *tag)
{
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

bs = bdrv_find_debug_node(bs);
if (bs) {
return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
@@ -6763,8 +6697,6 @@ int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag)
{
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

bs = bdrv_find_debug_node(bs);
if (bs) {
return bs->drv->bdrv_debug_remove_breakpoint(bs, tag);
@@ -6776,8 +6708,6 @@ int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag)
int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
{
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

while (bs && (!bs->drv || !bs->drv->bdrv_debug_resume)) {
bs = bdrv_primary_bs(bs);
}
@@ -6792,8 +6722,6 @@ int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag)
{
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) {
bs = bdrv_primary_bs(bs);
}
@@ -6822,7 +6750,6 @@ BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
BlockDriverState *bs_below;

GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

if (!bs || !bs->drv || !backing_file) {
return NULL;
@@ -7034,7 +6961,6 @@ void bdrv_activate_all(Error **errp)
BdrvNextIterator it;

GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
AioContext *aio_context = bdrv_get_aio_context(bs);
@@ -7050,8 +6976,7 @@ void bdrv_activate_all(Error **errp)
}
}

static bool GRAPH_RDLOCK
bdrv_has_bds_parent(BlockDriverState *bs, bool only_active)
static bool bdrv_has_bds_parent(BlockDriverState *bs, bool only_active)
{
BdrvChild *parent;
GLOBAL_STATE_CODE();
@@ -7068,13 +6993,14 @@ bdrv_has_bds_parent(BlockDriverState *bs, bool only_active)
return false;
}

static int GRAPH_RDLOCK bdrv_inactivate_recurse(BlockDriverState *bs)
static int bdrv_inactivate_recurse(BlockDriverState *bs)
{
BdrvChild *child, *parent;
int ret;
uint64_t cumulative_perms, cumulative_shared_perms;

GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

if (!bs->drv) {
return -ENOMEDIUM;
@@ -7140,7 +7066,6 @@ int bdrv_inactivate_all(void)
GSList *aio_ctxs = NULL, *ctx;

GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
AioContext *aio_context = bdrv_get_aio_context(bs);
@@ -7280,7 +7205,6 @@ bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp)
{
BdrvOpBlocker *blocker;
GLOBAL_STATE_CODE();

assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
if (!QLIST_EMPTY(&bs->op_blockers[op])) {
blocker = QLIST_FIRST(&bs->op_blockers[op]);
@@ -8128,7 +8052,7 @@ static bool append_strong_runtime_options(QDict *d, BlockDriverState *bs)
/* Note: This function may return false positives; it may return true
* even if opening the backing file specified by bs's image header
* would result in exactly bs->backing. */
static bool GRAPH_RDLOCK bdrv_backing_overridden(BlockDriverState *bs)
static bool bdrv_backing_overridden(BlockDriverState *bs)
{
GLOBAL_STATE_CODE();
if (bs->backing) {
@@ -8502,8 +8426,8 @@ BdrvChild *bdrv_primary_child(BlockDriverState *bs)
return found;
}

static BlockDriverState * GRAPH_RDLOCK
bdrv_do_skip_filters(BlockDriverState *bs, bool stop_on_explicit_filter)
static BlockDriverState *bdrv_do_skip_filters(BlockDriverState *bs,
bool stop_on_explicit_filter)
{
BdrvChild *c;


@@ -384,33 +384,31 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
return NULL;
}

bdrv_graph_rdlock_main_loop();
if (!bdrv_is_inserted(bs)) {
error_setg(errp, "Device is not inserted: %s",
bdrv_get_device_name(bs));
goto error_rdlock;
return NULL;
}

if (!bdrv_is_inserted(target)) {
error_setg(errp, "Device is not inserted: %s",
bdrv_get_device_name(target));
goto error_rdlock;
return NULL;
}

if (compress && !bdrv_supports_compressed_writes(target)) {
error_setg(errp, "Compression is not supported for this drive %s",
bdrv_get_device_name(target));
goto error_rdlock;
return NULL;
}

if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
goto error_rdlock;
return NULL;
}

if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
goto error_rdlock;
return NULL;
}
bdrv_graph_rdunlock_main_loop();

if (perf->max_workers < 1 || perf->max_workers > INT_MAX) {
error_setg(errp, "max-workers must be between 1 and %d", INT_MAX);
@@ -438,7 +436,6 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,

len = bdrv_getlength(bs);
if (len < 0) {
GRAPH_RDLOCK_GUARD_MAINLOOP();
error_setg_errno(errp, -len, "Unable to get length for '%s'",
bdrv_get_device_or_node_name(bs));
goto error;
@@ -446,7 +443,6 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,

target_len = bdrv_getlength(target);
if (target_len < 0) {
GRAPH_RDLOCK_GUARD_MAINLOOP();
error_setg_errno(errp, -target_len, "Unable to get length for '%s'",
bdrv_get_device_or_node_name(bs));
goto error;
@@ -496,10 +492,8 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
block_copy_set_speed(bcs, speed);

/* Required permissions are taken by copy-before-write filter target */
bdrv_graph_wrlock(target);
block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
&error_abort);
bdrv_graph_wrunlock();

return &job->common;

@@ -512,8 +506,4 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
}

return NULL;

error_rdlock:
bdrv_graph_rdunlock_main_loop();
return NULL;
}

@@ -508,8 +508,6 @@ static int blkdebug_open(BlockDriverState *bs, QDict *options, int flags,
goto out;
}

bdrv_graph_rdlock_main_loop();

bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED |
(BDRV_REQ_FUA & bs->file->bs->supported_write_flags);
bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
@@ -522,7 +520,7 @@ static int blkdebug_open(BlockDriverState *bs, QDict *options, int flags,
if (s->align && (s->align >= INT_MAX || !is_power_of_2(s->align))) {
error_setg(errp, "Cannot meet constraints with align %" PRIu64,
s->align);
goto out_rdlock;
goto out;
}
align = MAX(s->align, bs->file->bs->bl.request_alignment);

@@ -532,7 +530,7 @@ static int blkdebug_open(BlockDriverState *bs, QDict *options, int flags,
!QEMU_IS_ALIGNED(s->max_transfer, align))) {
error_setg(errp, "Cannot meet constraints with max-transfer %" PRIu64,
s->max_transfer);
goto out_rdlock;
goto out;
}

s->opt_write_zero = qemu_opt_get_size(opts, "opt-write-zero", 0);
@@ -541,7 +539,7 @@ static int blkdebug_open(BlockDriverState *bs, QDict *options, int flags,
!QEMU_IS_ALIGNED(s->opt_write_zero, align))) {
error_setg(errp, "Cannot meet constraints with opt-write-zero %" PRIu64,
s->opt_write_zero);
goto out_rdlock;
goto out;
}

s->max_write_zero = qemu_opt_get_size(opts, "max-write-zero", 0);
@@ -551,7 +549,7 @@ static int blkdebug_open(BlockDriverState *bs, QDict *options, int flags,
MAX(s->opt_write_zero, align)))) {
error_setg(errp, "Cannot meet constraints with max-write-zero %" PRIu64,
s->max_write_zero);
goto out_rdlock;
goto out;
}

s->opt_discard = qemu_opt_get_size(opts, "opt-discard", 0);
@@ -560,7 +558,7 @@ static int blkdebug_open(BlockDriverState *bs, QDict *options, int flags,
!QEMU_IS_ALIGNED(s->opt_discard, align))) {
error_setg(errp, "Cannot meet constraints with opt-discard %" PRIu64,
s->opt_discard);
goto out_rdlock;
goto out;
}

s->max_discard = qemu_opt_get_size(opts, "max-discard", 0);
@@ -570,14 +568,12 @@ static int blkdebug_open(BlockDriverState *bs, QDict *options, int flags,
MAX(s->opt_discard, align)))) {
error_setg(errp, "Cannot meet constraints with max-discard %" PRIu64,
s->max_discard);
goto out_rdlock;
goto out;
}

bdrv_debug_event(bs, BLKDBG_NONE);

ret = 0;
out_rdlock:
bdrv_graph_rdunlock_main_loop();
out:
if (ret < 0) {
qemu_mutex_destroy(&s->lock);
@@ -750,10 +746,13 @@ blkdebug_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
return bdrv_co_pdiscard(bs->file, offset, bytes);
}

static int coroutine_fn GRAPH_RDLOCK
blkdebug_co_block_status(BlockDriverState *bs, bool want_zero, int64_t offset,
int64_t bytes, int64_t *pnum, int64_t *map,
BlockDriverState **file)
static int coroutine_fn blkdebug_co_block_status(BlockDriverState *bs,
bool want_zero,
int64_t offset,
int64_t bytes,
int64_t *pnum,
int64_t *map,
BlockDriverState **file)
{
int err;

@@ -974,7 +973,7 @@ blkdebug_co_getlength(BlockDriverState *bs)
return bdrv_co_getlength(bs->file->bs);
}

static void GRAPH_RDLOCK blkdebug_refresh_filename(BlockDriverState *bs)
static void blkdebug_refresh_filename(BlockDriverState *bs)
{
BDRVBlkdebugState *s = bs->opaque;
const QDictEntry *e;

@@ -13,7 +13,6 @@
#include "block/block_int.h"
#include "exec/memory.h"
#include "exec/cpu-common.h" /* for qemu_ram_get_fd() */
#include "qemu/defer-call.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qapi/qmp/qdict.h"
@@ -313,10 +312,10 @@ static void blkio_detach_aio_context(BlockDriverState *bs)
}

/*
* Called by defer_call_end() or immediately if not in a deferred section.
* Called without blkio_lock.
* Called by blk_io_unplug() or immediately if not plugged. Called without
* blkio_lock.
*/
static void blkio_deferred_fn(void *opaque)
static void blkio_unplug_fn(void *opaque)
{
BDRVBlkioState *s = opaque;

@@ -333,7 +332,7 @@ static void blkio_submit_io(BlockDriverState *bs)
{
BDRVBlkioState *s = bs->opaque;

defer_call(blkio_deferred_fn, s);
blk_io_plug_call(blkio_unplug_fn, s);
}

static int coroutine_fn

@@ -130,13 +130,7 @@ static int coroutine_fn GRAPH_RDLOCK blkreplay_co_flush(BlockDriverState *bs)
static int blkreplay_snapshot_goto(BlockDriverState *bs,
const char *snapshot_id)
{
BlockDriverState *file_bs;

bdrv_graph_rdlock_main_loop();
file_bs = bs->file->bs;
bdrv_graph_rdunlock_main_loop();

return bdrv_snapshot_goto(file_bs, snapshot_id, NULL);
return bdrv_snapshot_goto(bs->file->bs, snapshot_id, NULL);
}

static BlockDriver bdrv_blkreplay = {

@@ -33,8 +33,8 @@ typedef struct BlkverifyRequest {
uint64_t bytes;
int flags;

int GRAPH_RDLOCK_PTR (*request_fn)(
BdrvChild *, int64_t, int64_t, QEMUIOVector *, BdrvRequestFlags);
int (*request_fn)(BdrvChild *, int64_t, int64_t, QEMUIOVector *,
BdrvRequestFlags);

int ret; /* test image result */
int raw_ret; /* raw image result */
@@ -170,11 +170,8 @@ static void coroutine_fn blkverify_do_test_req(void *opaque)
BlkverifyRequest *r = opaque;
BDRVBlkverifyState *s = r->bs->opaque;

bdrv_graph_co_rdlock();
r->ret = r->request_fn(s->test_file, r->offset, r->bytes, r->qiov,
r->flags);
bdrv_graph_co_rdunlock();

r->done++;
qemu_coroutine_enter_if_inactive(r->co);
}
@@ -183,16 +180,13 @@ static void coroutine_fn blkverify_do_raw_req(void *opaque)
{
BlkverifyRequest *r = opaque;

bdrv_graph_co_rdlock();
r->raw_ret = r->request_fn(r->bs->file, r->offset, r->bytes, r->raw_qiov,
r->flags);
bdrv_graph_co_rdunlock();

r->done++;
qemu_coroutine_enter_if_inactive(r->co);
}

static int coroutine_fn GRAPH_RDLOCK
static int coroutine_fn
blkverify_co_prwv(BlockDriverState *bs, BlkverifyRequest *r, uint64_t offset,
uint64_t bytes, QEMUIOVector *qiov, QEMUIOVector *raw_qiov,
int flags, bool is_write)
@@ -228,7 +222,7 @@ blkverify_co_prwv(BlockDriverState *bs, BlkverifyRequest *r, uint64_t offset,
return r->ret;
}

static int coroutine_fn GRAPH_RDLOCK
static int coroutine_fn
blkverify_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{
@@ -257,7 +251,7 @@ blkverify_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
return ret;
}

static int coroutine_fn GRAPH_RDLOCK
static int coroutine_fn
blkverify_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{
@@ -288,7 +282,7 @@ blkverify_recurse_can_replace(BlockDriverState *bs,
bdrv_recurse_can_replace(s->test_file->bs, to_replace);
}

static void GRAPH_RDLOCK blkverify_refresh_filename(BlockDriverState *bs)
static void blkverify_refresh_filename(BlockDriverState *bs)
{
BDRVBlkverifyState *s = bs->opaque;


@@ -780,12 +780,11 @@ BlockDriverState *blk_bs(BlockBackend *blk)
return blk->root ? blk->root->bs : NULL;
}

static BlockBackend * GRAPH_RDLOCK bdrv_first_blk(BlockDriverState *bs)
static BlockBackend *bdrv_first_blk(BlockDriverState *bs)
{
BdrvChild *child;

GLOBAL_STATE_CODE();
assert_bdrv_graph_readable();

QLIST_FOREACH(child, &bs->parents, next_parent) {
if (child->klass == &child_root) {
@@ -813,8 +812,6 @@ bool bdrv_is_root_node(BlockDriverState *bs)
BdrvChild *c;

GLOBAL_STATE_CODE();
assert_bdrv_graph_readable();

QLIST_FOREACH(c, &bs->parents, next_parent) {
if (c->klass != &child_root) {
return false;
@@ -931,12 +928,10 @@ int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
GLOBAL_STATE_CODE();
bdrv_ref(bs);
bdrv_graph_wrlock(bs);
blk->root = bdrv_root_attach_child(bs, "root", &child_root,
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
blk->perm, blk->shared_perm,
blk, errp);
bdrv_graph_wrunlock();
if (blk->root == NULL) {
return -EPERM;
}
@@ -2264,7 +2259,6 @@ void blk_activate(BlockBackend *blk, Error **errp)
if (qemu_in_coroutine()) {
bdrv_co_activate(bs, errp);
} else {
GRAPH_RDLOCK_GUARD_MAINLOOP();
bdrv_activate(bs, errp);
}
}
@@ -2390,7 +2384,6 @@ bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
BlockDriverState *bs = blk_bs(blk);
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

if (!bs) {
return false;
@@ -2668,8 +2661,6 @@ int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

if (!blk_is_available(blk)) {
return -ENOMEDIUM;
}
@@ -2730,7 +2721,6 @@ int blk_commit_all(void)
{
BlockBackend *blk = NULL;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

while ((blk = blk_all_next(blk)) != NULL) {
AioContext *aio_context = blk_get_aio_context(blk);
@@ -2911,8 +2901,6 @@ const BdrvChild *blk_root(BlockBackend *blk)
int blk_make_empty(BlockBackend *blk, Error **errp)
{
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

if (!blk_is_available(blk)) {
error_setg(errp, "No medium inserted");
return -ENOMEDIUM;

@@ -313,12 +313,7 @@ static int64_t block_copy_calculate_cluster_size(BlockDriverState *target,
|
||||
{
|
||||
int ret;
|
||||
BlockDriverInfo bdi;
|
||||
bool target_does_cow;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
target_does_cow = bdrv_backing_chain_next(target);
|
||||
bool target_does_cow = bdrv_backing_chain_next(target);
|
||||
|
||||
/*
|
||||
* If there is no backing file on the target, we cannot rely on COW if our
|
||||
@@ -360,8 +355,6 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
|
||||
BdrvDirtyBitmap *copy_bitmap;
|
||||
bool is_fleecing;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
cluster_size = block_copy_calculate_cluster_size(target->bs, errp);
|
||||
if (cluster_size < 0) {
|
||||
return NULL;
|
||||
@@ -399,9 +392,7 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
|
||||
* For more information see commit f8d59dfb40bb and test
|
||||
* tests/qemu-iotests/222
|
||||
*/
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
is_fleecing = bdrv_chain_contains(target->bs, source->bs);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
|
||||
s = g_new(BlockCopyState, 1);
|
||||
*s = (BlockCopyState) {
|
||||
|
||||
@@ -105,12 +105,8 @@ static int bochs_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
struct bochs_header bochs;
|
||||
int ret;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
/* No write support yet */
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
ret = bdrv_apply_auto_read_only(bs, NULL, errp);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
@@ -120,8 +116,6 @@ static int bochs_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
return ret;
|
||||
}
|
||||
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
ret = bdrv_pread(bs->file, 0, sizeof(bochs), &bochs, 0);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
|
||||
@@ -67,11 +67,7 @@ static int cloop_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
uint32_t offsets_size, max_compressed_block_size = 1, i;
|
||||
int ret;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
ret = bdrv_apply_auto_read_only(bs, NULL, errp);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
@@ -81,8 +77,6 @@ static int cloop_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
return ret;
|
||||
}
|
||||
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
/* read header */
|
||||
ret = bdrv_pread(bs->file, 128, 4, &s->block_size, 0);
|
||||
if (ret < 0) {
|
||||
|
||||
@@ -48,10 +48,8 @@ static int commit_prepare(Job *job)
|
||||
{
|
||||
CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
|
||||
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
bdrv_unfreeze_backing_chain(s->commit_top_bs, s->base_bs);
|
||||
s->chain_frozen = false;
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
|
||||
/* Remove base node parent that still uses BLK_PERM_WRITE/RESIZE before
|
||||
* the normal backing chain can be restored. */
|
||||
@@ -68,12 +66,9 @@ static void commit_abort(Job *job)
|
||||
{
|
||||
CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
|
||||
BlockDriverState *top_bs = blk_bs(s->top);
|
||||
BlockDriverState *commit_top_backing_bs;
|
||||
|
||||
if (s->chain_frozen) {
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
bdrv_unfreeze_backing_chain(s->commit_top_bs, s->base_bs);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
}
|
||||
|
||||
/* Make sure commit_top_bs and top stay around until bdrv_replace_node() */
|
||||
@@ -95,15 +90,8 @@ static void commit_abort(Job *job)
|
||||
* XXX Can (or should) we somehow keep 'consistent read' blocked even
|
||||
* after the failed/cancelled commit job is gone? If we already wrote
|
||||
* something to base, the intermediate images aren't valid any more. */
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
commit_top_backing_bs = s->commit_top_bs->backing->bs;
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
|
||||
bdrv_drained_begin(commit_top_backing_bs);
|
||||
bdrv_graph_wrlock(commit_top_backing_bs);
|
||||
bdrv_replace_node(s->commit_top_bs, commit_top_backing_bs, &error_abort);
|
||||
bdrv_graph_wrunlock();
|
||||
bdrv_drained_end(commit_top_backing_bs);
|
||||
bdrv_replace_node(s->commit_top_bs, s->commit_top_bs->backing->bs,
|
||||
&error_abort);
|
||||
|
||||
bdrv_unref(s->commit_top_bs);
|
||||
bdrv_unref(top_bs);
|
||||
@@ -222,7 +210,7 @@ bdrv_commit_top_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
||||
return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
|
||||
}
|
||||
|
||||
static GRAPH_RDLOCK void bdrv_commit_top_refresh_filename(BlockDriverState *bs)
|
||||
static void bdrv_commit_top_refresh_filename(BlockDriverState *bs)
|
||||
{
|
||||
pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
|
||||
bs->backing->bs->filename);
|
||||
@@ -267,13 +255,10 @@ void commit_start(const char *job_id, BlockDriverState *bs,
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
assert(top != bs);
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
if (bdrv_skip_filters(top) == bdrv_skip_filters(base)) {
|
||||
error_setg(errp, "Invalid files for merge: top and base are the same");
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
return;
|
||||
}
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
|
||||
base_size = bdrv_getlength(base);
|
||||
if (base_size < 0) {
|
||||
@@ -339,7 +324,6 @@ void commit_start(const char *job_id, BlockDriverState *bs,
|
||||
* this is the responsibility of the interface (i.e. whoever calls
|
||||
* commit_start()).
|
||||
*/
|
||||
bdrv_graph_wrlock(top);
|
||||
s->base_overlay = bdrv_find_overlay(top, base);
|
||||
assert(s->base_overlay);
|
||||
|
||||
@@ -370,20 +354,16 @@ void commit_start(const char *job_id, BlockDriverState *bs,
|
||||
ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
|
||||
iter_shared_perms, errp);
|
||||
if (ret < 0) {
|
||||
bdrv_graph_wrunlock();
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
|
||||
if (bdrv_freeze_backing_chain(commit_top_bs, base, errp) < 0) {
|
||||
bdrv_graph_wrunlock();
|
||||
goto fail;
|
||||
}
|
||||
s->chain_frozen = true;
|
||||
|
||||
ret = block_job_add_bdrv(&s->common, "base", base, 0, BLK_PERM_ALL, errp);
|
||||
bdrv_graph_wrunlock();
|
||||
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
@@ -416,9 +396,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
|
||||
|
||||
fail:
|
||||
if (s->chain_frozen) {
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
bdrv_unfreeze_backing_chain(commit_top_bs, base);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
}
|
||||
if (s->base) {
|
||||
blk_unref(s->base);
|
||||
@@ -433,11 +411,7 @@ fail:
|
||||
/* commit_top_bs has to be replaced after deleting the block job,
|
||||
* otherwise this would fail because of lack of permissions. */
|
||||
if (commit_top_bs) {
|
||||
bdrv_drained_begin(top);
|
||||
bdrv_graph_wrlock(top);
|
||||
bdrv_replace_node(commit_top_bs, top, &error_abort);
|
||||
bdrv_graph_wrunlock();
|
||||
bdrv_drained_end(top);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -460,7 +434,6 @@ int bdrv_commit(BlockDriverState *bs)
|
||||
Error *local_err = NULL;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
if (!drv)
|
||||
return -ENOMEDIUM;
|
||||
|
||||
@@ -203,7 +203,7 @@ static int coroutine_fn GRAPH_RDLOCK cbw_co_flush(BlockDriverState *bs)
|
||||
* It's guaranteed that guest writes will not interact in the region until
|
||||
* cbw_snapshot_read_unlock() called.
|
||||
*/
|
||||
static BlockReq * coroutine_fn GRAPH_RDLOCK
|
||||
static coroutine_fn BlockReq *
|
||||
cbw_snapshot_read_lock(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
||||
int64_t *pnum, BdrvChild **file)
|
||||
{
|
||||
@@ -305,7 +305,7 @@ cbw_co_snapshot_block_status(BlockDriverState *bs,
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
ret = bdrv_co_block_status(child->bs, offset, cur_bytes, pnum, map, file);
|
||||
ret = bdrv_block_status(child->bs, offset, cur_bytes, pnum, map, file);
|
||||
if (child == s->target) {
|
||||
/*
|
||||
* We refer to s->target only for areas that we've written to it.
|
||||
@@ -335,7 +335,7 @@ cbw_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes)
|
||||
return bdrv_co_pdiscard(s->target, offset, bytes);
|
||||
}
|
||||
|
||||
static void GRAPH_RDLOCK cbw_refresh_filename(BlockDriverState *bs)
|
||||
static void cbw_refresh_filename(BlockDriverState *bs)
|
||||
{
|
||||
pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
|
||||
bs->file->bs->filename);
|
||||
@@ -433,8 +433,6 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
ctx = bdrv_get_aio_context(bs);
|
||||
aio_context_acquire(ctx);
|
||||
|
||||
|
||||
@@ -35,8 +35,8 @@ typedef struct BDRVStateCOR {
|
||||
} BDRVStateCOR;
|
||||
|
||||
|
||||
static int GRAPH_UNLOCKED
|
||||
cor_open(BlockDriverState *bs, QDict *options, int flags, Error **errp)
|
||||
static int cor_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
Error **errp)
|
||||
{
|
||||
BlockDriverState *bottom_bs = NULL;
|
||||
BDRVStateCOR *state = bs->opaque;
|
||||
@@ -44,15 +44,11 @@ cor_open(BlockDriverState *bs, QDict *options, int flags, Error **errp)
|
||||
const char *bottom_node = qdict_get_try_str(options, "bottom");
|
||||
int ret;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
bs->supported_read_flags = BDRV_REQ_PREFETCH;
|
||||
|
||||
bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED |
|
||||
@@ -150,11 +146,11 @@ cor_co_preadv_part(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
||||
local_flags = flags;
|
||||
|
||||
/* In case of failure, try to copy-on-read anyway */
|
||||
ret = bdrv_co_is_allocated(bs->file->bs, offset, bytes, &n);
|
||||
ret = bdrv_is_allocated(bs->file->bs, offset, bytes, &n);
|
||||
if (ret <= 0) {
|
||||
ret = bdrv_co_is_allocated_above(bdrv_backing_chain_next(bs->file->bs),
|
||||
state->bottom_bs, true, offset,
|
||||
n, &n);
|
||||
ret = bdrv_is_allocated_above(bdrv_backing_chain_next(bs->file->bs),
|
||||
state->bottom_bs, true, offset,
|
||||
n, &n);
|
||||
if (ret > 0 || ret < 0) {
|
||||
local_flags |= BDRV_REQ_COPY_ON_READ;
|
||||
}
|
||||
@@ -231,17 +227,13 @@ cor_co_lock_medium(BlockDriverState *bs, bool locked)
|
||||
}
|
||||
|
||||
|
||||
static void GRAPH_UNLOCKED cor_close(BlockDriverState *bs)
|
||||
static void cor_close(BlockDriverState *bs)
|
||||
{
|
||||
BDRVStateCOR *s = bs->opaque;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
if (s->chain_frozen) {
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
s->chain_frozen = false;
|
||||
bdrv_unfreeze_backing_chain(bs, s->bottom_bs);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
}
|
||||
|
||||
bdrv_unref(s->bottom_bs);
|
||||
@@ -271,15 +263,12 @@ static BlockDriver bdrv_copy_on_read = {
|
||||
};
|
||||
|
||||
|
||||
void no_coroutine_fn bdrv_cor_filter_drop(BlockDriverState *cor_filter_bs)
|
||||
void bdrv_cor_filter_drop(BlockDriverState *cor_filter_bs)
|
||||
{
|
||||
BDRVStateCOR *s = cor_filter_bs->opaque;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
/* unfreeze, as otherwise bdrv_replace_node() will fail */
|
||||
if (s->chain_frozen) {
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
s->chain_frozen = false;
|
||||
bdrv_unfreeze_backing_chain(cor_filter_bs, s->bottom_bs);
|
||||
}
|
||||
|
||||
@@ -27,7 +27,6 @@

#include "block/block_int.h"

void no_coroutine_fn GRAPH_UNLOCKED
bdrv_cor_filter_drop(BlockDriverState *cor_filter_bs);
void bdrv_cor_filter_drop(BlockDriverState *cor_filter_bs);

#endif /* BLOCK_COPY_ON_READ_H */

@@ -65,9 +65,6 @@ static int block_crypto_read_func(QCryptoBlock *block,
|
||||
BlockDriverState *bs = opaque;
|
||||
ssize_t ret;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
ret = bdrv_pread(bs->file, offset, buflen, buf, 0);
|
||||
if (ret < 0) {
|
||||
error_setg_errno(errp, -ret, "Could not read encryption header");
|
||||
@@ -86,9 +83,6 @@ static int block_crypto_write_func(QCryptoBlock *block,
|
||||
BlockDriverState *bs = opaque;
|
||||
ssize_t ret;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
ret = bdrv_pwrite(bs->file, offset, buflen, buf, 0);
|
||||
if (ret < 0) {
|
||||
error_setg_errno(errp, -ret, "Could not write encryption header");
|
||||
@@ -269,15 +263,11 @@ static int block_crypto_open_generic(QCryptoBlockFormat format,
|
||||
unsigned int cflags = 0;
|
||||
QDict *cryptoopts = NULL;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
bs->supported_write_flags = BDRV_REQ_FUA &
|
||||
bs->file->bs->supported_write_flags;
|
||||
|
||||
@@ -838,7 +828,7 @@ block_crypto_amend_options_generic_luks(BlockDriverState *bs,
|
||||
errp);
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK
|
||||
static int
|
||||
block_crypto_amend_options_luks(BlockDriverState *bs,
|
||||
QemuOpts *opts,
|
||||
BlockDriverAmendStatusCB *status_cb,
|
||||
@@ -851,6 +841,8 @@ block_crypto_amend_options_luks(BlockDriverState *bs,
|
||||
QCryptoBlockAmendOptions *amend_options = NULL;
|
||||
int ret = -EINVAL;
|
||||
|
||||
assume_graph_lock(); /* FIXME */
|
||||
|
||||
assert(crypto);
|
||||
assert(crypto->block);
|
||||
|
||||
|
||||
@@ -696,10 +696,8 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
const char *protocol_delimiter;
|
||||
int ret;
|
||||
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
ret = bdrv_apply_auto_read_only(bs, "curl driver does not support writes",
|
||||
errp);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
23
block/dmg.c
23
block/dmg.c
@@ -70,8 +70,7 @@ static int dmg_probe(const uint8_t *buf, int buf_size, const char *filename)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK
|
||||
read_uint64(BlockDriverState *bs, int64_t offset, uint64_t *result)
|
||||
static int read_uint64(BlockDriverState *bs, int64_t offset, uint64_t *result)
|
||||
{
|
||||
uint64_t buffer;
|
||||
int ret;
|
||||
@@ -85,8 +84,7 @@ read_uint64(BlockDriverState *bs, int64_t offset, uint64_t *result)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK
|
||||
read_uint32(BlockDriverState *bs, int64_t offset, uint32_t *result)
|
||||
static int read_uint32(BlockDriverState *bs, int64_t offset, uint32_t *result)
|
||||
{
|
||||
uint32_t buffer;
|
||||
int ret;
|
||||
@@ -323,9 +321,8 @@ fail:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK
|
||||
dmg_read_resource_fork(BlockDriverState *bs, DmgHeaderState *ds,
|
||||
uint64_t info_begin, uint64_t info_length)
|
||||
static int dmg_read_resource_fork(BlockDriverState *bs, DmgHeaderState *ds,
|
||||
uint64_t info_begin, uint64_t info_length)
|
||||
{
|
||||
BDRVDMGState *s = bs->opaque;
|
||||
int ret;
|
||||
@@ -391,9 +388,8 @@ fail:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK
|
||||
dmg_read_plist_xml(BlockDriverState *bs, DmgHeaderState *ds,
|
||||
uint64_t info_begin, uint64_t info_length)
|
||||
static int dmg_read_plist_xml(BlockDriverState *bs, DmgHeaderState *ds,
|
||||
uint64_t info_begin, uint64_t info_length)
|
||||
{
|
||||
BDRVDMGState *s = bs->opaque;
|
||||
int ret;
|
||||
@@ -456,11 +452,7 @@ static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
int64_t offset;
|
||||
int ret;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
ret = bdrv_apply_auto_read_only(bs, NULL, errp);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
@@ -469,9 +461,6 @@ static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
/*
|
||||
* NB: if uncompress submodules are absent,
|
||||
* ie block_module_load return value == 0, the function pointers
|
||||
|
||||
@@ -83,8 +83,6 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
|
||||
uint64_t perm;
|
||||
int ret;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
if (!id_wellformed(export->id)) {
|
||||
error_setg(errp, "Invalid block export id");
|
||||
return NULL;
|
||||
@@ -147,9 +145,7 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
|
||||
* access since the export could be available before migration handover.
|
||||
* ctx was acquired in the caller.
|
||||
*/
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
bdrv_activate(bs, NULL);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
|
||||
perm = BLK_PERM_CONSISTENT_READ;
|
||||
if (export->writable) {
|
||||
|
||||
@@ -160,6 +160,7 @@ typedef struct BDRVRawState {
|
||||
bool has_write_zeroes:1;
|
||||
bool use_linux_aio:1;
|
||||
bool use_linux_io_uring:1;
|
||||
int64_t *offset; /* offset of zone append operation */
|
||||
int page_cache_inconsistent; /* errno from fdatasync failure */
|
||||
bool has_fallocate;
|
||||
bool needs_alignment;
|
||||
@@ -2444,13 +2445,12 @@ static bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
|
||||
return true;
|
||||
}
|
||||
|
||||
static int coroutine_fn raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr,
|
||||
static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
|
||||
uint64_t bytes, QEMUIOVector *qiov, int type)
|
||||
{
|
||||
BDRVRawState *s = bs->opaque;
|
||||
RawPosixAIOData acb;
|
||||
int ret;
|
||||
uint64_t offset = *offset_ptr;
|
||||
|
||||
if (fd_open(bs) < 0)
|
||||
return -EIO;
|
||||
@@ -2513,8 +2513,8 @@ out:
|
||||
uint64_t *wp = &wps->wp[offset / bs->bl.zone_size];
|
||||
if (!BDRV_ZT_IS_CONV(*wp)) {
|
||||
if (type & QEMU_AIO_ZONE_APPEND) {
|
||||
*offset_ptr = *wp;
|
||||
trace_zbd_zone_append_complete(bs, *offset_ptr
|
||||
*s->offset = *wp;
|
||||
trace_zbd_zone_append_complete(bs, *s->offset
|
||||
>> BDRV_SECTOR_BITS);
|
||||
}
|
||||
/* Advance the wp if needed */
|
||||
@@ -2523,10 +2523,7 @@ out:
|
||||
}
|
||||
}
|
||||
} else {
|
||||
/*
|
||||
* write and append write are not allowed to cross zone boundaries
|
||||
*/
|
||||
update_zones_wp(bs, s->fd, offset, 1);
|
||||
update_zones_wp(bs, s->fd, 0, 1);
|
||||
}
|
||||
|
||||
qemu_co_mutex_unlock(&wps->colock);
|
||||
@@ -2539,14 +2536,14 @@ static int coroutine_fn raw_co_preadv(BlockDriverState *bs, int64_t offset,
|
||||
int64_t bytes, QEMUIOVector *qiov,
|
||||
BdrvRequestFlags flags)
|
||||
{
|
||||
return raw_co_prw(bs, &offset, bytes, qiov, QEMU_AIO_READ);
|
||||
return raw_co_prw(bs, offset, bytes, qiov, QEMU_AIO_READ);
|
||||
}
|
||||
|
||||
static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, int64_t offset,
|
||||
int64_t bytes, QEMUIOVector *qiov,
|
||||
BdrvRequestFlags flags)
|
||||
{
|
||||
return raw_co_prw(bs, &offset, bytes, qiov, QEMU_AIO_WRITE);
|
||||
return raw_co_prw(bs, offset, bytes, qiov, QEMU_AIO_WRITE);
|
||||
}
|
||||
|
||||
static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs)
|
||||
@@ -3473,7 +3470,7 @@ static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
|
||||
len >> BDRV_SECTOR_BITS);
|
||||
ret = raw_thread_pool_submit(handle_aiocb_zone_mgmt, &acb);
|
||||
if (ret != 0) {
|
||||
update_zones_wp(bs, s->fd, offset, nrz);
|
||||
update_zones_wp(bs, s->fd, offset, i);
|
||||
error_report("ioctl %s failed %d", op_name, ret);
|
||||
return ret;
|
||||
}
|
||||
@@ -3509,6 +3506,8 @@ static int coroutine_fn raw_co_zone_append(BlockDriverState *bs,
|
||||
int64_t zone_size_mask = bs->bl.zone_size - 1;
|
||||
int64_t iov_len = 0;
|
||||
int64_t len = 0;
|
||||
BDRVRawState *s = bs->opaque;
|
||||
s->offset = offset;
|
||||
|
||||
if (*offset & zone_size_mask) {
|
||||
error_report("sector offset %" PRId64 " is not aligned to zone size "
|
||||
@@ -3529,7 +3528,7 @@ static int coroutine_fn raw_co_zone_append(BlockDriverState *bs,
|
||||
}
|
||||
|
||||
trace_zbd_zone_append(bs, *offset >> BDRV_SECTOR_BITS);
|
||||
return raw_co_prw(bs, offset, len, qiov, QEMU_AIO_ZONE_APPEND);
|
||||
return raw_co_prw(bs, *offset, len, qiov, QEMU_AIO_ZONE_APPEND);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
@@ -36,8 +36,6 @@ static int compress_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
return ret;
|
||||
}
|
||||
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
if (!bs->file->bs->drv || !block_driver_can_compress(bs->file->bs->drv)) {
|
||||
error_setg(errp,
|
||||
"Compression is not supported for underlying format: %s",
|
||||
@@ -99,8 +97,7 @@ compress_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
|
||||
}
|
||||
|
||||
|
||||
static void GRAPH_RDLOCK
|
||||
compress_refresh_limits(BlockDriverState *bs, Error **errp)
|
||||
static void compress_refresh_limits(BlockDriverState *bs, Error **errp)
|
||||
{
|
||||
BlockDriverInfo bdi;
|
||||
int ret;
|
||||
|
||||
@@ -863,13 +863,11 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
|
||||
if (ret == -EACCES || ret == -EROFS) {
|
||||
/* Try to degrade to read-only, but if it doesn't work, still use the
|
||||
* normal error message. */
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
if (bdrv_apply_auto_read_only(bs, NULL, NULL) == 0) {
|
||||
open_flags = (open_flags & ~O_RDWR) | O_RDONLY;
|
||||
s->fd = glfs_open(s->glfs, gconf->path, open_flags);
|
||||
ret = s->fd ? 0 : -errno;
|
||||
}
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
}
|
||||
|
||||
s->supports_seek_data = qemu_gluster_test_seek(s->fd);
|
||||
|
||||
@@ -106,13 +106,12 @@ static uint32_t reader_count(void)
return rd;
}

void no_coroutine_fn bdrv_graph_wrlock(BlockDriverState *bs)
void bdrv_graph_wrlock(BlockDriverState *bs)
{
AioContext *ctx = NULL;

GLOBAL_STATE_CODE();
assert(!qatomic_read(&has_writer));
assert(!qemu_in_coroutine());

/*
* Release only non-mainloop AioContext. The mainloop often relies on the

block/io.c | 145
@@ -42,18 +42,13 @@
|
||||
/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
|
||||
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
|
||||
|
||||
static void coroutine_fn GRAPH_RDLOCK
|
||||
bdrv_parent_cb_resize(BlockDriverState *bs);
|
||||
|
||||
static void bdrv_parent_cb_resize(BlockDriverState *bs);
|
||||
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
|
||||
int64_t offset, int64_t bytes, BdrvRequestFlags flags);
|
||||
|
||||
static void GRAPH_RDLOCK
|
||||
bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore)
|
||||
static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore)
|
||||
{
|
||||
BdrvChild *c, *next;
|
||||
IO_OR_GS_CODE();
|
||||
assert_bdrv_graph_readable();
|
||||
|
||||
QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
|
||||
if (c == ignore) {
|
||||
@@ -75,12 +70,9 @@ void bdrv_parent_drained_end_single(BdrvChild *c)
|
||||
}
|
||||
}
|
||||
|
||||
static void GRAPH_RDLOCK
|
||||
bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
|
||||
static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
|
||||
{
|
||||
BdrvChild *c;
|
||||
IO_OR_GS_CODE();
|
||||
assert_bdrv_graph_readable();
|
||||
|
||||
QLIST_FOREACH(c, &bs->parents, next_parent) {
|
||||
if (c == ignore) {
|
||||
@@ -92,22 +84,17 @@ bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
|
||||
|
||||
bool bdrv_parent_drained_poll_single(BdrvChild *c)
|
||||
{
|
||||
IO_OR_GS_CODE();
|
||||
|
||||
if (c->klass->drained_poll) {
|
||||
return c->klass->drained_poll(c);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool GRAPH_RDLOCK
|
||||
bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
|
||||
bool ignore_bds_parents)
|
||||
static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
|
||||
bool ignore_bds_parents)
|
||||
{
|
||||
BdrvChild *c, *next;
|
||||
bool busy = false;
|
||||
IO_OR_GS_CODE();
|
||||
assert_bdrv_graph_readable();
|
||||
|
||||
QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
|
||||
if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
|
||||
@@ -127,7 +114,6 @@ void bdrv_parent_drained_begin_single(BdrvChild *c)
|
||||
c->quiesced_parent = true;
|
||||
|
||||
if (c->klass->drained_begin) {
|
||||
/* called with rdlock taken, but it doesn't really need it. */
|
||||
c->klass->drained_begin(c);
|
||||
}
|
||||
}
|
||||
@@ -277,9 +263,6 @@ bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent,
|
||||
static bool bdrv_drain_poll_top_level(BlockDriverState *bs,
|
||||
BdrvChild *ignore_parent)
|
||||
{
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
return bdrv_drain_poll(bs, ignore_parent, false);
|
||||
}
|
||||
|
||||
@@ -379,7 +362,6 @@ static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
|
||||
|
||||
/* Stop things in parent-to-child order */
|
||||
if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
bdrv_parent_drained_begin(bs, parent);
|
||||
if (bs->drv && bs->drv->bdrv_drain_begin) {
|
||||
bs->drv->bdrv_drain_begin(bs);
|
||||
@@ -426,16 +408,12 @@ static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent)
|
||||
bdrv_co_yield_to_drain(bs, false, parent, false);
|
||||
return;
|
||||
}
|
||||
|
||||
/* At this point, we should be always running in the main loop. */
|
||||
GLOBAL_STATE_CODE();
|
||||
assert(bs->quiesce_counter > 0);
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
/* Re-enable things in child-to-parent order */
|
||||
old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
|
||||
if (old_quiesce_counter == 1) {
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
if (bs->drv && bs->drv->bdrv_drain_end) {
|
||||
bs->drv->bdrv_drain_end(bs);
|
||||
}
|
||||
@@ -459,8 +437,6 @@ void bdrv_drain(BlockDriverState *bs)
|
||||
static void bdrv_drain_assert_idle(BlockDriverState *bs)
|
||||
{
|
||||
BdrvChild *child, *next;
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
assert(qatomic_read(&bs->in_flight) == 0);
|
||||
QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
|
||||
@@ -474,9 +450,7 @@ static bool bdrv_drain_all_poll(void)
|
||||
{
|
||||
BlockDriverState *bs = NULL;
|
||||
bool result = false;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
/* bdrv_drain_poll() can't make changes to the graph and we are holding the
|
||||
* main AioContext lock, so iterating bdrv_next_all_states() is safe. */
|
||||
@@ -1249,8 +1223,8 @@ bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
|
||||
ret = 1; /* "already allocated", so nothing will be copied */
|
||||
pnum = MIN(align_bytes, max_transfer);
|
||||
} else {
|
||||
ret = bdrv_co_is_allocated(bs, align_offset,
|
||||
MIN(align_bytes, max_transfer), &pnum);
|
||||
ret = bdrv_is_allocated(bs, align_offset,
|
||||
MIN(align_bytes, max_transfer), &pnum);
|
||||
if (ret < 0) {
|
||||
/*
|
||||
* Safe to treat errors in querying allocation as if
|
||||
@@ -1397,7 +1371,7 @@ bdrv_aligned_preadv(BdrvChild *child, BdrvTrackedRequest *req,
|
||||
/* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */
|
||||
flags &= ~BDRV_REQ_COPY_ON_READ;
|
||||
|
||||
ret = bdrv_co_is_allocated(bs, offset, bytes, &pnum);
|
||||
ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
|
||||
if (ret < 0) {
|
||||
goto out;
|
||||
}
|
||||
@@ -2029,7 +2003,7 @@ bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes,
|
||||
}
|
||||
}
|
||||
|
||||
static inline void coroutine_fn GRAPH_RDLOCK
|
||||
static inline void coroutine_fn
|
||||
bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes,
|
||||
BdrvTrackedRequest *req, int ret)
|
||||
{
|
||||
@@ -2356,7 +2330,6 @@ int bdrv_flush_all(void)
|
||||
int result = 0;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
/*
|
||||
* bdrv queue is managed by record/replay,
|
||||
@@ -2410,9 +2383,9 @@ int bdrv_flush_all(void)
|
||||
* set to the host mapping and BDS corresponding to the guest offset.
|
||||
*/
|
||||
static int coroutine_fn GRAPH_RDLOCK
|
||||
bdrv_co_do_block_status(BlockDriverState *bs, bool want_zero,
|
||||
int64_t offset, int64_t bytes,
|
||||
int64_t *pnum, int64_t *map, BlockDriverState **file)
|
||||
bdrv_co_block_status(BlockDriverState *bs, bool want_zero,
|
||||
int64_t offset, int64_t bytes,
|
||||
int64_t *pnum, int64_t *map, BlockDriverState **file)
|
||||
{
|
||||
int64_t total_size;
|
||||
int64_t n; /* bytes */
|
||||
@@ -2571,8 +2544,8 @@ bdrv_co_do_block_status(BlockDriverState *bs, bool want_zero,
|
||||
|
||||
if (ret & BDRV_BLOCK_RAW) {
|
||||
assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
|
||||
ret = bdrv_co_do_block_status(local_file, want_zero, local_map,
|
||||
*pnum, pnum, &local_map, &local_file);
|
||||
ret = bdrv_co_block_status(local_file, want_zero, local_map,
|
||||
*pnum, pnum, &local_map, &local_file);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -2599,8 +2572,8 @@ bdrv_co_do_block_status(BlockDriverState *bs, bool want_zero,
|
||||
int64_t file_pnum;
|
||||
int ret2;
|
||||
|
||||
ret2 = bdrv_co_do_block_status(local_file, want_zero, local_map,
|
||||
*pnum, &file_pnum, NULL, NULL);
|
||||
ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
|
||||
*pnum, &file_pnum, NULL, NULL);
|
||||
if (ret2 >= 0) {
|
||||
/* Ignore errors. This is just providing extra information, it
|
||||
* is useful but not necessary.
|
||||
@@ -2667,8 +2640,7 @@ bdrv_co_common_block_status_above(BlockDriverState *bs,
|
||||
return 0;
|
||||
}
|
||||
|
||||
ret = bdrv_co_do_block_status(bs, want_zero, offset, bytes, pnum,
|
||||
map, file);
|
||||
ret = bdrv_co_block_status(bs, want_zero, offset, bytes, pnum, map, file);
|
||||
++*depth;
|
||||
if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
|
||||
return ret;
|
||||
@@ -2684,8 +2656,8 @@ bdrv_co_common_block_status_above(BlockDriverState *bs,
|
||||
for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
|
||||
p = bdrv_filter_or_cow_bs(p))
|
||||
{
|
||||
ret = bdrv_co_do_block_status(p, want_zero, offset, bytes, pnum,
|
||||
map, file);
|
||||
ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
|
||||
file);
|
||||
++*depth;
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
@@ -2751,13 +2723,21 @@ int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
|
||||
bytes, pnum, map, file, NULL);
|
||||
}
|
||||
|
||||
int coroutine_fn bdrv_co_block_status(BlockDriverState *bs, int64_t offset,
|
||||
int64_t bytes, int64_t *pnum,
|
||||
int64_t *map, BlockDriverState **file)
|
||||
int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
|
||||
int64_t offset, int64_t bytes, int64_t *pnum,
|
||||
int64_t *map, BlockDriverState **file)
|
||||
{
|
||||
IO_CODE();
|
||||
return bdrv_co_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
|
||||
offset, bytes, pnum, map, file);
|
||||
return bdrv_common_block_status_above(bs, base, false, true, offset, bytes,
|
||||
pnum, map, file, NULL);
|
||||
}
|
||||
|
||||
int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
||||
int64_t *pnum, int64_t *map, BlockDriverState **file)
|
||||
{
|
||||
IO_CODE();
|
||||
return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
|
||||
offset, bytes, pnum, map, file);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -2804,6 +2784,45 @@ int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset,
|
||||
return !!(ret & BDRV_BLOCK_ALLOCATED);
|
||||
}
|
||||
|
||||
int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
||||
int64_t *pnum)
|
||||
{
|
||||
int ret;
|
||||
int64_t dummy;
|
||||
IO_CODE();
|
||||
|
||||
ret = bdrv_common_block_status_above(bs, bs, true, false, offset,
|
||||
bytes, pnum ? pnum : &dummy, NULL,
|
||||
NULL, NULL);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
return !!(ret & BDRV_BLOCK_ALLOCATED);
|
||||
}
|
||||
|
||||
/* See bdrv_is_allocated_above for documentation */
|
||||
int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top,
|
||||
BlockDriverState *base,
|
||||
bool include_base, int64_t offset,
|
||||
int64_t bytes, int64_t *pnum)
|
||||
{
|
||||
int depth;
|
||||
int ret;
|
||||
IO_CODE();
|
||||
|
||||
ret = bdrv_co_common_block_status_above(top, base, include_base, false,
|
||||
offset, bytes, pnum, NULL, NULL,
|
||||
&depth);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (ret & BDRV_BLOCK_ALLOCATED) {
|
||||
return depth;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
|
||||
*
|
||||
@@ -2821,18 +2840,18 @@ int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset,
|
||||
* words, the result is not necessarily the maximum possible range);
|
||||
* but 'pnum' will only be 0 when end of file is reached.
|
||||
*/
|
||||
int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *bs,
|
||||
BlockDriverState *base,
|
||||
bool include_base, int64_t offset,
|
||||
int64_t bytes, int64_t *pnum)
|
||||
int bdrv_is_allocated_above(BlockDriverState *top,
|
||||
BlockDriverState *base,
|
||||
bool include_base, int64_t offset,
|
||||
int64_t bytes, int64_t *pnum)
|
||||
{
|
||||
int depth;
|
||||
int ret;
|
||||
IO_CODE();
|
||||
|
||||
ret = bdrv_co_common_block_status_above(bs, base, include_base, false,
|
||||
offset, bytes, pnum, NULL, NULL,
|
||||
&depth);
|
||||
ret = bdrv_common_block_status_above(top, base, include_base, false,
|
||||
offset, bytes, pnum, NULL, NULL,
|
||||
&depth);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
@@ -3532,13 +3551,9 @@ int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
|
||||
bytes, read_flags, write_flags);
|
||||
}
|
||||
|
||||
static void coroutine_fn GRAPH_RDLOCK
|
||||
bdrv_parent_cb_resize(BlockDriverState *bs)
|
||||
static void bdrv_parent_cb_resize(BlockDriverState *bs)
|
||||
{
|
||||
BdrvChild *c;
|
||||
|
||||
assert_bdrv_graph_readable();
|
||||
|
||||
QLIST_FOREACH(c, &bs->parents, next_parent) {
|
||||
if (c->klass->resize) {
|
||||
c->klass->resize(c);
|
||||
@@ -3685,8 +3700,6 @@ out:
|
||||
void bdrv_cancel_in_flight(BlockDriverState *bs)
|
||||
{
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
if (!bs || !bs->drv) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -15,7 +15,6 @@
|
||||
#include "block/block.h"
|
||||
#include "block/raw-aio.h"
|
||||
#include "qemu/coroutine.h"
|
||||
#include "qemu/defer-call.h"
|
||||
#include "qapi/error.h"
|
||||
#include "sysemu/block-backend.h"
|
||||
#include "trace.h"
|
||||
@@ -125,9 +124,6 @@ static void luring_process_completions(LuringState *s)
|
||||
{
|
||||
struct io_uring_cqe *cqes;
|
||||
int total_bytes;
|
||||
|
||||
defer_call_begin();
|
||||
|
||||
/*
|
||||
* Request completion callbacks can run the nested event loop.
|
||||
* Schedule ourselves so the nested event loop will "see" remaining
|
||||
@@ -220,10 +216,7 @@ end:
|
||||
aio_co_wake(luringcb->co);
|
||||
}
|
||||
}
|
||||
|
||||
qemu_bh_cancel(s->completion_bh);
|
||||
|
||||
defer_call_end();
|
||||
}
|
||||
|
||||
static int ioq_submit(LuringState *s)
|
||||
@@ -313,7 +306,7 @@ static void ioq_init(LuringQueue *io_q)
|
||||
io_q->blocked = false;
|
||||
}
|
||||
|
||||
static void luring_deferred_fn(void *opaque)
|
||||
static void luring_unplug_fn(void *opaque)
|
||||
{
|
||||
LuringState *s = opaque;
|
||||
trace_luring_unplug_fn(s, s->io_q.blocked, s->io_q.in_queue,
|
||||
@@ -374,7 +367,7 @@ static int luring_do_submit(int fd, LuringAIOCB *luringcb, LuringState *s,
|
||||
return ret;
|
||||
}
|
||||
|
||||
defer_call(luring_deferred_fn, s);
|
||||
blk_io_plug_call(luring_unplug_fn, s);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1925,9 +1925,7 @@ static int iscsi_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
/* Check the write protect flag of the LUN if we want to write */
|
||||
if (iscsilun->type == TYPE_DISK && (flags & BDRV_O_RDWR) &&
|
||||
iscsilun->write_protected) {
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
ret = bdrv_apply_auto_read_only(bs, "LUN is write protected", errp);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
if (ret < 0) {
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -14,7 +14,6 @@
|
||||
#include "block/raw-aio.h"
|
||||
#include "qemu/event_notifier.h"
|
||||
#include "qemu/coroutine.h"
|
||||
#include "qemu/defer-call.h"
|
||||
#include "qapi/error.h"
|
||||
#include "sysemu/block-backend.h"
|
||||
|
||||
@@ -205,8 +204,6 @@ static void qemu_laio_process_completions(LinuxAioState *s)
|
||||
{
|
||||
struct io_event *events;
|
||||
|
||||
defer_call_begin();
|
||||
|
||||
/* Reschedule so nested event loops see currently pending completions */
|
||||
qemu_bh_schedule(s->completion_bh);
|
||||
|
||||
@@ -233,8 +230,6 @@ static void qemu_laio_process_completions(LinuxAioState *s)
|
||||
* own `for` loop. If we are the last all counters dropped to zero. */
|
||||
s->event_max = 0;
|
||||
s->event_idx = 0;
|
||||
|
||||
defer_call_end();
|
||||
}
|
||||
|
||||
static void qemu_laio_process_completions_and_submit(LinuxAioState *s)
|
||||
@@ -358,7 +353,7 @@ static uint64_t laio_max_batch(LinuxAioState *s, uint64_t dev_max_batch)
|
||||
return max_batch;
|
||||
}
|
||||
|
||||
static void laio_deferred_fn(void *opaque)
|
||||
static void laio_unplug_fn(void *opaque)
|
||||
{
|
||||
LinuxAioState *s = opaque;
|
||||
|
||||
@@ -398,7 +393,7 @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
|
||||
if (s->io_q.in_queue >= laio_max_batch(s, dev_max_batch)) {
|
||||
ioq_submit(s);
|
||||
} else {
|
||||
defer_call(laio_deferred_fn, s);
|
||||
blk_io_plug_call(laio_unplug_fn, s);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -21,6 +21,7 @@ block_ss.add(files(
'mirror.c',
'nbd.c',
'null.c',
'plug.c',
'preallocate.c',
'progress_meter.c',
'qapi.c',

block/mirror.c | 229
@@ -55,18 +55,10 @@ typedef struct MirrorBlockJob {
|
||||
BlockMirrorBackingMode backing_mode;
|
||||
/* Whether the target image requires explicit zero-initialization */
|
||||
bool zero_target;
|
||||
/*
|
||||
* To be accesssed with atomics. Written only under the BQL (required by the
|
||||
* current implementation of mirror_change()).
|
||||
*/
|
||||
MirrorCopyMode copy_mode;
|
||||
BlockdevOnError on_source_error, on_target_error;
|
||||
/*
|
||||
* To be accessed with atomics.
|
||||
*
|
||||
* Set when the target is synced (dirty bitmap is clean, nothing in flight)
|
||||
* and the job is running in active mode.
|
||||
*/
|
||||
/* Set when the target is synced (dirty bitmap is clean, nothing
|
||||
* in flight) and the job is running in active mode */
|
||||
bool actively_synced;
|
||||
bool should_complete;
|
||||
int64_t granularity;
|
||||
@@ -130,7 +122,7 @@ typedef enum MirrorMethod {
|
||||
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
|
||||
int error)
|
||||
{
|
||||
qatomic_set(&s->actively_synced, false);
|
||||
s->actively_synced = false;
|
||||
if (read) {
|
||||
return block_job_error_action(&s->common, s->on_source_error,
|
||||
true, error);
|
||||
@@ -479,7 +471,7 @@ static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
|
||||
return bytes_handled;
|
||||
}
|
||||
|
||||
static void coroutine_fn GRAPH_RDLOCK mirror_iteration(MirrorBlockJob *s)
|
||||
static void coroutine_fn mirror_iteration(MirrorBlockJob *s)
|
||||
{
|
||||
BlockDriverState *source = s->mirror_top_bs->backing->bs;
|
||||
MirrorOp *pseudo_op;
|
||||
@@ -567,9 +559,9 @@ static void coroutine_fn GRAPH_RDLOCK mirror_iteration(MirrorBlockJob *s)
|
||||
|
||||
assert(!(offset % s->granularity));
|
||||
WITH_GRAPH_RDLOCK_GUARD() {
|
||||
ret = bdrv_co_block_status_above(source, NULL, offset,
|
||||
nb_chunks * s->granularity,
|
||||
&io_bytes, NULL, NULL);
|
||||
ret = bdrv_block_status_above(source, NULL, offset,
|
||||
nb_chunks * s->granularity,
|
||||
&io_bytes, NULL, NULL);
|
||||
}
|
||||
if (ret < 0) {
|
||||
io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
|
||||
@@ -678,7 +670,6 @@ static int mirror_exit_common(Job *job)
|
||||
s->prepared = true;
|
||||
|
||||
aio_context_acquire(qemu_get_aio_context());
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
|
||||
mirror_top_bs = s->mirror_top_bs;
|
||||
bs_opaque = mirror_top_bs->opaque;
|
||||
@@ -697,8 +688,6 @@ static int mirror_exit_common(Job *job)
|
||||
bdrv_ref(mirror_top_bs);
|
||||
bdrv_ref(target_bs);
|
||||
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
|
||||
/*
|
||||
* Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
|
||||
* inserting target_bs at s->to_replace, where we might not be able to get
|
||||
@@ -712,12 +701,12 @@ static int mirror_exit_common(Job *job)
|
||||
* these permissions any more means that we can't allow any new requests on
|
||||
* mirror_top_bs from now on, so keep it drained. */
|
||||
bdrv_drained_begin(mirror_top_bs);
|
||||
bdrv_drained_begin(target_bs);
|
||||
bs_opaque->stop = true;
|
||||
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
|
||||
&error_abort);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
|
||||
if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
|
||||
BlockDriverState *backing = s->is_none_mode ? src : s->base;
|
||||
@@ -740,7 +729,6 @@ static int mirror_exit_common(Job *job)
|
||||
local_err = NULL;
|
||||
}
|
||||
}
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
|
||||
if (s->to_replace) {
|
||||
replace_aio_context = bdrv_get_aio_context(s->to_replace);
|
||||
@@ -758,13 +746,15 @@ static int mirror_exit_common(Job *job)
|
||||
/* The mirror job has no requests in flight any more, but we need to
|
||||
* drain potential other users of the BDS before changing the graph. */
|
||||
assert(s->in_drain);
|
||||
bdrv_drained_begin(to_replace);
|
||||
bdrv_drained_begin(target_bs);
|
||||
/*
|
||||
* Cannot use check_to_replace_node() here, because that would
|
||||
* check for an op blocker on @to_replace, and we have our own
|
||||
* there.
|
||||
*
|
||||
* TODO Pull out the writer lock from bdrv_replace_node() to here
|
||||
*/
|
||||
bdrv_graph_wrlock(target_bs);
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
if (bdrv_recurse_can_replace(src, to_replace)) {
|
||||
bdrv_replace_node(to_replace, target_bs, &local_err);
|
||||
} else {
|
||||
@@ -773,8 +763,8 @@ static int mirror_exit_common(Job *job)
|
||||
"would not lead to an abrupt change of visible data",
|
||||
to_replace->node_name, target_bs->node_name);
|
||||
}
|
||||
bdrv_graph_wrunlock();
|
||||
bdrv_drained_end(to_replace);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
bdrv_drained_end(target_bs);
|
||||
if (local_err) {
|
||||
error_report_err(local_err);
|
||||
ret = -EPERM;
|
||||
@@ -789,6 +779,7 @@ static int mirror_exit_common(Job *job)
|
||||
aio_context_release(replace_aio_context);
|
||||
}
|
||||
g_free(s->replaces);
|
||||
bdrv_unref(target_bs);
|
||||
|
||||
/*
|
||||
* Remove the mirror filter driver from the graph. Before this, get rid of
|
||||
@@ -796,12 +787,7 @@ static int mirror_exit_common(Job *job)
|
||||
* valid.
|
||||
*/
|
||||
block_job_remove_all_bdrv(bjob);
|
||||
bdrv_graph_wrlock(mirror_top_bs);
|
||||
bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);
|
||||
bdrv_graph_wrunlock();
|
||||
|
||||
bdrv_drained_end(target_bs);
|
||||
bdrv_unref(target_bs);
|
||||
|
||||
bs_opaque->job = NULL;
|
||||
|
||||
@@ -839,18 +825,14 @@ static void coroutine_fn mirror_throttle(MirrorBlockJob *s)
|
||||
}
|
||||
}
|
||||
|
||||
static int coroutine_fn GRAPH_UNLOCKED mirror_dirty_init(MirrorBlockJob *s)
|
||||
static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
|
||||
{
|
||||
int64_t offset;
|
||||
BlockDriverState *bs;
|
||||
BlockDriverState *bs = s->mirror_top_bs->backing->bs;
|
||||
BlockDriverState *target_bs = blk_bs(s->target);
|
||||
int ret;
|
||||
int64_t count;
|
||||
|
||||
bdrv_graph_co_rdlock();
|
||||
bs = s->mirror_top_bs->backing->bs;
|
||||
bdrv_graph_co_rdunlock();
|
||||
|
||||
if (s->zero_target) {
|
||||
if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
|
||||
bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
|
||||
@@ -897,8 +879,8 @@ static int coroutine_fn GRAPH_UNLOCKED mirror_dirty_init(MirrorBlockJob *s)
|
||||
}
|
||||
|
||||
WITH_GRAPH_RDLOCK_GUARD() {
|
||||
ret = bdrv_co_is_allocated_above(bs, s->base_overlay, true, offset,
|
||||
bytes, &count);
|
||||
ret = bdrv_is_allocated_above(bs, s->base_overlay, true, offset,
|
||||
bytes, &count);
|
||||
}
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
@@ -930,7 +912,7 @@ static int coroutine_fn mirror_flush(MirrorBlockJob *s)
|
||||
static int coroutine_fn mirror_run(Job *job, Error **errp)
|
||||
{
|
||||
MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
|
||||
BlockDriverState *bs;
|
||||
BlockDriverState *bs = s->mirror_top_bs->backing->bs;
|
||||
MirrorBDSOpaque *mirror_top_opaque = s->mirror_top_bs->opaque;
|
||||
BlockDriverState *target_bs = blk_bs(s->target);
|
||||
bool need_drain = true;
|
||||
@@ -942,10 +924,6 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
|
||||
checking for a NULL string */
|
||||
int ret = 0;
|
||||
|
||||
bdrv_graph_co_rdlock();
|
||||
bs = bdrv_filter_bs(s->mirror_top_bs);
|
||||
bdrv_graph_co_rdunlock();
|
||||
|
||||
if (job_is_cancelled(&s->common.job)) {
|
||||
goto immediate_exit;
|
||||
}
|
||||
@@ -984,7 +962,7 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
|
||||
if (s->bdev_length == 0) {
|
||||
/* Transition to the READY state and wait for complete. */
|
||||
job_transition_to_ready(&s->common.job);
|
||||
qatomic_set(&s->actively_synced, true);
|
||||
s->actively_synced = true;
|
||||
while (!job_cancel_requested(&s->common.job) && !s->should_complete) {
|
||||
job_yield(&s->common.job);
|
||||
}
|
||||
@@ -1006,13 +984,13 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
|
||||
} else {
|
||||
s->target_cluster_size = BDRV_SECTOR_SIZE;
|
||||
}
|
||||
bdrv_graph_co_rdunlock();
|
||||
if (backing_filename[0] && !bdrv_backing_chain_next(target_bs) &&
|
||||
s->granularity < s->target_cluster_size) {
|
||||
s->buf_size = MAX(s->buf_size, s->target_cluster_size);
|
||||
s->cow_bitmap = bitmap_new(length);
|
||||
}
|
||||
s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);
|
||||
bdrv_graph_co_rdunlock();
|
||||
|
||||
s->buf = qemu_try_blockalign(bs, s->buf_size);
|
||||
if (s->buf == NULL) {
|
||||
@@ -1078,9 +1056,7 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
|
||||
mirror_wait_for_free_in_flight_slot(s);
|
||||
continue;
|
||||
} else if (cnt != 0) {
|
||||
bdrv_graph_co_rdlock();
|
||||
mirror_iteration(s);
|
||||
bdrv_graph_co_rdunlock();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1098,9 +1074,9 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
|
||||
* the target in a consistent state.
|
||||
*/
|
||||
job_transition_to_ready(&s->common.job);
|
||||
}
|
||||
if (qatomic_read(&s->copy_mode) != MIRROR_COPY_MODE_BACKGROUND) {
|
||||
qatomic_set(&s->actively_synced, true);
|
||||
if (s->copy_mode != MIRROR_COPY_MODE_BACKGROUND) {
|
||||
s->actively_synced = true;
|
||||
}
|
||||
}
|
||||
|
||||
should_complete = s->should_complete ||
|
||||
@@ -1270,48 +1246,6 @@ static bool commit_active_cancel(Job *job, bool force)
|
||||
return force || !job_is_ready(job);
|
||||
}
|
||||
|
||||
static void mirror_change(BlockJob *job, BlockJobChangeOptions *opts,
|
||||
Error **errp)
|
||||
{
|
||||
MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
|
||||
BlockJobChangeOptionsMirror *change_opts = &opts->u.mirror;
|
||||
MirrorCopyMode current;
|
||||
|
||||
/*
|
||||
* The implementation relies on the fact that copy_mode is only written
|
||||
* under the BQL. Otherwise, further synchronization would be required.
|
||||
*/
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
if (qatomic_read(&s->copy_mode) == change_opts->copy_mode) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (change_opts->copy_mode != MIRROR_COPY_MODE_WRITE_BLOCKING) {
|
||||
error_setg(errp, "Change to copy mode '%s' is not implemented",
|
||||
MirrorCopyMode_str(change_opts->copy_mode));
|
||||
return;
|
||||
}
|
||||
|
||||
current = qatomic_cmpxchg(&s->copy_mode, MIRROR_COPY_MODE_BACKGROUND,
|
||||
change_opts->copy_mode);
|
||||
if (current != MIRROR_COPY_MODE_BACKGROUND) {
|
||||
error_setg(errp, "Expected current copy mode '%s', got '%s'",
|
||||
MirrorCopyMode_str(MIRROR_COPY_MODE_BACKGROUND),
|
||||
MirrorCopyMode_str(current));
|
||||
}
|
||||
}
|
||||
|
||||
static void mirror_query(BlockJob *job, BlockJobInfo *info)
|
||||
{
|
||||
MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
|
||||
|
||||
info->u.mirror = (BlockJobInfoMirror) {
|
||||
.actively_synced = qatomic_read(&s->actively_synced),
|
||||
};
|
||||
}
|
||||
|
||||
static const BlockJobDriver mirror_job_driver = {
|
||||
.job_driver = {
|
||||
.instance_size = sizeof(MirrorBlockJob),
|
||||
@@ -1326,8 +1260,6 @@ static const BlockJobDriver mirror_job_driver = {
|
||||
.cancel = mirror_cancel,
|
||||
},
|
||||
.drained_poll = mirror_drained_poll,
|
||||
.change = mirror_change,
|
||||
.query = mirror_query,
|
||||
};
|
||||
|
||||
static const BlockJobDriver commit_active_job_driver = {
|
||||
@@ -1446,7 +1378,7 @@ do_sync_target_write(MirrorBlockJob *job, MirrorMethod method,
|
||||
bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity);
|
||||
bdrv_set_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
|
||||
bitmap_end - bitmap_offset);
|
||||
qatomic_set(&job->actively_synced, false);
|
||||
job->actively_synced = false;
|
||||
|
||||
action = mirror_error_action(job, false, -ret);
|
||||
if (action == BLOCK_ERROR_ACTION_REPORT) {
|
||||
@@ -1505,8 +1437,7 @@ static void coroutine_fn GRAPH_RDLOCK active_write_settle(MirrorOp *op)
|
||||
uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes,
|
||||
op->s->granularity);
|
||||
|
||||
if (!--op->s->in_active_write_counter &&
|
||||
qatomic_read(&op->s->actively_synced)) {
|
||||
if (!--op->s->in_active_write_counter && op->s->actively_synced) {
|
||||
BdrvChild *source = op->s->mirror_top_bs->backing;
|
||||
|
||||
if (QLIST_FIRST(&source->bs->parents) == source &&
|
||||
@@ -1532,21 +1463,21 @@ bdrv_mirror_top_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
||||
return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
|
||||
}
|
||||
|
||||
static bool should_copy_to_target(MirrorBDSOpaque *s)
|
||||
{
|
||||
return s->job && s->job->ret >= 0 &&
|
||||
!job_is_cancelled(&s->job->common.job) &&
|
||||
qatomic_read(&s->job->copy_mode) == MIRROR_COPY_MODE_WRITE_BLOCKING;
|
||||
}
|
||||
|
||||
static int coroutine_fn GRAPH_RDLOCK
|
||||
bdrv_mirror_top_do_write(BlockDriverState *bs, MirrorMethod method,
|
||||
bool copy_to_target, uint64_t offset, uint64_t bytes,
|
||||
QEMUIOVector *qiov, int flags)
|
||||
uint64_t offset, uint64_t bytes, QEMUIOVector *qiov,
|
||||
int flags)
|
||||
{
|
||||
MirrorOp *op = NULL;
|
||||
MirrorBDSOpaque *s = bs->opaque;
|
||||
int ret = 0;
|
||||
bool copy_to_target = false;
|
||||
|
||||
if (s->job) {
|
||||
copy_to_target = s->job->ret >= 0 &&
|
||||
!job_is_cancelled(&s->job->common.job) &&
|
||||
s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;
|
||||
}
|
||||
|
||||
if (copy_to_target) {
|
||||
op = active_write_prepare(s->job, offset, bytes);
|
||||
@@ -1569,11 +1500,6 @@ bdrv_mirror_top_do_write(BlockDriverState *bs, MirrorMethod method,
|
||||
abort();
|
||||
}
|
||||
|
||||
if (!copy_to_target && s->job && s->job->dirty_bitmap) {
|
||||
qatomic_set(&s->job->actively_synced, false);
|
||||
bdrv_set_dirty_bitmap(s->job->dirty_bitmap, offset, bytes);
|
||||
}
|
||||
|
||||
if (ret < 0) {
|
||||
goto out;
|
||||
}
|
||||
@@ -1593,10 +1519,17 @@ static int coroutine_fn GRAPH_RDLOCK
|
||||
bdrv_mirror_top_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
||||
QEMUIOVector *qiov, BdrvRequestFlags flags)
|
||||
{
|
||||
MirrorBDSOpaque *s = bs->opaque;
|
||||
QEMUIOVector bounce_qiov;
|
||||
void *bounce_buf;
|
||||
int ret = 0;
|
||||
bool copy_to_target = should_copy_to_target(bs->opaque);
|
||||
bool copy_to_target = false;
|
||||
|
||||
if (s->job) {
|
||||
copy_to_target = s->job->ret >= 0 &&
|
||||
!job_is_cancelled(&s->job->common.job) &&
|
||||
s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;
|
||||
}
|
||||
|
||||
if (copy_to_target) {
|
||||
/* The guest might concurrently modify the data to write; but
|
||||
@@ -1613,8 +1546,8 @@ bdrv_mirror_top_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
||||
flags &= ~BDRV_REQ_REGISTERED_BUF;
|
||||
}
|
||||
|
||||
ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, copy_to_target,
|
||||
offset, bytes, qiov, flags);
|
||||
ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, offset, bytes, qiov,
|
||||
flags);
|
||||
|
||||
if (copy_to_target) {
|
||||
qemu_iovec_destroy(&bounce_qiov);
|
||||
@@ -1637,20 +1570,18 @@ static int coroutine_fn GRAPH_RDLOCK
|
||||
bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
|
||||
int64_t bytes, BdrvRequestFlags flags)
|
||||
{
|
||||
bool copy_to_target = should_copy_to_target(bs->opaque);
|
||||
return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, copy_to_target,
|
||||
offset, bytes, NULL, flags);
|
||||
return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, offset, bytes, NULL,
|
||||
flags);
|
||||
}
|
||||
|
||||
static int coroutine_fn GRAPH_RDLOCK
|
||||
bdrv_mirror_top_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
|
||||
{
|
||||
bool copy_to_target = should_copy_to_target(bs->opaque);
|
||||
return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, copy_to_target,
|
||||
offset, bytes, NULL, 0);
|
||||
return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, offset, bytes,
|
||||
NULL, 0);
|
||||
}
|
||||
|
||||
static void GRAPH_RDLOCK bdrv_mirror_top_refresh_filename(BlockDriverState *bs)
|
||||
static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs)
|
||||
{
|
||||
if (bs->backing == NULL) {
|
||||
/* we can be here after failed bdrv_attach_child in
|
||||
@@ -1760,15 +1691,12 @@ static BlockJob *mirror_start_job(
|
||||
buf_size = DEFAULT_MIRROR_BUF_SIZE;
|
||||
}
|
||||
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
if (bdrv_skip_filters(bs) == bdrv_skip_filters(target)) {
|
||||
error_setg(errp, "Can't mirror node into itself");
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
target_is_backing = bdrv_chain_contains(bs, target);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
|
||||
/* In the case of active commit, add dummy driver to provide consistent
|
||||
* reads on the top, while disabling it in the intermediate nodes, and make
|
||||
@@ -1851,19 +1779,14 @@ static BlockJob *mirror_start_job(
|
||||
}
|
||||
|
||||
target_shared_perms |= BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE;
|
||||
} else {
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
if (bdrv_chain_contains(bs, bdrv_skip_filters(target))) {
|
||||
/*
|
||||
* We may want to allow this in the future, but it would
|
||||
* require taking some extra care.
|
||||
*/
|
||||
error_setg(errp, "Cannot mirror to a filter on top of a node in "
|
||||
"the source's backing chain");
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
goto fail;
|
||||
}
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
} else if (bdrv_chain_contains(bs, bdrv_skip_filters(target))) {
|
||||
/*
|
||||
* We may want to allow this in the future, but it would
|
||||
* require taking some extra care.
|
||||
*/
|
||||
error_setg(errp, "Cannot mirror to a filter on top of a node in the "
|
||||
"source's backing chain");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
s->target = blk_new(s->common.job.aio_context,
|
||||
@@ -1884,14 +1807,13 @@ static BlockJob *mirror_start_job(
|
||||
blk_set_allow_aio_context_change(s->target, true);
|
||||
blk_set_disable_request_queuing(s->target, true);
|
||||
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
s->replaces = g_strdup(replaces);
|
||||
s->on_source_error = on_source_error;
|
||||
s->on_target_error = on_target_error;
|
||||
s->is_none_mode = is_none_mode;
|
||||
s->backing_mode = backing_mode;
|
||||
s->zero_target = zero_target;
|
||||
qatomic_set(&s->copy_mode, copy_mode);
|
||||
s->copy_mode = copy_mode;
|
||||
s->base = base;
|
||||
s->base_overlay = bdrv_find_overlay(bs, base);
|
||||
s->granularity = granularity;
|
||||
@@ -1900,27 +1822,20 @@ static BlockJob *mirror_start_job(
|
||||
if (auto_complete) {
|
||||
s->should_complete = true;
|
||||
}
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
|
||||
s->dirty_bitmap = bdrv_create_dirty_bitmap(s->mirror_top_bs, granularity,
|
||||
NULL, errp);
|
||||
s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
|
||||
if (!s->dirty_bitmap) {
|
||||
goto fail;
|
||||
}
|
||||
if (s->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING) {
|
||||
bdrv_disable_dirty_bitmap(s->dirty_bitmap);
|
||||
}
|
||||
|
||||
/*
|
||||
* The dirty bitmap is set by bdrv_mirror_top_do_write() when not in active
|
||||
* mode.
|
||||
*/
|
||||
bdrv_disable_dirty_bitmap(s->dirty_bitmap);
|
||||
|
||||
bdrv_graph_wrlock(bs);
|
||||
ret = block_job_add_bdrv(&s->common, "source", bs, 0,
|
||||
BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
|
||||
BLK_PERM_CONSISTENT_READ,
|
||||
errp);
|
||||
if (ret < 0) {
|
||||
bdrv_graph_wrunlock();
|
||||
goto fail;
|
||||
}
|
||||
|
||||
@@ -1965,17 +1880,14 @@ static BlockJob *mirror_start_job(
|
||||
ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
|
||||
iter_shared_perms, errp);
|
||||
if (ret < 0) {
|
||||
bdrv_graph_wrunlock();
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
|
||||
if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
|
||||
bdrv_graph_wrunlock();
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
bdrv_graph_wrunlock();
|
||||
|
||||
QTAILQ_INIT(&s->ops_in_flight);
|
||||
|
||||
@@ -2000,14 +1912,11 @@ fail:
|
||||
}
|
||||
|
||||
bs_opaque->stop = true;
|
||||
bdrv_drained_begin(bs);
|
||||
bdrv_graph_wrlock(bs);
|
||||
assert(mirror_top_bs->backing->bs == bs);
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
|
||||
&error_abort);
|
||||
bdrv_replace_node(mirror_top_bs, bs, &error_abort);
|
||||
bdrv_graph_wrunlock();
|
||||
bdrv_drained_end(bs);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);
|
||||
|
||||
bdrv_unref(mirror_top_bs);
|
||||
|
||||
@@ -2036,12 +1945,8 @@ void mirror_start(const char *job_id, BlockDriverState *bs,
|
||||
MirrorSyncMode_str(mode));
|
||||
return;
|
||||
}
|
||||
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
|
||||
base = mode == MIRROR_SYNC_MODE_TOP ? bdrv_backing_chain_next(bs) : NULL;
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
|
||||
mirror_start_job(job_id, bs, creation_flags, target, replaces,
|
||||
speed, granularity, buf_size, backing_mode, zero_target,
|
||||
on_source_error, on_target_error, unmap, NULL, NULL,
|
||||
|
||||
@@ -144,9 +144,6 @@ void hmp_drive_del(Monitor *mon, const QDict *qdict)
|
||||
AioContext *aio_context;
|
||||
Error *local_err = NULL;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
bs = bdrv_find_node(id);
|
||||
if (bs) {
|
||||
qmp_blockdev_del(id, &local_err);
|
||||
@@ -206,9 +203,6 @@ void hmp_commit(Monitor *mon, const QDict *qdict)
|
||||
BlockBackend *blk;
|
||||
int ret;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
if (!strcmp(device, "all")) {
|
||||
ret = blk_commit_all();
|
||||
} else {
|
||||
@@ -849,7 +843,7 @@ void hmp_info_block_jobs(Monitor *mon, const QDict *qdict)
|
||||
}
|
||||
|
||||
while (list) {
|
||||
if (list->value->type == JOB_TYPE_STREAM) {
|
||||
if (strcmp(list->value->type, "stream") == 0) {
|
||||
monitor_printf(mon, "Streaming device %s: Completed %" PRId64
|
||||
" of %" PRId64 " bytes, speed limit %" PRId64
|
||||
" bytes/s\n",
|
||||
@@ -861,7 +855,7 @@ void hmp_info_block_jobs(Monitor *mon, const QDict *qdict)
|
||||
monitor_printf(mon, "Type %s, device %s: Completed %" PRId64
|
||||
" of %" PRId64 " bytes, speed limit %" PRId64
|
||||
" bytes/s\n",
|
||||
JobType_str(list->value->type),
|
||||
list->value->type,
|
||||
list->value->device,
|
||||
list->value->offset,
|
||||
list->value->len,
|
||||
@@ -902,8 +896,6 @@ void hmp_info_snapshots(Monitor *mon, const QDict *qdict)
|
||||
SnapshotEntry *snapshot_entry;
|
||||
Error *err = NULL;
|
||||
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
bs = bdrv_all_find_vmstate_bs(NULL, false, NULL, &err);
|
||||
if (!bs) {
|
||||
error_report_err(err);
|
||||
|
||||
@@ -275,8 +275,7 @@ static bool nbd_client_will_reconnect(BDRVNBDState *s)
|
||||
* Return failure if the server's advertised options are incompatible with the
|
||||
* client's needs.
|
||||
*/
|
||||
static int coroutine_fn GRAPH_RDLOCK
|
||||
nbd_handle_updated_info(BlockDriverState *bs, Error **errp)
|
||||
static int nbd_handle_updated_info(BlockDriverState *bs, Error **errp)
|
||||
{
|
||||
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
|
||||
int ret;
|
||||
|
||||
@@ -843,7 +843,7 @@ static void nfs_refresh_filename(BlockDriverState *bs)
|
||||
}
|
||||
}
|
||||
|
||||
static char * GRAPH_RDLOCK nfs_dirname(BlockDriverState *bs, Error **errp)
|
||||
static char *nfs_dirname(BlockDriverState *bs, Error **errp)
|
||||
{
|
||||
NFSClient *client = bs->opaque;
|
||||
|
||||
|
||||
12
block/nvme.c
@@ -16,7 +16,6 @@
|
||||
#include "qapi/error.h"
|
||||
#include "qapi/qmp/qdict.h"
|
||||
#include "qapi/qmp/qstring.h"
|
||||
#include "qemu/defer-call.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "qemu/module.h"
|
||||
@@ -417,10 +416,9 @@ static bool nvme_process_completion(NVMeQueuePair *q)
|
||||
q->cq_phase = !q->cq_phase;
|
||||
}
|
||||
cid = le16_to_cpu(c->cid);
|
||||
if (cid == 0 || cid > NVME_NUM_REQS) {
|
||||
warn_report("NVMe: Unexpected CID in completion queue: %" PRIu32
|
||||
", should be within: 1..%u inclusively", cid,
|
||||
NVME_NUM_REQS);
|
||||
if (cid == 0 || cid > NVME_QUEUE_SIZE) {
|
||||
warn_report("NVMe: Unexpected CID in completion queue: %"PRIu32", "
|
||||
"queue size: %u", cid, NVME_QUEUE_SIZE);
|
||||
continue;
|
||||
}
|
||||
trace_nvme_complete_command(s, q->index, cid);
|
||||
@@ -478,7 +476,7 @@ static void nvme_trace_command(const NvmeCmd *cmd)
|
||||
}
|
||||
}
|
||||
|
||||
static void nvme_deferred_fn(void *opaque)
|
||||
static void nvme_unplug_fn(void *opaque)
|
||||
{
|
||||
NVMeQueuePair *q = opaque;
|
||||
|
||||
@@ -505,7 +503,7 @@ static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
|
||||
q->need_kick++;
|
||||
qemu_mutex_unlock(&q->lock);
|
||||
|
||||
defer_call(nvme_deferred_fn, q);
|
||||
blk_io_plug_call(nvme_unplug_fn, q);
|
||||
}
|
||||
|
||||
static void nvme_admin_cmd_sync_cb(void *opaque, int ret)
|
||||
|
||||
@@ -59,10 +59,11 @@ typedef struct ParallelsDirtyBitmapFeature {
|
||||
} QEMU_PACKED ParallelsDirtyBitmapFeature;
|
||||
|
||||
/* Given L1 table read bitmap data from the image and populate @bitmap */
|
||||
static int GRAPH_RDLOCK
|
||||
parallels_load_bitmap_data(BlockDriverState *bs, const uint64_t *l1_table,
|
||||
uint32_t l1_size, BdrvDirtyBitmap *bitmap,
|
||||
Error **errp)
|
||||
static int parallels_load_bitmap_data(BlockDriverState *bs,
|
||||
const uint64_t *l1_table,
|
||||
uint32_t l1_size,
|
||||
BdrvDirtyBitmap *bitmap,
|
||||
Error **errp)
|
||||
{
|
||||
BDRVParallelsState *s = bs->opaque;
|
||||
int ret = 0;
|
||||
@@ -119,16 +120,17 @@ finish:
|
||||
* @data buffer (of @data_size size) is the Dirty bitmaps feature which
|
||||
* consists of ParallelsDirtyBitmapFeature followed by L1 table.
|
||||
*/
|
||||
static BdrvDirtyBitmap * GRAPH_RDLOCK
|
||||
parallels_load_bitmap(BlockDriverState *bs, uint8_t *data, size_t data_size,
|
||||
Error **errp)
|
||||
static BdrvDirtyBitmap *parallels_load_bitmap(BlockDriverState *bs,
|
||||
uint8_t *data,
|
||||
size_t data_size,
|
||||
Error **errp)
|
||||
{
|
||||
int ret;
|
||||
ParallelsDirtyBitmapFeature bf;
|
||||
g_autofree uint64_t *l1_table = NULL;
|
||||
BdrvDirtyBitmap *bitmap;
|
||||
QemuUUID uuid;
|
||||
char uuidstr[UUID_STR_LEN];
|
||||
char uuidstr[UUID_FMT_LEN + 1];
|
||||
int i;
|
||||
|
||||
if (data_size < sizeof(bf)) {
|
||||
@@ -181,9 +183,8 @@ parallels_load_bitmap(BlockDriverState *bs, uint8_t *data, size_t data_size,
|
||||
return bitmap;
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK
|
||||
parallels_parse_format_extension(BlockDriverState *bs, uint8_t *ext_cluster,
|
||||
Error **errp)
|
||||
static int parallels_parse_format_extension(BlockDriverState *bs,
|
||||
uint8_t *ext_cluster, Error **errp)
|
||||
{
|
||||
BDRVParallelsState *s = bs->opaque;
|
||||
int ret;
|
||||
|
||||
@@ -200,7 +200,7 @@ static int mark_used(BlockDriverState *bs, unsigned long *bitmap,
|
||||
* bitmap anyway, as much as we can. This information will be used for
|
||||
* error resolution.
|
||||
*/
|
||||
static int GRAPH_RDLOCK parallels_fill_used_bitmap(BlockDriverState *bs)
|
||||
static int parallels_fill_used_bitmap(BlockDriverState *bs)
|
||||
{
|
||||
BDRVParallelsState *s = bs->opaque;
|
||||
int64_t payload_bytes;
|
||||
@@ -415,10 +415,14 @@ parallels_co_flush_to_os(BlockDriverState *bs)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int coroutine_fn GRAPH_RDLOCK
|
||||
parallels_co_block_status(BlockDriverState *bs, bool want_zero, int64_t offset,
|
||||
int64_t bytes, int64_t *pnum, int64_t *map,
|
||||
BlockDriverState **file)
|
||||
|
||||
static int coroutine_fn parallels_co_block_status(BlockDriverState *bs,
|
||||
bool want_zero,
|
||||
int64_t offset,
|
||||
int64_t bytes,
|
||||
int64_t *pnum,
|
||||
int64_t *map,
|
||||
BlockDriverState **file)
|
||||
{
|
||||
BDRVParallelsState *s = bs->opaque;
|
||||
int count;
|
||||
@@ -1185,7 +1189,7 @@ static int parallels_probe(const uint8_t *buf, int buf_size,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK parallels_update_header(BlockDriverState *bs)
|
||||
static int parallels_update_header(BlockDriverState *bs)
|
||||
{
|
||||
BDRVParallelsState *s = bs->opaque;
|
||||
unsigned size = MAX(bdrv_opt_mem_align(bs->file->bs),
|
||||
@@ -1255,8 +1259,6 @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
return ret;
|
||||
}
|
||||
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
file_nb_sectors = bdrv_nb_sectors(bs->file->bs);
|
||||
if (file_nb_sectors < 0) {
|
||||
return -EINVAL;
|
||||
@@ -1364,9 +1366,9 @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
error_setg(&s->migration_blocker, "The Parallels format used by node '%s' "
|
||||
"does not support live migration",
|
||||
bdrv_get_device_or_node_name(bs));
|
||||
|
||||
ret = migrate_add_blocker_normal(&s->migration_blocker, errp);
|
||||
ret = migrate_add_blocker(s->migration_blocker, errp);
|
||||
if (ret < 0) {
|
||||
error_setg(errp, "Migration blocker error");
|
||||
goto fail;
|
||||
}
|
||||
qemu_co_mutex_init(&s->lock);
|
||||
@@ -1401,7 +1403,7 @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
ret = bdrv_check(bs, &res, BDRV_FIX_ERRORS | BDRV_FIX_LEAKS);
|
||||
if (ret < 0) {
|
||||
error_setg_errno(errp, -ret, "Could not repair corrupted image");
|
||||
migrate_del_blocker(&s->migration_blocker);
|
||||
migrate_del_blocker(s->migration_blocker);
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
@@ -1418,6 +1420,7 @@ fail:
|
||||
*/
|
||||
parallels_free_used_bitmap(bs);
|
||||
|
||||
error_free(s->migration_blocker);
|
||||
g_free(s->bat_dirty_bmap);
|
||||
qemu_vfree(s->header);
|
||||
return ret;
|
||||
@@ -1428,8 +1431,6 @@ static void parallels_close(BlockDriverState *bs)
|
||||
{
|
||||
BDRVParallelsState *s = bs->opaque;
|
||||
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
if ((bs->open_flags & BDRV_O_RDWR) && !(bs->open_flags & BDRV_O_INACTIVE)) {
|
||||
s->header->inuse = 0;
|
||||
parallels_update_header(bs);
|
||||
@@ -1444,7 +1445,8 @@ static void parallels_close(BlockDriverState *bs)
|
||||
g_free(s->bat_dirty_bmap);
|
||||
qemu_vfree(s->header);
|
||||
|
||||
migrate_del_blocker(&s->migration_blocker);
|
||||
migrate_del_blocker(s->migration_blocker);
|
||||
error_free(s->migration_blocker);
|
||||
}
|
||||
|
||||
static bool parallels_is_support_dirty_bitmaps(BlockDriverState *bs)
|
||||
|
||||
@@ -90,8 +90,7 @@ typedef struct BDRVParallelsState {
|
||||
Error *migration_blocker;
|
||||
} BDRVParallelsState;
|
||||
|
||||
int GRAPH_RDLOCK
|
||||
parallels_read_format_extension(BlockDriverState *bs, int64_t ext_off,
|
||||
Error **errp);
|
||||
int parallels_read_format_extension(BlockDriverState *bs,
|
||||
int64_t ext_off, Error **errp);
|
||||
|
||||
#endif
|
||||
|
||||
159
block/plug.c
Normal file
@@ -0,0 +1,159 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
||||
/*
|
||||
* Block I/O plugging
|
||||
*
|
||||
* Copyright Red Hat.
|
||||
*
|
||||
* This API defers a function call within a blk_io_plug()/blk_io_unplug()
|
||||
* section, allowing multiple calls to batch up. This is a performance
|
||||
* optimization that is used in the block layer to submit several I/O requests
|
||||
* at once instead of individually:
|
||||
*
|
||||
* blk_io_plug(); <-- start of plugged region
|
||||
* ...
|
||||
* blk_io_plug_call(my_func, my_obj); <-- deferred my_func(my_obj) call
|
||||
* blk_io_plug_call(my_func, my_obj); <-- another
|
||||
* blk_io_plug_call(my_func, my_obj); <-- another
|
||||
* ...
|
||||
* blk_io_unplug(); <-- end of plugged region, my_func(my_obj) is called once
|
||||
*
|
||||
* This code is actually generic and not tied to the block layer. If another
|
||||
* subsystem needs this functionality, it could be renamed.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/coroutine-tls.h"
|
||||
#include "qemu/notify.h"
|
||||
#include "qemu/thread.h"
|
||||
#include "sysemu/block-backend.h"
|
||||
|
||||
/* A function call that has been deferred until unplug() */
|
||||
typedef struct {
|
||||
void (*fn)(void *);
|
||||
void *opaque;
|
||||
} UnplugFn;
|
||||
|
||||
/* Per-thread state */
|
||||
typedef struct {
|
||||
unsigned count; /* how many times has plug() been called? */
|
||||
GArray *unplug_fns; /* functions to call at unplug time */
|
||||
} Plug;
|
||||
|
||||
/* Use get_ptr_plug() to fetch this thread-local value */
|
||||
QEMU_DEFINE_STATIC_CO_TLS(Plug, plug);
|
||||
|
||||
/* Called at thread cleanup time */
|
||||
static void blk_io_plug_atexit(Notifier *n, void *value)
|
||||
{
|
||||
Plug *plug = get_ptr_plug();
|
||||
g_array_free(plug->unplug_fns, TRUE);
|
||||
}
|
||||
|
||||
/* This won't involve coroutines, so use __thread */
|
||||
static __thread Notifier blk_io_plug_atexit_notifier;
|
||||
|
||||
/**
|
||||
* blk_io_plug_call:
|
||||
* @fn: a function pointer to be invoked
|
||||
* @opaque: a user-defined argument to @fn()
|
||||
*
|
||||
* Call @fn(@opaque) immediately if not within a blk_io_plug()/blk_io_unplug()
|
||||
* section.
|
||||
*
|
||||
* Otherwise defer the call until the end of the outermost
|
||||
* blk_io_plug()/blk_io_unplug() section in this thread. If the same
|
||||
* @fn/@opaque pair has already been deferred, it will only be called once upon
|
||||
* blk_io_unplug() so that accumulated calls are batched into a single call.
|
||||
*
|
||||
* The caller must ensure that @opaque is not freed before @fn() is invoked.
|
||||
*/
|
||||
void blk_io_plug_call(void (*fn)(void *), void *opaque)
|
||||
{
|
||||
Plug *plug = get_ptr_plug();
|
||||
|
||||
/* Call immediately if we're not plugged */
|
||||
if (plug->count == 0) {
|
||||
fn(opaque);
|
||||
return;
|
||||
}
|
||||
|
||||
GArray *array = plug->unplug_fns;
|
||||
if (!array) {
|
||||
array = g_array_new(FALSE, FALSE, sizeof(UnplugFn));
|
||||
plug->unplug_fns = array;
|
||||
blk_io_plug_atexit_notifier.notify = blk_io_plug_atexit;
|
||||
qemu_thread_atexit_add(&blk_io_plug_atexit_notifier);
|
||||
}
|
||||
|
||||
UnplugFn *fns = (UnplugFn *)array->data;
|
||||
UnplugFn new_fn = {
|
||||
.fn = fn,
|
||||
.opaque = opaque,
|
||||
};
|
||||
|
||||
/*
|
||||
* There won't be many, so do a linear search. If this becomes a bottleneck
|
||||
* then a binary search (glib 2.62+) or different data structure could be
|
||||
* used.
|
||||
*/
|
||||
for (guint i = 0; i < array->len; i++) {
|
||||
if (memcmp(&fns[i], &new_fn, sizeof(new_fn)) == 0) {
|
||||
return; /* already exists */
|
||||
}
|
||||
}
|
||||
|
||||
g_array_append_val(array, new_fn);
|
||||
}
|
||||
|
||||
/**
|
||||
* blk_io_plug: Defer blk_io_plug_call() functions until blk_io_unplug()
|
||||
*
|
||||
* blk_io_plug/unplug are thread-local operations. This means that multiple
|
||||
* threads can simultaneously call plug/unplug, but the caller must ensure that
|
||||
* each unplug() is called in the same thread of the matching plug().
|
||||
*
|
||||
* Nesting is supported. blk_io_plug_call() functions are only called at the
|
||||
* outermost blk_io_unplug().
|
||||
*/
|
||||
void blk_io_plug(void)
|
||||
{
|
||||
Plug *plug = get_ptr_plug();
|
||||
|
||||
assert(plug->count < UINT32_MAX);
|
||||
|
||||
plug->count++;
|
||||
}
|
||||
|
||||
/**
|
||||
* blk_io_unplug: Run any pending blk_io_plug_call() functions
|
||||
*
|
||||
* There must have been a matching blk_io_plug() call in the same thread prior
|
||||
* to this blk_io_unplug() call.
|
||||
*/
|
||||
void blk_io_unplug(void)
|
||||
{
|
||||
Plug *plug = get_ptr_plug();
|
||||
|
||||
assert(plug->count > 0);
|
||||
|
||||
if (--plug->count > 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
GArray *array = plug->unplug_fns;
|
||||
if (!array) {
|
||||
return;
|
||||
}
|
||||
|
||||
UnplugFn *fns = (UnplugFn *)array->data;
|
||||
|
||||
for (guint i = 0; i < array->len; i++) {
|
||||
fns[i].fn(fns[i].opaque);
|
||||
}
|
||||
|
||||
/*
|
||||
* This resets the array without freeing memory so that appending is cheap
|
||||
* in the future.
|
||||
*/
|
||||
g_array_set_size(array, 0);
|
||||
}
|
||||
@@ -143,8 +143,6 @@ static int preallocate_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
BDRVPreallocateState *s = bs->opaque;
|
||||
int ret;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
/*
|
||||
* s->data_end and friends should be initialized on permission update.
|
||||
* For this to work, mark them invalid.
|
||||
@@ -157,8 +155,6 @@ static int preallocate_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
return ret;
|
||||
}
|
||||
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
if (!preallocate_absorb_opts(&s->opts, options, bs->file->bs, errp)) {
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -173,8 +169,7 @@ static int preallocate_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK
|
||||
preallocate_truncate_to_real_size(BlockDriverState *bs, Error **errp)
|
||||
static int preallocate_truncate_to_real_size(BlockDriverState *bs, Error **errp)
|
||||
{
|
||||
BDRVPreallocateState *s = bs->opaque;
|
||||
int ret;
|
||||
@@ -205,9 +200,6 @@ static void preallocate_close(BlockDriverState *bs)
|
||||
{
|
||||
BDRVPreallocateState *s = bs->opaque;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
qemu_bh_cancel(s->drop_resize_bh);
|
||||
qemu_bh_delete(s->drop_resize_bh);
|
||||
|
||||
@@ -231,9 +223,6 @@ static int preallocate_reopen_prepare(BDRVReopenState *reopen_state,
|
||||
PreallocateOpts *opts = g_new0(PreallocateOpts, 1);
|
||||
int ret;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
if (!preallocate_absorb_opts(opts, reopen_state->options,
|
||||
reopen_state->bs->file->bs, errp)) {
|
||||
g_free(opts);
|
||||
@@ -294,7 +283,7 @@ static bool can_write_resize(uint64_t perm)
|
||||
return (perm & BLK_PERM_WRITE) && (perm & BLK_PERM_RESIZE);
|
||||
}
|
||||
|
||||
static bool GRAPH_RDLOCK has_prealloc_perms(BlockDriverState *bs)
|
||||
static bool has_prealloc_perms(BlockDriverState *bs)
|
||||
{
|
||||
BDRVPreallocateState *s = bs->opaque;
|
||||
|
||||
@@ -510,8 +499,7 @@ preallocate_co_getlength(BlockDriverState *bs)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK
|
||||
preallocate_drop_resize(BlockDriverState *bs, Error **errp)
|
||||
static int preallocate_drop_resize(BlockDriverState *bs, Error **errp)
|
||||
{
|
||||
BDRVPreallocateState *s = bs->opaque;
|
||||
int ret;
|
||||
@@ -537,16 +525,15 @@ preallocate_drop_resize(BlockDriverState *bs, Error **errp)
|
||||
*/
|
||||
s->data_end = s->file_end = s->zero_start = -EINVAL;
|
||||
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
bdrv_child_refresh_perms(bs, bs->file, NULL);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void preallocate_drop_resize_bh(void *opaque)
|
||||
{
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
/*
|
||||
* In case of errors, we'll simply keep the exclusive lock on the image
|
||||
* indefinitely.
|
||||
@@ -554,8 +541,8 @@ static void preallocate_drop_resize_bh(void *opaque)
|
||||
preallocate_drop_resize(opaque, NULL);
|
||||
}
|
||||
|
||||
static void GRAPH_RDLOCK
|
||||
preallocate_set_perm(BlockDriverState *bs, uint64_t perm, uint64_t shared)
|
||||
static void preallocate_set_perm(BlockDriverState *bs,
|
||||
uint64_t perm, uint64_t shared)
|
||||
{
|
||||
BDRVPreallocateState *s = bs->opaque;
|
||||
|
||||
|
||||
@@ -169,16 +169,14 @@ void qmp_blockdev_close_tray(const char *device,
|
||||
}
|
||||
}
|
||||
|
||||
static void GRAPH_UNLOCKED
|
||||
blockdev_remove_medium(const char *device, const char *id, Error **errp)
|
||||
static void blockdev_remove_medium(const char *device, const char *id,
|
||||
Error **errp)
|
||||
{
|
||||
BlockBackend *blk;
|
||||
BlockDriverState *bs;
|
||||
AioContext *aio_context;
|
||||
bool has_attached_device;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
blk = qmp_get_blk(device, id, errp);
|
||||
if (!blk) {
|
||||
return;
|
||||
@@ -207,12 +205,9 @@ blockdev_remove_medium(const char *device, const char *id, Error **errp)
|
||||
aio_context = bdrv_get_aio_context(bs);
|
||||
aio_context_acquire(aio_context);
|
||||
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_EJECT, errp)) {
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
goto out;
|
||||
}
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
|
||||
blk_remove_bs(blk);
|
||||
|
||||
@@ -237,7 +232,6 @@ static void qmp_blockdev_insert_anon_medium(BlockBackend *blk,
|
||||
BlockDriverState *bs, Error **errp)
|
||||
{
|
||||
Error *local_err = NULL;
|
||||
AioContext *ctx;
|
||||
bool has_device;
|
||||
int ret;
|
||||
|
||||
@@ -259,11 +253,7 @@ static void qmp_blockdev_insert_anon_medium(BlockBackend *blk,
|
||||
return;
|
||||
}
|
||||
|
||||
ctx = bdrv_get_aio_context(bs);
|
||||
aio_context_acquire(ctx);
|
||||
ret = blk_insert_bs(blk, bs, errp);
|
||||
aio_context_release(ctx);
|
||||
|
||||
if (ret < 0) {
|
||||
return;
|
||||
}
|
||||
@@ -289,8 +279,6 @@ static void blockdev_insert_medium(const char *device, const char *id,
|
||||
BlockBackend *blk;
|
||||
BlockDriverState *bs;
|
||||
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
blk = qmp_get_blk(device, id, errp);
|
||||
if (!blk) {
|
||||
return;
|
||||
|
||||
11
block/qapi.c
@@ -225,8 +225,9 @@ int bdrv_query_snapshot_info_list(BlockDriverState *bs,
|
||||
* Helper function for other query info functions. Store information about @bs
|
||||
* in @info, setting @errp on error.
|
||||
*/
|
||||
static void GRAPH_RDLOCK
|
||||
bdrv_do_query_node_info(BlockDriverState *bs, BlockNodeInfo *info, Error **errp)
|
||||
static void bdrv_do_query_node_info(BlockDriverState *bs,
|
||||
BlockNodeInfo *info,
|
||||
Error **errp)
|
||||
{
|
||||
int64_t size;
|
||||
const char *backing_filename;
|
||||
@@ -422,8 +423,8 @@ fail:
|
||||
}
|
||||
|
||||
/* @p_info will be set only on success. */
|
||||
static void GRAPH_RDLOCK
|
||||
bdrv_query_info(BlockBackend *blk, BlockInfo **p_info, Error **errp)
|
||||
static void bdrv_query_info(BlockBackend *blk, BlockInfo **p_info,
|
||||
Error **errp)
|
||||
{
|
||||
BlockInfo *info = g_malloc0(sizeof(*info));
|
||||
BlockDriverState *bs = blk_bs(blk);
|
||||
@@ -671,8 +672,6 @@ BlockInfoList *qmp_query_block(Error **errp)
|
||||
BlockBackend *blk;
|
||||
Error *local_err = NULL;
|
||||
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
for (blk = blk_all_next(NULL); blk; blk = blk_all_next(blk)) {
|
||||
BlockInfoList *info;
|
||||
|
||||
|
||||
18
block/qcow.c
@@ -124,11 +124,9 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
|
||||
ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
|
||||
if (ret < 0) {
|
||||
goto fail_unlocked;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
|
||||
ret = bdrv_pread(bs->file, 0, sizeof(header), &header, 0);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
@@ -306,21 +304,18 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
error_setg(&s->migration_blocker, "The qcow format used by node '%s' "
|
||||
"does not support live migration",
|
||||
bdrv_get_device_or_node_name(bs));
|
||||
|
||||
ret = migrate_add_blocker_normal(&s->migration_blocker, errp);
|
||||
ret = migrate_add_blocker(s->migration_blocker, errp);
|
||||
if (ret < 0) {
|
||||
error_free(s->migration_blocker);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
qobject_unref(encryptopts);
|
||||
qapi_free_QCryptoBlockOpenOptions(crypto_opts);
|
||||
qemu_co_mutex_init(&s->lock);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
fail_unlocked:
|
||||
fail:
|
||||
g_free(s->l1_table);
|
||||
qemu_vfree(s->l2_cache);
|
||||
g_free(s->cluster_cache);
|
||||
@@ -804,7 +799,8 @@ static void qcow_close(BlockDriverState *bs)
|
||||
g_free(s->cluster_cache);
|
||||
g_free(s->cluster_data);
|
||||
|
||||
migrate_del_blocker(&s->migration_blocker);
|
||||
migrate_del_blocker(s->migration_blocker);
|
||||
error_free(s->migration_blocker);
|
||||
}
|
||||
|
||||
static int coroutine_fn GRAPH_UNLOCKED
|
||||
@@ -1027,7 +1023,7 @@ fail:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK qcow_make_empty(BlockDriverState *bs)
|
||||
static int qcow_make_empty(BlockDriverState *bs)
|
||||
{
|
||||
BDRVQcowState *s = bs->opaque;
|
||||
uint32_t l1_length = s->l1_size * sizeof(uint64_t);
|
||||
|
||||
@@ -105,7 +105,7 @@ static inline bool can_write(BlockDriverState *bs)
|
||||
return !bdrv_is_read_only(bs) && !(bdrv_get_flags(bs) & BDRV_O_INACTIVE);
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK update_header_sync(BlockDriverState *bs)
|
||||
static int update_header_sync(BlockDriverState *bs)
|
||||
{
|
||||
int ret;
|
||||
|
||||
@@ -156,9 +156,10 @@ static int64_t get_bitmap_bytes_needed(int64_t len, uint32_t granularity)
|
||||
return DIV_ROUND_UP(num_bits, 8);
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK
|
||||
check_constraints_on_bitmap(BlockDriverState *bs, const char *name,
|
||||
uint32_t granularity, Error **errp)
|
||||
static int check_constraints_on_bitmap(BlockDriverState *bs,
|
||||
const char *name,
|
||||
uint32_t granularity,
|
||||
Error **errp)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
int granularity_bits = ctz32(granularity);
|
||||
@@ -203,9 +204,8 @@ check_constraints_on_bitmap(BlockDriverState *bs, const char *name,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void GRAPH_RDLOCK
|
||||
clear_bitmap_table(BlockDriverState *bs, uint64_t *bitmap_table,
|
||||
uint32_t bitmap_table_size)
|
||||
static void clear_bitmap_table(BlockDriverState *bs, uint64_t *bitmap_table,
|
||||
uint32_t bitmap_table_size)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
int i;
|
||||
@@ -221,9 +221,8 @@ clear_bitmap_table(BlockDriverState *bs, uint64_t *bitmap_table,
|
||||
}
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK
|
||||
bitmap_table_load(BlockDriverState *bs, Qcow2BitmapTable *tb,
|
||||
uint64_t **bitmap_table)
|
||||
static int bitmap_table_load(BlockDriverState *bs, Qcow2BitmapTable *tb,
|
||||
uint64_t **bitmap_table)
|
||||
{
|
||||
int ret;
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
@@ -260,8 +259,7 @@ fail:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK
|
||||
free_bitmap_clusters(BlockDriverState *bs, Qcow2BitmapTable *tb)
|
||||
static int free_bitmap_clusters(BlockDriverState *bs, Qcow2BitmapTable *tb)
|
||||
{
|
||||
int ret;
|
||||
uint64_t *bitmap_table;
|
||||
@@ -552,9 +550,8 @@ static uint32_t bitmap_list_count(Qcow2BitmapList *bm_list)
|
||||
* Get bitmap list from qcow2 image. Actually reads bitmap directory,
|
||||
* checks it and convert to bitmap list.
|
||||
*/
|
||||
static Qcow2BitmapList * GRAPH_RDLOCK
|
||||
bitmap_list_load(BlockDriverState *bs, uint64_t offset, uint64_t size,
|
||||
Error **errp)
|
||||
static Qcow2BitmapList *bitmap_list_load(BlockDriverState *bs, uint64_t offset,
|
||||
uint64_t size, Error **errp)
|
||||
{
|
||||
int ret;
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
@@ -733,9 +730,8 @@ out:
|
||||
* Store bitmap list to qcow2 image as a bitmap directory.
|
||||
* Everything is checked.
|
||||
*/
|
||||
static int GRAPH_RDLOCK
|
||||
bitmap_list_store(BlockDriverState *bs, Qcow2BitmapList *bm_list,
|
||||
uint64_t *offset, uint64_t *size, bool in_place)
|
||||
static int bitmap_list_store(BlockDriverState *bs, Qcow2BitmapList *bm_list,
|
||||
uint64_t *offset, uint64_t *size, bool in_place)
|
||||
{
|
||||
int ret;
|
||||
uint8_t *dir;
|
||||
@@ -833,9 +829,8 @@ fail:
|
||||
* Bitmap List end
|
||||
*/
|
||||
|
||||
static int GRAPH_RDLOCK
|
||||
update_ext_header_and_dir_in_place(BlockDriverState *bs,
|
||||
Qcow2BitmapList *bm_list)
|
||||
static int update_ext_header_and_dir_in_place(BlockDriverState *bs,
|
||||
Qcow2BitmapList *bm_list)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
int ret;
|
||||
@@ -882,8 +877,8 @@ update_ext_header_and_dir_in_place(BlockDriverState *bs,
|
||||
*/
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK
|
||||
update_ext_header_and_dir(BlockDriverState *bs, Qcow2BitmapList *bm_list)
|
||||
static int update_ext_header_and_dir(BlockDriverState *bs,
|
||||
Qcow2BitmapList *bm_list)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
int ret;
|
||||
@@ -963,7 +958,7 @@ static void set_readonly_helper(gpointer bitmap, gpointer value)
|
||||
* If header_updated is not NULL then it is set appropriately regardless of
|
||||
* the return value.
|
||||
*/
|
||||
bool coroutine_fn
|
||||
bool coroutine_fn GRAPH_RDLOCK
|
||||
qcow2_load_dirty_bitmaps(BlockDriverState *bs,
|
||||
bool *header_updated, Error **errp)
|
||||
{
|
||||
@@ -1276,9 +1271,9 @@ out:
|
||||
/* store_bitmap_data()
|
||||
* Store bitmap to image, filling bitmap table accordingly.
|
||||
*/
|
||||
static uint64_t * GRAPH_RDLOCK
|
||||
store_bitmap_data(BlockDriverState *bs, BdrvDirtyBitmap *bitmap,
|
||||
uint32_t *bitmap_table_size, Error **errp)
|
||||
static uint64_t *store_bitmap_data(BlockDriverState *bs,
|
||||
BdrvDirtyBitmap *bitmap,
|
||||
uint32_t *bitmap_table_size, Error **errp)
|
||||
{
|
||||
int ret;
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
@@ -1375,8 +1370,7 @@ fail:
|
||||
* Store bm->dirty_bitmap to qcow2.
|
||||
* Set bm->table_offset and bm->table_size accordingly.
|
||||
*/
|
||||
static int GRAPH_RDLOCK
|
||||
store_bitmap(BlockDriverState *bs, Qcow2Bitmap *bm, Error **errp)
|
||||
static int store_bitmap(BlockDriverState *bs, Qcow2Bitmap *bm, Error **errp)
|
||||
{
|
||||
int ret;
|
||||
uint64_t *tb;
|
||||
|
||||
@@ -163,8 +163,7 @@ int qcow2_cache_destroy(Qcow2Cache *c)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK
|
||||
qcow2_cache_flush_dependency(BlockDriverState *bs, Qcow2Cache *c)
|
||||
static int qcow2_cache_flush_dependency(BlockDriverState *bs, Qcow2Cache *c)
|
||||
{
|
||||
int ret;
|
||||
|
||||
@@ -179,8 +178,7 @@ qcow2_cache_flush_dependency(BlockDriverState *bs, Qcow2Cache *c)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK
|
||||
qcow2_cache_entry_flush(BlockDriverState *bs, Qcow2Cache *c, int i)
|
||||
static int qcow2_cache_entry_flush(BlockDriverState *bs, Qcow2Cache *c, int i)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
int ret = 0;
|
||||
@@ -320,9 +318,8 @@ int qcow2_cache_empty(BlockDriverState *bs, Qcow2Cache *c)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK
|
||||
qcow2_cache_do_get(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
|
||||
void **table, bool read_from_disk)
|
||||
static int qcow2_cache_do_get(BlockDriverState *bs, Qcow2Cache *c,
|
||||
uint64_t offset, void **table, bool read_from_disk)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
int i;
|
||||
|
||||
@@ -207,9 +207,8 @@ int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
|
||||
* the cache is used; otherwise the L2 slice is loaded from the image
|
||||
* file.
|
||||
*/
|
||||
static int GRAPH_RDLOCK
|
||||
l2_load(BlockDriverState *bs, uint64_t offset,
|
||||
uint64_t l2_offset, uint64_t **l2_slice)
|
||||
static int l2_load(BlockDriverState *bs, uint64_t offset,
|
||||
uint64_t l2_offset, uint64_t **l2_slice)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
int start_of_slice = l2_entry_size(s) *
|
||||
@@ -270,7 +269,7 @@ int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
|
||||
*
|
||||
*/
|
||||
|
||||
static int GRAPH_RDLOCK l2_allocate(BlockDriverState *bs, int l1_index)
|
||||
static int l2_allocate(BlockDriverState *bs, int l1_index)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
uint64_t old_l2_offset;
|
||||
@@ -391,10 +390,11 @@ fail:
|
||||
* If the L2 entry is invalid return -errno and set @type to
|
||||
* QCOW2_SUBCLUSTER_INVALID.
|
||||
*/
|
||||
static int GRAPH_RDLOCK
|
||||
qcow2_get_subcluster_range_type(BlockDriverState *bs, uint64_t l2_entry,
|
||||
uint64_t l2_bitmap, unsigned sc_from,
|
||||
QCow2SubclusterType *type)
|
||||
static int qcow2_get_subcluster_range_type(BlockDriverState *bs,
|
||||
uint64_t l2_entry,
|
||||
uint64_t l2_bitmap,
|
||||
unsigned sc_from,
|
||||
QCow2SubclusterType *type)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
uint32_t val;
|
||||
@@ -441,10 +441,9 @@ qcow2_get_subcluster_range_type(BlockDriverState *bs, uint64_t l2_entry,
|
||||
* On failure return -errno and update @l2_index to point to the
|
||||
* invalid entry.
|
||||
*/
|
||||
static int GRAPH_RDLOCK
|
||||
count_contiguous_subclusters(BlockDriverState *bs, int nb_clusters,
|
||||
unsigned sc_index, uint64_t *l2_slice,
|
||||
unsigned *l2_index)
|
||||
static int count_contiguous_subclusters(BlockDriverState *bs, int nb_clusters,
|
||||
unsigned sc_index, uint64_t *l2_slice,
|
||||
unsigned *l2_index)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
int i, count = 0;
|
||||
@@ -752,9 +751,9 @@ fail:
|
||||
*
|
||||
* Returns 0 on success, -errno in failure case
|
||||
*/
|
||||
static int GRAPH_RDLOCK
|
||||
get_cluster_table(BlockDriverState *bs, uint64_t offset,
|
||||
uint64_t **new_l2_slice, int *new_l2_index)
|
||||
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
|
||||
uint64_t **new_l2_slice,
|
||||
int *new_l2_index)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
unsigned int l2_index;
|
||||
@@ -1156,10 +1155,11 @@ void coroutine_fn qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m)
|
||||
*
|
||||
* Returns 0 on success, -errno on failure.
|
||||
*/
|
||||
static int coroutine_fn GRAPH_RDLOCK
|
||||
calculate_l2_meta(BlockDriverState *bs, uint64_t host_cluster_offset,
|
||||
uint64_t guest_offset, unsigned bytes, uint64_t *l2_slice,
|
||||
QCowL2Meta **m, bool keep_old)
|
||||
static int coroutine_fn calculate_l2_meta(BlockDriverState *bs,
|
||||
uint64_t host_cluster_offset,
|
||||
uint64_t guest_offset, unsigned bytes,
|
||||
uint64_t *l2_slice, QCowL2Meta **m,
|
||||
bool keep_old)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
int sc_index, l2_index = offset_to_l2_slice_index(s, guest_offset);
|
||||
@@ -1329,8 +1329,7 @@ calculate_l2_meta(BlockDriverState *bs, uint64_t host_cluster_offset,
|
||||
* requires a new allocation (that is, if the cluster is unallocated
|
||||
* or has refcount > 1 and therefore cannot be written in-place).
|
||||
*/
|
||||
static bool GRAPH_RDLOCK
|
||||
cluster_needs_new_alloc(BlockDriverState *bs, uint64_t l2_entry)
|
||||
static bool cluster_needs_new_alloc(BlockDriverState *bs, uint64_t l2_entry)
|
||||
{
|
||||
switch (qcow2_get_cluster_type(bs, l2_entry)) {
|
||||
case QCOW2_CLUSTER_NORMAL:
|
||||
@@ -1361,9 +1360,9 @@ cluster_needs_new_alloc(BlockDriverState *bs, uint64_t l2_entry)
|
||||
* allocated and can be overwritten in-place (this includes clusters
|
||||
* of type QCOW2_CLUSTER_ZERO_ALLOC).
|
||||
*/
|
||||
static int GRAPH_RDLOCK
|
||||
count_single_write_clusters(BlockDriverState *bs, int nb_clusters,
|
||||
uint64_t *l2_slice, int l2_index, bool new_alloc)
|
||||
static int count_single_write_clusters(BlockDriverState *bs, int nb_clusters,
|
||||
uint64_t *l2_slice, int l2_index,
|
||||
bool new_alloc)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
uint64_t l2_entry = get_l2_entry(s, l2_slice, l2_index);
|
||||
@@ -1491,9 +1490,9 @@ static int coroutine_fn handle_dependencies(BlockDriverState *bs,
|
||||
*
|
||||
* -errno: in error cases
|
||||
*/
|
||||
static int coroutine_fn GRAPH_RDLOCK
|
||||
handle_copied(BlockDriverState *bs, uint64_t guest_offset,
|
||||
uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
|
||||
static int coroutine_fn handle_copied(BlockDriverState *bs,
|
||||
uint64_t guest_offset, uint64_t *host_offset, uint64_t *bytes,
|
||||
QCowL2Meta **m)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
int l2_index;
|
||||
@@ -1601,9 +1600,10 @@ out:
|
||||
* function has been waiting for another request and the allocation must be
|
||||
* restarted, but the whole request should not be failed.
|
||||
*/
|
||||
static int coroutine_fn GRAPH_RDLOCK
|
||||
do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
|
||||
uint64_t *host_offset, uint64_t *nb_clusters)
|
||||
static int coroutine_fn do_alloc_cluster_offset(BlockDriverState *bs,
|
||||
uint64_t guest_offset,
|
||||
uint64_t *host_offset,
|
||||
uint64_t *nb_clusters)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
|
||||
@@ -1658,9 +1658,9 @@ do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
|
||||
*
|
||||
* -errno: in error cases
|
||||
*/
|
||||
static int coroutine_fn GRAPH_RDLOCK
|
||||
handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
|
||||
uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
|
||||
static int coroutine_fn handle_alloc(BlockDriverState *bs,
|
||||
uint64_t guest_offset, uint64_t *host_offset, uint64_t *bytes,
|
||||
QCowL2Meta **m)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
int l2_index;
|
||||
@@ -1898,9 +1898,9 @@ again:
|
||||
* all clusters in the same L2 slice) and returns the number of discarded
|
||||
* clusters.
|
||||
*/
|
||||
static int GRAPH_RDLOCK
|
||||
discard_in_l2_slice(BlockDriverState *bs, uint64_t offset, uint64_t nb_clusters,
|
||||
enum qcow2_discard_type type, bool full_discard)
|
||||
static int discard_in_l2_slice(BlockDriverState *bs, uint64_t offset,
|
||||
uint64_t nb_clusters,
|
||||
enum qcow2_discard_type type, bool full_discard)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
uint64_t *l2_slice;
|
||||
@@ -1984,7 +1984,7 @@ discard_in_l2_slice(BlockDriverState *bs, uint64_t offset, uint64_t nb_clusters,
|
||||
/* If we keep the reference, pass on the discard still */
|
||||
bdrv_pdiscard(s->data_file, old_l2_entry & L2E_OFFSET_MASK,
|
||||
s->cluster_size);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
|
||||
@@ -2037,7 +2037,7 @@ fail:
|
||||
* all clusters in the same L2 slice) and returns the number of zeroed
|
||||
* clusters.
|
||||
*/
|
||||
static int coroutine_fn GRAPH_RDLOCK
|
||||
static int coroutine_fn
|
||||
zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
|
||||
uint64_t nb_clusters, int flags)
|
||||
{
|
||||
@@ -2062,15 +2062,9 @@ zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
|
||||
QCow2ClusterType type = qcow2_get_cluster_type(bs, old_l2_entry);
|
||||
bool unmap = (type == QCOW2_CLUSTER_COMPRESSED) ||
|
||||
((flags & BDRV_REQ_MAY_UNMAP) && qcow2_cluster_is_allocated(type));
|
||||
bool keep_reference =
|
||||
(s->discard_no_unref && type != QCOW2_CLUSTER_COMPRESSED);
|
||||
uint64_t new_l2_entry = old_l2_entry;
|
||||
uint64_t new_l2_entry = unmap ? 0 : old_l2_entry;
|
||||
uint64_t new_l2_bitmap = old_l2_bitmap;
|
||||
|
||||
if (unmap && !keep_reference) {
|
||||
new_l2_entry = 0;
|
||||
}
|
||||
|
||||
if (has_subclusters(s)) {
|
||||
new_l2_bitmap = QCOW_L2_BITMAP_ALL_ZEROES;
|
||||
} else {
|
||||
@@ -2088,17 +2082,9 @@ zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
|
||||
set_l2_bitmap(s, l2_slice, l2_index + i, new_l2_bitmap);
|
||||
}
|
||||
|
||||
/* Then decrease the refcount */
|
||||
if (unmap) {
|
||||
if (!keep_reference) {
|
||||
/* Then decrease the refcount */
|
||||
qcow2_free_any_cluster(bs, old_l2_entry, QCOW2_DISCARD_REQUEST);
|
||||
} else if (s->discard_passthrough[QCOW2_DISCARD_REQUEST] &&
|
||||
(type == QCOW2_CLUSTER_NORMAL ||
|
||||
type == QCOW2_CLUSTER_ZERO_ALLOC)) {
|
||||
/* If we keep the reference, pass on the discard still */
|
||||
bdrv_pdiscard(s->data_file, old_l2_entry & L2E_OFFSET_MASK,
|
||||
s->cluster_size);
|
||||
}
|
||||
qcow2_free_any_cluster(bs, old_l2_entry, QCOW2_DISCARD_REQUEST);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2107,7 +2093,7 @@ zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
|
||||
return nb_clusters;
|
||||
}
|
||||
|
||||
static int coroutine_fn GRAPH_RDLOCK
|
||||
static int coroutine_fn
|
||||
zero_l2_subclusters(BlockDriverState *bs, uint64_t offset,
|
||||
unsigned nb_subclusters)
|
||||
{
|
||||
@@ -2245,12 +2231,11 @@ fail:
|
||||
* status_cb(). l1_entries contains the total number of L1 entries and
|
||||
* *visited_l1_entries counts all visited L1 entries.
|
||||
*/
|
||||
static int GRAPH_RDLOCK
|
||||
expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
|
||||
int l1_size, int64_t *visited_l1_entries,
|
||||
int64_t l1_entries,
|
||||
BlockDriverAmendStatusCB *status_cb,
|
||||
void *cb_opaque)
|
||||
static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
|
||||
int l1_size, int64_t *visited_l1_entries,
|
||||
int64_t l1_entries,
|
||||
BlockDriverAmendStatusCB *status_cb,
|
||||
void *cb_opaque)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
bool is_active_l1 = (l1_table == s->l1_table);
|
||||
|
||||
@@ -229,9 +229,9 @@ static void set_refcount_ro6(void *refcount_array, uint64_t index,
|
||||
}
|
||||
|
||||
|
||||
static int GRAPH_RDLOCK
|
||||
load_refcount_block(BlockDriverState *bs, int64_t refcount_block_offset,
|
||||
void **refcount_block)
|
||||
static int load_refcount_block(BlockDriverState *bs,
|
||||
int64_t refcount_block_offset,
|
||||
void **refcount_block)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
|
||||
@@ -302,9 +302,8 @@ static int in_same_refcount_block(BDRVQcow2State *s, uint64_t offset_a,
|
||||
*
|
||||
* Returns 0 on success or -errno in error case
|
||||
*/
|
||||
static int GRAPH_RDLOCK
|
||||
alloc_refcount_block(BlockDriverState *bs, int64_t cluster_index,
|
||||
void **refcount_block)
|
||||
static int alloc_refcount_block(BlockDriverState *bs,
|
||||
int64_t cluster_index, void **refcount_block)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
unsigned int refcount_table_index;
|
||||
@@ -807,9 +806,12 @@ found:
|
||||
/* XXX: cache several refcount block clusters ? */
|
||||
/* @addend is the absolute value of the addend; if @decrease is set, @addend
|
||||
* will be subtracted from the current refcount, otherwise it will be added */
|
||||
static int GRAPH_RDLOCK
|
||||
update_refcount(BlockDriverState *bs, int64_t offset, int64_t length,
|
||||
uint64_t addend, bool decrease, enum qcow2_discard_type type)
|
||||
static int update_refcount(BlockDriverState *bs,
|
||||
int64_t offset,
|
||||
int64_t length,
|
||||
uint64_t addend,
|
||||
bool decrease,
|
||||
enum qcow2_discard_type type)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
int64_t start, last, cluster_offset;
|
||||
@@ -965,8 +967,8 @@ int qcow2_update_cluster_refcount(BlockDriverState *bs,
|
||||
|
||||
|
||||
/* return < 0 if error */
|
||||
static int64_t GRAPH_RDLOCK
|
||||
alloc_clusters_noref(BlockDriverState *bs, uint64_t size, uint64_t max)
|
||||
static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size,
|
||||
uint64_t max)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
uint64_t i, nb_clusters, refcount;
|
||||
@@ -2300,7 +2302,7 @@ calculate_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
|
||||
* Compares the actual reference count for each cluster in the image against the
|
||||
* refcount as reported by the refcount structures on-disk.
|
||||
*/
|
||||
static void coroutine_fn GRAPH_RDLOCK
|
||||
static void coroutine_fn
|
||||
compare_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
|
||||
BdrvCheckMode fix, bool *rebuild,
|
||||
int64_t *highest_cluster,
|
||||
@@ -3101,22 +3103,20 @@ int qcow2_pre_write_overlap_check(BlockDriverState *bs, int ign, int64_t offset,
|
||||
*
|
||||
* @allocated should be set to true if a new cluster has been allocated.
|
||||
*/
|
||||
typedef int /* GRAPH_RDLOCK_PTR */
|
||||
(RefblockFinishOp)(BlockDriverState *bs, uint64_t **reftable,
|
||||
uint64_t reftable_index, uint64_t *reftable_size,
|
||||
void *refblock, bool refblock_empty,
|
||||
bool *allocated, Error **errp);
|
||||
typedef int (RefblockFinishOp)(BlockDriverState *bs, uint64_t **reftable,
|
||||
uint64_t reftable_index, uint64_t *reftable_size,
|
||||
void *refblock, bool refblock_empty,
|
||||
bool *allocated, Error **errp);
|
||||
|
||||
/**
|
||||
* This "operation" for walk_over_reftable() allocates the refblock on disk (if
|
||||
* it is not empty) and inserts its offset into the new reftable. The size of
|
||||
* this new reftable is increased as required.
|
||||
*/
|
||||
static int GRAPH_RDLOCK
|
||||
alloc_refblock(BlockDriverState *bs, uint64_t **reftable,
|
||||
uint64_t reftable_index, uint64_t *reftable_size,
|
||||
void *refblock, bool refblock_empty, bool *allocated,
|
||||
Error **errp)
|
||||
static int alloc_refblock(BlockDriverState *bs, uint64_t **reftable,
|
||||
uint64_t reftable_index, uint64_t *reftable_size,
|
||||
void *refblock, bool refblock_empty, bool *allocated,
|
||||
Error **errp)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
int64_t offset;
|
||||
@@ -3166,11 +3166,10 @@ alloc_refblock(BlockDriverState *bs, uint64_t **reftable,
|
||||
* offset specified by the new reftable's entry. It does not modify the new
|
||||
* reftable or change any refcounts.
|
||||
*/
|
||||
static int GRAPH_RDLOCK
|
||||
flush_refblock(BlockDriverState *bs, uint64_t **reftable,
|
||||
uint64_t reftable_index, uint64_t *reftable_size,
|
||||
void *refblock, bool refblock_empty, bool *allocated,
|
||||
Error **errp)
|
||||
static int flush_refblock(BlockDriverState *bs, uint64_t **reftable,
|
||||
uint64_t reftable_index, uint64_t *reftable_size,
|
||||
void *refblock, bool refblock_empty, bool *allocated,
|
||||
Error **errp)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
int64_t offset;
|
||||
@@ -3211,17 +3210,16 @@ flush_refblock(BlockDriverState *bs, uint64_t **reftable,
|
||||
*
|
||||
* @allocated is set to true if a new cluster has been allocated.
|
||||
*/
|
||||
static int GRAPH_RDLOCK
|
||||
walk_over_reftable(BlockDriverState *bs, uint64_t **new_reftable,
|
||||
uint64_t *new_reftable_index,
|
||||
uint64_t *new_reftable_size,
|
||||
void *new_refblock, int new_refblock_size,
|
||||
int new_refcount_bits,
|
||||
RefblockFinishOp *operation, bool *allocated,
|
||||
Qcow2SetRefcountFunc *new_set_refcount,
|
||||
BlockDriverAmendStatusCB *status_cb,
|
||||
void *cb_opaque, int index, int total,
|
||||
Error **errp)
|
||||
static int walk_over_reftable(BlockDriverState *bs, uint64_t **new_reftable,
|
||||
uint64_t *new_reftable_index,
|
||||
uint64_t *new_reftable_size,
|
||||
void *new_refblock, int new_refblock_size,
|
||||
int new_refcount_bits,
|
||||
RefblockFinishOp *operation, bool *allocated,
|
||||
Qcow2SetRefcountFunc *new_set_refcount,
|
||||
BlockDriverAmendStatusCB *status_cb,
|
||||
void *cb_opaque, int index, int total,
|
||||
Error **errp)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
uint64_t reftable_index;
|
||||
@@ -3547,8 +3545,8 @@ done:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int64_t coroutine_fn GRAPH_RDLOCK
|
||||
get_refblock_offset(BlockDriverState *bs, uint64_t offset)
|
||||
static int64_t coroutine_fn get_refblock_offset(BlockDriverState *bs,
|
||||
uint64_t offset)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
uint32_t index = offset_to_reftable_index(s, offset);
|
||||
@@ -3567,7 +3565,7 @@ get_refblock_offset(BlockDriverState *bs, uint64_t offset)
|
||||
return covering_refblock_offset;
|
||||
}
|
||||
|
||||
static int coroutine_fn GRAPH_RDLOCK
|
||||
static int coroutine_fn
|
||||
qcow2_discard_refcount_block(BlockDriverState *bs, uint64_t discard_block_offs)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
|
||||
209
block/qcow2.c
@@ -95,10 +95,9 @@ static int qcow2_probe(const uint8_t *buf, int buf_size, const char *filename)
|
||||
}
|
||||
|
||||
|
||||
static int GRAPH_RDLOCK
|
||||
qcow2_crypto_hdr_read_func(QCryptoBlock *block, size_t offset,
|
||||
uint8_t *buf, size_t buflen,
|
||||
void *opaque, Error **errp)
|
||||
static int qcow2_crypto_hdr_read_func(QCryptoBlock *block, size_t offset,
|
||||
uint8_t *buf, size_t buflen,
|
||||
void *opaque, Error **errp)
|
||||
{
|
||||
BlockDriverState *bs = opaque;
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
@@ -157,7 +156,7 @@ qcow2_crypto_hdr_init_func(QCryptoBlock *block, size_t headerlen, void *opaque,
|
||||
|
||||
|
||||
/* The graph lock must be held when called in coroutine context */
|
||||
static int coroutine_mixed_fn GRAPH_RDLOCK
|
||||
static int coroutine_mixed_fn
|
||||
qcow2_crypto_hdr_write_func(QCryptoBlock *block, size_t offset,
|
||||
const uint8_t *buf, size_t buflen,
|
||||
void *opaque, Error **errp)
|
||||
@@ -537,7 +536,7 @@ int qcow2_mark_dirty(BlockDriverState *bs)
|
||||
* function when there are no pending requests, it does not guard against
|
||||
* concurrent requests dirtying the image.
|
||||
*/
|
||||
static int GRAPH_RDLOCK qcow2_mark_clean(BlockDriverState *bs)
|
||||
static int qcow2_mark_clean(BlockDriverState *bs)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
|
||||
@@ -571,8 +570,7 @@ int qcow2_mark_corrupt(BlockDriverState *bs)
|
||||
* Marks the image as consistent, i.e., unsets the corrupt bit, and flushes
|
||||
* before if necessary.
|
||||
*/
|
||||
static int coroutine_fn GRAPH_RDLOCK
|
||||
qcow2_mark_consistent(BlockDriverState *bs)
|
||||
static int coroutine_fn qcow2_mark_consistent(BlockDriverState *bs)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
|
||||
@@ -982,9 +980,10 @@ typedef struct Qcow2ReopenState {
|
||||
QCryptoBlockOpenOptions *crypto_opts; /* Disk encryption runtime options */
|
||||
} Qcow2ReopenState;
|
||||
|
||||
static int GRAPH_RDLOCK
|
||||
qcow2_update_options_prepare(BlockDriverState *bs, Qcow2ReopenState *r,
|
||||
QDict *options, int flags, Error **errp)
|
||||
static int qcow2_update_options_prepare(BlockDriverState *bs,
|
||||
Qcow2ReopenState *r,
|
||||
QDict *options, int flags,
|
||||
Error **errp)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
QemuOpts *opts = NULL;
|
||||
@@ -1261,7 +1260,7 @@ static void qcow2_update_options_abort(BlockDriverState *bs,
|
||||
qapi_free_QCryptoBlockOpenOptions(r->crypto_opts);
|
||||
}
|
||||
|
||||
static int coroutine_fn GRAPH_RDLOCK
|
||||
static int coroutine_fn
|
||||
qcow2_update_options(BlockDriverState *bs, QDict *options, int flags,
|
||||
Error **errp)
|
||||
{
|
||||
@@ -1970,17 +1969,13 @@ static void qcow2_refresh_limits(BlockDriverState *bs, Error **errp)
|
||||
bs->bl.pdiscard_alignment = s->cluster_size;
|
||||
}
|
||||
|
||||
static int GRAPH_UNLOCKED
|
||||
qcow2_reopen_prepare(BDRVReopenState *state,BlockReopenQueue *queue,
|
||||
Error **errp)
|
||||
static int qcow2_reopen_prepare(BDRVReopenState *state,
|
||||
BlockReopenQueue *queue, Error **errp)
|
||||
{
|
||||
BDRVQcow2State *s = state->bs->opaque;
|
||||
Qcow2ReopenState *r;
|
||||
int ret;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
r = g_new0(Qcow2ReopenState, 1);
|
||||
state->opaque = r;
|
||||
|
||||
@@ -2030,8 +2025,6 @@ static void qcow2_reopen_commit(BDRVReopenState *state)
|
||||
{
|
||||
BDRVQcow2State *s = state->bs->opaque;
|
||||
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
qcow2_update_options_commit(state->bs, state->opaque);
|
||||
if (!s->data_file) {
|
||||
/*
|
||||
@@ -2045,8 +2038,6 @@ static void qcow2_reopen_commit(BDRVReopenState *state)
|
||||
|
||||
static void qcow2_reopen_commit_post(BDRVReopenState *state)
|
||||
{
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
if (state->flags & BDRV_O_RDWR) {
|
||||
Error *local_err = NULL;
|
||||
|
||||
@@ -2067,8 +2058,6 @@ static void qcow2_reopen_abort(BDRVReopenState *state)
|
||||
{
|
||||
BDRVQcow2State *s = state->bs->opaque;
|
||||
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
if (!s->data_file) {
|
||||
/*
|
||||
* If we don't have an external data file, s->data_file was cleared by
|
||||
@@ -2742,7 +2731,7 @@ fail_nometa:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK qcow2_inactivate(BlockDriverState *bs)
|
||||
static int qcow2_inactivate(BlockDriverState *bs)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
int ret, result = 0;
|
||||
@@ -2777,8 +2766,7 @@ static int GRAPH_RDLOCK qcow2_inactivate(BlockDriverState *bs)
|
||||
return result;
|
||||
}
|
||||
|
||||
static void coroutine_mixed_fn GRAPH_RDLOCK
|
||||
qcow2_do_close(BlockDriverState *bs, bool close_data_file)
|
||||
static void qcow2_do_close(BlockDriverState *bs, bool close_data_file)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
qemu_vfree(s->l1_table);
|
||||
@@ -2805,24 +2793,18 @@ qcow2_do_close(BlockDriverState *bs, bool close_data_file)
|
||||
g_free(s->image_backing_format);
|
||||
|
||||
if (close_data_file && has_data_file(bs)) {
|
||||
GLOBAL_STATE_CODE();
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
bdrv_graph_wrlock(NULL);
|
||||
bdrv_unref_child(bs, s->data_file);
|
||||
bdrv_graph_wrunlock();
|
||||
s->data_file = NULL;
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
}
|
||||
|
||||
qcow2_refcount_close(bs);
|
||||
qcow2_free_snapshots(bs);
|
||||
}
|
||||
|
||||
static void GRAPH_UNLOCKED qcow2_close(BlockDriverState *bs)
|
||||
static void qcow2_close(BlockDriverState *bs)
|
||||
{
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
qcow2_do_close(bs, true);
|
||||
}
|
||||
|
||||
@@ -3160,9 +3142,8 @@ fail:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int coroutine_fn GRAPH_RDLOCK
|
||||
qcow2_co_change_backing_file(BlockDriverState *bs, const char *backing_file,
|
||||
const char *backing_fmt)
|
||||
static int qcow2_change_backing_file(BlockDriverState *bs,
|
||||
const char *backing_file, const char *backing_fmt)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
|
||||
@@ -3822,11 +3803,8 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp)
|
||||
backing_format = BlockdevDriver_str(qcow2_opts->backing_fmt);
|
||||
}
|
||||
|
||||
bdrv_graph_co_rdlock();
|
||||
ret = bdrv_co_change_backing_file(blk_bs(blk), qcow2_opts->backing_file,
|
||||
backing_format, false);
|
||||
bdrv_graph_co_rdunlock();
|
||||
|
||||
ret = bdrv_change_backing_file(blk_bs(blk), qcow2_opts->backing_file,
|
||||
backing_format, false);
|
||||
if (ret < 0) {
|
||||
error_setg_errno(errp, -ret, "Could not assign backing file '%s' "
|
||||
"with format '%s'", qcow2_opts->backing_file,
|
||||
@@ -4013,8 +3991,7 @@ finish:
|
||||
}
|
||||
|
||||
|
||||
static bool coroutine_fn GRAPH_RDLOCK
|
||||
is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes)
|
||||
static bool is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes)
|
||||
{
|
||||
int64_t nr;
|
||||
int res;
|
||||
@@ -4035,7 +4012,7 @@ is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes)
|
||||
* backing file. So, we need a loop.
|
||||
*/
|
||||
do {
|
||||
res = bdrv_co_block_status_above(bs, NULL, offset, bytes, &nr, NULL, NULL);
|
||||
res = bdrv_block_status_above(bs, NULL, offset, bytes, &nr, NULL, NULL);
|
||||
offset += nr;
|
||||
bytes -= nr;
|
||||
} while (res >= 0 && (res & BDRV_BLOCK_ZERO) && nr && bytes);
|
||||
@@ -4099,8 +4076,8 @@ qcow2_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int coroutine_fn GRAPH_RDLOCK
|
||||
qcow2_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
|
||||
static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs,
|
||||
int64_t offset, int64_t bytes)
|
||||
{
|
||||
int ret;
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
@@ -4845,7 +4822,7 @@ fail:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK make_completely_empty(BlockDriverState *bs)
|
||||
static int make_completely_empty(BlockDriverState *bs)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
Error *local_err = NULL;
|
||||
@@ -4996,7 +4973,7 @@ fail:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK qcow2_make_empty(BlockDriverState *bs)
|
||||
static int qcow2_make_empty(BlockDriverState *bs)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
uint64_t offset, end_offset;
|
||||
@@ -5040,7 +5017,7 @@ static int GRAPH_RDLOCK qcow2_make_empty(BlockDriverState *bs)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static coroutine_fn GRAPH_RDLOCK int qcow2_co_flush_to_os(BlockDriverState *bs)
|
||||
static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
int ret;
|
||||
@@ -5231,8 +5208,8 @@ qcow2_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static ImageInfoSpecific * GRAPH_RDLOCK
|
||||
qcow2_get_specific_info(BlockDriverState *bs, Error **errp)
|
||||
static ImageInfoSpecific *qcow2_get_specific_info(BlockDriverState *bs,
|
||||
Error **errp)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
ImageInfoSpecific *spec_info;
|
||||
@@ -5311,8 +5288,7 @@ qcow2_get_specific_info(BlockDriverState *bs, Error **errp)
|
||||
return spec_info;
|
||||
}
|
||||
|
||||
static int coroutine_mixed_fn GRAPH_RDLOCK
|
||||
qcow2_has_zero_init(BlockDriverState *bs)
|
||||
static int coroutine_mixed_fn qcow2_has_zero_init(BlockDriverState *bs)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
bool preallocated;
|
||||
@@ -5390,7 +5366,7 @@ qcow2_co_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
|
||||
return bs->drv->bdrv_co_preadv_part(bs, offset, qiov->size, qiov, 0, 0);
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK qcow2_has_compressed_clusters(BlockDriverState *bs)
|
||||
static int qcow2_has_compressed_clusters(BlockDriverState *bs)
|
||||
{
|
||||
int64_t offset = 0;
|
||||
int64_t bytes = bdrv_getlength(bs);
|
||||
@@ -5426,10 +5402,9 @@ static int GRAPH_RDLOCK qcow2_has_compressed_clusters(BlockDriverState *bs)
|
||||
* Downgrades an image's version. To achieve this, any incompatible features
|
||||
* have to be removed.
|
||||
*/
|
||||
static int GRAPH_RDLOCK
|
||||
qcow2_downgrade(BlockDriverState *bs, int target_version,
|
||||
BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
|
||||
Error **errp)
|
||||
static int qcow2_downgrade(BlockDriverState *bs, int target_version,
|
||||
BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
|
||||
Error **errp)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
int current_version = s->qcow_version;
|
||||
@@ -5537,10 +5512,9 @@ qcow2_downgrade(BlockDriverState *bs, int target_version,
|
||||
* features of older versions, some things may have to be presented
|
||||
* differently.
|
||||
*/
|
||||
static int GRAPH_RDLOCK
|
||||
qcow2_upgrade(BlockDriverState *bs, int target_version,
|
||||
BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
|
||||
Error **errp)
|
||||
static int qcow2_upgrade(BlockDriverState *bs, int target_version,
|
||||
BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
|
||||
Error **errp)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
bool need_snapshot_update;
|
||||
@@ -5666,10 +5640,11 @@ static void qcow2_amend_helper_cb(BlockDriverState *bs,
|
||||
info->original_cb_opaque);
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK
|
||||
qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts,
|
||||
BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
|
||||
bool force, Error **errp)
|
||||
static int qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts,
|
||||
BlockDriverAmendStatusCB *status_cb,
|
||||
void *cb_opaque,
|
||||
bool force,
|
||||
Error **errp)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
int old_version = s->qcow_version, new_version = old_version;
|
||||
@@ -6124,64 +6099,64 @@ static const char *const qcow2_strong_runtime_opts[] = {
|
||||
};
|
||||
|
||||
BlockDriver bdrv_qcow2 = {
|
||||
.format_name = "qcow2",
|
||||
.instance_size = sizeof(BDRVQcow2State),
|
||||
.bdrv_probe = qcow2_probe,
|
||||
.bdrv_open = qcow2_open,
|
||||
.bdrv_close = qcow2_close,
|
||||
.bdrv_reopen_prepare = qcow2_reopen_prepare,
|
||||
.bdrv_reopen_commit = qcow2_reopen_commit,
|
||||
.bdrv_reopen_commit_post = qcow2_reopen_commit_post,
|
||||
.bdrv_reopen_abort = qcow2_reopen_abort,
|
||||
.bdrv_join_options = qcow2_join_options,
|
||||
.bdrv_child_perm = bdrv_default_perms,
|
||||
.bdrv_co_create_opts = qcow2_co_create_opts,
|
||||
.bdrv_co_create = qcow2_co_create,
|
||||
.bdrv_has_zero_init = qcow2_has_zero_init,
|
||||
.bdrv_co_block_status = qcow2_co_block_status,
|
||||
.format_name = "qcow2",
|
||||
.instance_size = sizeof(BDRVQcow2State),
|
||||
.bdrv_probe = qcow2_probe,
|
||||
.bdrv_open = qcow2_open,
|
||||
.bdrv_close = qcow2_close,
|
||||
.bdrv_reopen_prepare = qcow2_reopen_prepare,
|
||||
.bdrv_reopen_commit = qcow2_reopen_commit,
|
||||
.bdrv_reopen_commit_post = qcow2_reopen_commit_post,
|
||||
.bdrv_reopen_abort = qcow2_reopen_abort,
|
||||
.bdrv_join_options = qcow2_join_options,
|
||||
.bdrv_child_perm = bdrv_default_perms,
|
||||
.bdrv_co_create_opts = qcow2_co_create_opts,
|
||||
.bdrv_co_create = qcow2_co_create,
|
||||
.bdrv_has_zero_init = qcow2_has_zero_init,
|
||||
.bdrv_co_block_status = qcow2_co_block_status,
|
||||
|
||||
.bdrv_co_preadv_part = qcow2_co_preadv_part,
|
||||
.bdrv_co_pwritev_part = qcow2_co_pwritev_part,
|
||||
.bdrv_co_flush_to_os = qcow2_co_flush_to_os,
|
||||
.bdrv_co_preadv_part = qcow2_co_preadv_part,
|
||||
.bdrv_co_pwritev_part = qcow2_co_pwritev_part,
|
||||
.bdrv_co_flush_to_os = qcow2_co_flush_to_os,
|
||||
|
||||
.bdrv_co_pwrite_zeroes = qcow2_co_pwrite_zeroes,
|
||||
.bdrv_co_pdiscard = qcow2_co_pdiscard,
|
||||
.bdrv_co_copy_range_from = qcow2_co_copy_range_from,
|
||||
.bdrv_co_copy_range_to = qcow2_co_copy_range_to,
|
||||
.bdrv_co_truncate = qcow2_co_truncate,
|
||||
.bdrv_co_pwritev_compressed_part = qcow2_co_pwritev_compressed_part,
|
||||
.bdrv_make_empty = qcow2_make_empty,
|
||||
.bdrv_co_pwrite_zeroes = qcow2_co_pwrite_zeroes,
|
||||
.bdrv_co_pdiscard = qcow2_co_pdiscard,
|
||||
.bdrv_co_copy_range_from = qcow2_co_copy_range_from,
|
||||
.bdrv_co_copy_range_to = qcow2_co_copy_range_to,
|
||||
.bdrv_co_truncate = qcow2_co_truncate,
|
||||
.bdrv_co_pwritev_compressed_part = qcow2_co_pwritev_compressed_part,
|
||||
.bdrv_make_empty = qcow2_make_empty,
|
||||
|
||||
.bdrv_snapshot_create = qcow2_snapshot_create,
|
||||
.bdrv_snapshot_goto = qcow2_snapshot_goto,
|
||||
.bdrv_snapshot_delete = qcow2_snapshot_delete,
|
||||
.bdrv_snapshot_list = qcow2_snapshot_list,
|
||||
.bdrv_snapshot_load_tmp = qcow2_snapshot_load_tmp,
|
||||
.bdrv_measure = qcow2_measure,
|
||||
.bdrv_co_get_info = qcow2_co_get_info,
|
||||
.bdrv_get_specific_info = qcow2_get_specific_info,
|
||||
.bdrv_snapshot_create = qcow2_snapshot_create,
|
||||
.bdrv_snapshot_goto = qcow2_snapshot_goto,
|
||||
.bdrv_snapshot_delete = qcow2_snapshot_delete,
|
||||
.bdrv_snapshot_list = qcow2_snapshot_list,
|
||||
.bdrv_snapshot_load_tmp = qcow2_snapshot_load_tmp,
|
||||
.bdrv_measure = qcow2_measure,
|
||||
.bdrv_co_get_info = qcow2_co_get_info,
|
||||
.bdrv_get_specific_info = qcow2_get_specific_info,
|
||||
|
||||
.bdrv_co_save_vmstate = qcow2_co_save_vmstate,
|
||||
.bdrv_co_load_vmstate = qcow2_co_load_vmstate,
|
||||
.bdrv_co_save_vmstate = qcow2_co_save_vmstate,
|
||||
.bdrv_co_load_vmstate = qcow2_co_load_vmstate,
|
||||
|
||||
.is_format = true,
|
||||
.supports_backing = true,
|
||||
.bdrv_co_change_backing_file = qcow2_co_change_backing_file,
|
||||
.is_format = true,
|
||||
.supports_backing = true,
|
||||
.bdrv_change_backing_file = qcow2_change_backing_file,
|
||||
|
||||
.bdrv_refresh_limits = qcow2_refresh_limits,
|
||||
.bdrv_co_invalidate_cache = qcow2_co_invalidate_cache,
|
||||
.bdrv_inactivate = qcow2_inactivate,
|
||||
.bdrv_refresh_limits = qcow2_refresh_limits,
|
||||
.bdrv_co_invalidate_cache = qcow2_co_invalidate_cache,
|
||||
.bdrv_inactivate = qcow2_inactivate,
|
||||
|
||||
.create_opts = &qcow2_create_opts,
|
||||
.amend_opts = &qcow2_amend_opts,
|
||||
.strong_runtime_opts = qcow2_strong_runtime_opts,
|
||||
.mutable_opts = mutable_opts,
|
||||
.bdrv_co_check = qcow2_co_check,
|
||||
.bdrv_amend_options = qcow2_amend_options,
|
||||
.bdrv_co_amend = qcow2_co_amend,
|
||||
.create_opts = &qcow2_create_opts,
|
||||
.amend_opts = &qcow2_amend_opts,
|
||||
.strong_runtime_opts = qcow2_strong_runtime_opts,
|
||||
.mutable_opts = mutable_opts,
|
||||
.bdrv_co_check = qcow2_co_check,
|
||||
.bdrv_amend_options = qcow2_amend_options,
|
||||
.bdrv_co_amend = qcow2_co_amend,
|
||||
|
||||
.bdrv_detach_aio_context = qcow2_detach_aio_context,
|
||||
.bdrv_attach_aio_context = qcow2_attach_aio_context,
|
||||
.bdrv_detach_aio_context = qcow2_detach_aio_context,
|
||||
.bdrv_attach_aio_context = qcow2_attach_aio_context,
|
||||
|
||||
.bdrv_supports_persistent_dirty_bitmap =
|
||||
qcow2_supports_persistent_dirty_bitmap,
|
||||
|
||||
244
block/qcow2.h
244
block/qcow2.h
@@ -641,7 +641,7 @@ static inline void set_l2_bitmap(BDRVQcow2State *s, uint64_t *l2_slice,
|
||||
l2_slice[idx + 1] = cpu_to_be64(bitmap);
|
||||
}
|
||||
|
||||
static inline bool GRAPH_RDLOCK has_data_file(BlockDriverState *bs)
|
||||
static inline bool has_data_file(BlockDriverState *bs)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
return (s->data_file != bs->file);
|
||||
@@ -709,8 +709,8 @@ static inline int64_t qcow2_vm_state_offset(BDRVQcow2State *s)
|
||||
return (int64_t)s->l1_vm_state_index << (s->cluster_bits + s->l2_bits);
|
||||
}
|
||||
|
||||
static inline QCow2ClusterType GRAPH_RDLOCK
|
||||
qcow2_get_cluster_type(BlockDriverState *bs, uint64_t l2_entry)
|
||||
static inline QCow2ClusterType qcow2_get_cluster_type(BlockDriverState *bs,
|
||||
uint64_t l2_entry)
|
||||
{
|
||||
BDRVQcow2State *s = bs->opaque;
|
||||
|
||||
@@ -743,7 +743,7 @@ qcow2_get_cluster_type(BlockDriverState *bs, uint64_t l2_entry)
|
||||
* (this checks the whole entry and bitmap, not only the bits related
|
||||
* to subcluster @sc_index).
|
||||
*/
|
||||
static inline GRAPH_RDLOCK
|
||||
static inline
|
||||
QCow2SubclusterType qcow2_get_subcluster_type(BlockDriverState *bs,
|
||||
uint64_t l2_entry,
|
||||
uint64_t l2_bitmap,
|
||||
@@ -834,14 +834,13 @@ int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size,
|
||||
int refcount_order, bool generous_increase,
|
||||
uint64_t *refblock_count);
|
||||
|
||||
int GRAPH_RDLOCK qcow2_mark_dirty(BlockDriverState *bs);
|
||||
int GRAPH_RDLOCK qcow2_mark_corrupt(BlockDriverState *bs);
|
||||
int GRAPH_RDLOCK qcow2_update_header(BlockDriverState *bs);
|
||||
int qcow2_mark_dirty(BlockDriverState *bs);
|
||||
int qcow2_mark_corrupt(BlockDriverState *bs);
|
||||
int qcow2_update_header(BlockDriverState *bs);
|
||||
|
||||
void GRAPH_RDLOCK
|
||||
qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset,
|
||||
int64_t size, const char *message_format, ...)
|
||||
G_GNUC_PRINTF(5, 6);
|
||||
void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset,
|
||||
int64_t size, const char *message_format, ...)
|
||||
G_GNUC_PRINTF(5, 6);
|
||||
|
||||
int qcow2_validate_table(BlockDriverState *bs, uint64_t offset,
|
||||
uint64_t entries, size_t entry_len,
|
||||
@@ -852,208 +851,165 @@ int qcow2_validate_table(BlockDriverState *bs, uint64_t offset,
|
||||
int coroutine_fn GRAPH_RDLOCK qcow2_refcount_init(BlockDriverState *bs);
|
||||
void qcow2_refcount_close(BlockDriverState *bs);
|
||||
|
||||
int GRAPH_RDLOCK qcow2_get_refcount(BlockDriverState *bs, int64_t cluster_index,
|
||||
uint64_t *refcount);
|
||||
int qcow2_get_refcount(BlockDriverState *bs, int64_t cluster_index,
|
||||
uint64_t *refcount);
|
||||
|
||||
int GRAPH_RDLOCK
|
||||
qcow2_update_cluster_refcount(BlockDriverState *bs, int64_t cluster_index,
|
||||
uint64_t addend, bool decrease,
|
||||
enum qcow2_discard_type type);
|
||||
int qcow2_update_cluster_refcount(BlockDriverState *bs, int64_t cluster_index,
|
||||
uint64_t addend, bool decrease,
|
||||
enum qcow2_discard_type type);
|
||||
|
||||
int64_t GRAPH_RDLOCK
|
||||
qcow2_refcount_area(BlockDriverState *bs, uint64_t offset,
|
||||
uint64_t additional_clusters, bool exact_size,
|
||||
int new_refblock_index,
|
||||
uint64_t new_refblock_offset);
|
||||
|
||||
int64_t GRAPH_RDLOCK
|
||||
qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size);
|
||||
|
||||
int64_t GRAPH_RDLOCK coroutine_fn
|
||||
qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
|
||||
int64_t nb_clusters);
|
||||
int64_t qcow2_refcount_area(BlockDriverState *bs, uint64_t offset,
|
||||
uint64_t additional_clusters, bool exact_size,
|
||||
int new_refblock_index,
|
||||
uint64_t new_refblock_offset);
|
||||
|
||||
int64_t qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size);
|
||||
int64_t coroutine_fn qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
|
||||
int64_t nb_clusters);
|
||||
int64_t coroutine_fn GRAPH_RDLOCK qcow2_alloc_bytes(BlockDriverState *bs, int size);
|
||||
void GRAPH_RDLOCK qcow2_free_clusters(BlockDriverState *bs,
|
||||
int64_t offset, int64_t size,
|
||||
enum qcow2_discard_type type);
|
||||
void GRAPH_RDLOCK
|
||||
qcow2_free_any_cluster(BlockDriverState *bs, uint64_t l2_entry,
|
||||
enum qcow2_discard_type type);
|
||||
void qcow2_free_clusters(BlockDriverState *bs,
|
||||
int64_t offset, int64_t size,
|
||||
enum qcow2_discard_type type);
|
||||
void qcow2_free_any_cluster(BlockDriverState *bs, uint64_t l2_entry,
|
||||
enum qcow2_discard_type type);
|
||||
|
||||
int GRAPH_RDLOCK
|
||||
qcow2_update_snapshot_refcount(BlockDriverState *bs, int64_t l1_table_offset,
|
||||
int l1_size, int addend);
|
||||
int qcow2_update_snapshot_refcount(BlockDriverState *bs,
|
||||
int64_t l1_table_offset, int l1_size, int addend);
|
||||
|
||||
int GRAPH_RDLOCK qcow2_flush_caches(BlockDriverState *bs);
|
||||
int GRAPH_RDLOCK qcow2_write_caches(BlockDriverState *bs);
|
||||
int qcow2_flush_caches(BlockDriverState *bs);
|
||||
int qcow2_write_caches(BlockDriverState *bs);
|
||||
int coroutine_fn qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
|
||||
BdrvCheckMode fix);
|
||||
|
||||
void GRAPH_RDLOCK qcow2_process_discards(BlockDriverState *bs, int ret);
|
||||
|
||||
int GRAPH_RDLOCK
|
||||
qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset,
|
||||
int64_t size);
|
||||
int GRAPH_RDLOCK
|
||||
qcow2_pre_write_overlap_check(BlockDriverState *bs, int ign, int64_t offset,
|
||||
int64_t size, bool data_file);
|
||||
void qcow2_process_discards(BlockDriverState *bs, int ret);
|
||||
|
||||
int qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset,
|
||||
int64_t size);
|
||||
int qcow2_pre_write_overlap_check(BlockDriverState *bs, int ign, int64_t offset,
|
||||
int64_t size, bool data_file);
|
||||
int coroutine_fn qcow2_inc_refcounts_imrt(BlockDriverState *bs, BdrvCheckResult *res,
|
||||
void **refcount_table,
|
||||
int64_t *refcount_table_size,
|
||||
int64_t offset, int64_t size);
|
||||
|
||||
int GRAPH_RDLOCK
|
||||
qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order,
|
||||
BlockDriverAmendStatusCB *status_cb,
|
||||
void *cb_opaque, Error **errp);
|
||||
int qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order,
|
||||
BlockDriverAmendStatusCB *status_cb,
|
||||
void *cb_opaque, Error **errp);
|
||||
int coroutine_fn GRAPH_RDLOCK qcow2_shrink_reftable(BlockDriverState *bs);
|
||||
|
||||
int64_t coroutine_fn GRAPH_RDLOCK
|
||||
qcow2_get_last_cluster(BlockDriverState *bs, int64_t size);
|
||||
int64_t coroutine_fn qcow2_get_last_cluster(BlockDriverState *bs, int64_t size);
|
||||
|
||||
int coroutine_fn GRAPH_RDLOCK
|
||||
qcow2_detect_metadata_preallocation(BlockDriverState *bs);
|
||||
|
||||
/* qcow2-cluster.c functions */
|
||||
int GRAPH_RDLOCK
|
||||
qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size, bool exact_size);
|
||||
int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
|
||||
bool exact_size);
|
||||
|
||||
int coroutine_fn GRAPH_RDLOCK
|
||||
qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t max_size);
|
||||
|
||||
int GRAPH_RDLOCK qcow2_write_l1_entry(BlockDriverState *bs, int l1_index);
|
||||
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index);
|
||||
int qcow2_encrypt_sectors(BDRVQcow2State *s, int64_t sector_num,
|
||||
uint8_t *buf, int nb_sectors, bool enc, Error **errp);
|
||||
|
||||
int GRAPH_RDLOCK
|
||||
qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset,
|
||||
unsigned int *bytes, uint64_t *host_offset,
|
||||
QCow2SubclusterType *subcluster_type);
|
||||
|
||||
int coroutine_fn GRAPH_RDLOCK
|
||||
qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset,
|
||||
unsigned int *bytes, uint64_t *host_offset,
|
||||
QCowL2Meta **m);
|
||||
|
||||
int qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset,
|
||||
unsigned int *bytes, uint64_t *host_offset,
|
||||
QCow2SubclusterType *subcluster_type);
|
||||
int coroutine_fn qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset,
|
||||
unsigned int *bytes,
|
||||
uint64_t *host_offset, QCowL2Meta **m);
|
||||
int coroutine_fn GRAPH_RDLOCK
|
||||
qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, uint64_t offset,
|
||||
int compressed_size, uint64_t *host_offset);
|
||||
void GRAPH_RDLOCK
|
||||
qcow2_parse_compressed_l2_entry(BlockDriverState *bs, uint64_t l2_entry,
|
||||
uint64_t *coffset, int *csize);
|
||||
void qcow2_parse_compressed_l2_entry(BlockDriverState *bs, uint64_t l2_entry,
|
||||
uint64_t *coffset, int *csize);
|
||||
|
||||
int coroutine_fn GRAPH_RDLOCK
|
||||
qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m);
|
||||
|
||||
void coroutine_fn GRAPH_RDLOCK
|
||||
qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m);
|
||||
|
||||
int GRAPH_RDLOCK
|
||||
qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
|
||||
enum qcow2_discard_type type, bool full_discard);
|
||||
void coroutine_fn qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m);
|
||||
int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset,
|
||||
uint64_t bytes, enum qcow2_discard_type type,
|
||||
bool full_discard);
|
||||
|
||||
int coroutine_fn GRAPH_RDLOCK
|
||||
qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
|
||||
int flags);
|
||||
|
||||
int GRAPH_RDLOCK
|
||||
qcow2_expand_zero_clusters(BlockDriverState *bs,
|
||||
BlockDriverAmendStatusCB *status_cb,
|
||||
void *cb_opaque);
|
||||
int qcow2_expand_zero_clusters(BlockDriverState *bs,
|
||||
BlockDriverAmendStatusCB *status_cb,
|
||||
void *cb_opaque);
|
||||
|
||||
/* qcow2-snapshot.c functions */
|
||||
int GRAPH_RDLOCK
|
||||
qcow2_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info);
|
||||
|
||||
int GRAPH_RDLOCK
|
||||
qcow2_snapshot_goto(BlockDriverState *bs, const char *snapshot_id);
|
||||
|
||||
int GRAPH_RDLOCK
|
||||
qcow2_snapshot_delete(BlockDriverState *bs, const char *snapshot_id,
|
||||
const char *name, Error **errp);
|
||||
|
||||
int GRAPH_RDLOCK
|
||||
qcow2_snapshot_list(BlockDriverState *bs, QEMUSnapshotInfo **psn_tab);
|
||||
|
||||
int GRAPH_RDLOCK
|
||||
qcow2_snapshot_load_tmp(BlockDriverState *bs, const char *snapshot_id,
|
||||
const char *name, Error **errp);
|
||||
int qcow2_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info);
|
||||
int qcow2_snapshot_goto(BlockDriverState *bs, const char *snapshot_id);
|
||||
int qcow2_snapshot_delete(BlockDriverState *bs,
|
||||
const char *snapshot_id,
|
||||
const char *name,
|
||||
Error **errp);
|
||||
int qcow2_snapshot_list(BlockDriverState *bs, QEMUSnapshotInfo **psn_tab);
|
||||
int qcow2_snapshot_load_tmp(BlockDriverState *bs,
|
||||
const char *snapshot_id,
|
||||
const char *name,
|
||||
Error **errp);
|
||||
|
||||
void qcow2_free_snapshots(BlockDriverState *bs);
|
||||
int coroutine_fn GRAPH_RDLOCK
|
||||
qcow2_read_snapshots(BlockDriverState *bs, Error **errp);
|
||||
int GRAPH_RDLOCK qcow2_write_snapshots(BlockDriverState *bs);
|
||||
int qcow2_write_snapshots(BlockDriverState *bs);
|
||||
|
||||
int coroutine_fn GRAPH_RDLOCK
|
||||
qcow2_check_read_snapshot_table(BlockDriverState *bs, BdrvCheckResult *result,
|
||||
BdrvCheckMode fix);
|
||||
|
||||
int coroutine_fn GRAPH_RDLOCK
|
||||
qcow2_check_fix_snapshot_table(BlockDriverState *bs, BdrvCheckResult *result,
|
||||
BdrvCheckMode fix);
|
||||
int coroutine_fn qcow2_check_fix_snapshot_table(BlockDriverState *bs,
|
||||
BdrvCheckResult *result,
|
||||
BdrvCheckMode fix);
|
||||
|
||||
/* qcow2-cache.c functions */
|
||||
Qcow2Cache * GRAPH_RDLOCK
|
||||
qcow2_cache_create(BlockDriverState *bs, int num_tables, unsigned table_size);
|
||||
|
||||
Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables,
|
||||
unsigned table_size);
|
||||
int qcow2_cache_destroy(Qcow2Cache *c);
|
||||
|
||||
void qcow2_cache_entry_mark_dirty(Qcow2Cache *c, void *table);
|
||||
int GRAPH_RDLOCK qcow2_cache_flush(BlockDriverState *bs, Qcow2Cache *c);
|
||||
int GRAPH_RDLOCK qcow2_cache_write(BlockDriverState *bs, Qcow2Cache *c);
|
||||
int GRAPH_RDLOCK qcow2_cache_set_dependency(BlockDriverState *bs, Qcow2Cache *c,
|
||||
Qcow2Cache *dependency);
|
||||
int qcow2_cache_flush(BlockDriverState *bs, Qcow2Cache *c);
|
||||
int qcow2_cache_write(BlockDriverState *bs, Qcow2Cache *c);
|
||||
int qcow2_cache_set_dependency(BlockDriverState *bs, Qcow2Cache *c,
|
||||
Qcow2Cache *dependency);
|
||||
void qcow2_cache_depends_on_flush(Qcow2Cache *c);
|
||||
|
||||
void qcow2_cache_clean_unused(Qcow2Cache *c);
|
||||
int GRAPH_RDLOCK qcow2_cache_empty(BlockDriverState *bs, Qcow2Cache *c);
|
||||
|
||||
int GRAPH_RDLOCK
|
||||
qcow2_cache_get(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
|
||||
void **table);
|
||||
|
||||
int GRAPH_RDLOCK
|
||||
qcow2_cache_get_empty(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
|
||||
void **table);
|
||||
int qcow2_cache_empty(BlockDriverState *bs, Qcow2Cache *c);
|
||||
|
||||
int qcow2_cache_get(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
|
||||
void **table);
|
||||
int qcow2_cache_get_empty(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
|
||||
void **table);
|
||||
void qcow2_cache_put(Qcow2Cache *c, void **table);
|
||||
void *qcow2_cache_is_table_offset(Qcow2Cache *c, uint64_t offset);
|
||||
void qcow2_cache_discard(Qcow2Cache *c, void *table);
|
||||
|
||||
/* qcow2-bitmap.c functions */
|
||||
int coroutine_fn GRAPH_RDLOCK
|
||||
int coroutine_fn
|
||||
qcow2_check_bitmaps_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
|
||||
void **refcount_table,
|
||||
int64_t *refcount_table_size);
|
||||
|
||||
bool coroutine_fn GRAPH_RDLOCK
|
||||
qcow2_load_dirty_bitmaps(BlockDriverState *bs, bool *header_updated,
|
||||
Error **errp);
|
||||
|
||||
bool GRAPH_RDLOCK
|
||||
qcow2_get_bitmap_info_list(BlockDriverState *bs,
|
||||
Qcow2BitmapInfoList **info_list, Error **errp);
|
||||
|
||||
int GRAPH_RDLOCK qcow2_reopen_bitmaps_rw(BlockDriverState *bs, Error **errp);
|
||||
int GRAPH_RDLOCK qcow2_reopen_bitmaps_ro(BlockDriverState *bs, Error **errp);
|
||||
|
||||
int coroutine_fn GRAPH_RDLOCK
|
||||
qcow2_truncate_bitmaps_check(BlockDriverState *bs, Error **errp);
|
||||
|
||||
bool GRAPH_RDLOCK
|
||||
qcow2_store_persistent_dirty_bitmaps(BlockDriverState *bs, bool release_stored,
|
||||
Error **errp);
|
||||
|
||||
bool coroutine_fn GRAPH_RDLOCK
|
||||
qcow2_co_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name,
|
||||
uint32_t granularity, Error **errp);
|
||||
|
||||
int coroutine_fn GRAPH_RDLOCK
|
||||
qcow2_co_remove_persistent_dirty_bitmap(BlockDriverState *bs, const char *name,
|
||||
Error **errp);
|
||||
|
||||
qcow2_load_dirty_bitmaps(BlockDriverState *bs, bool *header_updated, Error **errp);
|
||||
bool qcow2_get_bitmap_info_list(BlockDriverState *bs,
|
||||
Qcow2BitmapInfoList **info_list, Error **errp);
|
||||
int qcow2_reopen_bitmaps_rw(BlockDriverState *bs, Error **errp);
|
||||
int coroutine_fn qcow2_truncate_bitmaps_check(BlockDriverState *bs, Error **errp);
|
||||
bool qcow2_store_persistent_dirty_bitmaps(BlockDriverState *bs,
|
||||
bool release_stored, Error **errp);
|
||||
int qcow2_reopen_bitmaps_ro(BlockDriverState *bs, Error **errp);
|
||||
bool coroutine_fn qcow2_co_can_store_new_dirty_bitmap(BlockDriverState *bs,
|
||||
const char *name,
|
||||
uint32_t granularity,
|
||||
Error **errp);
|
||||
int coroutine_fn qcow2_co_remove_persistent_dirty_bitmap(BlockDriverState *bs,
|
||||
const char *name,
|
||||
Error **errp);
|
||||
bool qcow2_supports_persistent_dirty_bitmap(BlockDriverState *bs);
|
||||
uint64_t qcow2_get_persistent_dirty_bitmap_size(BlockDriverState *bs,
|
||||
uint32_t cluster_size);
|
||||
|
||||
86
block/qed.c
86
block/qed.c
@@ -612,7 +612,7 @@ static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void GRAPH_RDLOCK bdrv_qed_do_close(BlockDriverState *bs)
|
||||
static void bdrv_qed_close(BlockDriverState *bs)
|
||||
{
|
||||
BDRVQEDState *s = bs->opaque;
|
||||
|
||||
@@ -631,14 +631,6 @@ static void GRAPH_RDLOCK bdrv_qed_do_close(BlockDriverState *bs)
|
||||
qemu_vfree(s->l1_table);
|
||||
}
|
||||
|
||||
static void GRAPH_UNLOCKED bdrv_qed_close(BlockDriverState *bs)
|
||||
{
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
bdrv_qed_do_close(bs);
|
||||
}
|
||||
|
||||
static int coroutine_fn GRAPH_UNLOCKED
|
||||
bdrv_qed_co_create(BlockdevCreateOptions *opts, Error **errp)
|
||||
{
|
||||
@@ -1146,7 +1138,7 @@ out:
|
||||
/**
|
||||
* Check if the QED_F_NEED_CHECK bit should be set during allocating write
|
||||
*/
|
||||
static bool GRAPH_RDLOCK qed_should_set_need_check(BDRVQEDState *s)
|
||||
static bool qed_should_set_need_check(BDRVQEDState *s)
|
||||
{
|
||||
/* The flush before L2 update path ensures consistency */
|
||||
if (s->bs->backing) {
|
||||
@@ -1451,10 +1443,12 @@ bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
||||
QED_AIOCB_WRITE | QED_AIOCB_ZERO);
|
||||
}
|
||||
|
||||
static int coroutine_fn GRAPH_RDLOCK
|
||||
bdrv_qed_co_truncate(BlockDriverState *bs, int64_t offset, bool exact,
|
||||
PreallocMode prealloc, BdrvRequestFlags flags,
|
||||
Error **errp)
|
||||
static int coroutine_fn bdrv_qed_co_truncate(BlockDriverState *bs,
|
||||
int64_t offset,
|
||||
bool exact,
|
||||
PreallocMode prealloc,
|
||||
BdrvRequestFlags flags,
|
||||
Error **errp)
|
||||
{
|
||||
BDRVQEDState *s = bs->opaque;
|
||||
uint64_t old_image_size;
|
||||
@@ -1504,9 +1498,9 @@ bdrv_qed_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int coroutine_fn GRAPH_RDLOCK
|
||||
bdrv_qed_co_change_backing_file(BlockDriverState *bs, const char *backing_file,
|
||||
const char *backing_fmt)
|
||||
static int bdrv_qed_change_backing_file(BlockDriverState *bs,
|
||||
const char *backing_file,
|
||||
const char *backing_fmt)
|
||||
{
|
||||
BDRVQEDState *s = bs->opaque;
|
||||
QEDHeader new_header, le_header;
|
||||
@@ -1568,7 +1562,7 @@ bdrv_qed_co_change_backing_file(BlockDriverState *bs, const char *backing_file,
|
||||
}
|
||||
|
||||
/* Write new header */
|
||||
ret = bdrv_co_pwrite_sync(bs->file, 0, buffer_len, buffer, 0);
|
||||
ret = bdrv_pwrite_sync(bs->file, 0, buffer_len, buffer, 0);
|
||||
g_free(buffer);
|
||||
if (ret == 0) {
|
||||
memcpy(&s->header, &new_header, sizeof(new_header));
|
||||
@@ -1582,7 +1576,7 @@ bdrv_qed_co_invalidate_cache(BlockDriverState *bs, Error **errp)
|
||||
BDRVQEDState *s = bs->opaque;
|
||||
int ret;
|
||||
|
||||
bdrv_qed_do_close(bs);
|
||||
bdrv_qed_close(bs);
|
||||
|
||||
bdrv_qed_init_state(bs);
|
||||
qemu_co_mutex_lock(&s->table_lock);
|
||||
@@ -1642,34 +1636,34 @@ static QemuOptsList qed_create_opts = {
|
||||
};
|
||||
|
||||
static BlockDriver bdrv_qed = {
|
||||
.format_name = "qed",
|
||||
.instance_size = sizeof(BDRVQEDState),
|
||||
.create_opts = &qed_create_opts,
|
||||
.is_format = true,
|
||||
.supports_backing = true,
|
||||
.format_name = "qed",
|
||||
.instance_size = sizeof(BDRVQEDState),
|
||||
.create_opts = &qed_create_opts,
|
||||
.is_format = true,
|
||||
.supports_backing = true,
|
||||
|
||||
.bdrv_probe = bdrv_qed_probe,
|
||||
.bdrv_open = bdrv_qed_open,
|
||||
.bdrv_close = bdrv_qed_close,
|
||||
.bdrv_reopen_prepare = bdrv_qed_reopen_prepare,
|
||||
.bdrv_child_perm = bdrv_default_perms,
|
||||
.bdrv_co_create = bdrv_qed_co_create,
|
||||
.bdrv_co_create_opts = bdrv_qed_co_create_opts,
|
||||
.bdrv_has_zero_init = bdrv_has_zero_init_1,
|
||||
.bdrv_co_block_status = bdrv_qed_co_block_status,
|
||||
.bdrv_co_readv = bdrv_qed_co_readv,
|
||||
.bdrv_co_writev = bdrv_qed_co_writev,
|
||||
.bdrv_co_pwrite_zeroes = bdrv_qed_co_pwrite_zeroes,
|
||||
.bdrv_co_truncate = bdrv_qed_co_truncate,
|
||||
.bdrv_co_getlength = bdrv_qed_co_getlength,
|
||||
.bdrv_co_get_info = bdrv_qed_co_get_info,
|
||||
.bdrv_refresh_limits = bdrv_qed_refresh_limits,
|
||||
.bdrv_co_change_backing_file = bdrv_qed_co_change_backing_file,
|
||||
.bdrv_co_invalidate_cache = bdrv_qed_co_invalidate_cache,
|
||||
.bdrv_co_check = bdrv_qed_co_check,
|
||||
.bdrv_detach_aio_context = bdrv_qed_detach_aio_context,
|
||||
.bdrv_attach_aio_context = bdrv_qed_attach_aio_context,
|
||||
.bdrv_drain_begin = bdrv_qed_drain_begin,
|
||||
.bdrv_probe = bdrv_qed_probe,
|
||||
.bdrv_open = bdrv_qed_open,
|
||||
.bdrv_close = bdrv_qed_close,
|
||||
.bdrv_reopen_prepare = bdrv_qed_reopen_prepare,
|
||||
.bdrv_child_perm = bdrv_default_perms,
|
||||
.bdrv_co_create = bdrv_qed_co_create,
|
||||
.bdrv_co_create_opts = bdrv_qed_co_create_opts,
|
||||
.bdrv_has_zero_init = bdrv_has_zero_init_1,
|
||||
.bdrv_co_block_status = bdrv_qed_co_block_status,
|
||||
.bdrv_co_readv = bdrv_qed_co_readv,
|
||||
.bdrv_co_writev = bdrv_qed_co_writev,
|
||||
.bdrv_co_pwrite_zeroes = bdrv_qed_co_pwrite_zeroes,
|
||||
.bdrv_co_truncate = bdrv_qed_co_truncate,
|
||||
.bdrv_co_getlength = bdrv_qed_co_getlength,
|
||||
.bdrv_co_get_info = bdrv_qed_co_get_info,
|
||||
.bdrv_refresh_limits = bdrv_qed_refresh_limits,
|
||||
.bdrv_change_backing_file = bdrv_qed_change_backing_file,
|
||||
.bdrv_co_invalidate_cache = bdrv_qed_co_invalidate_cache,
|
||||
.bdrv_co_check = bdrv_qed_co_check,
|
||||
.bdrv_detach_aio_context = bdrv_qed_detach_aio_context,
|
||||
.bdrv_attach_aio_context = bdrv_qed_attach_aio_context,
|
||||
.bdrv_drain_begin = bdrv_qed_drain_begin,
|
||||
};
|
||||
|
||||
static void bdrv_qed_init(void)
|
||||
|
||||
@@ -185,7 +185,7 @@ enum {
|
||||
/**
|
||||
* Header functions
|
||||
*/
|
||||
int GRAPH_RDLOCK qed_write_header_sync(BDRVQEDState *s);
|
||||
int qed_write_header_sync(BDRVQEDState *s);
|
||||
|
||||
/**
|
||||
* L2 cache functions
|
||||
|
||||
@@ -206,7 +206,7 @@ static void quorum_report_bad(QuorumOpType type, uint64_t offset,
|
||||
end_sector - start_sector);
|
||||
}
|
||||
|
||||
static void GRAPH_RDLOCK quorum_report_failure(QuorumAIOCB *acb)
|
||||
static void quorum_report_failure(QuorumAIOCB *acb)
|
||||
{
|
||||
const char *reference = bdrv_get_device_or_node_name(acb->bs);
|
||||
int64_t start_sector = acb->offset / BDRV_SECTOR_SIZE;
|
||||
@@ -219,7 +219,7 @@ static void GRAPH_RDLOCK quorum_report_failure(QuorumAIOCB *acb)
|
||||
|
||||
static int quorum_vote_error(QuorumAIOCB *acb);
|
||||
|
||||
static bool GRAPH_RDLOCK quorum_has_too_much_io_failed(QuorumAIOCB *acb)
|
||||
static bool quorum_has_too_much_io_failed(QuorumAIOCB *acb)
|
||||
{
|
||||
BDRVQuorumState *s = acb->bs->opaque;
|
||||
|
||||
|
||||
@@ -95,9 +95,9 @@ end:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK
|
||||
raw_apply_options(BlockDriverState *bs, BDRVRawState *s, uint64_t offset,
|
||||
bool has_size, uint64_t size, Error **errp)
|
||||
static int raw_apply_options(BlockDriverState *bs, BDRVRawState *s,
|
||||
uint64_t offset, bool has_size, uint64_t size,
|
||||
Error **errp)
|
||||
{
|
||||
int64_t real_size = 0;
|
||||
|
||||
@@ -145,9 +145,6 @@ static int raw_reopen_prepare(BDRVReopenState *reopen_state,
|
||||
uint64_t offset, size;
|
||||
int ret;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
assert(reopen_state != NULL);
|
||||
assert(reopen_state->bs != NULL);
|
||||
|
||||
@@ -282,10 +279,11 @@ fail:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int coroutine_fn GRAPH_RDLOCK
|
||||
raw_co_block_status(BlockDriverState *bs, bool want_zero, int64_t offset,
|
||||
int64_t bytes, int64_t *pnum, int64_t *map,
|
||||
BlockDriverState **file)
|
||||
static int coroutine_fn raw_co_block_status(BlockDriverState *bs,
|
||||
bool want_zero, int64_t offset,
|
||||
int64_t bytes, int64_t *pnum,
|
||||
int64_t *map,
|
||||
BlockDriverState **file)
|
||||
{
|
||||
BDRVRawState *s = bs->opaque;
|
||||
*pnum = bytes;
|
||||
@@ -399,7 +397,7 @@ raw_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
|
||||
return bdrv_co_get_info(bs->file->bs, bdi);
|
||||
}
|
||||
|
||||
static void GRAPH_RDLOCK raw_refresh_limits(BlockDriverState *bs, Error **errp)
|
||||
static void raw_refresh_limits(BlockDriverState *bs, Error **errp)
|
||||
{
|
||||
bs->bl.has_variable_length = bs->file->bs->bl.has_variable_length;
|
||||
|
||||
@@ -454,7 +452,7 @@ raw_co_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
|
||||
return bdrv_co_ioctl(bs->file->bs, req, buf);
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK raw_has_zero_init(BlockDriverState *bs)
|
||||
static int raw_has_zero_init(BlockDriverState *bs)
|
||||
{
|
||||
return bdrv_has_zero_init(bs->file->bs);
|
||||
}
|
||||
@@ -476,8 +474,6 @@ static int raw_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
BdrvChildRole file_role;
|
||||
int ret;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
ret = raw_read_options(options, &offset, &has_size, &size, errp);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
@@ -495,8 +491,6 @@ static int raw_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
|
||||
bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
|
||||
file_role, false, errp);
|
||||
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
if (!bs->file) {
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -547,8 +541,7 @@ static int raw_probe(const uint8_t *buf, int buf_size, const char *filename)
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK
|
||||
raw_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
|
||||
static int raw_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
|
||||
{
|
||||
BDRVRawState *s = bs->opaque;
|
||||
int ret;
|
||||
@@ -565,8 +558,7 @@ raw_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK
|
||||
raw_probe_geometry(BlockDriverState *bs, HDGeometry *geo)
|
||||
static int raw_probe_geometry(BlockDriverState *bs, HDGeometry *geo)
|
||||
{
|
||||
BDRVRawState *s = bs->opaque;
|
||||
if (s->offset || s->has_size) {
|
||||
@@ -616,7 +608,7 @@ static const char *const raw_strong_runtime_opts[] = {
|
||||
NULL
|
||||
};
|
||||
|
||||
static void GRAPH_RDLOCK raw_cancel_in_flight(BlockDriverState *bs)
|
||||
static void raw_cancel_in_flight(BlockDriverState *bs)
|
||||
{
|
||||
bdrv_cancel_in_flight(bs->file->bs);
|
||||
}
|
||||
|
||||
@@ -1168,9 +1168,7 @@ static int qemu_rbd_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
/* If we are using an rbd snapshot, we must be r/o, otherwise
|
||||
* leave as-is */
|
||||
if (s->snap != NULL) {
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
r = bdrv_apply_auto_read_only(bs, "rbd snapshots are read-only", errp);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
if (r < 0) {
|
||||
goto failed_post_open;
|
||||
}
|
||||
@@ -1210,8 +1208,6 @@ static int qemu_rbd_reopen_prepare(BDRVReopenState *state,
|
||||
BDRVRBDState *s = state->bs->opaque;
|
||||
int ret = 0;
|
||||
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
if (s->snap && state->flags & BDRV_O_RDWR) {
|
||||
error_setg(errp,
|
||||
"Cannot change node '%s' to r/w when using RBD snapshot",
|
||||
|
||||
@@ -276,10 +276,10 @@ replication_co_writev(BlockDriverState *bs, int64_t sector_num,
|
||||
while (remaining_sectors > 0) {
|
||||
int64_t count;
|
||||
|
||||
ret = bdrv_co_is_allocated_above(top->bs, base->bs, false,
|
||||
sector_num * BDRV_SECTOR_SIZE,
|
||||
remaining_sectors * BDRV_SECTOR_SIZE,
|
||||
&count);
|
||||
ret = bdrv_is_allocated_above(top->bs, base->bs, false,
|
||||
sector_num * BDRV_SECTOR_SIZE,
|
||||
remaining_sectors * BDRV_SECTOR_SIZE,
|
||||
&count);
|
||||
if (ret < 0) {
|
||||
goto out1;
|
||||
}
|
||||
@@ -307,16 +307,13 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void GRAPH_UNLOCKED
|
||||
secondary_do_checkpoint(BlockDriverState *bs, Error **errp)
|
||||
static void secondary_do_checkpoint(BlockDriverState *bs, Error **errp)
|
||||
{
|
||||
BDRVReplicationState *s = bs->opaque;
|
||||
BdrvChild *active_disk;
|
||||
BdrvChild *active_disk = bs->file;
|
||||
Error *local_err = NULL;
|
||||
int ret;
|
||||
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
if (!s->backup_job) {
|
||||
error_setg(errp, "Backup job was cancelled unexpectedly");
|
||||
return;
|
||||
@@ -328,7 +325,6 @@ secondary_do_checkpoint(BlockDriverState *bs, Error **errp)
|
||||
return;
|
||||
}
|
||||
|
||||
active_disk = bs->file;
|
||||
if (!active_disk->bs->drv) {
|
||||
error_setg(errp, "Active disk %s is ejected",
|
||||
active_disk->bs->node_name);
|
||||
@@ -364,9 +360,6 @@ static void reopen_backing_file(BlockDriverState *bs, bool writable,
|
||||
BdrvChild *hidden_disk, *secondary_disk;
|
||||
BlockReopenQueue *reopen_queue = NULL;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
/*
|
||||
* s->hidden_disk and s->secondary_disk may not be set yet, as they will
|
||||
* only be set after the children are writable.
|
||||
@@ -434,8 +427,7 @@ static void backup_job_completed(void *opaque, int ret)
|
||||
backup_job_cleanup(bs);
|
||||
}
|
||||
|
||||
static bool GRAPH_RDLOCK
|
||||
check_top_bs(BlockDriverState *top_bs, BlockDriverState *bs)
|
||||
static bool check_top_bs(BlockDriverState *top_bs, BlockDriverState *bs)
|
||||
{
|
||||
BdrvChild *child;
|
||||
|
||||
@@ -466,8 +458,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
|
||||
Error *local_err = NULL;
|
||||
BackupPerf perf = { .use_copy_range = true, .max_workers = 1 };
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
aio_context = bdrv_get_aio_context(bs);
|
||||
aio_context_acquire(aio_context);
|
||||
s = bs->opaque;
|
||||
@@ -500,11 +490,9 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
|
||||
case REPLICATION_MODE_PRIMARY:
|
||||
break;
|
||||
case REPLICATION_MODE_SECONDARY:
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
active_disk = bs->file;
|
||||
if (!active_disk || !active_disk->bs || !active_disk->bs->backing) {
|
||||
error_setg(errp, "Active disk doesn't have backing file");
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
aio_context_release(aio_context);
|
||||
return;
|
||||
}
|
||||
@@ -512,7 +500,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
|
||||
hidden_disk = active_disk->bs->backing;
|
||||
if (!hidden_disk->bs || !hidden_disk->bs->backing) {
|
||||
error_setg(errp, "Hidden disk doesn't have backing file");
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
aio_context_release(aio_context);
|
||||
return;
|
||||
}
|
||||
@@ -520,11 +507,9 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
|
||||
secondary_disk = hidden_disk->bs->backing;
|
||||
if (!secondary_disk->bs || !bdrv_has_blk(secondary_disk->bs)) {
|
||||
error_setg(errp, "The secondary disk doesn't have block backend");
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
aio_context_release(aio_context);
|
||||
return;
|
||||
}
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
|
||||
/* verify the length */
|
||||
active_length = bdrv_getlength(active_disk->bs);
|
||||
@@ -541,16 +526,13 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
|
||||
/* Must be true, or the bdrv_getlength() calls would have failed */
|
||||
assert(active_disk->bs->drv && hidden_disk->bs->drv);
|
||||
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
if (!active_disk->bs->drv->bdrv_make_empty ||
|
||||
!hidden_disk->bs->drv->bdrv_make_empty) {
|
||||
error_setg(errp,
|
||||
"Active disk or hidden disk doesn't support make_empty");
|
||||
aio_context_release(aio_context);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
return;
|
||||
}
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
|
||||
/* reopen the backing file in r/w mode */
|
||||
reopen_backing_file(bs, true, &local_err);
|
||||
@@ -584,6 +566,8 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
|
||||
return;
|
||||
}
|
||||
|
||||
bdrv_graph_wrunlock();
|
||||
|
||||
/* start backup job now */
|
||||
error_setg(&s->blocker,
|
||||
"Block device is in use by internal backup job");
|
||||
@@ -592,7 +576,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
|
||||
if (!top_bs || !bdrv_is_root_node(top_bs) ||
|
||||
!check_top_bs(top_bs, bs)) {
|
||||
error_setg(errp, "No top_bs or it is invalid");
|
||||
bdrv_graph_wrunlock();
|
||||
reopen_backing_file(bs, false, NULL);
|
||||
aio_context_release(aio_context);
|
||||
return;
|
||||
@@ -600,8 +583,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
|
||||
bdrv_op_block_all(top_bs, s->blocker);
|
||||
bdrv_op_unblock(top_bs, BLOCK_OP_TYPE_DATAPLANE, s->blocker);
|
||||
|
||||
bdrv_graph_wrunlock();
|
||||
|
||||
s->backup_job = backup_job_create(
|
||||
NULL, s->secondary_disk->bs, s->hidden_disk->bs,
|
||||
0, MIRROR_SYNC_MODE_NONE, NULL, 0, false, NULL,
|
||||
@@ -756,13 +737,11 @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp)
|
||||
return;
|
||||
}
|
||||
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
s->stage = BLOCK_REPLICATION_FAILOVER;
|
||||
s->commit_job = commit_active_start(
|
||||
NULL, bs->file->bs, s->secondary_disk->bs,
|
||||
JOB_INTERNAL, 0, BLOCKDEV_ON_ERROR_REPORT,
|
||||
NULL, replication_done, bs, true, errp);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
break;
|
||||
default:
|
||||
aio_context_release(aio_context);
|
||||
|
||||
@@ -73,7 +73,7 @@ snapshot_access_co_pwritev_part(BlockDriverState *bs,
|
||||
}
|
||||
|
||||
|
||||
static void GRAPH_RDLOCK snapshot_access_refresh_filename(BlockDriverState *bs)
|
||||
static void snapshot_access_refresh_filename(BlockDriverState *bs)
|
||||
{
|
||||
pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
|
||||
bs->file->bs->filename);
|
||||
@@ -85,9 +85,6 @@ static int snapshot_access_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
|
||||
BDRV_CHILD_DATA | BDRV_CHILD_PRIMARY,
|
||||
false, errp);
|
||||
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
if (!bs->file) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@@ -155,15 +155,11 @@ bool bdrv_snapshot_find_by_id_and_name(BlockDriverState *bs,
|
||||
* back if the given BDS does not support snapshots.
|
||||
* Return NULL if there is no BDS to (safely) fall back to.
|
||||
*/
|
||||
static BdrvChild * GRAPH_RDLOCK
|
||||
bdrv_snapshot_fallback_child(BlockDriverState *bs)
|
||||
static BdrvChild *bdrv_snapshot_fallback_child(BlockDriverState *bs)
|
||||
{
|
||||
BdrvChild *fallback = bdrv_primary_child(bs);
|
||||
BdrvChild *child;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
assert_bdrv_graph_readable();
|
||||
|
||||
/* We allow fallback only to primary child */
|
||||
if (!fallback) {
|
||||
return NULL;
|
||||
@@ -186,10 +182,8 @@ bdrv_snapshot_fallback_child(BlockDriverState *bs)
|
||||
return fallback;
|
||||
}
|
||||
|
||||
static BlockDriverState * GRAPH_RDLOCK
|
||||
bdrv_snapshot_fallback(BlockDriverState *bs)
|
||||
static BlockDriverState *bdrv_snapshot_fallback(BlockDriverState *bs)
|
||||
{
|
||||
GLOBAL_STATE_CODE();
|
||||
return child_bs(bdrv_snapshot_fallback_child(bs));
|
||||
}
|
||||
|
||||
@@ -260,10 +254,7 @@ int bdrv_snapshot_goto(BlockDriverState *bs,
|
||||
return ret;
|
||||
}
|
||||
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
fallback = bdrv_snapshot_fallback_child(bs);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
|
||||
if (fallback) {
|
||||
QDict *options;
|
||||
QDict *file_options;
|
||||
@@ -311,10 +302,7 @@ int bdrv_snapshot_goto(BlockDriverState *bs,
|
||||
* respective option (with the qdict_put_str() call above).
|
||||
* Assert that .bdrv_open() has attached the right BDS as primary child.
|
||||
*/
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
assert(bdrv_primary_bs(bs) == fallback_bs);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
|
||||
bdrv_unref(fallback_bs);
|
||||
return ret;
|
||||
}
|
||||
@@ -386,12 +374,10 @@ int bdrv_snapshot_delete(BlockDriverState *bs,
|
||||
int bdrv_snapshot_list(BlockDriverState *bs,
|
||||
QEMUSnapshotInfo **psn_info)
{
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

BlockDriver *drv = bs->drv;
BlockDriverState *fallback_bs = bdrv_snapshot_fallback(bs);

GLOBAL_STATE_CODE();
if (!drv) {
return -ENOMEDIUM;
}
@@ -432,7 +418,6 @@ int bdrv_snapshot_load_tmp(BlockDriverState *bs,
BlockDriver *drv = bs->drv;

GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

if (!drv) {
error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, bdrv_get_device_name(bs));
@@ -477,9 +462,9 @@ int bdrv_snapshot_load_tmp_by_id_or_name(BlockDriverState *bs,
}


static int GRAPH_RDLOCK
bdrv_all_get_snapshot_devices(bool has_devices, strList *devices,
GList **all_bdrvs, Error **errp)
static int bdrv_all_get_snapshot_devices(bool has_devices, strList *devices,
GList **all_bdrvs,
Error **errp)
{
g_autoptr(GList) bdrvs = NULL;

@@ -511,11 +496,8 @@ bdrv_all_get_snapshot_devices(bool has_devices, strList *devices,
}


static bool GRAPH_RDLOCK bdrv_all_snapshots_includes_bs(BlockDriverState *bs)
static bool bdrv_all_snapshots_includes_bs(BlockDriverState *bs)
{
GLOBAL_STATE_CODE();
assert_bdrv_graph_readable();

if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
return false;
}
@@ -536,7 +518,6 @@ bool bdrv_all_can_snapshot(bool has_devices, strList *devices,
GList *iterbdrvs;

GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) {
return false;
@@ -573,7 +554,6 @@ int bdrv_all_delete_snapshot(const char *name,
GList *iterbdrvs;

GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) {
return -1;
@@ -613,15 +593,10 @@ int bdrv_all_goto_snapshot(const char *name,
{
g_autoptr(GList) bdrvs = NULL;
GList *iterbdrvs;
int ret;

GLOBAL_STATE_CODE();

bdrv_graph_rdlock_main_loop();
ret = bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp);
bdrv_graph_rdunlock_main_loop();

if (ret < 0) {
if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) {
return -1;
}

@@ -630,22 +605,15 @@ int bdrv_all_goto_snapshot(const char *name,
BlockDriverState *bs = iterbdrvs->data;
AioContext *ctx = bdrv_get_aio_context(bs);
int ret = 0;
bool all_snapshots_includes_bs;

aio_context_acquire(ctx);
bdrv_graph_rdlock_main_loop();
all_snapshots_includes_bs = bdrv_all_snapshots_includes_bs(bs);
bdrv_graph_rdunlock_main_loop();

if (devices || all_snapshots_includes_bs) {
if (devices || bdrv_all_snapshots_includes_bs(bs)) {
ret = bdrv_snapshot_goto(bs, name, errp);
}
aio_context_release(ctx);
if (ret < 0) {
bdrv_graph_rdlock_main_loop();
error_prepend(errp, "Could not load snapshot '%s' on '%s': ",
name, bdrv_get_device_or_node_name(bs));
bdrv_graph_rdunlock_main_loop();
return -1;
}

@@ -663,7 +631,6 @@ int bdrv_all_has_snapshot(const char *name,
GList *iterbdrvs;

GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) {
return -1;
@@ -706,9 +673,7 @@ int bdrv_all_create_snapshot(QEMUSnapshotInfo *sn,
{
g_autoptr(GList) bdrvs = NULL;
GList *iterbdrvs;

GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) {
return -1;
@@ -750,7 +715,6 @@ BlockDriverState *bdrv_all_find_vmstate_bs(const char *vmstate_bs,
GList *iterbdrvs;

GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) {
return NULL;
@@ -53,20 +53,13 @@ static int coroutine_fn stream_populate(BlockBackend *blk,
static int stream_prepare(Job *job)
{
StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
BlockDriverState *unfiltered_bs;
BlockDriverState *unfiltered_bs_cow;
BlockDriverState *unfiltered_bs = bdrv_skip_filters(s->target_bs);
BlockDriverState *unfiltered_bs_cow = bdrv_cow_bs(unfiltered_bs);
BlockDriverState *base;
BlockDriverState *unfiltered_base;
Error *local_err = NULL;
int ret = 0;

GLOBAL_STATE_CODE();

bdrv_graph_rdlock_main_loop();
unfiltered_bs = bdrv_skip_filters(s->target_bs);
unfiltered_bs_cow = bdrv_cow_bs(unfiltered_bs);
bdrv_graph_rdunlock_main_loop();

/* We should drop filter at this point, as filter hold the backing chain */
bdrv_cor_filter_drop(s->cor_filter_bs);
s->cor_filter_bs = NULL;
@@ -85,12 +78,10 @@ static int stream_prepare(Job *job)
bdrv_drained_begin(unfiltered_bs_cow);
}

bdrv_graph_rdlock_main_loop();
base = bdrv_filter_or_cow_bs(s->above_base);
unfiltered_base = bdrv_skip_filters(base);
bdrv_graph_rdunlock_main_loop();

if (unfiltered_bs_cow) {
if (bdrv_cow_child(unfiltered_bs)) {
const char *base_id = NULL, *base_fmt = NULL;
if (unfiltered_base) {
base_id = s->backing_file_str ?: unfiltered_base->filename;
@@ -99,9 +90,7 @@ static int stream_prepare(Job *job)
}
}

bdrv_graph_wrlock(base);
bdrv_set_backing_hd_drained(unfiltered_bs, base, &local_err);
bdrv_graph_wrunlock();

/*
* This call will do I/O, so the graph can change again from here on.
@@ -149,19 +138,18 @@ static void stream_clean(Job *job)
static int coroutine_fn stream_run(Job *job, Error **errp)
{
StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
BlockDriverState *unfiltered_bs;
BlockDriverState *unfiltered_bs = bdrv_skip_filters(s->target_bs);
int64_t len;
int64_t offset = 0;
int error = 0;
int64_t n = 0; /* bytes */

WITH_GRAPH_RDLOCK_GUARD() {
unfiltered_bs = bdrv_skip_filters(s->target_bs);
if (unfiltered_bs == s->base_overlay) {
/* Nothing to stream */
return 0;
}
if (unfiltered_bs == s->base_overlay) {
/* Nothing to stream */
return 0;
}

WITH_GRAPH_RDLOCK_GUARD() {
len = bdrv_co_getlength(s->target_bs);
if (len < 0) {
return len;
@@ -184,7 +172,7 @@ static int coroutine_fn stream_run(Job *job, Error **errp)
copy = false;

WITH_GRAPH_RDLOCK_GUARD() {
ret = bdrv_co_is_allocated(unfiltered_bs, offset, STREAM_CHUNK, &n);
ret = bdrv_is_allocated(unfiltered_bs, offset, STREAM_CHUNK, &n);
if (ret == 1) {
/* Allocated in the top, no need to copy. */
} else if (ret >= 0) {
@@ -192,9 +180,9 @@ static int coroutine_fn stream_run(Job *job, Error **errp)
* Copy if allocated in the intermediate images. Limit to the
* known-unallocated area [offset, offset+n*BDRV_SECTOR_SIZE).
*/
ret = bdrv_co_is_allocated_above(bdrv_cow_bs(unfiltered_bs),
s->base_overlay, true,
offset, n, &n);
ret = bdrv_is_allocated_above(bdrv_cow_bs(unfiltered_bs),
s->base_overlay, true,
offset, n, &n);
/* Finish early if end of backing file has been reached */
if (ret == 0 && n == 0) {
n = len - offset;
@@ -268,8 +256,6 @@ void stream_start(const char *job_id, BlockDriverState *bs,
assert(!(base && bottom));
assert(!(backing_file_str && bottom));

bdrv_graph_rdlock_main_loop();

if (bottom) {
/*
* New simple interface. The code is written in terms of old interface
@@ -286,7 +272,7 @@ void stream_start(const char *job_id, BlockDriverState *bs,
if (!base_overlay) {
error_setg(errp, "'%s' is not in the backing chain of '%s'",
base->node_name, bs->node_name);
goto out_rdlock;
return;
}

/*
@@ -308,7 +294,7 @@ void stream_start(const char *job_id, BlockDriverState *bs,
if (bs_read_only) {
/* Hold the chain during reopen */
if (bdrv_freeze_backing_chain(bs, above_base, errp) < 0) {
goto out_rdlock;
return;
}

ret = bdrv_reopen_set_read_only(bs, false, errp);
@@ -317,12 +303,10 @@ void stream_start(const char *job_id, BlockDriverState *bs,
bdrv_unfreeze_backing_chain(bs, above_base);

if (ret < 0) {
goto out_rdlock;
return;
}
}

bdrv_graph_rdunlock_main_loop();

opts = qdict_new();

qdict_put_str(opts, "driver", "copy-on-read");
@@ -366,10 +350,8 @@ void stream_start(const char *job_id, BlockDriverState *bs,
* already have our own plans. Also don't allow resize as the image size is
* queried only at the job start and then cached.
*/
bdrv_graph_wrlock(bs);
if (block_job_add_bdrv(&s->common, "active node", bs, 0,
basic_flags | BLK_PERM_WRITE, errp)) {
bdrv_graph_wrunlock();
goto fail;
}

@@ -389,11 +371,9 @@ void stream_start(const char *job_id, BlockDriverState *bs,
ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
basic_flags, errp);
if (ret < 0) {
bdrv_graph_wrunlock();
goto fail;
}
}
bdrv_graph_wrunlock();

s->base_overlay = base_overlay;
s->above_base = above_base;
@@ -417,8 +397,4 @@ fail:
if (bs_read_only) {
bdrv_reopen_set_read_only(bs, true, NULL);
}
return;

out_rdlock:
bdrv_graph_rdunlock_main_loop();
}

@@ -84,9 +84,6 @@ static int throttle_open(BlockDriverState *bs, QDict *options,
if (ret < 0) {
return ret;
}

GRAPH_RDLOCK_GUARD_MAINLOOP();

bs->supported_write_flags = bs->file->bs->supported_write_flags |
BDRV_REQ_WRITE_UNCHANGED;
bs->supported_zero_flags = bs->file->bs->supported_zero_flags |
block/vdi.c (22 changes)
@@ -239,7 +239,7 @@ static void vdi_header_to_le(VdiHeader *header)

static void vdi_header_print(VdiHeader *header)
{
char uuidstr[UUID_STR_LEN];
char uuidstr[37];
QemuUUID uuid;
logout("text %s", header->text);
logout("signature 0x%08x\n", header->signature);
@@ -383,8 +383,6 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
return ret;
}

GRAPH_RDLOCK_GUARD_MAINLOOP();

logout("\n");

ret = bdrv_pread(bs->file, 0, sizeof(header), &header, 0);
@@ -497,9 +495,9 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
error_setg(&s->migration_blocker, "The vdi format used by node '%s' "
"does not support live migration",
bdrv_get_device_or_node_name(bs));

ret = migrate_add_blocker_normal(&s->migration_blocker, errp);
ret = migrate_add_blocker(s->migration_blocker, errp);
if (ret < 0) {
error_free(s->migration_blocker);
goto fail_free_bmap;
}

@@ -520,10 +518,11 @@ static int vdi_reopen_prepare(BDRVReopenState *state,
return 0;
}

static int coroutine_fn GRAPH_RDLOCK
vdi_co_block_status(BlockDriverState *bs, bool want_zero, int64_t offset,
int64_t bytes, int64_t *pnum, int64_t *map,
BlockDriverState **file)
static int coroutine_fn vdi_co_block_status(BlockDriverState *bs,
bool want_zero,
int64_t offset, int64_t bytes,
int64_t *pnum, int64_t *map,
BlockDriverState **file)
{
BDRVVdiState *s = (BDRVVdiState *)bs->opaque;
size_t bmap_index = offset / s->block_size;
@@ -986,10 +985,11 @@ static void vdi_close(BlockDriverState *bs)

qemu_vfree(s->bmap);

migrate_del_blocker(&s->migration_blocker);
migrate_del_blocker(s->migration_blocker);
error_free(s->migration_blocker);
}

static int GRAPH_RDLOCK vdi_has_zero_init(BlockDriverState *bs)
static int vdi_has_zero_init(BlockDriverState *bs)
{
BDRVVdiState *s = bs->opaque;


@@ -55,9 +55,8 @@ static const MSGUID zero_guid = { 0 };

/* Allow peeking at the hdr entry at the beginning of the current
* read index, without advancing the read index */
static int GRAPH_RDLOCK
vhdx_log_peek_hdr(BlockDriverState *bs, VHDXLogEntries *log,
VHDXLogEntryHeader *hdr)
static int vhdx_log_peek_hdr(BlockDriverState *bs, VHDXLogEntries *log,
VHDXLogEntryHeader *hdr)
{
int ret = 0;
uint64_t offset;
@@ -108,7 +107,7 @@ static int vhdx_log_inc_idx(uint32_t idx, uint64_t length)


/* Reset the log to empty */
static void GRAPH_RDLOCK vhdx_log_reset(BlockDriverState *bs, BDRVVHDXState *s)
static void vhdx_log_reset(BlockDriverState *bs, BDRVVHDXState *s)
{
MSGUID guid = { 0 };
s->log.read = s->log.write = 0;
@@ -128,10 +127,9 @@ static void GRAPH_RDLOCK vhdx_log_reset(BlockDriverState *bs, BDRVVHDXState *s)
* not modified.
*
* 0 is returned on success, -errno otherwise. */
static int GRAPH_RDLOCK
vhdx_log_read_sectors(BlockDriverState *bs, VHDXLogEntries *log,
uint32_t *sectors_read, void *buffer,
uint32_t num_sectors, bool peek)
static int vhdx_log_read_sectors(BlockDriverState *bs, VHDXLogEntries *log,
uint32_t *sectors_read, void *buffer,
uint32_t num_sectors, bool peek)
{
int ret = 0;
uint64_t offset;
@@ -335,9 +333,9 @@ static int vhdx_compute_desc_sectors(uint32_t desc_cnt)
* will allocate all the space for buffer, which must be NULL when
* passed into this function. Each descriptor will also be validated,
* and error returned if any are invalid. */
static int GRAPH_RDLOCK
vhdx_log_read_desc(BlockDriverState *bs, BDRVVHDXState *s, VHDXLogEntries *log,
VHDXLogDescEntries **buffer, bool convert_endian)
static int vhdx_log_read_desc(BlockDriverState *bs, BDRVVHDXState *s,
VHDXLogEntries *log, VHDXLogDescEntries **buffer,
bool convert_endian)
{
int ret = 0;
uint32_t desc_sectors;
@@ -414,9 +412,8 @@ exit:
* For a zero descriptor, it may describe multiple sectors to fill with zeroes.
* In this case, it should be noted that zeroes are written to disk, and the
* image file is not extended as a sparse file. */
static int GRAPH_RDLOCK
vhdx_log_flush_desc(BlockDriverState *bs, VHDXLogDescriptor *desc,
VHDXLogDataSector *data)
static int vhdx_log_flush_desc(BlockDriverState *bs, VHDXLogDescriptor *desc,
VHDXLogDataSector *data)
{
int ret = 0;
uint64_t seq, file_offset;
@@ -487,8 +484,8 @@ exit:
* file, and then set the log to 'empty' status once complete.
*
* The log entries should be validate prior to flushing */
static int GRAPH_RDLOCK
vhdx_log_flush(BlockDriverState *bs, BDRVVHDXState *s, VHDXLogSequence *logs)
static int vhdx_log_flush(BlockDriverState *bs, BDRVVHDXState *s,
VHDXLogSequence *logs)
{
int ret = 0;
int i;
@@ -587,10 +584,9 @@ exit:
return ret;
}

static int GRAPH_RDLOCK
vhdx_validate_log_entry(BlockDriverState *bs, BDRVVHDXState *s,
VHDXLogEntries *log, uint64_t seq,
bool *valid, VHDXLogEntryHeader *entry)
static int vhdx_validate_log_entry(BlockDriverState *bs, BDRVVHDXState *s,
VHDXLogEntries *log, uint64_t seq,
bool *valid, VHDXLogEntryHeader *entry)
{
int ret = 0;
VHDXLogEntryHeader hdr;
@@ -667,8 +663,8 @@ free_and_exit:
/* Search through the log circular buffer, and find the valid, active
* log sequence, if any exists
* */
static int GRAPH_RDLOCK
vhdx_log_search(BlockDriverState *bs, BDRVVHDXState *s, VHDXLogSequence *logs)
static int vhdx_log_search(BlockDriverState *bs, BDRVVHDXState *s,
VHDXLogSequence *logs)
{
int ret = 0;
uint32_t tail;
block/vhdx.c (47 changes)
@@ -353,9 +353,8 @@ exit:
*
* - non-current header is updated with largest sequence number
*/
static int GRAPH_RDLOCK
vhdx_update_header(BlockDriverState *bs, BDRVVHDXState *s,
bool generate_data_write_guid, MSGUID *log_guid)
static int vhdx_update_header(BlockDriverState *bs, BDRVVHDXState *s,
bool generate_data_write_guid, MSGUID *log_guid)
{
int ret = 0;
int hdr_idx = 0;
@@ -417,8 +416,8 @@ int vhdx_update_headers(BlockDriverState *bs, BDRVVHDXState *s,
}

/* opens the specified header block from the VHDX file header section */
static void GRAPH_RDLOCK
vhdx_parse_header(BlockDriverState *bs, BDRVVHDXState *s, Error **errp)
static void vhdx_parse_header(BlockDriverState *bs, BDRVVHDXState *s,
Error **errp)
{
int ret;
VHDXHeader *header1;
@@ -518,8 +517,7 @@ exit:
}


static int GRAPH_RDLOCK
vhdx_open_region_tables(BlockDriverState *bs, BDRVVHDXState *s)
static int vhdx_open_region_tables(BlockDriverState *bs, BDRVVHDXState *s)
{
int ret = 0;
uint8_t *buffer;
@@ -636,8 +634,7 @@ fail:
* Also, if the File Parameters indicate this is a differencing file,
* we must also look for the Parent Locator metadata item.
*/
static int GRAPH_RDLOCK
vhdx_parse_metadata(BlockDriverState *bs, BDRVVHDXState *s)
static int vhdx_parse_metadata(BlockDriverState *bs, BDRVVHDXState *s)
{
int ret = 0;
uint8_t *buffer;
@@ -888,8 +885,7 @@ static void vhdx_calc_bat_entries(BDRVVHDXState *s)

}

static int coroutine_mixed_fn GRAPH_RDLOCK
vhdx_check_bat_entries(BlockDriverState *bs, int *errcnt)
static int vhdx_check_bat_entries(BlockDriverState *bs, int *errcnt)
{
BDRVVHDXState *s = bs->opaque;
int64_t image_file_size = bdrv_getlength(bs->file->bs);
@@ -989,7 +985,8 @@ static void vhdx_close(BlockDriverState *bs)
s->bat = NULL;
qemu_vfree(s->parent_entries);
s->parent_entries = NULL;
migrate_del_blocker(&s->migration_blocker);
migrate_del_blocker(s->migration_blocker);
error_free(s->migration_blocker);
qemu_vfree(s->log.hdr);
s->log.hdr = NULL;
vhdx_region_unregister_all(s);
@@ -1004,15 +1001,11 @@ static int vhdx_open(BlockDriverState *bs, QDict *options, int flags,
uint64_t signature;
Error *local_err = NULL;

GLOBAL_STATE_CODE();

ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
if (ret < 0) {
return ret;
}

GRAPH_RDLOCK_GUARD_MAINLOOP();

s->bat = NULL;
s->first_visible_write = true;

@@ -1100,8 +1093,9 @@ static int vhdx_open(BlockDriverState *bs, QDict *options, int flags,
error_setg(&s->migration_blocker, "The vhdx format used by node '%s' "
"does not support live migration",
bdrv_get_device_or_node_name(bs));
ret = migrate_add_blocker_normal(&s->migration_blocker, errp);
ret = migrate_add_blocker(s->migration_blocker, errp);
if (ret < 0) {
error_free(s->migration_blocker);
goto fail;
}

@@ -1699,7 +1693,7 @@ exit:
* Fixed images: default state of the BAT is fully populated, with
* file offsets and state PAYLOAD_BLOCK_FULLY_PRESENT.
*/
static int coroutine_fn GRAPH_UNLOCKED
static int coroutine_fn
vhdx_create_bat(BlockBackend *blk, BDRVVHDXState *s,
uint64_t image_size, VHDXImageType type,
bool use_zero_blocks, uint64_t file_offset,
@@ -1712,7 +1706,6 @@ vhdx_create_bat(BlockBackend *blk, BDRVVHDXState *s,
uint64_t unused;
int block_state;
VHDXSectorInfo sinfo;
bool has_zero_init;

assert(s->bat == NULL);

@@ -1742,13 +1735,9 @@ vhdx_create_bat(BlockBackend *blk, BDRVVHDXState *s,
goto exit;
}

bdrv_graph_co_rdlock();
has_zero_init = bdrv_has_zero_init(blk_bs(blk));
bdrv_graph_co_rdunlock();

if (type == VHDX_TYPE_FIXED ||
use_zero_blocks ||
has_zero_init == 0) {
bdrv_has_zero_init(blk_bs(blk)) == 0) {
/* for a fixed file, the default BAT entry is not zero */
s->bat = g_try_malloc0(length);
if (length && s->bat == NULL) {
@@ -1791,7 +1780,7 @@ exit:
* to create the BAT itself, we will also cause the BAT to be
* created.
*/
static int coroutine_fn GRAPH_UNLOCKED
static int coroutine_fn
vhdx_create_new_region_table(BlockBackend *blk, uint64_t image_size,
uint32_t block_size, uint32_t sector_size,
uint32_t log_size, bool use_zero_blocks,
@@ -2167,9 +2156,9 @@ fail:
* r/w and any log has already been replayed, so there is nothing (currently)
* for us to do here
*/
static int coroutine_fn GRAPH_RDLOCK
vhdx_co_check(BlockDriverState *bs, BdrvCheckResult *result,
BdrvCheckMode fix)
static int coroutine_fn vhdx_co_check(BlockDriverState *bs,
BdrvCheckResult *result,
BdrvCheckMode fix)
{
BDRVVHDXState *s = bs->opaque;

@@ -2182,7 +2171,7 @@ vhdx_co_check(BlockDriverState *bs, BdrvCheckResult *result,
return 0;
}

static int GRAPH_RDLOCK vhdx_has_zero_init(BlockDriverState *bs)
static int vhdx_has_zero_init(BlockDriverState *bs)
{
BDRVVHDXState *s = bs->opaque;
int state;
block/vhdx.h (14 changes)
@@ -401,9 +401,8 @@ typedef struct BDRVVHDXState {

void vhdx_guid_generate(MSGUID *guid);

int GRAPH_RDLOCK
vhdx_update_headers(BlockDriverState *bs, BDRVVHDXState *s, bool rw,
MSGUID *log_guid);
int vhdx_update_headers(BlockDriverState *bs, BDRVVHDXState *s, bool rw,
MSGUID *log_guid);

uint32_t vhdx_update_checksum(uint8_t *buf, size_t size, int crc_offset);
uint32_t vhdx_checksum_calc(uint32_t crc, uint8_t *buf, size_t size,
@@ -411,9 +410,8 @@ uint32_t vhdx_checksum_calc(uint32_t crc, uint8_t *buf, size_t size,

bool vhdx_checksum_is_valid(uint8_t *buf, size_t size, int crc_offset);

int GRAPH_RDLOCK
vhdx_parse_log(BlockDriverState *bs, BDRVVHDXState *s, bool *flushed,
Error **errp);
int vhdx_parse_log(BlockDriverState *bs, BDRVVHDXState *s, bool *flushed,
Error **errp);

int coroutine_fn GRAPH_RDLOCK
vhdx_log_write_and_flush(BlockDriverState *bs, BDRVVHDXState *s,
@@ -449,8 +447,6 @@ void vhdx_metadata_header_le_import(VHDXMetadataTableHeader *hdr);
void vhdx_metadata_header_le_export(VHDXMetadataTableHeader *hdr);
void vhdx_metadata_entry_le_import(VHDXMetadataTableEntry *e);
void vhdx_metadata_entry_le_export(VHDXMetadataTableEntry *e);

int GRAPH_RDLOCK
vhdx_user_visible_write(BlockDriverState *bs, BDRVVHDXState *s);
int vhdx_user_visible_write(BlockDriverState *bs, BDRVVHDXState *s);

#endif
block/vmdk.c (82 changes)
@@ -300,8 +300,7 @@ static void vmdk_free_last_extent(BlockDriverState *bs)
}

/* Return -ve errno, or 0 on success and write CID into *pcid. */
static int GRAPH_RDLOCK
vmdk_read_cid(BlockDriverState *bs, int parent, uint32_t *pcid)
static int vmdk_read_cid(BlockDriverState *bs, int parent, uint32_t *pcid)
{
char *desc;
uint32_t cid;
@@ -381,7 +380,7 @@ out:
return ret;
}

static int coroutine_fn GRAPH_RDLOCK vmdk_is_cid_valid(BlockDriverState *bs)
static int coroutine_fn vmdk_is_cid_valid(BlockDriverState *bs)
{
BDRVVmdkState *s = bs->opaque;
uint32_t cur_pcid;
@@ -416,9 +415,6 @@ static int vmdk_reopen_prepare(BDRVReopenState *state,
BDRVVmdkReopenState *rs;
int i;

GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

assert(state != NULL);
assert(state->bs != NULL);
assert(state->opaque == NULL);
@@ -455,9 +451,6 @@ static void vmdk_reopen_commit(BDRVReopenState *state)
BDRVVmdkReopenState *rs = state->opaque;
int i;

GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

for (i = 0; i < s->num_extents; i++) {
if (rs->extents_using_bs_file[i]) {
s->extents[i].file = state->bs->file;
@@ -472,7 +465,7 @@ static void vmdk_reopen_abort(BDRVReopenState *state)
vmdk_reopen_clean(state);
}

static int GRAPH_RDLOCK vmdk_parent_open(BlockDriverState *bs)
static int vmdk_parent_open(BlockDriverState *bs)
{
char *p_name;
char *desc;
@@ -585,8 +578,8 @@ static int vmdk_add_extent(BlockDriverState *bs,
return 0;
}

static int GRAPH_RDLOCK
vmdk_init_tables(BlockDriverState *bs, VmdkExtent *extent, Error **errp)
static int vmdk_init_tables(BlockDriverState *bs, VmdkExtent *extent,
Error **errp)
{
int ret;
size_t l1_size;
@@ -648,9 +641,9 @@ vmdk_init_tables(BlockDriverState *bs, VmdkExtent *extent, Error **errp)
return ret;
}

static int GRAPH_RDLOCK
vmdk_open_vmfs_sparse(BlockDriverState *bs, BdrvChild *file, int flags,
Error **errp)
static int vmdk_open_vmfs_sparse(BlockDriverState *bs,
BdrvChild *file,
int flags, Error **errp)
{
int ret;
uint32_t magic;
@@ -804,9 +797,9 @@ static int check_se_sparse_volatile_header(VMDKSESparseVolatileHeader *header,
return 0;
}

static int GRAPH_RDLOCK
vmdk_open_se_sparse(BlockDriverState *bs, BdrvChild *file, int flags,
Error **errp)
static int vmdk_open_se_sparse(BlockDriverState *bs,
BdrvChild *file,
int flags, Error **errp)
{
int ret;
VMDKSESparseConstHeader const_header;
@@ -920,9 +913,9 @@ static char *vmdk_read_desc(BdrvChild *file, uint64_t desc_offset, Error **errp)
return buf;
}

static int GRAPH_RDLOCK
vmdk_open_vmdk4(BlockDriverState *bs, BdrvChild *file, int flags,
QDict *options, Error **errp)
static int vmdk_open_vmdk4(BlockDriverState *bs,
BdrvChild *file,
int flags, QDict *options, Error **errp)
{
int ret;
uint32_t magic;
@@ -1102,9 +1095,8 @@ static int vmdk_parse_description(const char *desc, const char *opt_name,
}

/* Open an extent file and append to bs array */
static int GRAPH_RDLOCK
vmdk_open_sparse(BlockDriverState *bs, BdrvChild *file, int flags,
char *buf, QDict *options, Error **errp)
static int vmdk_open_sparse(BlockDriverState *bs, BdrvChild *file, int flags,
char *buf, QDict *options, Error **errp)
{
uint32_t magic;

@@ -1131,9 +1123,8 @@ static const char *next_line(const char *s)
return s;
}

static int GRAPH_RDLOCK
vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
Error **errp)
static int vmdk_parse_extents(const char *desc, BlockDriverState *bs,
QDict *options, Error **errp)
{
int ret;
int matches;
@@ -1152,8 +1143,6 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
char extent_opt_prefix[32];
Error *local_err = NULL;

GLOBAL_STATE_CODE();

for (p = desc; *p; p = next_line(p)) {
/* parse extent line in one of below formats:
*
@@ -1234,11 +1223,9 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
ret = vmdk_add_extent(bs, extent_file, true, sectors,
0, 0, 0, 0, 0, &extent, errp);
if (ret < 0) {
bdrv_graph_rdunlock_main_loop();
bdrv_graph_wrlock(NULL);
bdrv_unref_child(bs, extent_file);
bdrv_graph_wrunlock();
bdrv_graph_rdlock_main_loop();
goto out;
}
extent->flat_start_offset = flat_offset << 9;
@@ -1253,32 +1240,26 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
}
g_free(buf);
if (ret) {
bdrv_graph_rdunlock_main_loop();
bdrv_graph_wrlock(NULL);
bdrv_unref_child(bs, extent_file);
bdrv_graph_wrunlock();
bdrv_graph_rdlock_main_loop();
goto out;
}
extent = &s->extents[s->num_extents - 1];
} else if (!strcmp(type, "SESPARSE")) {
ret = vmdk_open_se_sparse(bs, extent_file, bs->open_flags, errp);
if (ret) {
bdrv_graph_rdunlock_main_loop();
bdrv_graph_wrlock(NULL);
bdrv_unref_child(bs, extent_file);
bdrv_graph_wrunlock();
bdrv_graph_rdlock_main_loop();
goto out;
}
extent = &s->extents[s->num_extents - 1];
} else {
error_setg(errp, "Unsupported extent type '%s'", type);
bdrv_graph_rdunlock_main_loop();
bdrv_graph_wrlock(NULL);
bdrv_unref_child(bs, extent_file);
bdrv_graph_wrunlock();
bdrv_graph_rdlock_main_loop();
ret = -ENOTSUP;
goto out;
}
@@ -1302,9 +1283,8 @@ out:
return ret;
}

static int GRAPH_RDLOCK
vmdk_open_desc_file(BlockDriverState *bs, int flags, char *buf, QDict *options,
Error **errp)
static int vmdk_open_desc_file(BlockDriverState *bs, int flags, char *buf,
QDict *options, Error **errp)
{
int ret;
char ct[128];
@@ -1393,8 +1373,9 @@ static int vmdk_open(BlockDriverState *bs, QDict *options, int flags,
error_setg(&s->migration_blocker, "The vmdk format used by node '%s' "
"does not support live migration",
bdrv_get_device_or_node_name(bs));
ret = migrate_add_blocker_normal(&s->migration_blocker, errp);
ret = migrate_add_blocker(s->migration_blocker, errp);
if (ret < 0) {
error_free(s->migration_blocker);
goto fail;
}

@@ -2554,10 +2535,7 @@ vmdk_co_do_create(int64_t size,
ret = -EINVAL;
goto exit;
}

bdrv_graph_co_rdlock();
ret = vmdk_read_cid(blk_bs(backing), 0, &parent_cid);
bdrv_graph_co_rdunlock();
blk_co_unref(backing);
if (ret) {
error_setg(errp, "Failed to read parent CID");
@@ -2876,7 +2854,8 @@ static void vmdk_close(BlockDriverState *bs)
vmdk_free_extents(bs);
g_free(s->create_type);

migrate_del_blocker(&s->migration_blocker);
migrate_del_blocker(s->migration_blocker);
error_free(s->migration_blocker);
}

static int64_t coroutine_fn GRAPH_RDLOCK
@@ -2904,7 +2883,7 @@ vmdk_co_get_allocated_file_size(BlockDriverState *bs)
return ret;
}

static int GRAPH_RDLOCK vmdk_has_zero_init(BlockDriverState *bs)
static int vmdk_has_zero_init(BlockDriverState *bs)
{
int i;
BDRVVmdkState *s = bs->opaque;
@@ -2921,7 +2900,7 @@ static int GRAPH_RDLOCK vmdk_has_zero_init(BlockDriverState *bs)
return 1;
}

static VmdkExtentInfo * GRAPH_RDLOCK vmdk_get_extent_info(VmdkExtent *extent)
static VmdkExtentInfo *vmdk_get_extent_info(VmdkExtent *extent)
{
VmdkExtentInfo *info = g_new0(VmdkExtentInfo, 1);

@@ -2998,8 +2977,8 @@ vmdk_co_check(BlockDriverState *bs, BdrvCheckResult *result, BdrvCheckMode fix)
return ret;
}

static ImageInfoSpecific * GRAPH_RDLOCK
vmdk_get_specific_info(BlockDriverState *bs, Error **errp)
static ImageInfoSpecific *vmdk_get_specific_info(BlockDriverState *bs,
Error **errp)
{
int i;
BDRVVmdkState *s = bs->opaque;
@@ -3054,9 +3033,8 @@ vmdk_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
return 0;
}

static void GRAPH_RDLOCK
vmdk_gather_child_options(BlockDriverState *bs, QDict *target,
bool backing_overridden)
static void vmdk_gather_child_options(BlockDriverState *bs, QDict *target,
bool backing_overridden)
{
/* No children but file and backing can be explicitly specified (TODO) */
qdict_put(target, "file",
block/vpc.c (11 changes)
@@ -238,8 +238,6 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
return ret;
}

GRAPH_RDLOCK_GUARD_MAINLOOP();

opts = qemu_opts_create(&vpc_runtime_opts, NULL, 0, &error_abort);
if (!qemu_opts_absorb_qdict(opts, options, errp)) {
ret = -EINVAL;
@@ -451,9 +449,9 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
error_setg(&s->migration_blocker, "The vpc format used by node '%s' "
"does not support live migration",
bdrv_get_device_or_node_name(bs));

ret = migrate_add_blocker_normal(&s->migration_blocker, errp);
ret = migrate_add_blocker(s->migration_blocker, errp);
if (ret < 0) {
error_free(s->migration_blocker);
goto fail;
}

@@ -1170,7 +1168,7 @@ fail:
}


static int GRAPH_RDLOCK vpc_has_zero_init(BlockDriverState *bs)
static int vpc_has_zero_init(BlockDriverState *bs)
{
BDRVVPCState *s = bs->opaque;

@@ -1189,7 +1187,8 @@ static void vpc_close(BlockDriverState *bs)
g_free(s->pageentry_u8);
#endif

migrate_del_blocker(&s->migration_blocker);
migrate_del_blocker(s->migration_blocker);
error_free(s->migration_blocker);
}

static QemuOptsList vpc_create_opts = {
@@ -1144,8 +1144,6 @@ static int vvfat_open(BlockDriverState *bs, QDict *options, int flags,
QemuOpts *opts;
int ret;

GRAPH_RDLOCK_GUARD_MAINLOOP();

#ifdef DEBUG
vvv = s;
#endif
@@ -1268,8 +1266,9 @@ static int vvfat_open(BlockDriverState *bs, QDict *options, int flags,
"The vvfat (rw) format used by node '%s' "
"does not support live migration",
bdrv_get_device_or_node_name(bs));
ret = migrate_add_blocker_normal(&s->migration_blocker, errp);
ret = migrate_add_blocker(s->migration_blocker, errp);
if (ret < 0) {
error_free(s->migration_blocker);
goto fail;
}
}
@@ -1481,8 +1480,8 @@ vvfat_read(BlockDriverState *bs, int64_t sector_num, uint8_t *buf, int nb_sector
if (s->qcow) {
int64_t n;
int ret;
ret = bdrv_co_is_allocated(s->qcow->bs, sector_num * BDRV_SECTOR_SIZE,
(nb_sectors - i) * BDRV_SECTOR_SIZE, &n);
ret = bdrv_is_allocated(s->qcow->bs, sector_num * BDRV_SECTOR_SIZE,
(nb_sectors - i) * BDRV_SECTOR_SIZE, &n);
if (ret < 0) {
return ret;
}
@@ -1807,10 +1806,10 @@ cluster_was_modified(BDRVVVFATState *s, uint32_t cluster_num)
}

for (i = 0; !was_modified && i < s->sectors_per_cluster; i++) {
was_modified = bdrv_co_is_allocated(s->qcow->bs,
(cluster2sector(s, cluster_num) +
i) * BDRV_SECTOR_SIZE,
BDRV_SECTOR_SIZE, NULL);
was_modified = bdrv_is_allocated(s->qcow->bs,
(cluster2sector(s, cluster_num) +
i) * BDRV_SECTOR_SIZE,
BDRV_SECTOR_SIZE, NULL);
}

/*
@@ -1968,9 +1967,9 @@ get_cluster_count_for_direntry(BDRVVVFATState* s, direntry_t* direntry, const ch
for (i = 0; i < s->sectors_per_cluster; i++) {
int res;

res = bdrv_co_is_allocated(s->qcow->bs,
(offs + i) * BDRV_SECTOR_SIZE,
BDRV_SECTOR_SIZE, NULL);
res = bdrv_is_allocated(s->qcow->bs,
(offs + i) * BDRV_SECTOR_SIZE,
BDRV_SECTOR_SIZE, NULL);
if (res < 0) {
return -1;
}
@@ -3238,7 +3237,8 @@ static void vvfat_close(BlockDriverState *bs)
g_free(s->cluster_buffer);

if (s->qcow) {
migrate_del_blocker(&s->migration_blocker);
migrate_del_blocker(s->migration_blocker);
error_free(s->migration_blocker);
}
}
blockdev.c (141 changes)
@@ -255,13 +255,13 @@ void drive_check_orphaned(void)
* Ignore default drives, because we create certain default
* drives unconditionally, then leave them unclaimed. Not the
* users fault.
* Ignore IF_VIRTIO or IF_XEN, because it gets desugared into
* -device, so we can leave failing to -device.
* Ignore IF_VIRTIO, because it gets desugared into -device,
* so we can leave failing to -device.
* Ignore IF_NONE, because leaving unclaimed IF_NONE remains
* available for device_add is a feature.
*/
if (dinfo->is_default || dinfo->type == IF_VIRTIO
|| dinfo->type == IF_XEN || dinfo->type == IF_NONE) {
|| dinfo->type == IF_NONE) {
continue;
}
if (!blk_get_attached_dev(blk)) {
@@ -977,15 +977,6 @@ DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type,
qemu_opt_set(devopts, "driver", "virtio-blk", &error_abort);
qemu_opt_set(devopts, "drive", qdict_get_str(bs_opts, "id"),
&error_abort);
} else if (type == IF_XEN) {
QemuOpts *devopts;
devopts = qemu_opts_create(qemu_find_opts("device"), NULL, 0,
&error_abort);
qemu_opt_set(devopts, "driver",
(media == MEDIA_CDROM) ? "xen-cdrom" : "xen-disk",
&error_abort);
qemu_opt_set(devopts, "drive", qdict_get_str(bs_opts, "id"),
&error_abort);
}

filename = qemu_opt_get(legacy_opts, "file");
@@ -1050,8 +1041,6 @@ static BlockDriverState *qmp_get_root_bs(const char *name, Error **errp)
BlockDriverState *bs;
AioContext *aio_context;

GRAPH_RDLOCK_GUARD_MAINLOOP();

bs = bdrv_lookup_bs(name, name, errp);
if (bs == NULL) {
return NULL;
@@ -1147,9 +1136,6 @@ SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device,
SnapshotInfo *info = NULL;
int ret;

GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

bs = qmp_get_root_bs(device, errp);
if (!bs) {
return NULL;
@@ -1235,9 +1221,6 @@ static void internal_snapshot_action(BlockdevSnapshotInternal *internal,
AioContext *aio_context;
int ret1;

GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

tran_add(tran, &internal_snapshot_drv, state);

device = internal->device;
@@ -1326,9 +1309,6 @@ static void internal_snapshot_abort(void *opaque)
AioContext *aio_context;
Error *local_error = NULL;

GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

if (!state->created) {
return;
}
@@ -1610,12 +1590,7 @@ static void external_snapshot_abort(void *opaque)
aio_context_acquire(aio_context);
}

bdrv_drained_begin(state->new_bs);
bdrv_graph_wrlock(state->old_bs);
bdrv_replace_node(state->new_bs, state->old_bs, &error_abort);
bdrv_graph_wrunlock();
bdrv_drained_end(state->new_bs);

bdrv_unref(state->old_bs); /* bdrv_replace_node() ref'ed old_bs */

aio_context_release(aio_context);
@@ -1679,8 +1654,6 @@ static void drive_backup_action(DriveBackup *backup,
bool set_backing_hd = false;
int ret;

GLOBAL_STATE_CODE();

tran_add(tran, &drive_backup_drv, state);

if (!backup->has_mode) {
@@ -1710,9 +1683,7 @@ static void drive_backup_action(DriveBackup *backup,
}

/* Early check to avoid creating target */
bdrv_graph_rdlock_main_loop();
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
bdrv_graph_rdunlock_main_loop();
goto out;
}

@@ -1739,7 +1710,6 @@ static void drive_backup_action(DriveBackup *backup,
flags |= BDRV_O_NO_BACKING;
set_backing_hd = true;
}
bdrv_graph_rdunlock_main_loop();

size = bdrv_getlength(bs);
if (size < 0) {
@@ -1751,13 +1721,10 @@ static void drive_backup_action(DriveBackup *backup,
assert(format);
if (source) {
/* Implicit filters should not appear in the filename */
BlockDriverState *explicit_backing;
BlockDriverState *explicit_backing =
bdrv_skip_implicit_filters(source);

bdrv_graph_rdlock_main_loop();
explicit_backing = bdrv_skip_implicit_filters(source);
bdrv_refresh_filename(explicit_backing);
bdrv_graph_rdunlock_main_loop();

bdrv_img_create(backup->target, format,
explicit_backing->filename,
explicit_backing->drv->format_name, NULL,
@@ -2377,13 +2344,10 @@ void coroutine_fn qmp_block_resize(const char *device, const char *node_name,
return;
}

bdrv_graph_co_rdlock();
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, NULL)) {
error_setg(errp, QERR_DEVICE_IN_USE, device);
bdrv_graph_co_rdunlock();
return;
}
bdrv_graph_co_rdunlock();

blk = blk_co_new_with_bs(bs, BLK_PERM_RESIZE, BLK_PERM_ALL, errp);
if (!blk) {
@@ -2423,8 +2387,6 @@ void qmp_block_stream(const char *job_id, const char *device,
Error *local_err = NULL;
int job_flags = JOB_DEFAULT;

GLOBAL_STATE_CODE();

if (base && base_node) {
error_setg(errp, "'base' and 'base-node' cannot be specified "
"at the same time");
@@ -2455,12 +2417,11 @@ void qmp_block_stream(const char *job_id, const char *device,
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);

bdrv_graph_rdlock_main_loop();
if (base) {
base_bs = bdrv_find_backing_image(bs, base);
if (base_bs == NULL) {
error_setg(errp, "Can't find '%s' in the backing chain", base);
goto out_rdlock;
goto out;
}
assert(bdrv_get_aio_context(base_bs) == aio_context);
}
@@ -2468,36 +2429,35 @@ void qmp_block_stream(const char *job_id, const char *device,
if (base_node) {
base_bs = bdrv_lookup_bs(NULL, base_node, errp);
if (!base_bs) {
goto out_rdlock;
goto out;
}
if (bs == base_bs || !bdrv_chain_contains(bs, base_bs)) {
error_setg(errp, "Node '%s' is not a backing image of '%s'",
base_node, device);
goto out_rdlock;
goto out;
}
assert(bdrv_get_aio_context(base_bs) == aio_context);

bdrv_refresh_filename(base_bs);
}

if (bottom) {
bottom_bs = bdrv_lookup_bs(NULL, bottom, errp);
if (!bottom_bs) {
goto out_rdlock;
goto out;
}
if (!bottom_bs->drv) {
error_setg(errp, "Node '%s' is not open", bottom);
goto out_rdlock;
goto out;
}
if (bottom_bs->drv->is_filter) {
error_setg(errp, "Node '%s' is a filter, use a non-filter node "
"as 'bottom'", bottom);
goto out_rdlock;
goto out;
}
if (!bdrv_chain_contains(bs, bottom_bs)) {
error_setg(errp, "Node '%s' is not in a chain starting from '%s'",
bottom, device);
goto out_rdlock;
goto out;
}
assert(bdrv_get_aio_context(bottom_bs) == aio_context);
}
@@ -2510,10 +2470,9 @@ void qmp_block_stream(const char *job_id, const char *device,
iter = bdrv_filter_or_cow_bs(iter))
{
if (bdrv_op_is_blocked(iter, BLOCK_OP_TYPE_STREAM, errp)) {
goto out_rdlock;
goto out;
}
}
bdrv_graph_rdunlock_main_loop();

/* if we are streaming the entire chain, the result will have no backing
* file, and specifying one is therefore an error */
@@ -2542,11 +2501,6 @@ void qmp_block_stream(const char *job_id, const char *device,

out:
aio_context_release(aio_context);
return;

out_rdlock:
bdrv_graph_rdunlock_main_loop();
aio_context_release(aio_context);
}

void qmp_block_commit(const char *job_id, const char *device,
@@ -2881,8 +2835,6 @@ BlockDeviceInfoList *qmp_query_named_block_nodes(bool has_flat,

XDbgBlockGraph *qmp_x_debug_query_block_graph(Error **errp)
{
GRAPH_RDLOCK_GUARD_MAINLOOP();

return bdrv_get_xdbg_block_graph(errp);
}

@@ -2984,7 +2936,6 @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs,

if (replaces) {
BlockDriverState *to_replace_bs;
AioContext *aio_context;
AioContext *replace_aio_context;
int64_t bs_size, replace_size;

@@ -2999,19 +2950,10 @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs,
return;
}

aio_context = bdrv_get_aio_context(bs);
replace_aio_context = bdrv_get_aio_context(to_replace_bs);
/*
* bdrv_getlength() is a co-wrapper and uses AIO_WAIT_WHILE. Be sure not
* to acquire the same AioContext twice.
*/
if (replace_aio_context != aio_context) {
aio_context_acquire(replace_aio_context);
}
aio_context_acquire(replace_aio_context);
replace_size = bdrv_getlength(to_replace_bs);
if (replace_aio_context != aio_context) {
aio_context_release(replace_aio_context);
}
aio_context_release(replace_aio_context);

if (replace_size < 0) {
error_setg_errno(errp, -replace_size,
@@ -3056,9 +2998,7 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp)
}

/* Early check to avoid creating target */
bdrv_graph_rdlock_main_loop();
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_MIRROR_SOURCE, errp)) {
bdrv_graph_rdunlock_main_loop();
return;
}

@@ -3082,7 +3022,6 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp)
if (arg->sync == MIRROR_SYNC_MODE_NONE) {
target_backing_bs = bs;
}
bdrv_graph_rdunlock_main_loop();

size = bdrv_getlength(bs);
if (size < 0) {
@@ -3115,21 +3054,16 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp)
bdrv_img_create(arg->target, format,
NULL, NULL, NULL, size, flags, false, &local_err);
} else {
BlockDriverState *explicit_backing;
/* Implicit filters should not appear in the filename */
BlockDriverState *explicit_backing =
bdrv_skip_implicit_filters(target_backing_bs);

switch (arg->mode) {
case NEW_IMAGE_MODE_EXISTING:
break;
case NEW_IMAGE_MODE_ABSOLUTE_PATHS:
/*
* Create new image with backing file.
* Implicit filters should not appear in the filename.
*/
bdrv_graph_rdlock_main_loop();
explicit_backing = bdrv_skip_implicit_filters(target_backing_bs);
/* create new image with backing file */
bdrv_refresh_filename(explicit_backing);
bdrv_graph_rdunlock_main_loop();

bdrv_img_create(arg->target, format,
explicit_backing->filename,
explicit_backing->drv->format_name,
@@ -3165,11 +3099,9 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp)
return;
}

bdrv_graph_rdlock_main_loop();
zero_target = (arg->sync == MIRROR_SYNC_MODE_FULL &&
(arg->mode == NEW_IMAGE_MODE_EXISTING ||
!bdrv_has_zero_init(target_bs)));
bdrv_graph_rdunlock_main_loop();


/* Honor bdrv_try_change_aio_context() context acquisition requirements. */
@@ -3412,20 +3344,6 @@ void qmp_block_job_dismiss(const char *id, Error **errp)
job_dismiss_locked(&job, errp);
}

void qmp_block_job_change(BlockJobChangeOptions *opts, Error **errp)
{
BlockJob *job;

JOB_LOCK_GUARD();
job = find_block_job_locked(opts->id, errp);

if (!job) {
return;
}

block_job_change_locked(job, opts, errp);
}

void qmp_change_backing_file(const char *device,
const char *image_node_name,
const char *backing_file,
@@ -3446,38 +3364,35 @@ void qmp_change_backing_file(const char *device,

aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);

bdrv_graph_rdlock_main_loop();

image_bs = bdrv_lookup_bs(NULL, image_node_name, &local_err);
if (local_err) {
error_propagate(errp, local_err);
goto out_rdlock;
goto out;
}

if (!image_bs) {
error_setg(errp, "image file not found");
goto out_rdlock;
goto out;
}

if (bdrv_find_base(image_bs) == image_bs) {
error_setg(errp, "not allowing backing file change on an image "
"without a backing file");
goto out_rdlock;
goto out;
}

/* even though we are not necessarily operating on bs, we need it to
* determine if block ops are currently prohibited on the chain */
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_CHANGE, errp)) {
goto out_rdlock;
goto out;
}

/* final sanity check */
if (!bdrv_chain_contains(bs, image_bs)) {
error_setg(errp, "'%s' and image file are not in the same chain",
device);
goto out_rdlock;
goto out;
}
bdrv_graph_rdunlock_main_loop();

/* if not r/w, reopen to make r/w */
ro = bdrv_is_read_only(image_bs);
@@ -3505,11 +3420,6 @@ void qmp_change_backing_file(const char *device,

out:
aio_context_release(aio_context);
return;

out_rdlock:
bdrv_graph_rdunlock_main_loop();
aio_context_release(aio_context);
}

void qmp_blockdev_add(BlockdevOptions *options, Error **errp)
@@ -3599,7 +3509,6 @@ void qmp_blockdev_del(const char *node_name, Error **errp)
BlockDriverState *bs;

GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();

bs = bdrv_find_node(node_name);
if (!bs) {
@@ -3727,8 +3636,6 @@ void qmp_x_blockdev_set_iothread(const char *node_name, StrOrNull *iothread,
AioContext *new_context;
BlockDriverState *bs;

GRAPH_RDLOCK_GUARD_MAINLOOP();

bs = bdrv_find_node(node_name);
if (!bs) {
error_setg(errp, "Failed to find node with node-name='%s'", node_name);
blockjob.c (35 changes)
@@ -198,9 +198,7 @@ void block_job_remove_all_bdrv(BlockJob *job)
* one to make sure that such a concurrent access does not attempt
* to process an already freed BdrvChild.
*/
aio_context_release(job->job.aio_context);
bdrv_graph_wrlock(NULL);
aio_context_acquire(job->job.aio_context);
while (job->nodes) {
GSList *l = job->nodes;
BdrvChild *c = l->data;
@@ -330,26 +328,6 @@ static bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
return block_job_set_speed_locked(job, speed, errp);
}

void block_job_change_locked(BlockJob *job, BlockJobChangeOptions *opts,
Error **errp)
{
const BlockJobDriver *drv = block_job_driver(job);

GLOBAL_STATE_CODE();

if (job_apply_verb_locked(&job->job, JOB_VERB_CHANGE, errp)) {
return;
}

if (drv->change) {
job_unlock();
drv->change(job, opts, errp);
job_lock();
} else {
error_setg(errp, "Job type does not support change");
}
}

void block_job_ratelimit_processed_bytes(BlockJob *job, uint64_t n)
{
IO_CODE();
@@ -378,7 +356,6 @@ BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp)
{
BlockJobInfo *info;
uint64_t progress_current, progress_total;
const BlockJobDriver *drv = block_job_driver(job);

GLOBAL_STATE_CODE();

@@ -391,7 +368,7 @@ BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp)
&progress_total);

info = g_new0(BlockJobInfo, 1);
info->type = job_type(&job->job);
info->type = g_strdup(job_type_str(&job->job));
info->device = g_strdup(job->job.id);
info->busy = job->job.busy;
info->paused = job->job.pause_count > 0;
@@ -408,11 +385,6 @@ BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp)
g_strdup(error_get_pretty(job->job.err)) :
g_strdup(strerror(-job->job.ret));
}
if (drv->query) {
job_unlock();
drv->query(job, info);
job_lock();
}
return info;
}

@@ -514,8 +486,6 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
int ret;
GLOBAL_STATE_CODE();

bdrv_graph_wrlock(bs);

if (job_id == NULL && !(flags & JOB_INTERNAL)) {
job_id = bdrv_get_device_name(bs);
}
@@ -523,7 +493,6 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
job = job_create(job_id, &driver->job_driver, txn, bdrv_get_aio_context(bs),
flags, cb, opaque, errp);
if (job == NULL) {
bdrv_graph_wrunlock();
return NULL;
}

@@ -563,11 +532,9 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
goto fail;
}

bdrv_graph_wrunlock();
return job;

fail:
bdrv_graph_wrunlock();
job_early_fail(&job->job);
return NULL;
}

@@ -21,7 +21,6 @@
#define TARGET_ARCH_H

#include "qemu.h"
#include "target/arm/cpu-features.h"

void target_cpu_set_tls(CPUARMState *env, target_ulong newtls);
target_ulong target_cpu_get_tls(CPUARMState *env);

@@ -118,7 +118,7 @@ void fork_end(int child)
*/
CPU_FOREACH_SAFE(cpu, next_cpu) {
if (cpu != thread_cpu) {
QTAILQ_REMOVE_RCU(&cpus_queue, cpu, node);
QTAILQ_REMOVE_RCU(&cpus, cpu, node);
}
}
mmap_fork_end(child);

@@ -171,7 +171,7 @@ static int msmouse_chr_write(struct Chardev *s, const uint8_t *buf, int len)
return len;
}

static const QemuInputHandler msmouse_handler = {
static QemuInputHandler msmouse_handler = {
.name = "QEMU Microsoft Mouse",
.mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_REL,
.event = msmouse_input_event,

@@ -178,7 +178,7 @@ static void wctablet_input_sync(DeviceState *dev)
}
}

static const QemuInputHandler wctablet_handler = {
static QemuInputHandler wctablet_handler = {
.name = "QEMU Wacom Pen Tablet",
.mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_ABS,
.event = wctablet_input_event,

@@ -14,7 +14,6 @@ CONFIG_SAM460EX=y
CONFIG_MAC_OLDWORLD=y
CONFIG_MAC_NEWWORLD=y

CONFIG_AMIGAONE=y
CONFIG_PEGASOS2=y

# For PReP

@@ -1,9 +0,0 @@
# target-specific defaults, can still be overridden on
# the command line

[built-in options]
bindir = ''
prefix = '/qemu'

[project options]
qemu_suffix = ''
@@ -1,5 +1,4 @@
TARGET_ARCH=hppa
TARGET_ABI32=y
TARGET_SYSTBL_ABI=common,32
TARGET_SYSTBL=syscall.tbl
TARGET_BIG_ENDIAN=y

@@ -1,4 +1,3 @@
# Default configuration for loongarch64-linux-user
TARGET_ARCH=loongarch64
TARGET_BASE_ARCH=loongarch
TARGET_XML_FILES=gdb-xml/loongarch-base64.xml gdb-xml/loongarch-fpu.xml

@@ -1,3 +1,2 @@
TARGET_ARCH=sparc
TARGET_BIG_ENDIAN=y
TARGET_SUPPORTS_MTTCG=y

@@ -1,4 +1,3 @@
TARGET_ARCH=sparc64
TARGET_BASE_ARCH=sparc
TARGET_BIG_ENDIAN=y
TARGET_SUPPORTS_MTTCG=y
Some files were not shown because too many files have changed in this diff.