Compare commits
46 Commits
migration- ... multifd-fi

| Author | SHA1 | Date |
|---|---|---|
| | d1d244fee2 | |
| | e9097aa796 | |
| | 6852aa7095 | |
| | 0c8268e451 | |
| | 3178bc4887 | |
| | 1b1abab324 | |
| | a91d02beff | |
| | 18390bf0db | |
| | 2e56913132 | |
| | a65773761f | |
| | 878376502b | |
| | 3e57cbec66 | |
| | 820d8f89dd | |
| | 20a1fbc213 | |
| | a0c8b80118 | |
| | 511b4c771f | |
| | d45ef03731 | |
| | 0cf59dd7e5 | |
| | 4f09663a06 | |
| | 16850352af | |
| | a00eb30c25 | |
| | 1aa3e96e5e | |
| | 3e17d0e38e | |
| | 59f0f3484f | |
| | 0a6f5a13e7 | |
| | a2fc948619 | |
| | 998282b3e1 | |
| | 4e741d0b73 | |
| | d483cd144c | |
| | 2323516885 | |
| | aedb8c129f | |
| | 9822b2b727 | |
| | 234536b94c | |
| | 7153bf88c6 | |
| | ab92f505c9 | |
| | 6d11fca4f8 | |
| | a1fcb9c8dd | |
| | 0de102e7a0 | |
| | 1bbba6b1c9 | |
| | a8a71314aa | |
| | ee48645f4b | |
| | 2a87d0b46a | |
| | 4f1b968bbd | |
| | 02d8b7234c | |
| | 29ec5d67fb | |
| | 1af5512e79 | |
@@ -68,7 +68,7 @@ variables:

#############################################################
# Stage 2: fine tune execution of jobs in specific scenarios
# where the catch all logic is inappropriate
# where the catch all logic is inapprorpaite
#############################################################

# Optional jobs should not be run unless manually triggered
@@ -2,21 +2,11 @@
extends: .base_job_template
stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
cache:
paths:
- ccache
key: "$CI_JOB_NAME"
when: always
before_script:
- JOBS=$(expr $(nproc) + 1)
script:
- export CCACHE_BASEDIR="$(pwd)"
- export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
- export CCACHE_MAXSIZE="500M"
- export PATH="$CCACHE_WRAPPERSDIR:$PATH"
- mkdir build
- cd build
- ccache --zero-stats
- ../configure --enable-werror --disable-docs --enable-fdt=system
${TARGETS:+--target-list="$TARGETS"}
$CONFIGURE_ARGS ||
@@ -30,7 +20,6 @@
then
make -j"$JOBS" $MAKE_CHECK_ARGS ;
fi
- ccache --show-stats

# We jump some hoops in common_test_job_template to avoid
# rebuilding all the object files we skip in the artifacts
@@ -30,7 +30,6 @@ avocado-system-alpine:
variables:
IMAGE: alpine
MAKE_CHECK_ARGS: check-avocado
AVOCADO_TAGS: arch:avr arch:loongarch64 arch:mips64 arch:mipsel

build-system-ubuntu:
extends:
@@ -41,7 +40,8 @@ build-system-ubuntu:
variables:
IMAGE: ubuntu2204
CONFIGURE_ARGS: --enable-docs
TARGETS: alpha-softmmu microblazeel-softmmu mips64el-softmmu
TARGETS: alpha-softmmu cris-softmmu hppa-softmmu
microblazeel-softmmu mips64el-softmmu
MAKE_CHECK_ARGS: check-build

check-system-ubuntu:
@@ -61,7 +61,6 @@ avocado-system-ubuntu:
variables:
IMAGE: ubuntu2204
MAKE_CHECK_ARGS: check-avocado
AVOCADO_TAGS: arch:alpha arch:microblaze arch:mips64el

build-system-debian:
extends:
@@ -73,7 +72,7 @@ build-system-debian:
IMAGE: debian-amd64
CONFIGURE_ARGS: --with-coroutine=sigaltstack
TARGETS: arm-softmmu i386-softmmu riscv64-softmmu sh4eb-softmmu
sparc-softmmu xtensa-softmmu
sparc-softmmu xtensaeb-softmmu
MAKE_CHECK_ARGS: check-build

check-system-debian:
@@ -93,7 +92,6 @@ avocado-system-debian:
variables:
IMAGE: debian-amd64
MAKE_CHECK_ARGS: check-avocado
AVOCADO_TAGS: arch:arm arch:i386 arch:riscv64 arch:sh4 arch:sparc arch:xtensa

crash-test-debian:
extends: .native_test_job_template
@@ -105,7 +103,7 @@ crash-test-debian:
script:
- cd build
- make NINJA=":" check-venv
- pyvenv/bin/python3 scripts/device-crash-test -q --tcg-only ./qemu-system-i386
- tests/venv/bin/python3 scripts/device-crash-test -q --tcg-only ./qemu-system-i386

build-system-fedora:
extends:
@@ -116,7 +114,7 @@ build-system-fedora:
variables:
IMAGE: fedora
CONFIGURE_ARGS: --disable-gcrypt --enable-nettle --enable-docs
TARGETS: microblaze-softmmu mips-softmmu
TARGETS: tricore-softmmu microblaze-softmmu mips-softmmu
xtensa-softmmu m68k-softmmu riscv32-softmmu ppc-softmmu sparc64-softmmu
MAKE_CHECK_ARGS: check-build

@@ -137,8 +135,6 @@ avocado-system-fedora:
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check-avocado
AVOCADO_TAGS: arch:microblaze arch:mips arch:xtensa arch:m68k
arch:riscv32 arch:ppc arch:sparc64

crash-test-fedora:
extends: .native_test_job_template
@@ -150,8 +146,8 @@ crash-test-fedora:
script:
- cd build
- make NINJA=":" check-venv
- pyvenv/bin/python3 scripts/device-crash-test -q ./qemu-system-ppc
- pyvenv/bin/python3 scripts/device-crash-test -q ./qemu-system-riscv32
- tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-ppc
- tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-riscv32

build-system-centos:
extends:
@@ -184,8 +180,6 @@ avocado-system-centos:
variables:
IMAGE: centos8
MAKE_CHECK_ARGS: check-avocado
AVOCADO_TAGS: arch:ppc64 arch:or1k arch:390x arch:x86_64 arch:rx
arch:sh4 arch:nios2

build-system-opensuse:
extends:
@@ -215,7 +209,6 @@ avocado-system-opensuse:
variables:
IMAGE: opensuse-leap
MAKE_CHECK_ARGS: check-avocado
AVOCADO_TAGS: arch:s390x arch:x86_64 arch:aarch64

# This jobs explicitly disable TCG (--disable-tcg), KVM is detected by
@@ -15,10 +15,8 @@
stage: build
image: registry.gitlab.com/libvirt/libvirt-ci/cirrus-run:master
needs: []
# 20 mins larger than "timeout_in" in cirrus/build.yml
# as there's often a 5-10 minute delay before Cirrus CI
# actually starts the task
timeout: 80m
allow_failure: true
script:
- source .gitlab-ci.d/cirrus/$NAME.vars
- sed -e "s|[@]CI_REPOSITORY_URL@|$CI_REPOSITORY_URL|g"
@@ -52,7 +50,7 @@ x64-freebsd-13-build:
NAME: freebsd-13
CIRRUS_VM_INSTANCE_TYPE: freebsd_instance
CIRRUS_VM_IMAGE_SELECTOR: image_family
CIRRUS_VM_IMAGE_NAME: freebsd-13-2
CIRRUS_VM_IMAGE_NAME: freebsd-13-1
CIRRUS_VM_CPUS: 8
CIRRUS_VM_RAM: 8G
UPDATE_COMMAND: pkg update; pkg upgrade -y
@@ -16,8 +16,6 @@ env:
TEST_TARGETS: "@TEST_TARGETS@"

build_task:
# A little shorter than GitLab timeout in ../cirrus.yml
timeout_in: 60m
install_script:
- @UPDATE_COMMAND@
- @INSTALL_COMMAND@ @PKGS@
@@ -11,6 +11,6 @@ MAKE='/usr/local/bin/gmake'
NINJA='/usr/local/bin/ninja'
PACKAGING_COMMAND='pkg'
PIP3='/usr/local/bin/pip-3.8'
PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson mtools ncurses nettle ninja opencv pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-tomli py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio socat spice-protocol tesseract usbredir virglrenderer vte3 xorriso zstd'
PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson mtools ncurses nettle ninja opencv pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio socat spice-protocol tesseract usbredir virglrenderer vte3 xorriso zstd'
PYPI_PKGS=''
PYTHON='/usr/local/bin/python3'

@@ -11,6 +11,6 @@ MAKE='/opt/homebrew/bin/gmake'
NINJA='/opt/homebrew/bin/ninja'
PACKAGING_COMMAND='brew'
PIP3='/opt/homebrew/bin/pip3'
PKGS='bash bc bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo json-c libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson mtools ncurses nettle ninja pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy socat sparse spice-protocol swtpm tesseract usbredir vde vte3 xorriso zlib zstd'
PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme tomli'
PKGS='bash bc bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo json-c libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson mtools ncurses nettle ninja pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy socat sparse spice-protocol tesseract usbredir vde vte3 xorriso zlib zstd'
PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme'
PYTHON='/opt/homebrew/bin/python3'
@@ -95,7 +95,6 @@ riscv64-debian-cross-container:
allow_failure: true
variables:
NAME: debian-riscv64-cross
QEMU_JOB_OPTIONAL: 1

# we can however build TCG tests using a non-sid base
riscv64-debian-test-cross-container:
@@ -2,20 +2,10 @@
extends: .base_job_template
stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
cache:
paths:
- ccache
key: "$CI_JOB_NAME"
when: always
timeout: 80m
script:
- export CCACHE_BASEDIR="$(pwd)"
- export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
- export CCACHE_MAXSIZE="500M"
- export PATH="$CCACHE_WRAPPERSDIR:$PATH"
- mkdir build
- cd build
- ccache --zero-stats
- ../configure --enable-werror --disable-docs --enable-fdt=system
--disable-user $QEMU_CONFIGURE_OPTS $EXTRA_CONFIGURE_OPTS
--target-list-exclude="arm-softmmu cris-softmmu
@@ -28,7 +18,6 @@
version="$(git describe --match v[0-9]* 2>/dev/null || git rev-parse --short HEAD)";
mv -v qemu-setup*.exe qemu-setup-${version}.exe;
fi
- ccache --show-stats

# Job to cross-build specific accelerators.
#
@@ -40,15 +29,7 @@
stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
timeout: 30m
cache:
paths:
- ccache/
key: "$CI_JOB_NAME"
script:
- export CCACHE_BASEDIR="$(pwd)"
- export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
- export CCACHE_MAXSIZE="500M"
- export PATH="$CCACHE_WRAPPERSDIR:$PATH"
- mkdir build
- cd build
- ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
@@ -59,14 +40,7 @@
extends: .base_job_template
stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
cache:
paths:
- ccache/
key: "$CI_JOB_NAME"
script:
- export CCACHE_BASEDIR="$(pwd)"
- export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
- export CCACHE_MAXSIZE="500M"
- mkdir build
- cd build
- ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
@@ -5,14 +5,13 @@
- windows
- windows-1809
cache:
key: "$CI_JOB_NAME"
key: "${CI_JOB_NAME}-cache"
paths:
- msys64/var/cache
- ccache
when: always
needs: []
stage: build
timeout: 100m
timeout: 80m
variables:
# This feature doesn't (currently) work with PowerShell, it stops
# the echo'ing of commands being run and doesn't show any timing
@@ -73,7 +72,6 @@
bison diffutils flex
git grep make sed
$MINGW_TARGET-capstone
$MINGW_TARGET-ccache
$MINGW_TARGET-curl
$MINGW_TARGET-cyrus-sasl
$MINGW_TARGET-dtc
@@ -103,18 +101,11 @@
- Write-Output "Running build at $(Get-Date -Format u)"
- $env:CHERE_INVOKING = 'yes' # Preserve the current working directory
- $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink
- $env:CCACHE_BASEDIR = "$env:CI_PROJECT_DIR"
- $env:CCACHE_DIR = "$env:CCACHE_BASEDIR/ccache"
- $env:CCACHE_MAXSIZE = "500M"
- $env:CCACHE_DEPEND = 1 # cache misses are too expensive with preprocessor mode
- $env:CC = "ccache gcc"
- mkdir build
- cd build
- ..\msys64\usr\bin\bash -lc "ccache --zero-stats"
- ..\msys64\usr\bin\bash -lc "../configure --enable-fdt=system $CONFIGURE_ARGS"
- ..\msys64\usr\bin\bash -lc "make"
- ..\msys64\usr\bin\bash -lc "make check MTESTARGS='$TEST_ARGS' || { cat meson-logs/testlog.txt; exit 1; } ;"
- ..\msys64\usr\bin\bash -lc "ccache --show-stats"
- Write-Output "Finished build at $(Get-Date -Format u)"

msys2-64bit:
16 .mailmap
@@ -40,26 +40,12 @@ Nick Hudson <hnick@vmware.com> hnick@vmware.com <hnick@vmware.com>
# for the cvs2svn initialization commit e63c3dc74bf.

# Next, translate a few commits where mailman rewrote the From: line due
# to strict SPF and DMARC. Usually, our build process should be flagging
# commits like these before maintainer merges; if you find the need to add
# a line here, please also report a bug against the part of the build
# process that let the mis-attribution slip through in the first place.
#
# If the mailing list munges your emails, use:
# git config sendemail.from '"Your Name" <your.email@example.com>'
# the use of "" in that line will differ from the typically unquoted
# 'git config user.name', which in turn is sufficient for 'git send-email'
# to add an extra From: line in the body of your email that takes
# precedence over any munged From: in the mail's headers.
# See https://lists.openembedded.org/g/openembedded-core/message/166515
# and https://lists.gnu.org/archive/html/qemu-devel/2023-09/msg06784.html
# to strict SPF, although we prefer to avoid adding more entries like that.
Ed Swierk <eswierk@skyportsystems.com> Ed Swierk via Qemu-devel <qemu-devel@nongnu.org>
Ian McKellar <ianloic@google.com> Ian McKellar via Qemu-devel <qemu-devel@nongnu.org>
Julia Suvorova <jusual@mail.ru> Julia Suvorova via Qemu-devel <qemu-devel@nongnu.org>
Justin Terry (VM) <juterry@microsoft.com> Justin Terry (VM) via Qemu-devel <qemu-devel@nongnu.org>
Stefan Weil <sw@weilnetz.de> Stefan Weil via <qemu-devel@nongnu.org>
Andrey Drobyshev <andrey.drobyshev@virtuozzo.com> Andrey Drobyshev via <qemu-block@nongnu.org>
BALATON Zoltan <balaton@eik.bme.hu> BALATON Zoltan via <qemu-ppc@nongnu.org>

# Next, replace old addresses by a more recent one.
Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> <aleksandar.markovic@mips.com>
@@ -34,7 +34,7 @@ env:
- BASE_CONFIG="--disable-docs --disable-tools"
- TEST_BUILD_CMD=""
- TEST_CMD="make check V=1"
# This is broadly a list of "mainline" system targets which have support across the major distros
# This is broadly a list of "mainline" softmmu targets which have support across the major distros
- MAIN_SOFTMMU_TARGETS="aarch64-softmmu,mips64-softmmu,ppc64-softmmu,riscv64-softmmu,s390x-softmmu,x86_64-softmmu"
- CCACHE_SLOPPINESS="include_file_ctime,include_file_mtime"
- CCACHE_MAXSIZE=1G
@@ -197,7 +197,7 @@ jobs:
$(exit $BUILD_RC);
fi

- name: "[s390x] GCC (other-system)"
- name: "[s390x] GCC (other-softmmu)"
arch: s390x
dist: focal
addons:
163 MAINTAINERS
@@ -137,11 +137,10 @@ Overall TCG CPUs
|
||||
M: Richard Henderson <richard.henderson@linaro.org>
|
||||
R: Paolo Bonzini <pbonzini@redhat.com>
|
||||
S: Maintained
|
||||
F: system/cpus.c
|
||||
F: system/watchpoint.c
|
||||
F: cpu-common.c
|
||||
F: cpu-target.c
|
||||
F: page-vary-target.c
|
||||
F: softmmu/cpus.c
|
||||
F: softmmu/watchpoint.c
|
||||
F: cpus-common.c
|
||||
F: page-vary.c
|
||||
F: page-vary-common.c
|
||||
F: accel/tcg/
|
||||
F: accel/stubs/tcg-stub.c
|
||||
@@ -248,6 +247,7 @@ F: disas/hppa.c
|
||||
|
||||
LoongArch TCG CPUs
|
||||
M: Song Gao <gaosong@loongson.cn>
|
||||
M: Xiaojuan Yang <yangxiaojuan@loongson.cn>
|
||||
S: Maintained
|
||||
F: target/loongarch/
|
||||
F: tests/tcg/loongarch64/
|
||||
@@ -298,9 +298,11 @@ F: hw/openrisc/
|
||||
F: tests/tcg/openrisc/
|
||||
|
||||
PowerPC TCG CPUs
|
||||
M: Nicholas Piggin <npiggin@gmail.com>
|
||||
M: Daniel Henrique Barboza <danielhb413@gmail.com>
|
||||
R: Cédric Le Goater <clg@kaod.org>
|
||||
R: David Gibson <david@gibson.dropbear.id.au>
|
||||
R: Greg Kurz <groug@kaod.org>
|
||||
R: Nicholas Piggin <npiggin@gmail.com>
|
||||
L: qemu-ppc@nongnu.org
|
||||
S: Odd Fixes
|
||||
F: target/ppc/
|
||||
@@ -317,11 +319,8 @@ R: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
|
||||
R: Liu Zhiwei <zhiwei_liu@linux.alibaba.com>
|
||||
L: qemu-riscv@nongnu.org
|
||||
S: Supported
|
||||
F: configs/targets/riscv*
|
||||
F: docs/system/target-riscv.rst
|
||||
F: target/riscv/
|
||||
F: hw/riscv/
|
||||
F: hw/intc/riscv*
|
||||
F: include/hw/riscv/
|
||||
F: linux-user/host/riscv32/
|
||||
F: linux-user/host/riscv64/
|
||||
@@ -333,7 +332,6 @@ L: qemu-riscv@nongnu.org
|
||||
S: Supported
|
||||
F: target/riscv/insn_trans/trans_xthead.c.inc
|
||||
F: target/riscv/xthead*.decode
|
||||
F: disas/riscv-xthead*
|
||||
|
||||
RISC-V XVentanaCondOps extension
|
||||
M: Philipp Tomsich <philipp.tomsich@vrull.eu>
|
||||
@@ -341,7 +339,6 @@ L: qemu-riscv@nongnu.org
|
||||
S: Maintained
|
||||
F: target/riscv/XVentanaCondOps.decode
|
||||
F: target/riscv/insn_trans/trans_xventanacondops.c.inc
|
||||
F: disas/riscv-xventana*
|
||||
|
||||
RENESAS RX CPUs
|
||||
R: Yoshinori Sato <ysato@users.sourceforge.jp>
|
||||
@@ -441,9 +438,10 @@ F: target/mips/kvm*
|
||||
F: target/mips/sysemu/
|
||||
|
||||
PPC KVM CPUs
|
||||
M: Nicholas Piggin <npiggin@gmail.com>
|
||||
R: Daniel Henrique Barboza <danielhb413@gmail.com>
|
||||
M: Daniel Henrique Barboza <danielhb413@gmail.com>
|
||||
R: Cédric Le Goater <clg@kaod.org>
|
||||
R: David Gibson <david@gibson.dropbear.id.au>
|
||||
R: Greg Kurz <groug@kaod.org>
|
||||
S: Odd Fixes
|
||||
F: target/ppc/kvm.c
|
||||
|
||||
@@ -545,6 +543,14 @@ F: include/sysemu/xen.h
|
||||
F: include/sysemu/xen-mapcache.h
|
||||
F: stubs/xen-hw-stub.c
|
||||
|
||||
Guest CPU Cores (HAXM)
|
||||
---------------------
|
||||
X86 HAXM CPUs
|
||||
S: Orphan
|
||||
F: accel/stubs/hax-stub.c
|
||||
F: include/sysemu/hax.h
|
||||
F: target/i386/hax/
|
||||
|
||||
Guest CPU Cores (NVMM)
|
||||
----------------------
|
||||
NetBSD Virtual Machine Monitor (NVMM) CPU support
|
||||
@@ -561,7 +567,6 @@ M: Cornelia Huck <cohuck@redhat.com>
|
||||
M: Paolo Bonzini <pbonzini@redhat.com>
|
||||
S: Maintained
|
||||
F: linux-headers/
|
||||
F: include/standard-headers/
|
||||
F: scripts/update-linux-headers.sh
|
||||
|
||||
POSIX
|
||||
@@ -944,9 +949,6 @@ R: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
|
||||
L: qemu-arm@nongnu.org
|
||||
S: Maintained
|
||||
F: hw/arm/sbsa-ref.c
|
||||
F: hw/misc/sbsa_ec.c
|
||||
F: hw/watchdog/sbsa_gwdt.c
|
||||
F: include/hw/watchdog/sbsa_gwdt.h
|
||||
F: docs/system/arm/sbsa.rst
|
||||
F: tests/avocado/machine_aarch64_sbsaref.py
|
||||
|
||||
@@ -1032,16 +1034,6 @@ S: Maintained
|
||||
F: hw/ssi/xlnx-versal-ospi.c
|
||||
F: include/hw/ssi/xlnx-versal-ospi.h
|
||||
|
||||
Xilinx Versal CFI
|
||||
M: Francisco Iglesias <francisco.iglesias@amd.com>
|
||||
S: Maintained
|
||||
F: hw/misc/xlnx-cfi-if.c
|
||||
F: include/hw/misc/xlnx-cfi-if.h
|
||||
F: hw/misc/xlnx-versal-cfu.c
|
||||
F: include/hw/misc/xlnx-versal-cfu.h
|
||||
F: hw/misc/xlnx-versal-cframe-reg.c
|
||||
F: include/hw/misc/xlnx-versal-cframe-reg.h
|
||||
|
||||
STM32F100
|
||||
M: Alexandre Iooss <erdnaxe@crans.org>
|
||||
L: qemu-arm@nongnu.org
|
||||
@@ -1190,6 +1182,7 @@ F: pc-bios/hppa-firmware.img
|
||||
LoongArch Machines
|
||||
------------------
|
||||
Virt
|
||||
M: Xiaojuan Yang <yangxiaojuan@loongson.cn>
|
||||
M: Song Gao <gaosong@loongson.cn>
|
||||
S: Maintained
|
||||
F: docs/system/loongarch/virt.rst
|
||||
@@ -1236,9 +1229,6 @@ F: hw/misc/mac_via.c
|
||||
F: hw/nubus/*
|
||||
F: hw/display/macfb.c
|
||||
F: hw/block/swim.c
|
||||
F: hw/misc/djmemc.c
|
||||
F: hw/misc/iosb.c
|
||||
F: hw/audio/asc.c
|
||||
F: hw/m68k/bootinfo.h
|
||||
F: include/standard-headers/asm-m68k/bootinfo.h
|
||||
F: include/standard-headers/asm-m68k/bootinfo-mac.h
|
||||
@@ -1248,9 +1238,6 @@ F: include/hw/display/macfb.h
|
||||
F: include/hw/block/swim.h
|
||||
F: include/hw/m68k/q800.h
|
||||
F: include/hw/m68k/q800-glue.h
|
||||
F: include/hw/misc/djmemc.h
|
||||
F: include/hw/misc/iosb.h
|
||||
F: include/hw/audio/asc.h
|
||||
|
||||
virt
|
||||
M: Laurent Vivier <laurent@vivier.eu>
|
||||
@@ -1293,10 +1280,8 @@ M: Hervé Poussineau <hpoussin@reactos.org>
|
||||
R: Aleksandar Rikalo <aleksandar.rikalo@syrmia.com>
|
||||
S: Maintained
|
||||
F: hw/mips/jazz.c
|
||||
F: hw/display/g364fb.c
|
||||
F: hw/display/jazz_led.c
|
||||
F: hw/dma/rc4030.c
|
||||
F: hw/nvram/ds1225y.c
|
||||
|
||||
Malta
|
||||
M: Philippe Mathieu-Daudé <philmd@linaro.org>
|
||||
@@ -1443,10 +1428,10 @@ F: include/hw/rtc/m48t59.h
|
||||
F: tests/avocado/ppc_prep_40p.py
|
||||
|
||||
sPAPR (pseries)
|
||||
M: Nicholas Piggin <npiggin@gmail.com>
|
||||
R: Daniel Henrique Barboza <danielhb413@gmail.com>
|
||||
M: Daniel Henrique Barboza <danielhb413@gmail.com>
|
||||
R: Cédric Le Goater <clg@kaod.org>
|
||||
R: David Gibson <david@gibson.dropbear.id.au>
|
||||
R: Greg Kurz <groug@kaod.org>
|
||||
R: Harsh Prateek Bora <harshpb@linux.ibm.com>
|
||||
L: qemu-ppc@nongnu.org
|
||||
S: Odd Fixes
|
||||
@@ -1465,8 +1450,8 @@ F: tests/avocado/ppc_pseries.py
|
||||
|
||||
PowerNV (Non-Virtualized)
|
||||
M: Cédric Le Goater <clg@kaod.org>
|
||||
M: Nicholas Piggin <npiggin@gmail.com>
|
||||
R: Frédéric Barrat <fbarrat@linux.ibm.com>
|
||||
R: Nicholas Piggin <npiggin@gmail.com>
|
||||
L: qemu-ppc@nongnu.org
|
||||
S: Odd Fixes
|
||||
F: docs/system/ppc/powernv.rst
|
||||
@@ -1510,9 +1495,12 @@ F: include/hw/pci-host/mv64361.h
|
||||
|
||||
Virtual Open Firmware (VOF)
|
||||
M: Alexey Kardashevskiy <aik@ozlabs.ru>
|
||||
R: Cédric Le Goater <clg@kaod.org>
|
||||
R: Daniel Henrique Barboza <danielhb413@gmail.com>
|
||||
R: David Gibson <david@gibson.dropbear.id.au>
|
||||
R: Greg Kurz <groug@kaod.org>
|
||||
L: qemu-ppc@nongnu.org
|
||||
S: Odd Fixes
|
||||
S: Maintained
|
||||
F: hw/ppc/spapr_vof*
|
||||
F: hw/ppc/vof*
|
||||
F: include/hw/ppc/vof*
|
||||
@@ -1534,7 +1522,6 @@ Microchip PolarFire SoC Icicle Kit
|
||||
M: Bin Meng <bin.meng@windriver.com>
|
||||
L: qemu-riscv@nongnu.org
|
||||
S: Supported
|
||||
F: docs/system/riscv/microchip-icicle-kit.rst
|
||||
F: hw/riscv/microchip_pfsoc.c
|
||||
F: hw/char/mchp_pfsoc_mmuart.c
|
||||
F: hw/misc/mchp_pfsoc_dmc.c
|
||||
@@ -1550,7 +1537,6 @@ Shakti C class SoC
|
||||
M: Vijai Kumar K <vijai@behindbytes.com>
|
||||
L: qemu-riscv@nongnu.org
|
||||
S: Supported
|
||||
F: docs/system/riscv/shakti-c.rst
|
||||
F: hw/riscv/shakti_c.c
|
||||
F: hw/char/shakti_uart.c
|
||||
F: include/hw/riscv/shakti_c.h
|
||||
@@ -1562,7 +1548,6 @@ M: Bin Meng <bin.meng@windriver.com>
|
||||
M: Palmer Dabbelt <palmer@dabbelt.com>
|
||||
L: qemu-riscv@nongnu.org
|
||||
S: Supported
|
||||
F: docs/system/riscv/sifive_u.rst
|
||||
F: hw/*/*sifive*.c
|
||||
F: include/hw/*/*sifive*.h
|
||||
|
||||
@@ -1785,6 +1770,7 @@ M: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
|
||||
R: Philippe Mathieu-Daudé <philmd@linaro.org>
|
||||
R: Yanan Wang <wangyanan55@huawei.com>
|
||||
S: Supported
|
||||
F: cpu.c
|
||||
F: hw/core/cpu.c
|
||||
F: hw/core/machine-qmp-cmds.c
|
||||
F: hw/core/machine.c
|
||||
@@ -1990,7 +1976,6 @@ M: Marc-André Lureau <marcandre.lureau@redhat.com>
|
||||
R: Paolo Bonzini <pbonzini@redhat.com>
|
||||
S: Odd Fixes
|
||||
F: hw/char/
|
||||
F: include/hw/char/
|
||||
|
||||
Network devices
|
||||
M: Jason Wang <jasowang@redhat.com>
|
||||
@@ -2127,7 +2112,7 @@ S: Maintained
|
||||
F: docs/interop/virtio-balloon-stats.rst
|
||||
F: hw/virtio/virtio-balloon*.c
|
||||
F: include/hw/virtio/virtio-balloon.h
|
||||
F: system/balloon.c
|
||||
F: softmmu/balloon.c
|
||||
F: include/sysemu/balloon.h
|
||||
|
||||
virtio-9p
|
||||
@@ -2173,13 +2158,6 @@ T: git https://gitlab.com/cohuck/qemu.git s390-next
|
||||
T: git https://github.com/borntraeger/qemu.git s390-next
|
||||
L: qemu-s390x@nongnu.org
|
||||
|
||||
virtio-dmabuf
|
||||
M: Albert Esteve <aesteve@redhat.com>
|
||||
S: Supported
|
||||
F: hw/display/virtio-dmabuf.c
|
||||
F: include/hw/virtio/virtio-dmabuf.h
|
||||
F: tests/unit/test-virtio-dmabuf.c
|
||||
|
||||
virtiofs
|
||||
M: Stefan Hajnoczi <stefanha@redhat.com>
|
||||
S: Supported
|
||||
@@ -2278,13 +2256,6 @@ F: tests/qtest/nvme-test.c
|
||||
F: docs/system/devices/nvme.rst
|
||||
T: git git://git.infradead.org/qemu-nvme.git nvme-next
|
||||
|
||||
ufs
|
||||
M: Jeuk Kim <jeuk20.kim@samsung.com>
|
||||
S: Supported
|
||||
F: hw/ufs/*
|
||||
F: include/block/ufs.h
|
||||
F: tests/qtest/ufs-test.c
|
||||
|
||||
megasas
|
||||
M: Hannes Reinecke <hare@suse.com>
|
||||
L: qemu-block@nongnu.org
|
||||
@@ -2814,7 +2785,7 @@ Device Tree
|
||||
M: Alistair Francis <alistair.francis@wdc.com>
|
||||
R: David Gibson <david@gibson.dropbear.id.au>
|
||||
S: Maintained
|
||||
F: system/device_tree.c
|
||||
F: softmmu/device_tree.c
|
||||
F: include/sysemu/device_tree.h
|
||||
|
||||
Dump
|
||||
@@ -2855,7 +2826,7 @@ F: include/exec/gdbstub.h
|
||||
F: include/gdbstub/*
|
||||
F: gdb-xml/
|
||||
F: tests/tcg/multiarch/gdbstub/
|
||||
F: scripts/feature_to_c.py
|
||||
F: scripts/feature_to_c.sh
|
||||
F: scripts/probe-gdb-support.py
|
||||
|
||||
Memory API
|
||||
@@ -2870,11 +2841,11 @@ F: include/exec/memory.h
|
||||
F: include/exec/ram_addr.h
|
||||
F: include/exec/ramblock.h
|
||||
F: include/sysemu/memory_mapping.h
|
||||
F: system/dma-helpers.c
|
||||
F: system/ioport.c
|
||||
F: system/memory.c
|
||||
F: system/memory_mapping.c
|
||||
F: system/physmem.c
|
||||
F: softmmu/dma-helpers.c
|
||||
F: softmmu/ioport.c
|
||||
F: softmmu/memory.c
|
||||
F: softmmu/memory_mapping.c
|
||||
F: softmmu/physmem.c
|
||||
F: include/exec/memory-internal.h
|
||||
F: scripts/coccinelle/memory-region-housekeeping.cocci
|
||||
|
||||
@@ -2889,7 +2860,6 @@ F: hw/mem/pc-dimm.c
|
||||
F: include/hw/mem/memory-device.h
|
||||
F: include/hw/mem/nvdimm.h
|
||||
F: include/hw/mem/pc-dimm.h
|
||||
F: stubs/memory_device.c
|
||||
F: docs/nvdimm.txt
|
||||
|
||||
SPICE
|
||||
@@ -2928,12 +2898,13 @@ F: include/sysemu/runstate.h
|
||||
F: include/sysemu/runstate-action.h
|
||||
F: util/main-loop.c
|
||||
F: util/qemu-timer.c
|
||||
F: system/vl.c
|
||||
F: system/main.c
|
||||
F: system/cpus.c
|
||||
F: system/cpu-throttle.c
|
||||
F: system/cpu-timers.c
|
||||
F: system/runstate*
|
||||
F: softmmu/vl.c
|
||||
F: softmmu/main.c
|
||||
F: softmmu/cpus.c
|
||||
F: softmmu/cpu-throttle.c
|
||||
F: softmmu/cpu-timers.c
|
||||
F: softmmu/icount.c
|
||||
F: softmmu/runstate*
|
||||
F: qapi/run-state.json
|
||||
|
||||
Read, Copy, Update (RCU)
|
||||
@@ -2977,17 +2948,12 @@ W: http://info.iet.unipi.it/~luigi/netmap/
|
||||
S: Maintained
|
||||
F: net/netmap.c
|
||||
|
||||
AF_XDP network backend
|
||||
R: Ilya Maximets <i.maximets@ovn.org>
|
||||
F: net/af-xdp.c
|
||||
|
||||
Host Memory Backends
|
||||
M: David Hildenbrand <david@redhat.com>
|
||||
M: Igor Mammedov <imammedo@redhat.com>
|
||||
S: Maintained
|
||||
F: backends/hostmem*.c
|
||||
F: include/sysemu/hostmem.h
|
||||
F: docs/system/vm-templating.rst
|
||||
T: git https://gitlab.com/ehabkost/qemu.git machine-next
|
||||
|
||||
Cryptodev Backends
|
||||
@@ -3107,7 +3073,7 @@ F: qapi/qom.json
|
||||
F: qapi/qdev.json
|
||||
F: scripts/coccinelle/qom-parent-type.cocci
|
||||
F: scripts/qom-cast-macro-clean-cocci-gen.py
|
||||
F: system/qdev-monitor.c
|
||||
F: softmmu/qdev-monitor.c
|
||||
F: stubs/qdev.c
|
||||
F: qom/
|
||||
F: tests/unit/check-qom-interface.c
|
||||
@@ -3141,8 +3107,7 @@ M: Thomas Huth <thuth@redhat.com>
|
||||
M: Laurent Vivier <lvivier@redhat.com>
|
||||
R: Paolo Bonzini <pbonzini@redhat.com>
|
||||
S: Maintained
|
||||
F: system/qtest.c
|
||||
F: include/sysemu/qtest.h
|
||||
F: softmmu/qtest.c
|
||||
F: accel/qtest/
|
||||
F: tests/qtest/
|
||||
F: docs/devel/qgraph.rst
|
||||
@@ -3197,7 +3162,6 @@ F: stubs/
|
||||
|
||||
Tracing
|
||||
M: Stefan Hajnoczi <stefanha@redhat.com>
|
||||
R: Mads Ynddal <mads@ynddal.dk>
|
||||
S: Maintained
|
||||
F: trace/
|
||||
F: trace-events
|
||||
@@ -3210,15 +3174,10 @@ F: docs/tools/qemu-trace-stap.rst
|
||||
F: docs/devel/tracing.rst
|
||||
T: git https://github.com/stefanha/qemu.git tracing
|
||||
|
||||
Simpletrace
|
||||
M: Mads Ynddal <mads@ynddal.dk>
|
||||
S: Maintained
|
||||
F: scripts/simpletrace.py
|
||||
|
||||
TPM
|
||||
M: Stefan Berger <stefanb@linux.ibm.com>
|
||||
S: Maintained
|
||||
F: system/tpm*
|
||||
F: softmmu/tpm*
|
||||
F: hw/tpm/*
|
||||
F: include/hw/acpi/tpm.h
|
||||
F: include/sysemu/tpm*
|
||||
@@ -3234,8 +3193,7 @@ F: scripts/checkpatch.pl
|
||||
|
||||
Migration
|
||||
M: Juan Quintela <quintela@redhat.com>
|
||||
M: Peter Xu <peterx@redhat.com>
|
||||
M: Fabiano Rosas <farosas@suse.de>
|
||||
R: Peter Xu <peterx@redhat.com>
|
||||
R: Leonardo Bras <leobras@redhat.com>
|
||||
S: Maintained
|
||||
F: hw/core/vmstate-if.c
|
||||
@@ -3250,20 +3208,11 @@ F: docs/devel/migration.rst
|
||||
F: qapi/migration.json
|
||||
F: tests/migration/
|
||||
F: util/userfaultfd.c
|
||||
X: migration/rdma*
|
||||
|
||||
RDMA Migration
|
||||
M: Juan Quintela <quintela@redhat.com>
|
||||
R: Li Zhijian <lizhijian@fujitsu.com>
|
||||
R: Peter Xu <peterx@redhat.com>
|
||||
R: Leonardo Bras <leobras@redhat.com>
|
||||
S: Odd Fixes
|
||||
F: migration/rdma*
|
||||
|
||||
Migration dirty limit and dirty page rate
|
||||
M: Hyman Huang <yong.huang@smartx.com>
|
||||
S: Maintained
|
||||
F: system/dirtylimit.c
|
||||
F: softmmu/dirtylimit.c
|
||||
F: include/sysemu/dirtylimit.h
|
||||
F: migration/dirtyrate.c
|
||||
F: migration/dirtyrate.h
|
||||
@@ -3287,7 +3236,7 @@ F: scripts/xml-preprocess*
|
||||
Seccomp
|
||||
M: Daniel P. Berrange <berrange@redhat.com>
|
||||
S: Odd Fixes
|
||||
F: system/qemu-seccomp.c
|
||||
F: softmmu/qemu-seccomp.c
|
||||
F: include/sysemu/seccomp.h
|
||||
F: tests/unit/test-seccomp.c
|
||||
|
||||
@@ -3421,12 +3370,6 @@ M: Viktor Prutyanov <viktor.prutyanov@phystech.edu>
|
||||
S: Maintained
|
||||
F: contrib/elf2dmp/
|
||||
|
||||
Overall sensors
|
||||
M: Philippe Mathieu-Daudé <philmd@linaro.org>
|
||||
S: Odd Fixes
|
||||
F: hw/sensor
|
||||
F: include/hw/sensor
|
||||
|
||||
I2C and SMBus
|
||||
M: Corey Minyard <cminyard@mvista.com>
|
||||
S: Maintained
|
||||
@@ -3592,7 +3535,7 @@ M: Alistair Francis <Alistair.Francis@wdc.com>
|
||||
L: qemu-riscv@nongnu.org
|
||||
S: Maintained
|
||||
F: tcg/riscv/
|
||||
F: disas/riscv.[ch]
|
||||
F: disas/riscv.c
|
||||
|
||||
S390 TCG target
|
||||
M: Richard Henderson <richard.henderson@linaro.org>
|
||||
@@ -3712,7 +3655,7 @@ T: git https://github.com/stefanha/qemu.git block
|
||||
Bootdevice
|
||||
M: Gonglei <arei.gonglei@huawei.com>
|
||||
S: Maintained
|
||||
F: system/bootdevice.c
|
||||
F: softmmu/bootdevice.c
|
||||
|
||||
Quorum
|
||||
M: Alberto Garcia <berto@igalia.com>
|
||||
@@ -3758,7 +3701,6 @@ S: Supported
|
||||
F: block/parallels.c
|
||||
F: block/parallels-ext.c
|
||||
F: docs/interop/parallels.txt
|
||||
T: git https://src.openvz.org/scm/~den/qemu.git parallels
|
||||
|
||||
qed
|
||||
M: Stefan Hajnoczi <stefanha@redhat.com>
|
||||
@@ -3864,7 +3806,7 @@ F: docs/block-replication.txt
|
||||
PVRDMA
|
||||
M: Yuval Shaia <yuval.shaia.ml@gmail.com>
|
||||
M: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
|
||||
S: Odd Fixes
|
||||
S: Maintained
|
||||
F: hw/rdma/*
|
||||
F: hw/rdma/vmw/*
|
||||
F: docs/pvrdma.txt
|
||||
@@ -3928,7 +3870,6 @@ F: .github/workflows/lockdown.yml
|
||||
F: .gitlab-ci.yml
|
||||
F: .gitlab-ci.d/
|
||||
F: .travis.yml
|
||||
F: docs/devel/ci*
|
||||
F: scripts/ci/
|
||||
F: tests/docker/
|
||||
F: tests/vm/
|
||||
|
||||
29 Makefile
@@ -164,6 +164,14 @@ ifneq ($(filter $(ninja-targets), $(ninja-cmd-goals)),)
|
||||
endif
|
||||
endif
|
||||
|
||||
ifeq ($(CONFIG_PLUGIN),y)
|
||||
.PHONY: plugins
|
||||
plugins:
|
||||
$(call quiet-command,\
|
||||
$(MAKE) $(SUBDIR_MAKEFLAGS) -C contrib/plugins V="$(V)", \
|
||||
"BUILD", "example plugins")
|
||||
endif # $(CONFIG_PLUGIN)
|
||||
|
||||
else # config-host.mak does not exist
|
||||
ifneq ($(filter-out $(UNCHECKED_GOALS),$(MAKECMDGOALS)),$(if $(MAKECMDGOALS),,fail))
|
||||
$(error Please call configure before running make)
|
||||
@@ -176,20 +184,15 @@ include $(SRC_PATH)/tests/Makefile.include
|
||||
|
||||
all: recurse-all
|
||||
|
||||
SUBDIR_RULES=$(foreach t, all clean distclean, $(addsuffix /$(t), $(SUBDIRS)))
|
||||
.PHONY: $(SUBDIR_RULES)
|
||||
$(SUBDIR_RULES):
|
||||
ROMS_RULES=$(foreach t, all clean distclean, $(addsuffix /$(t), $(ROMS)))
|
||||
.PHONY: $(ROMS_RULES)
|
||||
$(ROMS_RULES):
|
||||
$(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) -C $(dir $@) V="$(V)" TARGET_DIR="$(dir $@)" $(notdir $@),)
|
||||
|
||||
ifneq ($(filter contrib/plugins, $(SUBDIRS)),)
|
||||
.PHONY: plugins
|
||||
plugins: contrib/plugins/all
|
||||
endif
|
||||
|
||||
.PHONY: recurse-all recurse-clean
|
||||
recurse-all: $(addsuffix /all, $(SUBDIRS))
|
||||
recurse-clean: $(addsuffix /clean, $(SUBDIRS))
|
||||
recurse-distclean: $(addsuffix /distclean, $(SUBDIRS))
|
||||
recurse-all: $(addsuffix /all, $(ROMS))
|
||||
recurse-clean: $(addsuffix /clean, $(ROMS))
|
||||
recurse-distclean: $(addsuffix /distclean, $(ROMS))
|
||||
|
||||
######################################################################
|
||||
|
||||
@@ -293,7 +296,7 @@ help:
|
||||
$(call print-help,cscope,Generate cscope index)
|
||||
$(call print-help,sparse,Run sparse on the QEMU source)
|
||||
@echo ''
|
||||
ifneq ($(filter contrib/plugins, $(SUBDIRS)),)
|
||||
ifeq ($(CONFIG_PLUGIN),y)
|
||||
@echo 'Plugin targets:'
|
||||
$(call print-help,plugins,Build the example TCG plugins)
|
||||
@echo ''
|
||||
@@ -313,7 +316,7 @@ endif
|
||||
@echo 'Documentation targets:'
|
||||
$(call print-help,html man,Build documentation in specified format)
|
||||
@echo ''
|
||||
ifneq ($(filter msi, $(ninja-targets)),)
|
||||
ifdef CONFIG_WIN32
|
||||
@echo 'Windows targets:'
|
||||
$(call print-help,installer,Build NSIS-based installer for QEMU)
|
||||
$(call print-help,msi,Build MSI-based installer for qemu-ga)
|
||||
|
||||
@@ -4,6 +4,9 @@ config WHPX
|
||||
config NVMM
|
||||
bool
|
||||
|
||||
config HAX
|
||||
bool
|
||||
|
||||
config HVF
|
||||
bool
|
||||
|
||||
|
||||
@@ -30,7 +30,7 @@
|
||||
#include "hw/core/accel-cpu.h"
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
#include "accel-system.h"
|
||||
#include "accel-softmmu.h"
|
||||
#endif /* !CONFIG_USER_ONLY */
|
||||
|
||||
static const TypeInfo accel_type = {
|
||||
@@ -119,37 +119,16 @@ void accel_cpu_instance_init(CPUState *cpu)
|
||||
}
|
||||
}
|
||||
|
||||
bool accel_cpu_common_realize(CPUState *cpu, Error **errp)
|
||||
bool accel_cpu_realizefn(CPUState *cpu, Error **errp)
|
||||
{
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
AccelState *accel = current_accel();
|
||||
AccelClass *acc = ACCEL_GET_CLASS(accel);
|
||||
|
||||
/* target specific realization */
|
||||
if (cc->accel_cpu && cc->accel_cpu->cpu_target_realize
|
||||
&& !cc->accel_cpu->cpu_target_realize(cpu, errp)) {
|
||||
return false;
|
||||
if (cc->accel_cpu && cc->accel_cpu->cpu_realizefn) {
|
||||
return cc->accel_cpu->cpu_realizefn(cpu, errp);
|
||||
}
|
||||
|
||||
/* generic realization */
|
||||
if (acc->cpu_common_realize && !acc->cpu_common_realize(cpu, errp)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void accel_cpu_common_unrealize(CPUState *cpu)
|
||||
{
|
||||
AccelState *accel = current_accel();
|
||||
AccelClass *acc = ACCEL_GET_CLASS(accel);
|
||||
|
||||
/* generic unrealization */
|
||||
if (acc->cpu_common_unrealize) {
|
||||
acc->cpu_common_unrealize(cpu);
|
||||
}
|
||||
}
|
||||
|
||||
int accel_supported_gdbstub_sstep_flags(void)
|
||||
{
|
||||
AccelState *accel = current_accel();
|
||||
@@ -28,7 +28,7 @@
|
||||
#include "hw/boards.h"
|
||||
#include "sysemu/cpus.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "accel-system.h"
|
||||
#include "accel-softmmu.h"
|
||||
|
||||
int accel_init_machine(AccelState *accel, MachineState *ms)
|
||||
{
|
||||
@@ -99,8 +99,8 @@ static const TypeInfo accel_ops_type_info = {
|
||||
.class_size = sizeof(AccelOpsClass),
|
||||
};
|
||||
|
||||
static void accel_system_register_types(void)
|
||||
static void accel_softmmu_register_types(void)
|
||||
{
|
||||
type_register_static(&accel_ops_type_info);
|
||||
}
|
||||
type_init(accel_system_register_types);
|
||||
type_init(accel_softmmu_register_types);
|
||||
@@ -7,9 +7,9 @@
|
||||
* See the COPYING file in the top-level directory.
|
||||
*/
|
||||
|
||||
#ifndef ACCEL_SYSTEM_H
|
||||
#define ACCEL_SYSTEM_H
|
||||
#ifndef ACCEL_SOFTMMU_H
|
||||
#define ACCEL_SOFTMMU_H
|
||||
|
||||
void accel_init_ops_interfaces(AccelClass *ac);
|
||||
|
||||
#endif /* ACCEL_SYSTEM_H */
|
||||
#endif /* ACCEL_SOFTMMU_H */
|
||||
@@ -27,7 +27,7 @@ static void *dummy_cpu_thread_fn(void *arg)
|
||||
qemu_mutex_lock_iothread();
|
||||
qemu_thread_get_self(cpu->thread);
|
||||
cpu->thread_id = qemu_get_thread_id();
|
||||
cpu->neg.can_do_io = true;
|
||||
cpu->can_do_io = 1;
|
||||
current_cpu = cpu;
|
||||
|
||||
#ifndef _WIN32
|
||||
|
||||
@@ -428,7 +428,7 @@ static void *hvf_cpu_thread_fn(void *arg)
|
||||
qemu_thread_get_self(cpu->thread);
|
||||
|
||||
cpu->thread_id = qemu_get_thread_id();
|
||||
cpu->neg.can_do_io = true;
|
||||
cpu->can_do_io = 1;
|
||||
current_cpu = cpu;
|
||||
|
||||
hvf_init_vcpu(cpu);
|
||||
@@ -474,7 +474,7 @@ static void hvf_start_vcpu_thread(CPUState *cpu)
|
||||
cpu, QEMU_THREAD_JOINABLE);
|
||||
}
|
||||
|
||||
static int hvf_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
|
||||
static int hvf_insert_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len)
|
||||
{
|
||||
struct hvf_sw_breakpoint *bp;
|
||||
int err;
|
||||
@@ -512,7 +512,7 @@ static int hvf_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hvf_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
|
||||
static int hvf_remove_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len)
|
||||
{
|
||||
struct hvf_sw_breakpoint *bp;
|
||||
int err;
|
||||
|
||||
@@ -51,7 +51,7 @@ void assert_hvf_ok(hv_return_t ret)
|
||||
abort();
|
||||
}
|
||||
|
||||
struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu, vaddr pc)
|
||||
struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu, target_ulong pc)
|
||||
{
|
||||
struct hvf_sw_breakpoint *bp;
|
||||
|
||||
|
||||
@@ -36,7 +36,7 @@ static void *kvm_vcpu_thread_fn(void *arg)
|
||||
qemu_mutex_lock_iothread();
|
||||
qemu_thread_get_self(cpu->thread);
|
||||
cpu->thread_id = qemu_get_thread_id();
|
||||
cpu->neg.can_do_io = true;
|
||||
cpu->can_do_io = 1;
|
||||
current_cpu = cpu;
|
||||
|
||||
r = kvm_init_vcpu(cpu, &error_fatal);
|
||||
|
||||
@@ -174,31 +174,13 @@ void kvm_resample_fd_notify(int gsi)
|
||||
}
|
||||
}
|
||||
|
||||
unsigned int kvm_get_max_memslots(void)
|
||||
int kvm_get_max_memslots(void)
|
||||
{
|
||||
KVMState *s = KVM_STATE(current_accel());
|
||||
|
||||
return s->nr_slots;
|
||||
}
|
||||
|
||||
unsigned int kvm_get_free_memslots(void)
|
||||
{
|
||||
unsigned int used_slots = 0;
|
||||
KVMState *s = kvm_state;
|
||||
int i;
|
||||
|
||||
kvm_slots_lock();
|
||||
for (i = 0; i < s->nr_as; i++) {
|
||||
if (!s->as[i].ml) {
|
||||
continue;
|
||||
}
|
||||
used_slots = MAX(used_slots, s->as[i].ml->nr_used_slots);
|
||||
}
|
||||
kvm_slots_unlock();
|
||||
|
||||
return s->nr_slots - used_slots;
|
||||
}
|
||||
|
||||
/* Called with KVMMemoryListener.slots_lock held */
|
||||
static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
|
||||
{
|
||||
@@ -214,6 +196,19 @@ static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
bool kvm_has_free_slot(MachineState *ms)
|
||||
{
|
||||
KVMState *s = KVM_STATE(ms->accelerator);
|
||||
bool result;
|
||||
KVMMemoryListener *kml = &s->memory_listener;
|
||||
|
||||
kvm_slots_lock();
|
||||
result = !!kvm_get_free_slot(kml);
|
||||
kvm_slots_unlock();
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/* Called with KVMMemoryListener.slots_lock held */
|
||||
static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
|
||||
{
|
||||
@@ -1392,7 +1387,6 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
|
||||
}
|
||||
start_addr += slot_size;
|
||||
size -= slot_size;
|
||||
kml->nr_used_slots--;
|
||||
} while (size);
|
||||
return;
|
||||
}
|
||||
@@ -1418,7 +1412,6 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
|
||||
ram_start_offset += slot_size;
|
||||
ram += slot_size;
|
||||
size -= slot_size;
|
||||
kml->nr_used_slots++;
|
||||
} while (size);
|
||||
}
|
||||
|
||||
@@ -1461,13 +1454,15 @@ static void *kvm_dirty_ring_reaper_thread(void *data)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void kvm_dirty_ring_reaper_init(KVMState *s)
|
||||
static int kvm_dirty_ring_reaper_init(KVMState *s)
|
||||
{
|
||||
struct KVMDirtyRingReaper *r = &s->reaper;
|
||||
|
||||
qemu_thread_create(&r->reaper_thr, "kvm-reaper",
|
||||
kvm_dirty_ring_reaper_thread,
|
||||
s, QEMU_THREAD_JOINABLE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int kvm_dirty_ring_init(KVMState *s)
|
||||
@@ -2463,7 +2458,7 @@ static int kvm_init(MachineState *ms)
|
||||
KVMState *s;
|
||||
const KVMCapabilityInfo *missing_cap;
|
||||
int ret;
|
||||
int type;
|
||||
int type = 0;
|
||||
uint64_t dirty_log_manual_caps;
|
||||
|
||||
qemu_mutex_init(&kml_slots_lock);
|
||||
@@ -2528,13 +2523,6 @@ static int kvm_init(MachineState *ms)
|
||||
type = mc->kvm_type(ms, kvm_type);
|
||||
} else if (mc->kvm_type) {
|
||||
type = mc->kvm_type(ms, NULL);
|
||||
} else {
|
||||
type = kvm_arch_get_default_type(ms);
|
||||
}
|
||||
|
||||
if (type < 0) {
|
||||
ret = -EINVAL;
|
||||
goto err;
|
||||
}
|
||||
|
||||
do {
|
||||
@@ -2749,7 +2737,10 @@ static int kvm_init(MachineState *ms)
|
||||
}
|
||||
|
||||
if (s->kvm_dirty_ring_size) {
|
||||
kvm_dirty_ring_reaper_init(s);
|
||||
ret = kvm_dirty_ring_reaper_init(s);
|
||||
if (ret) {
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
|
||||
if (kvm_check_extension(kvm_state, KVM_CAP_BINARY_STATS_FD)) {
|
||||
@@ -2767,7 +2758,6 @@ err:
|
||||
if (s->fd != -1) {
|
||||
close(s->fd);
|
||||
}
|
||||
g_free(s->as);
|
||||
g_free(s->memory_listener.slots);
|
||||
|
||||
return ret;
|
||||
@@ -2858,13 +2848,7 @@ bool kvm_cpu_check_are_resettable(void)
|
||||
static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
|
||||
{
|
||||
if (!cpu->vcpu_dirty) {
|
||||
int ret = kvm_arch_get_registers(cpu);
|
||||
if (ret) {
|
||||
error_report("Failed to get registers: %s", strerror(-ret));
|
||||
cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
|
||||
vm_stop(RUN_STATE_INTERNAL_ERROR);
|
||||
}
|
||||
|
||||
kvm_arch_get_registers(cpu);
|
||||
cpu->vcpu_dirty = true;
|
||||
}
|
||||
}
|
||||
@@ -2878,13 +2862,7 @@ void kvm_cpu_synchronize_state(CPUState *cpu)
|
||||
|
||||
static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
|
||||
{
|
||||
int ret = kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
|
||||
if (ret) {
|
||||
error_report("Failed to put registers after reset: %s", strerror(-ret));
|
||||
cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
|
||||
vm_stop(RUN_STATE_INTERNAL_ERROR);
|
||||
}
|
||||
|
||||
kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
|
||||
cpu->vcpu_dirty = false;
|
||||
}
|
||||
|
||||
@@ -2895,12 +2873,7 @@ void kvm_cpu_synchronize_post_reset(CPUState *cpu)
|
||||
|
||||
static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
|
||||
{
|
||||
int ret = kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
|
||||
if (ret) {
|
||||
error_report("Failed to put registers after init: %s", strerror(-ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
|
||||
cpu->vcpu_dirty = false;
|
||||
}
|
||||
|
||||
@@ -2993,14 +2966,7 @@ int kvm_cpu_exec(CPUState *cpu)
|
||||
MemTxAttrs attrs;
|
||||
|
||||
if (cpu->vcpu_dirty) {
|
||||
ret = kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
|
||||
if (ret) {
|
||||
error_report("Failed to put registers after init: %s",
|
||||
strerror(-ret));
|
||||
ret = -1;
|
||||
break;
|
||||
}
|
||||
|
||||
kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
|
||||
cpu->vcpu_dirty = false;
|
||||
}
|
||||
|
||||
@@ -3340,7 +3306,8 @@ bool kvm_arm_supports_user_irq(void)
|
||||
}
|
||||
|
||||
#ifdef KVM_CAP_SET_GUEST_DEBUG
|
||||
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu, vaddr pc)
|
||||
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
|
||||
target_ulong pc)
|
||||
{
|
||||
struct kvm_sw_breakpoint *bp;
|
||||
|
||||
@@ -3794,7 +3761,6 @@ static void kvm_accel_instance_init(Object *obj)
|
||||
/* KVM dirty ring is by default off */
|
||||
s->kvm_dirty_ring_size = 0;
|
||||
s->kvm_dirty_ring_with_bitmap = false;
|
||||
s->kvm_eager_split_size = 0;
|
||||
s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN;
|
||||
s->notify_window = 0;
|
||||
s->xen_version = 0;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
specific_ss.add(files('accel-target.c'))
|
||||
system_ss.add(files('accel-system.c', 'accel-blocker.c'))
|
||||
specific_ss.add(files('accel-common.c', 'accel-blocker.c'))
|
||||
system_ss.add(files('accel-softmmu.c'))
|
||||
user_ss.add(files('accel-user.c'))
|
||||
|
||||
subdir('tcg')
|
||||
|
||||
24 accel/stubs/hax-stub.c Normal file
@@ -0,0 +1,24 @@
|
||||
/*
|
||||
* QEMU HAXM support
|
||||
*
|
||||
* Copyright (c) 2015, Intel Corporation
|
||||
*
|
||||
* Copyright 2016 Google, Inc.
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* See the COPYING file in the top-level directory.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "sysemu/hax.h"
|
||||
|
||||
bool hax_allowed;
|
||||
|
||||
int hax_sync_vcpus(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
@@ -109,14 +109,9 @@ int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
unsigned int kvm_get_max_memslots(void)
|
||||
bool kvm_has_free_slot(MachineState *ms)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
unsigned int kvm_get_free_memslots(void)
|
||||
{
|
||||
return 0;
|
||||
return false;
|
||||
}
|
||||
|
||||
void kvm_init_cpu_signals(CPUState *cpu)
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
system_stubs_ss = ss.source_set()
|
||||
system_stubs_ss.add(when: 'CONFIG_XEN', if_false: files('xen-stub.c'))
|
||||
system_stubs_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c'))
|
||||
system_stubs_ss.add(when: 'CONFIG_TCG', if_false: files('tcg-stub.c'))
|
||||
sysemu_stubs_ss = ss.source_set()
|
||||
sysemu_stubs_ss.add(when: 'CONFIG_HAX', if_false: files('hax-stub.c'))
|
||||
sysemu_stubs_ss.add(when: 'CONFIG_XEN', if_false: files('xen-stub.c'))
|
||||
sysemu_stubs_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c'))
|
||||
sysemu_stubs_ss.add(when: 'CONFIG_TCG', if_false: files('tcg-stub.c'))
|
||||
|
||||
specific_ss.add_all(when: ['CONFIG_SYSTEM_ONLY'], if_true: system_stubs_ss)
|
||||
specific_ss.add_all(when: ['CONFIG_SYSTEM_ONLY'], if_true: sysemu_stubs_ss)
|
||||
|
||||
@@ -69,12 +69,11 @@
|
||||
# define END _le
|
||||
#endif
|
||||
|
||||
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
|
||||
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
|
||||
ABI_TYPE cmpv, ABI_TYPE newv,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
|
||||
DATA_SIZE, retaddr);
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
|
||||
DATA_TYPE ret;
|
||||
|
||||
#if DATA_SIZE == 16
|
||||
@@ -88,11 +87,10 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
|
||||
}
|
||||
|
||||
#if DATA_SIZE < 16
|
||||
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
|
||||
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
|
||||
DATA_SIZE, retaddr);
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
|
||||
DATA_TYPE ret;
|
||||
|
||||
ret = qatomic_xchg__nocheck(haddr, val);
|
||||
@@ -102,11 +100,11 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
|
||||
}
|
||||
|
||||
#define GEN_ATOMIC_HELPER(X) \
|
||||
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
|
||||
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
|
||||
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
|
||||
{ \
|
||||
DATA_TYPE *haddr, ret; \
|
||||
haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
|
||||
haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
|
||||
ret = qatomic_##X(haddr, val); \
|
||||
ATOMIC_MMU_CLEANUP; \
|
||||
atomic_trace_rmw_post(env, addr, oi); \
|
||||
@@ -133,11 +131,11 @@ GEN_ATOMIC_HELPER(xor_fetch)
|
||||
* of CF_PARALLEL's value, we'll trace just a read and a write.
|
||||
*/
|
||||
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
|
||||
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
|
||||
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
|
||||
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
|
||||
{ \
|
||||
XDATA_TYPE *haddr, cmp, old, new, val = xval; \
|
||||
haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
|
||||
haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
|
||||
smp_mb(); \
|
||||
cmp = qatomic_read__nocheck(haddr); \
|
||||
do { \
|
||||
@@ -174,12 +172,11 @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
|
||||
# define END _be
|
||||
#endif
|
||||
|
||||
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
|
||||
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
|
||||
ABI_TYPE cmpv, ABI_TYPE newv,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
|
||||
DATA_SIZE, retaddr);
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
|
||||
DATA_TYPE ret;
|
||||
|
||||
#if DATA_SIZE == 16
|
||||
@@ -193,11 +190,10 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
|
||||
}
|
||||
|
||||
#if DATA_SIZE < 16
|
||||
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
|
||||
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
|
||||
DATA_SIZE, retaddr);
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
|
||||
ABI_TYPE ret;
|
||||
|
||||
ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
|
||||
@@ -207,11 +203,11 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
|
||||
}
|
||||
|
||||
#define GEN_ATOMIC_HELPER(X) \
|
||||
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
|
||||
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
|
||||
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
|
||||
{ \
|
||||
DATA_TYPE *haddr, ret; \
|
||||
haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
|
||||
haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
|
||||
ret = qatomic_##X(haddr, BSWAP(val)); \
|
||||
ATOMIC_MMU_CLEANUP; \
|
||||
atomic_trace_rmw_post(env, addr, oi); \
|
||||
@@ -235,11 +231,11 @@ GEN_ATOMIC_HELPER(xor_fetch)
|
||||
* of CF_PARALLEL's value, we'll trace just a read and a write.
|
||||
*/
|
||||
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
|
||||
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
|
||||
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
|
||||
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
|
||||
{ \
|
||||
XDATA_TYPE *haddr, ldo, ldn, old, new, val = xval; \
|
||||
haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
|
||||
haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
|
||||
smp_mb(); \
|
||||
ldn = qatomic_read__nocheck(haddr); \
|
||||
do { \
|
||||
|
||||
@@ -20,8 +20,9 @@
|
||||
#include "qemu/osdep.h"
|
||||
#include "sysemu/cpus.h"
|
||||
#include "sysemu/tcg.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "qemu/plugin.h"
|
||||
#include "internal-common.h"
|
||||
#include "internal.h"
|
||||
|
||||
bool tcg_allowed;
|
||||
|
||||
@@ -32,10 +33,40 @@ void cpu_loop_exit_noexc(CPUState *cpu)
|
||||
cpu_loop_exit(cpu);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_SOFTMMU)
|
||||
void cpu_reloading_memory_map(void)
|
||||
{
|
||||
if (qemu_in_vcpu_thread() && current_cpu->running) {
|
||||
/* The guest can in theory prolong the RCU critical section as long
|
||||
* as it feels like. The major problem with this is that because it
|
||||
* can do multiple reconfigurations of the memory map within the
|
||||
* critical section, we could potentially accumulate an unbounded
|
||||
* collection of memory data structures awaiting reclamation.
|
||||
*
|
||||
* Because the only thing we're currently protecting with RCU is the
|
||||
* memory data structures, it's sufficient to break the critical section
|
||||
* in this callback, which we know will get called every time the
|
||||
* memory map is rearranged.
|
||||
*
|
||||
* (If we add anything else in the system that uses RCU to protect
|
||||
* its data structures, we will need to implement some other mechanism
|
||||
* to force TCG CPUs to exit the critical section, at which point this
|
||||
* part of this callback might become unnecessary.)
|
||||
*
|
||||
* This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
|
||||
* only protects cpu->as->dispatch. Since we know our caller is about
|
||||
* to reload it, it's safe to split the critical section.
|
||||
*/
|
||||
rcu_read_unlock();
|
||||
rcu_read_lock();
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
void cpu_loop_exit(CPUState *cpu)
|
||||
{
|
||||
/* Undo the setting in cpu_tb_exec. */
|
||||
cpu->neg.can_do_io = true;
|
||||
cpu->can_do_io = 1;
|
||||
/* Undo any setting in generated code. */
|
||||
qemu_plugin_disable_mem_helpers(cpu);
|
||||
siglongjmp(cpu->jmp_env, 1);
|
||||
|
||||
@@ -42,8 +42,7 @@
|
||||
#include "tb-jmp-cache.h"
|
||||
#include "tb-hash.h"
|
||||
#include "tb-context.h"
|
||||
#include "internal-common.h"
|
||||
#include "internal-target.h"
|
||||
#include "internal.h"
|
||||
|
||||
/* -icount align implementation. */
|
||||
|
||||
@@ -74,7 +73,7 @@ static void align_clocks(SyncClocks *sc, CPUState *cpu)
|
||||
return;
|
||||
}
|
||||
|
||||
cpu_icount = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
|
||||
cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
|
||||
sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
|
||||
sc->last_cpu_icount = cpu_icount;
|
||||
|
||||
@@ -125,7 +124,7 @@ static void init_delay_params(SyncClocks *sc, CPUState *cpu)
|
||||
sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
|
||||
sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
|
||||
sc->last_cpu_icount
|
||||
= cpu->icount_extra + cpu->neg.icount_decr.u16.low;
|
||||
= cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
|
||||
if (sc->diff_clk < max_delay) {
|
||||
max_delay = sc->diff_clk;
|
||||
}
|
||||
@@ -223,7 +222,7 @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
|
||||
struct tb_desc desc;
|
||||
uint32_t h;
|
||||
|
||||
desc.env = cpu_env(cpu);
|
||||
desc.env = cpu->env_ptr;
|
||||
desc.cs_base = cs_base;
|
||||
desc.flags = flags;
|
||||
desc.cflags = cflags;
|
||||
@@ -445,7 +444,7 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
|
||||
static inline TranslationBlock * QEMU_DISABLE_CFI
|
||||
cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
|
||||
{
|
||||
CPUArchState *env = cpu_env(cpu);
|
||||
CPUArchState *env = cpu->env_ptr;
|
||||
uintptr_t ret;
|
||||
TranslationBlock *last_tb;
|
||||
const void *tb_ptr = itb->tc.ptr;
|
||||
@@ -456,7 +455,7 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
|
||||
|
||||
qemu_thread_jit_execute();
|
||||
ret = tcg_qemu_tb_exec(env, tb_ptr);
|
||||
cpu->neg.can_do_io = true;
|
||||
cpu->can_do_io = 1;
|
||||
qemu_plugin_disable_mem_helpers(cpu);
|
||||
/*
|
||||
* TODO: Delay swapping back to the read-write region of the TB
|
||||
@@ -566,7 +565,7 @@ static void cpu_exec_longjmp_cleanup(CPUState *cpu)
|
||||
|
||||
void cpu_exec_step_atomic(CPUState *cpu)
|
||||
{
|
||||
CPUArchState *env = cpu_env(cpu);
|
||||
CPUArchState *env = cpu->env_ptr;
|
||||
TranslationBlock *tb;
|
||||
vaddr pc;
|
||||
uint64_t cs_base;
|
||||
@@ -718,10 +717,10 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
|
||||
if (cpu->exception_index < 0) {
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
if (replay_has_exception()
|
||||
&& cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0) {
|
||||
&& cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
|
||||
/* Execute just one insn to trigger exception pending in the log */
|
||||
cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
|
||||
| CF_LAST_IO | CF_NOIRQ | 1;
|
||||
| CF_NOIRQ | 1;
|
||||
}
|
||||
#endif
|
||||
return false;
|
||||
@@ -808,7 +807,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
|
||||
* Ensure zeroing happens before reading cpu->exit_request or
|
||||
* cpu->interrupt_request (see also smp_wmb in cpu_exit())
|
||||
*/
|
||||
qatomic_set_mb(&cpu->neg.icount_decr.u16.high, 0);
|
||||
qatomic_set_mb(&cpu_neg(cpu)->icount_decr.u16.high, 0);
|
||||
|
||||
if (unlikely(qatomic_read(&cpu->interrupt_request))) {
|
||||
int interrupt_request;
|
||||
@@ -899,7 +898,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
|
||||
if (unlikely(qatomic_read(&cpu->exit_request))
|
||||
|| (icount_enabled()
|
||||
&& (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT)
|
||||
&& cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0)) {
|
||||
&& cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
|
||||
qatomic_set(&cpu->exit_request, 0);
|
||||
if (cpu->exception_index == -1) {
|
||||
cpu->exception_index = EXCP_INTERRUPT;
|
||||
@@ -924,7 +923,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
|
||||
}
|
||||
|
||||
*last_tb = NULL;
|
||||
insns_left = qatomic_read(&cpu->neg.icount_decr.u32);
|
||||
insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32);
|
||||
if (insns_left < 0) {
|
||||
/* Something asked us to stop executing chained TBs; just
|
||||
* continue round the main loop. Whatever requested the exit
|
||||
@@ -943,7 +942,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
|
||||
icount_update(cpu);
|
||||
/* Refill decrementer and continue execution. */
|
||||
insns_left = MIN(0xffff, cpu->icount_budget);
|
||||
cpu->neg.icount_decr.u16.low = insns_left;
|
||||
cpu_neg(cpu)->icount_decr.u16.low = insns_left;
|
||||
cpu->icount_extra = cpu->icount_budget - insns_left;
|
||||
|
||||
/*
|
||||
@@ -977,7 +976,7 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
|
||||
uint64_t cs_base;
|
||||
uint32_t flags, cflags;
|
||||
|
||||
cpu_get_tb_cpu_state(cpu_env(cpu), &pc, &cs_base, &flags);
|
||||
cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);
|
||||
|
||||
/*
|
||||
* When requested, use an exact setting for cflags for the next
|
||||
@@ -1089,7 +1088,7 @@ int cpu_exec(CPUState *cpu)
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
|
||||
void tcg_exec_realizefn(CPUState *cpu, Error **errp)
|
||||
{
|
||||
static bool tcg_target_initialized;
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
@@ -1105,8 +1104,6 @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
|
||||
tcg_iommu_init_notifier_list(cpu);
|
||||
#endif /* !CONFIG_USER_ONLY */
|
||||
/* qemu_plugin_vcpu_init_hook delayed until cpu_index assigned. */
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/* undo the initializations in reverse order */
|
||||
|
||||
1161
accel/tcg/cputlb.c
File diff suppressed because it is too large
@@ -1,28 +0,0 @@
/*
 * Internal execution defines for qemu (target agnostic)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * SPDX-License-Identifier: LGPL-2.1-or-later
 */

#ifndef ACCEL_TCG_INTERNAL_COMMON_H
#define ACCEL_TCG_INTERNAL_COMMON_H

#include "exec/translation-block.h"

extern int64_t max_delay;
extern int64_t max_advance;

void dump_exec_info(GString *buf);

/*
 * Return true if CS is not running in parallel with other cpus, either
 * because there are no other cpus or we are within an exclusive context.
 */
static inline bool cpu_in_serial_context(CPUState *cs)
{
    return !(cs->tcg_cflags & CF_PARALLEL) || cpu_in_exclusive_context(cs);
}

#endif
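Editor's note: the cpu_in_serial_context() helper introduced in this header gates the atomicity fallbacks used by ldst_atomicity.c.inc further down. The sketch below is not part of the diff; it is a minimal illustration of the intended calling pattern, assuming the usual QEMU internal headers are available, with a hypothetical do_wide_load() standing in for a real accessor.

```c
/* Illustration only -- not part of this series. */
#include "qemu/osdep.h"
#include "hw/core/cpu.h"
#include "exec/exec-all.h"
#include "internal-common.h"

static uint64_t do_wide_load(CPUState *cs, void *haddr, uintptr_t ra)
{
    if (cpu_in_serial_context(cs)) {
        /* No other vCPU can observe a torn value: a plain load is enough. */
        return *(uint64_t *)haddr;
    }
    if (__atomic_always_lock_free(sizeof(uint64_t), haddr)) {
        /* The host can do this load atomically. */
        return __atomic_load_n((uint64_t *)haddr, __ATOMIC_RELAXED);
    }
    /* Otherwise restart the access in an exclusive (serial) context. */
    cpu_loop_exit_atomic(cs, ra);
}
```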
@@ -1,13 +1,13 @@
|
||||
/*
|
||||
* Internal execution defines for qemu (target specific)
|
||||
* Internal execution defines for qemu
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* SPDX-License-Identifier: LGPL-2.1-or-later
|
||||
*/
|
||||
|
||||
#ifndef ACCEL_TCG_INTERNAL_TARGET_H
|
||||
#define ACCEL_TCG_INTERNAL_TARGET_H
|
||||
#ifndef ACCEL_TCG_INTERNAL_H
|
||||
#define ACCEL_TCG_INTERNAL_H
|
||||
|
||||
#include "exec/exec-all.h"
|
||||
#include "exec/translate-all.h"
|
||||
@@ -80,9 +80,6 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
|
||||
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
|
||||
uintptr_t host_pc);
|
||||
|
||||
bool tcg_exec_realizefn(CPUState *cpu, Error **errp);
|
||||
void tcg_exec_unrealizefn(CPUState *cpu);
|
||||
|
||||
/* Return the current PC from CPU, which may be cached in TB. */
|
||||
static inline vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
|
||||
{
|
||||
@@ -93,6 +90,18 @@ static inline vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Return true if CS is not running in parallel with other cpus, either
|
||||
* because there are no other cpus or we are within an exclusive context.
|
||||
*/
|
||||
static inline bool cpu_in_serial_context(CPUState *cs)
|
||||
{
|
||||
return !(cs->tcg_cflags & CF_PARALLEL) || cpu_in_exclusive_context(cs);
|
||||
}
|
||||
|
||||
extern int64_t max_delay;
|
||||
extern int64_t max_advance;
|
||||
|
||||
extern bool one_insn_per_tb;
|
||||
|
||||
/**
|
||||
@@ -26,7 +26,7 @@
|
||||
* If the operation must be split into two operations to be
|
||||
* examined separately for atomicity, return -lg2.
|
||||
*/
|
||||
static int required_atomicity(CPUState *cpu, uintptr_t p, MemOp memop)
|
||||
static int required_atomicity(CPUArchState *env, uintptr_t p, MemOp memop)
|
||||
{
|
||||
MemOp atom = memop & MO_ATOM_MASK;
|
||||
MemOp size = memop & MO_SIZE;
|
||||
@@ -93,7 +93,7 @@ static int required_atomicity(CPUState *cpu, uintptr_t p, MemOp memop)
|
||||
* host atomicity in order to avoid racing. This reduction
|
||||
* avoids looping with cpu_loop_exit_atomic.
|
||||
*/
|
||||
if (cpu_in_serial_context(cpu)) {
|
||||
if (cpu_in_serial_context(env_cpu(env))) {
|
||||
return MO_8;
|
||||
}
|
||||
return atmax;
|
||||
@@ -139,14 +139,14 @@ static inline uint64_t load_atomic8(void *pv)
|
||||
|
||||
/**
|
||||
* load_atomic8_or_exit:
|
||||
* @cpu: generic cpu state
|
||||
* @env: cpu context
|
||||
* @ra: host unwind address
|
||||
* @pv: host address
|
||||
*
|
||||
* Atomically load 8 aligned bytes from @pv.
|
||||
* If this is not possible, longjmp out to restart serially.
|
||||
*/
|
||||
static uint64_t load_atomic8_or_exit(CPUState *cpu, uintptr_t ra, void *pv)
|
||||
static uint64_t load_atomic8_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
|
||||
{
|
||||
if (HAVE_al8) {
|
||||
return load_atomic8(pv);
|
||||
@@ -168,19 +168,19 @@ static uint64_t load_atomic8_or_exit(CPUState *cpu, uintptr_t ra, void *pv)
|
||||
#endif
|
||||
|
||||
/* Ultimate fallback: re-execute in serial context. */
|
||||
cpu_loop_exit_atomic(cpu, ra);
|
||||
cpu_loop_exit_atomic(env_cpu(env), ra);
|
||||
}
|
||||
|
||||
/**
|
||||
* load_atomic16_or_exit:
|
||||
* @cpu: generic cpu state
|
||||
* @env: cpu context
|
||||
* @ra: host unwind address
|
||||
* @pv: host address
|
||||
*
|
||||
* Atomically load 16 aligned bytes from @pv.
|
||||
* If this is not possible, longjmp out to restart serially.
|
||||
*/
|
||||
static Int128 load_atomic16_or_exit(CPUState *cpu, uintptr_t ra, void *pv)
|
||||
static Int128 load_atomic16_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
|
||||
{
|
||||
Int128 *p = __builtin_assume_aligned(pv, 16);
|
||||
|
||||
@@ -212,7 +212,7 @@ static Int128 load_atomic16_or_exit(CPUState *cpu, uintptr_t ra, void *pv)
|
||||
}
|
||||
|
||||
/* Ultimate fallback: re-execute in serial context. */
|
||||
cpu_loop_exit_atomic(cpu, ra);
|
||||
cpu_loop_exit_atomic(env_cpu(env), ra);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -263,7 +263,7 @@ static uint64_t load_atom_extract_al8x2(void *pv)
|
||||
|
||||
/**
|
||||
* load_atom_extract_al8_or_exit:
|
||||
* @cpu: generic cpu state
|
||||
* @env: cpu context
|
||||
* @ra: host unwind address
|
||||
* @pv: host address
|
||||
* @s: object size in bytes, @s <= 4.
|
||||
@@ -273,7 +273,7 @@ static uint64_t load_atom_extract_al8x2(void *pv)
|
||||
* 8-byte load and extract.
|
||||
* The value is returned in the low bits of a uint32_t.
|
||||
*/
|
||||
static uint32_t load_atom_extract_al8_or_exit(CPUState *cpu, uintptr_t ra,
|
||||
static uint32_t load_atom_extract_al8_or_exit(CPUArchState *env, uintptr_t ra,
|
||||
void *pv, int s)
|
||||
{
|
||||
uintptr_t pi = (uintptr_t)pv;
|
||||
@@ -281,12 +281,12 @@ static uint32_t load_atom_extract_al8_or_exit(CPUState *cpu, uintptr_t ra,
|
||||
int shr = (HOST_BIG_ENDIAN ? 8 - s - o : o) * 8;
|
||||
|
||||
pv = (void *)(pi & ~7);
|
||||
return load_atomic8_or_exit(cpu, ra, pv) >> shr;
|
||||
return load_atomic8_or_exit(env, ra, pv) >> shr;
|
||||
}
|
||||
|
||||
/**
|
||||
* load_atom_extract_al16_or_exit:
|
||||
* @cpu: generic cpu state
|
||||
* @env: cpu context
|
||||
* @ra: host unwind address
|
||||
* @p: host address
|
||||
* @s: object size in bytes, @s <= 8.
|
||||
@@ -299,7 +299,7 @@ static uint32_t load_atom_extract_al8_or_exit(CPUState *cpu, uintptr_t ra,
|
||||
*
|
||||
* If this is not possible, longjmp out to restart serially.
|
||||
*/
|
||||
static uint64_t load_atom_extract_al16_or_exit(CPUState *cpu, uintptr_t ra,
|
||||
static uint64_t load_atom_extract_al16_or_exit(CPUArchState *env, uintptr_t ra,
|
||||
void *pv, int s)
|
||||
{
|
||||
uintptr_t pi = (uintptr_t)pv;
|
||||
@@ -312,7 +312,7 @@ static uint64_t load_atom_extract_al16_or_exit(CPUState *cpu, uintptr_t ra,
|
||||
* Provoke SIGBUS if possible otherwise.
|
||||
*/
|
||||
pv = (void *)(pi & ~7);
|
||||
r = load_atomic16_or_exit(cpu, ra, pv);
|
||||
r = load_atomic16_or_exit(env, ra, pv);
|
||||
|
||||
r = int128_urshift(r, shr);
|
||||
return int128_getlo(r);
|
||||
@@ -394,7 +394,7 @@ static inline uint64_t load_atom_8_by_8_or_4(void *pv)
|
||||
*
|
||||
* Load 2 bytes from @p, honoring the atomicity of @memop.
|
||||
*/
|
||||
static uint16_t load_atom_2(CPUState *cpu, uintptr_t ra,
|
||||
static uint16_t load_atom_2(CPUArchState *env, uintptr_t ra,
|
||||
void *pv, MemOp memop)
|
||||
{
|
||||
uintptr_t pi = (uintptr_t)pv;
|
||||
@@ -410,7 +410,7 @@ static uint16_t load_atom_2(CPUState *cpu, uintptr_t ra,
|
||||
}
|
||||
}
|
||||
|
||||
atmax = required_atomicity(cpu, pi, memop);
|
||||
atmax = required_atomicity(env, pi, memop);
|
||||
switch (atmax) {
|
||||
case MO_8:
|
||||
return lduw_he_p(pv);
|
||||
@@ -421,9 +421,9 @@ static uint16_t load_atom_2(CPUState *cpu, uintptr_t ra,
|
||||
return load_atomic4(pv - 1) >> 8;
|
||||
}
|
||||
if ((pi & 15) != 7) {
|
||||
return load_atom_extract_al8_or_exit(cpu, ra, pv, 2);
|
||||
return load_atom_extract_al8_or_exit(env, ra, pv, 2);
|
||||
}
|
||||
return load_atom_extract_al16_or_exit(cpu, ra, pv, 2);
|
||||
return load_atom_extract_al16_or_exit(env, ra, pv, 2);
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
@@ -436,7 +436,7 @@ static uint16_t load_atom_2(CPUState *cpu, uintptr_t ra,
|
||||
*
|
||||
* Load 4 bytes from @p, honoring the atomicity of @memop.
|
||||
*/
|
||||
static uint32_t load_atom_4(CPUState *cpu, uintptr_t ra,
|
||||
static uint32_t load_atom_4(CPUArchState *env, uintptr_t ra,
|
||||
void *pv, MemOp memop)
|
||||
{
|
||||
uintptr_t pi = (uintptr_t)pv;
|
||||
@@ -452,7 +452,7 @@ static uint32_t load_atom_4(CPUState *cpu, uintptr_t ra,
|
||||
}
|
||||
}
|
||||
|
||||
atmax = required_atomicity(cpu, pi, memop);
|
||||
atmax = required_atomicity(env, pi, memop);
|
||||
switch (atmax) {
|
||||
case MO_8:
|
||||
case MO_16:
|
||||
@@ -466,9 +466,9 @@ static uint32_t load_atom_4(CPUState *cpu, uintptr_t ra,
|
||||
return load_atom_extract_al4x2(pv);
|
||||
case MO_32:
|
||||
if (!(pi & 4)) {
|
||||
return load_atom_extract_al8_or_exit(cpu, ra, pv, 4);
|
||||
return load_atom_extract_al8_or_exit(env, ra, pv, 4);
|
||||
}
|
||||
return load_atom_extract_al16_or_exit(cpu, ra, pv, 4);
|
||||
return load_atom_extract_al16_or_exit(env, ra, pv, 4);
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
@@ -481,7 +481,7 @@ static uint32_t load_atom_4(CPUState *cpu, uintptr_t ra,
|
||||
*
|
||||
* Load 8 bytes from @p, honoring the atomicity of @memop.
|
||||
*/
|
||||
static uint64_t load_atom_8(CPUState *cpu, uintptr_t ra,
|
||||
static uint64_t load_atom_8(CPUArchState *env, uintptr_t ra,
|
||||
void *pv, MemOp memop)
|
||||
{
|
||||
uintptr_t pi = (uintptr_t)pv;
|
||||
@@ -498,12 +498,12 @@ static uint64_t load_atom_8(CPUState *cpu, uintptr_t ra,
|
||||
return load_atom_extract_al16_or_al8(pv, 8);
|
||||
}
|
||||
|
||||
atmax = required_atomicity(cpu, pi, memop);
|
||||
atmax = required_atomicity(env, pi, memop);
|
||||
if (atmax == MO_64) {
|
||||
if (!HAVE_al8 && (pi & 7) == 0) {
|
||||
load_atomic8_or_exit(cpu, ra, pv);
|
||||
load_atomic8_or_exit(env, ra, pv);
|
||||
}
|
||||
return load_atom_extract_al16_or_exit(cpu, ra, pv, 8);
|
||||
return load_atom_extract_al16_or_exit(env, ra, pv, 8);
|
||||
}
|
||||
if (HAVE_al8_fast) {
|
||||
return load_atom_extract_al8x2(pv);
|
||||
@@ -519,7 +519,7 @@ static uint64_t load_atom_8(CPUState *cpu, uintptr_t ra,
|
||||
if (HAVE_al8) {
|
||||
return load_atom_extract_al8x2(pv);
|
||||
}
|
||||
cpu_loop_exit_atomic(cpu, ra);
|
||||
cpu_loop_exit_atomic(env_cpu(env), ra);
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
@@ -532,7 +532,7 @@ static uint64_t load_atom_8(CPUState *cpu, uintptr_t ra,
|
||||
*
|
||||
* Load 16 bytes from @p, honoring the atomicity of @memop.
|
||||
*/
|
||||
static Int128 load_atom_16(CPUState *cpu, uintptr_t ra,
|
||||
static Int128 load_atom_16(CPUArchState *env, uintptr_t ra,
|
||||
void *pv, MemOp memop)
|
||||
{
|
||||
uintptr_t pi = (uintptr_t)pv;
|
||||
@@ -548,7 +548,7 @@ static Int128 load_atom_16(CPUState *cpu, uintptr_t ra,
|
||||
return atomic16_read_ro(pv);
|
||||
}
|
||||
|
||||
atmax = required_atomicity(cpu, pi, memop);
|
||||
atmax = required_atomicity(env, pi, memop);
|
||||
switch (atmax) {
|
||||
case MO_8:
|
||||
memcpy(&r, pv, 16);
|
||||
@@ -563,20 +563,20 @@ static Int128 load_atom_16(CPUState *cpu, uintptr_t ra,
|
||||
break;
|
||||
case MO_64:
|
||||
if (!HAVE_al8) {
|
||||
cpu_loop_exit_atomic(cpu, ra);
|
||||
cpu_loop_exit_atomic(env_cpu(env), ra);
|
||||
}
|
||||
a = load_atomic8(pv);
|
||||
b = load_atomic8(pv + 8);
|
||||
break;
|
||||
case -MO_64:
|
||||
if (!HAVE_al8) {
|
||||
cpu_loop_exit_atomic(cpu, ra);
|
||||
cpu_loop_exit_atomic(env_cpu(env), ra);
|
||||
}
|
||||
a = load_atom_extract_al8x2(pv);
|
||||
b = load_atom_extract_al8x2(pv + 8);
|
||||
break;
|
||||
case MO_128:
|
||||
return load_atomic16_or_exit(cpu, ra, pv);
|
||||
return load_atomic16_or_exit(env, ra, pv);
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
@@ -857,7 +857,7 @@ static uint64_t store_whole_le16(void *pv, int size, Int128 val_le)
|
||||
*
|
||||
* Store 2 bytes to @p, honoring the atomicity of @memop.
|
||||
*/
|
||||
static void store_atom_2(CPUState *cpu, uintptr_t ra,
|
||||
static void store_atom_2(CPUArchState *env, uintptr_t ra,
|
||||
void *pv, MemOp memop, uint16_t val)
|
||||
{
|
||||
uintptr_t pi = (uintptr_t)pv;
|
||||
@@ -868,7 +868,7 @@ static void store_atom_2(CPUState *cpu, uintptr_t ra,
|
||||
return;
|
||||
}
|
||||
|
||||
atmax = required_atomicity(cpu, pi, memop);
|
||||
atmax = required_atomicity(env, pi, memop);
|
||||
if (atmax == MO_8) {
|
||||
stw_he_p(pv, val);
|
||||
return;
|
||||
@@ -897,7 +897,7 @@ static void store_atom_2(CPUState *cpu, uintptr_t ra,
|
||||
g_assert_not_reached();
|
||||
}
|
||||
|
||||
cpu_loop_exit_atomic(cpu, ra);
|
||||
cpu_loop_exit_atomic(env_cpu(env), ra);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -908,7 +908,7 @@ static void store_atom_2(CPUState *cpu, uintptr_t ra,
|
||||
*
|
||||
* Store 4 bytes to @p, honoring the atomicity of @memop.
|
||||
*/
|
||||
static void store_atom_4(CPUState *cpu, uintptr_t ra,
|
||||
static void store_atom_4(CPUArchState *env, uintptr_t ra,
|
||||
void *pv, MemOp memop, uint32_t val)
|
||||
{
|
||||
uintptr_t pi = (uintptr_t)pv;
|
||||
@@ -919,7 +919,7 @@ static void store_atom_4(CPUState *cpu, uintptr_t ra,
|
||||
return;
|
||||
}
|
||||
|
||||
atmax = required_atomicity(cpu, pi, memop);
|
||||
atmax = required_atomicity(env, pi, memop);
|
||||
switch (atmax) {
|
||||
case MO_8:
|
||||
stl_he_p(pv, val);
|
||||
@@ -961,7 +961,7 @@ static void store_atom_4(CPUState *cpu, uintptr_t ra,
|
||||
return;
|
||||
}
|
||||
}
|
||||
cpu_loop_exit_atomic(cpu, ra);
|
||||
cpu_loop_exit_atomic(env_cpu(env), ra);
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
@@ -975,7 +975,7 @@ static void store_atom_4(CPUState *cpu, uintptr_t ra,
|
||||
*
|
||||
* Store 8 bytes to @p, honoring the atomicity of @memop.
|
||||
*/
|
||||
static void store_atom_8(CPUState *cpu, uintptr_t ra,
|
||||
static void store_atom_8(CPUArchState *env, uintptr_t ra,
|
||||
void *pv, MemOp memop, uint64_t val)
|
||||
{
|
||||
uintptr_t pi = (uintptr_t)pv;
|
||||
@@ -986,7 +986,7 @@ static void store_atom_8(CPUState *cpu, uintptr_t ra,
|
||||
return;
|
||||
}
|
||||
|
||||
atmax = required_atomicity(cpu, pi, memop);
|
||||
atmax = required_atomicity(env, pi, memop);
|
||||
switch (atmax) {
|
||||
case MO_8:
|
||||
stq_he_p(pv, val);
|
||||
@@ -1029,7 +1029,7 @@ static void store_atom_8(CPUState *cpu, uintptr_t ra,
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
cpu_loop_exit_atomic(cpu, ra);
|
||||
cpu_loop_exit_atomic(env_cpu(env), ra);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1040,7 +1040,7 @@ static void store_atom_8(CPUState *cpu, uintptr_t ra,
|
||||
*
|
||||
* Store 16 bytes to @p, honoring the atomicity of @memop.
|
||||
*/
|
||||
static void store_atom_16(CPUState *cpu, uintptr_t ra,
|
||||
static void store_atom_16(CPUArchState *env, uintptr_t ra,
|
||||
void *pv, MemOp memop, Int128 val)
|
||||
{
|
||||
uintptr_t pi = (uintptr_t)pv;
|
||||
@@ -1052,7 +1052,7 @@ static void store_atom_16(CPUState *cpu, uintptr_t ra,
|
||||
return;
|
||||
}
|
||||
|
||||
atmax = required_atomicity(cpu, pi, memop);
|
||||
atmax = required_atomicity(env, pi, memop);
|
||||
|
||||
a = HOST_BIG_ENDIAN ? int128_gethi(val) : int128_getlo(val);
|
||||
b = HOST_BIG_ENDIAN ? int128_getlo(val) : int128_gethi(val);
|
||||
@@ -1111,5 +1111,5 @@ static void store_atom_16(CPUState *cpu, uintptr_t ra,
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
cpu_loop_exit_atomic(cpu, ra);
|
||||
cpu_loop_exit_atomic(env_cpu(env), ra);
|
||||
}
|
||||
|
||||
@@ -8,231 +8,6 @@
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*/
|
||||
/*
|
||||
* Load helpers for tcg-ldst.h
|
||||
*/
|
||||
|
||||
tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
|
||||
return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
|
||||
}
|
||||
|
||||
tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
|
||||
return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
|
||||
}
|
||||
|
||||
tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
|
||||
return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
|
||||
}
|
||||
|
||||
uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
|
||||
return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
|
||||
}
|
||||
|
||||
/*
|
||||
* Provide signed versions of the load routines as well. We can of course
|
||||
* avoid this for 64-bit data, or for 32-bit data on 32-bit host.
|
||||
*/
|
||||
|
||||
tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
return (int8_t)helper_ldub_mmu(env, addr, oi, retaddr);
|
||||
}
|
||||
|
||||
tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
return (int16_t)helper_lduw_mmu(env, addr, oi, retaddr);
|
||||
}
|
||||
|
||||
tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr);
|
||||
}
|
||||
|
||||
Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
|
||||
return do_ld16_mmu(env_cpu(env), addr, oi, retaddr);
|
||||
}
|
||||
|
||||
Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, uint32_t oi)
|
||||
{
|
||||
return helper_ld16_mmu(env, addr, oi, GETPC());
|
||||
}
|
||||
|
||||
/*
|
||||
* Store helpers for tcg-ldst.h
|
||||
*/
|
||||
|
||||
void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
|
||||
do_st1_mmu(env_cpu(env), addr, val, oi, ra);
|
||||
}
|
||||
|
||||
void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
|
||||
do_st2_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
}
|
||||
|
||||
void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
|
||||
do_st4_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
}
|
||||
|
||||
void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
|
||||
do_st8_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
}
|
||||
|
||||
void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
|
||||
do_st16_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
}
|
||||
|
||||
void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
|
||||
{
|
||||
helper_st16_mmu(env, addr, val, oi, GETPC());
|
||||
}
|
||||
|
||||
/*
|
||||
* Load helpers for cpu_ldst.h
|
||||
*/
|
||||
|
||||
static void plugin_load_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
|
||||
{
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
|
||||
}
|
||||
|
||||
uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
uint8_t ret;
|
||||
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB);
|
||||
ret = do_ld1_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
|
||||
plugin_load_cb(env, addr, oi);
|
||||
return ret;
|
||||
}
|
||||
|
||||
uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
uint16_t ret;
|
||||
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
|
||||
ret = do_ld2_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
|
||||
plugin_load_cb(env, addr, oi);
|
||||
return ret;
|
||||
}
|
||||
|
||||
uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
uint32_t ret;
|
||||
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
|
||||
ret = do_ld4_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
|
||||
plugin_load_cb(env, addr, oi);
|
||||
return ret;
|
||||
}
|
||||
|
||||
uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
uint64_t ret;
|
||||
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
|
||||
ret = do_ld8_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
|
||||
plugin_load_cb(env, addr, oi);
|
||||
return ret;
|
||||
}
|
||||
|
||||
Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
Int128 ret;
|
||||
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
|
||||
ret = do_ld16_mmu(env_cpu(env), addr, oi, ra);
|
||||
plugin_load_cb(env, addr, oi);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Store helpers for cpu_ldst.h
|
||||
*/
|
||||
|
||||
static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
|
||||
{
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
|
||||
}
|
||||
|
||||
void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
helper_stb_mmu(env, addr, val, oi, retaddr);
|
||||
plugin_store_cb(env, addr, oi);
|
||||
}
|
||||
|
||||
void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
|
||||
do_st2_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
plugin_store_cb(env, addr, oi);
|
||||
}
|
||||
|
||||
void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
|
||||
do_st4_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
plugin_store_cb(env, addr, oi);
|
||||
}
|
||||
|
||||
void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
|
||||
do_st8_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
plugin_store_cb(env, addr, oi);
|
||||
}
|
||||
|
||||
void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
|
||||
do_st16_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
plugin_store_cb(env, addr, oi);
|
||||
}
|
||||
|
||||
/*
|
||||
* Wrappers of the above
|
||||
*/
|
||||
|
||||
uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
|
||||
@@ -1,9 +1,7 @@
tcg_ss = ss.source_set()
common_ss.add(when: 'CONFIG_TCG', if_true: files(
  'cpu-exec-common.c',
))
tcg_ss.add(files(
  'tcg-all.c',
  'cpu-exec-common.c',
  'cpu-exec.c',
  'tb-maint.c',
  'tcg-runtime-gvec.c',
@@ -13,19 +11,13 @@ tcg_ss.add(files(
))
tcg_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-exec.c'))
tcg_ss.add(when: 'CONFIG_SYSTEM_ONLY', if_false: files('user-exec-stub.c'))
if get_option('plugins')
  tcg_ss.add(files('plugin-gen.c'))
endif
tcg_ss.add(when: 'CONFIG_PLUGIN', if_true: [files('plugin-gen.c')])
tcg_ss.add(when: libdw, if_true: files('debuginfo.c'))
tcg_ss.add(when: 'CONFIG_LINUX', if_true: files('perf.c'))
specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss)

specific_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files(
  'cputlb.c',
))

system_ss.add(when: ['CONFIG_TCG'], if_true: files(
  'icount-common.c',
  'monitor.c',
))

@@ -16,7 +16,7 @@
|
||||
#include "sysemu/cpu-timers.h"
|
||||
#include "sysemu/tcg.h"
|
||||
#include "tcg/tcg.h"
|
||||
#include "internal-common.h"
|
||||
#include "internal.h"
|
||||
|
||||
|
||||
static void dump_drift_info(GString *buf)
|
||||
|
||||
@@ -104,7 +104,7 @@ static void gen_empty_udata_cb(void)
|
||||
TCGv_ptr udata = tcg_temp_ebb_new_ptr();
|
||||
|
||||
tcg_gen_movi_ptr(udata, 0);
|
||||
tcg_gen_ld_i32(cpu_index, tcg_env,
|
||||
tcg_gen_ld_i32(cpu_index, cpu_env,
|
||||
-offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
|
||||
gen_helper_plugin_vcpu_udata_cb(cpu_index, udata);
|
||||
|
||||
@@ -138,7 +138,7 @@ static void gen_empty_mem_cb(TCGv_i64 addr, uint32_t info)
|
||||
|
||||
tcg_gen_movi_i32(meminfo, info);
|
||||
tcg_gen_movi_ptr(udata, 0);
|
||||
tcg_gen_ld_i32(cpu_index, tcg_env,
|
||||
tcg_gen_ld_i32(cpu_index, cpu_env,
|
||||
-offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
|
||||
|
||||
gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, addr, udata);
|
||||
@@ -157,7 +157,7 @@ static void gen_empty_mem_helper(void)
|
||||
TCGv_ptr ptr = tcg_temp_ebb_new_ptr();
|
||||
|
||||
tcg_gen_movi_ptr(ptr, 0);
|
||||
tcg_gen_st_ptr(ptr, tcg_env, offsetof(CPUState, plugin_mem_cbs) -
|
||||
tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
|
||||
offsetof(ArchCPU, env));
|
||||
tcg_temp_free_ptr(ptr);
|
||||
}
|
||||
@@ -581,7 +581,7 @@ void plugin_gen_disable_mem_helpers(void)
|
||||
if (!tcg_ctx->plugin_tb->mem_helper) {
|
||||
return;
|
||||
}
|
||||
tcg_gen_st_ptr(tcg_constant_ptr(NULL), tcg_env,
|
||||
tcg_gen_st_ptr(tcg_constant_ptr(NULL), cpu_env,
|
||||
offsetof(CPUState, plugin_mem_cbs) - offsetof(ArchCPU, env));
|
||||
}
|
||||
|
||||
@@ -849,7 +849,7 @@ void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
|
||||
} else {
|
||||
if (ptb->vaddr2 == -1) {
|
||||
ptb->vaddr2 = TARGET_PAGE_ALIGN(db->pc_first);
|
||||
get_page_addr_code_hostp(cpu_env(cpu), ptb->vaddr2, &ptb->haddr2);
|
||||
get_page_addr_code_hostp(cpu->env_ptr, ptb->vaddr2, &ptb->haddr2);
|
||||
}
|
||||
pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2;
|
||||
}
|
||||
@@ -866,14 +866,10 @@ void plugin_gen_insn_end(void)
|
||||
* do any clean-up here and make sure things are reset in
|
||||
* plugin_gen_tb_start.
|
||||
*/
|
||||
void plugin_gen_tb_end(CPUState *cpu, size_t num_insns)
|
||||
void plugin_gen_tb_end(CPUState *cpu)
|
||||
{
|
||||
struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
|
||||
|
||||
/* translator may have removed instructions, update final count */
|
||||
g_assert(num_insns <= ptb->n);
|
||||
ptb->n = num_insns;
|
||||
|
||||
/* collect instrumentation requests */
|
||||
qemu_plugin_tb_trans_cb(cpu, ptb);
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Translation Block Maintenance
|
||||
* Translation Block Maintaince
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
@@ -29,8 +29,7 @@
|
||||
#include "tcg/tcg.h"
|
||||
#include "tb-hash.h"
|
||||
#include "tb-context.h"
|
||||
#include "internal-common.h"
|
||||
#include "internal-target.h"
|
||||
#include "internal.h"
|
||||
|
||||
|
||||
/* List iterators for lists of tagged pointers in TranslationBlock. */
|
||||
@@ -208,12 +207,13 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc)
|
||||
{
|
||||
PageDesc *pd;
|
||||
void **lp;
|
||||
int i;
|
||||
|
||||
/* Level 1. Always allocated. */
|
||||
lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
|
||||
|
||||
/* Level 2..N-1. */
|
||||
for (int i = v_l2_levels; i > 0; i--) {
|
||||
for (i = v_l2_levels; i > 0; i--) {
|
||||
void **p = qatomic_rcu_read(lp);
|
||||
|
||||
if (p == NULL) {
|
||||
@@ -1083,8 +1083,7 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
|
||||
if (current_tb_modified) {
|
||||
/* Force execution of one insn next time. */
|
||||
CPUState *cpu = current_cpu;
|
||||
cpu->cflags_next_tb =
|
||||
1 | CF_LAST_IO | CF_NOIRQ | curr_cflags(current_cpu);
|
||||
cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
@@ -1154,8 +1153,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
|
||||
if (current_tb_modified) {
|
||||
page_collection_unlock(pages);
|
||||
/* Force execution of one insn next time. */
|
||||
current_cpu->cflags_next_tb =
|
||||
1 | CF_LAST_IO | CF_NOIRQ | curr_cflags(current_cpu);
|
||||
current_cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);
|
||||
mmap_unlock();
|
||||
cpu_loop_exit_noexc(current_cpu);
|
||||
}
|
||||
|
||||
@@ -111,14 +111,14 @@ void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
     * each vCPU execution. However u16.high can be raised
     * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
     */
    g_assert(cpu->neg.icount_decr.u16.low == 0);
    g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
    g_assert(cpu->icount_extra == 0);

    replay_mutex_lock();

    cpu->icount_budget = MIN(icount_get_limit(), cpu_budget);
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu->neg.icount_decr.u16.low = insns_left;
    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;

    if (cpu->icount_budget == 0) {
@@ -138,7 +138,7 @@ void icount_process_data(CPUState *cpu)
    icount_update(cpu);

    /* Reset the counters */
    cpu->neg.icount_decr.u16.low = 0;
    cpu_neg(cpu)->icount_decr.u16.low = 0;
    cpu->icount_extra = 0;
    cpu->icount_budget = 0;

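Editor's note: a concrete illustration (not part of the diff) of the budget split performed in icount_prepare_for_run() above. The 16-bit decrementer can only hold the low part of the instruction budget; the remainder is parked in icount_extra and refilled later. The numbers are arbitrary example values.

```c
/* Illustration only: splitting a budget that does not fit in 16 bits. */
uint64_t icount_budget = 0x12345;                                /* instructions allowed this slice */
uint16_t low   = icount_budget < 0xffff ? icount_budget : 0xffff; /* 0xffff -> icount_decr.u16.low   */
uint64_t extra = icount_budget - low;                             /* 0x2346 -> cpu->icount_extra     */
```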
@@ -153,7 +153,7 @@ void icount_handle_interrupt(CPUState *cpu, int mask)
|
||||
|
||||
tcg_handle_interrupt(cpu, mask);
|
||||
if (qemu_cpu_is_self(cpu) &&
|
||||
!cpu->neg.can_do_io
|
||||
!cpu->can_do_io
|
||||
&& (mask & ~old_mask) != 0) {
|
||||
cpu_abort(cpu, "Raised interrupt while not in I/O function");
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@
|
||||
#include "qemu/guest-random.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "hw/boards.h"
|
||||
#include "tcg/startup.h"
|
||||
#include "tcg/tcg.h"
|
||||
#include "tcg-accel-ops.h"
|
||||
#include "tcg-accel-ops-mttcg.h"
|
||||
|
||||
@@ -80,7 +80,7 @@ static void *mttcg_cpu_thread_fn(void *arg)
|
||||
qemu_thread_get_self(cpu->thread);
|
||||
|
||||
cpu->thread_id = qemu_get_thread_id();
|
||||
cpu->neg.can_do_io = true;
|
||||
cpu->can_do_io = 1;
|
||||
current_cpu = cpu;
|
||||
cpu_thread_signal_created(cpu);
|
||||
qemu_guest_random_seed_thread_part2(cpu->random_seed);
|
||||
@@ -100,9 +100,14 @@ static void *mttcg_cpu_thread_fn(void *arg)
|
||||
break;
|
||||
case EXCP_HALTED:
|
||||
/*
|
||||
* Usually cpu->halted is set, but may have already been
|
||||
* reset by another thread by the time we arrive here.
|
||||
* during start-up the vCPU is reset and the thread is
|
||||
* kicked several times. If we don't ensure we go back
|
||||
* to sleep in the halted state we won't cleanly
|
||||
* start-up when the vCPU is enabled.
|
||||
*
|
||||
* cpu->halted should ensure we sleep in wait_io_event
|
||||
*/
|
||||
g_assert(cpu->halted);
|
||||
break;
|
||||
case EXCP_ATOMIC:
|
||||
qemu_mutex_unlock_iothread();
|
||||
|
||||
@@ -32,7 +32,7 @@
|
||||
#include "qemu/notify.h"
|
||||
#include "qemu/guest-random.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "tcg/startup.h"
|
||||
#include "tcg/tcg.h"
|
||||
#include "tcg-accel-ops.h"
|
||||
#include "tcg-accel-ops-rr.h"
|
||||
#include "tcg-accel-ops-icount.h"
|
||||
@@ -192,7 +192,7 @@ static void *rr_cpu_thread_fn(void *arg)
|
||||
qemu_thread_get_self(cpu->thread);
|
||||
|
||||
cpu->thread_id = qemu_get_thread_id();
|
||||
cpu->neg.can_do_io = true;
|
||||
cpu->can_do_io = 1;
|
||||
cpu_thread_signal_created(cpu);
|
||||
qemu_guest_random_seed_thread_part2(cpu->random_seed);
|
||||
|
||||
@@ -334,7 +334,7 @@ void rr_start_vcpu_thread(CPUState *cpu)
|
||||
cpu->thread = single_tcg_cpu_thread;
|
||||
cpu->halt_cond = single_tcg_halt_cond;
|
||||
cpu->thread_id = first_cpu->thread_id;
|
||||
cpu->neg.can_do_io = 1;
|
||||
cpu->can_do_io = 1;
|
||||
cpu->created = true;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -91,7 +91,7 @@ void tcg_handle_interrupt(CPUState *cpu, int mask)
|
||||
if (!qemu_cpu_is_self(cpu)) {
|
||||
qemu_cpu_kick(cpu);
|
||||
} else {
|
||||
qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
|
||||
qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -27,7 +27,7 @@
|
||||
#include "sysemu/tcg.h"
|
||||
#include "exec/replay-core.h"
|
||||
#include "sysemu/cpu-timers.h"
|
||||
#include "tcg/startup.h"
|
||||
#include "tcg/tcg.h"
|
||||
#include "tcg/oversized-guest.h"
|
||||
#include "qapi/error.h"
|
||||
#include "qemu/error-report.h"
|
||||
@@ -38,7 +38,7 @@
|
||||
#if !defined(CONFIG_USER_ONLY)
|
||||
#include "hw/boards.h"
|
||||
#endif
|
||||
#include "internal-target.h"
|
||||
#include "internal.h"
|
||||
|
||||
struct TCGState {
|
||||
AccelState parent_obj;
|
||||
@@ -121,7 +121,7 @@ static int tcg_init_machine(MachineState *ms)
|
||||
* There's no guest base to take into account, so go ahead and
|
||||
* initialize the prologue now.
|
||||
*/
|
||||
tcg_prologue_init();
|
||||
tcg_prologue_init(tcg_ctx);
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
@@ -227,8 +227,6 @@ static void tcg_accel_class_init(ObjectClass *oc, void *data)
|
||||
AccelClass *ac = ACCEL_CLASS(oc);
|
||||
ac->name = "tcg";
|
||||
ac->init_machine = tcg_init_machine;
|
||||
ac->cpu_common_realize = tcg_exec_realizefn;
|
||||
ac->cpu_common_unrealize = tcg_exec_unrealizefn;
|
||||
ac->allowed = &tcg_allowed;
|
||||
ac->gdbstub_supported_sstep_flags = tcg_gdbstub_supported_sstep_flags;
|
||||
|
||||
|
||||
@@ -1042,32 +1042,6 @@ DO_CMP2(64)
|
||||
#undef DO_CMP1
|
||||
#undef DO_CMP2
|
||||
|
||||
#define DO_CMP1(NAME, TYPE, OP) \
|
||||
void HELPER(NAME)(void *d, void *a, uint64_t b64, uint32_t desc) \
|
||||
{ \
|
||||
intptr_t oprsz = simd_oprsz(desc); \
|
||||
TYPE inv = simd_data(desc), b = b64; \
|
||||
for (intptr_t i = 0; i < oprsz; i += sizeof(TYPE)) { \
|
||||
*(TYPE *)(d + i) = -((*(TYPE *)(a + i) OP b) ^ inv); \
|
||||
} \
|
||||
clear_high(d, oprsz, desc); \
|
||||
}
|
||||
|
||||
#define DO_CMP2(SZ) \
|
||||
DO_CMP1(gvec_eqs##SZ, uint##SZ##_t, ==) \
|
||||
DO_CMP1(gvec_lts##SZ, int##SZ##_t, <) \
|
||||
DO_CMP1(gvec_les##SZ, int##SZ##_t, <=) \
|
||||
DO_CMP1(gvec_ltus##SZ, uint##SZ##_t, <) \
|
||||
DO_CMP1(gvec_leus##SZ, uint##SZ##_t, <=)
|
||||
|
||||
DO_CMP2(8)
|
||||
DO_CMP2(16)
|
||||
DO_CMP2(32)
|
||||
DO_CMP2(64)
|
||||
|
||||
#undef DO_CMP1
|
||||
#undef DO_CMP2
|
||||
|
||||
void HELPER(gvec_ssadd8)(void *d, void *a, void *b, uint32_t desc)
|
||||
{
|
||||
intptr_t oprsz = simd_oprsz(desc);
|
||||
|
||||
@@ -297,29 +297,4 @@ DEF_HELPER_FLAGS_4(gvec_leu16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_leu32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_leu64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
|
||||
|
||||
DEF_HELPER_FLAGS_4(gvec_eqs8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_eqs16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_eqs32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_eqs64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
|
||||
DEF_HELPER_FLAGS_4(gvec_lts8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_lts16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_lts32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_lts64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
|
||||
DEF_HELPER_FLAGS_4(gvec_les8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_les16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_les32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_les64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
|
||||
DEF_HELPER_FLAGS_4(gvec_ltus8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_ltus16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_ltus32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_ltus64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
|
||||
DEF_HELPER_FLAGS_4(gvec_leus8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_leus16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_leus32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_leus64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
|
||||
DEF_HELPER_FLAGS_5(gvec_bitsel, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
|
||||
|
||||
@@ -61,8 +61,7 @@
|
||||
#include "tb-jmp-cache.h"
|
||||
#include "tb-hash.h"
|
||||
#include "tb-context.h"
|
||||
#include "internal-common.h"
|
||||
#include "internal-target.h"
|
||||
#include "internal.h"
|
||||
#include "perf.h"
|
||||
#include "tcg/insn-start-words.h"
|
||||
|
||||
@@ -215,7 +214,7 @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
|
||||
* Reset the cycle counter to the start of the block and
|
||||
* shift if to the number of actually executed instructions.
|
||||
*/
|
||||
cpu->neg.icount_decr.u16.low += insns_left;
|
||||
cpu_neg(cpu)->icount_decr.u16.low += insns_left;
|
||||
}
|
||||
|
||||
cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data);
|
||||
@@ -289,7 +288,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
|
||||
vaddr pc, uint64_t cs_base,
|
||||
uint32_t flags, int cflags)
|
||||
{
|
||||
CPUArchState *env = cpu_env(cpu);
|
||||
CPUArchState *env = cpu->env_ptr;
|
||||
TranslationBlock *tb, *existing_tb;
|
||||
tb_page_addr_t phys_pc, phys_p2;
|
||||
tcg_insn_unit *gen_code_buf;
|
||||
@@ -345,6 +344,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
|
||||
tcg_ctx->page_bits = TARGET_PAGE_BITS;
|
||||
tcg_ctx->page_mask = TARGET_PAGE_MASK;
|
||||
tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS;
|
||||
tcg_ctx->tlb_fast_offset =
|
||||
(int)offsetof(ArchCPU, neg.tlb.f) - (int)offsetof(ArchCPU, env);
|
||||
#endif
|
||||
tcg_ctx->insn_start_words = TARGET_INSN_START_WORDS;
|
||||
#ifdef TCG_GUEST_DEFAULT_MO
|
||||
@@ -579,7 +580,7 @@ void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
|
||||
} else {
|
||||
/* The exception probably happened in a helper. The CPU state should
|
||||
have been saved before calling it. Fetch the PC from there. */
|
||||
CPUArchState *env = cpu_env(cpu);
|
||||
CPUArchState *env = cpu->env_ptr;
|
||||
vaddr pc;
|
||||
uint64_t cs_base;
|
||||
tb_page_addr_t addr;
|
||||
@@ -622,7 +623,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
|
||||
cc = CPU_GET_CLASS(cpu);
|
||||
if (cc->tcg_ops->io_recompile_replay_branch &&
|
||||
cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
|
||||
cpu->neg.icount_decr.u16.low++;
|
||||
cpu_neg(cpu)->icount_decr.u16.low++;
|
||||
n = 2;
|
||||
}
|
||||
|
||||
@@ -778,7 +779,7 @@ void cpu_interrupt(CPUState *cpu, int mask)
|
||||
{
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
cpu->interrupt_request |= mask;
|
||||
qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
|
||||
qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_USER_ONLY */
|
||||
|
||||
@@ -14,23 +14,28 @@
#include "exec/translator.h"
#include "exec/plugin-gen.h"
#include "tcg/tcg-op-common.h"
#include "internal-target.h"
#include "internal.h"

static void set_can_do_io(DisasContextBase *db, bool val)
static void gen_io_start(void)
{
    if (db->saved_can_do_io != val) {
        db->saved_can_do_io = val;

        QEMU_BUILD_BUG_ON(sizeof_field(CPUState, neg.can_do_io) != 1);
        tcg_gen_st8_i32(tcg_constant_i32(val), tcg_env,
                        offsetof(ArchCPU, parent_obj.neg.can_do_io) -
                        offsetof(ArchCPU, env));
    }
    tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
                   offsetof(ArchCPU, parent_obj.can_do_io) -
                   offsetof(ArchCPU, env));
}

bool translator_io_start(DisasContextBase *db)
{
    set_can_do_io(db, true);
    uint32_t cflags = tb_cflags(db->tb);

    if (!(cflags & CF_USE_ICOUNT)) {
        return false;
    }
    if (db->num_insns == db->max_insns && (cflags & CF_LAST_IO)) {
        /* Already started in translator_loop. */
        return true;
    }

    gen_io_start();

    /*
     * Ensure that this instruction will be the last in the TB.
@@ -42,17 +47,14 @@ bool translator_io_start(DisasContextBase *db)
    return true;
}

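Editor's note: a hedged sketch (not part of the diff) of how a target translator typically consumes translator_io_start() shown above. It assumes the common pattern where the target's DisasContext embeds DisasContextBase as its first member named base; gen_load_from_mmio() and gen_helper_mmio_read() are placeholders, not real QEMU symbols.

```c
/* Illustration only: typical caller pattern in a target's translate.c. */
static void gen_load_from_mmio(DisasContext *s, TCGv_i64 dest, TCGv addr)
{
    if (translator_io_start(&s->base)) {
        /*
         * icount is active: can_do_io has been set for this insn and the
         * translator has been asked to end the TB after it, so the timer
         * state observed by the I/O access stays deterministic.
         */
    }
    gen_helper_mmio_read(dest, tcg_env, addr);   /* placeholder helper */
}
```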
static TCGOp *gen_tb_start(DisasContextBase *db, uint32_t cflags)
|
||||
static TCGOp *gen_tb_start(uint32_t cflags)
|
||||
{
|
||||
TCGv_i32 count = NULL;
|
||||
TCGv_i32 count = tcg_temp_new_i32();
|
||||
TCGOp *icount_start_insn = NULL;
|
||||
|
||||
if ((cflags & CF_USE_ICOUNT) || !(cflags & CF_NOIRQ)) {
|
||||
count = tcg_temp_new_i32();
|
||||
tcg_gen_ld_i32(count, tcg_env,
|
||||
offsetof(ArchCPU, parent_obj.neg.icount_decr.u32)
|
||||
- offsetof(ArchCPU, env));
|
||||
}
|
||||
tcg_gen_ld_i32(count, cpu_env,
|
||||
offsetof(ArchCPU, neg.icount_decr.u32) -
|
||||
offsetof(ArchCPU, env));
|
||||
|
||||
if (cflags & CF_USE_ICOUNT) {
|
||||
/*
|
||||
@@ -79,18 +81,21 @@ static TCGOp *gen_tb_start(DisasContextBase *db, uint32_t cflags)
|
||||
}
|
||||
|
||||
if (cflags & CF_USE_ICOUNT) {
|
||||
tcg_gen_st16_i32(count, tcg_env,
|
||||
offsetof(ArchCPU, parent_obj.neg.icount_decr.u16.low)
|
||||
- offsetof(ArchCPU, env));
|
||||
tcg_gen_st16_i32(count, cpu_env,
|
||||
offsetof(ArchCPU, neg.icount_decr.u16.low) -
|
||||
offsetof(ArchCPU, env));
|
||||
/*
|
||||
* cpu->can_do_io is cleared automatically here at the beginning of
|
||||
* each translation block. The cost is minimal and only paid for
|
||||
* -icount, plus it would be very easy to forget doing it in the
|
||||
* translator. Doing it here means we don't need a gen_io_end() to
|
||||
* go with gen_io_start().
|
||||
*/
|
||||
tcg_gen_st_i32(tcg_constant_i32(0), cpu_env,
|
||||
offsetof(ArchCPU, parent_obj.can_do_io) -
|
||||
offsetof(ArchCPU, env));
|
||||
}
|
||||
|
||||
/*
|
||||
* cpu->neg.can_do_io is set automatically here at the beginning of
|
||||
* each translation block. The cost is minimal, plus it would be
|
||||
* very easy to forget doing it in the translator.
|
||||
*/
|
||||
set_can_do_io(db, db->max_insns == 1 && (cflags & CF_LAST_IO));
|
||||
|
||||
return icount_start_insn;
|
||||
}
|
||||
|
||||
@@ -139,7 +144,6 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
|
||||
db->num_insns = 0;
|
||||
db->max_insns = *max_insns;
|
||||
db->singlestep_enabled = cflags & CF_SINGLE_STEP;
|
||||
db->saved_can_do_io = -1;
|
||||
db->host_addr[0] = host_pc;
|
||||
db->host_addr[1] = NULL;
|
||||
|
||||
@@ -147,18 +151,11 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
|
||||
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
|
||||
|
||||
/* Start translating. */
|
||||
icount_start_insn = gen_tb_start(db, cflags);
|
||||
icount_start_insn = gen_tb_start(cflags);
|
||||
ops->tb_start(db, cpu);
|
||||
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
|
||||
|
||||
if (cflags & CF_MEMI_ONLY) {
|
||||
/* We should only see CF_MEMI_ONLY for io_recompile. */
|
||||
assert(cflags & CF_LAST_IO);
|
||||
plugin_enabled = plugin_gen_tb_start(cpu, db, true);
|
||||
} else {
|
||||
plugin_enabled = plugin_gen_tb_start(cpu, db, false);
|
||||
}
|
||||
db->plugin_enabled = plugin_enabled;
|
||||
plugin_enabled = plugin_gen_tb_start(cpu, db, cflags & CF_MEMI_ONLY);
|
||||
|
||||
while (true) {
|
||||
*max_insns = ++db->num_insns;
|
||||
@@ -175,9 +172,13 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
|
||||
the next instruction. */
|
||||
if (db->num_insns == db->max_insns && (cflags & CF_LAST_IO)) {
|
||||
/* Accept I/O on the last instruction. */
|
||||
set_can_do_io(db, true);
|
||||
gen_io_start();
|
||||
ops->translate_insn(db, cpu);
|
||||
} else {
|
||||
/* we should only see CF_MEMI_ONLY for io_recompile */
|
||||
tcg_debug_assert(!(cflags & CF_MEMI_ONLY));
|
||||
ops->translate_insn(db, cpu);
|
||||
}
|
||||
ops->translate_insn(db, cpu);
|
||||
|
||||
/*
|
||||
* We can't instrument after instructions that change control
|
||||
@@ -210,7 +211,7 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
|
||||
gen_tb_end(tb, cflags, icount_start_insn, db->num_insns);
|
||||
|
||||
if (plugin_enabled) {
|
||||
plugin_gen_tb_end(cpu, db->num_insns);
|
||||
plugin_gen_tb_end(cpu);
|
||||
}
|
||||
|
||||
/* The disas_log hook may use these values rather than recompute. */
|
||||
|
||||
@@ -2,6 +2,8 @@
|
||||
#include "hw/core/cpu.h"
|
||||
#include "exec/replay-core.h"
|
||||
|
||||
bool enable_cpu_pm = false;
|
||||
|
||||
void cpu_resume(CPUState *cpu)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -29,8 +29,7 @@
|
||||
#include "qemu/atomic128.h"
|
||||
#include "trace/trace-root.h"
|
||||
#include "tcg/tcg-ldst.h"
|
||||
#include "internal-common.h"
|
||||
#include "internal-target.h"
|
||||
#include "internal.h"
|
||||
|
||||
__thread uintptr_t helper_retaddr;
|
||||
|
||||
@@ -940,9 +939,9 @@ void *page_get_target_data(target_ulong address)
|
||||
void page_reset_target_data(target_ulong start, target_ulong last) { }
|
||||
#endif /* TARGET_PAGE_DATA_SIZE */
|
||||
|
||||
/* The system-mode versions of these helpers are in cputlb.c. */
|
||||
/* The softmmu versions of these helpers are in cputlb.c. */
|
||||
|
||||
static void *cpu_mmu_lookup(CPUState *cpu, vaddr addr,
|
||||
static void *cpu_mmu_lookup(CPUArchState *env, vaddr addr,
|
||||
MemOp mop, uintptr_t ra, MMUAccessType type)
|
||||
{
|
||||
int a_bits = get_alignment_bits(mop);
|
||||
@@ -950,39 +949,60 @@ static void *cpu_mmu_lookup(CPUState *cpu, vaddr addr,
|
||||
|
||||
/* Enforce guest required alignment. */
|
||||
if (unlikely(addr & ((1 << a_bits) - 1))) {
|
||||
cpu_loop_exit_sigbus(cpu, addr, type, ra);
|
||||
cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
|
||||
}
|
||||
|
||||
ret = g2h(cpu, addr);
|
||||
ret = g2h(env_cpu(env), addr);
|
||||
set_helper_retaddr(ra);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#include "ldst_atomicity.c.inc"
|
||||
|
||||
static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
|
||||
uintptr_t ra, MMUAccessType access_type)
|
||||
static uint8_t do_ld1_mmu(CPUArchState *env, abi_ptr addr,
|
||||
MemOp mop, uintptr_t ra)
|
||||
{
|
||||
void *haddr;
|
||||
uint8_t ret;
|
||||
|
||||
tcg_debug_assert((mop & MO_SIZE) == MO_8);
|
||||
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
|
||||
haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, access_type);
|
||||
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
|
||||
ret = ldub_p(haddr);
|
||||
clear_helper_retaddr();
|
||||
return ret;
|
||||
}
|
||||
|
||||
static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
|
||||
uintptr_t ra, MMUAccessType access_type)
|
||||
tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
return do_ld1_mmu(env, addr, get_memop(oi), ra);
|
||||
}
|
||||
|
||||
tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
return (int8_t)do_ld1_mmu(env, addr, get_memop(oi), ra);
|
||||
}
|
||||
|
||||
uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
uint8_t ret = do_ld1_mmu(env, addr, get_memop(oi), ra);
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}

static uint16_t do_ld2_mmu(CPUArchState *env, abi_ptr addr,
MemOp mop, uintptr_t ra)
{
void *haddr;
uint16_t ret;
MemOp mop = get_memop(oi);

tcg_debug_assert((mop & MO_SIZE) == MO_16);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
ret = load_atom_2(cpu, ra, haddr, mop);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_2(env, ra, haddr, mop);
clear_helper_retaddr();

if (mop & MO_BSWAP) {
@@ -991,16 +1011,36 @@ static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
return ret;
}

static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType access_type)
tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
MemOpIdx oi, uintptr_t ra)
{
return do_ld2_mmu(env, addr, get_memop(oi), ra);
}

tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
MemOpIdx oi, uintptr_t ra)
{
return (int16_t)do_ld2_mmu(env, addr, get_memop(oi), ra);
}

uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
uint16_t ret = do_ld2_mmu(env, addr, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}

static uint32_t do_ld4_mmu(CPUArchState *env, abi_ptr addr,
MemOp mop, uintptr_t ra)
{
void *haddr;
uint32_t ret;
MemOp mop = get_memop(oi);

tcg_debug_assert((mop & MO_SIZE) == MO_32);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
ret = load_atom_4(cpu, ra, haddr, mop);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_4(env, ra, haddr, mop);
clear_helper_retaddr();

if (mop & MO_BSWAP) {
@@ -1009,16 +1049,36 @@ static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
return ret;
}

static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType access_type)
tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
MemOpIdx oi, uintptr_t ra)
{
return do_ld4_mmu(env, addr, get_memop(oi), ra);
}

tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
MemOpIdx oi, uintptr_t ra)
{
return (int32_t)do_ld4_mmu(env, addr, get_memop(oi), ra);
}

uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
uint32_t ret = do_ld4_mmu(env, addr, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}

static uint64_t do_ld8_mmu(CPUArchState *env, abi_ptr addr,
MemOp mop, uintptr_t ra)
{
void *haddr;
uint64_t ret;
MemOp mop = get_memop(oi);

tcg_debug_assert((mop & MO_SIZE) == MO_64);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
ret = load_atom_8(cpu, ra, haddr, mop);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_8(env, ra, haddr, mop);
clear_helper_retaddr();

if (mop & MO_BSWAP) {
@@ -1027,17 +1087,30 @@ static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
return ret;
}

static Int128 do_ld16_mmu(CPUState *cpu, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
MemOpIdx oi, uintptr_t ra)
{
return do_ld8_mmu(env, addr, get_memop(oi), ra);
}

uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
uint64_t ret = do_ld8_mmu(env, addr, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}

static Int128 do_ld16_mmu(CPUArchState *env, abi_ptr addr,
MemOp mop, uintptr_t ra)
{
void *haddr;
Int128 ret;
MemOp mop = get_memop(oi);

tcg_debug_assert((mop & MO_SIZE) == MO_128);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_16(cpu, ra, haddr, mop);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_16(env, ra, haddr, mop);
clear_helper_retaddr();

if (mop & MO_BSWAP) {
@@ -1046,81 +1119,171 @@ static Int128 do_ld16_mmu(CPUState *cpu, abi_ptr addr,
return ret;
}

static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
MemOpIdx oi, uintptr_t ra)
{
return do_ld16_mmu(env, addr, get_memop(oi), ra);
}

Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, MemOpIdx oi)
{
return helper_ld16_mmu(env, addr, oi, GETPC());
}

Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
Int128 ret = do_ld16_mmu(env, addr, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}

static void do_st1_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
MemOp mop, uintptr_t ra)
{
void *haddr;

tcg_debug_assert((mop & MO_SIZE) == MO_8);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, MMU_DATA_STORE);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
stb_p(haddr, val);
clear_helper_retaddr();
}

static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
MemOpIdx oi, uintptr_t ra)
void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
MemOpIdx oi, uintptr_t ra)
{
do_st1_mmu(env, addr, val, get_memop(oi), ra);
}

void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
MemOpIdx oi, uintptr_t ra)
{
do_st1_mmu(env, addr, val, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

static void do_st2_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
MemOp mop, uintptr_t ra)
{
void *haddr;
MemOp mop = get_memop(oi);

tcg_debug_assert((mop & MO_SIZE) == MO_16);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);

if (mop & MO_BSWAP) {
val = bswap16(val);
}
store_atom_2(cpu, ra, haddr, mop, val);
store_atom_2(env, ra, haddr, mop, val);
clear_helper_retaddr();
}

static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
MemOpIdx oi, uintptr_t ra)
void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
MemOpIdx oi, uintptr_t ra)
{
do_st2_mmu(env, addr, val, get_memop(oi), ra);
}

void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
MemOpIdx oi, uintptr_t ra)
{
do_st2_mmu(env, addr, val, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

static void do_st4_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
MemOp mop, uintptr_t ra)
{
void *haddr;
MemOp mop = get_memop(oi);

tcg_debug_assert((mop & MO_SIZE) == MO_32);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);

if (mop & MO_BSWAP) {
val = bswap32(val);
}
store_atom_4(cpu, ra, haddr, mop, val);
store_atom_4(env, ra, haddr, mop, val);
clear_helper_retaddr();
}

static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
MemOpIdx oi, uintptr_t ra)
void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
MemOpIdx oi, uintptr_t ra)
{
do_st4_mmu(env, addr, val, get_memop(oi), ra);
}

void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
MemOpIdx oi, uintptr_t ra)
{
do_st4_mmu(env, addr, val, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

static void do_st8_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
MemOp mop, uintptr_t ra)
{
void *haddr;
MemOp mop = get_memop(oi);

tcg_debug_assert((mop & MO_SIZE) == MO_64);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);

if (mop & MO_BSWAP) {
val = bswap64(val);
}
store_atom_8(cpu, ra, haddr, mop, val);
store_atom_8(env, ra, haddr, mop, val);
clear_helper_retaddr();
}

static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
MemOpIdx oi, uintptr_t ra)
void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
MemOpIdx oi, uintptr_t ra)
{
do_st8_mmu(env, addr, val, get_memop(oi), ra);
}

void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
MemOpIdx oi, uintptr_t ra)
{
do_st8_mmu(env, addr, val, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

static void do_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
MemOp mop, uintptr_t ra)
{
void *haddr;
MemOpIdx mop = get_memop(oi);

tcg_debug_assert((mop & MO_SIZE) == MO_128);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);

if (mop & MO_BSWAP) {
val = bswap128(val);
}
store_atom_16(cpu, ra, haddr, mop, val);
store_atom_16(env, ra, haddr, mop, val);
clear_helper_retaddr();
}

void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
MemOpIdx oi, uintptr_t ra)
{
do_st16_mmu(env, addr, val, get_memop(oi), ra);
}

void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
{
helper_st16_mmu(env, addr, val, oi, GETPC());
}

void cpu_st16_mmu(CPUArchState *env, abi_ptr addr,
Int128 val, MemOpIdx oi, uintptr_t ra)
{
do_st16_mmu(env, addr, val, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
uint32_t ret;
@@ -1167,7 +1330,7 @@ uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
void *haddr;
uint8_t ret;

haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_INST_FETCH);
ret = ldub_p(haddr);
clear_helper_retaddr();
return ret;
@@ -1179,7 +1342,7 @@ uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
void *haddr;
uint16_t ret;

haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_INST_FETCH);
ret = lduw_p(haddr);
clear_helper_retaddr();
if (get_memop(oi) & MO_BSWAP) {
@@ -1194,7 +1357,7 @@ uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
void *haddr;
uint32_t ret;

haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_INST_FETCH);
ret = ldl_p(haddr);
clear_helper_retaddr();
if (get_memop(oi) & MO_BSWAP) {
@@ -1209,7 +1372,7 @@ uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
void *haddr;
uint64_t ret;

haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = ldq_p(haddr);
clear_helper_retaddr();
if (get_memop(oi) & MO_BSWAP) {
@@ -1223,7 +1386,7 @@ uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
/*
* Do not allow unaligned operations to proceed. Return the host address.
*/
static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
int size, uintptr_t retaddr)
{
MemOp mop = get_memop(oi);
@@ -1232,15 +1395,15 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,

/* Enforce guest required alignment. */
if (unlikely(addr & ((1 << a_bits) - 1))) {
cpu_loop_exit_sigbus(cpu, addr, MMU_DATA_STORE, retaddr);
cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, retaddr);
}

/* Enforce qemu required alignment. */
if (unlikely(addr & (size - 1))) {
cpu_loop_exit_atomic(cpu, retaddr);
cpu_loop_exit_atomic(env_cpu(env), retaddr);
}

ret = g2h(cpu, addr);
ret = g2h(env_cpu(env), addr);
set_helper_retaddr(retaddr);
return ret;
}

@@ -904,7 +904,7 @@ static void alsa_init_per_direction(AudiodevAlsaPerDirectionOptions *apdo)
}
}

static void *alsa_audio_init(Audiodev *dev, Error **errp)
static void *alsa_audio_init(Audiodev *dev)
{
AudiodevAlsaOptions *aopts;
assert(dev->driver == AUDIODEV_DRIVER_ALSA);
@@ -960,6 +960,7 @@ static struct audio_driver alsa_audio_driver = {
.init = alsa_audio_init,
.fini = alsa_audio_fini,
.pcm_ops = &alsa_pcm_ops,
.can_be_default = 1,
.max_voices_out = INT_MAX,
.max_voices_in = INT_MAX,
.voice_size_out = sizeof (ALSAVoiceOut),

@@ -26,7 +26,6 @@
#include "audio/audio.h"
#include "monitor/hmp.h"
#include "monitor/monitor.h"
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"

static QLIST_HEAD (capture_list_head, CaptureState) capture_head;
@@ -66,11 +65,10 @@ void hmp_wavcapture(Monitor *mon, const QDict *qdict)
int nchannels = qdict_get_try_int(qdict, "nchannels", 2);
const char *audiodev = qdict_get_str(qdict, "audiodev");
CaptureState *s;
Error *local_err = NULL;
AudioState *as = audio_state_by_name(audiodev, &local_err);
AudioState *as = audio_state_by_name(audiodev);

if (!as) {
error_report_err(local_err);
monitor_printf(mon, "Audiodev '%s' not found\n", audiodev);
return;
}

audio/audio.c
@@ -32,9 +32,7 @@
#include "qapi/qobject-input-visitor.h"
#include "qapi/qapi-visit-audio.h"
#include "qapi/qapi-commands-audio.h"
#include "qapi/qmp/qdict.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/help_option.h"
@@ -63,22 +61,19 @@ const char *audio_prio_list[] = {
"spice",
CONFIG_AUDIO_DRIVERS
"none",
"wav",
NULL
};

static QLIST_HEAD(, audio_driver) audio_drivers;
static AudiodevListHead audiodevs =
QSIMPLEQ_HEAD_INITIALIZER(audiodevs);
static AudiodevListHead default_audiodevs =
QSIMPLEQ_HEAD_INITIALIZER(default_audiodevs);

static AudiodevListHead audiodevs = QSIMPLEQ_HEAD_INITIALIZER(audiodevs);

void audio_driver_register(audio_driver *drv)
{
QLIST_INSERT_HEAD(&audio_drivers, drv, next);
}

static audio_driver *audio_driver_lookup(const char *name)
audio_driver *audio_driver_lookup(const char *name)
{
struct audio_driver *d;
Error *local_err = NULL;
@@ -104,7 +99,6 @@ static audio_driver *audio_driver_lookup(const char *name)

static QTAILQ_HEAD(AudioStateHead, AudioState) audio_states =
QTAILQ_HEAD_INITIALIZER(audio_states);
static AudioState *default_audio_state;

const struct mixeng_volume nominal_volume = {
.mute = 0,
@@ -117,6 +111,8 @@ const struct mixeng_volume nominal_volume = {
#endif
};

static bool legacy_config = true;

int audio_bug (const char *funcname, int cond)
{
if (cond) {
@@ -1557,11 +1553,9 @@ size_t audio_generic_read(HWVoiceIn *hw, void *buf, size_t size)
}

static int audio_driver_init(AudioState *s, struct audio_driver *drv,
Audiodev *dev, Error **errp)
bool msg, Audiodev *dev)
{
Error *local_err = NULL;

s->drv_opaque = drv->init(dev, &local_err);
s->drv_opaque = drv->init(dev);

if (s->drv_opaque) {
if (!drv->pcm_ops->get_buffer_in) {
@@ -1573,15 +1567,13 @@ static int audio_driver_init(AudioState *s, struct audio_driver *drv,
drv->pcm_ops->put_buffer_out = audio_generic_put_buffer_out;
}

audio_init_nb_voices_out(s, drv, 1);
audio_init_nb_voices_in(s, drv, 0);
audio_init_nb_voices_out(s, drv);
audio_init_nb_voices_in(s, drv);
s->drv = drv;
return 0;
} else {
if (local_err) {
error_propagate(errp, local_err);
} else {
error_setg(errp, "Could not init `%s' audio driver", drv->name);
if (msg) {
dolog("Could not init `%s' audio driver\n", drv->name);
}
return -1;
}
@@ -1661,7 +1653,6 @@ static void free_audio_state(AudioState *s)

void audio_cleanup(void)
{
default_audio_state = NULL;
while (!QTAILQ_EMPTY(&audio_states)) {
AudioState *s = QTAILQ_FIRST(&audio_states);
QTAILQ_REMOVE(&audio_states, s, list);
@@ -1688,25 +1679,19 @@ static const VMStateDescription vmstate_audio = {
}
};

void audio_create_default_audiodevs(void)
static void audio_validate_opts(Audiodev *dev, Error **errp);

static AudiodevListEntry *audiodev_find(
AudiodevListHead *head, const char *drvname)
{
for (int i = 0; audio_prio_list[i]; i++) {
if (audio_driver_lookup(audio_prio_list[i])) {
QDict *dict = qdict_new();
Audiodev *dev = NULL;
Visitor *v;

qdict_put_str(dict, "driver", audio_prio_list[i]);
qdict_put_str(dict, "id", "#default");

v = qobject_input_visitor_new_keyval(QOBJECT(dict));
qobject_unref(dict);
visit_type_Audiodev(v, NULL, &dev, &error_fatal);
visit_free(v);

audio_define_default(dev, &error_abort);
AudiodevListEntry *e;
QSIMPLEQ_FOREACH(e, head, next) {
if (strcmp(AudiodevDriver_str(e->dev->driver), drvname) == 0) {
return e;
}
}

return NULL;
}

/*
@@ -1715,16 +1700,62 @@ void audio_create_default_audiodevs(void)
* if dev == NULL => legacy implicit initialization, return the already created
* state or create a new one
*/
static AudioState *audio_init(Audiodev *dev, Error **errp)
static AudioState *audio_init(Audiodev *dev, const char *name)
{
static bool atexit_registered;
size_t i;
int done = 0;
const char *drvname;
VMChangeStateEntry *vmse;
const char *drvname = NULL;
VMChangeStateEntry *e;
AudioState *s;
struct audio_driver *driver;
/* silence gcc warning about uninitialized variable */
AudiodevListHead head = QSIMPLEQ_HEAD_INITIALIZER(head);

if (using_spice) {
/*
* When using spice allow the spice audio driver being picked
* as default.
*
* Temporary hack. Using audio devices without explicit
* audiodev= property is already deprecated. Same goes for
* the -soundhw switch. Once this support gets finally
* removed we can also drop the concept of a default audio
* backend and this can go away.
*/
driver = audio_driver_lookup("spice");
if (driver) {
driver->can_be_default = 1;
}
}

if (dev) {
/* -audiodev option */
legacy_config = false;
drvname = AudiodevDriver_str(dev->driver);
} else if (!QTAILQ_EMPTY(&audio_states)) {
if (!legacy_config) {
dolog("Device %s: audiodev default parameter is deprecated, please "
"specify audiodev=%s\n", name,
QTAILQ_FIRST(&audio_states)->dev->id);
}
return QTAILQ_FIRST(&audio_states);
} else {
/* legacy implicit initialization */
head = audio_handle_legacy_opts();
/*
* In case of legacy initialization, all Audiodevs in the list will have
* the same configuration (except the driver), so it doesn't matter which
* one we chose. We need an Audiodev to set up AudioState before we can
* init a driver. Also note that dev at this point is still in the
* list.
*/
dev = QSIMPLEQ_FIRST(&head)->dev;
audio_validate_opts(dev, &error_abort);
}

s = g_new0(AudioState, 1);
s->dev = dev;

QLIST_INIT (&s->hw_head_out);
QLIST_INIT (&s->hw_head_in);
@@ -1736,36 +1767,56 @@ static AudioState *audio_init(Audiodev *dev, Error **errp)

s->ts = timer_new_ns(QEMU_CLOCK_VIRTUAL, audio_timer, s);

if (dev) {
/* -audiodev option */
s->dev = dev;
drvname = AudiodevDriver_str(dev->driver);
s->nb_hw_voices_out = audio_get_pdo_out(dev)->voices;
s->nb_hw_voices_in = audio_get_pdo_in(dev)->voices;

if (s->nb_hw_voices_out < 1) {
dolog ("Bogus number of playback voices %d, setting to 1\n",
s->nb_hw_voices_out);
s->nb_hw_voices_out = 1;
}

if (s->nb_hw_voices_in < 0) {
dolog ("Bogus number of capture voices %d, setting to 0\n",
s->nb_hw_voices_in);
s->nb_hw_voices_in = 0;
}

if (drvname) {
driver = audio_driver_lookup(drvname);
if (driver) {
done = !audio_driver_init(s, driver, dev, errp);
done = !audio_driver_init(s, driver, true, dev);
} else {
error_setg(errp, "Unknown audio driver `%s'\n", drvname);
dolog ("Unknown audio driver `%s'\n", drvname);
}
if (!done) {
goto out;
free_audio_state(s);
return NULL;
}
} else {
assert(!default_audio_state);
for (;;) {
AudiodevListEntry *e = QSIMPLEQ_FIRST(&default_audiodevs);
if (!e) {
error_setg(errp, "no default audio driver available");
goto out;
for (i = 0; audio_prio_list[i]; i++) {
AudiodevListEntry *e = audiodev_find(&head, audio_prio_list[i]);
driver = audio_driver_lookup(audio_prio_list[i]);

if (e && driver) {
s->dev = dev = e->dev;
audio_validate_opts(dev, &error_abort);
done = !audio_driver_init(s, driver, false, dev);
if (done) {
e->dev = NULL;
break;
}
}
s->dev = dev = e->dev;
drvname = AudiodevDriver_str(dev->driver);
driver = audio_driver_lookup(drvname);
if (!audio_driver_init(s, driver, dev, NULL)) {
break;
}
QSIMPLEQ_REMOVE_HEAD(&default_audiodevs, next);
}
}
audio_free_audiodev_list(&head);

if (!done) {
driver = audio_driver_lookup("none");
done = !audio_driver_init(s, driver, false, dev);
assert(done);
dolog("warning: Using timer based audio emulation\n");
}

if (dev->timer_period <= 0) {
s->period_ticks = 1;
@@ -1773,8 +1824,8 @@ static AudioState *audio_init(Audiodev *dev, Error **errp)
s->period_ticks = dev->timer_period * (int64_t)SCALE_US;
}

vmse = qemu_add_vm_change_state_handler (audio_vm_change_state_handler, s);
if (!vmse) {
e = qemu_add_vm_change_state_handler (audio_vm_change_state_handler, s);
if (!e) {
dolog ("warning: Could not register change state handler\n"
"(Audio can continue looping even after stopping the VM)\n");
}
@@ -1783,41 +1834,27 @@ static AudioState *audio_init(Audiodev *dev, Error **errp)
QLIST_INIT (&s->card_head);
vmstate_register (NULL, 0, &vmstate_audio, s);
return s;

out:
free_audio_state(s);
return NULL;
}

AudioState *audio_get_default_audio_state(Error **errp)
void audio_free_audiodev_list(AudiodevListHead *head)
{
if (!default_audio_state) {
default_audio_state = audio_init(NULL, errp);
if (!default_audio_state) {
if (!QSIMPLEQ_EMPTY(&audiodevs)) {
error_append_hint(errp, "Perhaps you wanted to use -audio or set audiodev=%s?\n",
QSIMPLEQ_FIRST(&audiodevs)->dev->id);
}
}
AudiodevListEntry *e;
while ((e = QSIMPLEQ_FIRST(head))) {
QSIMPLEQ_REMOVE_HEAD(head, next);
qapi_free_Audiodev(e->dev);
g_free(e);
}

return default_audio_state;
}

bool AUD_register_card (const char *name, QEMUSoundCard *card, Error **errp)
void AUD_register_card (const char *name, QEMUSoundCard *card)
{
if (!card->state) {
card->state = audio_get_default_audio_state(errp);
if (!card->state) {
return false;
}
card->state = audio_init(NULL, name);
}

card->name = g_strdup (name);
memset (&card->entries, 0, sizeof (card->entries));
QLIST_INSERT_HEAD(&card->state->card_head, card, entries);

return true;
}

void AUD_remove_card (QEMUSoundCard *card)
@@ -1839,8 +1876,10 @@ CaptureVoiceOut *AUD_add_capture(
struct capture_callback *cb;

if (!s) {
error_report("Capturing without setting an audiodev is not supported");
abort();
if (!legacy_config) {
dolog("Capturing without setting an audiodev is deprecated\n");
}
s = audio_init(NULL, NULL);
}

if (!audio_get_pdo_out(s->dev)->mixing_engine) {
@@ -1861,8 +1900,10 @@ CaptureVoiceOut *AUD_add_capture(
cap = audio_pcm_capture_find_specific(s, as);
if (cap) {
QLIST_INSERT_HEAD (&cap->cb_head, cb, entries);
return cap;
} else {
HWVoiceOut *hw;
CaptureVoiceOut *cap;

cap = g_malloc0(sizeof(*cap));

@@ -1896,9 +1937,8 @@ CaptureVoiceOut *AUD_add_capture(
QLIST_FOREACH(hw, &s->hw_head_out, entries) {
audio_attach_capture (hw);
}
return cap;
}

return cap;
}

void AUD_del_capture (CaptureVoiceOut *cap, void *cb_opaque)
@@ -2144,24 +2184,17 @@ void audio_define(Audiodev *dev)
QSIMPLEQ_INSERT_TAIL(&audiodevs, e, next);
}

void audio_define_default(Audiodev *dev, Error **errp)
{
AudiodevListEntry *e;

audio_validate_opts(dev, errp);

e = g_new0(AudiodevListEntry, 1);
e->dev = dev;
QSIMPLEQ_INSERT_TAIL(&default_audiodevs, e, next);
}

void audio_init_audiodevs(void)
bool audio_init_audiodevs(void)
{
AudiodevListEntry *e;

QSIMPLEQ_FOREACH(e, &audiodevs, next) {
audio_init(e->dev, &error_fatal);
if (!audio_init(e->dev, NULL)) {
return false;
}
}

return true;
}

audsettings audiodev_to_audsettings(AudiodevPerDirectionOptions *pdo)
@@ -2223,7 +2256,7 @@ int audio_buffer_bytes(AudiodevPerDirectionOptions *pdo,
audioformat_bytes_per_sample(as->fmt);
}

AudioState *audio_state_by_name(const char *name, Error **errp)
AudioState *audio_state_by_name(const char *name)
{
AudioState *s;
QTAILQ_FOREACH(s, &audio_states, list) {
@@ -2232,7 +2265,6 @@ AudioState *audio_state_by_name(const char *name, Error **errp)
return s;
}
}
error_setg(errp, "audiodev '%s' not found", name);
return NULL;
}

@@ -94,7 +94,7 @@ typedef struct QEMUAudioTimeStamp {
void AUD_vlog (const char *cap, const char *fmt, va_list ap) G_GNUC_PRINTF(2, 0);
void AUD_log (const char *cap, const char *fmt, ...) G_GNUC_PRINTF(2, 3);

bool AUD_register_card (const char *name, QEMUSoundCard *card, Error **errp);
void AUD_register_card (const char *name, QEMUSoundCard *card);
void AUD_remove_card (QEMUSoundCard *card);
CaptureVoiceOut *AUD_add_capture(
AudioState *s,
@@ -169,14 +169,12 @@ void audio_sample_from_uint64(void *samples, int pos,
uint64_t left, uint64_t right);

void audio_define(Audiodev *audio);
void audio_define_default(Audiodev *dev, Error **errp);
void audio_parse_option(const char *opt);
void audio_create_default_audiodevs(void);
void audio_init_audiodevs(void);
bool audio_init_audiodevs(void);
void audio_help(void);
void audio_legacy_help(void);

AudioState *audio_state_by_name(const char *name, Error **errp);
AudioState *audio_get_default_audio_state(Error **errp);
AudioState *audio_state_by_name(const char *name);
const char *audio_get_id(QEMUSoundCard *card);

#define DEFINE_AUDIO_PROPERTIES(_s, _f) \

@@ -140,12 +140,13 @@ typedef struct audio_driver audio_driver;
struct audio_driver {
const char *name;
const char *descr;
void *(*init) (Audiodev *, Error **);
void *(*init) (Audiodev *);
void (*fini) (void *);
#ifdef CONFIG_GIO
void (*set_dbus_server) (AudioState *s, GDBusObjectManagerServer *manager, bool p2p);
#endif
struct audio_pcm_ops *pcm_ops;
int can_be_default;
int max_voices_out;
int max_voices_in;
size_t voice_size_out;
@@ -242,6 +243,7 @@ extern const struct mixeng_volume nominal_volume;
extern const char *audio_prio_list[];

void audio_driver_register(audio_driver *drv);
audio_driver *audio_driver_lookup(const char *name);

void audio_pcm_init_info (struct audio_pcm_info *info, struct audsettings *as);
void audio_pcm_info_clear_buf (struct audio_pcm_info *info, void *buf, int len);
@@ -295,6 +297,9 @@ typedef struct AudiodevListEntry {
} AudiodevListEntry;

typedef QSIMPLEQ_HEAD(, AudiodevListEntry) AudiodevListHead;
AudiodevListHead audio_handle_legacy_opts(void);

void audio_free_audiodev_list(AudiodevListHead *head);

void audio_create_pdos(Audiodev *dev);
AudiodevPerDirectionOptions *audio_get_pdo_in(Audiodev *dev);

audio/audio_legacy.c (new file)
@@ -0,0 +1,591 @@
/*
* QEMU Audio subsystem: legacy configuration handling
*
* Copyright (c) 2015-2019 Zoltán Kővágó <DirtY.iCE.hu@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "qemu/osdep.h"
#include "audio.h"
#include "audio_int.h"
#include "qemu/cutils.h"
#include "qemu/timer.h"
#include "qapi/error.h"
#include "qapi/qapi-visit-audio.h"
#include "qapi/visitor-impl.h"

#define AUDIO_CAP "audio-legacy"
#include "audio_int.h"

static uint32_t toui32(const char *str)
{
uint64_t ret;
if (parse_uint_full(str, 10, &ret) || ret > UINT32_MAX) {
dolog("Invalid integer value `%s'\n", str);
exit(1);
}
return ret;
}

/* helper functions to convert env variables */
static void get_bool(const char *env, bool *dst, bool *has_dst)
{
const char *val = getenv(env);
if (val) {
*dst = toui32(val) != 0;
*has_dst = true;
}
}

static void get_int(const char *env, uint32_t *dst, bool *has_dst)
{
const char *val = getenv(env);
if (val) {
*dst = toui32(val);
*has_dst = true;
}
}

static void get_str(const char *env, char **dst)
{
const char *val = getenv(env);
if (val) {
g_free(*dst);
*dst = g_strdup(val);
}
}

static void get_fmt(const char *env, AudioFormat *dst, bool *has_dst)
{
const char *val = getenv(env);
if (val) {
size_t i;
for (i = 0; AudioFormat_lookup.size; ++i) {
if (strcasecmp(val, AudioFormat_lookup.array[i]) == 0) {
*dst = i;
*has_dst = true;
return;
}
}

dolog("Invalid audio format `%s'\n", val);
exit(1);
}
}


#if defined(CONFIG_AUDIO_ALSA) || defined(CONFIG_AUDIO_DSOUND)
static void get_millis_to_usecs(const char *env, uint32_t *dst, bool *has_dst)
{
const char *val = getenv(env);
if (val) {
*dst = toui32(val) * 1000;
*has_dst = true;
}
}
#endif

#if defined(CONFIG_AUDIO_ALSA) || defined(CONFIG_AUDIO_COREAUDIO) || \
defined(CONFIG_AUDIO_PA) || defined(CONFIG_AUDIO_SDL) || \
defined(CONFIG_AUDIO_DSOUND) || defined(CONFIG_AUDIO_OSS)
static uint32_t frames_to_usecs(uint32_t frames,
AudiodevPerDirectionOptions *pdo)
{
uint32_t freq = pdo->has_frequency ? pdo->frequency : 44100;
return (frames * 1000000 + freq / 2) / freq;
}
#endif

#ifdef CONFIG_AUDIO_COREAUDIO
static void get_frames_to_usecs(const char *env, uint32_t *dst, bool *has_dst,
AudiodevPerDirectionOptions *pdo)
{
const char *val = getenv(env);
if (val) {
*dst = frames_to_usecs(toui32(val), pdo);
*has_dst = true;
}
}
#endif

#if defined(CONFIG_AUDIO_PA) || defined(CONFIG_AUDIO_SDL) || \
defined(CONFIG_AUDIO_DSOUND) || defined(CONFIG_AUDIO_OSS)
static uint32_t samples_to_usecs(uint32_t samples,
AudiodevPerDirectionOptions *pdo)
{
uint32_t channels = pdo->has_channels ? pdo->channels : 2;
return frames_to_usecs(samples / channels, pdo);
}
#endif

#if defined(CONFIG_AUDIO_PA) || defined(CONFIG_AUDIO_SDL)
static void get_samples_to_usecs(const char *env, uint32_t *dst, bool *has_dst,
AudiodevPerDirectionOptions *pdo)
{
const char *val = getenv(env);
if (val) {
*dst = samples_to_usecs(toui32(val), pdo);
*has_dst = true;
}
}
#endif

#if defined(CONFIG_AUDIO_DSOUND) || defined(CONFIG_AUDIO_OSS)
static uint32_t bytes_to_usecs(uint32_t bytes, AudiodevPerDirectionOptions *pdo)
{
AudioFormat fmt = pdo->has_format ? pdo->format : AUDIO_FORMAT_S16;
uint32_t bytes_per_sample = audioformat_bytes_per_sample(fmt);
return samples_to_usecs(bytes / bytes_per_sample, pdo);
}

static void get_bytes_to_usecs(const char *env, uint32_t *dst, bool *has_dst,
AudiodevPerDirectionOptions *pdo)
{
const char *val = getenv(env);
if (val) {
*dst = bytes_to_usecs(toui32(val), pdo);
*has_dst = true;
}
}
#endif

/* backend specific functions */

#ifdef CONFIG_AUDIO_ALSA
/* ALSA */
static void handle_alsa_per_direction(
AudiodevAlsaPerDirectionOptions *apdo, const char *prefix)
{
char buf[64];
size_t len = strlen(prefix);
bool size_in_usecs = false;
bool dummy;

memcpy(buf, prefix, len);
strcpy(buf + len, "TRY_POLL");
get_bool(buf, &apdo->try_poll, &apdo->has_try_poll);

strcpy(buf + len, "DEV");
get_str(buf, &apdo->dev);

strcpy(buf + len, "SIZE_IN_USEC");
get_bool(buf, &size_in_usecs, &dummy);

strcpy(buf + len, "PERIOD_SIZE");
get_int(buf, &apdo->period_length, &apdo->has_period_length);
if (apdo->has_period_length && !size_in_usecs) {
apdo->period_length = frames_to_usecs(
apdo->period_length,
qapi_AudiodevAlsaPerDirectionOptions_base(apdo));
}

strcpy(buf + len, "BUFFER_SIZE");
get_int(buf, &apdo->buffer_length, &apdo->has_buffer_length);
if (apdo->has_buffer_length && !size_in_usecs) {
apdo->buffer_length = frames_to_usecs(
apdo->buffer_length,
qapi_AudiodevAlsaPerDirectionOptions_base(apdo));
}
}

static void handle_alsa(Audiodev *dev)
{
AudiodevAlsaOptions *aopt = &dev->u.alsa;
handle_alsa_per_direction(aopt->in, "QEMU_ALSA_ADC_");
handle_alsa_per_direction(aopt->out, "QEMU_ALSA_DAC_");

get_millis_to_usecs("QEMU_ALSA_THRESHOLD",
&aopt->threshold, &aopt->has_threshold);
}
#endif

#ifdef CONFIG_AUDIO_COREAUDIO
/* coreaudio */
static void handle_coreaudio(Audiodev *dev)
{
get_frames_to_usecs(
"QEMU_COREAUDIO_BUFFER_SIZE",
&dev->u.coreaudio.out->buffer_length,
&dev->u.coreaudio.out->has_buffer_length,
qapi_AudiodevCoreaudioPerDirectionOptions_base(dev->u.coreaudio.out));
get_int("QEMU_COREAUDIO_BUFFER_COUNT",
&dev->u.coreaudio.out->buffer_count,
&dev->u.coreaudio.out->has_buffer_count);
}
#endif

#ifdef CONFIG_AUDIO_DSOUND
/* dsound */
static void handle_dsound(Audiodev *dev)
{
get_millis_to_usecs("QEMU_DSOUND_LATENCY_MILLIS",
&dev->u.dsound.latency, &dev->u.dsound.has_latency);
get_bytes_to_usecs("QEMU_DSOUND_BUFSIZE_OUT",
&dev->u.dsound.out->buffer_length,
&dev->u.dsound.out->has_buffer_length,
dev->u.dsound.out);
get_bytes_to_usecs("QEMU_DSOUND_BUFSIZE_IN",
&dev->u.dsound.in->buffer_length,
&dev->u.dsound.in->has_buffer_length,
dev->u.dsound.in);
}
#endif

#ifdef CONFIG_AUDIO_OSS
/* OSS */
static void handle_oss_per_direction(
AudiodevOssPerDirectionOptions *opdo, const char *try_poll_env,
const char *dev_env)
{
get_bool(try_poll_env, &opdo->try_poll, &opdo->has_try_poll);
get_str(dev_env, &opdo->dev);

get_bytes_to_usecs("QEMU_OSS_FRAGSIZE",
&opdo->buffer_length, &opdo->has_buffer_length,
qapi_AudiodevOssPerDirectionOptions_base(opdo));
get_int("QEMU_OSS_NFRAGS", &opdo->buffer_count,
&opdo->has_buffer_count);
}

static void handle_oss(Audiodev *dev)
{
AudiodevOssOptions *oopt = &dev->u.oss;
handle_oss_per_direction(oopt->in, "QEMU_AUDIO_ADC_TRY_POLL",
"QEMU_OSS_ADC_DEV");
handle_oss_per_direction(oopt->out, "QEMU_AUDIO_DAC_TRY_POLL",
"QEMU_OSS_DAC_DEV");

get_bool("QEMU_OSS_MMAP", &oopt->try_mmap, &oopt->has_try_mmap);
get_bool("QEMU_OSS_EXCLUSIVE", &oopt->exclusive, &oopt->has_exclusive);
get_int("QEMU_OSS_POLICY", &oopt->dsp_policy, &oopt->has_dsp_policy);
}
#endif

#ifdef CONFIG_AUDIO_PA
/* pulseaudio */
static void handle_pa_per_direction(
AudiodevPaPerDirectionOptions *ppdo, const char *env)
{
get_str(env, &ppdo->name);
}

static void handle_pa(Audiodev *dev)
{
handle_pa_per_direction(dev->u.pa.in, "QEMU_PA_SOURCE");
handle_pa_per_direction(dev->u.pa.out, "QEMU_PA_SINK");

get_samples_to_usecs(
"QEMU_PA_SAMPLES", &dev->u.pa.in->buffer_length,
&dev->u.pa.in->has_buffer_length,
qapi_AudiodevPaPerDirectionOptions_base(dev->u.pa.in));
get_samples_to_usecs(
"QEMU_PA_SAMPLES", &dev->u.pa.out->buffer_length,
&dev->u.pa.out->has_buffer_length,
qapi_AudiodevPaPerDirectionOptions_base(dev->u.pa.out));

get_str("QEMU_PA_SERVER", &dev->u.pa.server);
}
#endif

#ifdef CONFIG_AUDIO_SDL
/* SDL */
static void handle_sdl(Audiodev *dev)
{
/* SDL is output only */
get_samples_to_usecs("QEMU_SDL_SAMPLES", &dev->u.sdl.out->buffer_length,
&dev->u.sdl.out->has_buffer_length,
qapi_AudiodevSdlPerDirectionOptions_base(dev->u.sdl.out));
}
#endif

/* wav */
static void handle_wav(Audiodev *dev)
{
get_int("QEMU_WAV_FREQUENCY",
&dev->u.wav.out->frequency, &dev->u.wav.out->has_frequency);
get_fmt("QEMU_WAV_FORMAT", &dev->u.wav.out->format,
&dev->u.wav.out->has_format);
get_int("QEMU_WAV_DAC_FIXED_CHANNELS",
&dev->u.wav.out->channels, &dev->u.wav.out->has_channels);
get_str("QEMU_WAV_PATH", &dev->u.wav.path);
}

/* general */
static void handle_per_direction(
AudiodevPerDirectionOptions *pdo, const char *prefix)
{
char buf[64];
size_t len = strlen(prefix);

memcpy(buf, prefix, len);
strcpy(buf + len, "FIXED_SETTINGS");
get_bool(buf, &pdo->fixed_settings, &pdo->has_fixed_settings);

strcpy(buf + len, "FIXED_FREQ");
get_int(buf, &pdo->frequency, &pdo->has_frequency);

strcpy(buf + len, "FIXED_FMT");
get_fmt(buf, &pdo->format, &pdo->has_format);

strcpy(buf + len, "FIXED_CHANNELS");
get_int(buf, &pdo->channels, &pdo->has_channels);

strcpy(buf + len, "VOICES");
get_int(buf, &pdo->voices, &pdo->has_voices);
}

static AudiodevListEntry *legacy_opt(const char *drvname)
{
AudiodevListEntry *e = g_new0(AudiodevListEntry, 1);
e->dev = g_new0(Audiodev, 1);
e->dev->id = g_strdup(drvname);
e->dev->driver = qapi_enum_parse(
&AudiodevDriver_lookup, drvname, -1, &error_abort);

audio_create_pdos(e->dev);

handle_per_direction(audio_get_pdo_in(e->dev), "QEMU_AUDIO_ADC_");
handle_per_direction(audio_get_pdo_out(e->dev), "QEMU_AUDIO_DAC_");

/* Original description: Timer period in HZ (0 - use lowest possible) */
get_int("QEMU_AUDIO_TIMER_PERIOD",
&e->dev->timer_period, &e->dev->has_timer_period);
if (e->dev->has_timer_period && e->dev->timer_period) {
e->dev->timer_period = NANOSECONDS_PER_SECOND / 1000 /
e->dev->timer_period;
}

switch (e->dev->driver) {
#ifdef CONFIG_AUDIO_ALSA
case AUDIODEV_DRIVER_ALSA:
handle_alsa(e->dev);
break;
#endif

#ifdef CONFIG_AUDIO_COREAUDIO
case AUDIODEV_DRIVER_COREAUDIO:
handle_coreaudio(e->dev);
break;
#endif

#ifdef CONFIG_AUDIO_DSOUND
case AUDIODEV_DRIVER_DSOUND:
handle_dsound(e->dev);
break;
#endif

#ifdef CONFIG_AUDIO_OSS
case AUDIODEV_DRIVER_OSS:
handle_oss(e->dev);
break;
#endif

#ifdef CONFIG_AUDIO_PA
case AUDIODEV_DRIVER_PA:
handle_pa(e->dev);
break;
#endif

#ifdef CONFIG_AUDIO_SDL
case AUDIODEV_DRIVER_SDL:
handle_sdl(e->dev);
break;
#endif

case AUDIODEV_DRIVER_WAV:
handle_wav(e->dev);
break;

default:
break;
}

return e;
}

AudiodevListHead audio_handle_legacy_opts(void)
{
const char *drvname = getenv("QEMU_AUDIO_DRV");
AudiodevListHead head = QSIMPLEQ_HEAD_INITIALIZER(head);

if (drvname) {
AudiodevListEntry *e;
audio_driver *driver = audio_driver_lookup(drvname);
if (!driver) {
dolog("Unknown audio driver `%s'\n", drvname);
exit(1);
}
e = legacy_opt(drvname);
QSIMPLEQ_INSERT_TAIL(&head, e, next);
} else {
for (int i = 0; audio_prio_list[i]; i++) {
audio_driver *driver = audio_driver_lookup(audio_prio_list[i]);
if (driver && driver->can_be_default) {
AudiodevListEntry *e = legacy_opt(driver->name);
QSIMPLEQ_INSERT_TAIL(&head, e, next);
}
}
if (QSIMPLEQ_EMPTY(&head)) {
dolog("Internal error: no default audio driver available\n");
exit(1);
}
}

return head;
}

/* visitor to print -audiodev option */
typedef struct {
Visitor visitor;

bool comma;
GList *path;
} LegacyPrintVisitor;

static bool lv_start_struct(Visitor *v, const char *name, void **obj,
size_t size, Error **errp)
{
LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v;
lv->path = g_list_append(lv->path, g_strdup(name));
return true;
}

static void lv_end_struct(Visitor *v, void **obj)
{
LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v;
lv->path = g_list_delete_link(lv->path, g_list_last(lv->path));
}

static void lv_print_key(Visitor *v, const char *name)
{
GList *e;
LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v;
if (lv->comma) {
putchar(',');
} else {
lv->comma = true;
}

for (e = lv->path; e; e = e->next) {
if (e->data) {
printf("%s.", (const char *) e->data);
}
}

printf("%s=", name);
}

static bool lv_type_int64(Visitor *v, const char *name, int64_t *obj,
Error **errp)
{
lv_print_key(v, name);
printf("%" PRIi64, *obj);
return true;
}

static bool lv_type_uint64(Visitor *v, const char *name, uint64_t *obj,
Error **errp)
{
lv_print_key(v, name);
printf("%" PRIu64, *obj);
return true;
}

static bool lv_type_bool(Visitor *v, const char *name, bool *obj, Error **errp)
{
lv_print_key(v, name);
printf("%s", *obj ? "on" : "off");
return true;
}

static bool lv_type_str(Visitor *v, const char *name, char **obj, Error **errp)
{
const char *str = *obj;
lv_print_key(v, name);

while (*str) {
if (*str == ',') {
putchar(',');
}
putchar(*str++);
}
return true;
}

static void lv_complete(Visitor *v, void *opaque)
{
LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v;
assert(lv->path == NULL);
}

static void lv_free(Visitor *v)
{
LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v;

g_list_free_full(lv->path, g_free);
g_free(lv);
}

static Visitor *legacy_visitor_new(void)
{
LegacyPrintVisitor *lv = g_new0(LegacyPrintVisitor, 1);

lv->visitor.start_struct = lv_start_struct;
lv->visitor.end_struct = lv_end_struct;
/* lists not supported */
lv->visitor.type_int64 = lv_type_int64;
lv->visitor.type_uint64 = lv_type_uint64;
lv->visitor.type_bool = lv_type_bool;
lv->visitor.type_str = lv_type_str;

lv->visitor.type = VISITOR_OUTPUT;
lv->visitor.complete = lv_complete;
lv->visitor.free = lv_free;

return &lv->visitor;
}

void audio_legacy_help(void)
{
AudiodevListHead head;
AudiodevListEntry *e;

printf("Environment variable based configuration deprecated.\n");
printf("Please use the new -audiodev option.\n");

head = audio_handle_legacy_opts();
printf("\nEquivalent -audiodev to your current environment variables:\n");
if (!getenv("QEMU_AUDIO_DRV")) {
printf("(Since you didn't specify QEMU_AUDIO_DRV, I'll list all "
"possibilities)\n");
}

QSIMPLEQ_FOREACH(e, &head, next) {
Visitor *v;
Audiodev *dev = e->dev;
printf("-audiodev ");

v = legacy_visitor_new();
visit_type_Audiodev(v, NULL, &dev, &error_abort);
visit_free(v);

printf("\n");
}
audio_free_audiodev_list(&head);
}
@@ -37,12 +37,11 @@
#endif

static void glue(audio_init_nb_voices_, TYPE)(AudioState *s,
struct audio_driver *drv, int min_voices)
struct audio_driver *drv)
{
int max_voices = glue (drv->max_voices_, TYPE);
size_t voice_size = glue(drv->voice_size_, TYPE);

glue (s->nb_hw_voices_, TYPE) = glue(audio_get_pdo_, TYPE)(s->dev)->voices;
if (glue (s->nb_hw_voices_, TYPE) > max_voices) {
if (!max_voices) {
#ifdef DAC
@@ -57,12 +56,6 @@ static void glue(audio_init_nb_voices_, TYPE)(AudioState *s,
glue (s->nb_hw_voices_, TYPE) = max_voices;
}

if (glue (s->nb_hw_voices_, TYPE) < min_voices) {
dolog ("Bogus number of " NAME " voices %d, setting to %d\n",
glue (s->nb_hw_voices_, TYPE),
min_voices);
}

if (audio_bug(__func__, !voice_size && max_voices)) {
dolog ("drv=`%s' voice_size=0 max_voices=%d\n",
drv->name, max_voices);

@@ -644,7 +644,7 @@ static void coreaudio_enable_out(HWVoiceOut *hw, bool enable)
update_device_playback_state(core);
}

static void *coreaudio_audio_init(Audiodev *dev, Error **errp)
static void *coreaudio_audio_init(Audiodev *dev)
{
return dev;
}
@@ -673,6 +673,7 @@ static struct audio_driver coreaudio_audio_driver = {
.init = coreaudio_audio_init,
.fini = coreaudio_audio_fini,
.pcm_ops = &coreaudio_pcm_ops,
.can_be_default = 1,
.max_voices_out = 1,
.max_voices_in = 0,
.voice_size_out = sizeof (coreaudioVoiceOut),

@@ -395,7 +395,7 @@ dbus_enable_in(HWVoiceIn *hw, bool enable)
}

static void *
dbus_audio_init(Audiodev *dev, Error **errp)
dbus_audio_init(Audiodev *dev)
{
DBusAudio *da = g_new0(DBusAudio, 1);

@@ -676,6 +676,7 @@ static struct audio_driver dbus_audio_driver = {
.fini = dbus_audio_fini,
.set_dbus_server = dbus_audio_set_server,
.pcm_ops = &dbus_pcm_ops,
.can_be_default = 1,
.max_voices_out = INT_MAX,
.max_voices_in = INT_MAX,
.voice_size_out = sizeof(DBusVoiceOut),

@@ -619,7 +619,7 @@ static void dsound_audio_fini (void *opaque)
g_free(s);
}

static void *dsound_audio_init(Audiodev *dev, Error **errp)
static void *dsound_audio_init(Audiodev *dev)
{
int err;
HRESULT hr;
@@ -721,6 +721,7 @@ static struct audio_driver dsound_audio_driver = {
.init = dsound_audio_init,
.fini = dsound_audio_fini,
.pcm_ops = &dsound_pcm_ops,
.can_be_default = 1,
.max_voices_out = INT_MAX,
.max_voices_in = 1,
.voice_size_out = sizeof (DSoundVoiceOut),

@@ -70,9 +70,6 @@ typedef struct QJackClient {
int buffersize;
jack_port_t **port;
QJackBuffer fifo;

/* Used as workspace by qjack_process() */
float **process_buffers;
}
QJackClient;

@@ -270,21 +267,22 @@ static int qjack_process(jack_nframes_t nframes, void *arg)
}

/* get the buffers for the ports */
float *buffers[c->nchannels];
for (int i = 0; i < c->nchannels; ++i) {
c->process_buffers[i] = jack_port_get_buffer(c->port[i], nframes);
buffers[i] = jack_port_get_buffer(c->port[i], nframes);
}

if (c->out) {
if (likely(c->enabled)) {
qjack_buffer_read_l(&c->fifo, c->process_buffers, nframes);
qjack_buffer_read_l(&c->fifo, buffers, nframes);
} else {
for (int i = 0; i < c->nchannels; ++i) {
memset(c->process_buffers[i], 0, nframes * sizeof(float));
memset(buffers[i], 0, nframes * sizeof(float));
}
}
} else {
if (likely(c->enabled)) {
qjack_buffer_write_l(&c->fifo, c->process_buffers, nframes);
qjack_buffer_write_l(&c->fifo, buffers, nframes);
}
}

@@ -402,8 +400,7 @@ static void qjack_client_connect_ports(QJackClient *c)
static int qjack_client_init(QJackClient *c)
{
jack_status_t status;
int client_name_len = jack_client_name_size(); /* includes NUL */
g_autofree char *client_name = g_new(char, client_name_len);
char client_name[jack_client_name_size()];
jack_options_t options = JackNullOption;

if (c->state == QJACK_STATE_RUNNING) {
@@ -412,7 +409,7 @@ static int qjack_client_init(QJackClient *c)

c->connect_ports = true;

snprintf(client_name, client_name_len, "%s-%s",
snprintf(client_name, sizeof(client_name), "%s-%s",
c->out ? "out" : "in",
c->opt->client_name ? c->opt->client_name : audio_application_name());

@@ -450,9 +447,6 @@ static int qjack_client_init(QJackClient *c)
jack_get_client_name(c->client));
}

/* Allocate working buffer for process callback */
c->process_buffers = g_new(float *, c->nchannels);

jack_set_process_callback(c->client, qjack_process , c);
jack_set_port_registration_callback(c->client, qjack_port_registration, c);
jack_set_xrun_callback(c->client, qjack_xrun, c);
@@ -584,7 +578,6 @@ static void qjack_client_fini_locked(QJackClient *c)

qjack_buffer_free(&c->fifo);
g_free(c->port);
g_free(c->process_buffers);

c->state = QJACK_STATE_DISCONNECTED;
/* fallthrough */
@@ -645,7 +638,7 @@ static int qjack_thread_creator(jack_native_thread_t *thread,
}
#endif

static void *qjack_init(Audiodev *dev, Error **errp)
static void *qjack_init(Audiodev *dev)
{
assert(dev->driver == AUDIODEV_DRIVER_JACK);
return dev;
@@ -676,6 +669,7 @@ static struct audio_driver jack_driver = {
.init = qjack_init,
.fini = qjack_fini,
.pcm_ops = &jack_pcm_ops,
.can_be_default = 1,
.max_voices_out = INT_MAX,
.max_voices_in = INT_MAX,
.voice_size_out = sizeof(QJackOut),

@@ -1,6 +1,7 @@
system_ss.add([spice_headers, files('audio.c')])
system_ss.add(files(
'audio-hmp-cmds.c',
'audio_legacy.c',
'mixeng.c',
'noaudio.c',
'wavaudio.c',

@@ -38,7 +38,7 @@ typedef struct st_sample st_sample;
typedef void (t_sample) (struct st_sample *dst, const void *src, int samples);
typedef void (f_sample) (void *dst, const struct st_sample *src, int samples);

/* indices: [stereo][signed][swap endianness][8, 16 or 32-bits] */
/* indices: [stereo][signed][swap endiannes][8, 16 or 32-bits] */
extern t_sample *mixeng_conv[2][2][2][3];
extern f_sample *mixeng_clip[2][2][2][3];

@@ -104,7 +104,7 @@ static void no_enable_in(HWVoiceIn *hw, bool enable)
}
}

static void *no_audio_init(Audiodev *dev, Error **errp)
static void *no_audio_init(Audiodev *dev)
{
return &no_audio_init;
}
@@ -135,6 +135,7 @@ static struct audio_driver no_audio_driver = {
.init = no_audio_init,
.fini = no_audio_fini,
.pcm_ops = &no_pcm_ops,
.can_be_default = 1,
.max_voices_out = INT_MAX,
.max_voices_in = INT_MAX,
.voice_size_out = sizeof (NoVoiceOut),

@@ -28,7 +28,6 @@
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/host-utils.h"
#include "qapi/error.h"
#include "audio.h"
#include "trace.h"

@@ -549,6 +548,7 @@ static int oss_init_out(HWVoiceOut *hw, struct audsettings *as,
hw->size_emul);
hw->buf_emul = NULL;
} else {
int err;
int trig = 0;
if (ioctl (fd, SNDCTL_DSP_SETTRIGGER, &trig) < 0) {
oss_logerr (errno, "SNDCTL_DSP_SETTRIGGER 0 failed\n");
@@ -736,7 +736,7 @@ static void oss_init_per_direction(AudiodevOssPerDirectionOptions *opdo)
}
}

static void *oss_audio_init(Audiodev *dev, Error **errp)
static void *oss_audio_init(Audiodev *dev)
{
AudiodevOssOptions *oopts;
assert(dev->driver == AUDIODEV_DRIVER_OSS);
@@ -745,12 +745,8 @@ static void *oss_audio_init(Audiodev *dev, Error **errp)
oss_init_per_direction(oopts->in);
oss_init_per_direction(oopts->out);

if (access(oopts->in->dev ?: "/dev/dsp", R_OK | W_OK) < 0) {
error_setg_errno(errp, errno, "%s not accessible", oopts->in->dev ?: "/dev/dsp");
return NULL;
}
if (access(oopts->out->dev ?: "/dev/dsp", R_OK | W_OK) < 0) {
error_setg_errno(errp, errno, "%s not accessible", oopts->out->dev ?: "/dev/dsp");
if (access(oopts->in->dev ?: "/dev/dsp", R_OK | W_OK) < 0 ||
access(oopts->out->dev ?: "/dev/dsp", R_OK | W_OK) < 0) {
return NULL;
}
return dev;
@@ -783,6 +779,7 @@ static struct audio_driver oss_audio_driver = {
.init = oss_audio_init,
.fini = oss_audio_fini,
.pcm_ops = &oss_pcm_ops,
.can_be_default = 1,
.max_voices_out = INT_MAX,
.max_voices_in = INT_MAX,
.voice_size_out = sizeof (OSSVoiceOut),

@@ -3,7 +3,7 @@
#include "qemu/osdep.h"
#include "qemu/module.h"
#include "audio.h"
#include "qapi/error.h"
#include "qapi/opts-visitor.h"

#include <pulse/pulseaudio.h>

@@ -818,7 +818,7 @@ fail:
return NULL;
}

static void *qpa_audio_init(Audiodev *dev, Error **errp)
static void *qpa_audio_init(Audiodev *dev)
{
paaudio *g;
AudiodevPaOptions *popts = &dev->u.pa;
@@ -834,12 +834,10 @@ static void *qpa_audio_init(Audiodev *dev, Error **errp)

runtime = getenv("XDG_RUNTIME_DIR");
if (!runtime) {
error_setg(errp, "XDG_RUNTIME_DIR not set");
return NULL;
}
snprintf(pidfile, sizeof(pidfile), "%s/pulse/pid", runtime);
if (stat(pidfile, &st) != 0) {
error_setg_errno(errp, errno, "could not stat pidfile %s", pidfile);
return NULL;
}
}
@@ -869,7 +867,6 @@ static void *qpa_audio_init(Audiodev *dev, Error **errp)
}
if (!g->conn) {
g_free(g);
error_setg(errp, "could not connect to PulseAudio server");
return NULL;
}

@@ -931,6 +928,7 @@ static struct audio_driver pa_audio_driver = {
.init = qpa_audio_init,
.fini = qpa_audio_fini,
.pcm_ops = &qpa_pcm_ops,
.can_be_default = 1,
.max_voices_out = INT_MAX,
.max_voices_in = INT_MAX,
.voice_size_out = sizeof (PAVoiceOut),

@@ -13,7 +13,6 @@
#include "audio.h"
#include <errno.h>
#include "qemu/error-report.h"
#include "qapi/error.h"
#include <spa/param/audio/format-utils.h>
#include <spa/utils/ringbuffer.h>
#include <spa/utils/result.h>
@@ -737,7 +736,7 @@ static const struct pw_core_events core_events = {
};

static void *
qpw_audio_init(Audiodev *dev, Error **errp)
qpw_audio_init(Audiodev *dev)
{
g_autofree pwaudio *pw = g_new0(pwaudio, 1);

@@ -749,19 +748,19 @@ qpw_audio_init(Audiodev *dev, Error **errp)
pw->dev = dev;
pw->thread_loop = pw_thread_loop_new("PipeWire thread loop", NULL);
if (pw->thread_loop == NULL) {
error_setg_errno(errp, errno, "Could not create PipeWire loop");
error_report("Could not create PipeWire loop: %s", g_strerror(errno));
goto fail;
}

pw->context =
pw_context_new(pw_thread_loop_get_loop(pw->thread_loop), NULL, 0);
if (pw->context == NULL) {
error_setg_errno(errp, errno, "Could not create PipeWire context");
error_report("Could not create PipeWire context: %s", g_strerror(errno));
goto fail;
}

if (pw_thread_loop_start(pw->thread_loop) < 0) {
error_setg_errno(errp, errno, "Could not start PipeWire loop");
error_report("Could not start PipeWire loop: %s", g_strerror(errno));
goto fail;
}

@@ -770,13 +769,13 @@ qpw_audio_init(Audiodev *dev, Error **errp)
pw->core = pw_context_connect(pw->context, NULL, 0);
if (pw->core == NULL) {
pw_thread_loop_unlock(pw->thread_loop);
goto fail_error;
goto fail;
}

if (pw_core_add_listener(pw->core, &pw->core_listener,
&core_events, pw) < 0) {
pw_thread_loop_unlock(pw->thread_loop);
goto fail_error;
goto fail;
}
if (wait_resync(pw) < 0) {
pw_thread_loop_unlock(pw->thread_loop);
@@ -786,9 +785,8 @@ qpw_audio_init(Audiodev *dev, Error **errp)

return g_steal_pointer(&pw);

fail_error:
error_setg(errp, "Failed to initialize PW context");
fail:
AUD_log(AUDIO_CAP, "Failed to initialize PW context");
if (pw->thread_loop) {
pw_thread_loop_stop(pw->thread_loop);
}
@@ -843,6 +841,7 @@ static struct audio_driver pw_audio_driver = {
.init = qpw_audio_init,
.fini = qpw_audio_fini,
.pcm_ops = &qpw_pcm_ops,
.can_be_default = 1,
.max_voices_out = INT_MAX,
.max_voices_in = INT_MAX,
.voice_size_out = sizeof(PWVoiceOut),

@@ -26,7 +26,6 @@
#include <SDL.h>
#include <SDL_thread.h>
#include "qemu/module.h"
#include "qapi/error.h"
#include "audio.h"

#ifndef _WIN32
@@ -450,10 +449,10 @@ static void sdl_enable_in(HWVoiceIn *hw, bool enable)
SDL_PauseAudioDevice(sdl->devid, !enable);
}

static void *sdl_audio_init(Audiodev *dev, Error **errp)
static void *sdl_audio_init(Audiodev *dev)
{
if (SDL_InitSubSystem (SDL_INIT_AUDIO)) {
error_setg(errp, "SDL failed to initialize audio subsystem");
sdl_logerr ("SDL failed to initialize audio subsystem\n");
return NULL;
}

@@ -494,6 +493,7 @@ static struct audio_driver sdl_audio_driver = {
.init = sdl_audio_init,
.fini = sdl_audio_fini,
.pcm_ops = &sdl_pcm_ops,
.can_be_default = 1,
.max_voices_out = INT_MAX,
.max_voices_in = INT_MAX,
.voice_size_out = sizeof(SDLVoiceOut),

@@ -518,7 +518,7 @@ static void sndio_fini_in(HWVoiceIn *hw)
sndio_fini(self);
}

static void *sndio_audio_init(Audiodev *dev, Error **errp)
static void *sndio_audio_init(Audiodev *dev)
{
assert(dev->driver == AUDIODEV_DRIVER_SNDIO);
return dev;
@@ -550,6 +550,7 @@ static struct audio_driver sndio_audio_driver = {
.init = sndio_audio_init,
.fini = sndio_audio_fini,
.pcm_ops = &sndio_pcm_ops,
.can_be_default = 1,
.max_voices_out = INT_MAX,
.max_voices_in = INT_MAX,
.voice_size_out = sizeof(SndioVoice),

@@ -22,7 +22,6 @@
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "qapi/error.h"
#include "ui/qemu-spice.h"

#define AUDIO_CAP "spice"
@@ -72,13 +71,11 @@ static const SpiceRecordInterface record_sif = {
.base.minor_version = SPICE_INTERFACE_RECORD_MINOR,
};

static void *spice_audio_init(Audiodev *dev, Error **errp)
static void *spice_audio_init(Audiodev *dev)
{
if (!using_spice) {
error_setg(errp, "Cannot use spice audio without -spice");
return NULL;
}

return &spice_audio_init;
}

@@ -182,7 +182,7 @@ static void wav_enable_out(HWVoiceOut *hw, bool enable)
}
}

static void *wav_audio_init(Audiodev *dev, Error **errp)
static void *wav_audio_init(Audiodev *dev)
{
assert(dev->driver == AUDIODEV_DRIVER_WAV);
return dev;
@@ -208,6 +208,7 @@ static struct audio_driver wav_audio_driver = {
.init = wav_audio_init,
.fini = wav_audio_fini,
.pcm_ops = &wav_pcm_ops,
.can_be_default = 0,
.max_voices_out = 1,
.max_voices_in = 0,
.voice_size_out = sizeof (WAVVoiceOut),

@@ -252,11 +252,10 @@ static void cryptodev_backend_throttle_timer_cb(void *opaque)
|
||||
continue;
|
||||
}
|
||||
|
||||
throttle_account(&backend->ts, THROTTLE_WRITE, ret);
|
||||
throttle_account(&backend->ts, true, ret);
|
||||
cryptodev_backend_operation(backend, op_info);
|
||||
if (throttle_enabled(&backend->tc) &&
|
||||
throttle_schedule_timer(&backend->ts, &backend->tt,
|
||||
THROTTLE_WRITE)) {
|
||||
throttle_schedule_timer(&backend->ts, &backend->tt, true)) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -272,7 +271,7 @@ int cryptodev_backend_crypto_operation(
|
||||
goto do_account;
|
||||
}
|
||||
|
||||
if (throttle_schedule_timer(&backend->ts, &backend->tt, THROTTLE_WRITE) ||
|
||||
if (throttle_schedule_timer(&backend->ts, &backend->tt, true) ||
|
||||
!QTAILQ_EMPTY(&backend->opinfos)) {
|
||||
QTAILQ_INSERT_TAIL(&backend->opinfos, op_info, next);
|
||||
return 0;
|
||||
@@ -284,7 +283,7 @@ do_account:
|
||||
return ret;
|
||||
}
|
||||
|
||||
throttle_account(&backend->ts, THROTTLE_WRITE, ret);
|
||||
throttle_account(&backend->ts, true, ret);
|
||||
|
||||
return cryptodev_backend_operation(backend, op_info);
|
||||
}
|
||||
@@ -342,7 +341,8 @@ static void cryptodev_backend_set_throttle(CryptoDevBackend *backend, int field,
|
||||
if (!enabled) {
|
||||
throttle_init(&backend->ts);
|
||||
throttle_timers_init(&backend->tt, qemu_get_aio_context(),
|
||||
QEMU_CLOCK_REALTIME, NULL,
|
||||
QEMU_CLOCK_REALTIME,
|
||||
cryptodev_backend_throttle_timer_cb, /* FIXME */
|
||||
cryptodev_backend_throttle_timer_cb, backend);
|
||||
}
|
||||
|
||||
|
||||
@@ -18,8 +18,6 @@
|
||||
#include "sysemu/hostmem.h"
|
||||
#include "qom/object_interfaces.h"
|
||||
#include "qom/object.h"
|
||||
#include "qapi/visitor.h"
|
||||
#include "qapi/qapi-visit-common.h"
|
||||
|
||||
OBJECT_DECLARE_SIMPLE_TYPE(HostMemoryBackendFile, MEMORY_BACKEND_FILE)
|
||||
|
||||
@@ -33,7 +31,6 @@ struct HostMemoryBackendFile {
|
||||
bool discard_data;
|
||||
bool is_pmem;
|
||||
bool readonly;
|
||||
OnOffAuto rom;
|
||||
};
|
||||
|
||||
static void
|
||||
@@ -56,39 +53,15 @@ file_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
|
||||
return;
|
||||
}
|
||||
|
||||
switch (fb->rom) {
|
||||
case ON_OFF_AUTO_AUTO:
|
||||
/* Traditionally, opening the file readonly always resulted in ROM. */
|
||||
fb->rom = fb->readonly ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
|
||||
break;
|
||||
case ON_OFF_AUTO_ON:
|
||||
if (!fb->readonly) {
|
||||
error_setg(errp, "property 'rom' = 'on' is not supported with"
|
||||
" 'readonly' = 'off'");
|
||||
return;
|
||||
}
|
||||
break;
|
||||
case ON_OFF_AUTO_OFF:
|
||||
if (fb->readonly && backend->share) {
|
||||
error_setg(errp, "property 'rom' = 'off' is incompatible with"
|
||||
" 'readonly' = 'on' and 'share' = 'on'");
|
||||
return;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
assert(false);
|
||||
}
|
||||
|
||||
name = host_memory_backend_get_name(backend);
|
||||
ram_flags = backend->share ? RAM_SHARED : 0;
|
||||
ram_flags |= fb->readonly ? RAM_READONLY_FD : 0;
|
||||
ram_flags |= fb->rom == ON_OFF_AUTO_ON ? RAM_READONLY : 0;
|
||||
ram_flags |= backend->reserve ? 0 : RAM_NORESERVE;
|
||||
ram_flags |= fb->is_pmem ? RAM_PMEM : 0;
|
||||
ram_flags |= RAM_NAMED_FILE;
|
||||
memory_region_init_ram_from_file(&backend->mr, OBJECT(backend), name,
|
||||
backend->size, fb->align, ram_flags,
|
||||
fb->mem_path, fb->offset, errp);
|
||||
fb->mem_path, fb->offset, fb->readonly,
|
||||
errp);
|
||||
g_free(name);
|
||||
#endif
|
||||
}
|
||||
@@ -228,32 +201,6 @@ static void file_memory_backend_set_readonly(Object *obj, bool value,
|
||||
fb->readonly = value;
|
||||
}
|
||||
|
||||
static void file_memory_backend_get_rom(Object *obj, Visitor *v,
|
||||
const char *name, void *opaque,
|
||||
Error **errp)
|
||||
{
|
||||
HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(obj);
|
||||
OnOffAuto rom = fb->rom;
|
||||
|
||||
visit_type_OnOffAuto(v, name, &rom, errp);
|
||||
}
|
||||
|
||||
static void file_memory_backend_set_rom(Object *obj, Visitor *v,
|
||||
const char *name, void *opaque,
|
||||
Error **errp)
|
||||
{
|
||||
HostMemoryBackend *backend = MEMORY_BACKEND(obj);
|
||||
HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(obj);
|
||||
|
||||
if (host_memory_backend_mr_inited(backend)) {
|
||||
error_setg(errp, "cannot change property '%s' of %s.", name,
|
||||
object_get_typename(obj));
|
||||
return;
|
||||
}
|
||||
|
||||
visit_type_OnOffAuto(v, name, &fb->rom, errp);
|
||||
}
|
||||
|
||||
static void file_backend_unparent(Object *obj)
|
||||
{
|
||||
HostMemoryBackend *backend = MEMORY_BACKEND(obj);
|
||||
@@ -296,10 +243,6 @@ file_backend_class_init(ObjectClass *oc, void *data)
|
||||
object_class_property_add_bool(oc, "readonly",
|
||||
file_memory_backend_get_readonly,
|
||||
file_memory_backend_set_readonly);
|
||||
object_class_property_add(oc, "rom", "OnOffAuto",
|
||||
file_memory_backend_get_rom, file_memory_backend_set_rom, NULL, NULL);
|
||||
object_class_property_set_description(oc, "rom",
|
||||
"Whether to create Read Only Memory (ROM)");
|
||||
}
|
||||
|
||||
static void file_backend_instance_finalize(Object *o)
|
||||
|
||||
@@ -238,7 +238,7 @@ struct ptm_lockstorage {
|
||||
} req; /* request */
|
||||
struct {
|
||||
ptm_res tpm_result;
|
||||
} resp; /* response */
|
||||
} resp; /* reponse */
|
||||
} u;
|
||||
};
|
||||
|
||||
|
||||
@@ -112,8 +112,12 @@ static int tpm_util_request(int fd,
|
||||
void *response,
|
||||
size_t responselen)
|
||||
{
|
||||
GPollFD fds[1] = { {.fd = fd, .events = G_IO_IN } };
|
||||
fd_set readfds;
|
||||
int n;
|
||||
struct timeval tv = {
|
||||
.tv_sec = 1,
|
||||
.tv_usec = 0,
|
||||
};
|
||||
|
||||
n = write(fd, request, requestlen);
|
||||
if (n < 0) {
|
||||
@@ -123,8 +127,11 @@ static int tpm_util_request(int fd,
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
FD_ZERO(&readfds);
|
||||
FD_SET(fd, &readfds);
|
||||
|
||||
/* wait for a second */
|
||||
n = RETRY_ON_EINTR(g_poll(fds, 1, 1000));
|
||||
n = select(fd + 1, &readfds, NULL, NULL, &tv);
|
||||
if (n != 1) {
|
||||
return -errno;
|
||||
}
|
||||
|
||||
@@ -374,7 +374,6 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
|
||||
assert(bs);
|
||||
assert(target);
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
/* QMP interface protects us from these cases */
|
||||
assert(sync_mode != MIRROR_SYNC_MODE_INCREMENTAL);
|
||||
|
||||
@@ -251,9 +251,7 @@ static int blk_log_writes_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
ret = 0;
|
||||
fail_log:
|
||||
if (ret < 0) {
|
||||
bdrv_graph_wrlock(NULL);
|
||||
bdrv_unref_child(bs, s->log_file);
|
||||
bdrv_graph_wrunlock();
|
||||
s->log_file = NULL;
|
||||
}
|
||||
fail:
|
||||
@@ -265,10 +263,8 @@ static void blk_log_writes_close(BlockDriverState *bs)
|
||||
{
|
||||
BDRVBlkLogWritesState *s = bs->opaque;
|
||||
|
||||
bdrv_graph_wrlock(NULL);
|
||||
bdrv_unref_child(bs, s->log_file);
|
||||
s->log_file = NULL;
|
||||
bdrv_graph_wrunlock();
|
||||
}
|
||||
|
||||
static int64_t coroutine_fn GRAPH_RDLOCK
|
||||
|
||||
@@ -151,10 +151,8 @@ static void blkverify_close(BlockDriverState *bs)
|
||||
{
|
||||
BDRVBlkverifyState *s = bs->opaque;
|
||||
|
||||
bdrv_graph_wrlock(NULL);
|
||||
bdrv_unref_child(bs, s->test_file);
|
||||
s->test_file = NULL;
|
||||
bdrv_graph_wrunlock();
|
||||
}
|
||||
|
||||
static int64_t coroutine_fn GRAPH_RDLOCK
|
||||
|
||||
@@ -33,6 +33,8 @@
|
||||
|
||||
#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
|
||||
|
||||
static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);
|
||||
|
||||
typedef struct BlockBackendAioNotifier {
|
||||
void (*attached_aio_context)(AioContext *new_context, void *opaque);
|
||||
void (*detach_aio_context)(void *opaque);
|
||||
@@ -101,6 +103,7 @@ typedef struct BlockBackendAIOCB {
|
||||
} BlockBackendAIOCB;
|
||||
|
||||
static const AIOCBInfo block_backend_aiocb_info = {
|
||||
.get_aio_context = blk_aiocb_get_aio_context,
|
||||
.aiocb_size = sizeof(BlockBackendAIOCB),
|
||||
};
|
||||
|
||||
@@ -118,10 +121,6 @@ static QTAILQ_HEAD(, BlockBackend) block_backends =
|
||||
static QTAILQ_HEAD(, BlockBackend) monitor_block_backends =
|
||||
QTAILQ_HEAD_INITIALIZER(monitor_block_backends);
|
||||
|
||||
static int coroutine_mixed_fn GRAPH_RDLOCK
|
||||
blk_set_perm_locked(BlockBackend *blk, uint64_t perm, uint64_t shared_perm,
|
||||
Error **errp);
|
||||
|
||||
static void blk_root_inherit_options(BdrvChildRole role, bool parent_is_format,
|
||||
int *child_flags, QDict *child_options,
|
||||
int parent_flags, QDict *parent_options)
|
||||
@@ -187,7 +186,7 @@ static void blk_vm_state_changed(void *opaque, bool running, RunState state)
|
||||
*
|
||||
* If an error is returned, the VM cannot be allowed to be resumed.
|
||||
*/
|
||||
static void GRAPH_RDLOCK blk_root_activate(BdrvChild *child, Error **errp)
|
||||
static void blk_root_activate(BdrvChild *child, Error **errp)
|
||||
{
|
||||
BlockBackend *blk = child->opaque;
|
||||
Error *local_err = NULL;
|
||||
@@ -208,7 +207,7 @@ static void GRAPH_RDLOCK blk_root_activate(BdrvChild *child, Error **errp)
|
||||
*/
|
||||
saved_shared_perm = blk->shared_perm;
|
||||
|
||||
blk_set_perm_locked(blk, blk->perm, BLK_PERM_ALL, &local_err);
|
||||
blk_set_perm(blk, blk->perm, BLK_PERM_ALL, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
blk->disable_perm = true;
|
||||
@@ -227,7 +226,7 @@ static void GRAPH_RDLOCK blk_root_activate(BdrvChild *child, Error **errp)
|
||||
return;
|
||||
}
|
||||
|
||||
blk_set_perm_locked(blk, blk->perm, blk->shared_perm, &local_err);
|
||||
blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
blk->disable_perm = true;
|
||||
@@ -260,7 +259,7 @@ static bool blk_can_inactivate(BlockBackend *blk)
|
||||
return blk->force_allow_inactivate;
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK blk_root_inactivate(BdrvChild *child)
|
||||
static int blk_root_inactivate(BdrvChild *child)
|
||||
{
|
||||
BlockBackend *blk = child->opaque;
|
||||
|
||||
@@ -780,12 +779,11 @@ BlockDriverState *blk_bs(BlockBackend *blk)
|
||||
return blk->root ? blk->root->bs : NULL;
|
||||
}
|
||||
|
||||
static BlockBackend * GRAPH_RDLOCK bdrv_first_blk(BlockDriverState *bs)
|
||||
static BlockBackend *bdrv_first_blk(BlockDriverState *bs)
|
||||
{
|
||||
BdrvChild *child;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
assert_bdrv_graph_readable();
|
||||
|
||||
QLIST_FOREACH(child, &bs->parents, next_parent) {
|
||||
if (child->klass == &child_root) {
|
||||
@@ -813,8 +811,6 @@ bool bdrv_is_root_node(BlockDriverState *bs)
|
||||
BdrvChild *c;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
assert_bdrv_graph_readable();
|
||||
|
||||
QLIST_FOREACH(c, &bs->parents, next_parent) {
|
||||
if (c->klass != &child_root) {
|
||||
return false;
|
||||
@@ -915,10 +911,7 @@ void blk_remove_bs(BlockBackend *blk)
|
||||
blk_drain(blk);
|
||||
root = blk->root;
|
||||
blk->root = NULL;
|
||||
|
||||
bdrv_graph_wrlock(NULL);
|
||||
bdrv_root_unref_child(root);
|
||||
bdrv_graph_wrunlock();
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -960,9 +953,8 @@ int blk_replace_bs(BlockBackend *blk, BlockDriverState *new_bs, Error **errp)
|
||||
/*
|
||||
* Sets the permission bitmasks that the user of the BlockBackend needs.
|
||||
*/
|
||||
static int coroutine_mixed_fn GRAPH_RDLOCK
|
||||
blk_set_perm_locked(BlockBackend *blk, uint64_t perm, uint64_t shared_perm,
|
||||
Error **errp)
|
||||
int blk_set_perm(BlockBackend *blk, uint64_t perm, uint64_t shared_perm,
|
||||
Error **errp)
|
||||
{
|
||||
int ret;
|
||||
GLOBAL_STATE_CODE();
|
||||
@@ -980,15 +972,6 @@ blk_set_perm_locked(BlockBackend *blk, uint64_t perm, uint64_t shared_perm,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int blk_set_perm(BlockBackend *blk, uint64_t perm, uint64_t shared_perm,
|
||||
Error **errp)
|
||||
{
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
return blk_set_perm_locked(blk, perm, shared_perm, errp);
|
||||
}
|
||||
|
||||
void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm)
|
||||
{
|
||||
GLOBAL_STATE_CODE();
|
||||
@@ -1358,7 +1341,7 @@ blk_co_do_preadv_part(BlockBackend *blk, int64_t offset, int64_t bytes,
|
||||
/* throttling disk I/O */
|
||||
if (blk->public.throttle_group_member.throttle_state) {
|
||||
throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
|
||||
bytes, THROTTLE_READ);
|
||||
bytes, false);
|
||||
}
|
||||
|
||||
ret = bdrv_co_preadv_part(blk->root, offset, bytes, qiov, qiov_offset,
|
||||
@@ -1432,7 +1415,7 @@ blk_co_do_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes,
|
||||
/* throttling disk I/O */
|
||||
if (blk->public.throttle_group_member.throttle_state) {
|
||||
throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
|
||||
bytes, THROTTLE_WRITE);
|
||||
bytes, true);
|
||||
}
|
||||
|
||||
if (!blk->enable_write_cache) {
|
||||
@@ -1550,7 +1533,7 @@ BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
|
||||
acb->blk = blk;
|
||||
acb->ret = ret;
|
||||
|
||||
replay_bh_schedule_oneshot_event(qemu_get_current_aio_context(),
|
||||
replay_bh_schedule_oneshot_event(blk_get_aio_context(blk),
|
||||
error_callback_bh, acb);
|
||||
return &acb->common;
|
||||
}
|
||||
@@ -1562,8 +1545,16 @@ typedef struct BlkAioEmAIOCB {
|
||||
bool has_returned;
|
||||
} BlkAioEmAIOCB;
|
||||
|
||||
static AioContext *blk_aio_em_aiocb_get_aio_context(BlockAIOCB *acb_)
|
||||
{
|
||||
BlkAioEmAIOCB *acb = container_of(acb_, BlkAioEmAIOCB, common);
|
||||
|
||||
return blk_get_aio_context(acb->rwco.blk);
|
||||
}
|
||||
|
||||
static const AIOCBInfo blk_aio_em_aiocb_info = {
|
||||
.aiocb_size = sizeof(BlkAioEmAIOCB),
|
||||
.get_aio_context = blk_aio_em_aiocb_get_aio_context,
|
||||
};
|
||||
|
||||
static void blk_aio_complete(BlkAioEmAIOCB *acb)
|
||||
@@ -1604,11 +1595,11 @@ static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset,
|
||||
acb->has_returned = false;
|
||||
|
||||
co = qemu_coroutine_create(co_entry, acb);
|
||||
aio_co_enter(qemu_get_current_aio_context(), co);
|
||||
aio_co_enter(blk_get_aio_context(blk), co);
|
||||
|
||||
acb->has_returned = true;
|
||||
if (acb->rwco.ret != NOT_DONE) {
|
||||
replay_bh_schedule_oneshot_event(qemu_get_current_aio_context(),
|
||||
replay_bh_schedule_oneshot_event(blk_get_aio_context(blk),
|
||||
blk_aio_complete_bh, acb);
|
||||
}
|
||||
|
||||
@@ -1910,11 +1901,11 @@ BlockAIOCB *blk_aio_zone_report(BlockBackend *blk, int64_t offset,
|
||||
acb->has_returned = false;
|
||||
|
||||
co = qemu_coroutine_create(blk_aio_zone_report_entry, acb);
|
||||
aio_co_enter(qemu_get_current_aio_context(), co);
|
||||
aio_co_enter(blk_get_aio_context(blk), co);
|
||||
|
||||
acb->has_returned = true;
|
||||
if (acb->rwco.ret != NOT_DONE) {
|
||||
replay_bh_schedule_oneshot_event(qemu_get_current_aio_context(),
|
||||
replay_bh_schedule_oneshot_event(blk_get_aio_context(blk),
|
||||
blk_aio_complete_bh, acb);
|
||||
}
|
||||
|
||||
@@ -1951,11 +1942,11 @@ BlockAIOCB *blk_aio_zone_mgmt(BlockBackend *blk, BlockZoneOp op,
|
||||
acb->has_returned = false;
|
||||
|
||||
co = qemu_coroutine_create(blk_aio_zone_mgmt_entry, acb);
|
||||
aio_co_enter(qemu_get_current_aio_context(), co);
|
||||
aio_co_enter(blk_get_aio_context(blk), co);
|
||||
|
||||
acb->has_returned = true;
|
||||
if (acb->rwco.ret != NOT_DONE) {
|
||||
replay_bh_schedule_oneshot_event(qemu_get_current_aio_context(),
|
||||
replay_bh_schedule_oneshot_event(blk_get_aio_context(blk),
|
||||
blk_aio_complete_bh, acb);
|
||||
}
|
||||
|
||||
@@ -1991,10 +1982,10 @@ BlockAIOCB *blk_aio_zone_append(BlockBackend *blk, int64_t *offset,
|
||||
acb->has_returned = false;
|
||||
|
||||
co = qemu_coroutine_create(blk_aio_zone_append_entry, acb);
|
||||
aio_co_enter(qemu_get_current_aio_context(), co);
|
||||
aio_co_enter(blk_get_aio_context(blk), co);
|
||||
acb->has_returned = true;
|
||||
if (acb->rwco.ret != NOT_DONE) {
|
||||
replay_bh_schedule_oneshot_event(qemu_get_current_aio_context(),
|
||||
replay_bh_schedule_oneshot_event(blk_get_aio_context(blk),
|
||||
blk_aio_complete_bh, acb);
|
||||
}
|
||||
|
||||
@@ -2262,7 +2253,6 @@ void blk_activate(BlockBackend *blk, Error **errp)
|
||||
if (qemu_in_coroutine()) {
|
||||
bdrv_co_activate(bs, errp);
|
||||
} else {
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
bdrv_activate(bs, errp);
|
||||
}
|
||||
}
|
||||
@@ -2388,7 +2378,6 @@ bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
|
||||
{
|
||||
BlockDriverState *bs = blk_bs(blk);
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
if (!bs) {
|
||||
return false;
|
||||
@@ -2445,6 +2434,12 @@ AioContext *blk_get_aio_context(BlockBackend *blk)
|
||||
return blk->ctx;
|
||||
}
|
||||
|
||||
static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
|
||||
{
|
||||
BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
|
||||
return blk_get_aio_context(blk_acb->blk);
|
||||
}
|
||||
|
||||
int blk_set_aio_context(BlockBackend *blk, AioContext *new_context,
|
||||
Error **errp)
|
||||
{
|
||||
@@ -2906,8 +2901,6 @@ const BdrvChild *blk_root(BlockBackend *blk)
|
||||
int blk_make_empty(BlockBackend *blk, Error **errp)
|
||||
{
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
if (!blk_is_available(blk)) {
|
||||
error_setg(errp, "No medium inserted");
|
||||
return -ENOMEDIUM;
|
||||
|
||||
@@ -67,7 +67,7 @@ typedef struct BlockCopyCallState {
|
||||
QLIST_ENTRY(BlockCopyCallState) list;
|
||||
|
||||
/*
|
||||
* Fields that report information about return values and errors.
|
||||
* Fields that report information about return values and erros.
|
||||
* Protected by lock in BlockCopyState.
|
||||
*/
|
||||
bool error_is_read;
|
||||
@@ -462,7 +462,7 @@ static coroutine_fn int block_copy_task_run(AioTaskPool *pool,
|
||||
* Do copy of cluster-aligned chunk. Requested region is allowed to exceed
|
||||
* s->len only to cover last cluster when s->len is not aligned to clusters.
|
||||
*
|
||||
* No sync here: neither bitmap nor intersecting requests handling, only copy.
|
||||
* No sync here: nor bitmap neighter intersecting requests handling, only copy.
|
||||
*
|
||||
* @method is an in-out argument, so that copy_range can be either extended to
|
||||
* a full-size buffer or disabled if the copy_range attempt fails. The output
|
||||
|
||||
@@ -106,9 +106,7 @@ static int bochs_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
int ret;
|
||||
|
||||
/* No write support yet */
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
ret = bdrv_apply_auto_read_only(bs, NULL, errp);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -67,9 +67,7 @@ static int cloop_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
uint32_t offsets_size, max_compressed_block_size = 1, i;
|
||||
int ret;
|
||||
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
ret = bdrv_apply_auto_read_only(bs, NULL, errp);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -434,7 +434,6 @@ int bdrv_commit(BlockDriverState *bs)
|
||||
Error *local_err = NULL;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
if (!drv)
|
||||
return -ENOMEDIUM;
|
||||
|
||||
@@ -305,7 +305,7 @@ cbw_co_snapshot_block_status(BlockDriverState *bs,
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
ret = bdrv_co_block_status(child->bs, offset, cur_bytes, pnum, map, file);
|
||||
ret = bdrv_block_status(child->bs, offset, cur_bytes, pnum, map, file);
|
||||
if (child == s->target) {
|
||||
/*
|
||||
* We refer to s->target only for areas that we've written to it.
|
||||
@@ -341,11 +341,11 @@ static void cbw_refresh_filename(BlockDriverState *bs)
|
||||
bs->file->bs->filename);
|
||||
}
|
||||
|
||||
static void GRAPH_RDLOCK
|
||||
cbw_child_perm(BlockDriverState *bs, BdrvChild *c, BdrvChildRole role,
|
||||
BlockReopenQueue *reopen_queue,
|
||||
uint64_t perm, uint64_t shared,
|
||||
uint64_t *nperm, uint64_t *nshared)
|
||||
static void cbw_child_perm(BlockDriverState *bs, BdrvChild *c,
|
||||
BdrvChildRole role,
|
||||
BlockReopenQueue *reopen_queue,
|
||||
uint64_t perm, uint64_t shared,
|
||||
uint64_t *nperm, uint64_t *nshared)
|
||||
{
|
||||
if (!(role & BDRV_CHILD_FILTERED)) {
|
||||
/*
|
||||
@@ -503,7 +503,7 @@ static void cbw_close(BlockDriverState *bs)
|
||||
s->bcs = NULL;
|
||||
}
|
||||
|
||||
static BlockDriver bdrv_cbw_filter = {
|
||||
BlockDriver bdrv_cbw_filter = {
|
||||
.format_name = "copy-before-write",
|
||||
.instance_size = sizeof(BDRVCopyBeforeWriteState),
|
||||
|
||||
|
||||
@@ -146,11 +146,11 @@ cor_co_preadv_part(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
||||
local_flags = flags;
|
||||
|
||||
/* In case of failure, try to copy-on-read anyway */
|
||||
ret = bdrv_co_is_allocated(bs->file->bs, offset, bytes, &n);
|
||||
ret = bdrv_is_allocated(bs->file->bs, offset, bytes, &n);
|
||||
if (ret <= 0) {
|
||||
ret = bdrv_co_is_allocated_above(bdrv_backing_chain_next(bs->file->bs),
|
||||
state->bottom_bs, true, offset,
|
||||
n, &n);
|
||||
ret = bdrv_is_allocated_above(bdrv_backing_chain_next(bs->file->bs),
|
||||
state->bottom_bs, true, offset,
|
||||
n, &n);
|
||||
if (ret > 0 || ret < 0) {
|
||||
local_flags |= BDRV_REQ_COPY_ON_READ;
|
||||
}
|
||||
|
||||
@@ -777,7 +777,7 @@ block_crypto_get_specific_info_luks(BlockDriverState *bs, Error **errp)
|
||||
return spec_info;
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK
|
||||
static int
|
||||
block_crypto_amend_prepare(BlockDriverState *bs, Error **errp)
|
||||
{
|
||||
BlockCrypto *crypto = bs->opaque;
|
||||
@@ -793,7 +793,7 @@ block_crypto_amend_prepare(BlockDriverState *bs, Error **errp)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void GRAPH_RDLOCK
|
||||
static void
|
||||
block_crypto_amend_cleanup(BlockDriverState *bs)
|
||||
{
|
||||
BlockCrypto *crypto = bs->opaque;
|
||||
@@ -828,7 +828,7 @@ block_crypto_amend_options_generic_luks(BlockDriverState *bs,
|
||||
errp);
|
||||
}
|
||||
|
||||
static int GRAPH_RDLOCK
|
||||
static int
|
||||
block_crypto_amend_options_luks(BlockDriverState *bs,
|
||||
QemuOpts *opts,
|
||||
BlockDriverAmendStatusCB *status_cb,
|
||||
|
||||
@@ -696,10 +696,8 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
const char *protocol_delimiter;
|
||||
int ret;
|
||||
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
ret = bdrv_apply_auto_read_only(bs, "curl driver does not support writes",
|
||||
errp);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -452,9 +452,7 @@ static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
int64_t offset;
|
||||
int ret;
|
||||
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
ret = bdrv_apply_auto_read_only(bs, NULL, errp);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -83,8 +83,6 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
|
||||
uint64_t perm;
|
||||
int ret;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
if (!id_wellformed(export->id)) {
|
||||
error_setg(errp, "Invalid block export id");
|
||||
return NULL;
|
||||
@@ -147,9 +145,7 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
|
||||
* access since the export could be available before migration handover.
|
||||
* ctx was acquired in the caller.
|
||||
*/
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
bdrv_activate(bs, NULL);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
|
||||
perm = BLK_PERM_CONSISTENT_READ;
|
||||
if (export->writable) {
|
||||
|
||||
@@ -138,7 +138,7 @@ static void vduse_blk_enable_queue(VduseDev *dev, VduseVirtq *vq)
|
||||
|
||||
aio_set_fd_handler(vblk_exp->export.ctx, vduse_queue_get_fd(vq),
|
||||
on_vduse_vq_kick, NULL, NULL, NULL, vq);
|
||||
/* Make sure we don't miss any kick after reconnecting */
|
||||
/* Make sure we don't miss any kick afer reconnecting */
|
||||
eventfd_write(vduse_queue_get_fd(vq), 1);
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Sharing QEMU block devices via vhost-user protocol
|
||||
* Sharing QEMU block devices via vhost-user protocal
|
||||
*
|
||||
* Parts of the code based on nbd/server.c.
|
||||
*
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Sharing QEMU block devices via vhost-user protocol
|
||||
* Sharing QEMU block devices via vhost-user protocal
|
||||
*
|
||||
* Copyright (c) Coiby Xu <coiby.xu@gmail.com>.
|
||||
* Copyright (c) 2020 Red Hat, Inc.
|
||||
|
||||
@@ -1159,9 +1159,9 @@ static int raw_reopen_prepare(BDRVReopenState *state,
|
||||
* As part of reopen prepare we also want to create new fd by
|
||||
* raw_reconfigure_getfd(). But it wants updated "perm", when in
|
||||
* bdrv_reopen_multiple() .bdrv_reopen_prepare() callback called prior to
|
||||
* permission update. Happily, permission update is always a part
|
||||
* (a separate stage) of bdrv_reopen_multiple() so we can rely on this
|
||||
* fact and reconfigure fd in raw_check_perm().
|
||||
* permission update. Happily, permission update is always a part (a seprate
|
||||
* stage) of bdrv_reopen_multiple() so we can rely on this fact and
|
||||
* reconfigure fd in raw_check_perm().
|
||||
*/
|
||||
|
||||
s->reopen_state = state;
|
||||
@@ -1412,9 +1412,11 @@ static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st,
|
||||
BlockZoneModel zoned;
|
||||
int ret;
|
||||
|
||||
bs->bl.zoned = BLK_Z_NONE;
|
||||
|
||||
ret = get_sysfs_zoned_model(st, &zoned);
|
||||
if (ret < 0 || zoned == BLK_Z_NONE) {
|
||||
goto no_zoned;
|
||||
return;
|
||||
}
|
||||
bs->bl.zoned = zoned;
|
||||
|
||||
@@ -1435,10 +1437,10 @@ static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st,
|
||||
if (ret < 0) {
|
||||
error_setg_errno(errp, -ret, "Unable to read chunk_sectors "
|
||||
"sysfs attribute");
|
||||
goto no_zoned;
|
||||
return;
|
||||
} else if (!ret) {
|
||||
error_setg(errp, "Read 0 from chunk_sectors sysfs attribute");
|
||||
goto no_zoned;
|
||||
return;
|
||||
}
|
||||
bs->bl.zone_size = ret << BDRV_SECTOR_BITS;
|
||||
|
||||
@@ -1446,10 +1448,10 @@ static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st,
|
||||
if (ret < 0) {
|
||||
error_setg_errno(errp, -ret, "Unable to read nr_zones "
|
||||
"sysfs attribute");
|
||||
goto no_zoned;
|
||||
return;
|
||||
} else if (!ret) {
|
||||
error_setg(errp, "Read 0 from nr_zones sysfs attribute");
|
||||
goto no_zoned;
|
||||
return;
|
||||
}
|
||||
bs->bl.nr_zones = ret;
|
||||
|
||||
@@ -1470,15 +1472,10 @@ static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st,
|
||||
ret = get_zones_wp(bs, s->fd, 0, bs->bl.nr_zones, 0);
|
||||
if (ret < 0) {
|
||||
error_setg_errno(errp, -ret, "report wps failed");
|
||||
goto no_zoned;
|
||||
bs->wps = NULL;
|
||||
return;
|
||||
}
|
||||
qemu_co_mutex_init(&bs->wps->colock);
|
||||
return;
|
||||
|
||||
no_zoned:
|
||||
bs->bl.zoned = BLK_Z_NONE;
|
||||
g_free(bs->wps);
|
||||
bs->wps = NULL;
|
||||
}
|
||||
#else /* !defined(CONFIG_BLKZONED) */
|
||||
static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st,
|
||||
@@ -2455,10 +2452,9 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
|
||||
if (fd_open(bs) < 0)
|
||||
return -EIO;
|
||||
#if defined(CONFIG_BLKZONED)
|
||||
if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) &&
|
||||
bs->bl.zoned != BLK_Z_NONE) {
|
||||
if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) && bs->wps) {
|
||||
qemu_co_mutex_lock(&bs->wps->colock);
|
||||
if (type & QEMU_AIO_ZONE_APPEND) {
|
||||
if (type & QEMU_AIO_ZONE_APPEND && bs->bl.zone_size) {
|
||||
int index = offset / bs->bl.zone_size;
|
||||
offset = bs->wps->wp[index];
|
||||
}
|
||||
@@ -2506,10 +2502,11 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
|
||||
|
||||
out:
|
||||
#if defined(CONFIG_BLKZONED)
|
||||
if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) &&
|
||||
bs->bl.zoned != BLK_Z_NONE) {
|
||||
BlockZoneWps *wps = bs->wps;
|
||||
if (ret == 0) {
|
||||
{
|
||||
BlockZoneWps *wps = bs->wps;
|
||||
if (ret == 0) {
|
||||
if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND))
|
||||
&& wps && bs->bl.zone_size) {
|
||||
uint64_t *wp = &wps->wp[offset / bs->bl.zone_size];
|
||||
if (!BDRV_ZT_IS_CONV(*wp)) {
|
||||
if (type & QEMU_AIO_ZONE_APPEND) {
|
||||
@@ -2522,12 +2519,17 @@ out:
|
||||
*wp = offset + bytes;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
}
|
||||
} else {
|
||||
if (type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) {
|
||||
update_zones_wp(bs, s->fd, 0, 1);
|
||||
}
|
||||
}
|
||||
|
||||
if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) && wps) {
|
||||
qemu_co_mutex_unlock(&wps->colock);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
return ret;
|
||||
}
|
||||
@@ -3372,7 +3374,7 @@ static void raw_account_discard(BDRVRawState *s, uint64_t nbytes, int ret)
|
||||
* of an array of zone descriptors.
|
||||
* zones is an array of zone descriptors to hold zone information on reply;
|
||||
* offset can be any byte within the entire size of the device;
|
||||
* nr_zones is the maximum number of sectors the command should operate on.
|
||||
* nr_zones is the maxium number of sectors the command should operate on.
|
||||
*/
|
||||
#if defined(CONFIG_BLKZONED)
|
||||
static int coroutine_fn raw_co_zone_report(BlockDriverState *bs, int64_t offset,
|
||||
|
||||
@@ -863,13 +863,11 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
|
||||
if (ret == -EACCES || ret == -EROFS) {
|
||||
/* Try to degrade to read-only, but if it doesn't work, still use the
|
||||
* normal error message. */
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
if (bdrv_apply_auto_read_only(bs, NULL, NULL) == 0) {
|
||||
open_flags = (open_flags & ~O_RDWR) | O_RDONLY;
|
||||
s->fd = glfs_open(s->glfs, gconf->path, open_flags);
|
||||
ret = s->fd ? 0 : -errno;
|
||||
}
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
}
|
||||
|
||||
s->supports_seek_data = qemu_gluster_test_seek(s->fd);
|
||||
|
||||
@@ -95,7 +95,7 @@ static uint32_t reader_count(void)
|
||||
|
||||
QEMU_LOCK_GUARD(&aio_context_list_lock);
|
||||
|
||||
/* rd can temporarily be negative, but the total will *always* be >= 0 */
|
||||
/* rd can temporarly be negative, but the total will *always* be >= 0 */
|
||||
rd = orphaned_reader_count;
|
||||
QTAILQ_FOREACH(brdv_graph, &aio_context_list, next_aio) {
|
||||
rd += qatomic_read(&brdv_graph->reader_count);
|
||||
@@ -106,13 +106,12 @@ static uint32_t reader_count(void)
|
||||
return rd;
|
||||
}
|
||||
|
||||
void no_coroutine_fn bdrv_graph_wrlock(BlockDriverState *bs)
|
||||
void bdrv_graph_wrlock(BlockDriverState *bs)
|
||||
{
|
||||
AioContext *ctx = NULL;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
assert(!qatomic_read(&has_writer));
|
||||
assert(!qemu_in_coroutine());
|
||||
|
||||
/*
|
||||
* Release only non-mainloop AioContext. The mainloop often relies on the
|
||||
@@ -164,29 +163,17 @@ void no_coroutine_fn bdrv_graph_wrlock(BlockDriverState *bs)
|
||||
void bdrv_graph_wrunlock(void)
|
||||
{
|
||||
GLOBAL_STATE_CODE();
|
||||
QEMU_LOCK_GUARD(&aio_context_list_lock);
|
||||
assert(qatomic_read(&has_writer));
|
||||
|
||||
WITH_QEMU_LOCK_GUARD(&aio_context_list_lock) {
|
||||
/*
|
||||
* No need for memory barriers, this works in pair with
|
||||
* the slow path of rdlock() and both take the lock.
|
||||
*/
|
||||
qatomic_store_release(&has_writer, 0);
|
||||
|
||||
/* Wake up all coroutines that are waiting to read the graph */
|
||||
qemu_co_enter_all(&reader_queue, &aio_context_list_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* Run any BHs that were scheduled during the wrlock section and that
|
||||
* callers might expect to have finished (in particular, this is important
|
||||
* for bdrv_schedule_unref()).
|
||||
*
|
||||
* Do this only after restarting coroutines so that nested event loops in
|
||||
* BHs don't deadlock if their condition relies on the coroutine making
|
||||
* progress.
|
||||
* No need for memory barriers, this works in pair with
|
||||
* the slow path of rdlock() and both take the lock.
|
||||
*/
|
||||
aio_bh_poll(qemu_get_aio_context());
|
||||
qatomic_store_release(&has_writer, 0);
|
||||
|
||||
/* Wake up all coroutine that are waiting to read the graph */
|
||||
qemu_co_enter_all(&reader_queue, &aio_context_list_lock);
|
||||
}
|
||||
|
||||
void coroutine_fn bdrv_graph_co_rdlock(void)

249 block/io.c
@@ -42,18 +42,13 @@
|
||||
/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
|
||||
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
|
||||
|
||||
static void coroutine_fn GRAPH_RDLOCK
|
||||
bdrv_parent_cb_resize(BlockDriverState *bs);
|
||||
|
||||
static void bdrv_parent_cb_resize(BlockDriverState *bs);
|
||||
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
|
||||
int64_t offset, int64_t bytes, BdrvRequestFlags flags);
|
||||
|
||||
static void GRAPH_RDLOCK
|
||||
bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore)
|
||||
static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore)
|
||||
{
|
||||
BdrvChild *c, *next;
|
||||
IO_OR_GS_CODE();
|
||||
assert_bdrv_graph_readable();
|
||||
|
||||
QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
|
||||
if (c == ignore) {
|
||||
@@ -75,12 +70,9 @@ void bdrv_parent_drained_end_single(BdrvChild *c)
|
||||
}
|
||||
}
|
||||
|
||||
static void GRAPH_RDLOCK
|
||||
bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
|
||||
static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
|
||||
{
|
||||
BdrvChild *c;
|
||||
IO_OR_GS_CODE();
|
||||
assert_bdrv_graph_readable();
|
||||
|
||||
QLIST_FOREACH(c, &bs->parents, next_parent) {
|
||||
if (c == ignore) {
|
||||
@@ -92,22 +84,17 @@ bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
|
||||
|
||||
bool bdrv_parent_drained_poll_single(BdrvChild *c)
|
||||
{
|
||||
IO_OR_GS_CODE();
|
||||
|
||||
if (c->klass->drained_poll) {
|
||||
return c->klass->drained_poll(c);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool GRAPH_RDLOCK
|
||||
bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
|
||||
bool ignore_bds_parents)
|
||||
static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
|
||||
bool ignore_bds_parents)
|
||||
{
|
||||
BdrvChild *c, *next;
|
||||
bool busy = false;
|
||||
IO_OR_GS_CODE();
|
||||
assert_bdrv_graph_readable();
|
||||
|
||||
QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
|
||||
if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
|
||||
@@ -127,7 +114,6 @@ void bdrv_parent_drained_begin_single(BdrvChild *c)
|
||||
c->quiesced_parent = true;
|
||||
|
||||
if (c->klass->drained_begin) {
|
||||
/* called with rdlock taken, but it doesn't really need it. */
|
||||
c->klass->drained_begin(c);
|
||||
}
|
||||
}
|
||||
@@ -277,9 +263,6 @@ bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent,
|
||||
static bool bdrv_drain_poll_top_level(BlockDriverState *bs,
|
||||
BdrvChild *ignore_parent)
|
||||
{
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
return bdrv_drain_poll(bs, ignore_parent, false);
|
||||
}
|
||||
|
||||
@@ -359,7 +342,7 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
|
||||
* timer callback), it is a bug in the caller that should be fixed. */
|
||||
assert(data.done);
|
||||
|
||||
/* Reacquire the AioContext of bs if we dropped it */
|
||||
/* Reaquire the AioContext of bs if we dropped it */
|
||||
if (ctx != co_ctx) {
|
||||
aio_context_acquire(ctx);
|
||||
}
|
||||
@@ -379,7 +362,6 @@ static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
|
||||
|
||||
/* Stop things in parent-to-child order */
|
||||
if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
bdrv_parent_drained_begin(bs, parent);
|
||||
if (bs->drv && bs->drv->bdrv_drain_begin) {
|
||||
bs->drv->bdrv_drain_begin(bs);
|
||||
@@ -405,8 +387,7 @@ void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, BdrvChild *parent)
|
||||
bdrv_do_drained_begin(bs, parent, false);
|
||||
}
|
||||
|
||||
void coroutine_mixed_fn
|
||||
bdrv_drained_begin(BlockDriverState *bs)
|
||||
void bdrv_drained_begin(BlockDriverState *bs)
|
||||
{
|
||||
IO_OR_GS_CODE();
|
||||
bdrv_do_drained_begin(bs, NULL, true);
|
||||
@@ -426,16 +407,12 @@ static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent)
|
||||
bdrv_co_yield_to_drain(bs, false, parent, false);
|
||||
return;
|
||||
}
|
||||
|
||||
/* At this point, we should be always running in the main loop. */
|
||||
GLOBAL_STATE_CODE();
|
||||
assert(bs->quiesce_counter > 0);
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
/* Re-enable things in child-to-parent order */
|
||||
old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
|
||||
if (old_quiesce_counter == 1) {
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
if (bs->drv && bs->drv->bdrv_drain_end) {
|
||||
bs->drv->bdrv_drain_end(bs);
|
||||
}
|
||||
@@ -459,8 +436,6 @@ void bdrv_drain(BlockDriverState *bs)
|
||||
static void bdrv_drain_assert_idle(BlockDriverState *bs)
|
||||
{
|
||||
BdrvChild *child, *next;
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
assert(qatomic_read(&bs->in_flight) == 0);
|
||||
QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
|
||||
@@ -474,9 +449,7 @@ static bool bdrv_drain_all_poll(void)
|
||||
{
|
||||
BlockDriverState *bs = NULL;
|
||||
bool result = false;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
/* bdrv_drain_poll() can't make changes to the graph and we are holding the
|
||||
* main AioContext lock, so iterating bdrv_next_all_states() is safe. */
|
||||
@@ -533,7 +506,7 @@ void bdrv_drain_all_begin_nopoll(void)
|
||||
}
|
||||
}
|
||||
|
||||
void coroutine_mixed_fn bdrv_drain_all_begin(void)
|
||||
void bdrv_drain_all_begin(void)
|
||||
{
|
||||
BlockDriverState *bs = NULL;
|
||||
|
||||
@@ -618,16 +591,10 @@ static void coroutine_fn tracked_request_end(BdrvTrackedRequest *req)
|
||||
qatomic_dec(&req->bs->serialising_in_flight);
|
||||
}
|
||||
|
||||
qemu_mutex_lock(&req->bs->reqs_lock);
|
||||
qemu_co_mutex_lock(&req->bs->reqs_lock);
|
||||
QLIST_REMOVE(req, list);
|
||||
qemu_mutex_unlock(&req->bs->reqs_lock);
|
||||
|
||||
/*
|
||||
* At this point qemu_co_queue_wait(&req->wait_queue, ...) won't be called
|
||||
* anymore because the request has been removed from the list, so it's safe
|
||||
* to restart the queue outside reqs_lock to minimize the critical section.
|
||||
*/
|
||||
qemu_co_queue_restart_all(&req->wait_queue);
|
||||
qemu_co_mutex_unlock(&req->bs->reqs_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -654,9 +621,9 @@ static void coroutine_fn tracked_request_begin(BdrvTrackedRequest *req,
|
||||
|
||||
qemu_co_queue_init(&req->wait_queue);
|
||||
|
||||
qemu_mutex_lock(&bs->reqs_lock);
|
||||
qemu_co_mutex_lock(&bs->reqs_lock);
|
||||
QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
|
||||
qemu_mutex_unlock(&bs->reqs_lock);
|
||||
qemu_co_mutex_unlock(&bs->reqs_lock);
|
||||
}
|
||||
|
||||
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
|
||||
@@ -761,21 +728,21 @@ BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
|
||||
}
|
||||
|
||||
/**
|
||||
* Round a region to subcluster (if supported) or cluster boundaries
|
||||
* Round a region to cluster boundaries
|
||||
*/
|
||||
void coroutine_fn GRAPH_RDLOCK
|
||||
bdrv_round_to_subclusters(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
||||
int64_t *align_offset, int64_t *align_bytes)
|
||||
bdrv_round_to_clusters(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
||||
int64_t *cluster_offset, int64_t *cluster_bytes)
|
||||
{
|
||||
BlockDriverInfo bdi;
|
||||
IO_CODE();
|
||||
if (bdrv_co_get_info(bs, &bdi) < 0 || bdi.subcluster_size == 0) {
|
||||
*align_offset = offset;
|
||||
*align_bytes = bytes;
|
||||
if (bdrv_co_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
|
||||
*cluster_offset = offset;
|
||||
*cluster_bytes = bytes;
|
||||
} else {
|
||||
int64_t c = bdi.subcluster_size;
|
||||
*align_offset = QEMU_ALIGN_DOWN(offset, c);
|
||||
*align_bytes = QEMU_ALIGN_UP(offset - *align_offset + bytes, c);
|
||||
int64_t c = bdi.cluster_size;
|
||||
*cluster_offset = QEMU_ALIGN_DOWN(offset, c);
|
||||
*cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -820,9 +787,9 @@ bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
|
||||
return;
|
||||
}
|
||||
|
||||
qemu_mutex_lock(&bs->reqs_lock);
|
||||
qemu_co_mutex_lock(&bs->reqs_lock);
|
||||
bdrv_wait_serialising_requests_locked(self);
|
||||
qemu_mutex_unlock(&bs->reqs_lock);
|
||||
qemu_co_mutex_unlock(&bs->reqs_lock);
|
||||
}
|
||||
|
||||
void coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
|
||||
@@ -830,12 +797,12 @@ void coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
|
||||
{
|
||||
IO_CODE();
|
||||
|
||||
qemu_mutex_lock(&req->bs->reqs_lock);
|
||||
qemu_co_mutex_lock(&req->bs->reqs_lock);
|
||||
|
||||
tracked_request_set_serialising(req, align);
|
||||
bdrv_wait_serialising_requests_locked(req);
|
||||
|
||||
qemu_mutex_unlock(&req->bs->reqs_lock);
|
||||
qemu_co_mutex_unlock(&req->bs->reqs_lock);
|
||||
}
|
||||
|
||||
int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
|
||||
@@ -1201,8 +1168,8 @@ bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
|
||||
void *bounce_buffer = NULL;
|
||||
|
||||
BlockDriver *drv = bs->drv;
|
||||
int64_t align_offset;
|
||||
int64_t align_bytes;
|
||||
int64_t cluster_offset;
|
||||
int64_t cluster_bytes;
|
||||
int64_t skip_bytes;
|
||||
int ret;
|
||||
int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
|
||||
@@ -1236,28 +1203,28 @@ bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
|
||||
* BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
|
||||
* is one reason we loop rather than doing it all at once.
|
||||
*/
|
||||
bdrv_round_to_subclusters(bs, offset, bytes, &align_offset, &align_bytes);
|
||||
skip_bytes = offset - align_offset;
|
||||
bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
|
||||
skip_bytes = offset - cluster_offset;
|
||||
|
||||
trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
|
||||
align_offset, align_bytes);
|
||||
cluster_offset, cluster_bytes);
|
||||
|
||||
while (align_bytes) {
|
||||
while (cluster_bytes) {
|
||||
int64_t pnum;
|
||||
|
||||
if (skip_write) {
|
||||
ret = 1; /* "already allocated", so nothing will be copied */
|
||||
pnum = MIN(align_bytes, max_transfer);
|
||||
pnum = MIN(cluster_bytes, max_transfer);
|
||||
} else {
|
||||
ret = bdrv_co_is_allocated(bs, align_offset,
|
||||
MIN(align_bytes, max_transfer), &pnum);
|
||||
ret = bdrv_is_allocated(bs, cluster_offset,
|
||||
MIN(cluster_bytes, max_transfer), &pnum);
|
||||
if (ret < 0) {
|
||||
/*
|
||||
* Safe to treat errors in querying allocation as if
|
||||
* unallocated; we'll probably fail again soon on the
|
||||
* read, but at least that will set a decent errno.
|
||||
*/
|
||||
pnum = MIN(align_bytes, max_transfer);
|
||||
pnum = MIN(cluster_bytes, max_transfer);
|
||||
}
|
||||
|
||||
/* Stop at EOF if the image ends in the middle of the cluster */
|
||||
@@ -1275,7 +1242,7 @@ bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
|
||||
/* Must copy-on-read; use the bounce buffer */
|
||||
pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
|
||||
if (!bounce_buffer) {
|
||||
int64_t max_we_need = MAX(pnum, align_bytes - pnum);
|
||||
int64_t max_we_need = MAX(pnum, cluster_bytes - pnum);
|
||||
int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
|
||||
int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);
|
||||
|
||||
@@ -1287,7 +1254,7 @@ bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
|
||||
}
|
||||
qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);
|
||||
|
||||
ret = bdrv_driver_preadv(bs, align_offset, pnum,
|
||||
ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
|
||||
&local_qiov, 0, 0);
|
||||
if (ret < 0) {
|
||||
goto err;
|
||||
@@ -1299,13 +1266,13 @@ bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
|
||||
/* FIXME: Should we (perhaps conditionally) be setting
|
||||
* BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
|
||||
* that still correctly reads as zero? */
|
||||
ret = bdrv_co_do_pwrite_zeroes(bs, align_offset, pnum,
|
||||
ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
|
||||
BDRV_REQ_WRITE_UNCHANGED);
|
||||
} else {
|
||||
/* This does not change the data on the disk, it is not
|
||||
* necessary to flush even in cache=writethrough mode.
|
||||
*/
|
||||
ret = bdrv_driver_pwritev(bs, align_offset, pnum,
|
||||
ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
|
||||
&local_qiov, 0,
|
||||
BDRV_REQ_WRITE_UNCHANGED);
|
||||
}
|
||||
@@ -1334,8 +1301,8 @@ bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
|
||||
}
|
||||
}
|
||||
|
||||
align_offset += pnum;
|
||||
align_bytes -= pnum;
|
||||
cluster_offset += pnum;
|
||||
cluster_bytes -= pnum;
|
||||
progress += pnum - skip_bytes;
|
||||
skip_bytes = 0;
|
||||
}
|
||||
@@ -1397,7 +1364,7 @@ bdrv_aligned_preadv(BdrvChild *child, BdrvTrackedRequest *req,
|
||||
/* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */
|
||||
flags &= ~BDRV_REQ_COPY_ON_READ;
|
||||
|
||||
ret = bdrv_co_is_allocated(bs, offset, bytes, &pnum);
|
||||
ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
|
||||
if (ret < 0) {
|
||||
goto out;
|
||||
}
|
||||
@@ -2029,7 +1996,7 @@ bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes,
|
||||
}
|
||||
}
|
||||
|
||||
static inline void coroutine_fn GRAPH_RDLOCK
|
||||
static inline void coroutine_fn
|
||||
bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes,
|
||||
BdrvTrackedRequest *req, int ret)
|
||||
{
|
||||
@@ -2356,7 +2323,6 @@ int bdrv_flush_all(void)
|
||||
int result = 0;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
/*
|
||||
* bdrv queue is managed by record/replay,
|
||||
@@ -2410,9 +2376,9 @@ int bdrv_flush_all(void)
|
||||
* set to the host mapping and BDS corresponding to the guest offset.
|
||||
*/
|
||||
static int coroutine_fn GRAPH_RDLOCK
|
||||
bdrv_co_do_block_status(BlockDriverState *bs, bool want_zero,
|
||||
int64_t offset, int64_t bytes,
|
||||
int64_t *pnum, int64_t *map, BlockDriverState **file)
|
||||
bdrv_co_block_status(BlockDriverState *bs, bool want_zero,
|
||||
int64_t offset, int64_t bytes,
|
||||
int64_t *pnum, int64_t *map, BlockDriverState **file)
|
||||
{
|
||||
int64_t total_size;
|
||||
int64_t n; /* bytes */
|
||||
@@ -2571,8 +2537,8 @@ bdrv_co_do_block_status(BlockDriverState *bs, bool want_zero,
|
||||
|
||||
if (ret & BDRV_BLOCK_RAW) {
|
||||
assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
|
||||
ret = bdrv_co_do_block_status(local_file, want_zero, local_map,
|
||||
*pnum, pnum, &local_map, &local_file);
|
||||
ret = bdrv_co_block_status(local_file, want_zero, local_map,
|
||||
*pnum, pnum, &local_map, &local_file);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -2599,8 +2565,8 @@ bdrv_co_do_block_status(BlockDriverState *bs, bool want_zero,
|
||||
int64_t file_pnum;
|
||||
int ret2;
|
||||
|
||||
ret2 = bdrv_co_do_block_status(local_file, want_zero, local_map,
|
||||
*pnum, &file_pnum, NULL, NULL);
|
||||
ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
|
||||
*pnum, &file_pnum, NULL, NULL);
|
||||
if (ret2 >= 0) {
|
||||
/* Ignore errors. This is just providing extra information, it
|
||||
* is useful but not necessary.
|
||||
@@ -2667,8 +2633,7 @@ bdrv_co_common_block_status_above(BlockDriverState *bs,
|
||||
return 0;
|
||||
}
|
||||
|
||||
ret = bdrv_co_do_block_status(bs, want_zero, offset, bytes, pnum,
|
||||
map, file);
|
||||
ret = bdrv_co_block_status(bs, want_zero, offset, bytes, pnum, map, file);
|
||||
++*depth;
|
||||
if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
|
||||
return ret;
|
||||
@@ -2684,8 +2649,8 @@ bdrv_co_common_block_status_above(BlockDriverState *bs,
|
||||
for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
|
||||
p = bdrv_filter_or_cow_bs(p))
|
||||
{
|
||||
ret = bdrv_co_do_block_status(p, want_zero, offset, bytes, pnum,
|
||||
map, file);
|
||||
ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
|
||||
file);
|
||||
++*depth;
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
@@ -2751,13 +2716,21 @@ int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
                                           bytes, pnum, map, file, NULL);
 }
 
-int coroutine_fn bdrv_co_block_status(BlockDriverState *bs, int64_t offset,
-                                      int64_t bytes, int64_t *pnum,
-                                      int64_t *map, BlockDriverState **file)
+int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
+                            int64_t offset, int64_t bytes, int64_t *pnum,
+                            int64_t *map, BlockDriverState **file)
 {
     IO_CODE();
-    return bdrv_co_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
-                                      offset, bytes, pnum, map, file);
+    return bdrv_common_block_status_above(bs, base, false, true, offset, bytes,
+                                          pnum, map, file, NULL);
+}
+
+int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
+                      int64_t *pnum, int64_t *map, BlockDriverState **file)
+{
+    IO_CODE();
+    return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
+                                   offset, bytes, pnum, map, file);
 }
 
 /*
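A minimal sketch of how a caller might use the synchronous bdrv_block_status() wrapper shown above to walk an image and report which byte ranges are allocated. This is illustrative only, not part of the change: the helper name and the printf reporting are made up, and it assumes the usual QEMU block-layer headers plus <inttypes.h>.

    /* Illustration only: enumerate allocated ranges of 'bs'.  'pnum' is the
     * number of contiguous bytes sharing the returned status; a pnum of 0
     * means end of file was reached. */
    static void report_allocation(BlockDriverState *bs, int64_t total_bytes)
    {
        int64_t offset = 0;

        while (offset < total_bytes) {
            int64_t pnum, map;
            BlockDriverState *file;
            int ret = bdrv_block_status(bs, offset, total_bytes - offset,
                                        &pnum, &map, &file);
            if (ret < 0 || pnum == 0) {
                break;
            }
            printf("%" PRId64 "..%" PRId64 ": %sallocated\n",
                   offset, offset + pnum,
                   (ret & BDRV_BLOCK_ALLOCATED) ? "" : "not ");
            offset += pnum;
        }
    }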
@@ -2804,6 +2777,45 @@ int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset,
     return !!(ret & BDRV_BLOCK_ALLOCATED);
 }
 
+int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
+                      int64_t *pnum)
+{
+    int ret;
+    int64_t dummy;
+    IO_CODE();
+
+    ret = bdrv_common_block_status_above(bs, bs, true, false, offset,
+                                         bytes, pnum ? pnum : &dummy, NULL,
+                                         NULL, NULL);
+    if (ret < 0) {
+        return ret;
+    }
+    return !!(ret & BDRV_BLOCK_ALLOCATED);
+}
+
+/* See bdrv_is_allocated_above for documentation */
+int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top,
+                                            BlockDriverState *base,
+                                            bool include_base, int64_t offset,
+                                            int64_t bytes, int64_t *pnum)
+{
+    int depth;
+    int ret;
+    IO_CODE();
+
+    ret = bdrv_co_common_block_status_above(top, base, include_base, false,
+                                            offset, bytes, pnum, NULL, NULL,
+                                            &depth);
+    if (ret < 0) {
+        return ret;
+    }
+
+    if (ret & BDRV_BLOCK_ALLOCATED) {
+        return depth;
+    }
+    return 0;
+}
+
 /*
  * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
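The depth returned by bdrv_co_is_allocated_above() above, and by its synchronous counterpart bdrv_is_allocated_above() in the next hunk, counts layers from the top of the chain (1 means the top layer itself, 0 means nothing above the base is allocated, negative values are errors). A hypothetical caller could use it like the sketch below; the helper name is made up and errors are simply folded into a "no" answer.

    /* Illustration only: is the data at [offset, offset+bytes) served by
     * 'top' itself rather than by one of its backing images? */
    static bool data_comes_from_top(BlockDriverState *top,
                                    BlockDriverState *base,
                                    int64_t offset, int64_t bytes)
    {
        int64_t pnum;

        return bdrv_is_allocated_above(top, base, false, offset, bytes,
                                       &pnum) == 1;
    }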
@@ -2821,18 +2833,18 @@ int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset,
  * words, the result is not necessarily the maximum possible range);
  * but 'pnum' will only be 0 when end of file is reached.
  */
-int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *bs,
-                                            BlockDriverState *base,
-                                            bool include_base, int64_t offset,
-                                            int64_t bytes, int64_t *pnum)
+int bdrv_is_allocated_above(BlockDriverState *top,
+                            BlockDriverState *base,
+                            bool include_base, int64_t offset,
+                            int64_t bytes, int64_t *pnum)
 {
     int depth;
     int ret;
     IO_CODE();
 
-    ret = bdrv_co_common_block_status_above(bs, base, include_base, false,
-                                            offset, bytes, pnum, NULL, NULL,
-                                            &depth);
+    ret = bdrv_common_block_status_above(top, base, include_base, false,
+                                         offset, bytes, pnum, NULL, NULL,
+                                         &depth);
     if (ret < 0) {
         return ret;
     }
@@ -2932,18 +2944,25 @@ int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
 /**************************************************************/
 /* async I/Os */
 
-/**
- * Synchronously cancels an acb. Must be called with the BQL held and the acb
- * must be processed with the BQL held too (IOThreads are not allowed).
- *
- * Use bdrv_aio_cancel_async() instead when possible.
- */
 void bdrv_aio_cancel(BlockAIOCB *acb)
 {
-    GLOBAL_STATE_CODE();
+    IO_CODE();
     qemu_aio_ref(acb);
     bdrv_aio_cancel_async(acb);
-    AIO_WAIT_WHILE_UNLOCKED(NULL, acb->refcnt > 1);
+    while (acb->refcnt > 1) {
+        if (acb->aiocb_info->get_aio_context) {
+            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
+        } else if (acb->bs) {
+            /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
+             * assert that we're not using an I/O thread.  Thread-safe
+             * code should use bdrv_aio_cancel_async exclusively.
+             */
+            assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
+            aio_poll(bdrv_get_aio_context(acb->bs), true);
+        } else {
+            abort();
+        }
+    }
     qemu_aio_unref(acb);
 }
 
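On both sides of this hunk the contract for callers is the same: bdrv_aio_cancel() blocks until the request has either completed or been cancelled, while bdrv_aio_cancel_async() only requests cancellation and returns immediately, leaving the normal completion callback to run. A hypothetical caller (illustration only; the function and the in_main_loop flag are made up) might choose between them like this:

    /* Illustration only: cancel an in-flight request.  The synchronous
     * variant waits for the request to finish, so it is reserved for
     * main-loop code; IOThread-safe code should use the async variant. */
    static void cancel_request(BlockAIOCB *acb, bool in_main_loop)
    {
        if (in_main_loop) {
            bdrv_aio_cancel(acb);
        } else {
            bdrv_aio_cancel_async(acb);
        }
    }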
@@ -2977,7 +2996,7 @@ int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
         goto early_exit;
     }
 
-    qemu_mutex_lock(&bs->reqs_lock);
+    qemu_co_mutex_lock(&bs->reqs_lock);
     current_gen = qatomic_read(&bs->write_gen);
 
     /* Wait until any previous flushes are completed */
@@ -2987,7 +3006,7 @@ int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
 
     /* Flushes reach this point in nondecreasing current_gen order. */
     bs->active_flush_req = true;
-    qemu_mutex_unlock(&bs->reqs_lock);
+    qemu_co_mutex_unlock(&bs->reqs_lock);
 
     /* Write back all layers by calling one driver function */
     if (bs->drv->bdrv_co_flush) {
@@ -3075,11 +3094,11 @@ out:
         bs->flushed_gen = current_gen;
     }
 
-    qemu_mutex_lock(&bs->reqs_lock);
+    qemu_co_mutex_lock(&bs->reqs_lock);
     bs->active_flush_req = false;
     /* Return value is ignored - it's ok if wait queue is empty */
     qemu_co_queue_next(&bs->flush_queue);
-    qemu_mutex_unlock(&bs->reqs_lock);
+    qemu_co_mutex_unlock(&bs->reqs_lock);
 
 early_exit:
     bdrv_dec_in_flight(bs);
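The only difference between the two sides of these hunks is which mutex flavour guards bs->reqs_lock: a CoMutex parks the waiting coroutine and lets the thread keep servicing other work, whereas a plain QemuMutex blocks the whole thread. A minimal sketch of the CoMutex pattern follows; it is illustrative only, the lock, counter and function names are made up, and the lock is assumed to have been set up once with qemu_co_mutex_init().

    static CoMutex example_lock;   /* qemu_co_mutex_init(&example_lock) first */
    static unsigned example_gen;

    /* Illustration only: coroutine_fn code takes a CoMutex, which yields
     * the coroutine instead of blocking the thread while contended. */
    static void coroutine_fn bump_generation(void)
    {
        qemu_co_mutex_lock(&example_lock);
        example_gen++;
        qemu_co_mutex_unlock(&example_lock);
    }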
@@ -3532,13 +3551,9 @@ int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
                                        bytes, read_flags, write_flags);
 }
 
-static void coroutine_fn GRAPH_RDLOCK
-bdrv_parent_cb_resize(BlockDriverState *bs)
+static void bdrv_parent_cb_resize(BlockDriverState *bs)
 {
     BdrvChild *c;
-
-    assert_bdrv_graph_readable();
-
     QLIST_FOREACH(c, &bs->parents, next_parent) {
         if (c->klass->resize) {
             c->klass->resize(c);
@@ -1058,7 +1058,6 @@ static BlockAIOCB *iscsi_aio_ioctl(BlockDriverState *bs,
         return NULL;
     }
 
-    /* Must use malloc(): this is freed via scsi_free_scsi_task() */
     acb->task = malloc(sizeof(struct scsi_task));
     if (acb->task == NULL) {
         error_report("iSCSI: Failed to allocate task for scsi command. %s",
@@ -1925,9 +1924,7 @@ static int iscsi_open(BlockDriverState *bs, QDict *options, int flags,
     /* Check the write protect flag of the LUN if we want to write */
    if (iscsilun->type == TYPE_DISK && (flags & BDRV_O_RDWR) &&
         iscsilun->write_protected) {
-        bdrv_graph_rdlock_main_loop();
         ret = bdrv_apply_auto_read_only(bs, "LUN is write protected", errp);
-        bdrv_graph_rdunlock_main_loop();
         if (ret < 0) {
             goto out;
         }
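Where a whole function body needs the graph read lock, the explicit bdrv_graph_rdlock_main_loop()/bdrv_graph_rdunlock_main_loop() pair removed above can also be written with the scope guard that appears earlier in this diff. The sketch below is illustrative only: the wrapper function is made up and it assumes the caller runs in the main loop.

    /* Illustration only: take the graph read lock for the whole scope
     * instead of pairing explicit lock/unlock calls. */
    static int apply_read_only_locked(BlockDriverState *bs, Error **errp)
    {
        GRAPH_RDLOCK_GUARD_MAINLOOP();
        return bdrv_apply_auto_read_only(bs, "LUN is write protected", errp);
    }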
@@ -227,7 +227,7 @@ static void qemu_laio_process_completions(LinuxAioState *s)
 
     /* If we are nested we have to notify the level above that we are done
      * by setting event_max to zero, upper level will then jump out of it's
-     * own `for` loop. If we are the last all counters dropped to zero. */
+     * own `for` loop. If we are the last all counters droped to zero. */
     s->event_max = 0;
     s->event_idx = 0;
 }
@@ -4,41 +4,41 @@ block_ss.add(files(
   'aio_task.c',
   'amend.c',
   'backup.c',
+  'copy-before-write.c',
   'blkdebug.c',
   'blklogwrites.c',
   'blkverify.c',
   'block-backend.c',
   'block-copy.c',
+  'graph-lock.c',
   'commit.c',
-  'copy-before-write.c',
   'copy-on-read.c',
+  'preallocate.c',
+  'progress_meter.c',
   'create.c',
   'crypto.c',
   'dirty-bitmap.c',
   'filter-compress.c',
-  'graph-lock.c',
   'io.c',
   'mirror.c',
   'nbd.c',
   'null.c',
   'plug.c',
-  'preallocate.c',
-  'progress_meter.c',
   'qapi.c',
+  'qcow2.c',
   'qcow2-bitmap.c',
   'qcow2-cache.c',
   'qcow2-cluster.c',
   'qcow2-refcount.c',
   'qcow2-snapshot.c',
   'qcow2-threads.c',
-  'qcow2.c',
   'quorum.c',
   'raw-format.c',
   'reqlist.c',
   'snapshot.c',
   'snapshot-access.c',
+  'throttle.c',
   'throttle-groups.c',
-  'throttle.c',
   'write-threshold.c',
 ), zstd, zlib, gnutls)
Some files were not shown because too many files have changed in this diff.