Compare commits

12 Commits

Author SHA1 Message Date
Fabiano Rosas
eab13108cd tests/qtest: bios-tables-test: Skip if missing configs
If we build with --without-default-devices, CONFIG_HPET and
CONFIG_PARALLEL are set to N, which makes the respective devices go
missing from the ACPI tables.

Signed-off-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Thomas Huth <thuth@redhat.com>
2023-02-13 18:01:07 -03:00
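
A minimal sketch of the kind of runtime guard described above, assuming
libqtest's qtest_has_device() helper and a hypothetical test body (the
actual patch may structure the check differently):

/* Sketch only -- not the literal patch. */
#include "qemu/osdep.h"
#include "libqtest.h"

static void test_acpi_with_parallel(void)
{
    QTestState *qts;

    /* --without-default-devices can set CONFIG_PARALLEL=n, so the
     * device (and its ACPI table entries) may simply not exist. */
    if (!qtest_has_device("isa-parallel")) {
        g_test_skip("isa-parallel not present in this QEMU binary");
        return;
    }
    qts = qtest_init("-machine pc");
    /* ... dump the guest's ACPI tables and diff against expected blobs ... */
    qtest_quit(qts);
}

int main(int argc, char **argv)
{
    g_test_init(&argc, &argv, NULL);
    qtest_add_func("/acpi/parallel", test_acpi_with_parallel);
    return g_test_run();
}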
Fabiano Rosas
750400c56f tests/qemu-iotests: Require virtio-scsi-pci
Check that virtio-scsi-pci is present in the QEMU build before running
the tests.

Signed-off-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Thomas Huth <thuth@redhat.com>
2023-02-13 18:01:07 -03:00
Fabiano Rosas
f5ef199312 tests/qtest: Do not include hexloader-test if loader device is not present
Signed-off-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Thomas Huth <thuth@redhat.com>
2023-02-13 18:01:07 -03:00
Fabiano Rosas
c789207417 tests/qtest: Check for devices in bios-tables-test
Do not include tests that require devices that are not available in
the QEMU build.

Signed-off-by: Fabiano Rosas <farosas@suse.de>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
2023-02-13 18:01:07 -03:00
Fabiano Rosas
3bf0b7dd7b tests/qtest: drive_del-test: Skip tests that require missing devices
Signed-off-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Thomas Huth <thuth@redhat.com>
2023-02-13 18:01:07 -03:00
Fabiano Rosas
3864d5e5c6 tests/qtest: Skip unplug tests that use missing devices
Signed-off-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Thomas Huth <thuth@redhat.com>
2023-02-13 18:01:07 -03:00
Fabiano Rosas
8a840ab4c2 tests/qtest: Fix coding style in device-plug-test.c
We should not mix declarations and statements in QEMU code.

Signed-off-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Thomas Huth <thuth@redhat.com>
2023-02-13 18:01:07 -03:00
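
The style rule referenced above, shown with a hypothetical before/after
(helper names are made up for illustration; QEMU's docs/devel/style.rst
asks for declarations at the start of a block):

static void do_setup(void) { }
static int get_count(void) { return 0; }
static void use(int n) { (void)n; }

/* Mixed -- not QEMU style: a declaration follows a statement. */
static void f_mixed(void)
{
    do_setup();
    int count = get_count();
    use(count);
}

/* QEMU style: all declarations at the top of the block. */
static void f_qemu_style(void)
{
    int count;

    do_setup();
    count = get_count();
    use(count);
}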
Fabiano Rosas
d32f29f651 tests/qtest: hd-geo-test: Check for missing devices
Don't include tests that require devices not available in the QEMU
binary.

Signed-off-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Thomas Huth <thuth@redhat.com>
2023-02-13 18:01:07 -03:00
Fabiano Rosas
696cf0c1cd tests/qtest: Don't build virtio-serial-test.c if device not present
The virtconsole device might not be present in the QEMU build that is
being tested.

Signed-off-by: Fabiano Rosas <farosas@suse.de>
2023-02-13 18:01:07 -03:00
Fabiano Rosas
ef92be0914 tests/qtest: Add dependence on PCIE_PORT for virtio-net-failover.c
This test depends on the presence of the pcie-root-port device. Add a
build-time dependency.

Signed-off-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Thomas Huth <thuth@redhat.com>
2023-02-13 18:01:07 -03:00
Fabiano Rosas
f85bfa0bcb tests/qtest: Do not run lsi53c895a test if device is not present
The tests are built once for all the targets, so as long as one QEMU
binary is built with CONFIG_LSI_SCSI_PCI=y, this test will run. However,
some binaries might not include the device, so check again at runtime.

Signed-off-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Thomas Huth <thuth@redhat.com>
2023-02-13 18:01:07 -03:00
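
A sketch of such a registration-time guard, again assuming libqtest's
qtest_has_device() and a made-up test path and body:

#include "qemu/osdep.h"
#include "libqtest.h"

static void test_lsi_smoke(void)   /* hypothetical test body */
{
    QTestState *qts = qtest_init("-device lsi53c895a");

    qtest_quit(qts);
}

int main(int argc, char **argv)
{
    g_test_init(&argc, &argv, NULL);

    /* Built once for all targets, but the binary under test may have
     * been configured without CONFIG_LSI_SCSI_PCI -- probe at runtime. */
    if (!qtest_has_device("lsi53c895a")) {
        return 0;
    }

    qtest_add_func("/lsi53c895a/smoke", test_lsi_smoke);
    return g_test_run();
}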
Fabiano Rosas
ecbd3f095e tests/qtest: Skip PXE tests for missing devices
Check if the devices we're trying to add are present in the QEMU
binary. They could have been removed from the build via Kconfig or the
--without-default-devices option.

Signed-off-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Thomas Huth <thuth@redhat.com>
2023-02-13 18:01:07 -03:00
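
For a test like the PXE one that iterates over several NIC models, the
guard can sit in the registration loop; a hedged sketch (the model list
and test body here are illustrative, not the actual test):

#include "qemu/osdep.h"
#include "libqtest.h"

static void test_pxe_one(const void *model)
{
    char *args = g_strdup_printf("-device %s", (const char *)model);
    QTestState *qts = qtest_init(args);

    qtest_quit(qts);
    g_free(args);
}

int main(int argc, char **argv)
{
    static const char *models[] = { "e1000", "virtio-net-pci", "rtl8139" };
    char *path;
    int i;

    g_test_init(&argc, &argv, NULL);
    for (i = 0; i < G_N_ELEMENTS(models); i++) {
        /* Skip models compiled out via Kconfig or
         * --without-default-devices. */
        if (!qtest_has_device(models[i])) {
            continue;
        }
        path = g_strdup_printf("pxe/%s", models[i]);
        qtest_add_data_func(path, models[i], test_pxe_one);
        g_free(path);
    }
    return g_test_run();
}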
2960 changed files with 93666 additions and 177271 deletions

.cirrus.yml

@@ -0,0 +1,109 @@
env:
CIRRUS_CLONE_DEPTH: 1
windows_msys2_task:
timeout_in: 90m
windows_container:
image: cirrusci/windowsservercore:2019
os_version: 2019
cpu: 8
memory: 8G
env:
CIRRUS_SHELL: powershell
MSYS: winsymlinks:native
MSYSTEM: MINGW64
MSYS2_URL: https://github.com/msys2/msys2-installer/releases/download/2022-06-03/msys2-base-x86_64-20220603.sfx.exe
MSYS2_FINGERPRINT: 0
MSYS2_PACKAGES: "
diffutils git grep make pkg-config sed
mingw-w64-x86_64-python
mingw-w64-x86_64-python-sphinx
mingw-w64-x86_64-toolchain
mingw-w64-x86_64-SDL2
mingw-w64-x86_64-SDL2_image
mingw-w64-x86_64-gtk3
mingw-w64-x86_64-glib2
mingw-w64-x86_64-ninja
mingw-w64-x86_64-jemalloc
mingw-w64-x86_64-lzo2
mingw-w64-x86_64-zstd
mingw-w64-x86_64-libjpeg-turbo
mingw-w64-x86_64-pixman
mingw-w64-x86_64-libgcrypt
mingw-w64-x86_64-libpng
mingw-w64-x86_64-libssh
mingw-w64-x86_64-snappy
mingw-w64-x86_64-libusb
mingw-w64-x86_64-usbredir
mingw-w64-x86_64-libtasn1
mingw-w64-x86_64-nettle
mingw-w64-x86_64-cyrus-sasl
mingw-w64-x86_64-curl
mingw-w64-x86_64-gnutls
mingw-w64-x86_64-libnfs
"
CHERE_INVOKING: 1
msys2_cache:
folder: C:\tools\archive
reupload_on_changes: false
# These env variables are used to generate a fingerprint that triggers the cache procedure
# If you want to force re-population of msys2, increase MSYS2_FINGERPRINT
fingerprint_script:
- |
echo $env:CIRRUS_TASK_NAME
echo $env:MSYS2_URL
echo $env:MSYS2_FINGERPRINT
echo $env:MSYS2_PACKAGES
populate_script:
- |
md -Force C:\tools\archive\pkg
$start_time = Get-Date
bitsadmin /transfer msys_download /dynamic /download /priority FOREGROUND $env:MSYS2_URL C:\tools\archive\base.exe
Write-Output "Download time taken: $((Get-Date).Subtract($start_time))"
cd C:\tools
C:\tools\archive\base.exe -y
del -Force C:\tools\archive\base.exe
Write-Output "Base install time taken: $((Get-Date).Subtract($start_time))"
$start_time = Get-Date
((Get-Content -path C:\tools\msys64\etc\\post-install\\07-pacman-key.post -Raw) -replace '--refresh-keys', '--version') | Set-Content -Path C:\tools\msys64\etc\\post-install\\07-pacman-key.post
C:\tools\msys64\usr\bin\bash.exe -lc "sed -i 's/^CheckSpace/#CheckSpace/g' /etc/pacman.conf"
C:\tools\msys64\usr\bin\bash.exe -lc "export"
C:\tools\msys64\usr\bin\pacman.exe --noconfirm -Sy
echo Y | C:\tools\msys64\usr\bin\pacman.exe --noconfirm -Suu --overwrite=*
taskkill /F /FI "MODULES eq msys-2.0.dll"
tasklist
C:\tools\msys64\usr\bin\bash.exe -lc "mv -f /etc/pacman.conf.pacnew /etc/pacman.conf || true"
C:\tools\msys64\usr\bin\bash.exe -lc "pacman --noconfirm -Syuu --overwrite=*"
Write-Output "Core install time taken: $((Get-Date).Subtract($start_time))"
$start_time = Get-Date
C:\tools\msys64\usr\bin\bash.exe -lc "pacman --noconfirm -S --needed $env:MSYS2_PACKAGES"
Write-Output "Package install time taken: $((Get-Date).Subtract($start_time))"
$start_time = Get-Date
del -Force -ErrorAction SilentlyContinue C:\tools\msys64\etc\mtab
del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\fd
del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\stderr
del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\stdin
del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\stdout
del -Force -Recurse -ErrorAction SilentlyContinue C:\tools\msys64\var\cache\pacman\pkg
tar cf C:\tools\archive\msys64.tar -C C:\tools\ msys64
Write-Output "Package archive time taken: $((Get-Date).Subtract($start_time))"
del -Force -Recurse -ErrorAction SilentlyContinue c:\tools\msys64
install_script:
- |
$start_time = Get-Date
cd C:\tools
ls C:\tools\archive\msys64.tar
tar xf C:\tools\archive\msys64.tar
Write-Output "Extract msys2 time taken: $((Get-Date).Subtract($start_time))"
script:
- C:\tools\msys64\usr\bin\bash.exe -lc "mkdir build"
- C:\tools\msys64\usr\bin\bash.exe -lc "cd build && ../configure --python=python3"
- C:\tools\msys64\usr\bin\bash.exe -lc "cd build && make -j8"
- exit $LastExitCode
test_script:
- C:\tools\msys64\usr\bin\bash.exe -lc "cd build && make V=1 check"
- exit $LastExitCode

@@ -1,21 +0,0 @@
#
# List of code-formatting clean ups the git blame can ignore
#
# git blame --ignore-revs-file .git-blame-ignore-revs
#
# or
#
# git config blame.ignoreRevsFile .git-blame-ignore-revs
#
# gdbstub: clean-up indents
ad9e4585b3c7425759d3eea697afbca71d2c2082
# e1000e: fix code style
0eadd56bf53ab196a16d492d7dd31c62e1c24c32
# target/riscv: coding style fixes
8c7feddddd9218b407792120bcfda0347ed16205
# replace TABs with spaces
48805df9c22a0700fba4b3b548fafaa21726ca68

@@ -1,68 +1,44 @@
variables:
# On stable branches this is changed by later rules. Should also
# be overridden per pipeline if running pipelines concurrently
# for different branches in contributor forks.
QEMU_CI_CONTAINER_TAG: latest
# For purposes of CI rules, upstream is the gitlab.com/qemu-project
# namespace. When testing CI, it might be useful to override this
# to point to a fork repo
QEMU_CI_UPSTREAM: qemu-project
# The order of rules defined here is critically important.
# They are evaluated in order and first match wins.
#
# Thus we group them into a number of stages, ordered from
# most restrictive to least restrictive
#
# For pipelines running for stable "staging-X.Y" branches
# we must override QEMU_CI_CONTAINER_TAG
#
.base_job_template:
variables:
# Each script line will be in a collapsible section in the job output
# and show the duration of each line.
FF_SCRIPT_SECTIONS: 1
interruptible: true
rules:
#############################################################
# Stage 1: exclude scenarios where we definitely don't
# want jobs to run
#############################################################
# Never run jobs upstream on stable branch, staging branch jobs already ran
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /^stable-/'
when: never
# Never run jobs upstream on tags, staging branch jobs already ran
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_TAG'
when: never
# Cirrus jobs can't run unless the creds / target repo are set
- if: '$QEMU_JOB_CIRRUS && ($CIRRUS_GITHUB_REPO == null || $CIRRUS_API_TOKEN == null)'
when: never
# Publishing jobs should only run on the default branch in upstream
- if: '$QEMU_JOB_PUBLISH == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH'
- if: '$QEMU_JOB_PUBLISH == "1" && $CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH'
when: never
# Non-publishing jobs should only run on staging branches in upstream
- if: '$QEMU_JOB_PUBLISH != "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH !~ /staging/'
- if: '$QEMU_JOB_PUBLISH != "1" && $CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH !~ /staging/'
when: never
# Jobs only intended for forks should always be skipped on upstream
- if: '$QEMU_JOB_ONLY_FORKS == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM'
- if: '$QEMU_JOB_ONLY_FORKS == "1" && $CI_PROJECT_NAMESPACE == "qemu-project"'
when: never
# Forks don't get pipelines unless QEMU_CI=1 or QEMU_CI=2 is set
- if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
- if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"'
when: never
# Avocado jobs don't run in forks unless $QEMU_CI_AVOCADO_TESTING is set
- if: '$QEMU_JOB_AVOCADO && $QEMU_CI_AVOCADO_TESTING != "1" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
- if: '$QEMU_JOB_AVOCADO && $QEMU_CI_AVOCADO_TESTING != "1" && $CI_PROJECT_NAMESPACE != "qemu-project"'
when: never
@@ -72,29 +48,17 @@ variables:
#############################################################
# Optional jobs should not be run unless manually triggered
- if: '$QEMU_JOB_OPTIONAL && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
when: manual
allow_failure: true
variables:
QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG
- if: '$QEMU_JOB_OPTIONAL'
when: manual
allow_failure: true
# Skipped jobs should not be run unless manually triggered
- if: '$QEMU_JOB_SKIPPED && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
when: manual
allow_failure: true
variables:
QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG
- if: '$QEMU_JOB_SKIPPED'
when: manual
allow_failure: true
# Avocado jobs can be manually started in forks if $QEMU_CI_AVOCADO_TESTING is unset
- if: '$QEMU_JOB_AVOCADO && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
- if: '$QEMU_JOB_AVOCADO && $CI_PROJECT_NAMESPACE != "qemu-project"'
when: manual
allow_failure: true
@@ -106,23 +70,8 @@ variables:
# Forks pipeline jobs don't start automatically unless
# QEMU_CI=2 is set
- if: '$QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
- if: '$QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"'
when: manual
# Upstream pipeline jobs start automatically unless told not to
# by setting QEMU_CI=1
- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
when: manual
variables:
QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG
- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM'
when: manual
# Jobs can run if any jobs they depend on were successful
- if: '$QEMU_JOB_SKIPPED && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
when: on_success
variables:
QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG
# Jobs can run if any jobs they depend on were successful
- when: on_success

@@ -1,19 +1,25 @@
.native_build_job_template:
extends: .base_job_template
stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
before_script:
- JOBS=$(expr $(nproc) + 1)
script:
- mkdir build
- cd build
- ../configure --enable-werror --disable-docs --enable-fdt=system
${TARGETS:+--target-list="$TARGETS"}
$CONFIGURE_ARGS ||
{ cat config.log meson-logs/meson-log.txt && exit 1; }
- if test -n "$LD_JOBS";
then
pyvenv/bin/meson configure . -Dbackend_max_links="$LD_JOBS" ;
scripts/git-submodule.sh update meson ;
fi
- mkdir build
- cd build
- if test -n "$TARGETS";
then
../configure --enable-werror --disable-docs ${LD_JOBS:+--meson=git} $CONFIGURE_ARGS --target-list="$TARGETS" ;
else
../configure --enable-werror --disable-docs ${LD_JOBS:+--meson=git} $CONFIGURE_ARGS ;
fi || { cat config.log meson-logs/meson-log.txt && exit 1; }
- if test -n "$LD_JOBS";
then
../meson/meson.py configure . -Dbackend_max_links="$LD_JOBS" ;
fi || exit 1;
- make -j"$JOBS"
- if test -n "$MAKE_CHECK_ARGS";
@@ -21,30 +27,13 @@
make -j"$JOBS" $MAKE_CHECK_ARGS ;
fi
# We jump through some hoops in common_test_job_template to avoid
# rebuilding all the object files we skip in the artifacts
.native_build_artifact_template:
artifacts:
when: on_success
expire_in: 2 days
paths:
- build
- .git-submodule-status
exclude:
- build/**/*.p
- build/**/*.a.p
- build/**/*.fa.p
- build/**/*.c.o
- build/**/*.c.o.d
- build/**/*.fa
.common_test_job_template:
extends: .base_job_template
stage: test
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
script:
- scripts/git-submodule.sh update roms/SLOF
- meson subprojects download $(cd build/subprojects && echo *)
- scripts/git-submodule.sh update
$(sed -n '/GIT_SUBMODULES=/ s/.*=// p' build/config-host.mak)
- cd build
- find . -type f -exec touch {} +
# Avoid recompiling by hiding ninja with NINJA=":"
@@ -54,7 +43,6 @@
extends: .common_test_job_template
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
when: always
expire_in: 7 days
paths:
- build/meson-logs/testlog.txt
@@ -70,7 +58,7 @@
policy: pull-push
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
when: always
when: on_failure
expire_in: 7 days
paths:
- build/tests/results/latest/results.xml

@@ -2,16 +2,20 @@ include:
- local: '/.gitlab-ci.d/buildtest-template.yml'
build-system-alpine:
extends:
- .native_build_job_template
- .native_build_artifact_template
extends: .native_build_job_template
needs:
- job: amd64-alpine-container
variables:
IMAGE: alpine
TARGETS: avr-softmmu loongarch64-softmmu mips64-softmmu mipsel-softmmu
TARGETS: aarch64-softmmu alpha-softmmu cris-softmmu hppa-softmmu
microblazeel-softmmu mips64el-softmmu
MAKE_CHECK_ARGS: check-build
CONFIGURE_ARGS: --enable-docs --enable-trace-backends=log,simple,syslog
artifacts:
expire_in: 2 days
paths:
- .git-submodule-status
- build
check-system-alpine:
extends: .native_test_job_template
@@ -32,17 +36,19 @@ avocado-system-alpine:
MAKE_CHECK_ARGS: check-avocado
build-system-ubuntu:
extends:
- .native_build_job_template
- .native_build_artifact_template
extends: .native_build_job_template
needs:
job: amd64-ubuntu2204-container
job: amd64-ubuntu2004-container
variables:
IMAGE: ubuntu2204
CONFIGURE_ARGS: --enable-docs
TARGETS: alpha-softmmu cris-softmmu hppa-softmmu
IMAGE: ubuntu2004
CONFIGURE_ARGS: --enable-docs --enable-fdt=system --enable-capstone
TARGETS: aarch64-softmmu alpha-softmmu cris-softmmu hppa-softmmu
microblazeel-softmmu mips64el-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-ubuntu:
extends: .native_test_job_template
@@ -50,7 +56,7 @@ check-system-ubuntu:
- job: build-system-ubuntu
artifacts: true
variables:
IMAGE: ubuntu2204
IMAGE: ubuntu2004
MAKE_CHECK_ARGS: check
avocado-system-ubuntu:
@@ -59,21 +65,22 @@ avocado-system-ubuntu:
- job: build-system-ubuntu
artifacts: true
variables:
IMAGE: ubuntu2204
IMAGE: ubuntu2004
MAKE_CHECK_ARGS: check-avocado
build-system-debian:
extends:
- .native_build_job_template
- .native_build_artifact_template
extends: .native_build_job_template
needs:
job: amd64-debian-container
variables:
IMAGE: debian-amd64
CONFIGURE_ARGS: --with-coroutine=sigaltstack
TARGETS: arm-softmmu i386-softmmu riscv64-softmmu sh4eb-softmmu
sparc-softmmu xtensaeb-softmmu
TARGETS: arm-softmmu avr-softmmu i386-softmmu mipsel-softmmu
riscv64-softmmu sh4eb-softmmu sparc-softmmu xtensaeb-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-debian:
extends: .native_test_job_template
@@ -102,21 +109,24 @@ crash-test-debian:
IMAGE: debian-amd64
script:
- cd build
- make NINJA=":" check-venv
- tests/venv/bin/python3 scripts/device-crash-test -q --tcg-only ./qemu-system-i386
- make check-venv
- tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-i386
build-system-fedora:
extends:
- .native_build_job_template
- .native_build_artifact_template
extends: .native_build_job_template
needs:
job: amd64-fedora-container
variables:
IMAGE: fedora
CONFIGURE_ARGS: --disable-gcrypt --enable-nettle --enable-docs
--enable-fdt=system --enable-slirp --enable-capstone
TARGETS: tricore-softmmu microblaze-softmmu mips-softmmu
xtensa-softmmu m68k-softmmu riscv32-softmmu ppc-softmmu sparc64-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-fedora:
extends: .native_test_job_template
@@ -145,23 +155,26 @@ crash-test-fedora:
IMAGE: fedora
script:
- cd build
- make NINJA=":" check-venv
- make check-venv
- tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-ppc
- tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-riscv32
build-system-centos:
extends:
- .native_build_job_template
- .native_build_artifact_template
extends: .native_build_job_template
needs:
job: amd64-centos8-container
variables:
IMAGE: centos8
CONFIGURE_ARGS: --disable-nettle --enable-gcrypt --enable-vfio-user-server
CONFIGURE_ARGS: --disable-nettle --enable-gcrypt --enable-fdt=system
--enable-modules --enable-trace-backends=dtrace --enable-docs
--enable-vfio-user-server
TARGETS: ppc64-softmmu or1k-softmmu s390x-softmmu
x86_64-softmmu rx-softmmu sh4-softmmu nios2-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-centos:
extends: .native_test_job_template
@@ -182,15 +195,18 @@ avocado-system-centos:
MAKE_CHECK_ARGS: check-avocado
build-system-opensuse:
extends:
- .native_build_job_template
- .native_build_artifact_template
extends: .native_build_job_template
needs:
job: amd64-opensuse-leap-container
variables:
IMAGE: opensuse-leap
CONFIGURE_ARGS: --enable-fdt=system
TARGETS: s390x-softmmu x86_64-softmmu aarch64-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-opensuse:
extends: .native_test_job_template
@@ -325,9 +341,7 @@ clang-user:
# Split into three sets of build/check/avocado to limit the execution time of each
# job
build-cfi-aarch64:
extends:
- .native_build_job_template
- .native_build_artifact_template
extends: .native_build_job_template
needs:
- job: amd64-fedora-container
variables:
@@ -343,6 +357,10 @@ build-cfi-aarch64:
# skipped until the situation has been solved.
QEMU_JOB_SKIPPED: 1
timeout: 90m
artifacts:
expire_in: 2 days
paths:
- build
check-cfi-aarch64:
extends: .native_test_job_template
@@ -363,9 +381,7 @@ avocado-cfi-aarch64:
MAKE_CHECK_ARGS: check-avocado
build-cfi-ppc64-s390x:
extends:
- .native_build_job_template
- .native_build_artifact_template
extends: .native_build_job_template
needs:
- job: amd64-fedora-container
variables:
@@ -381,6 +397,10 @@ build-cfi-ppc64-s390x:
# skipped until the situation has been solved.
QEMU_JOB_SKIPPED: 1
timeout: 80m
artifacts:
expire_in: 2 days
paths:
- build
check-cfi-ppc64-s390x:
extends: .native_test_job_template
@@ -401,9 +421,7 @@ avocado-cfi-ppc64-s390x:
MAKE_CHECK_ARGS: check-avocado
build-cfi-x86_64:
extends:
- .native_build_job_template
- .native_build_artifact_template
extends: .native_build_job_template
needs:
- job: amd64-fedora-container
variables:
@@ -415,6 +433,10 @@ build-cfi-x86_64:
TARGETS: x86_64-softmmu
MAKE_CHECK_ARGS: check-build
timeout: 70m
artifacts:
expire_in: 2 days
paths:
- build
check-cfi-x86_64:
extends: .native_test_job_template
@@ -437,24 +459,36 @@ avocado-cfi-x86_64:
tsan-build:
extends: .native_build_job_template
needs:
job: amd64-ubuntu2204-container
job: amd64-ubuntu2004-container
variables:
IMAGE: ubuntu2204
CONFIGURE_ARGS: --enable-tsan --cc=clang --cxx=clang++
--enable-trace-backends=ust --disable-slirp
IMAGE: ubuntu2004
CONFIGURE_ARGS: --enable-tsan --cc=clang-10 --cxx=clang++-10
--enable-trace-backends=ust --enable-fdt=system --disable-slirp
TARGETS: x86_64-softmmu ppc64-softmmu riscv64-softmmu x86_64-linux-user
MAKE_CHECK_ARGS: bench V=1
# gcov is a GCC feature
gcov:
# gprof/gcov are GCC features
build-gprof-gcov:
extends: .native_build_job_template
needs:
job: amd64-ubuntu2204-container
timeout: 80m
job: amd64-ubuntu2004-container
variables:
IMAGE: ubuntu2204
CONFIGURE_ARGS: --enable-gcov
IMAGE: ubuntu2004
CONFIGURE_ARGS: --enable-gprof --enable-gcov
TARGETS: aarch64-softmmu ppc64-softmmu s390x-softmmu x86_64-softmmu
MAKE_CHECK_ARGS: check-unit check-softfloat
artifacts:
expire_in: 1 days
paths:
- build
check-gprof-gcov:
extends: .native_test_job_template
needs:
- job: build-gprof-gcov
artifacts: true
variables:
IMAGE: ubuntu2004
MAKE_CHECK_ARGS: check
after_script:
- cd build
- gcovr --xml-pretty --exclude-unreachable-branches --print-summary
@@ -462,12 +496,8 @@ gcov:
coverage: /^\s*lines:\s*\d+.\d+\%/
artifacts:
name: ${CI_JOB_NAME}-${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHA}
when: always
expire_in: 2 days
paths:
- build/meson-logs/testlog.txt
reports:
junit: build/meson-logs/testlog.junit.xml
coverage_report:
coverage_format: cobertura
path: build/coverage.xml
@@ -515,6 +545,18 @@ build-tci:
- QTEST_QEMU_BINARY="./qemu-system-s390x" ./tests/qtest/pxe-test -m slow
- make check-tcg
# Alternate coroutine implementations are only really of interest to KVM users
# However, we can't test against KVM on Gitlab-CI so we can only run unit tests
build-coroutine-sigaltstack:
extends: .native_build_job_template
needs:
job: amd64-ubuntu2004-container
variables:
IMAGE: ubuntu2004
CONFIGURE_ARGS: --with-coroutine=sigaltstack --disable-tcg
--enable-trace-backends=ftrace
MAKE_CHECK_ARGS: check-unit
# Check our reduced build configurations
build-without-defaults:
extends: .native_build_job_template
@@ -531,12 +573,12 @@ build-without-defaults:
--disable-strip
TARGETS: avr-softmmu mips64-softmmu s390x-softmmu sh4-softmmu
sparc64-softmmu hexagon-linux-user i386-linux-user s390x-linux-user
MAKE_CHECK_ARGS: check
MAKE_CHECK_ARGS: check-unit check-qtest-avr check-qtest-mips64
build-libvhost-user:
extends: .base_job_template
stage: build
image: $CI_REGISTRY_IMAGE/qemu/fedora:$QEMU_CI_CONTAINER_TAG
image: $CI_REGISTRY_IMAGE/qemu/fedora:latest
needs:
job: amd64-fedora-container
script:
@@ -548,9 +590,7 @@ build-libvhost-user:
# No targets are built here, just tools, docs, and unit tests. This
# also feeds into the eventual documentation deployment steps later
build-tools-and-docs-debian:
extends:
- .native_build_job_template
- .native_build_artifact_template
extends: .native_build_job_template
needs:
job: amd64-debian-container
# when running on 'master' we use pre-existing container
@@ -560,6 +600,10 @@ build-tools-and-docs-debian:
MAKE_CHECK_ARGS: check-unit ctags TAGS cscope
CONFIGURE_ARGS: --disable-system --disable-user --enable-docs --enable-tools
QEMU_JOB_PUBLISH: 1
artifacts:
expire_in: 2 days
paths:
- build
# Prepare for GitLab pages deployment. Anything copied into the
# "public" directory will be deployed to $USER.gitlab.io/$PROJECT
@@ -576,7 +620,7 @@ build-tools-and-docs-debian:
# of what topic branch they're currently using
pages:
extends: .base_job_template
image: $CI_REGISTRY_IMAGE/qemu/debian-amd64:$QEMU_CI_CONTAINER_TAG
image: $CI_REGISTRY_IMAGE/qemu/debian-amd64:latest
stage: test
needs:
- job: build-tools-and-docs-debian
@@ -591,7 +635,6 @@ pages:
- make -C build install DESTDIR=$(pwd)/temp-install
- mv temp-install/usr/local/share/doc/qemu/* public/
artifacts:
when: on_success
paths:
- public
variables:

@@ -44,6 +44,19 @@
variables:
QEMU_JOB_CIRRUS: 1
x64-freebsd-12-build:
extends: .cirrus_build_job
variables:
NAME: freebsd-12
CIRRUS_VM_INSTANCE_TYPE: freebsd_instance
CIRRUS_VM_IMAGE_SELECTOR: image_family
CIRRUS_VM_IMAGE_NAME: freebsd-12-4
CIRRUS_VM_CPUS: 8
CIRRUS_VM_RAM: 8G
UPDATE_COMMAND: pkg update; pkg upgrade -y
INSTALL_COMMAND: pkg install -y
TEST_TARGETS: check
x64-freebsd-13-build:
extends: .cirrus_build_job
variables:

@@ -32,9 +32,6 @@ build_task:
- $MAKE -j$(sysctl -n hw.ncpu)
- for TARGET in $TEST_TARGETS ;
do
$MAKE -j$(sysctl -n hw.ncpu) $TARGET V=1 ;
$MAKE -j$(sysctl -n hw.ncpu) $TARGET V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;
done
always:
build_result_artifacts:
path: build/meson-logs/*log.txt
type: text/plain

@@ -0,0 +1,16 @@
# THIS FILE WAS AUTO-GENERATED
#
# $ lcitool variables freebsd-12 qemu
#
# https://gitlab.com/libvirt/libvirt-ci
CCACHE='/usr/local/bin/ccache'
CPAN_PKGS=''
CROSS_PKGS=''
MAKE='/usr/local/bin/gmake'
NINJA='/usr/local/bin/ninja'
PACKAGING_COMMAND='pkg'
PIP3='/usr/local/bin/pip-3.8'
PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio spice-protocol tesseract usbredir virglrenderer vte3 zstd'
PYPI_PKGS=''
PYTHON='/usr/local/bin/python3'

@@ -11,6 +11,6 @@ MAKE='/usr/local/bin/gmake'
NINJA='/usr/local/bin/ninja'
PACKAGING_COMMAND='pkg'
PIP3='/usr/local/bin/pip-3.8'
PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson mtools ncurses nettle ninja opencv pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio socat spice-protocol tesseract usbredir virglrenderer vte3 xorriso zstd'
PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio spice-protocol tesseract usbredir virglrenderer vte3 zstd'
PYPI_PKGS=''
PYTHON='/usr/local/bin/python3'

@@ -11,6 +11,6 @@ MAKE='/opt/homebrew/bin/gmake'
NINJA='/opt/homebrew/bin/ninja'
PACKAGING_COMMAND='brew'
PIP3='/opt/homebrew/bin/pip3'
PKGS='bash bc bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo json-c libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson mtools ncurses nettle ninja pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy socat sparse spice-protocol tesseract usbredir vde vte3 xorriso zlib zstd'
PKGS='bash bc bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo json-c libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson ncurses nettle ninja pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy sparse spice-protocol tesseract usbredir vde vte3 zlib zstd'
PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme'
PYTHON='/opt/homebrew/bin/python3'

@@ -1,21 +1,22 @@
.container_job_template:
extends: .base_job_template
image: docker:latest
image: docker:stable
stage: containers
services:
- docker:dind
before_script:
- export TAG="$CI_REGISTRY_IMAGE/qemu/$NAME:$QEMU_CI_CONTAINER_TAG"
# Always ':latest' because we always use upstream as a common cache source
- export COMMON_TAG="$CI_REGISTRY/qemu-project/qemu/qemu/$NAME:latest"
- export TAG="$CI_REGISTRY_IMAGE/qemu/$NAME:latest"
- export COMMON_TAG="$CI_REGISTRY/qemu-project/qemu/$NAME:latest"
- apk add python3
- docker info
- docker login $CI_REGISTRY -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD"
- until docker info; do sleep 1; done
script:
- echo "TAG:$TAG"
- echo "COMMON_TAG:$COMMON_TAG"
- docker build --tag "$TAG" --cache-from "$TAG" --cache-from "$COMMON_TAG"
--build-arg BUILDKIT_INLINE_CACHE=1
-f "tests/docker/dockerfiles/$NAME.docker" "."
- ./tests/docker/docker.py --engine docker build
-t "qemu/$NAME" -f "tests/docker/dockerfiles/$NAME.docker"
-r $CI_REGISTRY/qemu-project/qemu
- docker tag "qemu/$NAME" "$TAG"
- docker push "$TAG"
after_script:
- docker logout

@@ -13,10 +13,10 @@ amd64-debian-container:
variables:
NAME: debian-amd64
amd64-ubuntu2204-container:
amd64-ubuntu2004-container:
extends: .container_job_template
variables:
NAME: ubuntu2204
NAME: ubuntu2004
amd64-opensuse-leap-container:
extends: .container_job_template

@@ -1,14 +1,13 @@
.cross_system_build_job:
extends: .base_job_template
stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
timeout: 80m
script:
- mkdir build
- cd build
- ../configure --enable-werror --disable-docs --enable-fdt=system
--disable-user $QEMU_CONFIGURE_OPTS $EXTRA_CONFIGURE_OPTS
--target-list-exclude="arm-softmmu cris-softmmu
- ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
--disable-user --target-list-exclude="arm-softmmu cris-softmmu
i386-softmmu microblaze-softmmu mips-softmmu mipsel-softmmu
mips64-softmmu ppc-softmmu riscv32-softmmu sh4-softmmu
sparc-softmmu xtensa-softmmu $CROSS_SKIP_TARGETS"
@@ -27,7 +26,7 @@
.cross_accel_build_job:
extends: .base_job_template
stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
timeout: 30m
script:
- mkdir build
@@ -39,7 +38,7 @@
.cross_user_build_job:
extends: .base_job_template
stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
script:
- mkdir build
- cd build
@@ -49,15 +48,3 @@
nios2-linux-user or1k-linux-user ppc-linux-user sparc-linux-user
xtensa-linux-user $CROSS_SKIP_TARGETS"
- make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS
# We can still run some tests on some of our cross build jobs. They can add this
# template to their extends to save the build logs and test results
.cross_test_artifacts:
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
when: always
expire_in: 7 days
paths:
- build/meson-logs/testlog.txt
reports:
junit: build/meson-logs/testlog.junit.xml

@@ -1,6 +1,13 @@
include:
- local: '/.gitlab-ci.d/crossbuild-template.yml'
cross-armel-system:
extends: .cross_system_build_job
needs:
job: armel-debian-cross-container
variables:
IMAGE: debian-armel-cross
cross-armel-user:
extends: .cross_user_build_job
needs:
@@ -8,6 +15,13 @@ cross-armel-user:
variables:
IMAGE: debian-armel-cross
cross-armhf-system:
extends: .cross_system_build_job
needs:
job: armhf-debian-cross-container
variables:
IMAGE: debian-armhf-cross
cross-armhf-user:
extends: .cross_user_build_job
needs:
@@ -29,18 +43,16 @@ cross-arm64-user:
variables:
IMAGE: debian-arm64-cross
cross-arm64-kvm-only:
extends: .cross_accel_build_job
cross-i386-system:
extends: .cross_system_build_job
needs:
job: arm64-debian-cross-container
job: i386-fedora-cross-container
variables:
IMAGE: debian-arm64-cross
EXTRA_CONFIGURE_OPTS: --disable-tcg --without-default-features
IMAGE: fedora-i386-cross
MAKE_CHECK_ARGS: check-qtest
cross-i386-user:
extends:
- .cross_user_build_job
- .cross_test_artifacts
extends: .cross_user_build_job
needs:
job: i386-fedora-cross-container
variables:
@@ -48,16 +60,14 @@ cross-i386-user:
MAKE_CHECK_ARGS: check
cross-i386-tci:
extends:
- .cross_accel_build_job
- .cross_test_artifacts
extends: .cross_accel_build_job
timeout: 60m
needs:
job: i386-fedora-cross-container
variables:
IMAGE: fedora-i386-cross
ACCEL: tcg-interpreter
EXTRA_CONFIGURE_OPTS: --target-list=i386-softmmu,i386-linux-user,aarch64-softmmu,aarch64-linux-user,ppc-softmmu,ppc-linux-user --disable-plugins
EXTRA_CONFIGURE_OPTS: --target-list=i386-softmmu,i386-linux-user,aarch64-softmmu,aarch64-linux-user,ppc-softmmu,ppc-linux-user
MAKE_CHECK_ARGS: check check-tcg
cross-mipsel-system:
@@ -149,7 +159,7 @@ cross-s390x-kvm-only:
job: s390x-debian-cross-container
variables:
IMAGE: debian-s390x-cross
EXTRA_CONFIGURE_OPTS: --disable-tcg --enable-trace-backends=ftrace
EXTRA_CONFIGURE_OPTS: --disable-tcg
cross-mips64el-kvm-only:
extends: .cross_accel_build_job
@@ -165,11 +175,9 @@ cross-win32-system:
job: win32-fedora-cross-container
variables:
IMAGE: fedora-win32-cross
EXTRA_CONFIGURE_OPTS: --enable-fdt=internal
CROSS_SKIP_TARGETS: alpha-softmmu avr-softmmu hppa-softmmu m68k-softmmu
microblazeel-softmmu mips64el-softmmu nios2-softmmu
artifacts:
when: on_success
paths:
- build/qemu-setup*.exe
@@ -179,13 +187,11 @@ cross-win64-system:
job: win64-fedora-cross-container
variables:
IMAGE: fedora-win64-cross
EXTRA_CONFIGURE_OPTS: --enable-fdt=internal
CROSS_SKIP_TARGETS: alpha-softmmu avr-softmmu hppa-softmmu
m68k-softmmu microblazeel-softmmu nios2-softmmu
or1k-softmmu rx-softmmu sh4eb-softmmu sparc64-softmmu
tricore-softmmu xtensaeb-softmmu
artifacts:
when: on_success
paths:
- build/qemu-setup*.exe

@@ -15,15 +15,12 @@ variables:
# All custom runners can extend this template to upload the testlog
# data as an artifact and also feed the junit report
.custom_runner_template:
extends: .base_job_template
.custom_artifacts_template:
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
expire_in: 7 days
when: always
paths:
- build/build.ninja
- build/meson-logs
- build/meson-logs/testlog.txt
reports:
junit: build/meson-logs/testlog.junit.xml

@@ -1,9 +1,4 @@
# All centos-stream-8 jobs should run successfully in an environment
# set up by the scripts/ci/setup/stream/8/build-environment.yml task
# "Installation of extra packages to build QEMU"
centos-stream-8-x86_64:
extends: .custom_runner_template
allow_failure: true
needs: []
stage: build
@@ -13,6 +8,15 @@ centos-stream-8-x86_64:
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
- if: "$CENTOS_STREAM_8_x86_64_RUNNER_AVAILABLE"
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
when: on_failure
expire_in: 7 days
paths:
- build/tests/results/latest/results.xml
- build/tests/results/latest/test-results
reports:
junit: build/tests/results/latest/results.xml
before_script:
- JOBS=$(expr $(nproc) + 1)
script:
@@ -21,4 +25,6 @@ centos-stream-8-x86_64:
- ../scripts/ci/org.centos/stream/8/x86_64/configure
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make -j"$JOBS"
- make NINJA=":" check check-avocado
- make NINJA=":" check
|| { cat meson-logs/testlog.txt; exit 1; } ;
- ../scripts/ci/org.centos/stream/8/x86_64/test-avocado

@@ -3,7 +3,7 @@
# "Install basic packages to build QEMU on Ubuntu 20.04/20.04"
ubuntu-20.04-s390x-all-linux-static:
extends: .custom_runner_template
extends: .custom_artifacts_template
needs: []
stage: build
tags:
@@ -24,7 +24,7 @@ ubuntu-20.04-s390x-all-linux-static:
- make --output-sync -j`nproc` check
ubuntu-20.04-s390x-all:
extends: .custom_runner_template
extends: .custom_artifacts_template
needs: []
stage: build
tags:
@@ -43,7 +43,7 @@ ubuntu-20.04-s390x-all:
- make --output-sync -j`nproc` check
ubuntu-20.04-s390x-alldbg:
extends: .custom_runner_template
extends: .custom_artifacts_template
needs: []
stage: build
tags:
@@ -66,7 +66,7 @@ ubuntu-20.04-s390x-alldbg:
- make --output-sync -j`nproc` check
ubuntu-20.04-s390x-clang:
extends: .custom_runner_template
extends: .custom_artifacts_template
needs: []
stage: build
tags:
@@ -108,7 +108,7 @@ ubuntu-20.04-s390x-tci:
- make --output-sync -j`nproc`
ubuntu-20.04-s390x-notcg:
extends: .custom_runner_template
extends: .custom_artifacts_template
needs: []
stage: build
tags:

@@ -1,9 +1,9 @@
# All ubuntu-22.04 jobs should run successfully in an environment
# set up by the scripts/ci/setup/qemu/build-environment.yml task
# "Install basic packages to build QEMU on Ubuntu 22.04"
# "Install basic packages to build QEMU on Ubuntu 20.04"
ubuntu-22.04-aarch32-all:
extends: .custom_runner_template
extends: .custom_artifacts_template
needs: []
stage: build
tags:

@@ -1,9 +1,9 @@
# All ubuntu-22.04 jobs should run successfully in an environment
# All ubuntu-20.04 jobs should run successfully in an environment
# set up by the scripts/ci/setup/qemu/build-environment.yml task
# "Install basic packages to build QEMU on Ubuntu 22.04"
# "Install basic packages to build QEMU on Ubuntu 20.04"
ubuntu-22.04-aarch64-all-linux-static:
extends: .custom_runner_template
extends: .custom_artifacts_template
needs: []
stage: build
tags:
@@ -24,7 +24,7 @@ ubuntu-22.04-aarch64-all-linux-static:
- make --output-sync -j`nproc --ignore=40` check
ubuntu-22.04-aarch64-all:
extends: .custom_runner_template
extends: .custom_artifacts_template
needs: []
stage: build
tags:
@@ -45,30 +45,8 @@ ubuntu-22.04-aarch64-all:
- make --output-sync -j`nproc --ignore=40`
- make --output-sync -j`nproc --ignore=40` check
ubuntu-22.04-aarch64-without-defaults:
extends: .custom_runner_template
needs: []
stage: build
tags:
- ubuntu_22.04
- aarch64
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
allow_failure: true
- if: "$AARCH64_RUNNER_AVAILABLE"
when: manual
allow_failure: true
script:
- mkdir build
- cd build
- ../configure --disable-user --without-default-devices --without-default-features
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc --ignore=40`
- make --output-sync -j`nproc --ignore=40` check
ubuntu-22.04-aarch64-alldbg:
extends: .custom_runner_template
extends: .custom_artifacts_template
needs: []
stage: build
tags:
@@ -87,7 +65,7 @@ ubuntu-22.04-aarch64-alldbg:
- make --output-sync -j`nproc --ignore=40` check
ubuntu-22.04-aarch64-clang:
extends: .custom_runner_template
extends: .custom_artifacts_template
needs: []
stage: build
tags:
@@ -129,7 +107,7 @@ ubuntu-22.04-aarch64-tci:
- make --output-sync -j`nproc --ignore=40`
ubuntu-22.04-aarch64-notcg:
extends: .custom_runner_template
extends: .custom_artifacts_template
needs: []
stage: build
tags:
@@ -145,7 +123,7 @@ ubuntu-22.04-aarch64-notcg:
script:
- mkdir build
- cd build
- ../configure --disable-tcg --with-devices-aarch64=minimal
- ../configure --disable-tcg
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc --ignore=40`
- make --output-sync -j`nproc --ignore=40` check

.gitlab-ci.d/edk2.yml

@@ -0,0 +1,85 @@
# All jobs needing docker-edk2 must use the same rules it uses.
.edk2_job_rules:
rules:
# Forks don't get pipelines unless QEMU_CI=1 or QEMU_CI=2 is set
- if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"'
when: never
# In forks, if QEMU_CI=1 is set, then create manual job
# if any of the files affecting the build are touched
- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project"'
changes:
- .gitlab-ci.d/edk2.yml
- .gitlab-ci.d/edk2/Dockerfile
- roms/edk2/*
when: manual
# In forks, if QEMU_CI=1 is set, then create manual job
# if the branch/tag starts with 'edk2'
- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_REF_NAME =~ /^edk2/'
when: manual
# In forks, if QEMU_CI=1 is set, then create manual job
# if last commit msg contains 'EDK2' (case insensitive)
- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_MESSAGE =~ /edk2/i'
when: manual
# Run if any files affecting the build output are touched
- changes:
- .gitlab-ci.d/edk2.yml
- .gitlab-ci.d/edk2/Dockerfile
- roms/edk2/*
when: on_success
# Run if the branch/tag starts with 'edk2'
- if: '$CI_COMMIT_REF_NAME =~ /^edk2/'
when: on_success
# Run if last commit msg contains 'EDK2' (case insensitive)
- if: '$CI_COMMIT_MESSAGE =~ /edk2/i'
when: on_success
docker-edk2:
extends: .edk2_job_rules
stage: containers
image: docker:19.03.1
services:
- docker:19.03.1-dind
variables:
GIT_DEPTH: 3
IMAGE_TAG: $CI_REGISTRY_IMAGE:edk2-cross-build
# We don't use TLS
DOCKER_HOST: tcp://docker:2375
DOCKER_TLS_CERTDIR: ""
before_script:
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
script:
- docker pull $IMAGE_TAG || true
- docker build --cache-from $IMAGE_TAG --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
--tag $IMAGE_TAG .gitlab-ci.d/edk2
- docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
- docker push $IMAGE_TAG
build-edk2:
extends: .edk2_job_rules
stage: build
needs: ['docker-edk2']
artifacts:
paths: # 'artifacts.zip' will contain the following files:
- pc-bios/edk2*bz2
- pc-bios/edk2-licenses.txt
- edk2-stdout.log
- edk2-stderr.log
image: $CI_REGISTRY_IMAGE:edk2-cross-build
variables:
GIT_DEPTH: 3
script: # Clone the required submodules and build EDK2
- git submodule update --init roms/edk2
- git -C roms/edk2 submodule update --init --
ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3
BaseTools/Source/C/BrotliCompress/brotli
CryptoPkg/Library/OpensslLib/openssl
MdeModulePkg/Library/BrotliCustomDecompressLib/brotli
- export JOBS=$(($(getconf _NPROCESSORS_ONLN) + 1))
- echo "=== Using ${JOBS} simultaneous jobs ==="
- make -j${JOBS} -C roms efi 2>&1 1>edk2-stdout.log | tee -a edk2-stderr.log >&2

@@ -0,0 +1,27 @@
#
# Docker image to cross-compile EDK2 firmware binaries
#
FROM ubuntu:18.04
MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org>
# Install packages required to build EDK2
RUN apt update \
&& \
\
DEBIAN_FRONTEND=noninteractive \
apt install --assume-yes --no-install-recommends \
build-essential \
ca-certificates \
dos2unix \
gcc-aarch64-linux-gnu \
gcc-arm-linux-gnueabi \
git \
iasl \
make \
nasm \
python3 \
uuid-dev \
&& \
\
rm -rf /var/lib/apt/lists/*

@@ -42,15 +42,17 @@
docker-opensbi:
extends: .opensbi_job_rules
stage: containers
image: docker:latest
image: docker:19.03.1
services:
- docker:dind
- docker:19.03.1-dind
variables:
GIT_DEPTH: 3
IMAGE_TAG: $CI_REGISTRY_IMAGE:opensbi-cross-build
# We don't use TLS
DOCKER_HOST: tcp://docker:2375
DOCKER_TLS_CERTDIR: ""
before_script:
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
- until docker info; do sleep 1; done
script:
- docker pull $IMAGE_TAG || true
- docker build --cache-from $IMAGE_TAG --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
@@ -63,7 +65,6 @@ build-opensbi:
stage: build
needs: ['docker-opensbi']
artifacts:
when: on_success
paths: # 'artifacts.zip' will contain the following files:
- pc-bios/opensbi-riscv32-generic-fw_dynamic.bin
- pc-bios/opensbi-riscv64-generic-fw_dynamic.bin

@@ -15,7 +15,6 @@ RUN apt update \
ca-certificates \
git \
make \
python3 \
wget \
&& \
\

@@ -1,16 +1,10 @@
# This file contains the set of jobs run by the QEMU project:
# https://gitlab.com/qemu-project/qemu/-/pipelines
variables:
RUNNER_TAG: ""
default:
tags:
- $RUNNER_TAG
include:
- local: '/.gitlab-ci.d/base.yml'
- local: '/.gitlab-ci.d/stages.yml'
- local: '/.gitlab-ci.d/edk2.yml'
- local: '/.gitlab-ci.d/opensbi.yml'
- local: '/.gitlab-ci.d/containers.yml'
- local: '/.gitlab-ci.d/crossbuilds.yml'

@@ -23,12 +23,12 @@ check-dco:
before_script:
- apk -U add git
check-python-minreqs:
check-python-pipenv:
extends: .base_job_template
stage: test
image: $CI_REGISTRY_IMAGE/qemu/python:$QEMU_CI_CONTAINER_TAG
image: $CI_REGISTRY_IMAGE/qemu/python:latest
script:
- make -C python check-minreqs
- make -C python check-pipenv
variables:
GIT_DEPTH: 1
needs:
@@ -37,7 +37,7 @@ check-python-minreqs:
check-python-tox:
extends: .base_job_template
stage: test
image: $CI_REGISTRY_IMAGE/qemu/python:$QEMU_CI_CONTAINER_TAG
image: $CI_REGISTRY_IMAGE/qemu/python:latest
script:
- make -C python check-tox
variables:

@@ -11,13 +11,6 @@
needs: []
stage: build
timeout: 80m
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
expire_in: 7 days
paths:
- build/meson-logs/testlog.txt
reports:
junit: "build/meson-logs/testlog.junit.xml"
before_script:
- If ( !(Test-Path -Path msys64\var\cache ) ) {
mkdir msys64\var\cache
@@ -45,7 +38,6 @@ msys2-64bit:
mingw-w64-x86_64-capstone
mingw-w64-x86_64-curl
mingw-w64-x86_64-cyrus-sasl
mingw-w64-x86_64-dtc
mingw-w64-x86_64-gcc
mingw-w64-x86_64-glib2
mingw-w64-x86_64-gnutls
@@ -66,21 +58,20 @@ msys2-64bit:
mingw-w64-x86_64-SDL2
mingw-w64-x86_64-SDL2_image
mingw-w64-x86_64-snappy
mingw-w64-x86_64-spice
mingw-w64-x86_64-usbredir
mingw-w64-x86_64-zstd "
- $env:CHERE_INVOKING = 'yes' # Preserve the current working directory
- $env:MSYSTEM = 'MINGW64' # Start a 64-bit MinGW environment
- $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink
- mkdir build
- cd build
- mkdir output
- cd output
# Note: do not remove "--without-default-devices"!
# commit 9f8e6cad65a6 ("gitlab-ci: Speed up the msys2-64bit job by using --without-default-devices")
# changed to compile QEMU with the --without-default-devices switch
# for the msys2 64-bit job, because the build could not complete within
# the project timeout.
- ..\msys64\usr\bin\bash -lc '../configure --target-list=x86_64-softmmu
--without-default-devices --enable-fdt=system'
--without-default-devices'
- ..\msys64\usr\bin\bash -lc 'make'
# qTests don't run successfully with "--without-default-devices",
# so let's exclude the qtests from CI for now.
@@ -95,7 +86,6 @@ msys2-32bit:
mingw-w64-i686-capstone
mingw-w64-i686-curl
mingw-w64-i686-cyrus-sasl
mingw-w64-i686-dtc
mingw-w64-i686-gcc
mingw-w64-i686-glib2
mingw-w64-i686-gnutls
@@ -116,16 +106,14 @@ msys2-32bit:
mingw-w64-i686-SDL2
mingw-w64-i686-SDL2_image
mingw-w64-i686-snappy
mingw-w64-i686-spice
mingw-w64-i686-usbredir
mingw-w64-i686-zstd "
- $env:CHERE_INVOKING = 'yes' # Preserve the current working directory
- $env:MSYSTEM = 'MINGW32' # Start a 32-bit MinGW environment
- $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink
- mkdir build
- cd build
- ..\msys64\usr\bin\bash -lc '../configure --target-list=ppc64-softmmu
--enable-fdt=system'
- mkdir output
- cd output
- ..\msys64\usr\bin\bash -lc '../configure --target-list=ppc64-softmmu'
- ..\msys64\usr\bin\bash -lc 'make'
- ..\msys64\usr\bin\bash -lc 'make check MTESTARGS=\"--no-suite qtest\" ||
{ cat meson-logs/testlog.txt; exit 1; }'

.gitmodules

@@ -13,6 +13,12 @@
[submodule "roms/qemu-palcode"]
path = roms/qemu-palcode
url = https://gitlab.com/qemu-project/qemu-palcode.git
[submodule "roms/sgabios"]
path = roms/sgabios
url = https://gitlab.com/qemu-project/sgabios.git
[submodule "dtc"]
path = dtc
url = https://gitlab.com/qemu-project/dtc.git
[submodule "roms/u-boot"]
path = roms/u-boot
url = https://gitlab.com/qemu-project/u-boot.git
@@ -22,12 +28,21 @@
[submodule "roms/QemuMacDrivers"]
path = roms/QemuMacDrivers
url = https://gitlab.com/qemu-project/QemuMacDrivers.git
[submodule "ui/keycodemapdb"]
path = ui/keycodemapdb
url = https://gitlab.com/qemu-project/keycodemapdb.git
[submodule "roms/seabios-hppa"]
path = roms/seabios-hppa
url = https://gitlab.com/qemu-project/seabios-hppa.git
[submodule "roms/u-boot-sam460ex"]
path = roms/u-boot-sam460ex
url = https://gitlab.com/qemu-project/u-boot-sam460ex.git
[submodule "tests/fp/berkeley-testfloat-3"]
path = tests/fp/berkeley-testfloat-3
url = https://gitlab.com/qemu-project/berkeley-testfloat-3.git
[submodule "tests/fp/berkeley-softfloat-3"]
path = tests/fp/berkeley-softfloat-3
url = https://gitlab.com/qemu-project/berkeley-softfloat-3.git
[submodule "roms/edk2"]
path = roms/edk2
url = https://gitlab.com/qemu-project/edk2.git
@@ -37,9 +52,15 @@
[submodule "roms/qboot"]
path = roms/qboot
url = https://gitlab.com/qemu-project/qboot.git
[submodule "meson"]
path = meson
url = https://gitlab.com/qemu-project/meson.git
[submodule "roms/vbootrom"]
path = roms/vbootrom
url = https://gitlab.com/qemu-project/vbootrom.git
[submodule "tests/lcitool/libvirt-ci"]
path = tests/lcitool/libvirt-ci
url = https://gitlab.com/libvirt/libvirt-ci.git
[submodule "subprojects/libvfio-user"]
path = subprojects/libvfio-user
url = https://gitlab.com/qemu-project/libvfio-user.git

@@ -54,10 +54,8 @@ Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> <amarkovic@wavecomp.com>
Aleksandar Rikalo <aleksandar.rikalo@syrmia.com> <arikalo@wavecomp.com>
Aleksandar Rikalo <aleksandar.rikalo@syrmia.com> <aleksandar.rikalo@rt-rk.com>
Alexander Graf <agraf@csgraf.de> <agraf@suse.de>
Ani Sinha <anisinha@redhat.com> <ani@anisinha.ca>
Anthony Liguori <anthony@codemonkey.ws> Anthony Liguori <aliguori@us.ibm.com>
Christian Borntraeger <borntraeger@linux.ibm.com> <borntraeger@de.ibm.com>
Damien Hedde <damien.hedde@dahe.fr> <damien.hedde@greensocs.com>
Filip Bozuta <filip.bozuta@syrmia.com> <filip.bozuta@rt-rk.com.com>
Frederic Konrad <konrad.frederic@yahoo.fr> <fred.konrad@greensocs.com>
Frederic Konrad <konrad.frederic@yahoo.fr> <konrad@adacore.com>
@@ -76,9 +74,7 @@ Paul Burton <paulburton@kernel.org> <pburton@wavecomp.com>
Philippe Mathieu-Daudé <philmd@linaro.org> <f4bug@amsat.org>
Philippe Mathieu-Daudé <philmd@linaro.org> <philmd@redhat.com>
Philippe Mathieu-Daudé <philmd@linaro.org> <philmd@fungible.com>
Roman Bolshakov <rbolshakov@ddn.com> <r.bolshakov@yadro.com>
Stefan Brankovic <stefan.brankovic@syrmia.com> <stefan.brankovic@rt-rk.com.com>
Taylor Simpson <ltaylorsimpson@gmail.com> <tsimpson@quicinc.com>
Yongbok Kim <yongbok.kim@mips.com> <yongbok.kim@imgtec.com>
# Also list preferred name forms where people have changed their

@@ -237,15 +237,13 @@ jobs:
- libglib2.0-dev
- libgnutls28-dev
- ninja-build
- flex
- bison
env:
- CONFIG="--disable-containers --disable-system"
- name: "[s390x] Clang (disable-tcg)"
arch: s390x
dist: focal
compiler: clang-10
compiler: clang
addons:
apt_packages:
- libaio-dev
@@ -271,7 +269,6 @@ jobs:
- libvdeplug-dev
- libvte-2.91-dev
- ninja-build
- clang-10
env:
- TEST_CMD="make check-unit"
- CONFIG="--disable-containers --disable-tcg --enable-kvm --disable-tools

@@ -64,21 +64,6 @@ L: qemu-devel@nongnu.org
F: *
F: */
Project policy and developer guides
R: Alex Bennée <alex.bennee@linaro.org>
R: Daniel P. Berrangé <berrange@redhat.com>
R: Thomas Huth <thuth@redhat.com>
R: Markus Armbruster <armbru@redhat.com>
R: Philippe Mathieu-Daudé <philmd@linaro.org>
R: Juan Quintela <quintela@redhat.com>
W: https://www.qemu.org/docs/master/devel/index.html
S: Odd Fixes
F: docs/devel/style.rst
F: docs/devel/code-of-conduct.rst
F: docs/devel/conflict-resolution.rst
F: docs/devel/submitting-a-patch.rst
F: docs/devel/submitting-a-pull-request.rst
Responsible Disclosure, Reporting Security Issues
-------------------------------------------------
W: https://wiki.qemu.org/SecurityProcess
@@ -138,7 +123,6 @@ M: Richard Henderson <richard.henderson@linaro.org>
R: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: softmmu/cpus.c
F: softmmu/watchpoint.c
F: cpus-common.c
F: page-vary.c
F: page-vary-common.c
@@ -151,17 +135,10 @@ F: docs/devel/decodetree.rst
F: docs/devel/tcg*
F: include/exec/cpu*.h
F: include/exec/exec-all.h
F: include/exec/tb-flush.h
F: include/exec/target_long.h
F: include/exec/helper*.h
F: include/exec/helper*.h.inc
F: include/exec/helper-info.c.inc
F: include/sysemu/cpus.h
F: include/sysemu/tcg.h
F: include/hw/core/tcg-cpu-ops.h
F: host/include/*/host/cpuinfo.h
F: util/cpuinfo-*.c
F: include/tcg/
FPU emulation
M: Aurelien Jarno <aurelien@aurel32.net>
@@ -184,7 +161,6 @@ M: Peter Maydell <peter.maydell@linaro.org>
L: qemu-arm@nongnu.org
S: Maintained
F: target/arm/
F: target/arm/tcg/
F: tests/tcg/arm/
F: tests/tcg/aarch64/
F: tests/qtest/arm-cpu-features.c
@@ -220,7 +196,7 @@ F: tests/tcg/cris/
F: disas/cris.c
Hexagon TCG CPUs
M: Brian Cain <bcain@quicinc.com>
M: Taylor Simpson <tsimpson@quicinc.com>
S: Supported
F: target/hexagon/
X: target/hexagon/idef-parser/
@@ -230,7 +206,6 @@ F: tests/tcg/hexagon/
F: disas/hexagon.c
F: configs/targets/hexagon-linux-user/default.mak
F: docker/dockerfiles/debian-hexagon-cross.docker
F: gdb-xml/hexagon*.xml
Hexagon idef-parser
M: Alessandro Di Federico <ale@rev.ng>
@@ -251,7 +226,6 @@ M: Xiaojuan Yang <yangxiaojuan@loongson.cn>
S: Maintained
F: target/loongarch/
F: tests/tcg/loongarch64/
F: tests/avocado/machine_loongarch.py
M68K TCG CPUs
M: Laurent Vivier <laurent@vivier.eu>
@@ -279,9 +253,9 @@ F: docs/system/cpu-models-mips.rst.inc
F: tests/tcg/mips/
NiosII TCG CPUs
R: Chris Wulff <crwulff@gmail.com>
R: Marek Vasut <marex@denx.de>
S: Orphan
M: Chris Wulff <crwulff@gmail.com>
M: Marek Vasut <marex@denx.de>
S: Maintained
F: target/nios2/
F: hw/nios2/
F: disas/nios2.c
@@ -302,7 +276,6 @@ M: Daniel Henrique Barboza <danielhb413@gmail.com>
R: Cédric Le Goater <clg@kaod.org>
R: David Gibson <david@gibson.dropbear.id.au>
R: Greg Kurz <groug@kaod.org>
R: Nicholas Piggin <npiggin@gmail.com>
L: qemu-ppc@nongnu.org
S: Odd Fixes
F: target/ppc/
@@ -314,9 +287,6 @@ RISC-V TCG CPUs
M: Palmer Dabbelt <palmer@dabbelt.com>
M: Alistair Francis <alistair.francis@wdc.com>
M: Bin Meng <bin.meng@windriver.com>
R: Weiwei Li <liweiwei@iscas.ac.cn>
R: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
R: Liu Zhiwei <zhiwei_liu@linux.alibaba.com>
L: qemu-riscv@nongnu.org
S: Supported
F: target/riscv/
@@ -336,7 +306,7 @@ F: target/riscv/xthead*.decode
RISC-V XVentanaCondOps extension
M: Philipp Tomsich <philipp.tomsich@vrull.eu>
L: qemu-riscv@nongnu.org
S: Maintained
S: Supported
F: target/riscv/XVentanaCondOps.decode
F: target/riscv/insn_trans/trans_xventanacondops.c.inc
@@ -394,7 +364,6 @@ S: Maintained
F: target/xtensa/
F: hw/xtensa/
F: tests/tcg/xtensa/
F: tests/tcg/xtensaeb/
F: disas/xtensa.c
F: include/hw/xtensa/xtensa-isa.h
F: configs/devices/xtensa*/default.mak
@@ -452,6 +421,8 @@ S: Supported
F: target/s390x/kvm/
F: target/s390x/machine.c
F: target/s390x/sigp.c
F: hw/s390x/pv.c
F: include/hw/s390x/pv.h
F: gdb-xml/s390*.xml
T: git https://github.com/borntraeger/qemu.git s390-next
L: qemu-s390x@nongnu.org
@@ -467,15 +438,6 @@ F: target/i386/kvm/
F: target/i386/sev*
F: scripts/kvm/vmxcap
Xen emulation on X86 KVM CPUs
M: David Woodhouse <dwmw2@infradead.org>
M: Paul Durrant <paul@xen.org>
S: Supported
F: include/sysemu/kvm_xen.h
F: target/i386/kvm/xen*
F: hw/i386/kvm/xen*
F: tests/avocado/xen_guest.py
Guest CPU Cores (other accelerators)
------------------------------------
Overall
@@ -496,14 +458,14 @@ F: target/arm/hvf/
X86 HVF CPUs
M: Cameron Esfahani <dirty@apple.com>
M: Roman Bolshakov <rbolshakov@ddn.com>
M: Roman Bolshakov <r.bolshakov@yadro.com>
W: https://wiki.qemu.org/Features/HVF
S: Maintained
F: target/i386/hvf/
HVF
M: Cameron Esfahani <dirty@apple.com>
M: Roman Bolshakov <rbolshakov@ddn.com>
M: Roman Bolshakov <r.bolshakov@yadro.com>
W: https://wiki.qemu.org/Features/HVF
S: Maintained
F: accel/hvf/
@@ -845,13 +807,13 @@ F: include/hw/net/mv88w8618_eth.h
F: docs/system/arm/musicpal.rst
Nuvoton NPCM7xx
M: Havard Skinnemoen <hskinnemoen@google.com>
M: Tyrone Ting <kfting@nuvoton.com>
M: Hao Wu <wuhaotsh@google.com>
L: qemu-arm@nongnu.org
S: Supported
F: hw/*/npcm*
F: include/hw/*/npcm*
F: tests/qtest/npcm*
F: hw/*/npcm7xx*
F: include/hw/*/npcm7xx*
F: tests/qtest/npcm7xx*
F: pc-bios/npcm7xx_bootrom.bin
F: roms/vbootrom
F: docs/system/arm/nuvoton.rst
@@ -945,12 +907,10 @@ SBSA-REF
M: Radoslaw Biernacki <rad@semihalf.com>
M: Peter Maydell <peter.maydell@linaro.org>
R: Leif Lindholm <quic_llindhol@quicinc.com>
R: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
L: qemu-arm@nongnu.org
S: Maintained
F: hw/arm/sbsa-ref.c
F: docs/system/arm/sbsa.rst
F: tests/avocado/machine_aarch64_sbsaref.py
Sharp SL-5500 (Collie) PDA
M: Peter Maydell <peter.maydell@linaro.org>
@@ -1034,6 +994,12 @@ S: Maintained
F: hw/ssi/xlnx-versal-ospi.c
F: include/hw/ssi/xlnx-versal-ospi.h
ARM ACPI Subsystem
M: Shannon Zhao <shannon.zhaosl@gmail.com>
L: qemu-arm@nongnu.org
S: Maintained
F: hw/arm/virt-acpi-build.c
STM32F100
M: Alexandre Iooss <erdnaxe@crans.org>
L: qemu-arm@nongnu.org
@@ -1120,7 +1086,7 @@ F: include/hw/misc/pca9552*.h
F: hw/net/ftgmac100.c
F: include/hw/net/ftgmac100.h
F: docs/system/arm/aspeed.rst
F: tests/*/*aspeed*
F: tests/qtest/*aspeed*
F: hw/arm/fby35.c
NRF51
@@ -1224,7 +1190,6 @@ q800
M: Laurent Vivier <laurent@vivier.eu>
S: Maintained
F: hw/m68k/q800.c
F: hw/m68k/q800-glue.c
F: hw/misc/mac_via.c
F: hw/nubus/*
F: hw/display/macfb.c
@@ -1236,8 +1201,6 @@ F: include/hw/misc/mac_via.h
F: include/hw/nubus/*
F: include/hw/display/macfb.h
F: include/hw/block/swim.h
F: include/hw/m68k/q800.h
F: include/hw/m68k/q800-glue.h
virt
M: Laurent Vivier <laurent@vivier.eu>
@@ -1432,7 +1395,6 @@ M: Daniel Henrique Barboza <danielhb413@gmail.com>
R: Cédric Le Goater <clg@kaod.org>
R: David Gibson <david@gibson.dropbear.id.au>
R: Greg Kurz <groug@kaod.org>
R: Harsh Prateek Bora <harshpb@linux.ibm.com>
L: qemu-ppc@nongnu.org
S: Odd Fixes
F: hw/*/spapr*
@@ -1450,8 +1412,6 @@ F: tests/avocado/ppc_pseries.py
PowerNV (Non-Virtualized)
M: Cédric Le Goater <clg@kaod.org>
R: Frédéric Barrat <fbarrat@linux.ibm.com>
R: Nicholas Piggin <npiggin@gmail.com>
L: qemu-ppc@nongnu.org
S: Odd Fixes
F: docs/system/ppc/powernv.rst
@@ -1715,8 +1675,8 @@ F: hw/i2c/smbus_ich9.c
F: hw/acpi/piix4.c
F: hw/acpi/ich9*.c
F: include/hw/acpi/ich9*.h
F: include/hw/southbridge/ich9.h
F: include/hw/southbridge/piix.h
F: hw/misc/sga.c
F: hw/isa/apm.c
F: include/hw/isa/apm.h
F: tests/unit/test-x86-cpuid.c
@@ -1744,11 +1704,10 @@ F: hw/rtc/mc146818rtc*
F: hw/watchdog/wdt_ib700.c
F: hw/watchdog/wdt_i6300esb.c
F: include/hw/display/vga.h
F: include/hw/char/parallel*.h
F: include/hw/char/parallel.h
F: include/hw/dma/i8257.h
F: include/hw/i2c/pm_smbus.h
F: include/hw/input/i8042.h
F: include/hw/intc/ioapic*
F: include/hw/isa/i8259_internal.h
F: include/hw/isa/superio.h
F: include/hw/timer/hpet.h
@@ -1823,7 +1782,7 @@ M: Francisco Iglesias <francisco.iglesias@amd.com>
S: Maintained
F: hw/net/can/xlnx-*
F: include/hw/net/xlnx-*
F: tests/qtest/xlnx-can*-test*
F: tests/qtest/xlnx-can-test*
EDU
M: Jiri Slaby <jslaby@suse.cz>
@@ -1833,7 +1792,7 @@ F: hw/misc/edu.c
IDE
M: John Snow <jsnow@redhat.com>
L: qemu-block@nongnu.org
S: Odd Fixes
S: Supported
F: include/hw/ide.h
F: include/hw/ide/
F: hw/ide/
@@ -1858,7 +1817,7 @@ T: git https://github.com/cminyard/qemu.git master-ipmi-rebase
Floppy
M: John Snow <jsnow@redhat.com>
L: qemu-block@nongnu.org
S: Odd Fixes
S: Supported
F: hw/block/fdc.c
F: hw/block/fdc-internal.h
F: hw/block/fdc-isa.c
@@ -1909,7 +1868,7 @@ F: hw/pci/pcie_doe.c
ACPI/SMBIOS
M: Michael S. Tsirkin <mst@redhat.com>
M: Igor Mammedov <imammedo@redhat.com>
R: Ani Sinha <anisinha@redhat.com>
R: Ani Sinha <ani@anisinha.ca>
S: Supported
F: include/hw/acpi/*
F: include/hw/firmware/smbios.h
@@ -1927,18 +1886,6 @@ F: docs/specs/acpi_nvdimm.rst
F: docs/specs/acpi_pci_hotplug.rst
F: docs/specs/acpi_hw_reduced_hotplug.rst
ARM ACPI Subsystem
M: Shannon Zhao <shannon.zhaosl@gmail.com>
L: qemu-arm@nongnu.org
S: Maintained
F: hw/arm/virt-acpi-build.c
RISC-V ACPI Subsystem
M: Sunil V L <sunilvl@ventanamicro.com>
L: qemu-riscv@nongnu.org
S: Maintained
F: hw/riscv/virt-acpi-build.c
ACPI/VIOT
M: Jean-Philippe Brucker <jean-philippe@linaro.org>
S: Supported
@@ -1946,7 +1893,7 @@ F: hw/acpi/viot.c
F: hw/acpi/viot.h
ACPI/AVOCADO/BIOSBITS
M: Ani Sinha <anisinha@redhat.com>
M: Ani Sinha <ani@anisinha.ca>
M: Michael S. Tsirkin <mst@redhat.com>
S: Supported
F: tests/avocado/acpi-bits/*
@@ -2049,7 +1996,6 @@ F: hw/usb/dev-serial.c
VFIO
M: Alex Williamson <alex.williamson@redhat.com>
M: Cédric Le Goater <clg@redhat.com>
S: Supported
F: hw/vfio/*
F: include/hw/vfio/
@@ -2090,10 +2036,6 @@ F: backends/vhost-user.c
F: include/sysemu/vhost-user-backend.h
F: subprojects/libvhost-user/
vhost-shadow-virtqueue
R: Eugenio Pérez <eperezma@redhat.com>
F: hw/virtio/vhost-shadow-virtqueue.*
virtio
M: Michael S. Tsirkin <mst@redhat.com>
S: Supported
@@ -2118,24 +2060,17 @@ F: include/sysemu/balloon.h
virtio-9p
M: Greg Kurz <groug@kaod.org>
M: Christian Schoenebeck <qemu_oss@crudebyte.com>
S: Maintained
S: Odd Fixes
W: https://wiki.qemu.org/Documentation/9p
F: hw/9pfs/
X: hw/9pfs/xen-9p*
X: hw/9pfs/9p-proxy*
F: fsdev/
X: fsdev/virtfs-proxy-helper.c
F: docs/tools/virtfs-proxy-helper.rst
F: tests/qtest/virtio-9p-test.c
F: tests/qtest/libqos/virtio-9p*
T: git https://gitlab.com/gkurz/qemu.git 9p-next
T: git https://github.com/cschoenebeck/qemu.git 9p.next
virtio-9p-proxy
F: hw/9pfs/9p-proxy*
F: fsdev/virtfs-proxy-helper.c
F: docs/tools/virtfs-proxy-helper.rst
S: Obsolete
virtio-blk
M: Stefan Hajnoczi <stefanha@redhat.com>
L: qemu-block@nongnu.org
@@ -2159,10 +2094,13 @@ T: git https://github.com/borntraeger/qemu.git s390-next
L: qemu-s390x@nongnu.org
virtiofs
M: Dr. David Alan Gilbert <dgilbert@redhat.com>
M: Stefan Hajnoczi <stefanha@redhat.com>
S: Supported
F: tools/virtiofsd/*
F: hw/virtio/vhost-user-fs*
F: include/hw/virtio/vhost-user-fs.h
F: docs/tools/virtiofsd.rst
L: virtio-fs@redhat.com
virtio-input
@@ -2215,13 +2153,6 @@ F: hw/virtio/vhost-user-gpio*
F: include/hw/virtio/vhost-user-gpio.h
F: tests/qtest/libqos/virtio-gpio.*
vhost-user-scmi
R: mzamazal@redhat.com
S: Supported
F: hw/virtio/vhost-user-scmi*
F: include/hw/virtio/vhost-user-scmi.h
F: tests/qtest/libqos/virtio-scmi.*
virtio-crypto
M: Gonglei <arei.gonglei@huawei.com>
S: Supported
@@ -2229,13 +2160,6 @@ F: hw/virtio/virtio-crypto.c
F: hw/virtio/virtio-crypto-pci.c
F: include/hw/virtio/virtio-crypto.h
virtio based memory device
M: David Hildenbrand <david@redhat.com>
S: Supported
F: hw/virtio/virtio-md-pci.c
F: include/hw/virtio/virtio-md-pci.h
F: stubs/virtio-md-pci.c
virtio-mem
M: David Hildenbrand <david@redhat.com>
S: Supported
@@ -2267,7 +2191,6 @@ F: tests/qtest/fuzz-megasas-test.c
Network packet abstractions
M: Dmitry Fleytman <dmitry.fleytman@gmail.com>
R: Akihiko Odaki <akihiko.odaki@daynix.com>
S: Maintained
F: include/net/eth.h
F: net/eth.c
@@ -2291,28 +2214,14 @@ F: docs/specs/rocker.txt
e1000x
M: Dmitry Fleytman <dmitry.fleytman@gmail.com>
R: Akihiko Odaki <akihiko.odaki@daynix.com>
S: Maintained
F: hw/net/e1000x*
e1000e
M: Dmitry Fleytman <dmitry.fleytman@gmail.com>
R: Akihiko Odaki <akihiko.odaki@daynix.com>
S: Maintained
F: hw/net/e1000e*
F: tests/qtest/fuzz-e1000e-test.c
F: tests/qtest/e1000e-test.c
F: tests/qtest/libqos/e1000e.*
igb
M: Akihiko Odaki <akihiko.odaki@daynix.com>
R: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
S: Maintained
F: docs/system/devices/igb.rst
F: hw/net/igb*
F: tests/avocado/netdev-ethtool.py
F: tests/qtest/igb-test.c
F: tests/qtest/libqos/igb.c
eepro100
M: Stefan Weil <sw@weilnetz.de>
@@ -2366,6 +2275,7 @@ F: hw/acpi/vmgenid.c
F: include/hw/acpi/vmgenid.h
F: docs/specs/vmgenid.txt
F: tests/qtest/vmgenid-test.c
F: stubs/vmgenid.c
LED
M: Philippe Mathieu-Daudé <philmd@linaro.org>
@@ -2467,7 +2377,6 @@ T: git https://github.com/philmd/qemu.git fw_cfg-next
XIVE
M: Cédric Le Goater <clg@kaod.org>
R: Frédéric Barrat <fbarrat@linux.ibm.com>
L: qemu-ppc@nongnu.org
S: Odd Fixes
F: hw/*/*xive*
@@ -2577,7 +2486,6 @@ Subsystems
----------
Overall Audio backends
M: Gerd Hoffmann <kraxel@redhat.com>
M: Marc-André Lureau <marcandre.lureau@redhat.com>
S: Odd Fixes
F: audio/
X: audio/alsaaudio.c
@@ -2601,7 +2509,7 @@ Core Audio framework backend
M: Gerd Hoffmann <kraxel@redhat.com>
M: Philippe Mathieu-Daudé <philmd@linaro.org>
R: Christian Schoenebeck <qemu_oss@crudebyte.com>
R: Akihiko Odaki <akihiko.odaki@daynix.com>
R: Akihiko Odaki <akihiko.odaki@gmail.com>
S: Odd Fixes
F: audio/coreaudio.c
@@ -2678,7 +2586,6 @@ F: util/aio-*.c
F: util/aio-*.h
F: util/fdmon-*.c
F: block/io.c
F: block/plug.c
F: migration/block*
F: include/block/aio.h
F: include/block/aio-wait.h
@@ -2724,8 +2631,8 @@ T: git https://gitlab.com/jsnow/qemu.git jobs
T: git https://gitlab.com/vsementsov/qemu.git block
Compute Express Link
M: Ben Widawsky <ben.widawsky@intel.com>
M: Jonathan Cameron <jonathan.cameron@huawei.com>
R: Fan Ni <fan.ni@samsung.com>
S: Supported
F: hw/cxl/
F: hw/mem/cxl_type3.c
@@ -2823,11 +2730,9 @@ S: Maintained
F: docs/system/gdb.rst
F: gdbstub/*
F: include/exec/gdbstub.h
F: include/gdbstub/*
F: gdb-xml/
F: tests/tcg/multiarch/gdbstub/
F: scripts/feature_to_c.sh
F: scripts/probe-gdb-support.py
Memory API
M: Paolo Bonzini <pbonzini@redhat.com>
@@ -2875,7 +2780,6 @@ F: docs/spice-port-fqdn.txt
Graphics
M: Gerd Hoffmann <kraxel@redhat.com>
M: Marc-André Lureau <marcandre.lureau@redhat.com>
S: Odd Fixes
F: ui/
F: include/ui/
@@ -2886,13 +2790,14 @@ F: docs/devel/ui.rst
Cocoa graphics
M: Peter Maydell <peter.maydell@linaro.org>
M: Philippe Mathieu-Daudé <philmd@linaro.org>
R: Akihiko Odaki <akihiko.odaki@daynix.com>
R: Akihiko Odaki <akihiko.odaki@gmail.com>
S: Odd Fixes
F: ui/cocoa.m
Main loop
M: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: include/exec/gen-icount.h
F: include/qemu/main-loop.h
F: include/sysemu/runstate.h
F: include/sysemu/runstate-action.h
@@ -2910,15 +2815,13 @@ F: qapi/run-state.json
Read, Copy, Update (RCU)
M: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: docs/devel/lockcnt.txt
F: docs/devel/rcu.txt
F: include/qemu/rcu*.h
F: tests/unit/rcutorture.c
F: tests/unit/test-rcu-*.c
F: util/rcu.c
Human Monitor (HMP)
M: Dr. David Alan Gilbert <dave@treblig.org>
M: Dr. David Alan Gilbert <dgilbert@redhat.com>
S: Maintained
F: monitor/monitor-internal.h
F: monitor/misc.c
@@ -2958,11 +2861,9 @@ T: git https://gitlab.com/ehabkost/qemu.git machine-next
Cryptodev Backends
M: Gonglei <arei.gonglei@huawei.com>
M: zhenwei pi <pizhenwei@bytedance.com>
S: Maintained
F: include/sysemu/cryptodev*.h
F: backends/cryptodev*.c
F: qapi/cryptodev.json
Python library
M: John Snow <jsnow@redhat.com>
@@ -3072,7 +2973,6 @@ F: include/qom/
F: qapi/qom.json
F: qapi/qdev.json
F: scripts/coccinelle/qom-parent-type.cocci
F: scripts/qom-cast-macro-clean-cocci-gen.py
F: softmmu/qdev-monitor.c
F: stubs/qdev.c
F: qom/
@@ -3125,7 +3025,6 @@ R: Qiuhao Li <Qiuhao.Li@outlook.com>
S: Maintained
F: tests/qtest/fuzz/
F: tests/qtest/fuzz-*test.c
F: tests/docker/test-fuzz
F: scripts/oss-fuzz/
F: hw/mem/sparse-mem.c
F: docs/devel/fuzzing.rst
@@ -3193,8 +3092,7 @@ F: scripts/checkpatch.pl
Migration
M: Juan Quintela <quintela@redhat.com>
R: Peter Xu <peterx@redhat.com>
R: Leonardo Bras <leobras@redhat.com>
M: Dr. David Alan Gilbert <dgilbert@redhat.com>
S: Maintained
F: hw/core/vmstate-if.c
F: include/hw/vmstate-if.h
@@ -3209,15 +3107,6 @@ F: qapi/migration.json
F: tests/migration/
F: util/userfaultfd.c
Migration dirty limit and dirty page rate
M: Hyman Huang <yong.huang@smartx.com>
S: Maintained
F: softmmu/dirtylimit.c
F: include/sysemu/dirtylimit.h
F: migration/dirtyrate.c
F: migration/dirtyrate.h
F: include/sysemu/dirtyrate.h
D-Bus
M: Marc-André Lureau <marcandre.lureau@redhat.com>
S: Maintained
@@ -3231,7 +3120,6 @@ F: docs/interop/dbus*
F: docs/sphinx/dbus*
F: docs/sphinx/fakedbusdoc.py
F: tests/qtest/dbus*
F: scripts/xml-preprocess*
Seccomp
M: Daniel P. Berrange <berrange@redhat.com>
@@ -3245,7 +3133,6 @@ M: Daniel P. Berrange <berrange@redhat.com>
S: Maintained
F: crypto/
F: include/crypto/
F: host/include/*/host/crypto/
F: qapi/crypto.json
F: tests/unit/test-crypto-*
F: tests/bench/benchmark-crypto-*
@@ -3349,7 +3236,6 @@ S: Supported
F: replay/*
F: block/blkreplay.c
F: net/filter-replay.c
F: include/exec/replay-core.h
F: include/sysemu/replay.h
F: docs/devel/replay.rst
F: docs/system/replay.rst
@@ -3414,6 +3300,8 @@ F: roms/edk2
F: roms/edk2-*
F: tests/data/uefi-boot-images/
F: tests/uefi-test-tools/
F: .gitlab-ci.d/edk2.yml
F: .gitlab-ci.d/edk2/
VT-d Emulation
M: Michael S. Tsirkin <mst@redhat.com>
@@ -3424,10 +3312,6 @@ F: hw/i386/intel_iommu.c
F: hw/i386/intel_iommu_internal.h
F: include/hw/i386/intel_iommu.h
AMD-Vi Emulation
S: Orphan
F: hw/i386/amd_iommu.?
OpenSBI Firmware
M: Bin Meng <bmeng.cn@gmail.com>
S: Supported
@@ -3437,7 +3321,7 @@ F: .gitlab-ci.d/opensbi/
Clock framework
M: Luc Michel <luc@lmichel.fr>
R: Damien Hedde <damien.hedde@dahe.fr>
R: Damien Hedde <damien.hedde@greensocs.com>
S: Maintained
F: include/hw/clock.h
F: include/hw/qdev-clock.h
@@ -3696,11 +3580,13 @@ F: block/dmg.c
parallels
M: Stefan Hajnoczi <stefanha@redhat.com>
M: Denis V. Lunev <den@openvz.org>
M: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
L: qemu-block@nongnu.org
S: Supported
F: block/parallels.c
F: block/parallels-ext.c
F: docs/interop/parallels.txt
T: git https://gitlab.com/vsementsov/qemu.git block
qed
M: Stefan Hajnoczi <stefanha@redhat.com>
@@ -3824,6 +3710,7 @@ F: tests/tcg/aarch64/system/semiheap.c
Multi-process QEMU
M: Elena Ufimtseva <elena.ufimtseva@oracle.com>
M: Jagannathan Raman <jag.raman@oracle.com>
M: John G Johnson <john.g.johnson@oracle.com>
S: Maintained
F: docs/devel/multi-process.rst
F: docs/system/multi-process.rst
@@ -3874,7 +3761,6 @@ F: scripts/ci/
F: tests/docker/
F: tests/vm/
F: tests/lcitool/
F: tests/avocado/tuxrun_baselines.py
F: scripts/archive-source.sh
F: docs/devel/testing.rst
W: https://gitlab.com/qemu-project/qemu/pipelines
@@ -3891,7 +3777,8 @@ W: https://cirrus-ci.com/github/qemu/qemu
Windows Hosted Continuous Integration
M: Yonggang Luo <luoyonggang@gmail.com>
S: Maintained
F: .gitlab-ci.d/windows.yml
F: .cirrus.yml
W: https://cirrus-ci.com/github/qemu/qemu
Guest Test Compilation Support
M: Alex Bennée <alex.bennee@linaro.org>
@@ -3957,16 +3844,6 @@ F: configure
F: scripts/mtest2make.py
F: tests/Makefile.include
Kconfig
M: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: scripts/minikconf.py
F: docs/devel/kconfig.rst
F: Kconfig*
F: */Kconfig*
F: hw/*/Kconfig*
F: target/*/Kconfig*
GIT submodules
M: Daniel P. Berrange <berrange@redhat.com>
S: Odd Fixes
@@ -3990,8 +3867,3 @@ Performance Tools and Tests
M: Ahmed Karaman <ahmedkhaledkaraman@gmail.com>
S: Maintained
F: scripts/performance/
Code Coverage Tools
M: Alex Bennée <alex.bennee@linaro.org>
S: Odd Fixes
F: scripts/coverage/


@@ -26,9 +26,9 @@ quiet-command-run = $(if $(V),,$(if $2,printf " %-7s %s\n" $2 $3 && ))$1
quiet-@ = $(if $(V),,@)
quiet-command = $(quiet-@)$(call quiet-command-run,$1,$2,$3)
UNCHECKED_GOALS := TAGS gtags cscope ctags dist \
UNCHECKED_GOALS := %clean TAGS cscope ctags dist \
help check-help print-% \
docker docker-% lcitool-refresh vm-help vm-test vm-build-%
docker docker-% vm-help vm-test vm-build-%
all:
.PHONY: all clean distclean recurse-all dist msi FORCE
@@ -45,6 +45,18 @@ include config-host.mak
include Makefile.prereqs
Makefile.prereqs: config-host.mak
git-submodule-update:
.git-submodule-status: git-submodule-update config-host.mak
Makefile: .git-submodule-status
.PHONY: git-submodule-update
git-submodule-update:
ifneq ($(GIT_SUBMODULES_ACTION),ignore)
$(call quiet-command, \
(GIT="$(GIT)" "$(SRC_PATH)/scripts/git-submodule.sh" $(GIT_SUBMODULES_ACTION) $(GIT_SUBMODULES)), \
"GIT","$(GIT_SUBMODULES)")
endif
# 0. ensure the build tree is okay
# Check that we're not trying to do an out-of-tree build from
@@ -83,17 +95,16 @@ config-host.mak: $(SRC_PATH)/configure $(SRC_PATH)/scripts/meson-buildoptions.sh
@if test -f meson-private/coredata.dat; then \
./config.status --skip-meson; \
else \
./config.status; \
./config.status && touch build.ninja.stamp; \
fi
# 2. meson.stamp exists if meson has run at least once (so ninja reconfigure
# works), but otherwise never needs to be updated
meson-private/coredata.dat: meson.stamp
meson.stamp: config-host.mak
@touch meson.stamp
# 3. ensure meson-generated build files are up-to-date
# 3. ensure generated build files are up-to-date
ifneq ($(NINJA),)
Makefile.ninja: build.ninja
@@ -104,23 +115,15 @@ Makefile.ninja: build.ninja
$(NINJA) -t query build.ninja | sed -n '1,/^ input:/d; /^ outputs:/q; s/$$/ \\/p'; \
} > $@.tmp && mv $@.tmp $@
-include Makefile.ninja
endif
ifneq ($(MESON),)
# The path to meson always points to pyvenv/bin/meson, but the absolute
# paths could change. In that case, force a regeneration of build.ninja.
# Note that this invocation of $(NINJA), just like when Make rebuilds
# Makefiles, does not include -n.
# A separate rule is needed for Makefile dependencies to avoid -n
build.ninja: build.ninja.stamp
$(build-files):
build.ninja.stamp: meson.stamp $(build-files)
@if test "$$(cat build.ninja.stamp)" = "$(MESON)" && test -n "$(NINJA)"; then \
$(NINJA) build.ninja; \
else \
echo "$(MESON) setup --reconfigure $(SRC_PATH)"; \
$(MESON) setup --reconfigure $(SRC_PATH); \
fi && echo "$(MESON)" > $@
$(NINJA) $(if $V,-v,) build.ninja && touch $@
endif
ifneq ($(MESON),)
Makefile.mtest: build.ninja scripts/mtest2make.py
$(MESON) introspect --targets --tests --benchmarks | $(PYTHON) scripts/mtest2make.py > $@
-include Makefile.mtest
@@ -173,8 +176,10 @@ plugins:
endif # $(CONFIG_PLUGIN)
else # config-host.mak does not exist
config-host.mak:
ifneq ($(filter-out $(UNCHECKED_GOALS),$(MAKECMDGOALS)),$(if $(MAKECMDGOALS),,fail))
$(error Please call configure before running make)
@echo "Please call configure before running make!"
@exit 1
endif
endif # config-host.mak does not exist
@@ -215,7 +220,7 @@ qemu-%.tar.bz2:
distclean: clean recurse-distclean
-$(quiet-@)test -f build.ninja && $(NINJA) $(NINJAFLAGS) -t clean -g || :
rm -f config-host.mak Makefile.prereqs
rm -f config-host.mak Makefile.prereqs qemu-bundle
rm -f tests/tcg/*/config-target.mak tests/tcg/config-host.mak
rm -f config.status
rm -f roms/seabios/config.mak
@@ -225,7 +230,7 @@ distclean: clean recurse-distclean
rm -f Makefile.ninja Makefile.mtest build.ninja.stamp meson.stamp
rm -f config.log
rm -f linux-headers/asm
rm -Rf .sdk qemu-bundle
rm -Rf .sdk
find-src-path = find "$(SRC_PATH)" -path "$(SRC_PATH)/meson" -prune -o \
-type l -prune -o \( -name "*.[chsS]" -o -name "*.[ch].inc" \)


@@ -1 +1 @@
8.0.92
7.2.50


@@ -27,7 +27,7 @@
#include "qemu/accel.h"
#include "hw/boards.h"
#include "sysemu/cpus.h"
#include "qemu/error-report.h"
#include "accel-softmmu.h"
int accel_init_machine(AccelState *accel, MachineState *ms)


@@ -52,7 +52,6 @@
#include "qemu/main-loop.h"
#include "exec/address-spaces.h"
#include "exec/exec-all.h"
#include "exec/gdbstub.h"
#include "sysemu/cpus.h"
#include "sysemu/hvf.h"
#include "sysemu/hvf_int.h"
@@ -304,7 +303,7 @@ static void hvf_region_del(MemoryListener *listener,
static MemoryListener hvf_memory_listener = {
.name = "hvf",
.priority = MEMORY_LISTENER_PRIORITY_ACCEL,
.priority = 10,
.region_add = hvf_region_add,
.region_del = hvf_region_del,
.log_start = hvf_log_start,
@@ -335,26 +334,18 @@ static int hvf_accel_init(MachineState *ms)
s->slots[x].slot_id = x;
}
QTAILQ_INIT(&s->hvf_sw_breakpoints);
hvf_state = s;
memory_listener_register(&hvf_memory_listener, &address_space_memory);
return hvf_arch_init();
}
static inline int hvf_gdbstub_sstep_flags(void)
{
return SSTEP_ENABLE | SSTEP_NOIRQ;
}
static void hvf_accel_class_init(ObjectClass *oc, void *data)
{
AccelClass *ac = ACCEL_CLASS(oc);
ac->name = "HVF";
ac->init_machine = hvf_accel_init;
ac->allowed = &hvf_allowed;
ac->gdbstub_supported_sstep_flags = hvf_gdbstub_sstep_flags;
}
static const TypeInfo hvf_accel_type = {
@@ -372,19 +363,19 @@ type_init(hvf_type_init);
static void hvf_vcpu_destroy(CPUState *cpu)
{
hv_return_t ret = hv_vcpu_destroy(cpu->accel->fd);
hv_return_t ret = hv_vcpu_destroy(cpu->hvf->fd);
assert_hvf_ok(ret);
hvf_arch_vcpu_destroy(cpu);
g_free(cpu->accel);
cpu->accel = NULL;
g_free(cpu->hvf);
cpu->hvf = NULL;
}
static int hvf_init_vcpu(CPUState *cpu)
{
int r;
cpu->accel = g_new0(AccelCPUState, 1);
cpu->hvf = g_malloc0(sizeof(*cpu->hvf));
/* init cpu signals */
struct sigaction sigact;
@@ -393,20 +384,17 @@ static int hvf_init_vcpu(CPUState *cpu)
sigact.sa_handler = dummy_signal;
sigaction(SIG_IPI, &sigact, NULL);
pthread_sigmask(SIG_BLOCK, NULL, &cpu->accel->unblock_ipi_mask);
sigdelset(&cpu->accel->unblock_ipi_mask, SIG_IPI);
pthread_sigmask(SIG_BLOCK, NULL, &cpu->hvf->unblock_ipi_mask);
sigdelset(&cpu->hvf->unblock_ipi_mask, SIG_IPI);
#ifdef __aarch64__
r = hv_vcpu_create(&cpu->accel->fd,
(hv_vcpu_exit_t **)&cpu->accel->exit, NULL);
r = hv_vcpu_create(&cpu->hvf->fd, (hv_vcpu_exit_t **)&cpu->hvf->exit, NULL);
#else
r = hv_vcpu_create((hv_vcpuid_t *)&cpu->accel->fd, HV_VCPU_DEFAULT);
r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf->fd, HV_VCPU_DEFAULT);
#endif
cpu->vcpu_dirty = 1;
assert_hvf_ok(r);
cpu->accel->guest_debug_enabled = false;
return hvf_arch_init_vcpu(cpu);
}
@@ -474,108 +462,6 @@ static void hvf_start_vcpu_thread(CPUState *cpu)
cpu, QEMU_THREAD_JOINABLE);
}
static int hvf_insert_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len)
{
struct hvf_sw_breakpoint *bp;
int err;
if (type == GDB_BREAKPOINT_SW) {
bp = hvf_find_sw_breakpoint(cpu, addr);
if (bp) {
bp->use_count++;
return 0;
}
bp = g_new(struct hvf_sw_breakpoint, 1);
bp->pc = addr;
bp->use_count = 1;
err = hvf_arch_insert_sw_breakpoint(cpu, bp);
if (err) {
g_free(bp);
return err;
}
QTAILQ_INSERT_HEAD(&hvf_state->hvf_sw_breakpoints, bp, entry);
} else {
err = hvf_arch_insert_hw_breakpoint(addr, len, type);
if (err) {
return err;
}
}
CPU_FOREACH(cpu) {
err = hvf_update_guest_debug(cpu);
if (err) {
return err;
}
}
return 0;
}
static int hvf_remove_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len)
{
struct hvf_sw_breakpoint *bp;
int err;
if (type == GDB_BREAKPOINT_SW) {
bp = hvf_find_sw_breakpoint(cpu, addr);
if (!bp) {
return -ENOENT;
}
if (bp->use_count > 1) {
bp->use_count--;
return 0;
}
err = hvf_arch_remove_sw_breakpoint(cpu, bp);
if (err) {
return err;
}
QTAILQ_REMOVE(&hvf_state->hvf_sw_breakpoints, bp, entry);
g_free(bp);
} else {
err = hvf_arch_remove_hw_breakpoint(addr, len, type);
if (err) {
return err;
}
}
CPU_FOREACH(cpu) {
err = hvf_update_guest_debug(cpu);
if (err) {
return err;
}
}
return 0;
}
static void hvf_remove_all_breakpoints(CPUState *cpu)
{
struct hvf_sw_breakpoint *bp, *next;
CPUState *tmpcpu;
QTAILQ_FOREACH_SAFE(bp, &hvf_state->hvf_sw_breakpoints, entry, next) {
if (hvf_arch_remove_sw_breakpoint(cpu, bp) != 0) {
/* Try harder to find a CPU that currently sees the breakpoint. */
CPU_FOREACH(tmpcpu)
{
if (hvf_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
break;
}
}
}
QTAILQ_REMOVE(&hvf_state->hvf_sw_breakpoints, bp, entry);
g_free(bp);
}
hvf_arch_remove_all_hw_breakpoints();
CPU_FOREACH(cpu) {
hvf_update_guest_debug(cpu);
}
}
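
For illustration, a behavioral sketch of the reference counting implemented by the insert/remove helpers above (hypothetical call sequence; the address and length are made up):

/* Inserting the same software breakpoint twice only bumps use_count,
 * so a single matching remove leaves the trap installed. */
hvf_insert_breakpoint(cpu, GDB_BREAKPOINT_SW, 0x1000, 1); /* installs, use_count = 1 */
hvf_insert_breakpoint(cpu, GDB_BREAKPOINT_SW, 0x1000, 1); /* reuses, use_count = 2 */
hvf_remove_breakpoint(cpu, GDB_BREAKPOINT_SW, 0x1000, 1); /* use_count drops to 1, trap stays */
hvf_remove_breakpoint(cpu, GDB_BREAKPOINT_SW, 0x1000, 1); /* trap removed, bp freed */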
static void hvf_accel_ops_class_init(ObjectClass *oc, void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
@@ -587,12 +473,6 @@ static void hvf_accel_ops_class_init(ObjectClass *oc, void *data)
ops->synchronize_post_init = hvf_cpu_synchronize_post_init;
ops->synchronize_state = hvf_cpu_synchronize_state;
ops->synchronize_pre_loadvm = hvf_cpu_synchronize_pre_loadvm;
ops->insert_breakpoint = hvf_insert_breakpoint;
ops->remove_breakpoint = hvf_remove_breakpoint;
ops->remove_all_breakpoints = hvf_remove_all_breakpoints;
ops->update_guest_debug = hvf_update_guest_debug;
ops->supports_guest_debug = hvf_arch_supports_guest_debug;
};
static const TypeInfo hvf_accel_ops_type = {
.name = ACCEL_OPS_NAME("hvf"),

View File

@@ -38,38 +38,9 @@ void assert_hvf_ok(hv_return_t ret)
case HV_UNSUPPORTED:
error_report("Error: HV_UNSUPPORTED");
break;
#if defined(MAC_OS_VERSION_11_0) && \
MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_VERSION_11_0
case HV_DENIED:
error_report("Error: HV_DENIED");
break;
#endif
default:
error_report("Unknown Error");
}
abort();
}
struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu, target_ulong pc)
{
struct hvf_sw_breakpoint *bp;
QTAILQ_FOREACH(bp, &hvf_state->hvf_sw_breakpoints, entry) {
if (bp->pc == pc) {
return bp;
}
}
return NULL;
}
int hvf_sw_breakpoints_active(CPUState *cpu)
{
return !QTAILQ_EMPTY(&hvf_state->hvf_sw_breakpoints);
}
int hvf_update_guest_debug(CPUState *cpu)
{
hvf_arch_update_guest_debug(cpu);
return 0;
}
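
A minimal sketch of the intended call pattern for assert_hvf_ok(), assuming the Hypervisor.framework entry points used elsewhere in this code:

/* Any hv_* return code can be funneled through assert_hvf_ok(), which
 * decodes known errors and aborts on anything other than HV_SUCCESS. */
hv_return_t ret = hv_vm_create(HV_VM_DEFAULT);
assert_hvf_ok(ret);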


@@ -86,13 +86,6 @@ static bool kvm_cpus_are_resettable(void)
return !kvm_enabled() || kvm_cpu_check_are_resettable();
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
static int kvm_update_guest_debug_ops(CPUState *cpu)
{
return kvm_update_guest_debug(cpu, 0);
}
#endif
static void kvm_accel_ops_class_init(ObjectClass *oc, void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
@@ -106,7 +99,6 @@ static void kvm_accel_ops_class_init(ObjectClass *oc, void *data)
ops->synchronize_pre_loadvm = kvm_cpu_synchronize_pre_loadvm;
#ifdef KVM_CAP_SET_GUEST_DEBUG
ops->update_guest_debug = kvm_update_guest_debug_ops;
ops->supports_guest_debug = kvm_supports_guest_debug;
ops->insert_breakpoint = kvm_insert_breakpoint;
ops->remove_breakpoint = kvm_remove_breakpoint;


@@ -450,8 +450,6 @@ int kvm_init_vcpu(CPUState *cpu, Error **errp)
"kvm_init_vcpu: kvm_arch_init_vcpu failed (%lu)",
kvm_arch_vcpu_id(cpu));
}
cpu->kvm_vcpu_stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);
err:
return ret;
}
@@ -687,15 +685,6 @@ static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu)
uint32_t ring_size = s->kvm_dirty_ring_size;
uint32_t count = 0, fetch = cpu->kvm_fetch_index;
/*
* It's possible that we race with vcpu creation code where the vcpu is
* put onto the vcpus list but not yet initialized the dirty ring
* structures. If so, skip it.
*/
if (!cpu->created) {
return 0;
}
assert(dirty_gfns && ring_size);
trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index);
@@ -1105,7 +1094,6 @@ static MemoryListener kvm_coalesced_pio_listener = {
.name = "kvm-coalesced-pio",
.coalesced_io_add = kvm_coalesce_pio_add,
.coalesced_io_del = kvm_coalesce_pio_del,
.priority = MEMORY_LISTENER_PRIORITY_MIN,
};
int kvm_check_extension(KVMState *s, unsigned int extension)
@@ -1364,10 +1352,6 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
*/
if (kvm_state->kvm_dirty_ring_size) {
kvm_dirty_ring_reap_locked(kvm_state, NULL);
if (kvm_state->kvm_dirty_ring_with_bitmap) {
kvm_slot_sync_dirty_pages(mem);
kvm_slot_get_dirty_log(kvm_state, mem);
}
} else {
kvm_slot_get_dirty_log(kvm_state, mem);
}
@@ -1465,69 +1449,6 @@ static int kvm_dirty_ring_reaper_init(KVMState *s)
return 0;
}
static int kvm_dirty_ring_init(KVMState *s)
{
uint32_t ring_size = s->kvm_dirty_ring_size;
uint64_t ring_bytes = ring_size * sizeof(struct kvm_dirty_gfn);
unsigned int capability = KVM_CAP_DIRTY_LOG_RING;
int ret;
s->kvm_dirty_ring_size = 0;
s->kvm_dirty_ring_bytes = 0;
/* Bail if the dirty ring size isn't specified */
if (!ring_size) {
return 0;
}
/*
* Read the max supported pages. Fall back to dirty logging mode
* if the dirty ring isn't supported.
*/
ret = kvm_vm_check_extension(s, capability);
if (ret <= 0) {
capability = KVM_CAP_DIRTY_LOG_RING_ACQ_REL;
ret = kvm_vm_check_extension(s, capability);
}
if (ret <= 0) {
warn_report("KVM dirty ring not available, using bitmap method");
return 0;
}
if (ring_bytes > ret) {
error_report("KVM dirty ring size %" PRIu32 " too big "
"(maximum is %ld). Please use a smaller value.",
ring_size, (long)ret / sizeof(struct kvm_dirty_gfn));
return -EINVAL;
}
ret = kvm_vm_enable_cap(s, capability, 0, ring_bytes);
if (ret) {
error_report("Enabling of KVM dirty ring failed: %s. "
"Suggested minimum value is 1024.", strerror(-ret));
return -EIO;
}
/* Enable the backup bitmap if it is supported */
ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP);
if (ret > 0) {
ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP, 0);
if (ret) {
error_report("Enabling of KVM dirty ring's backup bitmap failed: "
"%s. ", strerror(-ret));
return -EIO;
}
s->kvm_dirty_ring_with_bitmap = true;
}
s->kvm_dirty_ring_size = ring_size;
s->kvm_dirty_ring_bytes = ring_bytes;
return 0;
}
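
For concreteness, a short sketch of the sizing rule enforced above; the entry count is an assumed example, and struct kvm_dirty_gfn is 16 bytes on current kernels:

/* A ring of N entries occupies N * sizeof(struct kvm_dirty_gfn) bytes
 * and must fit within the byte limit reported by the capability check. */
uint32_t ring_size = 4096;                                      /* assumed example */
uint64_t ring_bytes = ring_size * sizeof(struct kvm_dirty_gfn); /* 64 KiB */
int limit = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING);
if (limit <= 0) {
    /* ring unsupported: fall back to the dirty-bitmap method, as above */
} else if (ring_bytes > limit) {
    /* configured ring too large: rejected with -EINVAL, as above */
}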
static void kvm_region_add(MemoryListener *listener,
MemoryRegionSection *section)
{
@@ -1633,7 +1554,7 @@ static void kvm_log_sync(MemoryListener *listener,
kvm_slots_unlock();
}
static void kvm_log_sync_global(MemoryListener *l, bool last_stage)
static void kvm_log_sync_global(MemoryListener *l)
{
KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener);
KVMState *s = kvm_state;
@@ -1652,12 +1573,6 @@ static void kvm_log_sync_global(MemoryListener *l, bool last_stage)
mem = &kml->slots[i];
if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
kvm_slot_sync_dirty_pages(mem);
if (s->kvm_dirty_ring_with_bitmap && last_stage &&
kvm_slot_get_dirty_log(s, mem)) {
kvm_slot_sync_dirty_pages(mem);
}
/*
* This is not needed by KVM_GET_DIRTY_LOG because the
* ioctl will unconditionally overwrite the whole region.
@@ -1778,7 +1693,7 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
kml->listener.commit = kvm_region_commit;
kml->listener.log_start = kvm_log_start;
kml->listener.log_stop = kvm_log_stop;
kml->listener.priority = MEMORY_LISTENER_PRIORITY_ACCEL;
kml->listener.priority = 10;
kml->listener.name = name;
if (s->kvm_dirty_ring_size) {
@@ -1803,7 +1718,7 @@ static MemoryListener kvm_io_listener = {
.name = "kvm-io",
.eventfd_add = kvm_io_ioeventfd_add,
.eventfd_del = kvm_io_ioeventfd_del,
.priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND,
.priority = 10,
};
int kvm_set_irq(KVMState *s, int irq, int level)
@@ -2446,13 +2361,13 @@ static int kvm_init(MachineState *ms)
static const char upgrade_note[] =
"Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
"(see http://sourceforge.net/projects/kvm).\n";
const struct {
struct {
const char *name;
int num;
} num_cpus[] = {
{ "SMP", ms->smp.cpus },
{ "hotpluggable", ms->smp.max_cpus },
{ /* end of list */ }
{ NULL, }
}, *nc = num_cpus;
int soft_vcpus_limit, hard_vcpus_limit;
KVMState *s;
@@ -2597,9 +2512,35 @@ static int kvm_init(MachineState *ms)
* Enable KVM dirty ring if supported, otherwise fall back to
* dirty logging mode
*/
ret = kvm_dirty_ring_init(s);
if (ret < 0) {
goto err;
if (s->kvm_dirty_ring_size > 0) {
uint64_t ring_bytes;
ring_bytes = s->kvm_dirty_ring_size * sizeof(struct kvm_dirty_gfn);
/* Read the max supported pages */
ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING);
if (ret > 0) {
if (ring_bytes > ret) {
error_report("KVM dirty ring size %" PRIu32 " too big "
"(maximum is %ld). Please use a smaller value.",
s->kvm_dirty_ring_size,
(long)ret / sizeof(struct kvm_dirty_gfn));
ret = -EINVAL;
goto err;
}
ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING, 0, ring_bytes);
if (ret) {
error_report("Enabling of KVM dirty ring failed: %s. "
"Suggested minimum value is 1024.", strerror(-ret));
goto err;
}
s->kvm_dirty_ring_bytes = ring_bytes;
} else {
warn_report("KVM dirty ring not available, using bitmap method");
s->kvm_dirty_ring_size = 0;
}
}
/*
@@ -2812,7 +2753,7 @@ void kvm_flush_coalesced_mmio_buffer(void)
{
KVMState *s = kvm_state;
if (!s || s->coalesced_flush_in_progress) {
if (s->coalesced_flush_in_progress) {
return;
}
@@ -3364,7 +3305,7 @@ bool kvm_supports_guest_debug(void)
return kvm_has_guest_debug;
}
int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
int kvm_insert_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len)
{
struct kvm_sw_breakpoint *bp;
int err;
@@ -3402,7 +3343,7 @@ int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
return 0;
}
int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
int kvm_remove_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len)
{
struct kvm_sw_breakpoint *bp;
int err;
@@ -3760,12 +3701,8 @@ static void kvm_accel_instance_init(Object *obj)
s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
/* KVM dirty ring is by default off */
s->kvm_dirty_ring_size = 0;
s->kvm_dirty_ring_with_bitmap = false;
s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN;
s->notify_window = 0;
s->xen_version = 0;
s->xen_gnttab_max_frames = 64;
s->xen_evtchn_max_pirq = 256;
}
/**
@@ -4010,7 +3947,7 @@ static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd
/* Read stats header */
kvm_stats_header = &descriptors->kvm_stats_header;
ret = pread(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header), 0);
ret = read(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header));
if (ret != sizeof(*kvm_stats_header)) {
error_setg(errp, "KVM stats: failed to read stats header: "
"expected %zu actual %zu",
@@ -4041,8 +3978,7 @@ static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd
}
static void query_stats(StatsResultList **result, StatsTarget target,
strList *names, int stats_fd, CPUState *cpu,
Error **errp)
strList *names, int stats_fd, Error **errp)
{
struct kvm_stats_desc *kvm_stats_desc;
struct kvm_stats_header *kvm_stats_header;
@@ -4100,7 +4036,7 @@ static void query_stats(StatsResultList **result, StatsTarget target,
break;
case STATS_TARGET_VCPU:
add_stats_entry(result, STATS_PROVIDER_KVM,
cpu->parent_obj.canonical_path,
current_cpu->parent_obj.canonical_path,
stats_list);
break;
default:
@@ -4137,9 +4073,10 @@ static void query_stats_schema(StatsSchemaList **result, StatsTarget target,
add_stats_schema(result, STATS_PROVIDER_KVM, target, stats_list);
}
static void query_stats_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
static void query_stats_vcpu(CPUState *cpu, run_on_cpu_data data)
{
int stats_fd = cpu->kvm_vcpu_stats_fd;
StatsArgs *kvm_stats_args = (StatsArgs *) data.host_ptr;
int stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);
Error *local_err = NULL;
if (stats_fd == -1) {
@@ -4148,13 +4085,14 @@ static void query_stats_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
return;
}
query_stats(kvm_stats_args->result.stats, STATS_TARGET_VCPU,
kvm_stats_args->names, stats_fd, cpu,
kvm_stats_args->errp);
kvm_stats_args->names, stats_fd, kvm_stats_args->errp);
close(stats_fd);
}
static void query_stats_schema_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
static void query_stats_schema_vcpu(CPUState *cpu, run_on_cpu_data data)
{
int stats_fd = cpu->kvm_vcpu_stats_fd;
StatsArgs *kvm_stats_args = (StatsArgs *) data.host_ptr;
int stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);
Error *local_err = NULL;
if (stats_fd == -1) {
@@ -4164,6 +4102,7 @@ static void query_stats_schema_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
}
query_stats_schema(kvm_stats_args->result.schema, STATS_TARGET_VCPU, stats_fd,
kvm_stats_args->errp);
close(stats_fd);
}
static void query_stats_cb(StatsResultList **result, StatsTarget target,
@@ -4181,7 +4120,7 @@ static void query_stats_cb(StatsResultList **result, StatsTarget target,
error_setg_errno(errp, errno, "KVM stats: ioctl failed");
return;
}
query_stats(result, target, names, stats_fd, NULL, errp);
query_stats(result, target, names, stats_fd, errp);
close(stats_fd);
break;
}
@@ -4195,7 +4134,7 @@ static void query_stats_cb(StatsResultList **result, StatsTarget target,
if (!apply_str_list_filter(cpu->parent_obj.canonical_path, targets)) {
continue;
}
query_stats_vcpu(cpu, &stats_args);
run_on_cpu(cpu, query_stats_vcpu, RUN_ON_CPU_HOST_PTR(&stats_args));
}
break;
}
@@ -4221,6 +4160,6 @@ void query_stats_schemas_cb(StatsSchemaList **result, Error **errp)
if (first_cpu) {
stats_args.result.schema = result;
stats_args.errp = errp;
query_stats_schema_vcpu(first_cpu, &stats_args);
run_on_cpu(first_cpu, query_stats_schema_vcpu, RUN_ON_CPU_HOST_PTR(&stats_args));
}
}
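
The run_on_cpu() calls above follow a common QEMU pattern: the callback runs in the target vCPU's thread and receives its argument through the run_on_cpu_data union. A minimal sketch, with my_vcpu_work as a hypothetical callback name:

static void my_vcpu_work(CPUState *cpu, run_on_cpu_data data)
{
    StatsArgs *args = (StatsArgs *)data.host_ptr;
    /* per-vCPU work that must run in the vCPU thread, e.g. fetching
     * its stats fd via kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL) */
}

run_on_cpu(cpu, my_vcpu_work, RUN_ON_CPU_HOST_PTR(&stats_args));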


@@ -19,8 +19,8 @@ void kvm_cpu_synchronize_post_reset(CPUState *cpu);
void kvm_cpu_synchronize_post_init(CPUState *cpu);
void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu);
bool kvm_supports_guest_debug(void);
int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len);
int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len);
int kvm_insert_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len);
int kvm_remove_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len);
void kvm_remove_all_breakpoints(CPUState *cpu);
#endif /* KVM_CPUS_H */


@@ -1,5 +1,5 @@
specific_ss.add(files('accel-common.c', 'accel-blocker.c'))
system_ss.add(files('accel-softmmu.c'))
softmmu_ss.add(files('accel-softmmu.c'))
user_ss.add(files('accel-user.c'))
subdir('tcg')
@@ -12,4 +12,4 @@ if have_system
endif
# qtest
system_ss.add(files('dummy-cpus.c'))
softmmu_ss.add(files('dummy-cpus.c'))


@@ -1 +1 @@
qtest_module_ss.add(when: ['CONFIG_SYSTEM_ONLY'], if_true: files('qtest.c'))
qtest_module_ss.add(when: ['CONFIG_SOFTMMU'], if_true: files('qtest.c'))


@@ -27,7 +27,6 @@ bool kvm_allowed;
bool kvm_readonly_mem_allowed;
bool kvm_ioeventfd_any_length_allowed;
bool kvm_msi_use_devid;
bool kvm_direct_msi_allowed;
void kvm_flush_coalesced_mmio_buffer(void)
{


@@ -4,4 +4,4 @@ sysemu_stubs_ss.add(when: 'CONFIG_XEN', if_false: files('xen-stub.c'))
sysemu_stubs_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c'))
sysemu_stubs_ss.add(when: 'CONFIG_TCG', if_false: files('tcg-stub.c'))
specific_ss.add_all(when: ['CONFIG_SYSTEM_ONLY'], if_true: sysemu_stubs_ss)
specific_ss.add_all(when: ['CONFIG_SOFTMMU'], if_true: sysemu_stubs_ss)


@@ -11,14 +11,13 @@
*/
#include "qemu/osdep.h"
#include "exec/tb-flush.h"
#include "exec/exec-all.h"
void tb_flush(CPUState *cpu)
{
}
void tlb_set_dirty(CPUState *cpu, vaddr vaddr)
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
}
@@ -26,14 +25,14 @@ void tcg_flush_jmp_cache(CPUState *cpu)
{
}
int probe_access_flags(CPUArchState *env, vaddr addr, int size,
int probe_access_flags(CPUArchState *env, target_ulong addr,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr)
{
g_assert_not_reached();
}
void *probe_access(CPUArchState *env, vaddr addr, int size,
void *probe_access(CPUArchState *env, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
/* Handled by hardware accelerator. */


@@ -13,12 +13,26 @@
* See the COPYING file in the top-level directory.
*/
static void atomic_trace_rmw_post(CPUArchState *env, uint64_t addr,
static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr,
MemOpIdx oi)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_RW);
}
#if HAVE_ATOMIC128
static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr,
MemOpIdx oi)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
}
static void atomic_trace_st_post(CPUArchState *env, target_ulong addr,
MemOpIdx oi)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
#endif
/*
* Atomic helpers callable from TCG.
* These have a common interface and all defer to cpu_atomic_*
@@ -26,7 +40,7 @@ static void atomic_trace_rmw_post(CPUArchState *env, uint64_t addr,
*/
#define CMPXCHG_HELPER(OP, TYPE) \
TYPE HELPER(atomic_##OP)(CPUArchState *env, uint64_t addr, \
TYPE HELPER(atomic_##OP)(CPUArchState *env, target_ulong addr, \
TYPE oldv, TYPE newv, uint32_t oi) \
{ return cpu_atomic_##OP##_mmu(env, addr, oldv, newv, oi, GETPC()); }
@@ -41,23 +55,43 @@ CMPXCHG_HELPER(cmpxchgq_be, uint64_t)
CMPXCHG_HELPER(cmpxchgq_le, uint64_t)
#endif
#if HAVE_CMPXCHG128
#ifdef CONFIG_CMPXCHG128
CMPXCHG_HELPER(cmpxchgo_be, Int128)
CMPXCHG_HELPER(cmpxchgo_le, Int128)
#endif
#undef CMPXCHG_HELPER
Int128 HELPER(nonatomic_cmpxchgo)(CPUArchState *env, uint64_t addr,
Int128 cmpv, Int128 newv, uint32_t oi)
Int128 HELPER(nonatomic_cmpxchgo_be)(CPUArchState *env, target_ulong addr,
Int128 cmpv, Int128 newv, uint32_t oi)
{
#if TCG_TARGET_REG_BITS == 32
uintptr_t ra = GETPC();
Int128 oldv;
oldv = cpu_ld16_mmu(env, addr, oi, ra);
oldv = cpu_ld16_be_mmu(env, addr, oi, ra);
if (int128_eq(oldv, cmpv)) {
cpu_st16_mmu(env, addr, newv, oi, ra);
cpu_st16_be_mmu(env, addr, newv, oi, ra);
} else {
/* Even with comparison failure, still need a write cycle. */
probe_write(env, addr, 16, get_mmuidx(oi), ra);
}
return oldv;
#else
g_assert_not_reached();
#endif
}
Int128 HELPER(nonatomic_cmpxchgo_le)(CPUArchState *env, target_ulong addr,
Int128 cmpv, Int128 newv, uint32_t oi)
{
#if TCG_TARGET_REG_BITS == 32
uintptr_t ra = GETPC();
Int128 oldv;
oldv = cpu_ld16_le_mmu(env, addr, oi, ra);
if (int128_eq(oldv, cmpv)) {
cpu_st16_le_mmu(env, addr, newv, oi, ra);
} else {
/* Even with comparison failure, still need a write cycle. */
probe_write(env, addr, 16, get_mmuidx(oi), ra);
@@ -69,7 +103,7 @@ Int128 HELPER(nonatomic_cmpxchgo)(CPUArchState *env, uint64_t addr,
}
#define ATOMIC_HELPER(OP, TYPE) \
TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, uint64_t addr, \
TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, target_ulong addr, \
TYPE val, uint32_t oi) \
{ return glue(glue(cpu_atomic_,OP),_mmu)(env, addr, val, oi, GETPC()); }


@@ -73,7 +73,8 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE cmpv, ABI_TYPE newv,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE ret;
#if DATA_SIZE == 16
@@ -86,11 +87,38 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
return ret;
}
#if DATA_SIZE < 16
#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ, retaddr);
DATA_TYPE val;
val = atomic16_read(haddr);
ATOMIC_MMU_CLEANUP;
atomic_trace_ld_post(env, addr, oi);
return val;
}
void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_WRITE, retaddr);
atomic16_set(haddr, val);
ATOMIC_MMU_CLEANUP;
atomic_trace_st_post(env, addr, oi);
}
#endif
#else
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE ret;
ret = qatomic_xchg__nocheck(haddr, val);
@@ -103,8 +131,9 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \
DATA_TYPE *haddr, ret; \
haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
DATA_TYPE ret; \
ret = qatomic_##X(haddr, val); \
ATOMIC_MMU_CLEANUP; \
atomic_trace_rmw_post(env, addr, oi); \
@@ -134,8 +163,9 @@ GEN_ATOMIC_HELPER(xor_fetch)
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \
XDATA_TYPE *haddr, cmp, old, new, val = xval; \
haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
XDATA_TYPE cmp, old, new, val = xval; \
smp_mb(); \
cmp = qatomic_read__nocheck(haddr); \
do { \
@@ -158,7 +188,7 @@ GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
#undef GEN_ATOMIC_HELPER_FN
#endif /* DATA SIZE < 16 */
#endif /* DATA SIZE >= 16 */
#undef END
@@ -176,7 +206,8 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE cmpv, ABI_TYPE newv,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE ret;
#if DATA_SIZE == 16
@@ -189,11 +220,39 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
return BSWAP(ret);
}
#if DATA_SIZE < 16
#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ, retaddr);
DATA_TYPE val;
val = atomic16_read(haddr);
ATOMIC_MMU_CLEANUP;
atomic_trace_ld_post(env, addr, oi);
return BSWAP(val);
}
void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_WRITE, retaddr);
val = BSWAP(val);
atomic16_set(haddr, val);
ATOMIC_MMU_CLEANUP;
atomic_trace_st_post(env, addr, oi);
}
#endif
#else
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
ABI_TYPE ret;
ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
@@ -206,8 +265,9 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \
DATA_TYPE *haddr, ret; \
haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
DATA_TYPE ret; \
ret = qatomic_##X(haddr, BSWAP(val)); \
ATOMIC_MMU_CLEANUP; \
atomic_trace_rmw_post(env, addr, oi); \
@@ -234,8 +294,9 @@ GEN_ATOMIC_HELPER(xor_fetch)
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \
XDATA_TYPE *haddr, ldo, ldn, old, new, val = xval; \
haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
XDATA_TYPE ldo, ldn, old, new, val = xval; \
smp_mb(); \
ldn = qatomic_read__nocheck(haddr); \
do { \
@@ -265,7 +326,7 @@ GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
#undef ADD
#undef GEN_ATOMIC_HELPER_FN
#endif /* DATA_SIZE < 16 */
#endif /* DATA_SIZE >= 16 */
#undef END
#endif /* DATA_SIZE > 1 */
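
To make the template machinery above concrete, a rough sketch of what the cmpxchg helper expands to for DATA_SIZE == 4 with the little-endian suffix (names approximated from the template; the exact identifiers depend on SUFFIX and END):

uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    uint32_t *haddr = atomic_mmu_lookup(env, addr, oi, 4,
                                        PAGE_READ | PAGE_WRITE, retaddr);
    /* sub-16-byte case: a plain atomic compare-and-swap on the host pointer */
    uint32_t ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
    ATOMIC_MMU_CLEANUP;
    atomic_trace_rmw_post(env, addr, oi);
    return ret;
}

The byte-swapped instantiation additionally wraps cmpv, newv and the returned value in BSWAP(), as in the second half of the template.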


@@ -21,8 +21,6 @@
#include "sysemu/cpus.h"
#include "sysemu/tcg.h"
#include "exec/exec-all.h"
#include "qemu/plugin.h"
#include "internal.h"
bool tcg_allowed;
@@ -67,8 +65,6 @@ void cpu_loop_exit(CPUState *cpu)
{
/* Undo the setting in cpu_tb_exec. */
cpu->can_do_io = 1;
/* Undo any setting in generated code. */
qemu_plugin_disable_mem_helpers(cpu);
siglongjmp(cpu->jmp_env, 1);
}
@@ -82,8 +78,6 @@ void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc)
{
/* Prevent looping if already executing in a serial context. */
g_assert(!cpu_in_serial_context(cpu));
cpu->exception_index = EXCP_ATOMIC;
cpu_loop_exit_restore(cpu, pc);
}


@@ -20,6 +20,7 @@
#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-machine.h"
#include "qapi/type-helpers.h"
#include "hw/core/tcg-cpu-ops.h"
#include "trace.h"
@@ -27,6 +28,7 @@
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/atomic.h"
#include "qemu/timer.h"
#include "qemu/rcu.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
@@ -36,9 +38,9 @@
#include "sysemu/cpus.h"
#include "exec/cpu-all.h"
#include "sysemu/cpu-timers.h"
#include "exec/replay-core.h"
#include "sysemu/replay.h"
#include "sysemu/tcg.h"
#include "exec/helper-proto-common.h"
#include "exec/helper-proto.h"
#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
@@ -62,8 +64,8 @@ typedef struct SyncClocks {
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100
int64_t max_delay;
int64_t max_advance;
static int64_t max_delay;
static int64_t max_advance;
static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
@@ -159,7 +161,7 @@ uint32_t curr_cflags(CPUState *cpu)
*/
if (unlikely(cpu->singlestep_enabled)) {
cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
} else if (qatomic_read(&one_insn_per_tb)) {
} else if (singlestep) {
cflags |= CF_NO_GOTO_TB | 1;
} else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
cflags |= CF_NO_GOTO_TB;
@@ -169,12 +171,13 @@ uint32_t curr_cflags(CPUState *cpu)
}
struct tb_desc {
vaddr pc;
uint64_t cs_base;
target_ulong pc;
target_ulong cs_base;
CPUArchState *env;
tb_page_addr_t page_addr0;
uint32_t flags;
uint32_t cflags;
uint32_t trace_vcpu_dstate;
};
static bool tb_lookup_cmp(const void *p, const void *d)
@@ -182,10 +185,11 @@ static bool tb_lookup_cmp(const void *p, const void *d)
const TranslationBlock *tb = p;
const struct tb_desc *desc = d;
if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->pc) &&
if ((TARGET_TB_PCREL || tb_pc(tb) == desc->pc) &&
tb_page_addr0(tb) == desc->page_addr0 &&
tb->cs_base == desc->cs_base &&
tb->flags == desc->flags &&
tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
tb_cflags(tb) == desc->cflags) {
/* check next page if needed */
tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb);
@@ -193,7 +197,7 @@ static bool tb_lookup_cmp(const void *p, const void *d)
return true;
} else {
tb_page_addr_t phys_page1;
vaddr virt_page1;
target_ulong virt_page1;
/*
* We know that the first page matched, and an otherwise valid TB
@@ -214,8 +218,8 @@ static bool tb_lookup_cmp(const void *p, const void *d)
return false;
}
static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
uint64_t cs_base, uint32_t flags,
static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
target_ulong cs_base, uint32_t flags,
uint32_t cflags)
{
tb_page_addr_t phys_pc;
@@ -226,21 +230,22 @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
desc.cs_base = cs_base;
desc.flags = flags;
desc.cflags = cflags;
desc.trace_vcpu_dstate = *cpu->trace_dstate;
desc.pc = pc;
phys_pc = get_page_addr_code(desc.env, pc);
if (phys_pc == -1) {
return NULL;
}
desc.page_addr0 = phys_pc;
h = tb_hash_func(phys_pc, (cflags & CF_PCREL ? 0 : pc),
flags, cs_base, cflags);
h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : pc),
flags, cflags, *cpu->trace_dstate);
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}
/* Might cause an exception, so have a longjmp destination ready */
static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
uint64_t cs_base, uint32_t flags,
uint32_t cflags)
static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
target_ulong cs_base,
uint32_t flags, uint32_t cflags)
{
TranslationBlock *tb;
CPUJumpCache *jc;
@@ -251,57 +256,35 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
hash = tb_jmp_cache_hash_func(pc);
jc = cpu->tb_jmp_cache;
tb = tb_jmp_cache_get_tb(jc, hash);
if (cflags & CF_PCREL) {
/* Use acquire to ensure current load of pc from jc. */
tb = qatomic_load_acquire(&jc->array[hash].tb);
if (likely(tb &&
jc->array[hash].pc == pc &&
tb->cs_base == cs_base &&
tb->flags == flags &&
tb_cflags(tb) == cflags)) {
return tb;
}
tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
return NULL;
}
jc->array[hash].pc = pc;
/* Ensure pc is written first. */
qatomic_store_release(&jc->array[hash].tb, tb);
} else {
/* Use rcu_read to ensure current load of pc from *tb. */
tb = qatomic_rcu_read(&jc->array[hash].tb);
if (likely(tb &&
tb->pc == pc &&
tb->cs_base == cs_base &&
tb->flags == flags &&
tb_cflags(tb) == cflags)) {
return tb;
}
tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
return NULL;
}
/* Use the pc value already stored in tb->pc. */
qatomic_set(&jc->array[hash].tb, tb);
if (likely(tb &&
tb_jmp_cache_get_pc(jc, hash, tb) == pc &&
tb->cs_base == cs_base &&
tb->flags == flags &&
tb->trace_vcpu_dstate == *cpu->trace_dstate &&
tb_cflags(tb) == cflags)) {
return tb;
}
tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
return NULL;
}
tb_jmp_cache_set(jc, hash, tb, pc);
return tb;
}
static void log_cpu_exec(vaddr pc, CPUState *cpu,
static void log_cpu_exec(target_ulong pc, CPUState *cpu,
const TranslationBlock *tb)
{
if (qemu_log_in_addr_range(pc)) {
qemu_log_mask(CPU_LOG_EXEC,
"Trace %d: %p [%08" PRIx64
"/%016" VADDR_PRIx "/%08x/%08x] %s\n",
"Trace %d: %p [" TARGET_FMT_lx
"/" TARGET_FMT_lx "/%08x/%08x] %s\n",
cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
tb->flags, tb->cflags, lookup_symbol(pc));
#if defined(DEBUG_DISAS)
if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
FILE *logfile = qemu_log_trylock();
if (logfile) {
@@ -313,17 +296,15 @@ static void log_cpu_exec(vaddr pc, CPUState *cpu,
#if defined(TARGET_I386)
flags |= CPU_DUMP_CCOP;
#endif
if (qemu_loglevel_mask(CPU_LOG_TB_VPU)) {
flags |= CPU_DUMP_VPU;
}
cpu_dump_state(cpu, logfile, flags);
qemu_log_unlock(logfile);
}
}
#endif /* DEBUG_DISAS */
}
}
static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
static bool check_for_breakpoints_slow(CPUState *cpu, target_ulong pc,
uint32_t *cflags)
{
CPUBreakpoint *bp;
@@ -389,7 +370,7 @@ static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
return false;
}
static inline bool check_for_breakpoints(CPUState *cpu, vaddr pc,
static inline bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
uint32_t *cflags)
{
return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) &&
@@ -408,8 +389,7 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
{
CPUState *cpu = env_cpu(env);
TranslationBlock *tb;
vaddr pc;
uint64_t cs_base;
target_ulong cs_base, pc;
uint32_t flags, cflags;
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
@@ -456,7 +436,6 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
qemu_thread_jit_execute();
ret = tcg_qemu_tb_exec(env, tb_ptr);
cpu->can_do_io = 1;
qemu_plugin_disable_mem_helpers(cpu);
/*
* TODO: Delay swapping back to the read-write region of the TB
* until we actually need to modify the TB. The read-only copy,
@@ -480,15 +459,15 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
if (cc->tcg_ops->synchronize_from_tb) {
cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
} else {
tcg_debug_assert(!(tb_cflags(last_tb) & CF_PCREL));
assert(!TARGET_TB_PCREL);
assert(cc->set_pc);
cc->set_pc(cpu, last_tb->pc);
cc->set_pc(cpu, tb_pc(last_tb));
}
if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
vaddr pc = log_pc(cpu, last_tb);
target_ulong pc = log_pc(cpu, last_tb);
if (qemu_log_in_addr_range(pc)) {
qemu_log("Stopped execution of TB chain before %p [%016"
VADDR_PRIx "] %s\n",
qemu_log("Stopped execution of TB chain before %p ["
TARGET_FMT_lx "] %s\n",
last_tb->tc.ptr, pc, lookup_symbol(pc));
}
}
@@ -524,51 +503,14 @@ static void cpu_exec_exit(CPUState *cpu)
if (cc->tcg_ops->cpu_exec_exit) {
cc->tcg_ops->cpu_exec_exit(cpu);
}
}
static void cpu_exec_longjmp_cleanup(CPUState *cpu)
{
/* Non-buggy compilers preserve this; assert the correct value. */
g_assert(cpu == current_cpu);
#ifdef CONFIG_USER_ONLY
clear_helper_retaddr();
if (have_mmap_lock()) {
mmap_unlock();
}
#else
/*
* For softmmu, a tlb_fill fault during translation will land here,
* and we need to release any page locks held. In system mode we
* have one tcg_ctx per thread, so we know it was this cpu doing
* the translation.
*
* Alternative 1: Install a cleanup to be called via an exception
* handling safe longjmp. It seems plausible that all our hosts
* support such a thing. We'd have to properly register unwind info
* for the JIT for EH, rather that just for GDB.
*
* Alternative 2: Set and restore cpu->jmp_env in tb_gen_code to
* capture the cpu_loop_exit longjmp, perform the cleanup, and
* jump again to arrive here.
*/
if (tcg_ctx->gen_tb) {
tb_unlock_pages(tcg_ctx->gen_tb);
tcg_ctx->gen_tb = NULL;
}
#endif
if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread();
}
assert_no_pages_locked();
QEMU_PLUGIN_ASSERT(cpu->plugin_mem_cbs == NULL);
}
void cpu_exec_step_atomic(CPUState *cpu)
{
CPUArchState *env = cpu->env_ptr;
TranslationBlock *tb;
vaddr pc;
uint64_t cs_base;
target_ulong cs_base, pc;
uint32_t flags, cflags;
int tb_exit;
@@ -605,7 +547,17 @@ void cpu_exec_step_atomic(CPUState *cpu)
cpu_tb_exec(cpu, tb, &tb_exit);
cpu_exec_exit(cpu);
} else {
cpu_exec_longjmp_cleanup(cpu);
#ifndef CONFIG_SOFTMMU
clear_helper_retaddr();
if (have_mmap_lock()) {
mmap_unlock();
}
#endif
if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread();
}
assert_no_pages_locked();
qemu_plugin_disable_mem_helpers(cpu);
}
/*
@@ -807,7 +759,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
* Ensure zeroing happens before reading cpu->exit_request or
* cpu->interrupt_request (see also smp_wmb in cpu_exit())
*/
qatomic_set_mb(&cpu_neg(cpu)->icount_decr.u16.high, 0);
qatomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);
if (unlikely(qatomic_read(&cpu->interrupt_request))) {
int interrupt_request;
@@ -910,8 +862,8 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
}
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
vaddr pc, TranslationBlock **last_tb,
int *tb_exit)
target_ulong pc,
TranslationBlock **last_tb, int *tb_exit)
{
int32_t insns_left;
@@ -972,8 +924,7 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
while (!cpu_handle_interrupt(cpu, &last_tb)) {
TranslationBlock *tb;
vaddr pc;
uint64_t cs_base;
target_ulong cs_base, pc;
uint32_t flags, cflags;
cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);
@@ -998,27 +949,17 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
CPUJumpCache *jc;
uint32_t h;
mmap_lock();
tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
mmap_unlock();
/*
* We add the TB in the virtual pc hash table
* for the fast lookup
*/
h = tb_jmp_cache_hash_func(pc);
jc = cpu->tb_jmp_cache;
if (cflags & CF_PCREL) {
jc->array[h].pc = pc;
/* Ensure pc is written first. */
qatomic_store_release(&jc->array[h].tb, tb);
} else {
/* Use the pc value already stored in tb->pc. */
qatomic_set(&jc->array[h].tb, tb);
}
tb_jmp_cache_set(cpu->tb_jmp_cache, h, tb, pc);
}
#ifndef CONFIG_USER_ONLY
@@ -1039,6 +980,7 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);
QEMU_PLUGIN_ASSERT(cpu->plugin_mem_cbs == NULL);
/* Try to align the host and virtual clocks
if the guest is in advance */
align_clocks(sc, cpu);
@@ -1051,7 +993,21 @@ static int cpu_exec_setjmp(CPUState *cpu, SyncClocks *sc)
{
/* Prepare setjmp context for exception handling. */
if (unlikely(sigsetjmp(cpu->jmp_env, 0) != 0)) {
cpu_exec_longjmp_cleanup(cpu);
/* Non-buggy compilers preserve this; assert the correct value. */
g_assert(cpu == current_cpu);
#ifndef CONFIG_SOFTMMU
clear_helper_retaddr();
if (have_mmap_lock()) {
mmap_unlock();
}
#endif
if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread();
}
qemu_plugin_disable_mem_helpers(cpu);
assert_no_pages_locked();
}
return cpu_exec_loop(cpu, sc);
@@ -1116,3 +1072,86 @@ void tcg_exec_unrealizefn(CPUState *cpu)
tlb_destroy(cpu);
g_free_rcu(cpu->tb_jmp_cache, rcu);
}
#ifndef CONFIG_USER_ONLY
static void dump_drift_info(GString *buf)
{
if (!icount_enabled()) {
return;
}
g_string_append_printf(buf, "Host - Guest clock %"PRIi64" ms\n",
(cpu_get_clock() - icount_get()) / SCALE_MS);
if (icount_align_option) {
g_string_append_printf(buf, "Max guest delay %"PRIi64" ms\n",
-max_delay / SCALE_MS);
g_string_append_printf(buf, "Max guest advance %"PRIi64" ms\n",
max_advance / SCALE_MS);
} else {
g_string_append_printf(buf, "Max guest delay NA\n");
g_string_append_printf(buf, "Max guest advance NA\n");
}
}
HumanReadableText *qmp_x_query_jit(Error **errp)
{
g_autoptr(GString) buf = g_string_new("");
if (!tcg_enabled()) {
error_setg(errp, "JIT information is only available with accel=tcg");
return NULL;
}
dump_exec_info(buf);
dump_drift_info(buf);
return human_readable_text_from_str(buf);
}
HumanReadableText *qmp_x_query_opcount(Error **errp)
{
g_autoptr(GString) buf = g_string_new("");
if (!tcg_enabled()) {
error_setg(errp, "Opcode count information is only available with accel=tcg");
return NULL;
}
tcg_dump_op_count(buf);
return human_readable_text_from_str(buf);
}
#ifdef CONFIG_PROFILER
int64_t dev_time;
HumanReadableText *qmp_x_query_profile(Error **errp)
{
g_autoptr(GString) buf = g_string_new("");
static int64_t last_cpu_exec_time;
int64_t cpu_exec_time;
int64_t delta;
cpu_exec_time = tcg_cpu_exec_time();
delta = cpu_exec_time - last_cpu_exec_time;
g_string_append_printf(buf, "async time %" PRId64 " (%0.3f)\n",
dev_time, dev_time / (double)NANOSECONDS_PER_SECOND);
g_string_append_printf(buf, "qemu time %" PRId64 " (%0.3f)\n",
delta, delta / (double)NANOSECONDS_PER_SECOND);
last_cpu_exec_time = cpu_exec_time;
dev_time = 0;
return human_readable_text_from_str(buf);
}
#else
HumanReadableText *qmp_x_query_profile(Error **errp)
{
error_setg(errp, "Internal profiler not compiled");
return NULL;
}
#endif
#endif /* !CONFIG_USER_ONLY */

File diff suppressed because it is too large.

accel/tcg/hmp.c (new file, 14 lines)

@@ -0,0 +1,14 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-machine.h"
#include "exec/exec-all.h"
#include "monitor/monitor.h"
static void hmp_tcg_register(void)
{
monitor_register_hmp_info_hrt("jit", qmp_x_query_jit);
monitor_register_hmp_info_hrt("opcount", qmp_x_query_opcount);
}
type_init(hmp_tcg_register);


@@ -10,7 +10,6 @@
#define ACCEL_TCG_INTERNAL_H
#include "exec/exec-all.h"
#include "exec/translate-all.h"
/*
* Access to the various translations structures need to be serialised
@@ -18,10 +17,10 @@
* memory related structures are protected with mmap_lock.
* In !user-mode we use per-page locks.
*/
#ifdef CONFIG_USER_ONLY
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#else
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock()
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif
#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG)
@@ -36,32 +35,6 @@ static inline void page_table_config_init(void) { }
void page_table_config_init(void);
#endif
#ifdef CONFIG_USER_ONLY
/*
* For user-only, page_protect sets the page read-only.
* Since most execution is already on read-only pages, and we'd need to
* account for other TBs on the same page, defer undoing any page protection
* until we receive the write fault.
*/
static inline void tb_lock_page0(tb_page_addr_t p0)
{
page_protect(p0);
}
static inline void tb_lock_page1(tb_page_addr_t p0, tb_page_addr_t p1)
{
page_protect(p1);
}
static inline void tb_unlock_page1(tb_page_addr_t p0, tb_page_addr_t p1) { }
static inline void tb_unlock_pages(TranslationBlock *tb) { }
#else
void tb_lock_page0(tb_page_addr_t);
void tb_lock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_pages(TranslationBlock *);
#endif
#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
unsigned size,
@@ -69,73 +42,26 @@ void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
#endif /* CONFIG_SOFTMMU */
TranslationBlock *tb_gen_code(CPUState *cpu, vaddr pc,
uint64_t cs_base, uint32_t flags,
TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc,
target_ulong cs_base, uint32_t flags,
int cflags);
void page_init(void);
void tb_htable_init(void);
void tb_reset_jump(TranslationBlock *tb, int n);
TranslationBlock *tb_link_page(TranslationBlock *tb);
TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
tb_page_addr_t phys_page2);
bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
uintptr_t host_pc);
/* Return the current PC from CPU, which may be cached in TB. */
static inline vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
{
if (tb_cflags(tb) & CF_PCREL) {
return cpu->cc->get_pc(cpu);
} else {
return tb->pc;
}
}
/*
* Return true if CS is not running in parallel with other cpus, either
* because there are no other cpus or we are within an exclusive context.
*/
static inline bool cpu_in_serial_context(CPUState *cs)
{
return !(cs->tcg_cflags & CF_PARALLEL) || cpu_in_exclusive_context(cs);
}
extern int64_t max_delay;
extern int64_t max_advance;
extern bool one_insn_per_tb;
/**
* tcg_req_mo:
* @type: TCGBar
*
* Filter @type to the barrier that is required for the guest
* memory ordering vs the host memory ordering. A non-zero
* result indicates that some barrier is required.
*
* If TCG_GUEST_DEFAULT_MO is not defined, assume that the
* guest requires strict ordering.
*
* This is a macro so that it's constant even without optimization.
*/
#ifdef TCG_GUEST_DEFAULT_MO
# define tcg_req_mo(type) \
((type) & TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO)
#if TARGET_TB_PCREL
return cpu->cc->get_pc(cpu);
#else
# define tcg_req_mo(type) ((type) & ~TCG_TARGET_DEFAULT_MO)
return tb_pc(tb);
#endif
/**
* cpu_req_mo:
* @type: TCGBar
*
* If tcg_req_mo indicates a barrier for @type is required
* for the guest memory model, issue a host memory barrier.
*/
#define cpu_req_mo(type) \
do { \
if (tcg_req_mo(type)) { \
smp_mb(); \
} \
} while (0)
}
#endif /* ACCEL_TCG_INTERNAL_H */
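Note: the tcg_req_mo/cpu_req_mo macros in the hunk above filter a requested barrier down to the bits the host's memory model does not already guarantee. A minimal sketch of that mask arithmetic, with invented BAR_* values standing in for the real TCGBar flags:

#include <stdio.h>

#define BAR_LD_LD 1                       /* invented stand-ins ...   */
#define BAR_ST_ST 2                       /* ... for TCGBar bits      */

#define GUEST_MO (BAR_LD_LD | BAR_ST_ST)  /* ordering the guest needs */
#define HOST_MO  (BAR_ST_ST)              /* ordering the host gives  */

#define req_mo(type) ((type) & GUEST_MO & ~HOST_MO)

int main(void)
{
    /* ST_ST comes free on this host, so only LD_LD needs a fence. */
    if (req_mo(BAR_LD_LD | BAR_ST_ST)) {
        printf("emit barrier for 0x%x\n", req_mo(BAR_LD_LD | BAR_ST_ST));
    }
    return 0;
}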

File diff suppressed because it is too large.


@@ -26,7 +26,7 @@ uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
return cpu_ldw_mmu(env, addr, oi, ra);
return cpu_ldw_be_mmu(env, addr, oi, ra);
}
int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
@@ -39,21 +39,21 @@ uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
return cpu_ldl_mmu(env, addr, oi, ra);
return cpu_ldl_be_mmu(env, addr, oi, ra);
}
uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
return cpu_ldq_mmu(env, addr, oi, ra);
return cpu_ldq_be_mmu(env, addr, oi, ra);
}
uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
return cpu_ldw_mmu(env, addr, oi, ra);
return cpu_ldw_le_mmu(env, addr, oi, ra);
}
int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
@@ -66,14 +66,14 @@ uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
return cpu_ldl_mmu(env, addr, oi, ra);
return cpu_ldl_le_mmu(env, addr, oi, ra);
}
uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
return cpu_ldq_mmu(env, addr, oi, ra);
return cpu_ldq_le_mmu(env, addr, oi, ra);
}
void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
@@ -87,42 +87,42 @@ void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
cpu_stw_mmu(env, addr, val, oi, ra);
cpu_stw_be_mmu(env, addr, val, oi, ra);
}
void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
cpu_stl_mmu(env, addr, val, oi, ra);
cpu_stl_be_mmu(env, addr, val, oi, ra);
}
void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
cpu_stq_mmu(env, addr, val, oi, ra);
cpu_stq_be_mmu(env, addr, val, oi, ra);
}
void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
cpu_stw_mmu(env, addr, val, oi, ra);
cpu_stw_le_mmu(env, addr, val, oi, ra);
}
void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
cpu_stl_mmu(env, addr, val, oi, ra);
cpu_stl_le_mmu(env, addr, val, oi, ra);
}
void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
cpu_stq_mmu(env, addr, val, oi, ra);
cpu_stq_le_mmu(env, addr, val, oi, ra);
}
/*--------------------------*/


@@ -10,18 +10,18 @@ tcg_ss.add(files(
'translator.c',
))
tcg_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-exec.c'))
tcg_ss.add(when: 'CONFIG_SYSTEM_ONLY', if_false: files('user-exec-stub.c'))
tcg_ss.add(when: 'CONFIG_SOFTMMU', if_false: files('user-exec-stub.c'))
tcg_ss.add(when: 'CONFIG_PLUGIN', if_true: [files('plugin-gen.c')])
tcg_ss.add(when: libdw, if_true: files('debuginfo.c'))
tcg_ss.add(when: 'CONFIG_LINUX', if_true: files('perf.c'))
specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss)
specific_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files(
specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files(
'cputlb.c',
'monitor.c',
'hmp.c',
))
tcg_module_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files(
tcg_module_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files(
'tcg-accel-ops.c',
'tcg-accel-ops-mttcg.c',
'tcg-accel-ops-icount.c',


@@ -1,90 +0,0 @@
/*
* SPDX-License-Identifier: LGPL-2.1-or-later
*
* QEMU TCG monitor
*
* Copyright (c) 2003-2005 Fabrice Bellard
*/
#include "qemu/osdep.h"
#include "qemu/accel.h"
#include "qapi/error.h"
#include "qapi/type-helpers.h"
#include "qapi/qapi-commands-machine.h"
#include "monitor/monitor.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/tcg.h"
#include "tcg/tcg.h"
#include "internal.h"
static void dump_drift_info(GString *buf)
{
if (!icount_enabled()) {
return;
}
g_string_append_printf(buf, "Host - Guest clock %"PRIi64" ms\n",
(cpu_get_clock() - icount_get()) / SCALE_MS);
if (icount_align_option) {
g_string_append_printf(buf, "Max guest delay %"PRIi64" ms\n",
-max_delay / SCALE_MS);
g_string_append_printf(buf, "Max guest advance %"PRIi64" ms\n",
max_advance / SCALE_MS);
} else {
g_string_append_printf(buf, "Max guest delay NA\n");
g_string_append_printf(buf, "Max guest advance NA\n");
}
}
static void dump_accel_info(GString *buf)
{
AccelState *accel = current_accel();
bool one_insn_per_tb = object_property_get_bool(OBJECT(accel),
"one-insn-per-tb",
&error_fatal);
g_string_append_printf(buf, "Accelerator settings:\n");
g_string_append_printf(buf, "one-insn-per-tb: %s\n\n",
one_insn_per_tb ? "on" : "off");
}
HumanReadableText *qmp_x_query_jit(Error **errp)
{
g_autoptr(GString) buf = g_string_new("");
if (!tcg_enabled()) {
error_setg(errp, "JIT information is only available with accel=tcg");
return NULL;
}
dump_accel_info(buf);
dump_exec_info(buf);
dump_drift_info(buf);
return human_readable_text_from_str(buf);
}
HumanReadableText *qmp_x_query_opcount(Error **errp)
{
g_autoptr(GString) buf = g_string_new("");
if (!tcg_enabled()) {
error_setg(errp,
"Opcode count information is only available with accel=tcg");
return NULL;
}
tcg_dump_op_count(buf);
return human_readable_text_from_str(buf);
}
static void hmp_tcg_register(void)
{
monitor_register_hmp_info_hrt("jit", qmp_x_query_jit);
monitor_register_hmp_info_hrt("opcount", qmp_x_query_opcount);
}
type_init(hmp_tcg_register);


@@ -111,8 +111,6 @@ static void write_perfmap_entry(const void *start, size_t insn,
}
static FILE *jitdump;
static size_t perf_marker_size;
static void *perf_marker = MAP_FAILED;
#define JITHEADER_MAGIC 0x4A695444
#define JITHEADER_VERSION 1
@@ -192,6 +190,7 @@ void perf_enable_jitdump(void)
{
struct jitheader header;
char jitdump_file[32];
void *perf_marker;
if (!use_rt_clock) {
warn_report("CLOCK_MONOTONIC is not available, proceeding without jitdump");
@@ -211,8 +210,7 @@ void perf_enable_jitdump(void)
* PERF_RECORD_MMAP or PERF_RECORD_MMAP2 event is of the form jit-%d.dump
* and will process it as a jitdump file.
*/
perf_marker_size = qemu_real_host_page_size();
perf_marker = mmap(NULL, perf_marker_size, PROT_READ | PROT_EXEC,
perf_marker = mmap(NULL, qemu_real_host_page_size(), PROT_READ | PROT_EXEC,
MAP_PRIVATE, fileno(jitdump), 0);
if (perf_marker == MAP_FAILED) {
warn_report("Could not map %s: %s, proceeding without jitdump",
@@ -313,8 +311,7 @@ void perf_report_code(uint64_t guest_pc, TranslationBlock *tb,
const void *start)
{
struct debuginfo_query *q;
size_t insn, start_words;
uint64_t *gen_insn_data;
size_t insn;
if (!perfmap && !jitdump) {
return;
@@ -328,13 +325,10 @@ void perf_report_code(uint64_t guest_pc, TranslationBlock *tb,
debuginfo_lock();
/* Query debuginfo for each guest instruction. */
gen_insn_data = tcg_ctx->gen_insn_data;
start_words = tcg_ctx->insn_start_words;
for (insn = 0; insn < tb->icount; insn++) {
/* FIXME: This replicates the restore_state_to_opc() logic. */
q[insn].address = gen_insn_data[insn * start_words + 0];
if (tb_cflags(tb) & CF_PCREL) {
q[insn].address = tcg_ctx->gen_insn_data[insn][0];
if (TARGET_TB_PCREL) {
q[insn].address |= (guest_pc & TARGET_PAGE_MASK);
} else {
#if defined(TARGET_I386)
@@ -374,11 +368,6 @@ void perf_exit(void)
perfmap = NULL;
}
if (perf_marker != MAP_FAILED) {
munmap(perf_marker, perf_marker_size);
perf_marker = MAP_FAILED;
}
if (jitdump) {
fclose(jitdump);
jitdump = NULL;


@@ -43,18 +43,11 @@
* CPU's index into a TCG temp, since the first callback did it already.
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "tcg/tcg.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op.h"
#include "exec/exec-all.h"
#include "exec/plugin-gen.h"
#include "exec/translator.h"
#include "exec/helper-proto-common.h"
#define HELPER_H "accel/tcg/plugin-helpers.h"
#include "exec/helper-info.c.inc"
#undef HELPER_H
#ifdef CONFIG_SOFTMMU
# define CONFIG_SOFTMMU_GATE 1
@@ -98,12 +91,30 @@ void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
void *userdata)
{ }
static void do_gen_mem_cb(TCGv vaddr, uint32_t info)
{
TCGv_i32 cpu_index = tcg_temp_new_i32();
TCGv_i32 meminfo = tcg_const_i32(info);
TCGv_i64 vaddr64 = tcg_temp_new_i64();
TCGv_ptr udata = tcg_const_ptr(NULL);
tcg_gen_ld_i32(cpu_index, cpu_env,
-offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
tcg_gen_extu_tl_i64(vaddr64, vaddr);
gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, vaddr64, udata);
tcg_temp_free_ptr(udata);
tcg_temp_free_i64(vaddr64);
tcg_temp_free_i32(meminfo);
tcg_temp_free_i32(cpu_index);
}
static void gen_empty_udata_cb(void)
{
TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
TCGv_ptr udata = tcg_temp_ebb_new_ptr();
TCGv_i32 cpu_index = tcg_temp_new_i32();
TCGv_ptr udata = tcg_const_ptr(NULL); /* will be overwritten later */
tcg_gen_movi_ptr(udata, 0);
tcg_gen_ld_i32(cpu_index, cpu_env,
-offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
gen_helper_plugin_vcpu_udata_cb(cpu_index, udata);
@@ -118,10 +129,9 @@ static void gen_empty_udata_cb(void)
*/
static void gen_empty_inline_cb(void)
{
TCGv_i64 val = tcg_temp_ebb_new_i64();
TCGv_ptr ptr = tcg_temp_ebb_new_ptr();
TCGv_i64 val = tcg_temp_new_i64();
TCGv_ptr ptr = tcg_const_ptr(NULL); /* overwritten later */
tcg_gen_movi_ptr(ptr, 0);
tcg_gen_ld_i64(val, ptr, 0);
/* pass an immediate != 0 so that it doesn't get optimized away */
tcg_gen_addi_i64(val, val, 0xdeadface);
@@ -130,22 +140,9 @@ static void gen_empty_inline_cb(void)
tcg_temp_free_i64(val);
}
static void gen_empty_mem_cb(TCGv_i64 addr, uint32_t info)
static void gen_empty_mem_cb(TCGv addr, uint32_t info)
{
TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
TCGv_i32 meminfo = tcg_temp_ebb_new_i32();
TCGv_ptr udata = tcg_temp_ebb_new_ptr();
tcg_gen_movi_i32(meminfo, info);
tcg_gen_movi_ptr(udata, 0);
tcg_gen_ld_i32(cpu_index, cpu_env,
-offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, addr, udata);
tcg_temp_free_ptr(udata);
tcg_temp_free_i32(meminfo);
tcg_temp_free_i32(cpu_index);
do_gen_mem_cb(addr, info);
}
/*
@@ -154,9 +151,9 @@ static void gen_empty_mem_cb(TCGv_i64 addr, uint32_t info)
*/
static void gen_empty_mem_helper(void)
{
TCGv_ptr ptr = tcg_temp_ebb_new_ptr();
TCGv_ptr ptr;
tcg_gen_movi_ptr(ptr, 0);
ptr = tcg_const_ptr(NULL);
tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
offsetof(ArchCPU, env));
tcg_temp_free_ptr(ptr);
@@ -200,17 +197,35 @@ static void plugin_gen_empty_callback(enum plugin_gen_from from)
}
}
void plugin_gen_empty_mem_callback(TCGv_i64 addr, uint32_t info)
union mem_gen_fn {
void (*mem_fn)(TCGv, uint32_t);
void (*inline_fn)(void);
};
static void gen_mem_wrapped(enum plugin_gen_cb type,
const union mem_gen_fn *f, TCGv addr,
uint32_t info, bool is_mem)
{
enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info);
gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_MEM, rw);
gen_empty_mem_cb(addr, info);
gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, rw);
if (is_mem) {
f->mem_fn(addr, info);
} else {
f->inline_fn();
}
tcg_gen_plugin_cb_end();
}
gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_INLINE, rw);
gen_empty_inline_cb();
tcg_gen_plugin_cb_end();
void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info)
{
union mem_gen_fn fn;
fn.mem_fn = gen_empty_mem_cb;
gen_mem_wrapped(PLUGIN_GEN_CB_MEM, &fn, addr, info, true);
fn.inline_fn = gen_empty_inline_cb;
gen_mem_wrapped(PLUGIN_GEN_CB_INLINE, &fn, 0, info, false);
}
static TCGOp *find_op(TCGOp *op, TCGOpcode opc)
@@ -260,6 +275,33 @@ static TCGOp *copy_op(TCGOp **begin_op, TCGOp *op, TCGOpcode opc)
return op;
}
static TCGOp *copy_extu_i32_i64(TCGOp **begin_op, TCGOp *op)
{
if (TCG_TARGET_REG_BITS == 32) {
/* mov_i32 */
op = copy_op(begin_op, op, INDEX_op_mov_i32);
/* mov_i32 w/ $0 */
op = copy_op(begin_op, op, INDEX_op_mov_i32);
} else {
/* extu_i32_i64 */
op = copy_op(begin_op, op, INDEX_op_extu_i32_i64);
}
return op;
}
static TCGOp *copy_mov_i64(TCGOp **begin_op, TCGOp *op)
{
if (TCG_TARGET_REG_BITS == 32) {
/* 2x mov_i32 */
op = copy_op(begin_op, op, INDEX_op_mov_i32);
op = copy_op(begin_op, op, INDEX_op_mov_i32);
} else {
/* mov_i64 */
op = copy_op(begin_op, op, INDEX_op_mov_i64);
}
return op;
}
static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr)
{
if (UINTPTR_MAX == UINT32_MAX) {
@@ -274,6 +316,18 @@ static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr)
return op;
}
static TCGOp *copy_extu_tl_i64(TCGOp **begin_op, TCGOp *op)
{
if (TARGET_LONG_BITS == 32) {
/* extu_i32_i64 */
op = copy_extu_i32_i64(begin_op, op);
} else {
/* mov_i64 */
op = copy_mov_i64(begin_op, op);
}
return op;
}
static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op)
{
if (TCG_TARGET_REG_BITS == 32) {
@@ -418,6 +472,9 @@ static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb,
tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
}
/* extu_tl_i64 */
op = copy_extu_tl_i64(&begin_op, op);
if (type == PLUGIN_GEN_CB_MEM) {
/* call */
op = copy_call(&begin_op, op, HELPER(plugin_vcpu_mem_cb),
@@ -569,6 +626,8 @@ static void inject_mem_disable_helper(struct qemu_plugin_insn *plugin_insn,
/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
void plugin_gen_disable_mem_helpers(void)
{
TCGv_ptr ptr;
/*
* We could emit the clearing unconditionally and be done. However, this can
* be wasteful if for instance plugins don't track memory accesses, or if
@@ -581,8 +640,10 @@ void plugin_gen_disable_mem_helpers(void)
if (!tcg_ctx->plugin_tb->mem_helper) {
return;
}
tcg_gen_st_ptr(tcg_constant_ptr(NULL), cpu_env,
offsetof(CPUState, plugin_mem_cbs) - offsetof(ArchCPU, env));
ptr = tcg_const_ptr(NULL);
tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
offsetof(ArchCPU, env));
tcg_temp_free_ptr(ptr);
}
static void plugin_gen_tb_udata(const struct qemu_plugin_tb *ptb,


@@ -35,16 +35,16 @@
#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
static inline unsigned int tb_jmp_cache_hash_page(vaddr pc)
static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
vaddr tmp;
target_ulong tmp;
tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}
static inline unsigned int tb_jmp_cache_hash_func(vaddr pc)
static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
vaddr tmp;
target_ulong tmp;
tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
| (tmp & TB_JMP_ADDR_MASK));
@@ -53,7 +53,7 @@ static inline unsigned int tb_jmp_cache_hash_func(vaddr pc)
#else
/* In user-mode we can get better hashing because we do not have a TLB */
static inline unsigned int tb_jmp_cache_hash_func(vaddr pc)
static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
return (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1);
}
@@ -61,10 +61,10 @@ static inline unsigned int tb_jmp_cache_hash_func(vaddr pc)
#endif /* CONFIG_SOFTMMU */
static inline
uint32_t tb_hash_func(tb_page_addr_t phys_pc, vaddr pc,
uint32_t flags, uint64_t flags2, uint32_t cf_mask)
uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc, uint32_t flags,
uint32_t cf_mask, uint32_t trace_vcpu_dstate)
{
return qemu_xxhash8(phys_pc, pc, flags2, flags, cf_mask);
return qemu_xxhash7(phys_pc, pc, flags, cf_mask, trace_vcpu_dstate);
}
#endif


@@ -14,15 +14,53 @@
/*
* Accessed in parallel; all accesses to 'tb' must be atomic.
* For CF_PCREL, accesses to 'pc' must be protected by a
* load_acquire/store_release to 'tb'.
* For TARGET_TB_PCREL, accesses to 'pc' must be protected by
* a load_acquire/store_release to 'tb'.
*/
struct CPUJumpCache {
struct rcu_head rcu;
struct {
TranslationBlock *tb;
vaddr pc;
#if TARGET_TB_PCREL
target_ulong pc;
#endif
} array[TB_JMP_CACHE_SIZE];
};
static inline TranslationBlock *
tb_jmp_cache_get_tb(CPUJumpCache *jc, uint32_t hash)
{
#if TARGET_TB_PCREL
/* Use acquire to ensure current load of pc from jc. */
return qatomic_load_acquire(&jc->array[hash].tb);
#else
/* Use rcu_read to ensure current load of pc from *tb. */
return qatomic_rcu_read(&jc->array[hash].tb);
#endif
}
static inline target_ulong
tb_jmp_cache_get_pc(CPUJumpCache *jc, uint32_t hash, TranslationBlock *tb)
{
#if TARGET_TB_PCREL
return jc->array[hash].pc;
#else
return tb_pc(tb);
#endif
}
static inline void
tb_jmp_cache_set(CPUJumpCache *jc, uint32_t hash,
TranslationBlock *tb, target_ulong pc)
{
#if TARGET_TB_PCREL
jc->array[hash].pc = pc;
/* Use store_release on tb to ensure pc is written first. */
qatomic_store_release(&jc->array[hash].tb, tb);
#else
/* Use the pc value already stored in tb->pc. */
qatomic_set(&jc->array[hash].tb, tb);
#endif
}
#endif /* ACCEL_TCG_TB_JMP_CACHE_H */
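Note: the pc/tb handling in this header is a standard publication protocol: the writer stores the payload (pc) first and then store-releases the pointer (tb); a reader load-acquires tb and may then safely read pc. A self-contained C11 sketch of the same pairing, with invented names:

#include <stdatomic.h>
#include <stdint.h>

struct entry {
    _Atomic(void *) tb;
    uint64_t pc;
};

static void publish(struct entry *e, void *tb, uint64_t pc)
{
    e->pc = pc;                            /* payload first */
    atomic_store_explicit(&e->tb, tb, memory_order_release);
}

static void *lookup(struct entry *e, uint64_t *pc)
{
    void *tb = atomic_load_explicit(&e->tb, memory_order_acquire);
    if (tb) {
        *pc = e->pc;                       /* ordered after the acquire */
    }
    return tb;
}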


@@ -19,11 +19,9 @@
#include "qemu/osdep.h"
#include "qemu/interval-tree.h"
#include "qemu/qtree.h"
#include "exec/cputlb.h"
#include "exec/log.h"
#include "exec/exec-all.h"
#include "exec/tb-flush.h"
#include "exec/translate-all.h"
#include "sysemu/tcg.h"
#include "tcg/tcg.h"
@@ -46,10 +44,11 @@ static bool tb_cmp(const void *ap, const void *bp)
const TranslationBlock *a = ap;
const TranslationBlock *b = bp;
return ((tb_cflags(a) & CF_PCREL || a->pc == b->pc) &&
return ((TARGET_TB_PCREL || tb_pc(a) == tb_pc(b)) &&
a->cs_base == b->cs_base &&
a->flags == b->flags &&
(tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
tb_page_addr0(a) == tb_page_addr0(b) &&
tb_page_addr1(a) == tb_page_addr1(b));
}
@@ -70,7 +69,17 @@ typedef struct PageDesc PageDesc;
*/
#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
static inline void tb_lock_pages(const TranslationBlock *tb) { }
static inline void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
PageDesc **ret_p2, tb_page_addr_t phys2,
bool alloc)
{
*ret_p1 = NULL;
*ret_p2 = NULL;
}
static inline void page_unlock(PageDesc *pd) { }
static inline void page_lock_tb(const TranslationBlock *tb) { }
static inline void page_unlock_tb(const TranslationBlock *tb) { }
/*
* For user-only, since we are protecting all of memory with a single lock,
@@ -86,9 +95,9 @@ static void tb_remove_all(void)
}
/* Call with mmap_lock held. */
static void tb_record(TranslationBlock *tb)
static void tb_record(TranslationBlock *tb, PageDesc *p1, PageDesc *p2)
{
vaddr addr;
target_ulong addr;
int flags;
assert_memory_lock();
@@ -116,29 +125,29 @@ static void tb_remove(TranslationBlock *tb)
}
/* TODO: For now, still shared with translate-all.c for system mode. */
#define PAGE_FOR_EACH_TB(start, last, pagedesc, T, N) \
for (T = foreach_tb_first(start, last), \
N = foreach_tb_next(T, start, last); \
#define PAGE_FOR_EACH_TB(start, end, pagedesc, T, N) \
for (T = foreach_tb_first(start, end), \
N = foreach_tb_next(T, start, end); \
T != NULL; \
T = N, N = foreach_tb_next(N, start, last))
T = N, N = foreach_tb_next(N, start, end))
typedef TranslationBlock *PageForEachNext;
static PageForEachNext foreach_tb_first(tb_page_addr_t start,
tb_page_addr_t last)
tb_page_addr_t end)
{
IntervalTreeNode *n = interval_tree_iter_first(&tb_root, start, last);
IntervalTreeNode *n = interval_tree_iter_first(&tb_root, start, end - 1);
return n ? container_of(n, TranslationBlock, itree) : NULL;
}
static PageForEachNext foreach_tb_next(PageForEachNext tb,
tb_page_addr_t start,
tb_page_addr_t last)
tb_page_addr_t end)
{
IntervalTreeNode *n;
if (tb) {
n = interval_tree_iter_next(&tb->itree, start, last);
n = interval_tree_iter_next(&tb->itree, start, end - 1);
if (n) {
return container_of(n, TranslationBlock, itree);
}
@@ -304,12 +313,12 @@ struct page_entry {
* See also: page_collection_lock().
*/
struct page_collection {
QTree *tree;
GTree *tree;
struct page_entry *max;
};
typedef int PageForEachNext;
#define PAGE_FOR_EACH_TB(start, last, pagedesc, tb, n) \
#define PAGE_FOR_EACH_TB(start, end, pagedesc, tb, n) \
TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
#ifdef CONFIG_DEBUG_TCG
@@ -381,108 +390,12 @@ static void page_lock(PageDesc *pd)
qemu_spin_lock(&pd->lock);
}
/* Like qemu_spin_trylock, returns false on success */
static bool page_trylock(PageDesc *pd)
{
bool busy = qemu_spin_trylock(&pd->lock);
if (!busy) {
page_lock__debug(pd);
}
return busy;
}
static void page_unlock(PageDesc *pd)
{
qemu_spin_unlock(&pd->lock);
page_unlock__debug(pd);
}
void tb_lock_page0(tb_page_addr_t paddr)
{
page_lock(page_find_alloc(paddr >> TARGET_PAGE_BITS, true));
}
void tb_lock_page1(tb_page_addr_t paddr0, tb_page_addr_t paddr1)
{
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
PageDesc *pd0, *pd1;
if (pindex0 == pindex1) {
/* Identical pages, and the first page is already locked. */
return;
}
pd1 = page_find_alloc(pindex1, true);
if (pindex0 < pindex1) {
/* Correct locking order, we may block. */
page_lock(pd1);
return;
}
/* Incorrect locking order, we cannot block lest we deadlock. */
if (!page_trylock(pd1)) {
return;
}
/*
* Drop the lock on page0 and get both page locks in the right order.
* Restart translation via longjmp.
*/
pd0 = page_find_alloc(pindex0, false);
page_unlock(pd0);
page_lock(pd1);
page_lock(pd0);
siglongjmp(tcg_ctx->jmp_trans, -3);
}
void tb_unlock_page1(tb_page_addr_t paddr0, tb_page_addr_t paddr1)
{
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
if (pindex0 != pindex1) {
page_unlock(page_find_alloc(pindex1, false));
}
}
static void tb_lock_pages(TranslationBlock *tb)
{
tb_page_addr_t paddr0 = tb_page_addr0(tb);
tb_page_addr_t paddr1 = tb_page_addr1(tb);
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
if (unlikely(paddr0 == -1)) {
return;
}
if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
if (pindex0 < pindex1) {
page_lock(page_find_alloc(pindex0, true));
page_lock(page_find_alloc(pindex1, true));
return;
}
page_lock(page_find_alloc(pindex1, true));
}
page_lock(page_find_alloc(pindex0, true));
}
void tb_unlock_pages(TranslationBlock *tb)
{
tb_page_addr_t paddr0 = tb_page_addr0(tb);
tb_page_addr_t paddr1 = tb_page_addr1(tb);
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
if (unlikely(paddr0 == -1)) {
return;
}
if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
page_unlock(page_find_alloc(pindex1, false));
}
page_unlock(page_find_alloc(pindex0, false));
}
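Note: the page-locking helpers above follow a fixed discipline: locks are taken in ascending page-index order, and when the second lock would be taken out of order it is only trylocked; on contention everything is dropped and the caller restarts (here via siglongjmp). A freestanding sketch of that discipline, with invented types:

#include <pthread.h>
#include <stdbool.h>

typedef struct {
    pthread_mutex_t mu;
    unsigned long index;
} Page;

/* Returns false when the caller must drop everything and restart. */
static bool lock_second(Page *held, Page *second)
{
    if (held->index < second->index) {
        pthread_mutex_lock(&second->mu);      /* correct order: may block */
        return true;
    }
    if (pthread_mutex_trylock(&second->mu) == 0) {
        return true;                          /* out of order, but uncontended */
    }
    pthread_mutex_unlock(&held->mu);          /* contended: back out ... */
    return false;                             /* ... and let caller retry */
}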
static inline struct page_entry *
page_entry_new(PageDesc *pd, tb_page_addr_t index)
{
@@ -506,10 +419,13 @@ static void page_entry_destroy(gpointer p)
/* returns false on success */
static bool page_entry_trylock(struct page_entry *pe)
{
bool busy = page_trylock(pe->pd);
bool busy;
busy = qemu_spin_trylock(&pe->pd->lock);
if (!busy) {
g_assert(!pe->locked);
pe->locked = true;
page_lock__debug(pe->pd);
}
return busy;
}
@@ -550,7 +466,7 @@ static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
struct page_entry *pe;
PageDesc *pd;
pe = q_tree_lookup(set->tree, &index);
pe = g_tree_lookup(set->tree, &index);
if (pe) {
return false;
}
@@ -561,7 +477,7 @@ static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
}
pe = page_entry_new(pd, index);
q_tree_insert(set->tree, &pe->index, pe);
g_tree_insert(set->tree, &pe->index, pe);
/*
* If this is either (1) the first insertion or (2) a page whose index
@@ -593,30 +509,30 @@ static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
}
/*
* Lock a range of pages ([@start,@last]) as well as the pages of all
* Lock a range of pages ([@start,@end[) as well as the pages of all
* intersecting TBs.
* Locking order: acquire locks in ascending order of page index.
*/
static struct page_collection *page_collection_lock(tb_page_addr_t start,
tb_page_addr_t last)
tb_page_addr_t end)
{
struct page_collection *set = g_malloc(sizeof(*set));
tb_page_addr_t index;
PageDesc *pd;
start >>= TARGET_PAGE_BITS;
last >>= TARGET_PAGE_BITS;
g_assert(start <= last);
end >>= TARGET_PAGE_BITS;
g_assert(start <= end);
set->tree = q_tree_new_full(tb_page_addr_cmp, NULL, NULL,
set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
page_entry_destroy);
set->max = NULL;
assert_no_pages_locked();
retry:
q_tree_foreach(set->tree, page_entry_lock, NULL);
g_tree_foreach(set->tree, page_entry_lock, NULL);
for (index = start; index <= last; index++) {
for (index = start; index <= end; index++) {
TranslationBlock *tb;
PageForEachNext n;
@@ -625,7 +541,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start,
continue;
}
if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
q_tree_foreach(set->tree, page_entry_unlock, NULL);
g_tree_foreach(set->tree, page_entry_unlock, NULL);
goto retry;
}
assert_page_locked(pd);
@@ -634,7 +550,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start,
(tb_page_addr1(tb) != -1 &&
page_trylock_add(set, tb_page_addr1(tb)))) {
/* drop all locks, and reacquire in order */
q_tree_foreach(set->tree, page_entry_unlock, NULL);
g_tree_foreach(set->tree, page_entry_unlock, NULL);
goto retry;
}
}
@@ -645,7 +561,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start,
static void page_collection_unlock(struct page_collection *set)
{
/* entries are unlocked and freed via page_entry_destroy */
q_tree_destroy(set->tree);
g_tree_destroy(set->tree);
g_free(set);
}
@@ -687,7 +603,8 @@ static void tb_remove_all(void)
* Add the tb in the target page and protect it if necessary.
* Called with @p->lock held.
*/
static void tb_page_add(PageDesc *p, TranslationBlock *tb, unsigned int n)
static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
unsigned int n)
{
bool page_already_protected;
@@ -707,21 +624,15 @@ static void tb_page_add(PageDesc *p, TranslationBlock *tb, unsigned int n)
}
}
static void tb_record(TranslationBlock *tb)
static void tb_record(TranslationBlock *tb, PageDesc *p1, PageDesc *p2)
{
tb_page_addr_t paddr0 = tb_page_addr0(tb);
tb_page_addr_t paddr1 = tb_page_addr1(tb);
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
assert(paddr0 != -1);
if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
tb_page_add(page_find_alloc(pindex1, false), tb, 1);
tb_page_add(p1, tb, 0);
if (unlikely(p2)) {
tb_page_add(p2, tb, 1);
}
tb_page_add(page_find_alloc(pindex0, false), tb, 0);
}
static void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
{
TranslationBlock *tb1;
uintptr_t *pprev;
@@ -741,16 +652,74 @@ static void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
static void tb_remove(TranslationBlock *tb)
{
tb_page_addr_t paddr0 = tb_page_addr0(tb);
tb_page_addr_t paddr1 = tb_page_addr1(tb);
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
PageDesc *pd;
assert(paddr0 != -1);
if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
tb_page_remove(page_find_alloc(pindex1, false), tb);
pd = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
tb_page_remove(pd, tb);
if (unlikely(tb->page_addr[1] != -1)) {
pd = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
tb_page_remove(pd, tb);
}
}
static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc)
{
PageDesc *p1, *p2;
tb_page_addr_t page1;
tb_page_addr_t page2;
assert_memory_lock();
g_assert(phys1 != -1);
page1 = phys1 >> TARGET_PAGE_BITS;
page2 = phys2 >> TARGET_PAGE_BITS;
p1 = page_find_alloc(page1, alloc);
if (ret_p1) {
*ret_p1 = p1;
}
if (likely(phys2 == -1)) {
page_lock(p1);
return;
} else if (page1 == page2) {
page_lock(p1);
if (ret_p2) {
*ret_p2 = p1;
}
return;
}
p2 = page_find_alloc(page2, alloc);
if (ret_p2) {
*ret_p2 = p2;
}
if (page1 < page2) {
page_lock(p1);
page_lock(p2);
} else {
page_lock(p2);
page_lock(p1);
}
}
/* lock the page(s) of a TB in the correct acquisition order */
static void page_lock_tb(const TranslationBlock *tb)
{
page_lock_pair(NULL, tb_page_addr0(tb), NULL, tb_page_addr1(tb), false);
}
static void page_unlock_tb(const TranslationBlock *tb)
{
PageDesc *p1 = page_find(tb_page_addr0(tb) >> TARGET_PAGE_BITS);
page_unlock(p1);
if (unlikely(tb_page_addr1(tb) != -1)) {
PageDesc *p2 = page_find(tb_page_addr1(tb) >> TARGET_PAGE_BITS);
if (p2 != p1) {
page_unlock(p2);
}
}
tb_page_remove(page_find_alloc(pindex0, false), tb);
}
#endif /* CONFIG_USER_ONLY */
@@ -775,7 +744,7 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
tcg_region_reset_all();
/* XXX: flush processor icache at this point if cache flush is expensive */
qatomic_inc(&tb_ctx.tb_flush_count);
qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
done:
mmap_unlock();
@@ -787,9 +756,9 @@ done:
void tb_flush(CPUState *cpu)
{
if (tcg_enabled()) {
unsigned tb_flush_count = qatomic_read(&tb_ctx.tb_flush_count);
unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);
if (cpu_in_serial_context(cpu)) {
if (cpu_in_exclusive_context(cpu)) {
do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
} else {
async_safe_run_on_cpu(cpu, do_tb_flush,
@@ -878,13 +847,13 @@ static void tb_jmp_cache_inval_tb(TranslationBlock *tb)
{
CPUState *cpu;
if (tb_cflags(tb) & CF_PCREL) {
if (TARGET_TB_PCREL) {
/* A TB may be at any virtual address */
CPU_FOREACH(cpu) {
tcg_flush_jmp_cache(cpu);
}
} else {
uint32_t h = tb_jmp_cache_hash_func(tb->pc);
uint32_t h = tb_jmp_cache_hash_func(tb_pc(tb));
CPU_FOREACH(cpu) {
CPUJumpCache *jc = cpu->tb_jmp_cache;
@@ -916,8 +885,8 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
/* remove the TB from the hash list */
phys_pc = tb_page_addr0(tb);
h = tb_hash_func(phys_pc, (orig_cflags & CF_PCREL ? 0 : tb->pc),
tb->flags, tb->cs_base, orig_cflags);
h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
tb->flags, orig_cflags, tb->trace_vcpu_dstate);
if (!qht_remove(&tb_ctx.htable, tb, h)) {
return;
}
@@ -955,16 +924,18 @@ static void tb_phys_invalidate__locked(TranslationBlock *tb)
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
if (page_addr == -1 && tb_page_addr0(tb) != -1) {
tb_lock_pages(tb);
page_lock_tb(tb);
do_tb_phys_invalidate(tb, true);
tb_unlock_pages(tb);
page_unlock_tb(tb);
} else {
do_tb_phys_invalidate(tb, false);
}
}
/*
* Add a new TB and link it to the physical page tables.
* Add a new TB and link it to the physical page tables. phys_page2 is
* (-1) to indicate that only one page contains the TB.
*
* Called with mmap_lock held for user-mode emulation.
*
* Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
@@ -972,29 +943,43 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
* for the same block of guest code that @tb corresponds to. In that case,
* the caller should discard the original @tb, and use instead the returned TB.
*/
TranslationBlock *tb_link_page(TranslationBlock *tb)
TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
tb_page_addr_t phys_page2)
{
PageDesc *p;
PageDesc *p2 = NULL;
void *existing_tb = NULL;
uint32_t h;
assert_memory_lock();
tcg_debug_assert(!(tb->cflags & CF_INVALID));
tb_record(tb);
/*
* Add the TB to the page list, acquiring first the pages's locks.
* We keep the locks held until after inserting the TB in the hash table,
* so that if the insertion fails we know for sure that the TBs are still
* in the page descriptors.
* Note that inserting into the hash table first isn't an option, since
* we can only insert TBs that are fully initialized.
*/
page_lock_pair(&p, phys_pc, &p2, phys_page2, true);
tb_record(tb, p, p2);
/* add in the hash table */
h = tb_hash_func(tb_page_addr0(tb), (tb->cflags & CF_PCREL ? 0 : tb->pc),
tb->flags, tb->cs_base, tb->cflags);
h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
tb->flags, tb->cflags, tb->trace_vcpu_dstate);
qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
/* remove TB from the page(s) if we couldn't insert it */
if (unlikely(existing_tb)) {
tb_remove(tb);
tb_unlock_pages(tb);
return existing_tb;
tb = existing_tb;
}
tb_unlock_pages(tb);
if (p2 && p2 != p) {
page_unlock(p2);
}
page_unlock(p);
return tb;
}
@@ -1004,14 +989,14 @@ TranslationBlock *tb_link_page(TranslationBlock *tb)
* Called with mmap_lock held for user-mode emulation.
* NOTE: this function must not be called while a TB is running.
*/
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
TranslationBlock *tb;
PageForEachNext n;
assert_memory_lock();
PAGE_FOR_EACH_TB(start, last, unused, tb, n) {
PAGE_FOR_EACH_TB(start, end, unused, tb, n) {
tb_phys_invalidate__locked(tb);
}
}
@@ -1023,11 +1008,11 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
*/
void tb_invalidate_phys_page(tb_page_addr_t addr)
{
tb_page_addr_t start, last;
tb_page_addr_t start, end;
start = addr & TARGET_PAGE_MASK;
last = addr | ~TARGET_PAGE_MASK;
tb_invalidate_phys_range(start, last);
end = start + TARGET_PAGE_SIZE;
tb_invalidate_phys_range(start, end);
}
/*
@@ -1043,7 +1028,6 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
bool current_tb_modified;
TranslationBlock *tb;
PageForEachNext n;
tb_page_addr_t last;
/*
* Without precise smc semantics, or when outside of a TB,
@@ -1060,11 +1044,10 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
assert_memory_lock();
current_tb = tcg_tb_lookup(pc);
last = addr | ~TARGET_PAGE_MASK;
addr &= TARGET_PAGE_MASK;
current_tb_modified = false;
PAGE_FOR_EACH_TB(addr, last, unused, tb, n) {
PAGE_FOR_EACH_TB(addr, addr + TARGET_PAGE_SIZE, unused, tb, n) {
if (current_tb == tb &&
(tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
/*
@@ -1096,36 +1079,34 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
static void
tb_invalidate_phys_page_range__locked(struct page_collection *pages,
PageDesc *p, tb_page_addr_t start,
tb_page_addr_t last,
tb_page_addr_t end,
uintptr_t retaddr)
{
TranslationBlock *tb;
tb_page_addr_t tb_start, tb_end;
PageForEachNext n;
#ifdef TARGET_HAS_PRECISE_SMC
bool current_tb_modified = false;
TranslationBlock *current_tb = retaddr ? tcg_tb_lookup(retaddr) : NULL;
#endif /* TARGET_HAS_PRECISE_SMC */
/* Range may not cross a page. */
tcg_debug_assert(((start ^ last) & TARGET_PAGE_MASK) == 0);
/*
* We remove all the TBs in the range [start, last].
* We remove all the TBs in the range [start, end[.
* XXX: see if in some cases it could be faster to invalidate all the code
*/
PAGE_FOR_EACH_TB(start, last, p, tb, n) {
tb_page_addr_t tb_start, tb_last;
PAGE_FOR_EACH_TB(start, end, p, tb, n) {
/* NOTE: this is subtle as a TB may span two physical pages */
tb_start = tb_page_addr0(tb);
tb_last = tb_start + tb->size - 1;
if (n == 0) {
tb_last = MIN(tb_last, tb_start | ~TARGET_PAGE_MASK);
/* NOTE: tb_end may be after the end of the page, but
it is not a problem */
tb_start = tb_page_addr0(tb);
tb_end = tb_start + tb->size;
} else {
tb_start = tb_page_addr1(tb);
tb_last = tb_start + (tb_last & ~TARGET_PAGE_MASK);
tb_end = tb_start + ((tb_page_addr0(tb) + tb->size)
& ~TARGET_PAGE_MASK);
}
if (!(tb_last < start || tb_start > last)) {
if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb == tb &&
(tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
@@ -1167,7 +1148,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
void tb_invalidate_phys_page(tb_page_addr_t addr)
{
struct page_collection *pages;
tb_page_addr_t start, last;
tb_page_addr_t start, end;
PageDesc *p;
p = page_find(addr >> TARGET_PAGE_BITS);
@@ -1176,40 +1157,36 @@ void tb_invalidate_phys_page(tb_page_addr_t addr)
}
start = addr & TARGET_PAGE_MASK;
last = addr | ~TARGET_PAGE_MASK;
pages = page_collection_lock(start, last);
tb_invalidate_phys_page_range__locked(pages, p, start, last, 0);
end = start + TARGET_PAGE_SIZE;
pages = page_collection_lock(start, end);
tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
page_collection_unlock(pages);
}
/*
* Invalidate all TBs which intersect with the target physical address range
* [start;last]. NOTE: start and end may refer to *different* physical pages.
* [start;end[. NOTE: start and end may refer to *different* physical pages.
* 'is_cpu_write_access' should be true if called from a real cpu write
* access: the virtual CPU will exit the current TB if code is modified inside
* this TB.
*/
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
struct page_collection *pages;
tb_page_addr_t index, index_last;
tb_page_addr_t next;
pages = page_collection_lock(start, last);
index_last = last >> TARGET_PAGE_BITS;
for (index = start >> TARGET_PAGE_BITS; index <= index_last; index++) {
PageDesc *pd = page_find(index);
tb_page_addr_t page_start, page_last;
pages = page_collection_lock(start, end);
for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
start < end;
start = next, next += TARGET_PAGE_SIZE) {
PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
tb_page_addr_t bound = MIN(next, end);
if (pd == NULL) {
continue;
}
assert_page_locked(pd);
page_start = index << TARGET_PAGE_BITS;
page_last = page_start | ~TARGET_PAGE_MASK;
page_last = MIN(page_last, last);
tb_invalidate_phys_page_range__locked(pages, pd,
page_start, page_last, 0);
tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
}
page_collection_unlock(pages);
}
@@ -1229,7 +1206,7 @@ static void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
}
assert_page_locked(p);
tb_invalidate_phys_page_range__locked(pages, p, start, start + len - 1, ra);
tb_invalidate_phys_page_range__locked(pages, p, start, start + len, ra);
}
/*
@@ -1243,7 +1220,7 @@ void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
{
struct page_collection *pages;
pages = page_collection_lock(ram_addr, ram_addr + size - 1);
pages = page_collection_lock(ram_addr, ram_addr + size);
tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr);
page_collection_unlock(pages);
}


@@ -89,20 +89,7 @@ void icount_handle_deadline(void)
}
}
/* Distribute the budget evenly across all CPUs */
int64_t icount_percpu_budget(int cpu_count)
{
int64_t limit = icount_get_limit();
int64_t timeslice = limit / cpu_count;
if (timeslice == 0) {
timeslice = limit;
}
return timeslice;
}
void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
void icount_prepare_for_run(CPUState *cpu)
{
int insns_left;
@@ -114,13 +101,13 @@ void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
g_assert(cpu->icount_extra == 0);
replay_mutex_lock();
cpu->icount_budget = MIN(icount_get_limit(), cpu_budget);
cpu->icount_budget = icount_get_limit();
insns_left = MIN(0xffff, cpu->icount_budget);
cpu_neg(cpu)->icount_decr.u16.low = insns_left;
cpu->icount_extra = cpu->icount_budget - insns_left;
replay_mutex_lock();
if (cpu->icount_budget == 0) {
/*
* We're called without the iothread lock, so must take it while

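Note: icount_percpu_budget above splits one global instruction budget evenly across vCPUs, and falls back to the full budget when integer division would produce a zero timeslice. The arithmetic in isolation (names invented):

#include <stdint.h>
#include <assert.h>

static int64_t percpu_budget(int64_t limit, int cpu_count)
{
    int64_t timeslice = limit / cpu_count;

    /* With more CPUs than remaining instructions, a zero slice would
     * starve every CPU; hand each one the full (tiny) budget instead. */
    return timeslice ? timeslice : limit;
}

int main(void)
{
    assert(percpu_budget(1000, 4) == 250);
    assert(percpu_budget(3, 8) == 3);
    return 0;
}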

@@ -11,8 +11,7 @@
#define TCG_ACCEL_OPS_ICOUNT_H
void icount_handle_deadline(void);
void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget);
int64_t icount_percpu_budget(int cpu_count);
void icount_prepare_for_run(CPUState *cpu);
void icount_process_data(CPUState *cpu);
void icount_handle_interrupt(CPUState *cpu, int mask);


@@ -32,7 +32,7 @@
#include "qemu/guest-random.h"
#include "exec/exec-all.h"
#include "hw/boards.h"
#include "tcg/tcg.h"
#include "tcg-accel-ops.h"
#include "tcg-accel-ops-mttcg.h"
@@ -119,7 +119,7 @@ static void *mttcg_cpu_thread_fn(void *arg)
}
}
qatomic_set_mb(&cpu->exit_request, 0);
qatomic_mb_set(&cpu->exit_request, 0);
qemu_wait_io_event(cpu);
} while (!cpu->unplug || cpu_can_run(cpu));
@@ -152,4 +152,8 @@ void mttcg_start_vcpu_thread(CPUState *cpu)
qemu_thread_create(cpu->thread, thread_name, mttcg_cpu_thread_fn,
cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
}


@@ -24,7 +24,6 @@
*/
#include "qemu/osdep.h"
#include "qemu/lockable.h"
#include "sysemu/tcg.h"
#include "sysemu/replay.h"
#include "sysemu/cpu-timers.h"
@@ -32,7 +31,7 @@
#include "qemu/notify.h"
#include "qemu/guest-random.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "tcg-accel-ops.h"
#include "tcg-accel-ops-rr.h"
#include "tcg-accel-ops-icount.h"
@@ -72,13 +71,11 @@ static void rr_kick_next_cpu(void)
{
CPUState *cpu;
do {
cpu = qatomic_read(&rr_current_cpu);
cpu = qatomic_mb_read(&rr_current_cpu);
if (cpu) {
cpu_exit(cpu);
}
/* Finish kicking this cpu before reading again. */
smp_mb();
} while (cpu != qatomic_read(&rr_current_cpu));
} while (cpu != qatomic_mb_read(&rr_current_cpu));
}
static void rr_kick_thread(void *opaque)
@@ -142,33 +139,6 @@ static void rr_force_rcu(Notifier *notify, void *data)
rr_kick_next_cpu();
}
/*
* Calculate the number of CPUs that we will process in a single iteration of
* the main CPU thread loop so that we can fairly distribute the instruction
* count across CPUs.
*
* The CPU count is cached based on the CPU list generation ID to avoid
* iterating the list every time.
*/
static int rr_cpu_count(void)
{
static unsigned int last_gen_id = ~0;
static int cpu_count;
CPUState *cpu;
QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
if (cpu_list_generation_id_get() != last_gen_id) {
cpu_count = 0;
CPU_FOREACH(cpu) {
++cpu_count;
}
last_gen_id = cpu_list_generation_id_get();
}
return cpu_count;
}
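Note: rr_cpu_count above hides an O(n) list walk behind a generation counter: the count is recomputed only when the CPU-list generation ID has moved since the last call. The same memoization pattern in miniature (names invented):

/* Recompute an expensive value only when a generation id changes. */
static int cached_count(unsigned gen_id, int (*recount)(void))
{
    static unsigned last_gen = ~0u;
    static int count;

    if (gen_id != last_gen) {
        count = recount();
        last_gen = gen_id;
    }
    return count;
}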
/*
* In the single-threaded case each vCPU is simulated in turn. If
* there is more than a single vCPU we create a simple timer to kick
@@ -215,16 +185,11 @@ static void *rr_cpu_thread_fn(void *arg)
cpu->exit_request = 1;
while (1) {
/* Only used for icount_enabled() */
int64_t cpu_budget = 0;
qemu_mutex_unlock_iothread();
replay_mutex_lock();
qemu_mutex_lock_iothread();
if (icount_enabled()) {
int cpu_count = rr_cpu_count();
/* Account partial waits to QEMU_CLOCK_VIRTUAL. */
icount_account_warp_timer();
/*
@@ -232,8 +197,6 @@ static void *rr_cpu_thread_fn(void *arg)
* waking up the I/O thread and waiting for completion.
*/
icount_handle_deadline();
cpu_budget = icount_percpu_budget(cpu_count);
}
replay_mutex_unlock();
@@ -243,9 +206,8 @@ static void *rr_cpu_thread_fn(void *arg)
}
while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) {
/* Store rr_current_cpu before evaluating cpu_can_run(). */
qatomic_set_mb(&rr_current_cpu, cpu);
qatomic_mb_set(&rr_current_cpu, cpu);
current_cpu = cpu;
qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
@@ -256,7 +218,7 @@ static void *rr_cpu_thread_fn(void *arg)
qemu_mutex_unlock_iothread();
if (icount_enabled()) {
icount_prepare_for_run(cpu, cpu_budget);
icount_prepare_for_run(cpu);
}
r = tcg_cpus_exec(cpu);
if (icount_enabled()) {
@@ -283,11 +245,11 @@ static void *rr_cpu_thread_fn(void *arg)
cpu = CPU_NEXT(cpu);
} /* while (cpu && !cpu->exit_request).. */
/* Does not need a memory barrier because a spurious wakeup is okay. */
/* Does not need qatomic_mb_set because a spurious wakeup is okay. */
qatomic_set(&rr_current_cpu, NULL);
if (cpu && cpu->exit_request) {
qatomic_set_mb(&cpu->exit_request, 0);
qatomic_mb_set(&cpu->exit_request, 0);
}
if (icount_enabled() && all_cpu_threads_idle()) {
@@ -329,6 +291,9 @@ void rr_start_vcpu_thread(CPUState *cpu)
single_tcg_halt_cond = cpu->halt_cond;
single_tcg_cpu_thread = cpu->thread;
#ifdef _WIN32
cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
} else {
/* we share the thread */
cpu->thread = single_tcg_cpu_thread;


@@ -31,7 +31,6 @@
#include "sysemu/cpu-timers.h"
#include "qemu/main-loop.h"
#include "qemu/guest-random.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/hwaddr.h"
#include "exec/gdbstub.h"
@@ -45,21 +44,10 @@
void tcg_cpu_init_cflags(CPUState *cpu, bool parallel)
{
uint32_t cflags;
/*
* Include the cluster number in the hash we use to look up TBs.
* This is important because a TB that is valid for one cluster at
* a given physical address and set of CPU flags is not necessarily
* valid for another:
* the two clusters may have different views of physical memory, or
* may have different CPU features (eg FPU present or absent).
*/
cflags = cpu->cluster_index << CF_CLUSTER_SHIFT;
uint32_t cflags = cpu->cluster_index << CF_CLUSTER_SHIFT;
cflags |= parallel ? CF_PARALLEL : 0;
cflags |= icount_enabled() ? CF_USE_ICOUNT : 0;
cpu->tcg_cflags |= cflags;
cpu->tcg_cflags = cflags;
}
void tcg_cpus_destroy(CPUState *cpu)
@@ -70,10 +58,20 @@ void tcg_cpus_destroy(CPUState *cpu)
int tcg_cpus_exec(CPUState *cpu)
{
int ret;
#ifdef CONFIG_PROFILER
int64_t ti;
#endif
assert(tcg_enabled());
#ifdef CONFIG_PROFILER
ti = profile_getclock();
#endif
cpu_exec_start(cpu);
ret = cpu_exec(cpu);
cpu_exec_end(cpu);
#ifdef CONFIG_PROFILER
qatomic_set(&tcg_ctx->prof.cpu_exec_time,
tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti);
#endif
return ret;
}
@@ -118,7 +116,7 @@ static inline int xlat_gdb_type(CPUState *cpu, int gdbtype)
return cputype;
}
static int tcg_insert_breakpoint(CPUState *cs, int type, vaddr addr, vaddr len)
static int tcg_insert_breakpoint(CPUState *cs, int type, hwaddr addr, hwaddr len)
{
CPUState *cpu;
int err = 0;
@@ -149,7 +147,7 @@ static int tcg_insert_breakpoint(CPUState *cs, int type, vaddr addr, vaddr len)
}
}
static int tcg_remove_breakpoint(CPUState *cs, int type, vaddr addr, vaddr len)
static int tcg_remove_breakpoint(CPUState *cs, int type, hwaddr addr, hwaddr len)
{
CPUState *cpu;
int err = 0;


@@ -25,14 +25,12 @@
#include "qemu/osdep.h"
#include "sysemu/tcg.h"
#include "exec/replay-core.h"
#include "sysemu/replay.h"
#include "sysemu/cpu-timers.h"
#include "tcg/tcg.h"
#include "tcg/oversized-guest.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/accel.h"
#include "qemu/atomic.h"
#include "qapi/qapi-builtin-visit.h"
#include "qemu/units.h"
#if !defined(CONFIG_USER_ONLY)
@@ -44,7 +42,6 @@ struct TCGState {
AccelState parent_obj;
bool mttcg_enabled;
bool one_insn_per_tb;
int splitwx_enabled;
unsigned long tb_size;
};
@@ -64,23 +61,37 @@ DECLARE_INSTANCE_CHECKER(TCGState, TCG_STATE,
* they can set the appropriate CONFIG flags in ${target}-softmmu.mak
*
* Once a guest architecture has been converted to the new primitives
* there is one remaining limitation to check:
* - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
* there are two remaining limitations to check.
*
* - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
* - The host must have a stronger memory order than the guest
*
* It may be possible in future to support strong guests on weak hosts
* but that will require tagging all load/stores in a guest with their
* implicit memory order requirements which would likely slow things
* down a lot.
*/
static bool check_tcg_memory_orders_compatible(void)
{
#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
#else
return false;
#endif
}
static bool default_mttcg_enabled(void)
{
if (icount_enabled() || TCG_OVERSIZED_GUEST) {
return false;
}
} else {
#ifdef TARGET_SUPPORTS_MTTCG
# ifndef TCG_GUEST_DEFAULT_MO
# error "TARGET_SUPPORTS_MTTCG without TCG_GUEST_DEFAULT_MO"
# endif
return true;
return check_tcg_memory_orders_compatible();
#else
return false;
return false;
#endif
}
}
static void tcg_accel_instance_init(Object *obj)
@@ -98,7 +109,6 @@ static void tcg_accel_instance_init(Object *obj)
}
bool mttcg_enabled;
bool one_insn_per_tb;
static int tcg_init_machine(MachineState *ms)
{
@@ -148,6 +158,11 @@ static void tcg_set_thread(Object *obj, const char *value, Error **errp)
warn_report("Guest not yet converted to MTTCG - "
"you may get unexpected results");
#endif
if (!check_tcg_memory_orders_compatible()) {
warn_report("Guest expects a stronger memory ordering "
"than the host provides");
error_printf("This may cause strange/hard to debug errors\n");
}
s->mttcg_enabled = true;
}
} else if (strcmp(value, "single") == 0) {
@@ -193,20 +208,6 @@ static void tcg_set_splitwx(Object *obj, bool value, Error **errp)
s->splitwx_enabled = value;
}
static bool tcg_get_one_insn_per_tb(Object *obj, Error **errp)
{
TCGState *s = TCG_STATE(obj);
return s->one_insn_per_tb;
}
static void tcg_set_one_insn_per_tb(Object *obj, bool value, Error **errp)
{
TCGState *s = TCG_STATE(obj);
s->one_insn_per_tb = value;
/* Set the global also: this changes the behaviour */
qatomic_set(&one_insn_per_tb, value);
}
static int tcg_gdbstub_supported_sstep_flags(void)
{
/*
@@ -244,12 +245,6 @@ static void tcg_accel_class_init(ObjectClass *oc, void *data)
tcg_get_splitwx, tcg_set_splitwx);
object_class_property_set_description(oc, "split-wx",
"Map jit pages into separate RW and RX regions");
object_class_property_add_bool(oc, "one-insn-per-tb",
tcg_get_one_insn_per_tb,
tcg_set_one_insn_per_tb);
object_class_property_set_description(oc, "one-insn-per-tb",
"Only put one guest insn in each translation block");
}
static const TypeInfo tcg_accel_type = {

View File

@@ -20,7 +20,7 @@
#include "qemu/osdep.h"
#include "qemu/host-utils.h"
#include "cpu.h"
#include "exec/helper-proto-common.h"
#include "exec/helper-proto.h"
#include "tcg/tcg-gvec-desc.h"
@@ -550,17 +550,6 @@ void HELPER(gvec_ands)(void *d, void *a, uint64_t b, uint32_t desc)
clear_high(d, oprsz, desc);
}
void HELPER(gvec_andcs)(void *d, void *a, uint64_t b, uint32_t desc)
{
intptr_t oprsz = simd_oprsz(desc);
intptr_t i;
for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
*(uint64_t *)(d + i) = *(uint64_t *)(a + i) & ~b;
}
clear_high(d, oprsz, desc);
}
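gvec_andcs applies d = a & ~b lane-wise with a scalar second operand. A tiny worked example of the same and-with-complement, clearing the low byte of each 64-bit lane:
#include <stdint.h>
#include <stdio.h>
int main(void)
{
    uint64_t a[2] = { 0x1122334455667788ull, 0xffffffffffffffffull };
    uint64_t b = 0xff;                  /* scalar operand */
    uint64_t d[2];
    for (int i = 0; i < 2; i++) {
        d[i] = a[i] & ~b;               /* andc: keep a except b's bits */
    }
    printf("%016llx %016llx\n",
           (unsigned long long)d[0], (unsigned long long)d[1]);
    /* prints: 1122334455667700 ffffffffffffff00 */
    return 0;
}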
void HELPER(gvec_xors)(void *d, void *a, uint64_t b, uint32_t desc)
{
intptr_t oprsz = simd_oprsz(desc);

View File

@@ -24,17 +24,13 @@
#include "qemu/osdep.h"
#include "qemu/host-utils.h"
#include "cpu.h"
#include "exec/helper-proto-common.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "disas/disas.h"
#include "exec/log.h"
#include "tcg/tcg.h"
#define HELPER_H "accel/tcg/tcg-runtime.h"
#include "exec/helper-info.c.inc"
#undef HELPER_H
/* 32-bit helpers */
int32_t HELPER(div_i32)(int32_t arg1, int32_t arg2)

View File

@@ -39,63 +39,62 @@ DEF_HELPER_FLAGS_1(exit_atomic, TCG_CALL_NO_WG, noreturn, env)
DEF_HELPER_FLAGS_3(memset, TCG_CALL_NO_RWG, ptr, ptr, int, ptr)
#endif /* IN_HELPER_PROTO */
DEF_HELPER_FLAGS_3(ld_i128, TCG_CALL_NO_WG, i128, env, i64, i32)
DEF_HELPER_FLAGS_4(st_i128, TCG_CALL_NO_WG, void, env, i64, i128, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgb, TCG_CALL_NO_WG,
i32, env, i64, i32, i32, i32)
i32, env, tl, i32, i32, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgw_be, TCG_CALL_NO_WG,
i32, env, i64, i32, i32, i32)
i32, env, tl, i32, i32, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgw_le, TCG_CALL_NO_WG,
i32, env, i64, i32, i32, i32)
i32, env, tl, i32, i32, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgl_be, TCG_CALL_NO_WG,
i32, env, i64, i32, i32, i32)
i32, env, tl, i32, i32, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgl_le, TCG_CALL_NO_WG,
i32, env, i64, i32, i32, i32)
i32, env, tl, i32, i32, i32)
#ifdef CONFIG_ATOMIC64
DEF_HELPER_FLAGS_5(atomic_cmpxchgq_be, TCG_CALL_NO_WG,
i64, env, i64, i64, i64, i32)
i64, env, tl, i64, i64, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG,
i64, env, i64, i64, i64, i32)
i64, env, tl, i64, i64, i32)
#endif
#if HAVE_CMPXCHG128
#ifdef CONFIG_CMPXCHG128
DEF_HELPER_FLAGS_5(atomic_cmpxchgo_be, TCG_CALL_NO_WG,
i128, env, i64, i128, i128, i32)
i128, env, tl, i128, i128, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgo_le, TCG_CALL_NO_WG,
i128, env, i64, i128, i128, i32)
i128, env, tl, i128, i128, i32)
#endif
DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo, TCG_CALL_NO_WG,
i128, env, i64, i128, i128, i32)
DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo_be, TCG_CALL_NO_WG,
i128, env, tl, i128, i128, i32)
DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo_le, TCG_CALL_NO_WG,
i128, env, tl, i128, i128, i32)
#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPERS(NAME) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_le), \
TCG_CALL_NO_WG, i64, env, i64, i64, i32) \
TCG_CALL_NO_WG, i64, env, tl, i64, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_be), \
TCG_CALL_NO_WG, i64, env, i64, i64, i32)
TCG_CALL_NO_WG, i64, env, tl, i64, i32)
#else
#define GEN_ATOMIC_HELPERS(NAME) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \
TCG_CALL_NO_WG, i32, env, i64, i32, i32)
TCG_CALL_NO_WG, i32, env, tl, i32, i32)
#endif /* CONFIG_ATOMIC64 */
GEN_ATOMIC_HELPERS(fetch_add)
@@ -218,7 +217,6 @@ DEF_HELPER_FLAGS_4(gvec_nor, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_eqv, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ands, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_andcs, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_xors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_ors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)

View File

@@ -19,6 +19,7 @@
#include "qemu/osdep.h"
#define NO_CPU_IO_DEFS
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
@@ -46,12 +47,11 @@
#include "exec/cputlb.h"
#include "exec/translate-all.h"
#include "exec/translator.h"
#include "exec/tb-flush.h"
#include "qemu/bitmap.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "qemu/cacheinfo.h"
#include "qemu/timer.h"
#include "exec/log.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
@@ -63,15 +63,17 @@
#include "tb-context.h"
#include "internal.h"
#include "perf.h"
#include "tcg/insn-start-words.h"
/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
sizeof_field(TranslationBlock, trace_vcpu_dstate)
* BITS_PER_BYTE);
TBContext tb_ctx;
/*
* Encode VAL as a signed leb128 sequence at P.
* Return P incremented past the encoded value.
*/
static uint8_t *encode_sleb128(uint8_t *p, int64_t val)
/* Encode VAL as a signed leb128 sequence at P.
Return P incremented past the encoded value. */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
int more, byte;
@@ -89,23 +91,21 @@ static uint8_t *encode_sleb128(uint8_t *p, int64_t val)
return p;
}
/*
* Decode a signed leb128 sequence at *PP; increment *PP past the
* decoded value. Return the decoded value.
*/
static int64_t decode_sleb128(const uint8_t **pp)
/* Decode a signed leb128 sequence at *PP; increment *PP past the
decoded value. Return the decoded value. */
static target_long decode_sleb128(const uint8_t **pp)
{
const uint8_t *p = *pp;
int64_t val = 0;
target_long val = 0;
int byte, shift = 0;
do {
byte = *p++;
val |= (int64_t)(byte & 0x7f) << shift;
val |= (target_ulong)(byte & 0x7f) << shift;
shift += 7;
} while (byte & 0x80);
if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
val |= -(int64_t)1 << shift;
val |= -(target_ulong)1 << shift;
}
*pp = p;
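For reference, the same signed LEB128 scheme as a standalone round-trip. This is a simplified restatement of the two routines above, fixed to int64_t instead of target_long; the encoder body follows the standard algorithm, since the hunk shows only its frame.
#include <stdint.h>
#include <stdio.h>
static uint8_t *encode_sleb128(uint8_t *p, int64_t val)
{
    int more;
    do {
        int byte = val & 0x7f;
        val >>= 7;      /* arithmetic shift on all mainstream compilers */
        more = !((val == 0 && !(byte & 0x40)) ||
                 (val == -1 && (byte & 0x40)));
        *p++ = byte | (more ? 0x80 : 0);
    } while (more);
    return p;
}
static int64_t decode_sleb128(const uint8_t **pp)
{
    const uint8_t *p = *pp;
    int64_t val = 0;
    int byte, shift = 0;
    do {
        byte = *p++;
        val |= (int64_t)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < 64 && (byte & 0x40)) {
        val |= -(int64_t)1 << shift;     /* sign-extend the top bits */
    }
    *pp = p;
    return val;
}
int main(void)
{
    uint8_t buf[16];
    const uint8_t *rp = buf;
    encode_sleb128(buf, -624485);        /* encodes as 9b f1 59 */
    printf("%lld\n", (long long)decode_sleb128(&rp));   /* -624485 */
    return 0;
}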
@@ -127,26 +127,22 @@ static int64_t decode_sleb128(const uint8_t **pp)
static int encode_search(TranslationBlock *tb, uint8_t *block)
{
uint8_t *highwater = tcg_ctx->code_gen_highwater;
uint64_t *insn_data = tcg_ctx->gen_insn_data;
uint16_t *insn_end_off = tcg_ctx->gen_insn_end_off;
uint8_t *p = block;
int i, j, n;
for (i = 0, n = tb->icount; i < n; ++i) {
uint64_t prev, curr;
target_ulong prev;
for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
if (i == 0) {
prev = (!(tb_cflags(tb) & CF_PCREL) && j == 0 ? tb->pc : 0);
prev = (!TARGET_TB_PCREL && j == 0 ? tb_pc(tb) : 0);
} else {
prev = insn_data[(i - 1) * TARGET_INSN_START_WORDS + j];
prev = tcg_ctx->gen_insn_data[i - 1][j];
}
curr = insn_data[i * TARGET_INSN_START_WORDS + j];
p = encode_sleb128(p, curr - prev);
p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
}
prev = (i == 0 ? 0 : insn_end_off[i - 1]);
curr = insn_end_off[i];
p = encode_sleb128(p, curr - prev);
prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);
/* Test for (pending) buffer overflow. The assumption is that any
one row beginning below the high water mark cannot overrun
@@ -174,8 +170,8 @@ static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
}
memset(data, 0, sizeof(uint64_t) * TARGET_INSN_START_WORDS);
if (!(tb_cflags(tb) & CF_PCREL)) {
data[0] = tb->pc;
if (!TARGET_TB_PCREL) {
data[0] = tb_pc(tb);
}
/*
@@ -202,6 +198,10 @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
uintptr_t host_pc)
{
uint64_t data[TARGET_INSN_START_WORDS];
#ifdef CONFIG_PROFILER
TCGProfile *prof = &tcg_ctx->prof;
int64_t ti = profile_getclock();
#endif
int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data);
if (insns_left < 0) {
@@ -218,6 +218,12 @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
}
cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data);
#ifdef CONFIG_PROFILER
qatomic_set(&prof->restore_time,
prof->restore_time + profile_getclock() - ti);
qatomic_set(&prof->restore_count, prof->restore_count + 1);
#endif
}
bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc)
@@ -264,7 +270,7 @@ void page_init(void)
* Return the size of the generated code, or negative on error.
*/
static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
vaddr pc, void *host_pc,
target_ulong pc, void *host_pc,
int *max_insns, int64_t *ti)
{
int ret = sigsetjmp(tcg_ctx->jmp_trans, 0);
@@ -275,24 +281,34 @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
tcg_func_start(tcg_ctx);
tcg_ctx->cpu = env_cpu(env);
gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc);
gen_intermediate_code(env_cpu(env), tb, *max_insns, pc, host_pc);
assert(tb->size != 0);
tcg_ctx->cpu = NULL;
*max_insns = tb->icount;
#ifdef CONFIG_PROFILER
qatomic_set(&tcg_ctx->prof.tb_count, tcg_ctx->prof.tb_count + 1);
qatomic_set(&tcg_ctx->prof.interm_time,
tcg_ctx->prof.interm_time + profile_getclock() - *ti);
*ti = profile_getclock();
#endif
return tcg_gen_code(tcg_ctx, tb, pc);
}
/* Called with mmap_lock held for user mode emulation. */
TranslationBlock *tb_gen_code(CPUState *cpu,
vaddr pc, uint64_t cs_base,
target_ulong pc, target_ulong cs_base,
uint32_t flags, int cflags)
{
CPUArchState *env = cpu->env_ptr;
TranslationBlock *tb, *existing_tb;
tb_page_addr_t phys_pc, phys_p2;
tb_page_addr_t phys_pc;
tcg_insn_unit *gen_code_buf;
int gen_code_size, search_size, max_insns;
#ifdef CONFIG_PROFILER
TCGProfile *prof = &tcg_ctx->prof;
#endif
int64_t ti;
void *host_pc;
@@ -313,7 +329,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);
buffer_overflow:
assert_no_pages_locked();
tb = tcg_tb_alloc(tcg_ctx);
if (unlikely(!tb)) {
/* flush must be done */
@@ -326,35 +341,24 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
gen_code_buf = tcg_ctx->code_gen_ptr;
tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
if (!(cflags & CF_PCREL)) {
tb->pc = pc;
}
#if !TARGET_TB_PCREL
tb->pc = pc;
#endif
tb->cs_base = cs_base;
tb->flags = flags;
tb->cflags = cflags;
tb->trace_vcpu_dstate = *cpu->trace_dstate;
tb_set_page_addr0(tb, phys_pc);
tb_set_page_addr1(tb, -1);
if (phys_pc != -1) {
tb_lock_page0(phys_pc);
}
tcg_ctx->gen_tb = tb;
tcg_ctx->addr_type = TARGET_LONG_BITS == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64;
#ifdef CONFIG_SOFTMMU
tcg_ctx->page_bits = TARGET_PAGE_BITS;
tcg_ctx->page_mask = TARGET_PAGE_MASK;
tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS;
tcg_ctx->tlb_fast_offset =
(int)offsetof(ArchCPU, neg.tlb.f) - (int)offsetof(ArchCPU, env);
#endif
tcg_ctx->insn_start_words = TARGET_INSN_START_WORDS;
#ifdef TCG_GUEST_DEFAULT_MO
tcg_ctx->guest_mo = TCG_GUEST_DEFAULT_MO;
#else
tcg_ctx->guest_mo = TCG_MO_ALL;
tb_overflow:
#ifdef CONFIG_PROFILER
/* includes aborted translations because of exceptions */
qatomic_set(&prof->tb_count1, prof->tb_count1 + 1);
ti = profile_getclock();
#endif
restart_translate:
trace_translate_block(tb, pc, tb->tc.ptr);
gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti);
@@ -373,8 +377,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
"Restarting code generation for "
"code_gen_buffer overflow\n");
tb_unlock_pages(tb);
tcg_ctx->gen_tb = NULL;
goto buffer_overflow;
case -2:
@@ -393,49 +395,32 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
"Restarting code generation with "
"smaller translation block (max %d insns)\n",
max_insns);
/*
* The half-sized TB may not cross pages.
* TODO: Fix all targets that cross pages except with
* the first insn, at which point this can't be reached.
*/
phys_p2 = tb_page_addr1(tb);
if (unlikely(phys_p2 != -1)) {
tb_unlock_page1(phys_pc, phys_p2);
tb_set_page_addr1(tb, -1);
}
goto restart_translate;
case -3:
/*
* We had a page lock ordering problem. In order to avoid
* deadlock we had to drop the lock on page0, which means
* that everything we translated so far is compromised.
* Restart with locks held on both pages.
*/
qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
"Restarting code generation with re-locked pages");
goto restart_translate;
goto tb_overflow;
default:
g_assert_not_reached();
}
}
tcg_ctx->gen_tb = NULL;
search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
if (unlikely(search_size < 0)) {
tb_unlock_pages(tb);
goto buffer_overflow;
}
tb->tc.size = gen_code_size;
/*
* For CF_PCREL, attribute all executions of the generated code
* to its first mapping.
* For TARGET_TB_PCREL, attribute all executions of the generated
* code to its first mapping.
*/
perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf));
#ifdef CONFIG_PROFILER
qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
qatomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
qatomic_set(&prof->search_out_len, prof->search_out_len + search_size);
#endif
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
qemu_log_in_addr_range(pc)) {
FILE *logfile = qemu_log_trylock();
@@ -458,8 +443,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
/* Dump header and the first instruction */
fprintf(logfile, "OUT: [size=%d]\n", gen_code_size);
fprintf(logfile,
" -- guest addr 0x%016" PRIx64 " + tb prologue\n",
tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
" -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n",
tcg_ctx->gen_insn_data[insn][0]);
chunk_start = tcg_ctx->gen_insn_end_off[insn];
disas(logfile, tb->tc.ptr, chunk_start);
@@ -471,8 +456,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
while (insn < tb->icount) {
size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
if (chunk_end > chunk_start) {
fprintf(logfile, " -- guest addr 0x%016" PRIx64 "\n",
tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
fprintf(logfile, " -- guest addr 0x" TARGET_FMT_lx "\n",
tcg_ctx->gen_insn_data[insn][0]);
disas(logfile, tb->tc.ptr + chunk_start,
chunk_end - chunk_start);
chunk_start = chunk_end;
@@ -508,6 +493,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
qemu_log_unlock(logfile);
}
}
#endif
qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
@@ -535,7 +521,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
* before attempting to link to other TBs or add to the lookup table.
*/
if (tb_page_addr0(tb) == -1) {
assert_no_pages_locked();
return tb;
}
@@ -550,9 +535,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
* No explicit memory barrier is required -- tb_link_page() makes the
* TB visible in a consistent state.
*/
existing_tb = tb_link_page(tb);
assert_no_pages_locked();
existing_tb = tb_link_page(tb, tb_page_addr0(tb), tb_page_addr1(tb));
/* if the TB already exists, discard what we just translated */
if (unlikely(existing_tb != tb)) {
uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
@@ -581,15 +564,14 @@ void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
/* The exception probably happened in a helper. The CPU state should
have been saved before calling it. Fetch the PC from there. */
CPUArchState *env = cpu->env_ptr;
vaddr pc;
uint64_t cs_base;
target_ulong pc, cs_base;
tb_page_addr_t addr;
uint32_t flags;
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
addr = get_page_addr_code(env, pc);
if (addr != -1) {
tb_invalidate_phys_range(addr, addr);
tb_invalidate_phys_range(addr, addr + 1);
}
}
}
@@ -636,10 +618,10 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;
if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
vaddr pc = log_pc(cpu, tb);
target_ulong pc = log_pc(cpu, tb);
if (qemu_log_in_addr_range(pc)) {
qemu_log("cpu_io_recompile: rewound execution of TB to %016"
VADDR_PRIx "\n", pc);
qemu_log("cpu_io_recompile: rewound execution of TB to "
TARGET_FMT_lx "\n", pc);
}
}

View File

@@ -8,116 +8,30 @@
*/
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "exec/exec-all.h"
#include "exec/gen-icount.h"
#include "exec/log.h"
#include "exec/translator.h"
#include "exec/plugin-gen.h"
#include "tcg/tcg-op-common.h"
#include "internal.h"
#include "sysemu/replay.h"
static void gen_io_start(void)
/* Pairs with tcg_clear_temp_count.
To be called by #TranslatorOps.{translate_insn,tb_stop} if
(1) the target is sufficiently clean to support reporting,
(2) as and when all temporaries are known to be consumed.
For most targets, (2) is at the end of translate_insn. */
void translator_loop_temp_check(DisasContextBase *db)
{
tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
offsetof(ArchCPU, parent_obj.can_do_io) -
offsetof(ArchCPU, env));
}
bool translator_io_start(DisasContextBase *db)
{
uint32_t cflags = tb_cflags(db->tb);
if (!(cflags & CF_USE_ICOUNT)) {
return false;
}
if (db->num_insns == db->max_insns && (cflags & CF_LAST_IO)) {
/* Already started in translator_loop. */
return true;
}
gen_io_start();
/*
* Ensure that this instruction will be the last in the TB.
* The target may override this to something more forceful.
*/
if (db->is_jmp == DISAS_NEXT) {
db->is_jmp = DISAS_TOO_MANY;
}
return true;
}
static TCGOp *gen_tb_start(uint32_t cflags)
{
TCGv_i32 count = tcg_temp_new_i32();
TCGOp *icount_start_insn = NULL;
tcg_gen_ld_i32(count, cpu_env,
offsetof(ArchCPU, neg.icount_decr.u32) -
offsetof(ArchCPU, env));
if (cflags & CF_USE_ICOUNT) {
/*
* We emit a sub with a dummy immediate argument. Keep the insn index
* of the sub so that we later (when we know the actual insn count)
* can update the argument with the actual insn count.
*/
tcg_gen_sub_i32(count, count, tcg_constant_i32(0));
icount_start_insn = tcg_last_op();
}
/*
* Emit the check against icount_decr.u32 to see if we should exit
* unless we suppress the check with CF_NOIRQ. If we are using
* icount and have suppressed interruption the higher level code
* should have ensured we don't run more instructions than the
* budget.
*/
if (cflags & CF_NOIRQ) {
tcg_ctx->exitreq_label = NULL;
} else {
tcg_ctx->exitreq_label = gen_new_label();
tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, tcg_ctx->exitreq_label);
}
if (cflags & CF_USE_ICOUNT) {
tcg_gen_st16_i32(count, cpu_env,
offsetof(ArchCPU, neg.icount_decr.u16.low) -
offsetof(ArchCPU, env));
/*
* cpu->can_do_io is cleared automatically here at the beginning of
* each translation block. The cost is minimal and only paid for
* -icount, plus it would be very easy to forget doing it in the
* translator. Doing it here means we don't need a gen_io_end() to
* go with gen_io_start().
*/
tcg_gen_st_i32(tcg_constant_i32(0), cpu_env,
offsetof(ArchCPU, parent_obj.can_do_io) -
offsetof(ArchCPU, env));
}
return icount_start_insn;
}
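gen_tb_start() emits the icount subtraction with a dummy immediate and records the op so gen_tb_end() can patch in the real instruction count once it is known. The same emit-placeholder-then-patch pattern in miniature, over a toy op array rather than TCG's IR:
#include <stdio.h>
typedef struct { const char *name; int imm; } Op;
int main(void)
{
    Op prog[8];
    int n = 0;
    prog[n++] = (Op){ "sub_icount", 0 };   /* dummy immediate for now */
    int patch_idx = n - 1;                 /* remember which op to fix */
    prog[n++] = (Op){ "guest_insn_a", 0 };
    prog[n++] = (Op){ "guest_insn_b", 0 };
    prog[patch_idx].imm = n - 1;           /* now the count is known */
    for (int i = 0; i < n; i++) {
        printf("%-12s %d\n", prog[i].name, prog[i].imm);
    }
    return 0;
}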
static void gen_tb_end(const TranslationBlock *tb, uint32_t cflags,
TCGOp *icount_start_insn, int num_insns)
{
if (cflags & CF_USE_ICOUNT) {
/*
* Update the num_insn immediate parameter now that we know
* the actual insn count.
*/
tcg_set_insn_param(icount_start_insn, 2,
tcgv_i32_arg(tcg_constant_i32(num_insns)));
}
if (tcg_ctx->exitreq_label) {
gen_set_label(tcg_ctx->exitreq_label);
tcg_gen_exit_tb(tb, TB_EXIT_REQUESTED);
if (tcg_check_temp_count()) {
qemu_log("warning: TCG temporary leaks before "
TARGET_FMT_lx "\n", db->pc_next);
}
}
bool translator_use_goto_tb(DisasContextBase *db, vaddr dest)
bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
{
/* Suppress goto_tb if requested. */
if (tb_cflags(db->tb) & CF_NO_GOTO_TB) {
@@ -128,12 +42,11 @@ bool translator_use_goto_tb(DisasContextBase *db, vaddr dest)
return ((db->pc_first ^ dest) & TARGET_PAGE_MASK) == 0;
}
void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
vaddr pc, void *host_pc, const TranslatorOps *ops,
DisasContextBase *db)
void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
target_ulong pc, void *host_pc,
const TranslatorOps *ops, DisasContextBase *db)
{
uint32_t cflags = tb_cflags(tb);
TCGOp *icount_start_insn;
bool plugin_enabled;
/* Initialize DisasContext */
@@ -142,23 +55,30 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
db->pc_next = pc;
db->is_jmp = DISAS_NEXT;
db->num_insns = 0;
db->max_insns = *max_insns;
db->max_insns = max_insns;
db->singlestep_enabled = cflags & CF_SINGLE_STEP;
db->host_addr[0] = host_pc;
db->host_addr[1] = NULL;
#ifdef CONFIG_USER_ONLY
page_protect(pc);
#endif
ops->init_disas_context(db, cpu);
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
/* Reset the temp count so that we can identify leaks */
tcg_clear_temp_count();
/* Start translating. */
icount_start_insn = gen_tb_start(cflags);
gen_tb_start(db->tb);
ops->tb_start(db, cpu);
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
plugin_enabled = plugin_gen_tb_start(cpu, db, cflags & CF_MEMI_ONLY);
while (true) {
*max_insns = ++db->num_insns;
db->num_insns++;
ops->insn_start(db, cpu);
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
@@ -208,7 +128,7 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
/* Emit code to exit the TB, as indicated by db->is_jmp. */
ops->tb_stop(db, cpu);
gen_tb_end(tb, cflags, icount_start_insn, db->num_insns);
gen_tb_end(db->tb, db->num_insns);
if (plugin_enabled) {
plugin_gen_tb_end(cpu);
@@ -218,6 +138,7 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
tb->size = db->pc_next - db->pc_first;
tb->icount = db->num_insns;
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
&& qemu_log_in_addr_range(db->pc_first)) {
FILE *logfile = qemu_log_trylock();
@@ -228,13 +149,14 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
qemu_log_unlock(logfile);
}
}
#endif
}
static void *translator_access(CPUArchState *env, DisasContextBase *db,
vaddr pc, size_t len)
target_ulong pc, size_t len)
{
void *host;
vaddr base, end;
target_ulong base, end;
TranslationBlock *tb;
tb = db->tb;
@@ -252,36 +174,14 @@ static void *translator_access(CPUArchState *env, DisasContextBase *db,
host = db->host_addr[1];
base = TARGET_PAGE_ALIGN(db->pc_first);
if (host == NULL) {
tb_page_addr_t page0, old_page1, new_page1;
new_page1 = get_page_addr_code_hostp(env, base, &db->host_addr[1]);
/*
* If the second page is MMIO, treat as if the first page
* was MMIO as well, so that we do not cache the TB.
*/
if (unlikely(new_page1 == -1)) {
tb_unlock_pages(tb);
tb_set_page_addr0(tb, -1);
return NULL;
}
/*
* If this is not the first time around, and page1 matches,
* then we already have the page locked. Alternately, we're
* not doing anything to prevent the PTE from changing, so
* we might wind up with a different page, requiring us to
* re-do the locking.
*/
old_page1 = tb_page_addr1(tb);
if (likely(new_page1 != old_page1)) {
page0 = tb_page_addr0(tb);
if (unlikely(old_page1 != -1)) {
tb_unlock_page1(page0, old_page1);
}
tb_set_page_addr1(tb, new_page1);
tb_lock_page1(page0, new_page1);
}
tb_page_addr_t phys_page =
get_page_addr_code_hostp(env, base, &db->host_addr[1]);
/* We cannot handle MMIO as second page. */
assert(phys_page != -1);
tb_set_page_addr1(tb, phys_page);
#ifdef CONFIG_USER_ONLY
page_protect(end);
#endif
host = db->host_addr[1];
}
@@ -295,27 +195,6 @@ static void *translator_access(CPUArchState *env, DisasContextBase *db,
return host + (pc - base);
}
static void plugin_insn_append(abi_ptr pc, const void *from, size_t size)
{
#ifdef CONFIG_PLUGIN
struct qemu_plugin_insn *insn = tcg_ctx->plugin_insn;
abi_ptr off;
if (insn == NULL) {
return;
}
off = pc - insn->vaddr;
if (off < insn->data->len) {
g_byte_array_set_size(insn->data, off);
} else if (off > insn->data->len) {
/* we have an unexpected gap */
g_assert_not_reached();
}
insn->data = g_byte_array_append(insn->data, from, size);
#endif
}
uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
{
uint8_t ret;
@@ -374,8 +253,3 @@ uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
plugin_insn_append(pc, &plug, sizeof(ret));
return ret;
}
void translator_fake_ldb(uint8_t insn8, abi_ptr pc)
{
plugin_insn_append(pc, &insn8, sizeof(insn8));
}

View File

@@ -1,6 +1,6 @@
#include "qemu/osdep.h"
#include "hw/core/cpu.h"
#include "exec/replay-core.h"
#include "sysemu/replay.h"
bool enable_cpu_pm = false;

View File

@@ -144,7 +144,7 @@ typedef struct PageFlagsNode {
static IntervalTreeRoot pageflags_root;
static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
static PageFlagsNode *pageflags_find(target_ulong start, target_long last)
{
IntervalTreeNode *n;
@@ -153,7 +153,7 @@ static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
}
static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
target_ulong last)
target_long last)
{
IntervalTreeNode *n;
@@ -480,22 +480,24 @@ static bool pageflags_set_clear(target_ulong start, target_ulong last,
* The flag PAGE_WRITE_ORG is positioned automatically depending
* on PAGE_WRITE. The mmap_lock should already be held.
*/
void page_set_flags(target_ulong start, target_ulong last, int flags)
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
target_ulong last;
bool reset = false;
bool inval_tb = false;
/* This function should never be called with addresses outside the
guest address space. If this assert fires, it probably indicates
a missing call to h2g_valid. */
assert(start <= last);
assert(last <= GUEST_ADDR_MAX);
assert(start < end);
assert(end - 1 <= GUEST_ADDR_MAX);
/* Only set PAGE_ANON with new mappings. */
assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
assert_memory_lock();
start &= TARGET_PAGE_MASK;
last |= ~TARGET_PAGE_MASK;
start = start & TARGET_PAGE_MASK;
end = TARGET_PAGE_ALIGN(end);
last = end - 1;
if (!(flags & PAGE_VALID)) {
flags = 0;
@@ -508,7 +510,7 @@ void page_set_flags(target_ulong start, target_ulong last, int flags)
}
if (!flags || reset) {
page_reset_target_data(start, last);
page_reset_target_data(start, end);
inval_tb |= pageflags_unset(start, last);
}
if (flags) {
@@ -516,23 +518,23 @@ void page_set_flags(target_ulong start, target_ulong last, int flags)
~(reset ? 0 : PAGE_STICKY));
}
if (inval_tb) {
tb_invalidate_phys_range(start, last);
tb_invalidate_phys_range(start, end);
}
}
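A short arithmetic aside on the inclusive-range masking used by the new signature: start is rounded down to its page and last is rounded up to the final byte of its page with two plain bit operations. Assuming 4 KiB pages for illustration:
#include <stdint.h>
#include <stdio.h>
#define PG_BITS 12                               /* 4 KiB pages */
#define PG_MASK (~(uint64_t)((1u << PG_BITS) - 1))
int main(void)
{
    uint64_t start = 0x1234, last = 0x2345;
    start &= PG_MASK;          /* round down:           0x1000 */
    last  |= ~PG_MASK;         /* round up to page end: 0x2fff */
    printf("start=0x%llx last=0x%llx\n",
           (unsigned long long)start, (unsigned long long)last);
    return 0;
}
The older start/end form needed TARGET_PAGE_ALIGN(end) and an explicit end - 1; the inclusive form also avoids the wrap-around an exclusive end can hit when a range touches the very top of the address space.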
bool page_check_range(target_ulong start, target_ulong len, int flags)
int page_check_range(target_ulong start, target_ulong len, int flags)
{
target_ulong last;
int locked; /* tri-state: =0: unlocked, +1: global, -1: local */
bool ret;
int ret;
if (len == 0) {
return true; /* trivial length */
return 0; /* trivial length */
}
last = start + len - 1;
if (last < start) {
return false; /* wrap around */
return -1; /* wrap around */
}
locked = have_mmap_lock();
@@ -551,33 +553,33 @@ bool page_check_range(target_ulong start, target_ulong len, int flags)
p = pageflags_find(start, last);
}
if (!p) {
ret = false; /* entire region invalid */
ret = -1; /* entire region invalid */
break;
}
}
if (start < p->itree.start) {
ret = false; /* initial bytes invalid */
ret = -1; /* initial bytes invalid */
break;
}
missing = flags & ~p->flags;
if (missing & ~PAGE_WRITE) {
ret = false; /* page doesn't match */
if (missing & PAGE_READ) {
ret = -1; /* page not readable */
break;
}
if (missing & PAGE_WRITE) {
if (!(p->flags & PAGE_WRITE_ORG)) {
ret = false; /* page not writable */
ret = -1; /* page not writable */
break;
}
/* Asking about writable, but has been protected: undo. */
if (!page_unprotect(start, 0)) {
ret = false;
ret = -1;
break;
}
/* TODO: page_unprotect should take a range, not a single page. */
if (last - start < TARGET_PAGE_SIZE) {
ret = true; /* ok */
ret = 0; /* ok */
break;
}
start += TARGET_PAGE_SIZE;
@@ -585,7 +587,7 @@ bool page_check_range(target_ulong start, target_ulong len, int flags)
}
if (last <= p->itree.last) {
ret = true; /* ok */
ret = 0; /* ok */
break;
}
start = p->itree.last + 1;
@@ -598,54 +600,6 @@ bool page_check_range(target_ulong start, target_ulong len, int flags)
return ret;
}
bool page_check_range_empty(target_ulong start, target_ulong last)
{
assert(last >= start);
assert_memory_lock();
return pageflags_find(start, last) == NULL;
}
target_ulong page_find_range_empty(target_ulong min, target_ulong max,
target_ulong len, target_ulong align)
{
target_ulong len_m1, align_m1;
assert(min <= max);
assert(max <= GUEST_ADDR_MAX);
assert(len != 0);
assert(is_power_of_2(align));
assert_memory_lock();
len_m1 = len - 1;
align_m1 = align - 1;
/* Iteratively narrow the search region. */
while (1) {
PageFlagsNode *p;
/* Align min and double-check there's enough space remaining. */
min = (min + align_m1) & ~align_m1;
if (min > max) {
return -1;
}
if (len_m1 > max - min) {
return -1;
}
p = pageflags_find(min, min + len_m1);
if (p == NULL) {
/* Found! */
return min;
}
if (max <= p->itree.last) {
/* Existing allocation fills the remainder of the search region. */
return -1;
}
/* Skip across existing allocation. */
min = p->itree.last + 1;
}
}
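page_find_range_empty() above is a first-fit search that repeatedly aligns the candidate address and skips past whatever allocation it collides with. A toy model of the same narrowing loop, over a sorted array instead of an interval tree (names and sample intervals invented for the example):
#include <stdint.h>
#include <stdio.h>
typedef struct { uint64_t start, last; } Interval;   /* inclusive */
static uint64_t find_empty(const Interval *v, int n, uint64_t min,
                           uint64_t max, uint64_t len, uint64_t align)
{
    uint64_t len_m1 = len - 1, align_m1 = align - 1;
    int i = 0;
    for (;;) {
        min = (min + align_m1) & ~align_m1;          /* align up */
        if (min > max || len_m1 > max - min) {
            return (uint64_t)-1;                     /* out of room */
        }
        while (i < n && v[i].last < min) {
            i++;                                     /* behind us */
        }
        if (i == n || v[i].start > min + len_m1) {
            return min;                              /* gap found */
        }
        if (max <= v[i].last) {
            return (uint64_t)-1;
        }
        min = v[i].last + 1;                         /* skip collision */
    }
}
int main(void)
{
    Interval used[] = { { 0x1000, 0x2fff }, { 0x5000, 0x5fff } };
    printf("0x%llx\n", (unsigned long long)
           find_empty(used, 2, 0x1000, 0xffff, 0x2000, 0x1000)); /* 0x3000 */
    return 0;
}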
void page_protect(tb_page_addr_t address)
{
PageFlagsNode *p;
@@ -769,7 +723,7 @@ int page_unprotect(target_ulong address, uintptr_t pc)
return current_tb_invalidated ? 2 : 1;
}
static int probe_access_internal(CPUArchState *env, vaddr addr,
static int probe_access_internal(CPUArchState *env, target_ulong addr,
int fault_size, MMUAccessType access_type,
bool nonfault, uintptr_t ra)
{
@@ -793,10 +747,6 @@ static int probe_access_internal(CPUArchState *env, vaddr addr,
if (guest_addr_valid_untagged(addr)) {
int page_flags = page_get_flags(addr);
if (page_flags & acc_flag) {
if ((acc_flag == PAGE_READ || acc_flag == PAGE_WRITE)
&& cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
return TLB_MMIO;
}
return 0; /* success */
}
maperr = !(page_flags & PAGE_VALID);
@@ -811,31 +761,30 @@ static int probe_access_internal(CPUArchState *env, vaddr addr,
cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}
int probe_access_flags(CPUArchState *env, vaddr addr, int size,
int probe_access_flags(CPUArchState *env, target_ulong addr,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t ra)
{
int flags;
g_assert(-(addr | TARGET_PAGE_MASK) >= size);
flags = probe_access_internal(env, addr, size, access_type, nonfault, ra);
*phost = (flags & TLB_INVALID_MASK) ? NULL : g2h(env_cpu(env), addr);
flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra);
*phost = flags ? NULL : g2h(env_cpu(env), addr);
return flags;
}
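The guard g_assert(-(addr | TARGET_PAGE_MASK) >= size) above leans on a two's-complement identity: TARGET_PAGE_MASK is ~(page_size - 1), which equals -page_size, so addr | TARGET_PAGE_MASK is offset - page_size, and negating it gives page_size - offset, the bytes left in addr's page. A standalone check, assuming a 4 KiB page:
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#define PG_SIZE 4096u
#define PG_MASK (~(uintptr_t)(PG_SIZE - 1))
int main(void)
{
    uintptr_t addr = 0x12340ff0;            /* offset 0xff0 in its page */
    uintptr_t left = -(addr | PG_MASK);     /* bytes to end of page */
    printf("%zu\n", (size_t)left);          /* 16 */
    assert(left == PG_SIZE - (addr & (PG_SIZE - 1)));
    return 0;
}
So the assertion reads "the access must not cross a page boundary", which is what lets the lookup probe a single page.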
void *probe_access(CPUArchState *env, vaddr addr, int size,
void *probe_access(CPUArchState *env, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
int flags;
g_assert(-(addr | TARGET_PAGE_MASK) >= size);
flags = probe_access_internal(env, addr, size, access_type, false, ra);
g_assert((flags & ~TLB_MMIO) == 0);
g_assert(flags == 0);
return size ? g2h(env_cpu(env), addr) : NULL;
}
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
void **hostp)
{
int flags;
@@ -866,14 +815,15 @@ typedef struct TargetPageDataNode {
static IntervalTreeRoot targetdata_root;
void page_reset_target_data(target_ulong start, target_ulong last)
void page_reset_target_data(target_ulong start, target_ulong end)
{
IntervalTreeNode *n, *next;
target_ulong last;
assert_memory_lock();
start &= TARGET_PAGE_MASK;
last |= ~TARGET_PAGE_MASK;
start = start & TARGET_PAGE_MASK;
last = TARGET_PAGE_ALIGN(end) - 1;
for (n = interval_tree_iter_first(&targetdata_root, start, last),
next = n ? interval_tree_iter_next(n, start, last) : NULL;
@@ -936,14 +886,40 @@ void *page_get_target_data(target_ulong address)
return t->data[(page - region) >> TARGET_PAGE_BITS];
}
#else
void page_reset_target_data(target_ulong start, target_ulong last) { }
void page_reset_target_data(target_ulong start, target_ulong end) { }
#endif /* TARGET_PAGE_DATA_SIZE */
/* The softmmu versions of these helpers are in cputlb.c. */
static void *cpu_mmu_lookup(CPUArchState *env, vaddr addr,
MemOp mop, uintptr_t ra, MMUAccessType type)
/*
* Verify that we have passed the correct MemOp to the correct function.
*
* We could present one function to target code, and dispatch based on
* the MemOp, but so far we have worked hard to avoid an indirect function
* call along the memory path.
*/
static void validate_memop(MemOpIdx oi, MemOp expected)
{
#ifdef CONFIG_DEBUG_TCG
MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
assert(have == expected);
#endif
}
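validate_memop() checks that the size and byte-swap bits packed into the MemOpIdx match what the specific helper was compiled for, so a mismatched call is caught under CONFIG_DEBUG_TCG instead of silently misloading. A hedged standalone model of that packing; the 4-bit mmu-index field and the flag values here are illustrative, not QEMU's exact layout:
#include <assert.h>
#include <stdio.h>
enum { MO_8, MO_16, MO_32, MO_64 };       /* size codes (toy values) */
#define MO_SIZE  0x7
#define MO_BSWAP (1 << 3)
typedef unsigned MemOpIdx;
static MemOpIdx make_memop_idx(unsigned op, unsigned mmu_idx)
{
    return (op << 4) | mmu_idx;           /* op in high bits, idx low */
}
static unsigned get_memop(MemOpIdx oi) { return oi >> 4; }
static void validate_memop(MemOpIdx oi, unsigned expected)
{
    unsigned have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
    assert(have == expected);             /* wrong helper for this op */
}
int main(void)
{
    MemOpIdx oi = make_memop_idx(MO_32 | MO_BSWAP, 1);
    validate_memop(oi, MO_32 | MO_BSWAP); /* passes */
    puts("ok");
    return 0;
}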
void helper_unaligned_ld(CPUArchState *env, target_ulong addr)
{
cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC());
}
void helper_unaligned_st(CPUArchState *env, target_ulong addr)
{
cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC());
}
static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t ra, MMUAccessType type)
{
MemOp mop = get_memop(oi);
int a_bits = get_alignment_bits(mop);
void *ret;
@@ -957,330 +933,251 @@ static void *cpu_mmu_lookup(CPUArchState *env, vaddr addr,
return ret;
}
#include "ldst_atomicity.c.inc"
static uint8_t do_ld1_mmu(CPUArchState *env, abi_ptr addr,
MemOp mop, uintptr_t ra)
uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
uint8_t ret;
tcg_debug_assert((mop & MO_SIZE) == MO_8);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
validate_memop(oi, MO_UB);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = ldub_p(haddr);
clear_helper_retaddr();
return ret;
}
tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
MemOpIdx oi, uintptr_t ra)
{
return do_ld1_mmu(env, addr, get_memop(oi), ra);
}
tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
MemOpIdx oi, uintptr_t ra)
{
return (int8_t)do_ld1_mmu(env, addr, get_memop(oi), ra);
}
uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
uint8_t ret = do_ld1_mmu(env, addr, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}
static uint16_t do_ld2_mmu(CPUArchState *env, abi_ptr addr,
MemOp mop, uintptr_t ra)
uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
uint16_t ret;
tcg_debug_assert((mop & MO_SIZE) == MO_16);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_2(env, ra, haddr, mop);
validate_memop(oi, MO_BEUW);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = lduw_be_p(haddr);
clear_helper_retaddr();
if (mop & MO_BSWAP) {
ret = bswap16(ret);
}
return ret;
}
tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
MemOpIdx oi, uintptr_t ra)
{
return do_ld2_mmu(env, addr, get_memop(oi), ra);
}
tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
MemOpIdx oi, uintptr_t ra)
{
return (int16_t)do_ld2_mmu(env, addr, get_memop(oi), ra);
}
uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
uint16_t ret = do_ld2_mmu(env, addr, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}
static uint32_t do_ld4_mmu(CPUArchState *env, abi_ptr addr,
MemOp mop, uintptr_t ra)
uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
uint32_t ret;
tcg_debug_assert((mop & MO_SIZE) == MO_32);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_4(env, ra, haddr, mop);
validate_memop(oi, MO_BEUL);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = ldl_be_p(haddr);
clear_helper_retaddr();
if (mop & MO_BSWAP) {
ret = bswap32(ret);
}
return ret;
}
tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
MemOpIdx oi, uintptr_t ra)
{
return do_ld4_mmu(env, addr, get_memop(oi), ra);
}
tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
MemOpIdx oi, uintptr_t ra)
{
return (int32_t)do_ld4_mmu(env, addr, get_memop(oi), ra);
}
uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
uint32_t ret = do_ld4_mmu(env, addr, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}
static uint64_t do_ld8_mmu(CPUArchState *env, abi_ptr addr,
MemOp mop, uintptr_t ra)
uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
uint64_t ret;
tcg_debug_assert((mop & MO_SIZE) == MO_64);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_8(env, ra, haddr, mop);
validate_memop(oi, MO_BEUQ);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = ldq_be_p(haddr);
clear_helper_retaddr();
if (mop & MO_BSWAP) {
ret = bswap64(ret);
}
return ret;
}
uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
MemOpIdx oi, uintptr_t ra)
{
return do_ld8_mmu(env, addr, get_memop(oi), ra);
}
uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
uint64_t ret = do_ld8_mmu(env, addr, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}
static Int128 do_ld16_mmu(CPUArchState *env, abi_ptr addr,
MemOp mop, uintptr_t ra)
uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
uint16_t ret;
validate_memop(oi, MO_LEUW);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = lduw_le_p(haddr);
clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}
uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
uint32_t ret;
validate_memop(oi, MO_LEUL);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = ldl_le_p(haddr);
clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}
uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
uint64_t ret;
validate_memop(oi, MO_LEUQ);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = ldq_le_p(haddr);
clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}
Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
Int128 ret;
tcg_debug_assert((mop & MO_SIZE) == MO_128);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_16(env, ra, haddr, mop);
validate_memop(oi, MO_128 | MO_BE);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
memcpy(&ret, haddr, 16);
clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
if (mop & MO_BSWAP) {
if (!HOST_BIG_ENDIAN) {
ret = bswap128(ret);
}
return ret;
}
Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
return do_ld16_mmu(env, addr, get_memop(oi), ra);
}
Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, MemOpIdx oi)
{
return helper_ld16_mmu(env, addr, oi, GETPC());
}
Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
Int128 ret = do_ld16_mmu(env, addr, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}
static void do_st1_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
MemOp mop, uintptr_t ra)
{
void *haddr;
Int128 ret;
tcg_debug_assert((mop & MO_SIZE) == MO_8);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
stb_p(haddr, val);
validate_memop(oi, MO_128 | MO_LE);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
memcpy(&ret, haddr, 16);
clear_helper_retaddr();
}
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
MemOpIdx oi, uintptr_t ra)
{
do_st1_mmu(env, addr, val, get_memop(oi), ra);
if (HOST_BIG_ENDIAN) {
ret = bswap128(ret);
}
return ret;
}
void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
MemOpIdx oi, uintptr_t ra)
{
do_st1_mmu(env, addr, val, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
static void do_st2_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
MemOp mop, uintptr_t ra)
{
void *haddr;
tcg_debug_assert((mop & MO_SIZE) == MO_16);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) {
val = bswap16(val);
}
store_atom_2(env, ra, haddr, mop, val);
validate_memop(oi, MO_UB);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
stb_p(haddr, val);
clear_helper_retaddr();
}
void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
MemOpIdx oi, uintptr_t ra)
{
do_st2_mmu(env, addr, val, get_memop(oi), ra);
}
void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
MemOpIdx oi, uintptr_t ra)
{
do_st2_mmu(env, addr, val, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
static void do_st4_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
MemOp mop, uintptr_t ra)
void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
tcg_debug_assert((mop & MO_SIZE) == MO_32);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) {
val = bswap32(val);
}
store_atom_4(env, ra, haddr, mop, val);
validate_memop(oi, MO_BEUW);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
stw_be_p(haddr, val);
clear_helper_retaddr();
}
void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
MemOpIdx oi, uintptr_t ra)
{
do_st4_mmu(env, addr, val, get_memop(oi), ra);
}
void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
MemOpIdx oi, uintptr_t ra)
{
do_st4_mmu(env, addr, val, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
static void do_st8_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
MemOp mop, uintptr_t ra)
void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
tcg_debug_assert((mop & MO_SIZE) == MO_64);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) {
val = bswap64(val);
}
store_atom_8(env, ra, haddr, mop, val);
validate_memop(oi, MO_BEUL);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
stl_be_p(haddr, val);
clear_helper_retaddr();
}
void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
MemOpIdx oi, uintptr_t ra)
{
do_st8_mmu(env, addr, val, get_memop(oi), ra);
}
void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
MemOpIdx oi, uintptr_t ra)
{
do_st8_mmu(env, addr, val, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
static void do_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
MemOp mop, uintptr_t ra)
void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
tcg_debug_assert((mop & MO_SIZE) == MO_128);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
validate_memop(oi, MO_BEUQ);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
stq_be_p(haddr, val);
clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
if (mop & MO_BSWAP) {
void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
validate_memop(oi, MO_LEUW);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
stw_le_p(haddr, val);
clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
validate_memop(oi, MO_LEUL);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
stl_le_p(haddr, val);
clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
validate_memop(oi, MO_LEUQ);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
stq_le_p(haddr, val);
clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr,
Int128 val, MemOpIdx oi, uintptr_t ra)
{
void *haddr;
validate_memop(oi, MO_128 | MO_BE);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
if (!HOST_BIG_ENDIAN) {
val = bswap128(val);
}
store_atom_16(env, ra, haddr, mop, val);
memcpy(haddr, &val, 16);
clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
MemOpIdx oi, uintptr_t ra)
void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr,
Int128 val, MemOpIdx oi, uintptr_t ra)
{
do_st16_mmu(env, addr, val, get_memop(oi), ra);
}
void *haddr;
void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
{
helper_st16_mmu(env, addr, val, oi, GETPC());
}
void cpu_st16_mmu(CPUArchState *env, abi_ptr addr,
Int128 val, MemOpIdx oi, uintptr_t ra)
{
do_st16_mmu(env, addr, val, get_memop(oi), ra);
validate_memop(oi, MO_128 | MO_LE);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
if (HOST_BIG_ENDIAN) {
val = bswap128(val);
}
memcpy(haddr, &val, 16);
clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
@@ -1324,70 +1221,16 @@ uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
return ret;
}
uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
uint8_t ret;
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_INST_FETCH);
ret = ldub_p(haddr);
clear_helper_retaddr();
return ret;
}
uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
uint16_t ret;
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_INST_FETCH);
ret = lduw_p(haddr);
clear_helper_retaddr();
if (get_memop(oi) & MO_BSWAP) {
ret = bswap16(ret);
}
return ret;
}
uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
uint32_t ret;
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_INST_FETCH);
ret = ldl_p(haddr);
clear_helper_retaddr();
if (get_memop(oi) & MO_BSWAP) {
ret = bswap32(ret);
}
return ret;
}
uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
uint64_t ret;
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = ldq_p(haddr);
clear_helper_retaddr();
if (get_memop(oi) & MO_BSWAP) {
ret = bswap64(ret);
}
return ret;
}
#include "ldst_common.c.inc"
/*
* Do not allow unaligned operations to proceed. Return the host address.
*
* @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
*/
static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
int size, uintptr_t retaddr)
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
MemOpIdx oi, int size, int prot,
uintptr_t retaddr)
{
MemOp mop = get_memop(oi);
int a_bits = get_alignment_bits(mop);
@@ -1395,7 +1238,8 @@ static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
/* Enforce guest required alignment. */
if (unlikely(addr & ((1 << a_bits) - 1))) {
cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, retaddr);
MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr);
}
/* Enforce qemu required alignment. */
@@ -1433,7 +1277,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
#include "atomic_template.h"
#endif
#if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

View File

@@ -12,7 +12,6 @@
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "hw/xen/xen_native.h"
#include "hw/xen/xen-legacy-backend.h"
#include "hw/xen/xen_pt.h"
#include "chardev/char.h"
@@ -24,18 +23,99 @@
#include "migration/global_state.h"
#include "hw/boards.h"
//#define DEBUG_XEN
#ifdef DEBUG_XEN
#define DPRINTF(fmt, ...) \
do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
do { } while (0)
#endif
bool xen_allowed;
xc_interface *xen_xc;
xenforeignmemory_handle *xen_fmem;
xendevicemodel_handle *xen_dmod;
static void xenstore_record_dm_state(const char *state)
static int store_dev_info(int domid, Chardev *cs, const char *string)
{
struct xs_handle *xs = NULL;
char *path = NULL;
char *newpath = NULL;
char *pts = NULL;
int ret = -1;
/* Only continue if we're talking to a pty. */
if (!CHARDEV_IS_PTY(cs)) {
return 0;
}
pts = cs->filename + 4;
/* We now have everything we need to set the xenstore entry. */
xs = xs_open(0);
if (xs == NULL) {
fprintf(stderr, "Could not contact XenStore\n");
goto out;
}
path = xs_get_domain_path(xs, domid);
if (path == NULL) {
fprintf(stderr, "xs_get_domain_path() error\n");
goto out;
}
newpath = realloc(path, (strlen(path) + strlen(string) +
strlen("/tty") + 1));
if (newpath == NULL) {
fprintf(stderr, "realloc error\n");
goto out;
}
path = newpath;
strcat(path, string);
strcat(path, "/tty");
if (!xs_write(xs, XBT_NULL, path, pts, strlen(pts))) {
fprintf(stderr, "xs_write for '%s' fail", string);
goto out;
}
ret = 0;
out:
free(path);
xs_close(xs);
return ret;
}
void xenstore_store_pv_console_info(int i, Chardev *chr)
{
if (i == 0) {
store_dev_info(xen_domid, chr, "/console");
} else {
char buf[32];
snprintf(buf, sizeof(buf), "/device/console/%d", i);
store_dev_info(xen_domid, chr, buf);
}
}
static void xenstore_record_dm_state(struct xs_handle *xs, const char *state)
{
char path[50];
if (xs == NULL) {
error_report("xenstore connection not initialized");
exit(1);
}
snprintf(path, sizeof (path), "device-model/%u/state", xen_domid);
if (!qemu_xen_xs_write(xenstore, XBT_NULL, path, state, strlen(state))) {
/*
* This call may fail when running restricted so don't make it fatal in
* that case. Toolstacks should instead use QMP to listen for state changes.
*/
if (!xs_write(xs, XBT_NULL, path, state, strlen(state)) &&
!xen_domid_restrict) {
error_report("error recording dm state");
exit(1);
}
@@ -47,7 +127,7 @@ static void xen_change_state_handler(void *opaque, bool running,
{
if (running) {
/* record state running */
xenstore_record_dm_state("running");
xenstore_record_dm_state(xenstore, "running");
}
}
@@ -96,21 +176,11 @@ static int xen_init(MachineState *ms)
xc_interface_close(xen_xc);
return -1;
}
/*
* The XenStore write would fail when running restricted so don't attempt
* it in that case. Toolstacks should instead use QMP to listen for state
* changes.
*/
if (!xen_domid_restrict) {
qemu_add_vm_change_state_handler(xen_change_state_handler, NULL);
}
qemu_add_vm_change_state_handler(xen_change_state_handler, NULL);
/*
* opt out of system RAM being allocated by generic code
*/
mc->default_ram_id = NULL;
xen_mode = XEN_ATTACH;
return 0;
}

View File

@@ -222,7 +222,11 @@ static int alsa_poll_helper (snd_pcm_t *handle, struct pollhlp *hlp, int mask)
return -1;
}
pfds = g_new0(struct pollfd, count);
pfds = audio_calloc ("alsa_poll_helper", count, sizeof (*pfds));
if (!pfds) {
dolog ("Could not initialize poll mode\n");
return -1;
}
err = snd_pcm_poll_descriptors (handle, pfds, count);
if (err < 0) {
@@ -913,23 +917,28 @@ static void *alsa_audio_init(Audiodev *dev)
alsa_init_per_direction(aopts->in);
alsa_init_per_direction(aopts->out);
/* don't set has_* so alsa_open can identify it wasn't set by the user */
/*
* need to define them, as otherwise alsa produces no sound
* doesn't set has_* so alsa_open can identify it wasn't set by the user
*/
if (!dev->u.alsa.out->has_period_length) {
/* 256 frames assuming 44100Hz */
dev->u.alsa.out->period_length = 5805;
/* 1024 frames assuming 44100Hz */
dev->u.alsa.out->period_length = 1024 * 1000000 / 44100;
}
if (!dev->u.alsa.out->has_buffer_length) {
/* 4096 frames assuming 44100Hz */
dev->u.alsa.out->buffer_length = 92880;
dev->u.alsa.out->buffer_length = 4096ll * 1000000 / 44100;
}
/*
* OptsVisitor sets unspecified optional fields to zero, but do not depend
* on it...
*/
if (!dev->u.alsa.in->has_period_length) {
/* 256 frames assuming 44100Hz */
dev->u.alsa.in->period_length = 5805;
dev->u.alsa.in->period_length = 0;
}
if (!dev->u.alsa.in->has_buffer_length) {
/* 4096 frames assuming 44100Hz */
dev->u.alsa.in->buffer_length = 92880;
dev->u.alsa.in->buffer_length = 0;
}
return dev;

View File

@@ -33,7 +33,6 @@
#include "qapi/qapi-visit-audio.h"
#include "qapi/qapi-commands-audio.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/help_option.h"
#include "sysemu/sysemu.h"
@@ -149,6 +148,26 @@ static inline int audio_bits_to_index (int bits)
}
}
void *audio_calloc (const char *funcname, int nmemb, size_t size)
{
int cond;
size_t len;
len = nmemb * size;
cond = !nmemb || !size;
cond |= nmemb < 0;
cond |= len < size;
if (audio_bug ("audio_calloc", cond)) {
AUD_log (NULL, "%s passed invalid arguments to audio_calloc\n",
funcname);
AUD_log (NULL, "nmemb=%d size=%zu (len=%zu)\n", nmemb, size, len);
return NULL;
}
return g_malloc0 (len);
}
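The cond check above rejects zero or negative counts and uses len < size to spot nmemb * size wrapping around; that catches the common cases, but a product can wrap to a value that is still >= size. A stricter variant of the same guard, shown standalone:
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
static void *checked_calloc(size_t nmemb, size_t size)
{
    if (nmemb == 0 || size == 0) {
        return NULL;
    }
    if (nmemb > SIZE_MAX / size) {
        return NULL;                 /* nmemb * size would overflow */
    }
    return calloc(nmemb, size);
}
int main(void)
{
    printf("%p\n", checked_calloc(SIZE_MAX / 2 + 1, 2));   /* NULL */
    printf("%p\n", checked_calloc(16, 32));                /* valid */
    return 0;
}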
void AUD_vlog (const char *cap, const char *fmt, va_list ap)
{
if (cap) {
@@ -381,6 +400,13 @@ void audio_pcm_info_clear_buf (struct audio_pcm_info *info, void *buf, int len)
/*
* Capture
*/
static void noop_conv (struct st_sample *dst, const void *src, int samples)
{
(void) src;
(void) dst;
(void) samples;
}
static CaptureVoiceOut *audio_pcm_capture_find_specific(AudioState *s,
struct audsettings *as)
{
@@ -478,8 +504,15 @@ static int audio_attach_capture (HWVoiceOut *hw)
sw->info = hw->info;
sw->empty = 1;
sw->active = hw->enabled;
sw->conv = noop_conv;
sw->ratio = ((int64_t) hw_cap->info.freq << 32) / sw->info.freq;
sw->vol = nominal_volume;
sw->rate = st_rate_start (sw->info.freq, hw_cap->info.freq);
if (!sw->rate) {
dolog ("Could not start rate conversion for `%s'\n", SW_NAME (sw));
g_free (sw);
return -1;
}
QLIST_INSERT_HEAD (&hw_cap->sw_head, sw, entries);
QLIST_INSERT_HEAD (&hw->cap_head, sc, entries);
#ifdef DEBUG_CAPTURE
@@ -514,8 +547,8 @@ static size_t audio_pcm_hw_find_min_in (HWVoiceIn *hw)
static size_t audio_pcm_hw_get_live_in(HWVoiceIn *hw)
{
size_t live = hw->total_samples_captured - audio_pcm_hw_find_min_in (hw);
if (audio_bug(__func__, live > hw->conv_buf.size)) {
dolog("live=%zu hw->conv_buf.size=%zu\n", live, hw->conv_buf.size);
if (audio_bug(__func__, live > hw->conv_buf->size)) {
dolog("live=%zu hw->conv_buf->size=%zu\n", live, hw->conv_buf->size);
return 0;
}
return live;
@@ -524,13 +557,13 @@ static size_t audio_pcm_hw_get_live_in(HWVoiceIn *hw)
static size_t audio_pcm_hw_conv_in(HWVoiceIn *hw, void *pcm_buf, size_t samples)
{
size_t conv = 0;
STSampleBuffer *conv_buf = &hw->conv_buf;
STSampleBuffer *conv_buf = hw->conv_buf;
while (samples) {
uint8_t *src = advance(pcm_buf, conv * hw->info.bytes_per_frame);
size_t proc = MIN(samples, conv_buf->size - conv_buf->pos);
hw->conv(conv_buf->buffer + conv_buf->pos, src, proc);
hw->conv(conv_buf->samples + conv_buf->pos, src, proc);
conv_buf->pos = (conv_buf->pos + proc) % conv_buf->size;
samples -= proc;
conv += proc;
@@ -542,65 +575,56 @@ static size_t audio_pcm_hw_conv_in(HWVoiceIn *hw, void *pcm_buf, size_t samples)
/*
* Soft voice (capture)
*/
static void audio_pcm_sw_resample_in(SWVoiceIn *sw,
size_t frames_in_max, size_t frames_out_max,
size_t *total_in, size_t *total_out)
static size_t audio_pcm_sw_read(SWVoiceIn *sw, void *buf, size_t size)
{
HWVoiceIn *hw = sw->hw;
struct st_sample *src, *dst;
size_t live, rpos, frames_in, frames_out;
live = hw->total_samples_captured - sw->total_hw_samples_acquired;
rpos = audio_ring_posb(hw->conv_buf.pos, live, hw->conv_buf.size);
/* resample conv_buf from rpos to end of buffer */
src = hw->conv_buf.buffer + rpos;
frames_in = MIN(frames_in_max, hw->conv_buf.size - rpos);
dst = sw->resample_buf.buffer;
frames_out = frames_out_max;
st_rate_flow(sw->rate, src, dst, &frames_in, &frames_out);
rpos += frames_in;
*total_in = frames_in;
*total_out = frames_out;
/* resample conv_buf from start of buffer if there are input frames left */
if (frames_in_max - frames_in && rpos == hw->conv_buf.size) {
src = hw->conv_buf.buffer;
frames_in = frames_in_max - frames_in;
dst += frames_out;
frames_out = frames_out_max - frames_out;
st_rate_flow(sw->rate, src, dst, &frames_in, &frames_out);
*total_in += frames_in;
*total_out += frames_out;
}
}
static size_t audio_pcm_sw_read(SWVoiceIn *sw, void *buf, size_t buf_len)
{
HWVoiceIn *hw = sw->hw;
size_t live, frames_out_max, total_in, total_out;
size_t samples, live, ret = 0, swlim, isamp, osamp, rpos, total = 0;
struct st_sample *src, *dst = sw->buf;
live = hw->total_samples_captured - sw->total_hw_samples_acquired;
if (!live) {
return 0;
}
if (audio_bug(__func__, live > hw->conv_buf.size)) {
dolog("live_in=%zu hw->conv_buf.size=%zu\n", live, hw->conv_buf.size);
if (audio_bug(__func__, live > hw->conv_buf->size)) {
dolog("live_in=%zu hw->conv_buf->size=%zu\n", live, hw->conv_buf->size);
return 0;
}
frames_out_max = MIN(buf_len / sw->info.bytes_per_frame,
sw->resample_buf.size);
rpos = audio_ring_posb(hw->conv_buf->pos, live, hw->conv_buf->size);
audio_pcm_sw_resample_in(sw, live, frames_out_max, &total_in, &total_out);
samples = size / sw->info.bytes_per_frame;
swlim = (live * sw->ratio) >> 32;
swlim = MIN (swlim, samples);
while (swlim) {
src = hw->conv_buf->samples + rpos;
if (hw->conv_buf->pos > rpos) {
isamp = hw->conv_buf->pos - rpos;
} else {
isamp = hw->conv_buf->size - rpos;
}
if (!isamp) {
break;
}
osamp = swlim;
st_rate_flow (sw->rate, src, dst, &isamp, &osamp);
swlim -= osamp;
rpos = (rpos + isamp) % hw->conv_buf->size;
dst += osamp;
ret += osamp;
total += isamp;
}
if (!hw->pcm_ops->volume_in) {
mixeng_volume(sw->resample_buf.buffer, total_out, &sw->vol);
mixeng_volume (sw->buf, ret, &sw->vol);
}
sw->clip(buf, sw->resample_buf.buffer, total_out);
sw->total_hw_samples_acquired += total_in;
return total_out * sw->info.bytes_per_frame;
sw->clip (buf, sw->buf, ret);
sw->total_hw_samples_acquired += total;
return ret * sw->info.bytes_per_frame;
}
/*
@@ -636,8 +660,8 @@ static size_t audio_pcm_hw_get_live_out (HWVoiceOut *hw, int *nb_live)
if (nb_live1) {
size_t live = smin;
if (audio_bug(__func__, live > hw->mix_buf.size)) {
dolog("live=%zu hw->mix_buf.size=%zu\n", live, hw->mix_buf.size);
if (audio_bug(__func__, live > hw->mix_buf->size)) {
dolog("live=%zu hw->mix_buf->size=%zu\n", live, hw->mix_buf->size);
return 0;
}
return live;
@@ -654,17 +678,17 @@ static size_t audio_pcm_hw_get_free(HWVoiceOut *hw)
static void audio_pcm_hw_clip_out(HWVoiceOut *hw, void *pcm_buf, size_t len)
{
size_t clipped = 0;
size_t pos = hw->mix_buf.pos;
size_t pos = hw->mix_buf->pos;
while (len) {
st_sample *src = hw->mix_buf.buffer + pos;
st_sample *src = hw->mix_buf->samples + pos;
uint8_t *dst = advance(pcm_buf, clipped * hw->info.bytes_per_frame);
size_t samples_till_end_of_buf = hw->mix_buf.size - pos;
size_t samples_till_end_of_buf = hw->mix_buf->size - pos;
size_t samples_to_clip = MIN(len, samples_till_end_of_buf);
hw->clip(dst, src, samples_to_clip);
pos = (pos + samples_to_clip) % hw->mix_buf.size;
pos = (pos + samples_to_clip) % hw->mix_buf->size;
len -= samples_to_clip;
clipped += samples_to_clip;
}
@@ -673,113 +697,84 @@ static void audio_pcm_hw_clip_out(HWVoiceOut *hw, void *pcm_buf, size_t len)
/*
* Soft voice (playback)
*/
static void audio_pcm_sw_resample_out(SWVoiceOut *sw,
size_t frames_in_max, size_t frames_out_max,
size_t *total_in, size_t *total_out)
static size_t audio_pcm_sw_write(SWVoiceOut *sw, void *buf, size_t size)
{
HWVoiceOut *hw = sw->hw;
struct st_sample *src, *dst;
size_t live, wpos, frames_in, frames_out;
size_t hwsamples, samples, isamp, osamp, wpos, live, dead, left, blck;
size_t hw_free;
size_t ret = 0, pos = 0, total = 0;
live = sw->total_hw_samples_mixed;
wpos = (hw->mix_buf.pos + live) % hw->mix_buf.size;
/* write to mix_buf from wpos to end of buffer */
src = sw->resample_buf.buffer;
frames_in = frames_in_max;
dst = hw->mix_buf.buffer + wpos;
frames_out = MIN(frames_out_max, hw->mix_buf.size - wpos);
st_rate_flow_mix(sw->rate, src, dst, &frames_in, &frames_out);
wpos += frames_out;
*total_in = frames_in;
*total_out = frames_out;
/* write to mix_buf from start of buffer if there are input frames left */
if (frames_in_max - frames_in > 0 && wpos == hw->mix_buf.size) {
src += frames_in;
frames_in = frames_in_max - frames_in;
dst = hw->mix_buf.buffer;
frames_out = frames_out_max - frames_out;
st_rate_flow_mix(sw->rate, src, dst, &frames_in, &frames_out);
*total_in += frames_in;
*total_out += frames_out;
if (!sw) {
return size;
}
}
static size_t audio_pcm_sw_write(SWVoiceOut *sw, void *buf, size_t buf_len)
{
HWVoiceOut *hw = sw->hw;
size_t live, dead, hw_free, sw_max, fe_max;
size_t frames_in_max, frames_out_max, total_in, total_out;
hwsamples = sw->hw->mix_buf->size;
live = sw->total_hw_samples_mixed;
if (audio_bug(__func__, live > hw->mix_buf.size)) {
dolog("live=%zu hw->mix_buf.size=%zu\n", live, hw->mix_buf.size);
if (audio_bug(__func__, live > hwsamples)) {
dolog("live=%zu hw->mix_buf->size=%zu\n", live, hwsamples);
return 0;
}
if (live == hw->mix_buf.size) {
if (live == hwsamples) {
#ifdef DEBUG_OUT
dolog ("%s is full %zu\n", sw->name, live);
#endif
return 0;
}
dead = hw->mix_buf.size - live;
hw_free = audio_pcm_hw_get_free(hw);
wpos = (sw->hw->mix_buf->pos + live) % hwsamples;
dead = hwsamples - live;
hw_free = audio_pcm_hw_get_free(sw->hw);
hw_free = hw_free > live ? hw_free - live : 0;
frames_out_max = MIN(dead, hw_free);
sw_max = st_rate_frames_in(sw->rate, frames_out_max);
fe_max = MIN(buf_len / sw->info.bytes_per_frame + sw->resample_buf.pos,
sw->resample_buf.size);
frames_in_max = MIN(sw_max, fe_max);
samples = ((int64_t)MIN(dead, hw_free) << 32) / sw->ratio;
samples = MIN(samples, size / sw->info.bytes_per_frame);
if (samples) {
sw->conv(sw->buf, buf, samples);
if (!frames_in_max) {
return 0;
}
if (frames_in_max > sw->resample_buf.pos) {
sw->conv(sw->resample_buf.buffer + sw->resample_buf.pos,
buf, frames_in_max - sw->resample_buf.pos);
if (!sw->hw->pcm_ops->volume_out) {
mixeng_volume(sw->resample_buf.buffer + sw->resample_buf.pos,
frames_in_max - sw->resample_buf.pos, &sw->vol);
mixeng_volume(sw->buf, samples, &sw->vol);
}
}
audio_pcm_sw_resample_out(sw, frames_in_max, frames_out_max,
&total_in, &total_out);
sw->total_hw_samples_mixed += total_out;
sw->empty = sw->total_hw_samples_mixed == 0;
/*
* Upsampling may leave one audio frame in the resample buffer. Decrement
* total_in by one if there was a leftover frame from the previous resample
* pass in the resample buffer. Increment total_in by one if the current
* resample pass left one frame in the resample buffer.
*/
if (frames_in_max - total_in == 1) {
/* copy one leftover audio frame to the beginning of the buffer */
*sw->resample_buf.buffer = *(sw->resample_buf.buffer + total_in);
total_in += 1 - sw->resample_buf.pos;
sw->resample_buf.pos = 1;
} else if (total_in >= sw->resample_buf.pos) {
total_in -= sw->resample_buf.pos;
sw->resample_buf.pos = 0;
while (samples) {
dead = hwsamples - live;
left = hwsamples - wpos;
blck = MIN (dead, left);
if (!blck) {
break;
}
isamp = samples;
osamp = blck;
st_rate_flow_mix (
sw->rate,
sw->buf + pos,
sw->hw->mix_buf->samples + wpos,
&isamp,
&osamp
);
ret += isamp;
samples -= isamp;
pos += isamp;
live += osamp;
wpos = (wpos + osamp) % hwsamples;
total += osamp;
}
sw->total_hw_samples_mixed += total;
sw->empty = sw->total_hw_samples_mixed == 0;
#ifdef DEBUG_OUT
dolog (
"%s: write size %zu written %zu total mixed %zu\n",
SW_NAME(sw),
buf_len / sw->info.bytes_per_frame,
total_in,
"%s: write size %zu ret %zu total sw %zu\n",
SW_NAME (sw),
size / sw->info.bytes_per_frame,
ret,
sw->total_hw_samples_mixed
);
#endif
return total_in * sw->info.bytes_per_frame;
return ret * sw->info.bytes_per_frame;
}
#ifdef DEBUG_AUDIO
@@ -997,6 +992,18 @@ void AUD_set_active_in (SWVoiceIn *sw, int on)
}
}
/**
* audio_frontend_frames_in() - returns the number of frames the resampling
* code generates from frames_in frames
*
* @sw: audio recording frontend
* @frames_in: number of frames
*/
static size_t audio_frontend_frames_in(SWVoiceIn *sw, size_t frames_in)
{
return (int64_t)frames_in * sw->ratio >> 32;
}
static size_t audio_get_avail (SWVoiceIn *sw)
{
size_t live;
@@ -1006,21 +1013,33 @@ static size_t audio_get_avail (SWVoiceIn *sw)
}
live = sw->hw->total_samples_captured - sw->total_hw_samples_acquired;
if (audio_bug(__func__, live > sw->hw->conv_buf.size)) {
dolog("live=%zu sw->hw->conv_buf.size=%zu\n", live,
sw->hw->conv_buf.size);
if (audio_bug(__func__, live > sw->hw->conv_buf->size)) {
dolog("live=%zu sw->hw->conv_buf->size=%zu\n", live,
sw->hw->conv_buf->size);
return 0;
}
ldebug (
"%s: get_avail live %zu frontend frames %u\n",
"%s: get_avail live %zu frontend frames %zu\n",
SW_NAME (sw),
live, st_rate_frames_out(sw->rate, live)
live, audio_frontend_frames_in(sw, live)
);
return live;
}
/**
* audio_frontend_frames_out() - returns the number of frames needed to
* get frames_out frames after resampling
*
* @sw: audio playback frontend
* @frames_out: number of frames
*/
static size_t audio_frontend_frames_out(SWVoiceOut *sw, size_t frames_out)
{
return ((int64_t)frames_out << 32) / sw->ratio;
}
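
Both helpers above are thin wrappers around the 32.32 fixed-point ratio field (hw_freq/sw_freq for playback, sw_freq/hw_freq for capture, per the audio_template.h hunk further down). A standalone sketch of the arithmetic with concrete rates (illustrative values only):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        /* playback ratio: hw 48000 Hz over frontend 44100 Hz, as 32.32 */
        int64_t ratio = ((int64_t)48000 << 32) / 44100;

        /* audio_frontend_frames_out(): frontend frames needed to fill
         * 1024 hw frames, i.e. roughly 1024 * 44100 / 48000 */
        size_t frames_out = 1024;
        size_t frames_in = ((int64_t)frames_out << 32) / ratio;
        printf("%zu hw frames <- %zu frontend frames\n", frames_out, frames_in);

        /* audio_frontend_frames_in() runs the same ratio the other way:
         * frames * ratio >> 32; truncation makes the round trip inexact */
        size_t back = (int64_t)frames_in * ratio >> 32;
        printf("round trip: %zu\n", back);
        return 0;
    }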
static size_t audio_get_free(SWVoiceOut *sw)
{
size_t live, dead;
@@ -1031,17 +1050,17 @@ static size_t audio_get_free(SWVoiceOut *sw)
live = sw->total_hw_samples_mixed;
if (audio_bug(__func__, live > sw->hw->mix_buf.size)) {
dolog("live=%zu sw->hw->mix_buf.size=%zu\n", live,
sw->hw->mix_buf.size);
if (audio_bug(__func__, live > sw->hw->mix_buf->size)) {
dolog("live=%zu sw->hw->mix_buf->size=%zu\n", live,
sw->hw->mix_buf->size);
return 0;
}
dead = sw->hw->mix_buf.size - live;
dead = sw->hw->mix_buf->size - live;
#ifdef DEBUG_OUT
dolog("%s: get_free live %zu dead %zu frontend frames %u\n",
SW_NAME(sw), live, dead, st_rate_frames_in(sw->rate, dead));
dolog("%s: get_free live %zu dead %zu frontend frames %zu\n",
SW_NAME(sw), live, dead, audio_frontend_frames_out(sw, dead));
#endif
return dead;
@@ -1057,40 +1076,32 @@ static void audio_capture_mix_and_clear(HWVoiceOut *hw, size_t rpos,
for (sc = hw->cap_head.lh_first; sc; sc = sc->entries.le_next) {
SWVoiceOut *sw = &sc->sw;
size_t rpos2 = rpos;
int rpos2 = rpos;
n = samples;
while (n) {
size_t till_end_of_hw = hw->mix_buf.size - rpos2;
size_t to_read = MIN(till_end_of_hw, n);
size_t live, frames_in, frames_out;
size_t till_end_of_hw = hw->mix_buf->size - rpos2;
size_t to_write = MIN(till_end_of_hw, n);
size_t bytes = to_write * hw->info.bytes_per_frame;
size_t written;
sw->resample_buf.buffer = hw->mix_buf.buffer + rpos2;
sw->resample_buf.size = to_read;
live = sw->total_hw_samples_mixed;
audio_pcm_sw_resample_out(sw,
to_read, sw->hw->mix_buf.size - live,
&frames_in, &frames_out);
sw->total_hw_samples_mixed += frames_out;
sw->empty = sw->total_hw_samples_mixed == 0;
if (to_read - frames_in) {
dolog("Could not mix %zu frames into a capture "
sw->buf = hw->mix_buf->samples + rpos2;
written = audio_pcm_sw_write (sw, NULL, bytes);
if (written - bytes) {
dolog("Could not mix %zu bytes into a capture "
"buffer, mixed %zu\n",
to_read, frames_in);
bytes, written);
break;
}
n -= to_read;
rpos2 = (rpos2 + to_read) % hw->mix_buf.size;
n -= to_write;
rpos2 = (rpos2 + to_write) % hw->mix_buf->size;
}
}
}
n = MIN(samples, hw->mix_buf.size - rpos);
mixeng_clear(hw->mix_buf.buffer + rpos, n);
mixeng_clear(hw->mix_buf.buffer, samples - n);
n = MIN(samples, hw->mix_buf->size - rpos);
mixeng_clear(hw->mix_buf->samples + rpos, n);
mixeng_clear(hw->mix_buf->samples, samples - n);
}
static size_t audio_pcm_hw_run_out(HWVoiceOut *hw, size_t live)
@@ -1116,7 +1127,7 @@ static size_t audio_pcm_hw_run_out(HWVoiceOut *hw, size_t live)
live -= proc;
clipped += proc;
hw->mix_buf.pos = (hw->mix_buf.pos + proc) % hw->mix_buf.size;
hw->mix_buf->pos = (hw->mix_buf->pos + proc) % hw->mix_buf->size;
if (proc == 0 || proc < decr) {
break;
@@ -1170,14 +1181,12 @@ static void audio_run_out (AudioState *s)
size_t free;
if (hw_free > sw->total_hw_samples_mixed) {
free = st_rate_frames_in(sw->rate,
free = audio_frontend_frames_out(sw,
MIN(sw_free, hw_free - sw->total_hw_samples_mixed));
} else {
free = 0;
}
if (free > sw->resample_buf.pos) {
free = MIN(free, sw->resample_buf.size)
- sw->resample_buf.pos;
if (free > 0) {
sw->callback.fn(sw->callback.opaque,
free * sw->info.bytes_per_frame);
}
@@ -1189,8 +1198,8 @@ static void audio_run_out (AudioState *s)
live = 0;
}
if (audio_bug(__func__, live > hw->mix_buf.size)) {
dolog("live=%zu hw->mix_buf.size=%zu\n", live, hw->mix_buf.size);
if (audio_bug(__func__, live > hw->mix_buf->size)) {
dolog("live=%zu hw->mix_buf->size=%zu\n", live, hw->mix_buf->size);
continue;
}
@@ -1218,13 +1227,13 @@ static void audio_run_out (AudioState *s)
continue;
}
prev_rpos = hw->mix_buf.pos;
prev_rpos = hw->mix_buf->pos;
played = audio_pcm_hw_run_out(hw, live);
replay_audio_out(&played);
if (audio_bug(__func__, hw->mix_buf.pos >= hw->mix_buf.size)) {
dolog("hw->mix_buf.pos=%zu hw->mix_buf.size=%zu played=%zu\n",
hw->mix_buf.pos, hw->mix_buf.size, played);
hw->mix_buf.pos = 0;
if (audio_bug(__func__, hw->mix_buf->pos >= hw->mix_buf->size)) {
dolog("hw->mix_buf->pos=%zu hw->mix_buf->size=%zu played=%zu\n",
hw->mix_buf->pos, hw->mix_buf->size, played);
hw->mix_buf->pos = 0;
}
#ifdef DEBUG_OUT
@@ -1305,10 +1314,10 @@ static void audio_run_in (AudioState *s)
if (replay_mode != REPLAY_MODE_PLAY) {
captured = audio_pcm_hw_run_in(
hw, hw->conv_buf.size - audio_pcm_hw_get_live_in(hw));
hw, hw->conv_buf->size - audio_pcm_hw_get_live_in(hw));
}
replay_audio_in(&captured, hw->conv_buf.buffer, &hw->conv_buf.pos,
hw->conv_buf.size);
replay_audio_in(&captured, hw->conv_buf->samples, &hw->conv_buf->pos,
hw->conv_buf->size);
min = audio_pcm_hw_find_min_in (hw);
hw->total_samples_captured += captured - min;
@@ -1321,9 +1330,8 @@ static void audio_run_in (AudioState *s)
size_t sw_avail = audio_get_avail(sw);
size_t avail;
avail = st_rate_frames_out(sw->rate, sw_avail);
avail = audio_frontend_frames_in(sw, sw_avail);
if (avail > 0) {
avail = MIN(avail, sw->resample_buf.size);
sw->callback.fn(sw->callback.opaque,
avail * sw->info.bytes_per_frame);
}
@@ -1342,14 +1350,14 @@ static void audio_run_capture (AudioState *s)
SWVoiceOut *sw;
captured = live = audio_pcm_hw_get_live_out (hw, NULL);
rpos = hw->mix_buf.pos;
rpos = hw->mix_buf->pos;
while (live) {
size_t left = hw->mix_buf.size - rpos;
size_t left = hw->mix_buf->size - rpos;
size_t to_capture = MIN(live, left);
struct st_sample *src;
struct capture_callback *cb;
src = hw->mix_buf.buffer + rpos;
src = hw->mix_buf->samples + rpos;
hw->clip (cap->buf, src, to_capture);
mixeng_clear (src, to_capture);
@@ -1357,10 +1365,10 @@ static void audio_run_capture (AudioState *s)
cb->ops.capture (cb->opaque, cap->buf,
to_capture * hw->info.bytes_per_frame);
}
rpos = (rpos + to_capture) % hw->mix_buf.size;
rpos = (rpos + to_capture) % hw->mix_buf->size;
live -= to_capture;
}
hw->mix_buf.pos = rpos;
hw->mix_buf->pos = rpos;
for (sw = hw->sw_head.lh_first; sw; sw = sw->entries.le_next) {
if (!sw->active && sw->empty) {
@@ -1919,7 +1927,7 @@ CaptureVoiceOut *AUD_add_capture(
audio_pcm_init_info (&hw->info, as);
cap->buf = g_malloc0_n(hw->mix_buf.size, hw->info.bytes_per_frame);
cap->buf = g_malloc0_n(hw->mix_buf->size, hw->info.bytes_per_frame);
if (hw->info.is_float) {
hw->clip = mixeng_clip_float[hw->info.nchannels == 2];
@@ -1971,7 +1979,7 @@ void AUD_del_capture (CaptureVoiceOut *cap, void *cb_opaque)
sw = sw1;
}
QLIST_REMOVE (cap, entries);
g_free(cap->hw.mix_buf.buffer);
g_free (cap->hw.mix_buf);
g_free (cap->buf);
g_free (cap);
}
@@ -2061,9 +2069,6 @@ void audio_create_pdos(Audiodev *dev)
#ifdef CONFIG_AUDIO_PA
CASE(PA, pa, Pa);
#endif
#ifdef CONFIG_AUDIO_PIPEWIRE
CASE(PIPEWIRE, pipewire, Pipewire);
#endif
#ifdef CONFIG_AUDIO_SDL
CASE(SDL, sdl, Sdl);
#endif

View File

@@ -58,7 +58,7 @@ typedef struct SWVoiceCap SWVoiceCap;
typedef struct STSampleBuffer {
size_t pos, size;
st_sample *buffer;
st_sample samples[];
} STSampleBuffer;
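
The hunk above toggles STSampleBuffer between two layouts: a header with a separately allocated buffer pointer, and a header ending in a C99 flexible array member allocated in one block (see the g_malloc0(sizeof(STSampleBuffer) + ...) call in the audio_template.h hunk below). A standalone sketch of the two allocation patterns, with illustrative stand-in types:

    #include <stdlib.h>

    typedef struct { float l, r; } st_sample;  /* stand-in, not mixeng's */

    /* Layout A: one allocation, header and samples contiguous,
     * freed with a single free() */
    typedef struct {
        size_t pos, size;
        st_sample samples[];
    } BufA;

    /* Layout B: header can be embedded by value in a larger struct,
     * at the cost of a second allocation to manage */
    typedef struct {
        size_t pos, size;
        st_sample *buffer;
    } BufB;

    int main(void)
    {
        size_t n = 1024;           /* error handling omitted */

        BufA *a = calloc(1, sizeof(BufA) + n * sizeof(st_sample));
        a->size = n;
        free(a);

        BufB b = { .size = n, .buffer = calloc(n, sizeof(st_sample)) };
        free(b.buffer);
        return 0;
    }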
typedef struct HWVoiceOut {
@@ -71,7 +71,7 @@ typedef struct HWVoiceOut {
f_sample *clip;
uint64_t ts_helper;
STSampleBuffer mix_buf;
STSampleBuffer *mix_buf;
void *buf_emul;
size_t pos_emul, pending_emul, size_emul;
@@ -93,7 +93,7 @@ typedef struct HWVoiceIn {
size_t total_samples_captured;
uint64_t ts_helper;
STSampleBuffer conv_buf;
STSampleBuffer *conv_buf;
void *buf_emul;
size_t pos_emul, pending_emul, size_emul;
@@ -108,7 +108,8 @@ struct SWVoiceOut {
AudioState *s;
struct audio_pcm_info info;
t_sample *conv;
STSampleBuffer resample_buf;
int64_t ratio;
struct st_sample *buf;
void *rate;
size_t total_hw_samples_mixed;
int active;
@@ -125,9 +126,10 @@ struct SWVoiceIn {
AudioState *s;
int active;
struct audio_pcm_info info;
int64_t ratio;
void *rate;
size_t total_hw_samples_acquired;
STSampleBuffer resample_buf;
struct st_sample *buf;
f_sample *clip;
HWVoiceIn *hw;
char *name;
@@ -143,14 +145,14 @@ struct audio_driver {
void *(*init) (Audiodev *);
void (*fini) (void *);
#ifdef CONFIG_GIO
void (*set_dbus_server) (AudioState *s, GDBusObjectManagerServer *manager, bool p2p);
void (*set_dbus_server) (AudioState *s, GDBusObjectManagerServer *manager);
#endif
struct audio_pcm_ops *pcm_ops;
int can_be_default;
int max_voices_out;
int max_voices_in;
size_t voice_size_out;
size_t voice_size_in;
int voice_size_out;
int voice_size_in;
QLIST_ENTRY(audio_driver) next;
};
@@ -249,6 +251,7 @@ void audio_pcm_init_info (struct audio_pcm_info *info, struct audsettings *as);
void audio_pcm_info_clear_buf (struct audio_pcm_info *info, void *buf, int len);
int audio_bug (const char *funcname, int cond);
void *audio_calloc (const char *funcname, int nmemb, size_t size);
void audio_run(AudioState *s, const char *msg);
@@ -291,6 +294,9 @@ static inline size_t audio_ring_posb(size_t pos, size_t dist, size_t len)
#define ldebug(fmt, ...) (void)0
#endif
#define AUDIO_STRINGIFY_(n) #n
#define AUDIO_STRINGIFY(n) AUDIO_STRINGIFY_(n)
typedef struct AudiodevListEntry {
Audiodev *dev;
QSIMPLEQ_ENTRY(AudiodevListEntry) next;

View File

@@ -35,8 +35,8 @@
static uint32_t toui32(const char *str)
{
uint64_t ret;
if (parse_uint_full(str, 10, &ret) || ret > UINT32_MAX) {
unsigned long long ret;
if (parse_uint_full(str, &ret, 10) || ret > UINT32_MAX) {
dolog("Invalid integer value `%s'\n", str);
exit(1);
}
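
This hunk only tracks a parse_uint_full() argument-order change between QEMU versions. For reference, an equivalent standalone parse with the same full-string and range checks might look like this (a sketch using plain strtoull, not the QEMU utility):

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Parse an entire string as a base-10 uint32_t; returns 0 on success. */
    static int parse_u32(const char *str, uint32_t *out)
    {
        char *end;
        unsigned long long v;

        errno = 0;
        v = strtoull(str, &end, 10);
        if (errno || end == str || *end != '\0' || v > UINT32_MAX) {
            return -1;
        }
        *out = (uint32_t)v;
        return 0;
    }

    int main(void)
    {
        uint32_t v;
        return parse_u32("44100", &v) == 0 && v == 44100 ? 0 : 1;
    }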

View File

@@ -40,7 +40,7 @@ static void glue(audio_init_nb_voices_, TYPE)(AudioState *s,
struct audio_driver *drv)
{
int max_voices = glue (drv->max_voices_, TYPE);
size_t voice_size = glue(drv->voice_size_, TYPE);
int voice_size = glue (drv->voice_size_, TYPE);
if (glue (s->nb_hw_voices_, TYPE) > max_voices) {
if (!max_voices) {
@@ -63,17 +63,16 @@ static void glue(audio_init_nb_voices_, TYPE)(AudioState *s,
}
if (audio_bug(__func__, voice_size && !max_voices)) {
dolog("drv=`%s' voice_size=%zu max_voices=0\n",
drv->name, voice_size);
dolog ("drv=`%s' voice_size=%d max_voices=0\n",
drv->name, voice_size);
}
}
static void glue (audio_pcm_hw_free_resources_, TYPE) (HW *hw)
{
g_free(hw->buf_emul);
g_free(HWBUF.buffer);
HWBUF.buffer = NULL;
HWBUF.size = 0;
g_free (HWBUF);
HWBUF = NULL;
}
static void glue(audio_pcm_hw_alloc_resources_, TYPE)(HW *hw)
@@ -84,67 +83,56 @@ static void glue(audio_pcm_hw_alloc_resources_, TYPE)(HW *hw)
dolog("Attempted to allocate empty buffer\n");
}
HWBUF.buffer = g_new0(st_sample, samples);
HWBUF.size = samples;
HWBUF.pos = 0;
HWBUF = g_malloc0(sizeof(STSampleBuffer) + sizeof(st_sample) * samples);
HWBUF->size = samples;
} else {
HWBUF.buffer = NULL;
HWBUF.size = 0;
HWBUF = NULL;
}
}
static void glue (audio_pcm_sw_free_resources_, TYPE) (SW *sw)
{
g_free(sw->resample_buf.buffer);
sw->resample_buf.buffer = NULL;
sw->resample_buf.size = 0;
g_free (sw->buf);
if (sw->rate) {
st_rate_stop (sw->rate);
}
sw->buf = NULL;
sw->rate = NULL;
}
static int glue (audio_pcm_sw_alloc_resources_, TYPE) (SW *sw)
{
HW *hw = sw->hw;
uint64_t samples;
int samples;
if (!glue(audio_get_pdo_, TYPE)(sw->s->dev)->mixing_engine) {
return 0;
}
samples = muldiv64(HWBUF.size, sw->info.freq, hw->info.freq);
if (samples == 0) {
uint64_t f_fe_min;
uint64_t f_be = (uint32_t)hw->info.freq;
#ifdef DAC
samples = ((int64_t) sw->HWBUF->size << 32) / sw->ratio;
#else
samples = (int64_t)sw->HWBUF->size * sw->ratio >> 32;
#endif
/* f_fe_min = ceil(1 [frames] * f_be [Hz] / size_be [frames]) */
f_fe_min = (f_be + HWBUF.size - 1) / HWBUF.size;
qemu_log_mask(LOG_UNIMP,
AUDIO_CAP ": The guest selected a " NAME " sample rate"
" of %d Hz for %s. Only sample rates >= %" PRIu64 " Hz"
" are supported.\n",
sw->info.freq, sw->name, f_fe_min);
sw->buf = audio_calloc(__func__, samples, sizeof(struct st_sample));
if (!sw->buf) {
dolog ("Could not allocate buffer for `%s' (%d samples)\n",
SW_NAME (sw), samples);
return -1;
}
/*
* Allocate one additional audio frame that is needed for upsampling
* if the resample buffer size is small. For large buffer sizes take
* care of overflows and truncation.
*/
samples = samples < SIZE_MAX ? samples + 1 : SIZE_MAX;
sw->resample_buf.buffer = g_new0(st_sample, samples);
sw->resample_buf.size = samples;
sw->resample_buf.pos = 0;
#ifdef DAC
sw->rate = st_rate_start(sw->info.freq, hw->info.freq);
sw->rate = st_rate_start (sw->info.freq, sw->hw->info.freq);
#else
sw->rate = st_rate_start(hw->info.freq, sw->info.freq);
sw->rate = st_rate_start (sw->hw->info.freq, sw->info.freq);
#endif
if (!sw->rate) {
g_free (sw->buf);
sw->buf = NULL;
return -1;
}
return 0;
}
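
The f_fe_min line above uses the standard integer ceiling-division idiom matching the ceil() in its comment: (a + b - 1) / b. Worked numbers for a 44100 Hz backend with a 1024-frame buffer:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t f_be = 44100;                      /* backend rate, Hz */
        uint64_t size_be = 1024;                    /* backend buffer, frames */
        uint64_t f_fe_min = (f_be + size_be - 1) / size_be;

        /* 44100 / 1024 = 43.07..., so the minimum frontend rate is 44 Hz */
        printf("%" PRIu64 "\n", f_fe_min);
        return 0;
    }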
@@ -161,8 +149,11 @@ static int glue (audio_pcm_sw_init_, TYPE) (
sw->hw = hw;
sw->active = 0;
#ifdef DAC
sw->ratio = ((int64_t) sw->hw->info.freq << 32) / sw->info.freq;
sw->total_hw_samples_mixed = 0;
sw->empty = 1;
#else
sw->ratio = ((int64_t) sw->info.freq << 32) / sw->hw->info.freq;
#endif
if (sw->info.is_float) {
@@ -273,11 +264,13 @@ static HW *glue(audio_pcm_hw_add_new_, TYPE)(AudioState *s,
return NULL;
}
/*
* Since glue(s->nb_hw_voices_, TYPE) is != 0, glue(drv->voice_size_, TYPE)
* is guaranteed to be != 0. See the audio_init_nb_voices_* functions.
*/
hw = g_malloc0(glue(drv->voice_size_, TYPE));
hw = audio_calloc(__func__, 1, glue(drv->voice_size_, TYPE));
if (!hw) {
dolog ("Can not allocate voice `%s' size %d\n",
drv->name, glue (drv->voice_size_, TYPE));
return NULL;
}
hw->s = s;
hw->pcm_ops = drv->pcm_ops;
@@ -362,10 +355,6 @@ AudiodevPerDirectionOptions *glue(audio_get_pdo_, TYPE)(Audiodev *dev)
case AUDIODEV_DRIVER_PA:
return qapi_AudiodevPaPerDirectionOptions_base(dev->u.pa.TYPE);
#endif
#ifdef CONFIG_AUDIO_PIPEWIRE
case AUDIODEV_DRIVER_PIPEWIRE:
return qapi_AudiodevPipewirePerDirectionOptions_base(dev->u.pipewire.TYPE);
#endif
#ifdef CONFIG_AUDIO_SDL
case AUDIODEV_DRIVER_SDL:
return qapi_AudiodevSdlPerDirectionOptions_base(dev->u.sdl.TYPE);
@@ -429,28 +418,33 @@ static SW *glue(audio_pcm_create_voice_pair_, TYPE)(
hw_as = *as;
}
sw = g_new0(SW, 1);
sw = audio_calloc(__func__, 1, sizeof(*sw));
if (!sw) {
dolog ("Could not allocate soft voice `%s' (%zu bytes)\n",
sw_name ? sw_name : "unknown", sizeof (*sw));
goto err1;
}
sw->s = s;
hw = glue(audio_pcm_hw_add_, TYPE)(s, &hw_as);
if (!hw) {
dolog("Could not create a backend for voice `%s'\n", sw_name);
goto err1;
goto err2;
}
glue (audio_pcm_hw_add_sw_, TYPE) (hw, sw);
if (glue (audio_pcm_sw_init_, TYPE) (sw, hw, sw_name, as)) {
goto err2;
goto err3;
}
return sw;
err2:
err3:
glue (audio_pcm_hw_del_sw_, TYPE) (sw);
glue (audio_pcm_hw_gc_, TYPE) (&hw);
err2:
g_free (sw);
err1:
g_free(sw);
return NULL;
}
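
The err1/err2/err3 renumbering above is the classic C goto-unwind ladder: each label releases exactly what was acquired before the failing step, in reverse order. A minimal standalone illustration of the pattern (invented names):

    #include <stdlib.h>

    struct voice { void *sw, *hw; };

    static int voice_open(struct voice *v)
    {
        v->sw = malloc(64);
        if (!v->sw) {
            goto err1;
        }
        v->hw = malloc(64);
        if (!v->hw) {
            goto err2;      /* undo sw, then fall through to err1 */
        }
        return 0;

    err2:
        free(v->sw);
        v->sw = NULL;
    err1:
        return -1;
    }

    int main(void)
    {
        struct voice v = { 0 };
        if (voice_open(&v) == 0) {
            free(v.hw);
            free(v.sw);
        }
        return 0;
    }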
@@ -521,8 +515,8 @@ SW *glue (AUD_open_, TYPE) (
HW *hw = sw->hw;
if (!hw) {
dolog("Internal logic error: voice `%s' has no backend\n",
SW_NAME(sw));
dolog ("Internal logic error voice `%s' has no hardware store\n",
SW_NAME (sw));
goto fail;
}
@@ -533,6 +527,7 @@ SW *glue (AUD_open_, TYPE) (
} else {
sw = glue(audio_pcm_create_voice_pair_, TYPE)(s, name, as);
if (!sw) {
dolog ("Failed to create voice `%s'\n", name);
return NULL;
}
}

View File

@@ -29,11 +29,7 @@
#include "qemu/timer.h"
#include "qemu/dbus.h"
#ifdef G_OS_UNIX
#include <gio/gunixfdlist.h>
#endif
#include "ui/dbus.h"
#include "ui/dbus-display1.h"
#define AUDIO_CAP "dbus"
@@ -47,7 +43,6 @@
typedef struct DBusAudio {
GDBusObjectManagerServer *server;
bool p2p;
GDBusObjectSkeleton *audio;
QemuDBusDisplay1Audio *iface;
GHashTable *out_listeners;
@@ -448,15 +443,12 @@ listener_in_vanished_cb(GDBusConnection *connection,
static gboolean
dbus_audio_register_listener(AudioState *s,
GDBusMethodInvocation *invocation,
#ifdef G_OS_UNIX
GUnixFDList *fd_list,
#endif
GVariant *arg_listener,
bool out)
{
DBusAudio *da = s->drv_opaque;
const char *sender =
da->p2p ? "p2p" : g_dbus_method_invocation_get_sender(invocation);
const char *sender = g_dbus_method_invocation_get_sender(invocation);
g_autoptr(GDBusConnection) listener_conn = NULL;
g_autoptr(GError) err = NULL;
g_autoptr(GSocket) socket = NULL;
@@ -477,11 +469,6 @@ dbus_audio_register_listener(AudioState *s,
return DBUS_METHOD_INVOCATION_HANDLED;
}
#ifdef G_OS_WIN32
if (!dbus_win32_import_socket(invocation, arg_listener, &fd)) {
return DBUS_METHOD_INVOCATION_HANDLED;
}
#else
fd = g_unix_fd_list_get(fd_list, g_variant_get_handle(arg_listener), &err);
if (err) {
g_dbus_method_invocation_return_error(invocation,
@@ -491,7 +478,6 @@ dbus_audio_register_listener(AudioState *s,
err->message);
return DBUS_METHOD_INVOCATION_HANDLED;
}
#endif
socket = g_socket_new_from_fd(fd, &err);
if (err) {
@@ -500,28 +486,15 @@ dbus_audio_register_listener(AudioState *s,
DBUS_DISPLAY_ERROR_FAILED,
"Couldn't make a socket: %s",
err->message);
#ifdef G_OS_WIN32
closesocket(fd);
#else
close(fd);
#endif
return DBUS_METHOD_INVOCATION_HANDLED;
}
socket_conn = g_socket_connection_factory_create_connection(socket);
if (out) {
qemu_dbus_display1_audio_complete_register_out_listener(
da->iface, invocation
#ifdef G_OS_UNIX
, NULL
#endif
);
da->iface, invocation, NULL);
} else {
qemu_dbus_display1_audio_complete_register_in_listener(
da->iface, invocation
#ifdef G_OS_UNIX
, NULL
#endif
);
da->iface, invocation, NULL);
}
listener_conn =
@@ -599,36 +572,26 @@ dbus_audio_register_listener(AudioState *s,
static gboolean
dbus_audio_register_out_listener(AudioState *s,
GDBusMethodInvocation *invocation,
#ifdef G_OS_UNIX
GUnixFDList *fd_list,
#endif
GVariant *arg_listener)
{
return dbus_audio_register_listener(s, invocation,
#ifdef G_OS_UNIX
fd_list,
#endif
arg_listener, true);
fd_list, arg_listener, true);
}
static gboolean
dbus_audio_register_in_listener(AudioState *s,
GDBusMethodInvocation *invocation,
#ifdef G_OS_UNIX
GUnixFDList *fd_list,
#endif
GVariant *arg_listener)
{
return dbus_audio_register_listener(s, invocation,
#ifdef G_OS_UNIX
fd_list,
#endif
arg_listener, false);
fd_list, arg_listener, false);
}
static void
dbus_audio_set_server(AudioState *s, GDBusObjectManagerServer *server, bool p2p)
dbus_audio_set_server(AudioState *s, GDBusObjectManagerServer *server)
{
DBusAudio *da = s->drv_opaque;
@@ -636,7 +599,6 @@ dbus_audio_set_server(AudioState *s, GDBusObjectManagerServer *server, bool p2p)
g_assert(!da->server);
da->server = g_object_ref(server);
da->p2p = p2p;
da->audio = g_dbus_object_skeleton_new(DBUS_DISPLAY1_AUDIO_PATH);
da->iface = qemu_dbus_display1_audio_skeleton_new();

View File

@@ -1,5 +1,5 @@
system_ss.add([spice_headers, files('audio.c')])
system_ss.add(files(
softmmu_ss.add([spice_headers, files('audio.c')])
softmmu_ss.add(files(
'audio-hmp-cmds.c',
'audio_legacy.c',
'mixeng.c',
@@ -8,8 +8,8 @@ system_ss.add(files(
'wavcapture.c',
))
system_ss.add(when: coreaudio, if_true: files('coreaudio.m'))
system_ss.add(when: dsound, if_true: files('dsoundaudio.c', 'audio_win_int.c'))
softmmu_ss.add(when: coreaudio, if_true: files('coreaudio.m'))
softmmu_ss.add(when: dsound, if_true: files('dsoundaudio.c', 'audio_win_int.c'))
audio_modules = {}
foreach m : [
@@ -19,7 +19,6 @@ foreach m : [
['sdl', sdl, files('sdlaudio.c')],
['jack', jack, files('jackaudio.c')],
['sndio', sndio, files('sndioaudio.c')],
['pipewire', pipewire, files('pwaudio.c')],
['spice', spice, files('spiceaudio.c')]
]
if m[1].found()
@@ -31,7 +30,7 @@ endforeach
if dbus_display
module_ss = ss.source_set()
module_ss.add(when: [gio, pixman], if_true: files('dbusaudio.c'))
module_ss.add(when: gio, if_true: files('dbusaudio.c'))
audio_modules += {'dbus': module_ss}
endif

View File

@@ -414,7 +414,12 @@ struct rate {
*/
void *st_rate_start (int inrate, int outrate)
{
struct rate *rate = g_new0(struct rate, 1);
struct rate *rate = audio_calloc(__func__, 1, sizeof(*rate));
if (!rate) {
dolog ("Could not allocate resampler (%zu bytes)\n", sizeof (*rate));
return NULL;
}
rate->opos = 0;
@@ -440,86 +445,6 @@ void st_rate_stop (void *opaque)
g_free (opaque);
}
/**
* st_rate_frames_out() - returns the number of frames the resampling code
* generates from frames_in frames
*
* @opaque: pointer to struct rate
* @frames_in: number of frames
*
* When upsampling, there may be more than one correct result. In this case,
* the function returns the maximum number of output frames the resampling
* code can generate.
*/
uint32_t st_rate_frames_out(void *opaque, uint32_t frames_in)
{
struct rate *rate = opaque;
uint64_t opos_end, opos_delta;
uint32_t ipos_end;
uint32_t frames_out;
if (rate->opos_inc == 1ULL << 32) {
return frames_in;
}
/* no output frame without at least one input frame */
if (!frames_in) {
return 0;
}
/* last frame read was at rate->ipos - 1 */
ipos_end = rate->ipos - 1 + frames_in;
opos_end = (uint64_t)ipos_end << 32;
/* last frame written was at rate->opos - rate->opos_inc */
if (opos_end + rate->opos_inc <= rate->opos) {
return 0;
}
opos_delta = opos_end - rate->opos + rate->opos_inc;
frames_out = opos_delta / rate->opos_inc;
return opos_delta % rate->opos_inc ? frames_out : frames_out - 1;
}
/**
* st_rate_frames_in() - returns the number of frames needed to
* get frames_out frames after resampling
*
* @opaque: pointer to struct rate
* @frames_out: number of frames
*
* When downsampling, there may be more than one correct result. In this
* case, the function returns the maximum number of input frames needed.
*/
uint32_t st_rate_frames_in(void *opaque, uint32_t frames_out)
{
struct rate *rate = opaque;
uint64_t opos_start, opos_end;
uint32_t ipos_start, ipos_end;
if (rate->opos_inc == 1ULL << 32) {
return frames_out;
}
if (frames_out) {
opos_start = rate->opos;
ipos_start = rate->ipos;
} else {
uint64_t offset;
/* add offset = ceil(opos_inc) to opos and ipos to avoid an underflow */
offset = (rate->opos_inc + (1ULL << 32) - 1) & ~((1ULL << 32) - 1);
opos_start = rate->opos + offset;
ipos_start = rate->ipos + (offset >> 32);
}
/* last frame written was at opos_start - rate->opos_inc */
opos_end = opos_start - rate->opos_inc + rate->opos_inc * frames_out;
ipos_end = (opos_end >> 32) + 1;
/* last frame read was at ipos_start - 1 */
return ipos_end + 1 > ipos_start ? ipos_end + 1 - ipos_start : 0;
}
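
The two removed helpers bound the resampler's frame counts exactly, including the fractional phase carried in struct rate (hence the "maximum number" wording in the comments). Ignoring that phase, the first-order bound is just the rate ratio rounded up; a hedged standalone approximation:

    #include <stdint.h>
    #include <stdio.h>

    /* Rough upper bound on output frames for an inrate -> outrate
     * resampler; st_rate_frames_out() additionally accounts for the
     * current ipos/opos phase. */
    static uint32_t frames_out_approx(uint32_t frames_in, uint32_t inrate,
                                      uint32_t outrate)
    {
        return ((uint64_t)frames_in * outrate + inrate - 1) / inrate;
    }

    int main(void)
    {
        /* 100 frames at 44100 Hz upsampled to 48000 Hz:
         * ceil(100 * 48000 / 44100) = 109 output frames at most */
        printf("%u\n", frames_out_approx(100, 44100, 48000));
        return 0;
    }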
void mixeng_clear (struct st_sample *buf, int len)
{
memset (buf, 0, len * sizeof (struct st_sample));

View File

@@ -52,8 +52,6 @@ void st_rate_flow(void *opaque, st_sample *ibuf, st_sample *obuf,
void st_rate_flow_mix(void *opaque, st_sample *ibuf, st_sample *obuf,
size_t *isamp, size_t *osamp);
void st_rate_stop (void *opaque);
uint32_t st_rate_frames_out(void *opaque, uint32_t frames_in);
uint32_t st_rate_frames_in(void *opaque, uint32_t frames_out);
void mixeng_clear (struct st_sample *buf, int len);
void mixeng_volume (struct st_sample *buf, int len, struct mixeng_volume *vol);

View File

@@ -1,857 +0,0 @@
/*
* QEMU PipeWire audio driver
*
* Copyright (c) 2023 Red Hat Inc.
*
* Author: Dorinda Bassey <dbassey@redhat.com>
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
#include "qemu/module.h"
#include "audio.h"
#include <errno.h>
#include "qemu/error-report.h"
#include <spa/param/audio/format-utils.h>
#include <spa/utils/ringbuffer.h>
#include <spa/utils/result.h>
#include <spa/param/props.h>
#include <pipewire/pipewire.h>
#include "trace.h"
#define AUDIO_CAP "pipewire"
#define RINGBUFFER_SIZE (1u << 22)
#define RINGBUFFER_MASK (RINGBUFFER_SIZE - 1)
#include "audio_int.h"
typedef struct pwvolume {
uint32_t channels;
float values[SPA_AUDIO_MAX_CHANNELS];
} pwvolume;
typedef struct pwaudio {
Audiodev *dev;
struct pw_thread_loop *thread_loop;
struct pw_context *context;
struct pw_core *core;
struct spa_hook core_listener;
int last_seq, pending_seq, error;
} pwaudio;
typedef struct PWVoice {
pwaudio *g;
struct pw_stream *stream;
struct spa_hook stream_listener;
struct spa_audio_info_raw info;
uint32_t highwater_mark;
uint32_t frame_size, req;
struct spa_ringbuffer ring;
uint8_t buffer[RINGBUFFER_SIZE];
pwvolume volume;
bool muted;
} PWVoice;
typedef struct PWVoiceOut {
HWVoiceOut hw;
PWVoice v;
} PWVoiceOut;
typedef struct PWVoiceIn {
HWVoiceIn hw;
PWVoice v;
} PWVoiceIn;
#define PW_VOICE_IN(v) ((PWVoiceIn *)v)
#define PW_VOICE_OUT(v) ((PWVoiceOut *)v)
static void
stream_destroy(void *data)
{
PWVoice *v = (PWVoice *) data;
spa_hook_remove(&v->stream_listener);
v->stream = NULL;
}
/* output data processing function to read data from the buffer */
static void
playback_on_process(void *data)
{
PWVoice *v = data;
void *p;
struct pw_buffer *b;
struct spa_buffer *buf;
uint32_t req, index, n_bytes;
int32_t avail;
assert(v->stream);
/* obtain a buffer to read from */
b = pw_stream_dequeue_buffer(v->stream);
if (b == NULL) {
error_report("out of buffers: %s", strerror(errno));
return;
}
buf = b->buffer;
p = buf->datas[0].data;
if (p == NULL) {
return;
}
/* calculate the total number of bytes to read from the buffer */
req = b->requested * v->frame_size;
if (req == 0) {
req = v->req;
}
n_bytes = SPA_MIN(req, buf->datas[0].maxsize);
/* get the number of bytes available to read from the buffer */
avail = spa_ringbuffer_get_read_index(&v->ring, &index);
if (avail <= 0) {
PWVoiceOut *vo = container_of(data, PWVoiceOut, v);
audio_pcm_info_clear_buf(&vo->hw.info, p, n_bytes / v->frame_size);
} else {
if ((uint32_t) avail < n_bytes) {
/*
* PipeWire immediately calls this callback again if we provide
* less than n_bytes. Then audio_pcm_info_clear_buf() fills the
* rest of the buffer with silence.
*/
n_bytes = avail;
}
spa_ringbuffer_read_data(&v->ring,
v->buffer, RINGBUFFER_SIZE,
index & RINGBUFFER_MASK, p, n_bytes);
index += n_bytes;
spa_ringbuffer_read_update(&v->ring, index);
}
buf->datas[0].chunk->offset = 0;
buf->datas[0].chunk->stride = v->frame_size;
buf->datas[0].chunk->size = n_bytes;
/* queue the buffer for playback */
pw_stream_queue_buffer(v->stream, b);
}
/* input data processing function to write captured data into the buffer */
static void
capture_on_process(void *data)
{
PWVoice *v = (PWVoice *) data;
void *p;
struct pw_buffer *b;
struct spa_buffer *buf;
int32_t filled;
uint32_t index, offs, n_bytes;
assert(v->stream);
/* obtain a buffer */
b = pw_stream_dequeue_buffer(v->stream);
if (b == NULL) {
error_report("out of buffers: %s", strerror(errno));
return;
}
/* Write data into buffer */
buf = b->buffer;
p = buf->datas[0].data;
if (p == NULL) {
return;
}
offs = SPA_MIN(buf->datas[0].chunk->offset, buf->datas[0].maxsize);
n_bytes = SPA_MIN(buf->datas[0].chunk->size, buf->datas[0].maxsize - offs);
filled = spa_ringbuffer_get_write_index(&v->ring, &index);
if (filled < 0) {
error_report("%p: underrun write:%u filled:%d", p, index, filled);
} else {
if ((uint32_t) filled + n_bytes > RINGBUFFER_SIZE) {
error_report("%p: overrun write:%u filled:%d + size:%u > max:%u",
p, index, filled, n_bytes, RINGBUFFER_SIZE);
}
}
spa_ringbuffer_write_data(&v->ring,
v->buffer, RINGBUFFER_SIZE,
index & RINGBUFFER_MASK,
SPA_PTROFF(p, offs, void), n_bytes);
index += n_bytes;
spa_ringbuffer_write_update(&v->ring, index);
/* give the buffer back to the stream */
pw_stream_queue_buffer(v->stream, b);
}
static void
on_stream_state_changed(void *data, enum pw_stream_state old,
enum pw_stream_state state, const char *error)
{
PWVoice *v = (PWVoice *) data;
trace_pw_state_changed(pw_stream_get_node_id(v->stream),
pw_stream_state_as_string(state));
}
static const struct pw_stream_events capture_stream_events = {
PW_VERSION_STREAM_EVENTS,
.destroy = stream_destroy,
.state_changed = on_stream_state_changed,
.process = capture_on_process
};
static const struct pw_stream_events playback_stream_events = {
PW_VERSION_STREAM_EVENTS,
.destroy = stream_destroy,
.state_changed = on_stream_state_changed,
.process = playback_on_process
};
static size_t
qpw_read(HWVoiceIn *hw, void *data, size_t len)
{
PWVoiceIn *pw = (PWVoiceIn *) hw;
PWVoice *v = &pw->v;
pwaudio *c = v->g;
const char *error = NULL;
size_t l;
int32_t avail;
uint32_t index;
pw_thread_loop_lock(c->thread_loop);
if (pw_stream_get_state(v->stream, &error) != PW_STREAM_STATE_STREAMING) {
/* wait for stream to become ready */
l = 0;
goto done_unlock;
}
/* get the number of bytes available to read from the buffer */
avail = spa_ringbuffer_get_read_index(&v->ring, &index);
trace_pw_read(avail, index, len);
if (avail < (int32_t) len) {
len = avail;
}
spa_ringbuffer_read_data(&v->ring,
v->buffer, RINGBUFFER_SIZE,
index & RINGBUFFER_MASK, data, len);
index += len;
spa_ringbuffer_read_update(&v->ring, index);
l = len;
done_unlock:
pw_thread_loop_unlock(c->thread_loop);
return l;
}
static size_t qpw_buffer_get_free(HWVoiceOut *hw)
{
PWVoiceOut *pw = (PWVoiceOut *)hw;
PWVoice *v = &pw->v;
pwaudio *c = v->g;
const char *error = NULL;
int32_t filled, avail;
uint32_t index;
pw_thread_loop_lock(c->thread_loop);
if (pw_stream_get_state(v->stream, &error) != PW_STREAM_STATE_STREAMING) {
/* wait for stream to become ready */
avail = 0;
goto done_unlock;
}
filled = spa_ringbuffer_get_write_index(&v->ring, &index);
avail = v->highwater_mark - filled;
done_unlock:
pw_thread_loop_unlock(c->thread_loop);
return avail;
}
static size_t
qpw_write(HWVoiceOut *hw, void *data, size_t len)
{
PWVoiceOut *pw = (PWVoiceOut *) hw;
PWVoice *v = &pw->v;
pwaudio *c = v->g;
const char *error = NULL;
int32_t filled, avail;
uint32_t index;
pw_thread_loop_lock(c->thread_loop);
if (pw_stream_get_state(v->stream, &error) != PW_STREAM_STATE_STREAMING) {
/* wait for stream to become ready */
len = 0;
goto done_unlock;
}
filled = spa_ringbuffer_get_write_index(&v->ring, &index);
avail = v->highwater_mark - filled;
trace_pw_write(filled, avail, index, len);
if (len > avail) {
len = avail;
}
if (filled < 0) {
error_report("%p: underrun write:%u filled:%d", pw, index, filled);
} else {
if ((uint32_t) filled + len > RINGBUFFER_SIZE) {
error_report("%p: overrun write:%u filled:%d + size:%zu > max:%u",
pw, index, filled, len, RINGBUFFER_SIZE);
}
}
spa_ringbuffer_write_data(&v->ring,
v->buffer, RINGBUFFER_SIZE,
index & RINGBUFFER_MASK, data, len);
index += len;
spa_ringbuffer_write_update(&v->ring, index);
done_unlock:
pw_thread_loop_unlock(c->thread_loop);
return len;
}
static int
audfmt_to_pw(AudioFormat fmt, int endianness)
{
int format;
switch (fmt) {
case AUDIO_FORMAT_S8:
format = SPA_AUDIO_FORMAT_S8;
break;
case AUDIO_FORMAT_U8:
format = SPA_AUDIO_FORMAT_U8;
break;
case AUDIO_FORMAT_S16:
format = endianness ? SPA_AUDIO_FORMAT_S16_BE : SPA_AUDIO_FORMAT_S16_LE;
break;
case AUDIO_FORMAT_U16:
format = endianness ? SPA_AUDIO_FORMAT_U16_BE : SPA_AUDIO_FORMAT_U16_LE;
break;
case AUDIO_FORMAT_S32:
format = endianness ? SPA_AUDIO_FORMAT_S32_BE : SPA_AUDIO_FORMAT_S32_LE;
break;
case AUDIO_FORMAT_U32:
format = endianness ? SPA_AUDIO_FORMAT_U32_BE : SPA_AUDIO_FORMAT_U32_LE;
break;
case AUDIO_FORMAT_F32:
format = endianness ? SPA_AUDIO_FORMAT_F32_BE : SPA_AUDIO_FORMAT_F32_LE;
break;
default:
dolog("Internal logic error: Bad audio format %d\n", fmt);
format = SPA_AUDIO_FORMAT_U8;
break;
}
return format;
}
static AudioFormat
pw_to_audfmt(enum spa_audio_format fmt, int *endianness,
uint32_t *sample_size)
{
switch (fmt) {
case SPA_AUDIO_FORMAT_S8:
*sample_size = 1;
return AUDIO_FORMAT_S8;
case SPA_AUDIO_FORMAT_U8:
*sample_size = 1;
return AUDIO_FORMAT_U8;
case SPA_AUDIO_FORMAT_S16_BE:
*sample_size = 2;
*endianness = 1;
return AUDIO_FORMAT_S16;
case SPA_AUDIO_FORMAT_S16_LE:
*sample_size = 2;
*endianness = 0;
return AUDIO_FORMAT_S16;
case SPA_AUDIO_FORMAT_U16_BE:
*sample_size = 2;
*endianness = 1;
return AUDIO_FORMAT_U16;
case SPA_AUDIO_FORMAT_U16_LE:
*sample_size = 2;
*endianness = 0;
return AUDIO_FORMAT_U16;
case SPA_AUDIO_FORMAT_S32_BE:
*sample_size = 4;
*endianness = 1;
return AUDIO_FORMAT_S32;
case SPA_AUDIO_FORMAT_S32_LE:
*sample_size = 4;
*endianness = 0;
return AUDIO_FORMAT_S32;
case SPA_AUDIO_FORMAT_U32_BE:
*sample_size = 4;
*endianness = 1;
return AUDIO_FORMAT_U32;
case SPA_AUDIO_FORMAT_U32_LE:
*sample_size = 4;
*endianness = 0;
return AUDIO_FORMAT_U32;
case SPA_AUDIO_FORMAT_F32_BE:
*sample_size = 4;
*endianness = 1;
return AUDIO_FORMAT_F32;
case SPA_AUDIO_FORMAT_F32_LE:
*sample_size = 4;
*endianness = 0;
return AUDIO_FORMAT_F32;
default:
*sample_size = 1;
dolog("Internal logic error: Bad spa_audio_format %d\n", fmt);
return AUDIO_FORMAT_U8;
}
}
static int
qpw_stream_new(pwaudio *c, PWVoice *v, const char *stream_name,
const char *name, enum spa_direction dir)
{
int res;
uint32_t n_params;
const struct spa_pod *params[2];
uint8_t buffer[1024];
struct spa_pod_builder b;
uint64_t buf_samples;
struct pw_properties *props;
props = pw_properties_new(NULL, NULL);
if (!props) {
error_report("Failed to create PW properties: %s", g_strerror(errno));
return -1;
}
/* 75% of the timer period for faster updates */
buf_samples = (uint64_t)v->g->dev->timer_period * v->info.rate
* 3 / 4 / 1000000;
pw_properties_setf(props, PW_KEY_NODE_LATENCY, "%" PRIu64 "/%u",
buf_samples, v->info.rate);
trace_pw_period(buf_samples, v->info.rate);
if (name) {
pw_properties_set(props, PW_KEY_TARGET_OBJECT, name);
}
v->stream = pw_stream_new(c->core, stream_name, props);
if (v->stream == NULL) {
error_report("Failed to create PW stream: %s", g_strerror(errno));
return -1;
}
if (dir == SPA_DIRECTION_INPUT) {
pw_stream_add_listener(v->stream,
&v->stream_listener, &capture_stream_events, v);
} else {
pw_stream_add_listener(v->stream,
&v->stream_listener, &playback_stream_events, v);
}
n_params = 0;
spa_pod_builder_init(&b, buffer, sizeof(buffer));
params[n_params++] = spa_format_audio_raw_build(&b,
SPA_PARAM_EnumFormat,
&v->info);
/* connect the stream to a sink or source */
res = pw_stream_connect(v->stream,
dir ==
SPA_DIRECTION_INPUT ? PW_DIRECTION_INPUT :
PW_DIRECTION_OUTPUT, PW_ID_ANY,
PW_STREAM_FLAG_AUTOCONNECT |
PW_STREAM_FLAG_INACTIVE |
PW_STREAM_FLAG_MAP_BUFFERS |
PW_STREAM_FLAG_RT_PROCESS, params, n_params);
if (res < 0) {
error_report("Failed to connect PW stream: %s", g_strerror(errno));
pw_stream_destroy(v->stream);
return -1;
}
return 0;
}
static void
qpw_set_position(uint32_t channels, uint32_t position[SPA_AUDIO_MAX_CHANNELS])
{
memcpy(position, (uint32_t[SPA_AUDIO_MAX_CHANNELS]) { SPA_AUDIO_CHANNEL_UNKNOWN, },
sizeof(uint32_t) * SPA_AUDIO_MAX_CHANNELS);
/*
* TODO: This currently assumes that the only frontend supporting more than 2
* channels is the usb-audio. We will need some means to set channel
* order when a new frontend gains multi-channel support.
*/
switch (channels) {
case 8:
position[6] = SPA_AUDIO_CHANNEL_SL;
position[7] = SPA_AUDIO_CHANNEL_SR;
/* fallthrough */
case 6:
position[2] = SPA_AUDIO_CHANNEL_FC;
position[3] = SPA_AUDIO_CHANNEL_LFE;
position[4] = SPA_AUDIO_CHANNEL_RL;
position[5] = SPA_AUDIO_CHANNEL_RR;
/* fallthrough */
case 2:
position[0] = SPA_AUDIO_CHANNEL_FL;
position[1] = SPA_AUDIO_CHANNEL_FR;
break;
case 1:
position[0] = SPA_AUDIO_CHANNEL_MONO;
break;
default:
dolog("Internal error: unsupported channel count %d\n", channels);
}
}
static int
qpw_init_out(HWVoiceOut *hw, struct audsettings *as, void *drv_opaque)
{
PWVoiceOut *pw = (PWVoiceOut *) hw;
PWVoice *v = &pw->v;
struct audsettings obt_as = *as;
pwaudio *c = v->g = drv_opaque;
AudiodevPipewireOptions *popts = &c->dev->u.pipewire;
AudiodevPipewirePerDirectionOptions *ppdo = popts->out;
int r;
pw_thread_loop_lock(c->thread_loop);
v->info.format = audfmt_to_pw(as->fmt, as->endianness);
v->info.channels = as->nchannels;
qpw_set_position(as->nchannels, v->info.position);
v->info.rate = as->freq;
obt_as.fmt =
pw_to_audfmt(v->info.format, &obt_as.endianness, &v->frame_size);
v->frame_size *= as->nchannels;
v->req = (uint64_t)c->dev->timer_period * v->info.rate
* 1 / 2 / 1000000 * v->frame_size;
/* call the function that creates a new stream for playback */
r = qpw_stream_new(c, v, ppdo->stream_name ? : c->dev->id,
ppdo->name, SPA_DIRECTION_OUTPUT);
if (r < 0) {
pw_thread_loop_unlock(c->thread_loop);
return -1;
}
/* report the audio format we support */
audio_pcm_init_info(&hw->info, &obt_as);
/* report the buffer size to qemu */
hw->samples = audio_buffer_frames(
qapi_AudiodevPipewirePerDirectionOptions_base(ppdo), &obt_as, 46440);
v->highwater_mark = MIN(RINGBUFFER_SIZE,
(ppdo->has_latency ? ppdo->latency : 46440)
* (uint64_t)v->info.rate / 1000000 * v->frame_size);
pw_thread_loop_unlock(c->thread_loop);
return 0;
}
static int
qpw_init_in(HWVoiceIn *hw, struct audsettings *as, void *drv_opaque)
{
PWVoiceIn *pw = (PWVoiceIn *) hw;
PWVoice *v = &pw->v;
struct audsettings obt_as = *as;
pwaudio *c = v->g = drv_opaque;
AudiodevPipewireOptions *popts = &c->dev->u.pipewire;
AudiodevPipewirePerDirectionOptions *ppdo = popts->in;
int r;
pw_thread_loop_lock(c->thread_loop);
v->info.format = audfmt_to_pw(as->fmt, as->endianness);
v->info.channels = as->nchannels;
qpw_set_position(as->nchannels, v->info.position);
v->info.rate = as->freq;
obt_as.fmt =
pw_to_audfmt(v->info.format, &obt_as.endianness, &v->frame_size);
v->frame_size *= as->nchannels;
/* call the function that creates a new stream for recording */
r = qpw_stream_new(c, v, ppdo->stream_name ? : c->dev->id,
ppdo->name, SPA_DIRECTION_INPUT);
if (r < 0) {
pw_thread_loop_unlock(c->thread_loop);
return -1;
}
/* report the audio format we support */
audio_pcm_init_info(&hw->info, &obt_as);
/* report the buffer size to qemu */
hw->samples = audio_buffer_frames(
qapi_AudiodevPipewirePerDirectionOptions_base(ppdo), &obt_as, 46440);
pw_thread_loop_unlock(c->thread_loop);
return 0;
}
static void
qpw_voice_fini(PWVoice *v)
{
pwaudio *c = v->g;
if (!v->stream) {
return;
}
pw_thread_loop_lock(c->thread_loop);
pw_stream_destroy(v->stream);
v->stream = NULL;
pw_thread_loop_unlock(c->thread_loop);
}
static void
qpw_fini_out(HWVoiceOut *hw)
{
qpw_voice_fini(&PW_VOICE_OUT(hw)->v);
}
static void
qpw_fini_in(HWVoiceIn *hw)
{
qpw_voice_fini(&PW_VOICE_IN(hw)->v);
}
static void
qpw_voice_set_enabled(PWVoice *v, bool enable)
{
pwaudio *c = v->g;
pw_thread_loop_lock(c->thread_loop);
pw_stream_set_active(v->stream, enable);
pw_thread_loop_unlock(c->thread_loop);
}
static void
qpw_enable_out(HWVoiceOut *hw, bool enable)
{
qpw_voice_set_enabled(&PW_VOICE_OUT(hw)->v, enable);
}
static void
qpw_enable_in(HWVoiceIn *hw, bool enable)
{
qpw_voice_set_enabled(&PW_VOICE_IN(hw)->v, enable);
}
static void
qpw_voice_set_volume(PWVoice *v, Volume *vol)
{
pwaudio *c = v->g;
int i, ret;
pw_thread_loop_lock(c->thread_loop);
v->volume.channels = vol->channels;
for (i = 0; i < vol->channels; ++i) {
v->volume.values[i] = (float)vol->vol[i] / 255;
}
ret = pw_stream_set_control(v->stream,
SPA_PROP_channelVolumes, v->volume.channels, v->volume.values, 0);
trace_pw_vol(ret == 0 ? "success" : "failed");
v->muted = vol->mute;
float val = v->muted ? 1.f : 0.f;
ret = pw_stream_set_control(v->stream, SPA_PROP_mute, 1, &val, 0);
pw_thread_loop_unlock(c->thread_loop);
}
static void
qpw_volume_out(HWVoiceOut *hw, Volume *vol)
{
qpw_voice_set_volume(&PW_VOICE_OUT(hw)->v, vol);
}
static void
qpw_volume_in(HWVoiceIn *hw, Volume *vol)
{
qpw_voice_set_volume(&PW_VOICE_IN(hw)->v, vol);
}
static int wait_resync(pwaudio *pw)
{
int res;
pw->pending_seq = pw_core_sync(pw->core, PW_ID_CORE, pw->pending_seq);
while (true) {
pw_thread_loop_wait(pw->thread_loop);
res = pw->error;
if (res < 0) {
pw->error = 0;
return res;
}
if (pw->pending_seq == pw->last_seq) {
break;
}
}
return 0;
}
static void
on_core_error(void *data, uint32_t id, int seq, int res, const char *message)
{
pwaudio *pw = data;
error_report("error id:%u seq:%d res:%d (%s): %s",
id, seq, res, spa_strerror(res), message);
/* stop and exit the thread loop */
pw_thread_loop_signal(pw->thread_loop, FALSE);
}
static void
on_core_done(void *data, uint32_t id, int seq)
{
pwaudio *pw = data;
assert(id == PW_ID_CORE);
pw->last_seq = seq;
if (pw->pending_seq == seq) {
/* stop and exit the thread loop */
pw_thread_loop_signal(pw->thread_loop, FALSE);
}
}
static const struct pw_core_events core_events = {
PW_VERSION_CORE_EVENTS,
.done = on_core_done,
.error = on_core_error,
};
static void *
qpw_audio_init(Audiodev *dev)
{
g_autofree pwaudio *pw = g_new0(pwaudio, 1);
assert(dev->driver == AUDIODEV_DRIVER_PIPEWIRE);
trace_pw_audio_init();
pw_init(NULL, NULL);
pw->dev = dev;
pw->thread_loop = pw_thread_loop_new("PipeWire thread loop", NULL);
if (pw->thread_loop == NULL) {
error_report("Could not create PipeWire loop: %s", g_strerror(errno));
goto fail;
}
pw->context =
pw_context_new(pw_thread_loop_get_loop(pw->thread_loop), NULL, 0);
if (pw->context == NULL) {
error_report("Could not create PipeWire context: %s", g_strerror(errno));
goto fail;
}
if (pw_thread_loop_start(pw->thread_loop) < 0) {
error_report("Could not start PipeWire loop: %s", g_strerror(errno));
goto fail;
}
pw_thread_loop_lock(pw->thread_loop);
pw->core = pw_context_connect(pw->context, NULL, 0);
if (pw->core == NULL) {
pw_thread_loop_unlock(pw->thread_loop);
goto fail;
}
if (pw_core_add_listener(pw->core, &pw->core_listener,
&core_events, pw) < 0) {
pw_thread_loop_unlock(pw->thread_loop);
goto fail;
}
if (wait_resync(pw) < 0) {
pw_thread_loop_unlock(pw->thread_loop);
}
pw_thread_loop_unlock(pw->thread_loop);
return g_steal_pointer(&pw);
fail:
AUD_log(AUDIO_CAP, "Failed to initialize PW context");
if (pw->thread_loop) {
pw_thread_loop_stop(pw->thread_loop);
}
g_clear_pointer(&pw->context, pw_context_destroy);
g_clear_pointer(&pw->thread_loop, pw_thread_loop_destroy);
return NULL;
}
static void
qpw_audio_fini(void *opaque)
{
pwaudio *pw = opaque;
if (pw->thread_loop) {
pw_thread_loop_stop(pw->thread_loop);
}
if (pw->core) {
spa_hook_remove(&pw->core_listener);
spa_zero(pw->core_listener);
pw_core_disconnect(pw->core);
}
if (pw->context) {
pw_context_destroy(pw->context);
}
pw_thread_loop_destroy(pw->thread_loop);
g_free(pw);
}
static struct audio_pcm_ops qpw_pcm_ops = {
.init_out = qpw_init_out,
.fini_out = qpw_fini_out,
.write = qpw_write,
.buffer_get_free = qpw_buffer_get_free,
.run_buffer_out = audio_generic_run_buffer_out,
.enable_out = qpw_enable_out,
.volume_out = qpw_volume_out,
.volume_in = qpw_volume_in,
.init_in = qpw_init_in,
.fini_in = qpw_fini_in,
.read = qpw_read,
.run_buffer_in = audio_generic_run_buffer_in,
.enable_in = qpw_enable_in
};
static struct audio_driver pw_audio_driver = {
.name = "pipewire",
.descr = "http://www.pipewire.org/",
.init = qpw_audio_init,
.fini = qpw_audio_fini,
.pcm_ops = &qpw_pcm_ops,
.can_be_default = 1,
.max_voices_out = INT_MAX,
.max_voices_in = INT_MAX,
.voice_size_out = sizeof(PWVoiceOut),
.voice_size_in = sizeof(PWVoiceIn),
};
static void
register_audio_pw(void)
{
audio_driver_register(&pw_audio_driver);
}
type_init(register_audio_pw);
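
Throughout the removed driver, the SPA ring-buffer read/write indices grow monotonically and are masked (index & RINGBUFFER_MASK) only when touching storage, so write minus read gives the fill level even across wraparound; that is what the filled/avail checks above rely on. A minimal sketch of the same scheme in plain C (no SPA dependency, single-threaded for brevity):

    #include <stdint.h>
    #include <stdio.h>

    #define RB_SIZE (1u << 4)          /* power of two, tiny for the demo */
    #define RB_MASK (RB_SIZE - 1)

    static uint8_t storage[RB_SIZE];
    static uint32_t rd, wr;            /* free-running, wrap via mask */

    static uint32_t rb_fill(void) { return wr - rd; }

    static void rb_write(const uint8_t *data, uint32_t len)
    {
        for (uint32_t i = 0; i < len; i++) {
            storage[(wr + i) & RB_MASK] = data[i];
        }
        wr += len;                     /* publish after copying */
    }

    static void rb_read(uint8_t *data, uint32_t len)
    {
        for (uint32_t i = 0; i < len; i++) {
            data[i] = storage[(rd + i) & RB_MASK];
        }
        rd += len;
    }

    int main(void)
    {
        uint8_t out[12];
        rb_write((const uint8_t *)"abcdefgh", 8);
        rb_read(out, 8);
        rb_write((const uint8_t *)"ijklmnopqrst", 12); /* crosses the end, masked wrap */
        rb_read(out, 12);
        printf("fill=%u last=%c\n", rb_fill(), out[11]); /* fill=0 last=t */
        return 0;
    }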

View File

@@ -40,6 +40,8 @@ void NAME (void *opaque, struct st_sample *ibuf, struct st_sample *obuf,
int64_t t;
#endif
ilast = rate->ilast;
istart = ibuf;
iend = ibuf + *isamp;
@@ -57,17 +59,15 @@ void NAME (void *opaque, struct st_sample *ibuf, struct st_sample *obuf,
return;
}
/* without input samples, there's nothing to do */
if (ibuf >= iend) {
*osamp = 0;
return;
}
while (obuf < oend) {
ilast = rate->ilast;
while (true) {
/* Safety catch to make sure we have input samples. */
if (ibuf >= iend) {
break;
}
/* read as many input samples so that ipos > opos */
while (rate->ipos <= (rate->opos >> 32)) {
ilast = *ibuf++;
rate->ipos++;
@@ -78,11 +78,6 @@ void NAME (void *opaque, struct st_sample *ibuf, struct st_sample *obuf,
}
}
/* make sure that the next output sample can be written */
if (obuf >= oend) {
break;
}
icur = *ibuf;
/* wrap ipos and opos around long before they overflow */
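
For context, this template implements the linear-interpolation resampler: opos is a 32.32 position in the input stream, input samples are pulled until ipos passes the integer part of opos, and ilast/icur are blended by the fractional part. A compacted one-shot sketch of the core (mono int16 buffers; the real template works on st_sample pairs and keeps ipos/opos/ilast in struct rate so it can stream across calls):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    static size_t resample_linear(const int16_t *in, size_t nin,
                                  int16_t *out, size_t nout_max,
                                  uint32_t inrate, uint32_t outrate)
    {
        uint64_t opos = 0;                               /* 32.32 position */
        uint64_t opos_inc = ((uint64_t)inrate << 32) / outrate;
        size_t o;

        for (o = 0; o < nout_max; o++) {
            size_t i = opos >> 32;                       /* integer part */
            uint32_t frac = (uint32_t)opos;              /* fractional part */
            if (i + 1 >= nin) {
                break;                                   /* need two samples */
            }
            int64_t a = in[i], b = in[i + 1];
            out[o] = (int16_t)(a + (((b - a) * (int64_t)frac) >> 32));
            opos += opos_inc;
        }
        return o;
    }

    int main(void)
    {
        int16_t in[8] = { 0, 1000, 2000, 3000, 4000, 5000, 6000, 7000 };
        int16_t out[16];
        size_t n = resample_linear(in, 8, out, 16, 44100, 48000);
        printf("produced %zu frames, out[1]=%d\n", n, out[1]);
        return 0;
    }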

View File

@@ -18,14 +18,6 @@ dbus_audio_register(const char *s, const char *dir) "sender = %s, dir = %s"
dbus_audio_put_buffer_out(size_t len) "len = %zu"
dbus_audio_read(size_t len) "len = %zu"
# pwaudio.c
pw_state_changed(int nodeid, const char *s) "node id: %d stream state: %s"
pw_read(int32_t avail, uint32_t index, size_t len) "avail=%d index=%u len=%zu"
pw_write(int32_t filled, int32_t avail, uint32_t index, size_t len) "filled=%d avail=%d index=%u len=%zu"
pw_vol(const char *ret) "set volume: %s"
pw_period(uint64_t quantum, uint32_t rate) "period =%" PRIu64 "/%u"
pw_audio_init(void) "Initialize PipeWire context"
# audio.c
audio_timer_start(int interval) "interval %d ms"
audio_timer_stop(void) ""

View File

@@ -30,6 +30,7 @@
#include "qapi/qapi-visit-authz.h"
#include "qapi/qmp/qjson.h"
#include "qapi/qmp/qobject.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qobject-input-visitor.h"

View File

@@ -59,19 +59,6 @@ struct CryptoDevBackendBuiltin {
CryptoDevBackendBuiltinSession *sessions[MAX_NUM_SESSIONS];
};
static void cryptodev_builtin_init_akcipher(CryptoDevBackend *backend)
{
QCryptoAkCipherOptions opts;
opts.alg = QCRYPTO_AKCIPHER_ALG_RSA;
opts.u.rsa.padding_alg = QCRYPTO_RSA_PADDING_ALG_RAW;
if (qcrypto_akcipher_supports(&opts)) {
backend->conf.crypto_services |=
(1u << QCRYPTODEV_BACKEND_SERVICE_AKCIPHER);
backend->conf.akcipher_algo = 1u << VIRTIO_CRYPTO_AKCIPHER_RSA;
}
}
static void cryptodev_builtin_init(
CryptoDevBackend *backend, Error **errp)
{
@@ -85,18 +72,21 @@ static void cryptodev_builtin_init(
return;
}
cc = cryptodev_backend_new_client();
cc = cryptodev_backend_new_client(
"cryptodev-builtin", NULL);
cc->info_str = g_strdup_printf("cryptodev-builtin0");
cc->queue_index = 0;
cc->type = QCRYPTODEV_BACKEND_TYPE_BUILTIN;
cc->type = CRYPTODEV_BACKEND_TYPE_BUILTIN;
backend->conf.peers.ccs[0] = cc;
backend->conf.crypto_services =
1u << QCRYPTODEV_BACKEND_SERVICE_CIPHER |
1u << QCRYPTODEV_BACKEND_SERVICE_HASH |
1u << QCRYPTODEV_BACKEND_SERVICE_MAC;
1u << VIRTIO_CRYPTO_SERVICE_CIPHER |
1u << VIRTIO_CRYPTO_SERVICE_HASH |
1u << VIRTIO_CRYPTO_SERVICE_MAC |
1u << VIRTIO_CRYPTO_SERVICE_AKCIPHER;
backend->conf.cipher_algo_l = 1u << VIRTIO_CRYPTO_CIPHER_AES_CBC;
backend->conf.hash_algo = 1u << VIRTIO_CRYPTO_HASH_SHA1;
backend->conf.akcipher_algo = 1u << VIRTIO_CRYPTO_AKCIPHER_RSA;
/*
* Set the Maximum length of crypto request.
* Why this value? Just avoid to overflow when
@@ -105,7 +95,6 @@ static void cryptodev_builtin_init(
backend->conf.max_size = LONG_MAX - sizeof(CryptoDevBackendOpInfo);
backend->conf.max_cipher_key_len = CRYPTODEV_BUITLIN_MAX_CIPHER_KEY_LEN;
backend->conf.max_auth_key_len = CRYPTODEV_BUITLIN_MAX_AUTH_KEY_LEN;
cryptodev_builtin_init_akcipher(backend);
cryptodev_backend_set_ready(backend, true);
}
@@ -539,14 +528,17 @@ static int cryptodev_builtin_asym_operation(
static int cryptodev_builtin_operation(
CryptoDevBackend *backend,
CryptoDevBackendOpInfo *op_info)
CryptoDevBackendOpInfo *op_info,
uint32_t queue_index,
CryptoDevCompletionFunc cb,
void *opaque)
{
CryptoDevBackendBuiltin *builtin =
CRYPTODEV_BACKEND_BUILTIN(backend);
CryptoDevBackendBuiltinSession *sess;
CryptoDevBackendSymOpInfo *sym_op_info;
CryptoDevBackendAsymOpInfo *asym_op_info;
QCryptodevBackendAlgType algtype = op_info->algtype;
enum CryptoDevBackendAlgType algtype = op_info->algtype;
int status = -VIRTIO_CRYPTO_ERR;
Error *local_error = NULL;
@@ -558,11 +550,11 @@ static int cryptodev_builtin_operation(
}
sess = builtin->sessions[op_info->session_id];
if (algtype == QCRYPTODEV_BACKEND_ALG_SYM) {
if (algtype == CRYPTODEV_BACKEND_ALG_SYM) {
sym_op_info = op_info->u.sym_op_info;
status = cryptodev_builtin_sym_operation(sess, sym_op_info,
&local_error);
} else if (algtype == QCRYPTODEV_BACKEND_ALG_ASYM) {
} else if (algtype == CRYPTODEV_BACKEND_ALG_ASYM) {
asym_op_info = op_info->u.asym_op_info;
status = cryptodev_builtin_asym_operation(sess, op_info->op_code,
asym_op_info, &local_error);
@@ -571,8 +563,8 @@ static int cryptodev_builtin_operation(
if (local_error) {
error_report_err(local_error);
}
if (op_info->cb) {
op_info->cb(op_info->opaque, status);
if (cb) {
cb(opaque, status);
}
return 0;
}
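The signature change running through this file moves the completion callback and its opaque pointer between explicit do_op parameters and fields stored in op_info; either way the contract is that the operation's status is reported exactly once through the callback. A minimal sketch of that pattern, with illustrative names rather than the QEMU cryptodev types:

typedef void (*completion_fn)(void *opaque, int status);

/* One variant stores the callback in the request ... */
struct op {
    int opcode;
    completion_fn cb;
    void *opaque;
};

/* ... the other passes it alongside; both fire it exactly once. */
static int do_op_sketch(struct op *op, completion_fn cb, void *opaque)
{
    int status = 0;                      /* stand-in for the real work */
    if (cb) {
        cb(opaque, status);              /* explicit-parameter variant */
    } else if (op->cb) {
        op->cb(op->opaque, status);      /* stored-in-request variant */
    }
    return 0;
}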


@@ -1,54 +0,0 @@
/*
* HMP commands related to cryptodev
*
* Copyright (c) 2023 Bytedance.Inc
*
* Authors:
* zhenwei pi <pizhenwei@bytedance.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or
* (at your option) any later version.
*/
#include "qemu/osdep.h"
#include "monitor/hmp.h"
#include "monitor/monitor.h"
#include "qapi/qapi-commands-cryptodev.h"
#include "qapi/qmp/qdict.h"
void hmp_info_cryptodev(Monitor *mon, const QDict *qdict)
{
QCryptodevInfoList *il;
QCryptodevBackendServiceTypeList *sl;
QCryptodevBackendClientList *cl;
for (il = qmp_query_cryptodev(NULL); il; il = il->next) {
g_autofree char *services = NULL;
QCryptodevInfo *info = il->value;
char *tmp_services;
/* build a string like 'service=[akcipher|mac|hash|cipher]' */
for (sl = info->service; sl; sl = sl->next) {
const char *service = QCryptodevBackendServiceType_str(sl->value);
if (!services) {
services = g_strdup(service);
} else {
tmp_services = g_strjoin("|", services, service, NULL);
g_free(services);
services = tmp_services;
}
}
monitor_printf(mon, "%s: service=[%s]\n", info->id, services);
for (cl = info->client; cl; cl = cl->next) {
QCryptodevBackendClient *client = cl->value;
monitor_printf(mon, " queue %" PRIu32 ": type=%s\n",
client->queue,
QCryptodevBackendType_str(client->type));
}
}
qapi_free_QCryptodevInfoList(il);
}
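The handler above builds its 'service=[...]' string by repeatedly joining onto an accumulator. That idiom, isolated (join_names is a hypothetical helper; g_strjoin and g_strdup are GLib):

#include <glib.h>

static char *join_names(const char *const *names, size_t n)
{
    char *acc = NULL;
    for (size_t i = 0; i < n; i++) {
        if (!acc) {
            acc = g_strdup(names[i]);
        } else {
            char *tmp = g_strjoin("|", acc, names[i], NULL);
            g_free(acc);    /* drop the old accumulator */
            acc = tmp;
        }
    }
    return acc;             /* caller frees; e.g. "cipher|hash|mac" */
}

This is quadratic in the number of names, which is harmless for a handful of services; a GString would be the usual choice for longer lists.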


@@ -223,14 +223,14 @@ static void cryptodev_lkcf_init(CryptoDevBackend *backend, Error **errp)
return;
}
cc = cryptodev_backend_new_client();
cc = cryptodev_backend_new_client("cryptodev-lkcf", NULL);
cc->info_str = g_strdup_printf("cryptodev-lkcf0");
cc->queue_index = 0;
cc->type = QCRYPTODEV_BACKEND_TYPE_LKCF;
cc->type = CRYPTODEV_BACKEND_TYPE_LKCF;
backend->conf.peers.ccs[0] = cc;
backend->conf.crypto_services =
1u << QCRYPTODEV_BACKEND_SERVICE_AKCIPHER;
1u << VIRTIO_CRYPTO_SERVICE_AKCIPHER;
backend->conf.akcipher_algo = 1u << VIRTIO_CRYPTO_AKCIPHER_RSA;
lkcf->running = true;
@@ -469,12 +469,15 @@ static void *cryptodev_lkcf_worker(void *arg)
static int cryptodev_lkcf_operation(
CryptoDevBackend *backend,
CryptoDevBackendOpInfo *op_info)
CryptoDevBackendOpInfo *op_info,
uint32_t queue_index,
CryptoDevCompletionFunc cb,
void *opaque)
{
CryptoDevBackendLKCF *lkcf =
CRYPTODEV_BACKEND_LKCF(backend);
CryptoDevBackendLKCFSession *sess;
QCryptodevBackendAlgType algtype = op_info->algtype;
enum CryptoDevBackendAlgType algtype = op_info->algtype;
CryptoDevLKCFTask *task;
if (op_info->session_id >= MAX_SESSIONS ||
@@ -485,15 +488,15 @@ static int cryptodev_lkcf_operation(
}
sess = lkcf->sess[op_info->session_id];
if (algtype != QCRYPTODEV_BACKEND_ALG_ASYM) {
if (algtype != CRYPTODEV_BACKEND_ALG_ASYM) {
error_report("algtype not supported: %u", algtype);
return -VIRTIO_CRYPTO_NOTSUPP;
}
task = g_new0(CryptoDevLKCFTask, 1);
task->op_info = op_info;
task->cb = op_info->cb;
task->opaque = op_info->opaque;
task->cb = cb;
task->opaque = opaque;
task->sess = sess;
task->lkcf = lkcf;
task->status = -VIRTIO_CRYPTO_ERR;


@@ -67,7 +67,7 @@ cryptodev_vhost_user_get_vhost(
{
CryptoDevBackendVhostUser *s =
CRYPTODEV_BACKEND_VHOST_USER(b);
assert(cc->type == QCRYPTODEV_BACKEND_TYPE_VHOST_USER);
assert(cc->type == CRYPTODEV_BACKEND_TYPE_VHOST_USER);
assert(queue < MAX_CRYPTO_QUEUE_NUM);
return s->vhost_crypto[queue];
@@ -198,11 +198,12 @@ static void cryptodev_vhost_user_init(
s->opened = true;
for (i = 0; i < queues; i++) {
cc = cryptodev_backend_new_client();
cc = cryptodev_backend_new_client(
"cryptodev-vhost-user", NULL);
cc->info_str = g_strdup_printf("cryptodev-vhost-user%zu to %s ",
i, chr->label);
cc->queue_index = i;
cc->type = QCRYPTODEV_BACKEND_TYPE_VHOST_USER;
cc->type = CRYPTODEV_BACKEND_TYPE_VHOST_USER;
backend->conf.peers.ccs[i] = cc;
@@ -221,9 +222,9 @@ static void cryptodev_vhost_user_init(
cryptodev_vhost_user_event, NULL, s, NULL, true);
backend->conf.crypto_services =
1u << QCRYPTODEV_BACKEND_SERVICE_CIPHER |
1u << QCRYPTODEV_BACKEND_SERVICE_HASH |
1u << QCRYPTODEV_BACKEND_SERVICE_MAC;
1u << VIRTIO_CRYPTO_SERVICE_CIPHER |
1u << VIRTIO_CRYPTO_SERVICE_HASH |
1u << VIRTIO_CRYPTO_SERVICE_MAC;
backend->conf.cipher_algo_l = 1u << VIRTIO_CRYPTO_CIPHER_AES_CBC;
backend->conf.hash_algo = 1u << VIRTIO_CRYPTO_HASH_SHA1;
@@ -232,9 +233,9 @@ static void cryptodev_vhost_user_init(
backend->conf.max_auth_key_len = VHOST_USER_MAX_AUTH_KEY_LEN;
}
static int64_t cryptodev_vhost_user_crypto_create_session(
static int64_t cryptodev_vhost_user_sym_create_session(
CryptoDevBackend *backend,
CryptoDevBackendSessionInfo *sess_info,
CryptoDevBackendSymSessionInfo *sess_info,
uint32_t queue_index, Error **errp)
{
CryptoDevBackendClient *cc =
@@ -266,17 +267,18 @@ static int cryptodev_vhost_user_create_session(
void *opaque)
{
uint32_t op_code = sess_info->op_code;
CryptoDevBackendSymSessionInfo *sym_sess_info;
int64_t ret;
Error *local_error = NULL;
int status;
switch (op_code) {
case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION:
case VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION:
case VIRTIO_CRYPTO_HASH_CREATE_SESSION:
case VIRTIO_CRYPTO_MAC_CREATE_SESSION:
case VIRTIO_CRYPTO_AEAD_CREATE_SESSION:
ret = cryptodev_vhost_user_crypto_create_session(backend, sess_info,
sym_sess_info = &sess_info->u.sym_sess_info;
ret = cryptodev_vhost_user_sym_create_session(backend, sym_sess_info,
queue_index, &local_error);
break;


@@ -28,6 +28,7 @@
#ifdef CONFIG_VHOST_CRYPTO
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio-crypto.h"
#include "sysemu/cryptodev-vhost-user.h"
@@ -127,7 +128,7 @@ cryptodev_get_vhost(CryptoDevBackendClient *cc,
switch (cc->type) {
#if defined(CONFIG_VHOST_USER) && defined(CONFIG_LINUX)
case QCRYPTODEV_BACKEND_TYPE_VHOST_USER:
case CRYPTODEV_BACKEND_TYPE_VHOST_USER:
vhost_crypto = cryptodev_vhost_user_get_vhost(cc, b, queue);
break;
#endif
@@ -195,7 +196,7 @@ int cryptodev_vhost_start(VirtIODevice *dev, int total_queues)
* because vhost user doesn't interrupt masking/unmasking
* properly.
*/
if (cc->type == QCRYPTODEV_BACKEND_TYPE_VHOST_USER) {
if (cc->type == CRYPTODEV_BACKEND_TYPE_VHOST_USER) {
dev->use_guest_notifier_mask = false;
}
}


@@ -23,92 +23,29 @@
#include "qemu/osdep.h"
#include "sysemu/cryptodev.h"
#include "sysemu/stats.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-cryptodev.h"
#include "qapi/qapi-types-stats.h"
#include "qapi/visitor.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qom/object_interfaces.h"
#include "hw/virtio/virtio-crypto.h"
#define SYM_ENCRYPT_OPS_STR "sym-encrypt-ops"
#define SYM_DECRYPT_OPS_STR "sym-decrypt-ops"
#define SYM_ENCRYPT_BYTES_STR "sym-encrypt-bytes"
#define SYM_DECRYPT_BYTES_STR "sym-decrypt-bytes"
#define ASYM_ENCRYPT_OPS_STR "asym-encrypt-ops"
#define ASYM_DECRYPT_OPS_STR "asym-decrypt-ops"
#define ASYM_SIGN_OPS_STR "asym-sign-ops"
#define ASYM_VERIFY_OPS_STR "asym-verify-ops"
#define ASYM_ENCRYPT_BYTES_STR "asym-encrypt-bytes"
#define ASYM_DECRYPT_BYTES_STR "asym-decrypt-bytes"
#define ASYM_SIGN_BYTES_STR "asym-sign-bytes"
#define ASYM_VERIFY_BYTES_STR "asym-verify-bytes"
typedef struct StatsArgs {
union StatsResultsType {
StatsResultList **stats;
StatsSchemaList **schema;
} result;
strList *names;
Error **errp;
} StatsArgs;
static QTAILQ_HEAD(, CryptoDevBackendClient) crypto_clients;
static int qmp_query_cryptodev_foreach(Object *obj, void *data)
{
CryptoDevBackend *backend;
QCryptodevInfoList **infolist = data;
uint32_t services, i;
if (!object_dynamic_cast(obj, TYPE_CRYPTODEV_BACKEND)) {
return 0;
}
QCryptodevInfo *info = g_new0(QCryptodevInfo, 1);
info->id = g_strdup(object_get_canonical_path_component(obj));
backend = CRYPTODEV_BACKEND(obj);
services = backend->conf.crypto_services;
for (i = 0; i < QCRYPTODEV_BACKEND_SERVICE__MAX; i++) {
if (services & (1 << i)) {
QAPI_LIST_PREPEND(info->service, i);
}
}
for (i = 0; i < backend->conf.peers.queues; i++) {
CryptoDevBackendClient *cc = backend->conf.peers.ccs[i];
QCryptodevBackendClient *client = g_new0(QCryptodevBackendClient, 1);
client->queue = cc->queue_index;
client->type = cc->type;
QAPI_LIST_PREPEND(info->client, client);
}
QAPI_LIST_PREPEND(*infolist, info);
return 0;
}
QCryptodevInfoList *qmp_query_cryptodev(Error **errp)
{
QCryptodevInfoList *list = NULL;
Object *objs = container_get(object_get_root(), "/objects");
object_child_foreach(objs, qmp_query_cryptodev_foreach, &list);
return list;
}
CryptoDevBackendClient *cryptodev_backend_new_client(void)
CryptoDevBackendClient *
cryptodev_backend_new_client(const char *model,
const char *name)
{
CryptoDevBackendClient *cc;
cc = g_new0(CryptoDevBackendClient, 1);
cc->model = g_strdup(model);
if (name) {
cc->name = g_strdup(name);
}
QTAILQ_INSERT_TAIL(&crypto_clients, cc, next);
return cc;
@@ -118,6 +55,8 @@ void cryptodev_backend_free_client(
CryptoDevBackendClient *cc)
{
QTAILQ_REMOVE(&crypto_clients, cc, next);
g_free(cc->name);
g_free(cc->model);
g_free(cc->info_str);
g_free(cc);
}
@@ -132,9 +71,6 @@ void cryptodev_backend_cleanup(
if (bc->cleanup) {
bc->cleanup(backend, errp);
}
g_free(backend->sym_stat);
g_free(backend->asym_stat);
}
int cryptodev_backend_create_session(
@@ -171,111 +107,38 @@ int cryptodev_backend_close_session(
static int cryptodev_backend_operation(
CryptoDevBackend *backend,
CryptoDevBackendOpInfo *op_info)
CryptoDevBackendOpInfo *op_info,
uint32_t queue_index,
CryptoDevCompletionFunc cb,
void *opaque)
{
CryptoDevBackendClass *bc =
CRYPTODEV_BACKEND_GET_CLASS(backend);
if (bc->do_op) {
return bc->do_op(backend, op_info);
return bc->do_op(backend, op_info, queue_index, cb, opaque);
}
return -VIRTIO_CRYPTO_NOTSUPP;
}
static int cryptodev_backend_account(CryptoDevBackend *backend,
CryptoDevBackendOpInfo *op_info)
int cryptodev_backend_crypto_operation(
CryptoDevBackend *backend,
void *opaque1,
uint32_t queue_index,
CryptoDevCompletionFunc cb, void *opaque2)
{
enum QCryptodevBackendAlgType algtype = op_info->algtype;
int len;
VirtIOCryptoReq *req = opaque1;
CryptoDevBackendOpInfo *op_info = &req->op_info;
enum CryptoDevBackendAlgType algtype = req->flags;
if (algtype == QCRYPTODEV_BACKEND_ALG_ASYM) {
CryptoDevBackendAsymOpInfo *asym_op_info = op_info->u.asym_op_info;
len = asym_op_info->src_len;
switch (op_info->op_code) {
case VIRTIO_CRYPTO_AKCIPHER_ENCRYPT:
CryptodevAsymStatIncEncrypt(backend, len);
break;
case VIRTIO_CRYPTO_AKCIPHER_DECRYPT:
CryptodevAsymStatIncDecrypt(backend, len);
break;
case VIRTIO_CRYPTO_AKCIPHER_SIGN:
CryptodevAsymStatIncSign(backend, len);
break;
case VIRTIO_CRYPTO_AKCIPHER_VERIFY:
CryptodevAsymStatIncVerify(backend, len);
break;
default:
return -VIRTIO_CRYPTO_NOTSUPP;
}
} else if (algtype == QCRYPTODEV_BACKEND_ALG_SYM) {
CryptoDevBackendSymOpInfo *sym_op_info = op_info->u.sym_op_info;
len = sym_op_info->src_len;
switch (op_info->op_code) {
case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
CryptodevSymStatIncEncrypt(backend, len);
break;
case VIRTIO_CRYPTO_CIPHER_DECRYPT:
CryptodevSymStatIncDecrypt(backend, len);
break;
default:
return -VIRTIO_CRYPTO_NOTSUPP;
}
} else {
if ((algtype != CRYPTODEV_BACKEND_ALG_SYM)
&& (algtype != CRYPTODEV_BACKEND_ALG_ASYM)) {
error_report("Unsupported cryptodev alg type: %" PRIu32 "", algtype);
return -VIRTIO_CRYPTO_NOTSUPP;
}
return len;
}
static void cryptodev_backend_throttle_timer_cb(void *opaque)
{
CryptoDevBackend *backend = (CryptoDevBackend *)opaque;
CryptoDevBackendOpInfo *op_info, *tmpop;
int ret;
QTAILQ_FOREACH_SAFE(op_info, &backend->opinfos, next, tmpop) {
QTAILQ_REMOVE(&backend->opinfos, op_info, next);
ret = cryptodev_backend_account(backend, op_info);
if (ret < 0) {
op_info->cb(op_info->opaque, ret);
continue;
}
throttle_account(&backend->ts, true, ret);
cryptodev_backend_operation(backend, op_info);
if (throttle_enabled(&backend->tc) &&
throttle_schedule_timer(&backend->ts, &backend->tt, true)) {
break;
}
}
}
int cryptodev_backend_crypto_operation(
CryptoDevBackend *backend,
CryptoDevBackendOpInfo *op_info)
{
int ret;
if (!throttle_enabled(&backend->tc)) {
goto do_account;
}
if (throttle_schedule_timer(&backend->ts, &backend->tt, true) ||
!QTAILQ_EMPTY(&backend->opinfos)) {
QTAILQ_INSERT_TAIL(&backend->opinfos, op_info, next);
return 0;
}
do_account:
ret = cryptodev_backend_account(backend, op_info);
if (ret < 0) {
return ret;
}
throttle_account(&backend->ts, true, ret);
return cryptodev_backend_operation(backend, op_info);
return cryptodev_backend_operation(backend, op_info, queue_index,
cb, opaque2);
}
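One side of this hunk gives cryptodev_backend_crypto_operation() a throttling front end: when the budget is exhausted (or requests are already queued) the op_info is put on a list and drained later from the timer callback, otherwise it is accounted and submitted immediately. The gate itself amounts to leaky-bucket accounting, sketched below in simplified form; this is illustrative, not QEMU's ThrottleState API:

#include <stdint.h>
#include <stdbool.h>

struct bucket {
    double tokens;      /* currently available budget */
    double rate;        /* refill per second (avg ops or bytes) */
    int64_t last_ns;    /* last refill timestamp */
};

static bool throttle_allow(struct bucket *b, double cost, int64_t now_ns)
{
    /* refill since last check; real code also clamps to a burst cap */
    b->tokens += (now_ns - b->last_ns) * 1e-9 * b->rate;
    b->last_ns = now_ns;
    if (b->tokens < cost) {
        return false;   /* caller queues the request for the timer cb */
    }
    b->tokens -= cost;  /* the equivalent of throttle_account() */
    return true;
}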
static void
@@ -306,111 +169,15 @@ cryptodev_backend_set_queues(Object *obj, Visitor *v, const char *name,
backend->conf.peers.queues = value;
}
static void cryptodev_backend_set_throttle(CryptoDevBackend *backend, int field,
uint64_t value, Error **errp)
{
uint64_t orig = backend->tc.buckets[field].avg;
bool enabled = throttle_enabled(&backend->tc);
if (orig == value) {
return;
}
backend->tc.buckets[field].avg = value;
if (!throttle_enabled(&backend->tc)) {
throttle_timers_destroy(&backend->tt);
cryptodev_backend_throttle_timer_cb(backend); /* drain opinfos */
return;
}
if (!throttle_is_valid(&backend->tc, errp)) {
backend->tc.buckets[field].avg = orig; /* revert change */
return;
}
if (!enabled) {
throttle_init(&backend->ts);
throttle_timers_init(&backend->tt, qemu_get_aio_context(),
QEMU_CLOCK_REALTIME,
cryptodev_backend_throttle_timer_cb, /* FIXME */
cryptodev_backend_throttle_timer_cb, backend);
}
throttle_config(&backend->ts, QEMU_CLOCK_REALTIME, &backend->tc);
}
static void cryptodev_backend_get_bps(Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj);
uint64_t value = backend->tc.buckets[THROTTLE_BPS_TOTAL].avg;
visit_type_uint64(v, name, &value, errp);
}
static void cryptodev_backend_set_bps(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj);
uint64_t value;
if (!visit_type_uint64(v, name, &value, errp)) {
return;
}
cryptodev_backend_set_throttle(backend, THROTTLE_BPS_TOTAL, value, errp);
}
static void cryptodev_backend_get_ops(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj);
uint64_t value = backend->tc.buckets[THROTTLE_OPS_TOTAL].avg;
visit_type_uint64(v, name, &value, errp);
}
static void cryptodev_backend_set_ops(Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj);
uint64_t value;
if (!visit_type_uint64(v, name, &value, errp)) {
return;
}
cryptodev_backend_set_throttle(backend, THROTTLE_OPS_TOTAL, value, errp);
}
static void
cryptodev_backend_complete(UserCreatable *uc, Error **errp)
{
CryptoDevBackend *backend = CRYPTODEV_BACKEND(uc);
CryptoDevBackendClass *bc = CRYPTODEV_BACKEND_GET_CLASS(uc);
uint32_t services;
uint64_t value;
QTAILQ_INIT(&backend->opinfos);
value = backend->tc.buckets[THROTTLE_OPS_TOTAL].avg;
cryptodev_backend_set_throttle(backend, THROTTLE_OPS_TOTAL, value, errp);
value = backend->tc.buckets[THROTTLE_BPS_TOTAL].avg;
cryptodev_backend_set_throttle(backend, THROTTLE_BPS_TOTAL, value, errp);
if (bc->init) {
bc->init(backend, errp);
}
services = backend->conf.crypto_services;
if (services & (1 << QCRYPTODEV_BACKEND_SERVICE_CIPHER)) {
backend->sym_stat = g_new0(CryptodevBackendSymStat, 1);
}
if (services & (1 << QCRYPTODEV_BACKEND_SERVICE_AKCIPHER)) {
backend->asym_stat = g_new0(CryptodevBackendAsymStat, 1);
}
}
void cryptodev_backend_set_used(CryptoDevBackend *backend, bool used)
@@ -441,12 +208,8 @@ cryptodev_backend_can_be_deleted(UserCreatable *uc)
static void cryptodev_backend_instance_init(Object *obj)
{
CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj);
/* Initialize devices' queues property to 1 */
object_property_set_int(obj, "queues", 1, NULL);
throttle_config_init(&backend->tc);
}
static void cryptodev_backend_finalize(Object *obj)
@@ -454,137 +217,6 @@ static void cryptodev_backend_finalize(Object *obj)
CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj);
cryptodev_backend_cleanup(backend, NULL);
if (throttle_enabled(&backend->tc)) {
throttle_timers_destroy(&backend->tt);
}
}
static StatsList *cryptodev_backend_stats_add(const char *name, int64_t *val,
StatsList *stats_list)
{
Stats *stats = g_new0(Stats, 1);
stats->name = g_strdup(name);
stats->value = g_new0(StatsValue, 1);
stats->value->type = QTYPE_QNUM;
stats->value->u.scalar = *val;
QAPI_LIST_PREPEND(stats_list, stats);
return stats_list;
}
static int cryptodev_backend_stats_query(Object *obj, void *data)
{
StatsArgs *stats_args = data;
StatsResultList **stats_results = stats_args->result.stats;
StatsList *stats_list = NULL;
StatsResult *entry;
CryptoDevBackend *backend;
CryptodevBackendSymStat *sym_stat;
CryptodevBackendAsymStat *asym_stat;
if (!object_dynamic_cast(obj, TYPE_CRYPTODEV_BACKEND)) {
return 0;
}
backend = CRYPTODEV_BACKEND(obj);
sym_stat = backend->sym_stat;
if (sym_stat) {
stats_list = cryptodev_backend_stats_add(SYM_ENCRYPT_OPS_STR,
&sym_stat->encrypt_ops, stats_list);
stats_list = cryptodev_backend_stats_add(SYM_DECRYPT_OPS_STR,
&sym_stat->decrypt_ops, stats_list);
stats_list = cryptodev_backend_stats_add(SYM_ENCRYPT_BYTES_STR,
&sym_stat->encrypt_bytes, stats_list);
stats_list = cryptodev_backend_stats_add(SYM_DECRYPT_BYTES_STR,
&sym_stat->decrypt_bytes, stats_list);
}
asym_stat = backend->asym_stat;
if (asym_stat) {
stats_list = cryptodev_backend_stats_add(ASYM_ENCRYPT_OPS_STR,
&asym_stat->encrypt_ops, stats_list);
stats_list = cryptodev_backend_stats_add(ASYM_DECRYPT_OPS_STR,
&asym_stat->decrypt_ops, stats_list);
stats_list = cryptodev_backend_stats_add(ASYM_SIGN_OPS_STR,
&asym_stat->sign_ops, stats_list);
stats_list = cryptodev_backend_stats_add(ASYM_VERIFY_OPS_STR,
&asym_stat->verify_ops, stats_list);
stats_list = cryptodev_backend_stats_add(ASYM_ENCRYPT_BYTES_STR,
&asym_stat->encrypt_bytes, stats_list);
stats_list = cryptodev_backend_stats_add(ASYM_DECRYPT_BYTES_STR,
&asym_stat->decrypt_bytes, stats_list);
stats_list = cryptodev_backend_stats_add(ASYM_SIGN_BYTES_STR,
&asym_stat->sign_bytes, stats_list);
stats_list = cryptodev_backend_stats_add(ASYM_VERIFY_BYTES_STR,
&asym_stat->verify_bytes, stats_list);
}
entry = g_new0(StatsResult, 1);
entry->provider = STATS_PROVIDER_CRYPTODEV;
entry->qom_path = object_get_canonical_path(obj);
entry->stats = stats_list;
QAPI_LIST_PREPEND(*stats_results, entry);
return 0;
}
static void cryptodev_backend_stats_cb(StatsResultList **result,
StatsTarget target,
strList *names, strList *targets,
Error **errp)
{
switch (target) {
case STATS_TARGET_CRYPTODEV:
{
Object *objs = container_get(object_get_root(), "/objects");
StatsArgs stats_args;
stats_args.result.stats = result;
stats_args.names = names;
stats_args.errp = errp;
object_child_foreach(objs, cryptodev_backend_stats_query, &stats_args);
break;
}
default:
break;
}
}
static StatsSchemaValueList *cryptodev_backend_schemas_add(const char *name,
StatsSchemaValueList *list)
{
StatsSchemaValueList *schema_entry = g_new0(StatsSchemaValueList, 1);
schema_entry->value = g_new0(StatsSchemaValue, 1);
schema_entry->value->type = STATS_TYPE_CUMULATIVE;
schema_entry->value->name = g_strdup(name);
schema_entry->next = list;
return schema_entry;
}
static void cryptodev_backend_schemas_cb(StatsSchemaList **result,
Error **errp)
{
StatsSchemaValueList *stats_list = NULL;
const char *sym_stats[] = { SYM_ENCRYPT_OPS_STR, SYM_DECRYPT_OPS_STR,
SYM_ENCRYPT_BYTES_STR, SYM_DECRYPT_BYTES_STR };
const char *asym_stats[] = { ASYM_ENCRYPT_OPS_STR, ASYM_DECRYPT_OPS_STR,
ASYM_SIGN_OPS_STR, ASYM_VERIFY_OPS_STR,
ASYM_ENCRYPT_BYTES_STR, ASYM_DECRYPT_BYTES_STR,
ASYM_SIGN_BYTES_STR, ASYM_VERIFY_BYTES_STR };
for (int i = 0; i < ARRAY_SIZE(sym_stats); i++) {
stats_list = cryptodev_backend_schemas_add(sym_stats[i], stats_list);
}
for (int i = 0; i < ARRAY_SIZE(asym_stats); i++) {
stats_list = cryptodev_backend_schemas_add(asym_stats[i], stats_list);
}
add_stats_schema(result, STATS_PROVIDER_CRYPTODEV, STATS_TARGET_CRYPTODEV,
stats_list);
}
static void
@@ -600,17 +232,6 @@ cryptodev_backend_class_init(ObjectClass *oc, void *data)
cryptodev_backend_get_queues,
cryptodev_backend_set_queues,
NULL, NULL);
object_class_property_add(oc, "throttle-bps", "uint64",
cryptodev_backend_get_bps,
cryptodev_backend_set_bps,
NULL, NULL);
object_class_property_add(oc, "throttle-ops", "uint64",
cryptodev_backend_get_ops,
cryptodev_backend_set_ops,
NULL, NULL);
add_stats_callbacks(STATS_PROVIDER_CRYPTODEV, cryptodev_backend_stats_cb,
cryptodev_backend_schemas_cb);
}
static const TypeInfo cryptodev_backend_info = {


@@ -27,7 +27,6 @@ struct HostMemoryBackendFile {
char *mem_path;
uint64_t align;
uint64_t offset;
bool discard_data;
bool is_pmem;
bool readonly;
@@ -57,11 +56,9 @@ file_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
ram_flags = backend->share ? RAM_SHARED : 0;
ram_flags |= backend->reserve ? 0 : RAM_NORESERVE;
ram_flags |= fb->is_pmem ? RAM_PMEM : 0;
ram_flags |= RAM_NAMED_FILE;
memory_region_init_ram_from_file(&backend->mr, OBJECT(backend), name,
backend->size, fb->align, ram_flags,
fb->mem_path, fb->offset, fb->readonly,
errp);
fb->mem_path, fb->readonly, errp);
g_free(name);
#endif
}
@@ -128,36 +125,6 @@ static void file_memory_backend_set_align(Object *o, Visitor *v,
fb->align = val;
}
static void file_memory_backend_get_offset(Object *o, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(o);
uint64_t val = fb->offset;
visit_type_size(v, name, &val, errp);
}
static void file_memory_backend_set_offset(Object *o, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
HostMemoryBackend *backend = MEMORY_BACKEND(o);
HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(o);
uint64_t val;
if (host_memory_backend_mr_inited(backend)) {
error_setg(errp, "cannot change property '%s' of %s", name,
object_get_typename(o));
return;
}
if (!visit_type_size(v, name, &val, errp)) {
return;
}
fb->offset = val;
}
#ifdef CONFIG_LIBPMEM
static bool file_memory_backend_get_pmem(Object *o, Error **errp)
{
@@ -230,12 +197,6 @@ file_backend_class_init(ObjectClass *oc, void *data)
file_memory_backend_get_align,
file_memory_backend_set_align,
NULL, NULL);
object_class_property_add(oc, "offset", "int",
file_memory_backend_get_offset,
file_memory_backend_set_offset,
NULL, NULL);
object_class_property_set_description(oc, "offset",
"Offset into the target file (ex: 1G)");
#ifdef CONFIG_LIBPMEM
object_class_property_add_bool(oc, "pmem",
file_memory_backend_get_pmem, file_memory_backend_set_pmem);
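The offset property in the hunks above makes the backend map its RAM starting at a byte offset into the backing file, so several backends can carve regions out of a single (e.g. hugetlbfs) file. At the syscall level that is simply the last argument of mmap(), sketched here; this is illustrative, not the memory backend implementation, and the offset must be a multiple of the page size backing the file:

#include <sys/mman.h>
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>

static void *map_at_offset(const char *path, size_t size, off_t offset)
{
    int fd = open(path, O_RDWR);
    if (fd < 0) {
        return MAP_FAILED;
    }
    /* offset must be page-aligned (hugepage-aligned on hugetlbfs) */
    void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                   MAP_SHARED, fd, offset);
    close(fd);          /* the mapping stays valid after close */
    return p;
}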


@@ -1,6 +1,5 @@
system_ss.add([files(
softmmu_ss.add([files(
'cryptodev-builtin.c',
'cryptodev-hmp-cmds.c',
'cryptodev.c',
'hostmem-ram.c',
'hostmem.c',
@@ -10,20 +9,20 @@ system_ss.add([files(
'confidential-guest-support.c',
), numa])
system_ss.add(when: 'CONFIG_POSIX', if_true: files('rng-random.c'))
system_ss.add(when: 'CONFIG_POSIX', if_true: files('hostmem-file.c'))
system_ss.add(when: 'CONFIG_LINUX', if_true: files('hostmem-memfd.c'))
softmmu_ss.add(when: 'CONFIG_POSIX', if_true: files('rng-random.c'))
softmmu_ss.add(when: 'CONFIG_POSIX', if_true: files('hostmem-file.c'))
softmmu_ss.add(when: 'CONFIG_LINUX', if_true: files('hostmem-memfd.c'))
if keyutils.found()
system_ss.add(keyutils, files('cryptodev-lkcf.c'))
softmmu_ss.add(keyutils, files('cryptodev-lkcf.c'))
endif
if have_vhost_user
system_ss.add(when: 'CONFIG_VIRTIO', if_true: files('vhost-user.c'))
softmmu_ss.add(when: 'CONFIG_VIRTIO', if_true: files('vhost-user.c'))
endif
system_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('cryptodev-vhost.c'))
softmmu_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('cryptodev-vhost.c'))
if have_vhost_user_crypto
system_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('cryptodev-vhost-user.c'))
softmmu_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('cryptodev-vhost-user.c'))
endif
system_ss.add(when: gio, if_true: files('dbus-vmstate.c'))
system_ss.add(when: 'CONFIG_SGX', if_true: files('hostmem-epc.c'))
softmmu_ss.add(when: gio, if_true: files('dbus-vmstate.c'))
softmmu_ss.add(when: 'CONFIG_SGX', if_true: files('hostmem-epc.c'))
subdir('tpm')


@@ -13,6 +13,7 @@
#include "qemu/osdep.h"
#include "sysemu/rng.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/module.h"
#include "qom/object_interfaces.h"


@@ -1,6 +1,6 @@
if have_tpm
system_ss.add(files('tpm_backend.c'))
system_ss.add(files('tpm_util.c'))
system_ss.add(when: 'CONFIG_TPM_PASSTHROUGH', if_true: files('tpm_passthrough.c'))
system_ss.add(when: 'CONFIG_TPM_EMULATOR', if_true: files('tpm_emulator.c'))
softmmu_ss.add(files('tpm_backend.c'))
softmmu_ss.add(files('tpm_util.c'))
softmmu_ss.add(when: 'CONFIG_TPM_PASSTHROUGH', if_true: files('tpm_passthrough.c'))
softmmu_ss.add(when: 'CONFIG_TPM_EMULATOR', if_true: files('tpm_emulator.c'))
endif


@@ -100,6 +100,8 @@ bool tpm_backend_had_startup_error(TPMBackend *s)
void tpm_backend_deliver_request(TPMBackend *s, TPMBackendCmd *cmd)
{
ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context());
if (s->cmd != NULL) {
error_report("There is a TPM request pending");
return;
@@ -107,7 +109,7 @@ void tpm_backend_deliver_request(TPMBackend *s, TPMBackendCmd *cmd)
s->cmd = cmd;
object_ref(OBJECT(s));
thread_pool_submit_aio(tpm_backend_worker_thread, s,
thread_pool_submit_aio(pool, tpm_backend_worker_thread, s,
tpm_backend_request_completed, s);
}
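Both sides of this hunk hand the blocking TPM command to a thread pool and have tpm_backend_request_completed() run on completion; the object_ref() keeps the backend alive until that callback drops the reference. The async lifetime rule as a sketch, with hypothetical names rather than the QEMU API:

typedef struct Cmd Cmd;
typedef struct Backend { Cmd *cmd; } Backend;

void backend_ref(Backend *s);     /* hypothetical refcount helpers */
void backend_unref(Backend *s);
int worker_fn(void *arg);         /* runs the command off-thread */
void pool_submit(int (*fn)(void *), void *arg,
                 void (*done)(void *opaque, int ret), void *opaque);

static void request_completed(void *opaque, int ret)
{
    Backend *s = opaque;
    s->cmd = NULL;                /* request slot is free again */
    backend_unref(s);             /* pairs with the ref at submit */
}

static void deliver(Backend *s, Cmd *cmd)
{
    s->cmd = cmd;
    backend_ref(s);               /* completion may fire after we return */
    pool_submit(worker_fn, s, request_completed, s);
}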


@@ -573,13 +573,13 @@ static int tpm_emulator_prepare_data_fd(TPMEmulator *tpm_emu)
goto err_exit;
}
close(fds[1]);
closesocket(fds[1]);
return 0;
err_exit:
close(fds[0]);
close(fds[1]);
closesocket(fds[0]);
closesocket(fds[1]);
return -1;
}
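The close()/closesocket() switch is a portability detail: for these socketpair descriptors the two are interchangeable on POSIX, while Win32 socket handles have to be released with closesocket(), for which QEMU carries a wrapper. Where such a pair typically comes from, as a sketch with hypothetical structure:

#include <sys/socket.h>
#include <unistd.h>

static int make_data_channel(int *kept_end)
{
    int fds[2];

    if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) < 0) {
        return -1;
    }
    *kept_end = fds[0];   /* retained by the caller */
    close(fds[1]);        /* other end was handed off elsewhere */
    return 0;
}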


@@ -13,6 +13,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "qom/object_interfaces.h"
#include "sysemu/vhost-user-backend.h"
@@ -20,6 +21,12 @@
#include "io/channel-command.h"
#include "hw/virtio/virtio-bus.h"
static bool
ioeventfd_enabled(void)
{
return kvm_enabled() && kvm_eventfds_enabled();
}
int
vhost_user_backend_dev_init(VhostUserBackend *b, VirtIODevice *vdev,
unsigned nvqs, Error **errp)
@@ -28,6 +35,11 @@ vhost_user_backend_dev_init(VhostUserBackend *b, VirtIODevice *vdev,
assert(!b->vdev && vdev);
if (!ioeventfd_enabled()) {
error_setg(errp, "vhost initialization failed: requires kvm");
return -1;
}
if (!vhost_user_init(&b->vhost_user, &b->chr, errp)) {
return -1;
}
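The ioeventfd_enabled() check added above gates vhost-user setup on KVM being able to turn guest doorbell writes into eventfd signals, which is what lets the external backend process receive queue kicks without a round trip through QEMU. A minimal userspace eventfd round trip, for illustration (error checks elided):

#include <sys/eventfd.h>
#include <unistd.h>
#include <stdint.h>

static void kick_demo(void)
{
    int efd = eventfd(0, 0);
    uint64_t v = 1;

    write(efd, &v, sizeof(v));   /* the "kick"; with KVM ioeventfd the
                                  * kernel does this on a doorbell write */
    read(efd, &v, sizeof(v));    /* backend wakes and consumes, v == 1 */
    close(efd);
}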

block.c

@@ -277,8 +277,8 @@ bool bdrv_is_read_only(BlockDriverState *bs)
return !(bs->open_flags & BDRV_O_RDWR);
}
static int bdrv_can_set_read_only(BlockDriverState *bs, bool read_only,
bool ignore_allow_rdw, Error **errp)
int bdrv_can_set_read_only(BlockDriverState *bs, bool read_only,
bool ignore_allow_rdw, Error **errp)
{
IO_CODE();
@@ -555,9 +555,8 @@ int coroutine_fn bdrv_co_create(BlockDriver *drv, const char *filename,
* On success, return @blk's actual length.
* Otherwise, return -errno.
*/
static int64_t coroutine_fn GRAPH_UNLOCKED
create_file_fallback_truncate(BlockBackend *blk, int64_t minimum_size,
Error **errp)
static int64_t create_file_fallback_truncate(BlockBackend *blk,
int64_t minimum_size, Error **errp)
{
Error *local_err = NULL;
int64_t size;
@@ -565,14 +564,14 @@ create_file_fallback_truncate(BlockBackend *blk, int64_t minimum_size,
GLOBAL_STATE_CODE();
ret = blk_co_truncate(blk, minimum_size, false, PREALLOC_MODE_OFF, 0,
&local_err);
ret = blk_truncate(blk, minimum_size, false, PREALLOC_MODE_OFF, 0,
&local_err);
if (ret < 0 && ret != -ENOTSUP) {
error_propagate(errp, local_err);
return ret;
}
size = blk_co_getlength(blk);
size = blk_getlength(blk);
if (size < 0) {
error_free(local_err);
error_setg_errno(errp, -size,
@@ -658,8 +657,8 @@ int coroutine_fn bdrv_co_create_opts_simple(BlockDriver *drv,
options = qdict_new();
qdict_put_str(options, "driver", drv->format_name);
blk = blk_co_new_open(filename, NULL, options,
BDRV_O_RDWR | BDRV_O_RESIZE, errp);
blk = blk_new_open(filename, NULL, options,
BDRV_O_RDWR | BDRV_O_RESIZE, errp);
if (!blk) {
error_prepend(errp, "Protocol driver '%s' does not support image "
"creation, and opening the image failed: ",
@@ -680,7 +679,7 @@ int coroutine_fn bdrv_co_create_opts_simple(BlockDriver *drv,
ret = 0;
out:
blk_co_unref(blk);
blk_unref(blk);
return ret;
}
@@ -740,7 +739,6 @@ int coroutine_fn bdrv_co_delete_file(BlockDriverState *bs, Error **errp)
IO_CODE();
assert(bs != NULL);
assert_bdrv_graph_readable();
if (!bs->drv) {
error_setg(errp, "Block node '%s' is not opened", bs->filename);
@@ -1042,7 +1040,6 @@ int coroutine_fn bdrv_co_refresh_total_sectors(BlockDriverState *bs,
{
BlockDriver *drv = bs->drv;
IO_CODE();
assert_bdrv_graph_readable();
if (!drv) {
return -ENOMEDIUM;
@@ -1610,11 +1607,10 @@ out:
* bdrv_refresh_total_sectors() which polls when called from non-coroutine
* context.
*/
static int no_coroutine_fn GRAPH_UNLOCKED
bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv, const char *node_name,
QDict *options, int open_flags, Error **errp)
static int bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv,
const char *node_name, QDict *options,
int open_flags, Error **errp)
{
AioContext *ctx;
Error *local_err = NULL;
int i, ret;
GLOBAL_STATE_CODE();
@@ -1662,22 +1658,13 @@ bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv, const char *node_name,
bs->supported_read_flags |= BDRV_REQ_REGISTERED_BUF;
bs->supported_write_flags |= BDRV_REQ_REGISTERED_BUF;
/* Get the context after .bdrv_open, it can change the context */
ctx = bdrv_get_aio_context(bs);
aio_context_acquire(ctx);
ret = bdrv_refresh_total_sectors(bs, bs->total_sectors);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not refresh total sector count");
aio_context_release(ctx);
return ret;
}
bdrv_graph_rdlock_main_loop();
bdrv_refresh_limits(bs, NULL, &local_err);
bdrv_graph_rdunlock_main_loop();
aio_context_release(ctx);
if (local_err) {
error_propagate(errp, local_err);
return -EINVAL;
@@ -2855,7 +2842,7 @@ uint64_t bdrv_qapi_perm_to_blk_perm(BlockPermission qapi_perm)
* Replaces the node that a BdrvChild points to without updating permissions.
*
* If @new_bs is non-NULL, the parent of @child must already be drained through
* @child and the caller must hold the AioContext lock for @new_bs.
* @child.
*/
static void bdrv_replace_child_noperm(BdrvChild *child,
BlockDriverState *new_bs)
@@ -2894,7 +2881,7 @@ static void bdrv_replace_child_noperm(BdrvChild *child,
}
/* TODO Pull this up into the callers to avoid polling here */
bdrv_graph_wrlock(new_bs);
bdrv_graph_wrlock();
if (old_bs) {
if (child->klass->detach) {
child->klass->detach(child);
@@ -2990,10 +2977,6 @@ static TransactionActionDrv bdrv_attach_child_common_drv = {
* Function doesn't update permissions, caller is responsible for this.
*
* Returns new created child.
*
* The caller must hold the AioContext lock for @child_bs. Both @parent_bs and
* @child_bs can move to a different AioContext in this function. Callers must
* make sure that their AioContext locking is still correct after this.
*/
static BdrvChild *bdrv_attach_child_common(BlockDriverState *child_bs,
const char *child_name,
@@ -3004,7 +2987,7 @@ static BdrvChild *bdrv_attach_child_common(BlockDriverState *child_bs,
Transaction *tran, Error **errp)
{
BdrvChild *new_child;
AioContext *parent_ctx, *new_child_ctx;
AioContext *parent_ctx;
AioContext *child_ctx = bdrv_get_aio_context(child_bs);
assert(child_class->get_parent_desc);
@@ -3055,12 +3038,6 @@ static BdrvChild *bdrv_attach_child_common(BlockDriverState *child_bs,
}
}
new_child_ctx = bdrv_get_aio_context(child_bs);
if (new_child_ctx != child_ctx) {
aio_context_release(child_ctx);
aio_context_acquire(new_child_ctx);
}
bdrv_ref(child_bs);
/*
* Let every new BdrvChild start with a drained parent. Inserting the child
@@ -3090,20 +3067,11 @@ static BdrvChild *bdrv_attach_child_common(BlockDriverState *child_bs,
};
tran_add(tran, &bdrv_attach_child_common_drv, s);
if (new_child_ctx != child_ctx) {
aio_context_release(new_child_ctx);
aio_context_acquire(child_ctx);
}
return new_child;
}
/*
* Function doesn't update permissions, caller is responsible for this.
*
* The caller must hold the AioContext lock for @child_bs. Both @parent_bs and
* @child_bs can move to a different AioContext in this function. Callers must
* make sure that their AioContext locking is still correct after this.
*/
static BdrvChild *bdrv_attach_child_noperm(BlockDriverState *parent_bs,
BlockDriverState *child_bs,
@@ -3367,10 +3335,6 @@ static BdrvChildRole bdrv_backing_role(BlockDriverState *bs)
* callers which don't need their own reference any more must call bdrv_unref().
*
* Function doesn't update permissions, caller is responsible for this.
*
* The caller must hold the AioContext lock for @child_bs. Both @parent_bs and
* @child_bs can move to a different AioContext in this function. Callers must
* make sure that their AioContext locking is still correct after this.
*/
static int bdrv_set_file_or_backing_noperm(BlockDriverState *parent_bs,
BlockDriverState *child_bs,
@@ -3452,18 +3416,11 @@ static int bdrv_set_file_or_backing_noperm(BlockDriverState *parent_bs,
}
out:
bdrv_graph_rdlock_main_loop();
bdrv_refresh_limits(parent_bs, tran, NULL);
bdrv_graph_rdunlock_main_loop();
return 0;
}
/*
* The caller must hold the AioContext lock for @backing_hd. Both @bs and
* @backing_hd can move to a different AioContext in this function. Callers must
* make sure that their AioContext locking is still correct after this.
*/
static int bdrv_set_backing_noperm(BlockDriverState *bs,
BlockDriverState *backing_hd,
Transaction *tran, Error **errp)
@@ -3514,8 +3471,6 @@ int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd,
* itself, all options starting with "${bdref_key}." are considered part of the
* BlockdevRef.
*
* The caller must hold the main AioContext lock.
*
* TODO Can this be unified with bdrv_open_image()?
*/
int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
@@ -3527,7 +3482,6 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
int ret = 0;
bool implicit_backing = false;
BlockDriverState *backing_hd;
AioContext *backing_hd_ctx;
QDict *options;
QDict *tmp_parent_options = NULL;
Error *local_err = NULL;
@@ -3612,12 +3566,8 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
/* Hook up the backing file link; drop our reference, bs owns the
* backing_hd reference now */
backing_hd_ctx = bdrv_get_aio_context(backing_hd);
aio_context_acquire(backing_hd_ctx);
ret = bdrv_set_backing_hd(bs, backing_hd, errp);
bdrv_unref(backing_hd);
aio_context_release(backing_hd_ctx);
if (ret < 0) {
goto free_exit;
}
@@ -3687,10 +3637,6 @@ done:
* BlockdevRef.
*
* The BlockdevRef will be removed from the options QDict.
*
* The caller must hold the lock of the main AioContext and no other AioContext.
* @parent can move to a different AioContext in this function. Callers must
* make sure that their AioContext locking is still correct after this.
*/
BdrvChild *bdrv_open_child(const char *filename,
QDict *options, const char *bdref_key,
@@ -3700,8 +3646,6 @@ BdrvChild *bdrv_open_child(const char *filename,
bool allow_none, Error **errp)
{
BlockDriverState *bs;
BdrvChild *child;
AioContext *ctx;
GLOBAL_STATE_CODE();
@@ -3711,21 +3655,12 @@ BdrvChild *bdrv_open_child(const char *filename,
return NULL;
}
ctx = bdrv_get_aio_context(bs);
aio_context_acquire(ctx);
child = bdrv_attach_child(parent, bs, bdref_key, child_class, child_role,
errp);
aio_context_release(ctx);
return child;
return bdrv_attach_child(parent, bs, bdref_key, child_class, child_role,
errp);
}
/*
* Wrapper on bdrv_open_child() for most popular case: open primary child of bs.
*
* The caller must hold the lock of the main AioContext and no other AioContext.
* @parent can move to a different AioContext in this function. Callers must
* make sure that their AioContext locking is still correct after this.
*/
int bdrv_open_file_child(const char *filename,
QDict *options, const char *bdref_key,
@@ -3800,7 +3735,6 @@ static BlockDriverState *bdrv_append_temp_snapshot(BlockDriverState *bs,
int64_t total_size;
QemuOpts *opts = NULL;
BlockDriverState *bs_snapshot = NULL;
AioContext *ctx = bdrv_get_aio_context(bs);
int ret;
GLOBAL_STATE_CODE();
@@ -3809,10 +3743,7 @@ static BlockDriverState *bdrv_append_temp_snapshot(BlockDriverState *bs,
instead of opening 'filename' directly */
/* Get the required size from the image */
aio_context_acquire(ctx);
total_size = bdrv_getlength(bs);
aio_context_release(ctx);
if (total_size < 0) {
error_setg_errno(errp, -total_size, "Could not get image size");
goto out;
@@ -3846,10 +3777,7 @@ static BlockDriverState *bdrv_append_temp_snapshot(BlockDriverState *bs,
goto out;
}
aio_context_acquire(ctx);
ret = bdrv_append(bs_snapshot, bs, errp);
aio_context_release(ctx);
if (ret < 0) {
bs_snapshot = NULL;
goto out;
@@ -3875,13 +3803,17 @@ out:
* should be opened. If specified, neither options nor a filename may be given,
* nor can an existing BDS be reused (that is, *pbs has to be NULL).
*
* The caller must always hold the main AioContext lock.
* The caller must always hold @filename AioContext lock, because this
* function eventually calls bdrv_refresh_total_sectors() which polls
* when called from non-coroutine context.
*/
static BlockDriverState * no_coroutine_fn
bdrv_open_inherit(const char *filename, const char *reference, QDict *options,
int flags, BlockDriverState *parent,
const BdrvChildClass *child_class, BdrvChildRole child_role,
Error **errp)
static BlockDriverState *bdrv_open_inherit(const char *filename,
const char *reference,
QDict *options, int flags,
BlockDriverState *parent,
const BdrvChildClass *child_class,
BdrvChildRole child_role,
Error **errp)
{
int ret;
BlockBackend *file = NULL;
@@ -3893,12 +3825,10 @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options,
Error *local_err = NULL;
QDict *snapshot_options = NULL;
int snapshot_flags = 0;
AioContext *ctx = qemu_get_aio_context();
assert(!child_class || !flags);
assert(!child_class == !parent);
GLOBAL_STATE_CODE();
assert(!qemu_in_coroutine());
if (reference) {
bool options_non_empty = options ? qdict_size(options) : false;
@@ -4031,13 +3961,9 @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options,
/* Not requesting BLK_PERM_CONSISTENT_READ because we're only
* looking at the header to guess the image format. This works even
* in cases where a guest would not see a consistent state. */
ctx = bdrv_get_aio_context(file_bs);
aio_context_acquire(ctx);
file = blk_new(ctx, 0, BLK_PERM_ALL);
file = blk_new(bdrv_get_aio_context(file_bs), 0, BLK_PERM_ALL);
blk_insert_bs(file, file_bs, &local_err);
bdrv_unref(file_bs);
aio_context_release(ctx);
if (local_err) {
goto fail;
}
@@ -4083,13 +4009,8 @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options,
goto fail;
}
/* The AioContext could have changed during bdrv_open_common() */
ctx = bdrv_get_aio_context(bs);
if (file) {
aio_context_acquire(ctx);
blk_unref(file);
aio_context_release(ctx);
file = NULL;
}
@@ -4147,16 +4068,13 @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options,
* (snapshot_bs); thus, we have to drop the strong reference to bs
* (which we obtained by calling bdrv_new()). bs will not be deleted,
* though, because the overlay still has a reference to it. */
aio_context_acquire(ctx);
bdrv_unref(bs);
aio_context_release(ctx);
bs = snapshot_bs;
}
return bs;
fail:
aio_context_acquire(ctx);
blk_unref(file);
qobject_unref(snapshot_options);
qobject_unref(bs->explicit_options);
@@ -4165,21 +4083,22 @@ fail:
bs->options = NULL;
bs->explicit_options = NULL;
bdrv_unref(bs);
aio_context_release(ctx);
error_propagate(errp, local_err);
return NULL;
close_and_fail:
aio_context_acquire(ctx);
bdrv_unref(bs);
aio_context_release(ctx);
qobject_unref(snapshot_options);
qobject_unref(options);
error_propagate(errp, local_err);
return NULL;
}
/* The caller must always hold the main AioContext lock. */
/*
* The caller must always hold @filename AioContext lock, because this
* function eventually calls bdrv_refresh_total_sectors() which polls
* when called from non-coroutine context.
*/
BlockDriverState *bdrv_open(const char *filename, const char *reference,
QDict *options, int flags, Error **errp)
{
@@ -4644,11 +4563,6 @@ int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only,
* backing BlockDriverState (or NULL).
*
* Return 0 on success, otherwise return < 0 and set @errp.
*
* The caller must hold the AioContext lock of @reopen_state->bs.
* @reopen_state->bs can move to a different AioContext in this function.
* Callers must make sure that their AioContext locking is still correct after
* this.
*/
static int bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
bool is_backing, Transaction *tran,
@@ -4661,8 +4575,6 @@ static int bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
const char *child_name = is_backing ? "backing" : "file";
QObject *value;
const char *str;
AioContext *ctx, *old_ctx;
int ret;
GLOBAL_STATE_CODE();
@@ -4727,22 +4639,8 @@ static int bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
reopen_state->old_file_bs = old_child_bs;
}
old_ctx = bdrv_get_aio_context(bs);
ctx = bdrv_get_aio_context(new_child_bs);
if (old_ctx != ctx) {
aio_context_release(old_ctx);
aio_context_acquire(ctx);
}
ret = bdrv_set_file_or_backing_noperm(bs, new_child_bs, is_backing,
tran, errp);
if (old_ctx != ctx) {
aio_context_release(ctx);
aio_context_acquire(old_ctx);
}
return ret;
return bdrv_set_file_or_backing_noperm(bs, new_child_bs, is_backing,
tran, errp);
}
/*
@@ -4761,7 +4659,6 @@ static int bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
* It is the responsibility of the caller to then call the abort() or
* commit() for any other BDS that have been left in a prepare() state
*
* The caller must hold the AioContext lock of @reopen_state->bs.
*/
static int bdrv_reopen_prepare(BDRVReopenState *reopen_state,
BlockReopenQueue *queue,
@@ -5018,10 +4915,7 @@ static void bdrv_reopen_commit(BDRVReopenState *reopen_state)
qdict_del(bs->explicit_options, "backing");
qdict_del(bs->options, "backing");
bdrv_graph_rdlock_main_loop();
bdrv_refresh_limits(bs, NULL, NULL);
bdrv_graph_rdunlock_main_loop();
bdrv_refresh_total_sectors(bs, bs->total_sectors);
}
/*
@@ -5372,8 +5266,6 @@ int bdrv_drop_filter(BlockDriverState *bs, Error **errp)
* child.
*
* This function does not create any image files.
*
* The caller must hold the AioContext lock for @bs_top.
*/
int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top,
Error **errp)
@@ -5381,14 +5273,11 @@ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top,
int ret;
BdrvChild *child;
Transaction *tran = tran_new();
AioContext *old_context, *new_context = NULL;
GLOBAL_STATE_CODE();
assert(!bs_new->backing);
old_context = bdrv_get_aio_context(bs_top);
child = bdrv_attach_child_noperm(bs_new, bs_top, "backing",
&child_of_bds, bdrv_backing_role(bs_new),
tran, errp);
@@ -5397,19 +5286,6 @@ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top,
goto out;
}
/*
* bdrv_attach_child_noperm could change the AioContext of bs_top.
* bdrv_replace_node_noperm calls bdrv_drained_begin, so let's temporarily
* hold the new AioContext, since bdrv_drained_begin calls BDRV_POLL_WHILE
* that assumes the new lock is taken.
*/
new_context = bdrv_get_aio_context(bs_top);
if (old_context != new_context) {
aio_context_release(old_context);
aio_context_acquire(new_context);
}
ret = bdrv_replace_node_noperm(bs_top, bs_new, true, tran, errp);
if (ret < 0) {
goto out;
@@ -5419,14 +5295,7 @@ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top,
out:
tran_finalize(tran, ret);
bdrv_graph_rdlock_main_loop();
bdrv_refresh_limits(bs_top, NULL, NULL);
bdrv_graph_rdunlock_main_loop();
if (new_context && old_context != new_context) {
aio_context_release(new_context);
aio_context_acquire(old_context);
}
return ret;
}
@@ -5487,17 +5356,12 @@ static void bdrv_delete(BlockDriverState *bs)
* empty set of options. The reference to the QDict belongs to the block layer
* after the call (even on failure), so if the caller intends to reuse the
* dictionary, it needs to use qobject_ref() before calling bdrv_open.
*
* The caller holds the AioContext lock for @bs. It must make sure that @bs
* stays in the same AioContext, i.e. @options must not refer to nodes in a
* different AioContext.
*/
BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *options,
int flags, Error **errp)
{
ERRP_GUARD();
int ret;
AioContext *ctx = bdrv_get_aio_context(bs);
BlockDriverState *new_node_bs = NULL;
const char *drvname, *node_name;
BlockDriver *drv;
@@ -5518,14 +5382,8 @@ BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *options,
GLOBAL_STATE_CODE();
aio_context_release(ctx);
aio_context_acquire(qemu_get_aio_context());
new_node_bs = bdrv_new_open_driver_opts(drv, node_name, options, flags,
errp);
aio_context_release(qemu_get_aio_context());
aio_context_acquire(ctx);
assert(bdrv_get_aio_context(bs) == ctx);
options = NULL; /* bdrv_new_open_driver() eats options */
if (!new_node_bs) {
error_prepend(errp, "Could not create node: ");
@@ -5866,8 +5724,7 @@ exit:
* sums the size of all data-bearing children. (This excludes backing
* children.)
*/
static int64_t coroutine_fn GRAPH_RDLOCK
bdrv_sum_allocated_file_size(BlockDriverState *bs)
static int64_t bdrv_sum_allocated_file_size(BlockDriverState *bs)
{
BdrvChild *child;
int64_t child_size, sum = 0;
@@ -5895,7 +5752,6 @@ int64_t coroutine_fn bdrv_co_get_allocated_file_size(BlockDriverState *bs)
{
BlockDriver *drv = bs->drv;
IO_CODE();
assert_bdrv_graph_readable();
if (!drv) {
return -ENOMEDIUM;
@@ -5963,12 +5819,11 @@ int64_t coroutine_fn bdrv_co_nb_sectors(BlockDriverState *bs)
{
BlockDriver *drv = bs->drv;
IO_CODE();
assert_bdrv_graph_readable();
if (!drv)
return -ENOMEDIUM;
if (bs->bl.has_variable_length) {
if (drv->has_variable_length) {
int ret = bdrv_co_refresh_total_sectors(bs, bs->total_sectors);
if (ret < 0) {
return ret;
@@ -5977,28 +5832,6 @@ int64_t coroutine_fn bdrv_co_nb_sectors(BlockDriverState *bs)
return bs->total_sectors;
}
/*
* This wrapper is written by hand because this function is in the hot I/O path,
* via blk_get_geometry.
*/
int64_t coroutine_mixed_fn bdrv_nb_sectors(BlockDriverState *bs)
{
BlockDriver *drv = bs->drv;
IO_CODE();
if (!drv)
return -ENOMEDIUM;
if (bs->bl.has_variable_length) {
int ret = bdrv_refresh_total_sectors(bs, bs->total_sectors);
if (ret < 0) {
return ret;
}
}
return bs->total_sectors;
}
/**
* Return length in bytes on success, -errno on error.
* The length is always a multiple of BDRV_SECTOR_SIZE.
@@ -6007,7 +5840,6 @@ int64_t coroutine_fn bdrv_co_getlength(BlockDriverState *bs)
{
int64_t ret;
IO_CODE();
assert_bdrv_graph_readable();
ret = bdrv_co_nb_sectors(bs);
if (ret < 0) {
@@ -6019,6 +5851,15 @@ int64_t coroutine_fn bdrv_co_getlength(BlockDriverState *bs)
return ret * BDRV_SECTOR_SIZE;
}
/* return 0 as number of sectors if no device present or error */
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
{
int64_t nb_sectors = bdrv_nb_sectors(bs);
IO_CODE();
*nb_sectors_ptr = nb_sectors < 0 ? 0 : nb_sectors;
}
bool bdrv_is_sg(BlockDriverState *bs)
{
IO_CODE();
@@ -6465,8 +6306,6 @@ int coroutine_fn bdrv_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
int ret;
BlockDriver *drv = bs->drv;
IO_CODE();
assert_bdrv_graph_readable();
/* if bs->drv == NULL, bs is closed, so there's nothing to do here */
if (!drv) {
return -ENOMEDIUM;
@@ -6515,8 +6354,6 @@ BlockStatsSpecific *bdrv_get_specific_stats(BlockDriverState *bs)
void coroutine_fn bdrv_co_debug_event(BlockDriverState *bs, BlkdebugEvent event)
{
IO_CODE();
assert_bdrv_graph_readable();
if (!bs || !bs->drv || !bs->drv->bdrv_co_debug_event) {
return;
}
@@ -6966,7 +6803,6 @@ bool coroutine_fn bdrv_co_is_inserted(BlockDriverState *bs)
BlockDriver *drv = bs->drv;
BdrvChild *child;
IO_CODE();
assert_bdrv_graph_readable();
if (!drv) {
return false;
@@ -6989,7 +6825,6 @@ void coroutine_fn bdrv_co_eject(BlockDriverState *bs, bool eject_flag)
{
BlockDriver *drv = bs->drv;
IO_CODE();
assert_bdrv_graph_readable();
if (drv && drv->bdrv_co_eject) {
drv->bdrv_co_eject(bs, eject_flag);
@@ -7004,7 +6839,6 @@ void coroutine_fn bdrv_co_lock_medium(BlockDriverState *bs, bool locked)
{
BlockDriver *drv = bs->drv;
IO_CODE();
assert_bdrv_graph_readable();
trace_bdrv_lock_medium(bs, locked);
if (drv && drv->bdrv_co_lock_medium) {
@@ -7151,8 +6985,6 @@ void bdrv_img_create(const char *filename, const char *fmt,
return;
}
aio_context_acquire(qemu_get_aio_context());
/* Create parameter list */
create_opts = qemu_opts_append(create_opts, drv->create_opts);
create_opts = qemu_opts_append(create_opts, proto_drv->create_opts);
@@ -7246,7 +7078,7 @@ void bdrv_img_create(const char *filename, const char *fmt,
if (!backing_fmt) {
error_setg(&local_err,
"Backing file specified without backing format");
error_append_hint(&local_err, "Detected format of %s.\n",
error_append_hint(&local_err, "Detected format of %s.",
bs->drv->format_name);
goto out;
}
@@ -7302,7 +7134,6 @@ out:
qemu_opts_del(opts);
qemu_opts_free(create_opts);
error_propagate(errp, local_err);
aio_context_release(qemu_get_aio_context());
}
AioContext *bdrv_get_aio_context(BlockDriverState *bs)
@@ -7393,6 +7224,9 @@ static void bdrv_detach_aio_context(BlockDriverState *bs)
bs->drv->bdrv_detach_aio_context(bs);
}
if (bs->quiesce_counter) {
aio_enable_external(bs->aio_context);
}
bs->aio_context = NULL;
}
@@ -7402,6 +7236,10 @@ static void bdrv_attach_aio_context(BlockDriverState *bs,
BdrvAioNotifier *ban, *ban_tmp;
GLOBAL_STATE_CODE();
if (bs->quiesce_counter) {
aio_disable_external(new_context);
}
bs->aio_context = new_context;
if (bs->drv && bs->drv->bdrv_attach_aio_context) {
@@ -8085,25 +7923,6 @@ void bdrv_add_child(BlockDriverState *parent_bs, BlockDriverState *child_bs,
return;
}
/*
* Non-zoned block drivers do not follow zoned storage constraints
* (i.e. sequential writes to zones). Refuse mixing zoned and non-zoned
* drivers in a graph.
*/
if (!parent_bs->drv->supports_zoned_children &&
child_bs->bl.zoned == BLK_Z_HM) {
/*
* The host-aware model allows zoned storage constraints and random
* write. Allow mixing host-aware and non-zoned drivers. Using
* host-aware device as a regular device.
*/
error_setg(errp, "Cannot add a %s child to a %s parent",
child_bs->bl.zoned == BLK_Z_HM ? "zoned" : "non-zoned",
parent_bs->drv->supports_zoned_children ?
"support zoned children" : "not support zoned children");
return;
}
if (!QLIST_EMPTY(&child_bs->parents)) {
error_setg(errp, "The node %s already has a parent",
child_bs->node_name);
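The recurring shape throughout this block.c diff is that one side brackets cross-AioContext operations with acquire/release pairs (re-acquiring whenever a node may have migrated contexts), while the other relies on graph locking and drained sections instead. The bracketing shape, schematically:

/* Schematic only: the declarations mirror QEMU's, and
 * some_bdrv_call() is a stand-in for the guarded operation. */
typedef struct AioContext AioContext;
typedef struct BlockDriverState BlockDriverState;
AioContext *bdrv_get_aio_context(BlockDriverState *bs);
void aio_context_acquire(AioContext *ctx);
void aio_context_release(AioContext *ctx);
int some_bdrv_call(BlockDriverState *bs);

static int guarded_call(BlockDriverState *bs)
{
    AioContext *ctx = bdrv_get_aio_context(bs);
    int ret;

    aio_context_acquire(ctx);
    ret = some_bdrv_call(bs);    /* runs with the context lock held */
    aio_context_release(ctx);
    return ret;
}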

Some files were not shown because too many files have changed in this diff.