Compare commits

..

12 Commits

Author SHA1 Message Date
Fabiano Rosas
eab13108cd tests/qtest: bios-tables-test: Skip if missing configs
If we build with --without-default-devices, CONFIG_HPET and
CONFIG_PARALLEL are set to N, which makes the respective devices go
missing from acpi tables.

Signed-off-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Thomas Huth <thuth@redhat.com>
2023-02-13 18:01:07 -03:00
Fabiano Rosas
750400c56f tests/qemu-iotests: Require virtio-scsi-pci
Check that virtio-scsi-pci is present in the QEMU build before running
the tests.

Signed-off-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Thomas Huth <thuth@redhat.com>
2023-02-13 18:01:07 -03:00
Fabiano Rosas
f5ef199312 tests/qtest: Do not include hexloader-test if loader device is not present
Signed-off-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Thomas Huth <thuth@redhat.com>
2023-02-13 18:01:07 -03:00
Fabiano Rosas
c789207417 tests/qtest: Check for devices in bios-tables-test
Do not include tests that require devices that are not available in
the QEMU build.

Signed-off-by: Fabiano Rosas <farosas@suse.de>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
2023-02-13 18:01:07 -03:00
Fabiano Rosas
3bf0b7dd7b tests/qtest: drive_del-test: Skip tests that require missing devices
Signed-off-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Thomas Huth <thuth@redhat.com>
2023-02-13 18:01:07 -03:00
Fabiano Rosas
3864d5e5c6 tests/qtest: Skip unplug tests that use missing devices
Signed-off-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Thomas Huth <thuth@redhat.com>
2023-02-13 18:01:07 -03:00
Fabiano Rosas
8a840ab4c2 tests/qtest: Fix coding style in device-plug-test.c
We should not mix declarations and statements in QEMU code.

Signed-off-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Thomas Huth <thuth@redhat.com>
2023-02-13 18:01:07 -03:00
Fabiano Rosas
d32f29f651 tests/qtest: hd-geo-test: Check for missing devices
Don't include tests that require devices not available in the QEMU
binary.

Signed-off-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Thomas Huth <thuth@redhat.com>
2023-02-13 18:01:07 -03:00
Fabiano Rosas
696cf0c1cd tests/qtest: Don't build virtio-serial-test.c if device not present
The virtconsole device might not be present in the QEMU build that is
being tested.

Signed-off-by: Fabiano Rosas <farosas@suse.de>
2023-02-13 18:01:07 -03:00
Fabiano Rosas
ef92be0914 tests/qtest: Add dependence on PCIE_PORT for virtio-net-failover.c
This test depends on the presence of the pcie-root-port device. Add a
build time dependency.

Signed-off-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Thomas Huth <thuth@redhat.com>
2023-02-13 18:01:07 -03:00
Fabiano Rosas
f85bfa0bcb tests/qtest: Do not run lsi53c895a test if device is not present
The tests are built once for all the targets, so as long as one QEMU
binary is built with CONFIG_LSI_SCSI_PCI=y, this test will
run. However some binaries might not include the device. So check this
again in runtime.

Signed-off-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Thomas Huth <thuth@redhat.com>
2023-02-13 18:01:07 -03:00
Fabiano Rosas
ecbd3f095e tests/qtest: Skip PXE tests for missing devices
Check if the devices we're trying to add are present in the QEMU
binary. They could have been removed from the build via Kconfig or the
--without-default-devices option.

Signed-off-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Thomas Huth <thuth@redhat.com>
2023-02-13 18:01:07 -03:00
4126 changed files with 139063 additions and 288834 deletions

109
.cirrus.yml Normal file
View File

@@ -0,0 +1,109 @@
env:
CIRRUS_CLONE_DEPTH: 1
windows_msys2_task:
timeout_in: 90m
windows_container:
image: cirrusci/windowsservercore:2019
os_version: 2019
cpu: 8
memory: 8G
env:
CIRRUS_SHELL: powershell
MSYS: winsymlinks:native
MSYSTEM: MINGW64
MSYS2_URL: https://github.com/msys2/msys2-installer/releases/download/2022-06-03/msys2-base-x86_64-20220603.sfx.exe
MSYS2_FINGERPRINT: 0
MSYS2_PACKAGES: "
diffutils git grep make pkg-config sed
mingw-w64-x86_64-python
mingw-w64-x86_64-python-sphinx
mingw-w64-x86_64-toolchain
mingw-w64-x86_64-SDL2
mingw-w64-x86_64-SDL2_image
mingw-w64-x86_64-gtk3
mingw-w64-x86_64-glib2
mingw-w64-x86_64-ninja
mingw-w64-x86_64-jemalloc
mingw-w64-x86_64-lzo2
mingw-w64-x86_64-zstd
mingw-w64-x86_64-libjpeg-turbo
mingw-w64-x86_64-pixman
mingw-w64-x86_64-libgcrypt
mingw-w64-x86_64-libpng
mingw-w64-x86_64-libssh
mingw-w64-x86_64-snappy
mingw-w64-x86_64-libusb
mingw-w64-x86_64-usbredir
mingw-w64-x86_64-libtasn1
mingw-w64-x86_64-nettle
mingw-w64-x86_64-cyrus-sasl
mingw-w64-x86_64-curl
mingw-w64-x86_64-gnutls
mingw-w64-x86_64-libnfs
"
CHERE_INVOKING: 1
msys2_cache:
folder: C:\tools\archive
reupload_on_changes: false
# These env variables are used to generate fingerprint to trigger the cache procedure
# If you want to force re-populating msys2, increase MSYS2_FINGERPRINT
fingerprint_script:
- |
echo $env:CIRRUS_TASK_NAME
echo $env:MSYS2_URL
echo $env:MSYS2_FINGERPRINT
echo $env:MSYS2_PACKAGES
populate_script:
- |
md -Force C:\tools\archive\pkg
$start_time = Get-Date
bitsadmin /transfer msys_download /dynamic /download /priority FOREGROUND $env:MSYS2_URL C:\tools\archive\base.exe
Write-Output "Download time taken: $((Get-Date).Subtract($start_time))"
cd C:\tools
C:\tools\archive\base.exe -y
del -Force C:\tools\archive\base.exe
Write-Output "Base install time taken: $((Get-Date).Subtract($start_time))"
$start_time = Get-Date
((Get-Content -path C:\tools\msys64\etc\\post-install\\07-pacman-key.post -Raw) -replace '--refresh-keys', '--version') | Set-Content -Path C:\tools\msys64\etc\\post-install\\07-pacman-key.post
C:\tools\msys64\usr\bin\bash.exe -lc "sed -i 's/^CheckSpace/#CheckSpace/g' /etc/pacman.conf"
C:\tools\msys64\usr\bin\bash.exe -lc "export"
C:\tools\msys64\usr\bin\pacman.exe --noconfirm -Sy
echo Y | C:\tools\msys64\usr\bin\pacman.exe --noconfirm -Suu --overwrite=*
taskkill /F /FI "MODULES eq msys-2.0.dll"
tasklist
C:\tools\msys64\usr\bin\bash.exe -lc "mv -f /etc/pacman.conf.pacnew /etc/pacman.conf || true"
C:\tools\msys64\usr\bin\bash.exe -lc "pacman --noconfirm -Syuu --overwrite=*"
Write-Output "Core install time taken: $((Get-Date).Subtract($start_time))"
$start_time = Get-Date
C:\tools\msys64\usr\bin\bash.exe -lc "pacman --noconfirm -S --needed $env:MSYS2_PACKAGES"
Write-Output "Package install time taken: $((Get-Date).Subtract($start_time))"
$start_time = Get-Date
del -Force -ErrorAction SilentlyContinue C:\tools\msys64\etc\mtab
del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\fd
del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\stderr
del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\stdin
del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\stdout
del -Force -Recurse -ErrorAction SilentlyContinue C:\tools\msys64\var\cache\pacman\pkg
tar cf C:\tools\archive\msys64.tar -C C:\tools\ msys64
Write-Output "Package archive time taken: $((Get-Date).Subtract($start_time))"
del -Force -Recurse -ErrorAction SilentlyContinue c:\tools\msys64
install_script:
- |
$start_time = Get-Date
cd C:\tools
ls C:\tools\archive\msys64.tar
tar xf C:\tools\archive\msys64.tar
Write-Output "Extract msys2 time taken: $((Get-Date).Subtract($start_time))"
script:
- C:\tools\msys64\usr\bin\bash.exe -lc "mkdir build"
- C:\tools\msys64\usr\bin\bash.exe -lc "cd build && ../configure --python=python3"
- C:\tools\msys64\usr\bin\bash.exe -lc "cd build && make -j8"
- exit $LastExitCode
test_script:
- C:\tools\msys64\usr\bin\bash.exe -lc "cd build && make V=1 check"
- exit $LastExitCode

View File

@@ -1,21 +0,0 @@
#
# List of code-formatting clean ups the git blame can ignore
#
# git blame --ignore-revs-file .git-blame-ignore-revs
#
# or
#
# git config blame.ignoreRevsFile .git-blame-ignore-revs
#
# gdbstub: clean-up indents
ad9e4585b3c7425759d3eea697afbca71d2c2082
# e1000e: fix code style
0eadd56bf53ab196a16d492d7dd31c62e1c24c32
# target/riscv: coding style fixes
8c7feddddd9218b407792120bcfda0347ed16205
# replace TABs with spaces
48805df9c22a0700fba4b3b548fafaa21726ca68

View File

@@ -1,100 +1,64 @@
variables:
# On stable branches this is changed by later rules. Should also
# be overridden per pipeline if running pipelines concurrently
# for different branches in contributor forks.
QEMU_CI_CONTAINER_TAG: latest
# For purposes of CI rules, upstream is the gitlab.com/qemu-project
# namespace. When testing CI, it might be useful to override this
# to point to a fork repo
QEMU_CI_UPSTREAM: qemu-project
# The order of rules defined here is critically important. # The order of rules defined here is critically important.
# They are evaluated in order and first match wins. # They are evaluated in order and first match wins.
# #
# Thus we group them into a number of stages, ordered from # Thus we group them into a number of stages, ordered from
# most restrictive to least restrictive # most restrictive to least restrictive
# #
# For pipelines running for stable "staging-X.Y" branches
# we must override QEMU_CI_CONTAINER_TAG
#
.base_job_template: .base_job_template:
variables: variables:
# Each script line from will be in a collapsible section in the job output # Each script line from will be in a collapsible section in the job output
# and show the duration of each line. # and show the duration of each line.
FF_SCRIPT_SECTIONS: 1 FF_SCRIPT_SECTIONS: 1
interruptible: true
rules: rules:
############################################################# #############################################################
# Stage 1: exclude scenarios where we definitely don't # Stage 1: exclude scenarios where we definitely don't
# want jobs to run # want jobs to run
############################################################# #############################################################
# Never run jobs upstream on stable branch, staging branch jobs already ran
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /^stable-/'
when: never
# Never run jobs upstream on tags, staging branch jobs already ran
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_TAG'
when: never
# Cirrus jobs can't run unless the creds / target repo are set # Cirrus jobs can't run unless the creds / target repo are set
- if: '$QEMU_JOB_CIRRUS && ($CIRRUS_GITHUB_REPO == null || $CIRRUS_API_TOKEN == null)' - if: '$QEMU_JOB_CIRRUS && ($CIRRUS_GITHUB_REPO == null || $CIRRUS_API_TOKEN == null)'
when: never when: never
# Publishing jobs should only run on the default branch in upstream # Publishing jobs should only run on the default branch in upstream
- if: '$QEMU_JOB_PUBLISH == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH' - if: '$QEMU_JOB_PUBLISH == "1" && $CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH'
when: never when: never
# Non-publishing jobs should only run on staging branches in upstream # Non-publishing jobs should only run on staging branches in upstream
- if: '$QEMU_JOB_PUBLISH != "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH !~ /staging/' - if: '$QEMU_JOB_PUBLISH != "1" && $CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH !~ /staging/'
when: never when: never
# Jobs only intended for forks should always be skipped on upstream # Jobs only intended for forks should always be skipped on upstream
- if: '$QEMU_JOB_ONLY_FORKS == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM' - if: '$QEMU_JOB_ONLY_FORKS == "1" && $CI_PROJECT_NAMESPACE == "qemu-project"'
when: never when: never
# Forks don't get pipelines unless QEMU_CI=1 or QEMU_CI=2 is set # Forks don't get pipelines unless QEMU_CI=1 or QEMU_CI=2 is set
- if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM' - if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"'
when: never when: never
# Avocado jobs don't run in forks unless $QEMU_CI_AVOCADO_TESTING is set # Avocado jobs don't run in forks unless $QEMU_CI_AVOCADO_TESTING is set
- if: '$QEMU_JOB_AVOCADO && $QEMU_CI_AVOCADO_TESTING != "1" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM' - if: '$QEMU_JOB_AVOCADO && $QEMU_CI_AVOCADO_TESTING != "1" && $CI_PROJECT_NAMESPACE != "qemu-project"'
when: never when: never
############################################################# #############################################################
# Stage 2: fine tune execution of jobs in specific scenarios # Stage 2: fine tune execution of jobs in specific scenarios
# where the catch all logic is inappropriate # where the catch all logic is inappropriate
############################################################# #############################################################
# Optional jobs should not be run unless manually triggered # Optional jobs should not be run unless manually triggered
- if: '$QEMU_JOB_OPTIONAL && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
when: manual
allow_failure: true
variables:
QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG
- if: '$QEMU_JOB_OPTIONAL' - if: '$QEMU_JOB_OPTIONAL'
when: manual when: manual
allow_failure: true allow_failure: true
# Skipped jobs should not be run unless manually triggered # Skipped jobs should not be run unless manually triggered
- if: '$QEMU_JOB_SKIPPED && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
when: manual
allow_failure: true
variables:
QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG
- if: '$QEMU_JOB_SKIPPED' - if: '$QEMU_JOB_SKIPPED'
when: manual when: manual
allow_failure: true allow_failure: true
# Avocado jobs can be manually started in forks if $QEMU_CI_AVOCADO_TESTING is unset # Avocado jobs can be manually started in forks if $QEMU_CI_AVOCADO_TESTING is unset
- if: '$QEMU_JOB_AVOCADO && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM' - if: '$QEMU_JOB_AVOCADO && $CI_PROJECT_NAMESPACE != "qemu-project"'
when: manual when: manual
allow_failure: true allow_failure: true
@@ -106,23 +70,8 @@ variables:
# Forks pipeline jobs don't start automatically unless # Forks pipeline jobs don't start automatically unless
# QEMU_CI=2 is set # QEMU_CI=2 is set
- if: '$QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM' - if: '$QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"'
when: manual when: manual
# Upstream pipeline jobs start automatically unless told not to # Jobs can run if any jobs they depend on were successful
# by setting QEMU_CI=1
- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
when: manual
variables:
QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG
- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM'
when: manual
# Jobs can run if any jobs they depend on were successful
- if: '$QEMU_JOB_SKIPPED && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
when: on_success
variables:
QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG
- when: on_success - when: on_success

View File

@@ -1,61 +1,39 @@
.native_build_job_template: .native_build_job_template:
extends: .base_job_template extends: .base_job_template
stage: build stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
cache:
paths:
- ccache
key: "$CI_JOB_NAME"
when: always
before_script: before_script:
- JOBS=$(expr $(nproc) + 1) - JOBS=$(expr $(nproc) + 1)
script: script:
- export CCACHE_BASEDIR="$(pwd)"
- export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
- export CCACHE_MAXSIZE="500M"
- export PATH="$CCACHE_WRAPPERSDIR:$PATH"
- mkdir build
- cd build
- ccache --zero-stats
- ../configure --enable-werror --disable-docs --enable-fdt=system
${TARGETS:+--target-list="$TARGETS"}
$CONFIGURE_ARGS ||
{ cat config.log meson-logs/meson-log.txt && exit 1; }
- if test -n "$LD_JOBS"; - if test -n "$LD_JOBS";
then then
pyvenv/bin/meson configure . -Dbackend_max_links="$LD_JOBS" ; scripts/git-submodule.sh update meson ;
fi
- mkdir build
- cd build
- if test -n "$TARGETS";
then
../configure --enable-werror --disable-docs ${LD_JOBS:+--meson=git} $CONFIGURE_ARGS --target-list="$TARGETS" ;
else
../configure --enable-werror --disable-docs ${LD_JOBS:+--meson=git} $CONFIGURE_ARGS ;
fi || { cat config.log meson-logs/meson-log.txt && exit 1; }
- if test -n "$LD_JOBS";
then
../meson/meson.py configure . -Dbackend_max_links="$LD_JOBS" ;
fi || exit 1; fi || exit 1;
- make -j"$JOBS" - make -j"$JOBS"
- if test -n "$MAKE_CHECK_ARGS"; - if test -n "$MAKE_CHECK_ARGS";
then then
make -j"$JOBS" $MAKE_CHECK_ARGS ; make -j"$JOBS" $MAKE_CHECK_ARGS ;
fi fi
- ccache --show-stats
# We jump some hoops in common_test_job_template to avoid
# rebuilding all the object files we skip in the artifacts
.native_build_artifact_template:
artifacts:
when: on_success
expire_in: 2 days
paths:
- build
- .git-submodule-status
exclude:
- build/**/*.p
- build/**/*.a.p
- build/**/*.fa.p
- build/**/*.c.o
- build/**/*.c.o.d
- build/**/*.fa
.common_test_job_template: .common_test_job_template:
extends: .base_job_template extends: .base_job_template
stage: test stage: test
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
script: script:
- scripts/git-submodule.sh update roms/SLOF - scripts/git-submodule.sh update
- meson subprojects download $(cd build/subprojects && echo *) $(sed -n '/GIT_SUBMODULES=/ s/.*=// p' build/config-host.mak)
- cd build - cd build
- find . -type f -exec touch {} + - find . -type f -exec touch {} +
# Avoid recompiling by hiding ninja with NINJA=":" # Avoid recompiling by hiding ninja with NINJA=":"
@@ -65,7 +43,6 @@
extends: .common_test_job_template extends: .common_test_job_template
artifacts: artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG" name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
when: always
expire_in: 7 days expire_in: 7 days
paths: paths:
- build/meson-logs/testlog.txt - build/meson-logs/testlog.txt
@@ -81,7 +58,7 @@
policy: pull-push policy: pull-push
artifacts: artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG" name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
when: always when: on_failure
expire_in: 7 days expire_in: 7 days
paths: paths:
- build/tests/results/latest/results.xml - build/tests/results/latest/results.xml

View File

@@ -2,16 +2,20 @@ include:
- local: '/.gitlab-ci.d/buildtest-template.yml' - local: '/.gitlab-ci.d/buildtest-template.yml'
build-system-alpine: build-system-alpine:
extends: extends: .native_build_job_template
- .native_build_job_template
- .native_build_artifact_template
needs: needs:
- job: amd64-alpine-container - job: amd64-alpine-container
variables: variables:
IMAGE: alpine IMAGE: alpine
TARGETS: avr-softmmu loongarch64-softmmu mips64-softmmu mipsel-softmmu TARGETS: aarch64-softmmu alpha-softmmu cris-softmmu hppa-softmmu
microblazeel-softmmu mips64el-softmmu
MAKE_CHECK_ARGS: check-build MAKE_CHECK_ARGS: check-build
CONFIGURE_ARGS: --enable-docs --enable-trace-backends=log,simple,syslog CONFIGURE_ARGS: --enable-docs --enable-trace-backends=log,simple,syslog
artifacts:
expire_in: 2 days
paths:
- .git-submodule-status
- build
check-system-alpine: check-system-alpine:
extends: .native_test_job_template extends: .native_test_job_template
@@ -30,19 +34,21 @@ avocado-system-alpine:
variables: variables:
IMAGE: alpine IMAGE: alpine
MAKE_CHECK_ARGS: check-avocado MAKE_CHECK_ARGS: check-avocado
AVOCADO_TAGS: arch:avr arch:loongarch64 arch:mips64 arch:mipsel
build-system-ubuntu: build-system-ubuntu:
extends: extends: .native_build_job_template
- .native_build_job_template
- .native_build_artifact_template
needs: needs:
job: amd64-ubuntu2204-container job: amd64-ubuntu2004-container
variables: variables:
IMAGE: ubuntu2204 IMAGE: ubuntu2004
CONFIGURE_ARGS: --enable-docs CONFIGURE_ARGS: --enable-docs --enable-fdt=system --enable-capstone
TARGETS: alpha-softmmu microblazeel-softmmu mips64el-softmmu TARGETS: aarch64-softmmu alpha-softmmu cris-softmmu hppa-softmmu
microblazeel-softmmu mips64el-softmmu
MAKE_CHECK_ARGS: check-build MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-ubuntu: check-system-ubuntu:
extends: .native_test_job_template extends: .native_test_job_template
@@ -50,7 +56,7 @@ check-system-ubuntu:
- job: build-system-ubuntu - job: build-system-ubuntu
artifacts: true artifacts: true
variables: variables:
IMAGE: ubuntu2204 IMAGE: ubuntu2004
MAKE_CHECK_ARGS: check MAKE_CHECK_ARGS: check
avocado-system-ubuntu: avocado-system-ubuntu:
@@ -59,22 +65,22 @@ avocado-system-ubuntu:
- job: build-system-ubuntu - job: build-system-ubuntu
artifacts: true artifacts: true
variables: variables:
IMAGE: ubuntu2204 IMAGE: ubuntu2004
MAKE_CHECK_ARGS: check-avocado MAKE_CHECK_ARGS: check-avocado
AVOCADO_TAGS: arch:alpha arch:microblaze arch:mips64el
build-system-debian: build-system-debian:
extends: extends: .native_build_job_template
- .native_build_job_template
- .native_build_artifact_template
needs: needs:
job: amd64-debian-container job: amd64-debian-container
variables: variables:
IMAGE: debian-amd64 IMAGE: debian-amd64
CONFIGURE_ARGS: --with-coroutine=sigaltstack TARGETS: arm-softmmu avr-softmmu i386-softmmu mipsel-softmmu
TARGETS: arm-softmmu i386-softmmu riscv64-softmmu sh4eb-softmmu riscv64-softmmu sh4eb-softmmu sparc-softmmu xtensaeb-softmmu
sparc-softmmu xtensa-softmmu
MAKE_CHECK_ARGS: check-build MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-debian: check-system-debian:
extends: .native_test_job_template extends: .native_test_job_template
@@ -93,7 +99,6 @@ avocado-system-debian:
variables: variables:
IMAGE: debian-amd64 IMAGE: debian-amd64
MAKE_CHECK_ARGS: check-avocado MAKE_CHECK_ARGS: check-avocado
AVOCADO_TAGS: arch:arm arch:i386 arch:riscv64 arch:sh4 arch:sparc arch:xtensa
crash-test-debian: crash-test-debian:
extends: .native_test_job_template extends: .native_test_job_template
@@ -104,21 +109,24 @@ crash-test-debian:
IMAGE: debian-amd64 IMAGE: debian-amd64
script: script:
- cd build - cd build
- make NINJA=":" check-venv - make check-venv
- pyvenv/bin/python3 scripts/device-crash-test -q --tcg-only ./qemu-system-i386 - tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-i386
build-system-fedora: build-system-fedora:
extends: extends: .native_build_job_template
- .native_build_job_template
- .native_build_artifact_template
needs: needs:
job: amd64-fedora-container job: amd64-fedora-container
variables: variables:
IMAGE: fedora IMAGE: fedora
CONFIGURE_ARGS: --disable-gcrypt --enable-nettle --enable-docs CONFIGURE_ARGS: --disable-gcrypt --enable-nettle --enable-docs
TARGETS: microblaze-softmmu mips-softmmu --enable-fdt=system --enable-slirp --enable-capstone
TARGETS: tricore-softmmu microblaze-softmmu mips-softmmu
xtensa-softmmu m68k-softmmu riscv32-softmmu ppc-softmmu sparc64-softmmu xtensa-softmmu m68k-softmmu riscv32-softmmu ppc-softmmu sparc64-softmmu
MAKE_CHECK_ARGS: check-build MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-fedora: check-system-fedora:
extends: .native_test_job_template extends: .native_test_job_template
@@ -137,8 +145,6 @@ avocado-system-fedora:
variables: variables:
IMAGE: fedora IMAGE: fedora
MAKE_CHECK_ARGS: check-avocado MAKE_CHECK_ARGS: check-avocado
AVOCADO_TAGS: arch:microblaze arch:mips arch:xtensa arch:m68k
arch:riscv32 arch:ppc arch:sparc64
crash-test-fedora: crash-test-fedora:
extends: .native_test_job_template extends: .native_test_job_template
@@ -149,23 +155,26 @@ crash-test-fedora:
IMAGE: fedora IMAGE: fedora
script: script:
- cd build - cd build
- make NINJA=":" check-venv - make check-venv
- pyvenv/bin/python3 scripts/device-crash-test -q ./qemu-system-ppc - tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-ppc
- pyvenv/bin/python3 scripts/device-crash-test -q ./qemu-system-riscv32 - tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-riscv32
build-system-centos: build-system-centos:
extends: extends: .native_build_job_template
- .native_build_job_template
- .native_build_artifact_template
needs: needs:
job: amd64-centos8-container job: amd64-centos8-container
variables: variables:
IMAGE: centos8 IMAGE: centos8
CONFIGURE_ARGS: --disable-nettle --enable-gcrypt --enable-vfio-user-server CONFIGURE_ARGS: --disable-nettle --enable-gcrypt --enable-fdt=system
--enable-modules --enable-trace-backends=dtrace --enable-docs --enable-modules --enable-trace-backends=dtrace --enable-docs
--enable-vfio-user-server
TARGETS: ppc64-softmmu or1k-softmmu s390x-softmmu TARGETS: ppc64-softmmu or1k-softmmu s390x-softmmu
x86_64-softmmu rx-softmmu sh4-softmmu nios2-softmmu x86_64-softmmu rx-softmmu sh4-softmmu nios2-softmmu
MAKE_CHECK_ARGS: check-build MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-centos: check-system-centos:
extends: .native_test_job_template extends: .native_test_job_template
@@ -184,19 +193,20 @@ avocado-system-centos:
variables: variables:
IMAGE: centos8 IMAGE: centos8
MAKE_CHECK_ARGS: check-avocado MAKE_CHECK_ARGS: check-avocado
AVOCADO_TAGS: arch:ppc64 arch:or1k arch:390x arch:x86_64 arch:rx
arch:sh4 arch:nios2
build-system-opensuse: build-system-opensuse:
extends: extends: .native_build_job_template
- .native_build_job_template
- .native_build_artifact_template
needs: needs:
job: amd64-opensuse-leap-container job: amd64-opensuse-leap-container
variables: variables:
IMAGE: opensuse-leap IMAGE: opensuse-leap
CONFIGURE_ARGS: --enable-fdt=system
TARGETS: s390x-softmmu x86_64-softmmu aarch64-softmmu TARGETS: s390x-softmmu x86_64-softmmu aarch64-softmmu
MAKE_CHECK_ARGS: check-build MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-opensuse: check-system-opensuse:
extends: .native_test_job_template extends: .native_test_job_template
@@ -215,7 +225,6 @@ avocado-system-opensuse:
variables: variables:
IMAGE: opensuse-leap IMAGE: opensuse-leap
MAKE_CHECK_ARGS: check-avocado MAKE_CHECK_ARGS: check-avocado
AVOCADO_TAGS: arch:s390x arch:x86_64 arch:aarch64
# This jobs explicitly disable TCG (--disable-tcg), KVM is detected by # This jobs explicitly disable TCG (--disable-tcg), KVM is detected by
@@ -256,7 +265,6 @@ build-user:
variables: variables:
IMAGE: debian-all-test-cross IMAGE: debian-all-test-cross
CONFIGURE_ARGS: --disable-tools --disable-system CONFIGURE_ARGS: --disable-tools --disable-system
--target-list-exclude=alpha-linux-user,sh4-linux-user
MAKE_CHECK_ARGS: check-tcg MAKE_CHECK_ARGS: check-tcg
build-user-static: build-user-static:
@@ -266,18 +274,6 @@ build-user-static:
variables: variables:
IMAGE: debian-all-test-cross IMAGE: debian-all-test-cross
CONFIGURE_ARGS: --disable-tools --disable-system --static CONFIGURE_ARGS: --disable-tools --disable-system --static
--target-list-exclude=alpha-linux-user,sh4-linux-user
MAKE_CHECK_ARGS: check-tcg
# targets stuck on older compilers
build-legacy:
extends: .native_build_job_template
needs:
job: amd64-debian-legacy-cross-container
variables:
IMAGE: debian-legacy-test-cross
TARGETS: alpha-linux-user alpha-softmmu sh4-linux-user
CONFIGURE_ARGS: --disable-tools
MAKE_CHECK_ARGS: check-tcg MAKE_CHECK_ARGS: check-tcg
build-user-hexagon: build-user-hexagon:
@@ -290,9 +286,7 @@ build-user-hexagon:
CONFIGURE_ARGS: --disable-tools --disable-docs --enable-debug-tcg CONFIGURE_ARGS: --disable-tools --disable-docs --enable-debug-tcg
MAKE_CHECK_ARGS: check-tcg MAKE_CHECK_ARGS: check-tcg
# Build the softmmu targets we have check-tcg tests and compilers in # Only build the softmmu targets we have check-tcg tests for
# our omnibus all-test-cross container. Those targets that haven't got
# Debian cross compiler support need to use special containers.
build-some-softmmu: build-some-softmmu:
extends: .native_build_job_template extends: .native_build_job_template
needs: needs:
@@ -300,18 +294,7 @@ build-some-softmmu:
variables: variables:
IMAGE: debian-all-test-cross IMAGE: debian-all-test-cross
CONFIGURE_ARGS: --disable-tools --enable-debug CONFIGURE_ARGS: --disable-tools --enable-debug
TARGETS: arm-softmmu aarch64-softmmu i386-softmmu riscv64-softmmu TARGETS: xtensa-softmmu arm-softmmu aarch64-softmmu alpha-softmmu
s390x-softmmu x86_64-softmmu
MAKE_CHECK_ARGS: check-tcg
build-loongarch64:
extends: .native_build_job_template
needs:
job: loongarch-debian-cross-container
variables:
IMAGE: debian-loongarch-cross
CONFIGURE_ARGS: --disable-tools --enable-debug
TARGETS: loongarch64-linux-user loongarch64-softmmu
MAKE_CHECK_ARGS: check-tcg MAKE_CHECK_ARGS: check-tcg
# We build tricore in a very minimal tricore only container # We build tricore in a very minimal tricore only container
@@ -344,7 +327,7 @@ clang-user:
variables: variables:
IMAGE: debian-all-test-cross IMAGE: debian-all-test-cross
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --disable-system CONFIGURE_ARGS: --cc=clang --cxx=clang++ --disable-system
--target-list-exclude=alpha-linux-user,microblazeel-linux-user,aarch64_be-linux-user,i386-linux-user,m68k-linux-user,mipsn32el-linux-user,xtensaeb-linux-user --target-list-exclude=microblazeel-linux-user,aarch64_be-linux-user,i386-linux-user,m68k-linux-user,mipsn32el-linux-user,xtensaeb-linux-user
--extra-cflags=-fsanitize=undefined --extra-cflags=-fno-sanitize-recover=undefined --extra-cflags=-fsanitize=undefined --extra-cflags=-fno-sanitize-recover=undefined
MAKE_CHECK_ARGS: check-unit check-tcg MAKE_CHECK_ARGS: check-unit check-tcg
@@ -358,9 +341,7 @@ clang-user:
# Split in three sets of build/check/avocado to limit the execution time of each # Split in three sets of build/check/avocado to limit the execution time of each
# job # job
build-cfi-aarch64: build-cfi-aarch64:
extends: extends: .native_build_job_template
- .native_build_job_template
- .native_build_artifact_template
needs: needs:
- job: amd64-fedora-container - job: amd64-fedora-container
variables: variables:
@@ -376,6 +357,10 @@ build-cfi-aarch64:
# skipped until the situation has been solved. # skipped until the situation has been solved.
QEMU_JOB_SKIPPED: 1 QEMU_JOB_SKIPPED: 1
timeout: 90m timeout: 90m
artifacts:
expire_in: 2 days
paths:
- build
check-cfi-aarch64: check-cfi-aarch64:
extends: .native_test_job_template extends: .native_test_job_template
@@ -396,9 +381,7 @@ avocado-cfi-aarch64:
MAKE_CHECK_ARGS: check-avocado MAKE_CHECK_ARGS: check-avocado
build-cfi-ppc64-s390x: build-cfi-ppc64-s390x:
extends: extends: .native_build_job_template
- .native_build_job_template
- .native_build_artifact_template
needs: needs:
- job: amd64-fedora-container - job: amd64-fedora-container
variables: variables:
@@ -414,6 +397,10 @@ build-cfi-ppc64-s390x:
# skipped until the situation has been solved. # skipped until the situation has been solved.
QEMU_JOB_SKIPPED: 1 QEMU_JOB_SKIPPED: 1
timeout: 80m timeout: 80m
artifacts:
expire_in: 2 days
paths:
- build
check-cfi-ppc64-s390x: check-cfi-ppc64-s390x:
extends: .native_test_job_template extends: .native_test_job_template
@@ -434,9 +421,7 @@ avocado-cfi-ppc64-s390x:
MAKE_CHECK_ARGS: check-avocado MAKE_CHECK_ARGS: check-avocado
build-cfi-x86_64: build-cfi-x86_64:
extends: extends: .native_build_job_template
- .native_build_job_template
- .native_build_artifact_template
needs: needs:
- job: amd64-fedora-container - job: amd64-fedora-container
variables: variables:
@@ -448,6 +433,10 @@ build-cfi-x86_64:
TARGETS: x86_64-softmmu TARGETS: x86_64-softmmu
MAKE_CHECK_ARGS: check-build MAKE_CHECK_ARGS: check-build
timeout: 70m timeout: 70m
artifacts:
expire_in: 2 days
paths:
- build
check-cfi-x86_64: check-cfi-x86_64:
extends: .native_test_job_template extends: .native_test_job_template
@@ -470,24 +459,36 @@ avocado-cfi-x86_64:
tsan-build: tsan-build:
extends: .native_build_job_template extends: .native_build_job_template
needs: needs:
job: amd64-ubuntu2204-container job: amd64-ubuntu2004-container
variables: variables:
IMAGE: ubuntu2204 IMAGE: ubuntu2004
CONFIGURE_ARGS: --enable-tsan --cc=clang --cxx=clang++ CONFIGURE_ARGS: --enable-tsan --cc=clang-10 --cxx=clang++-10
--enable-trace-backends=ust --disable-slirp --enable-trace-backends=ust --enable-fdt=system --disable-slirp
TARGETS: x86_64-softmmu ppc64-softmmu riscv64-softmmu x86_64-linux-user TARGETS: x86_64-softmmu ppc64-softmmu riscv64-softmmu x86_64-linux-user
MAKE_CHECK_ARGS: bench V=1
# gcov is a GCC features # gprof/gcov are GCC features
gcov: build-gprof-gcov:
extends: .native_build_job_template extends: .native_build_job_template
needs: needs:
job: amd64-ubuntu2204-container job: amd64-ubuntu2004-container
timeout: 80m
variables: variables:
IMAGE: ubuntu2204 IMAGE: ubuntu2004
CONFIGURE_ARGS: --enable-gcov CONFIGURE_ARGS: --enable-gprof --enable-gcov
TARGETS: aarch64-softmmu ppc64-softmmu s390x-softmmu x86_64-softmmu TARGETS: aarch64-softmmu ppc64-softmmu s390x-softmmu x86_64-softmmu
MAKE_CHECK_ARGS: check-unit check-softfloat artifacts:
expire_in: 1 days
paths:
- build
check-gprof-gcov:
extends: .native_test_job_template
needs:
- job: build-gprof-gcov
artifacts: true
variables:
IMAGE: ubuntu2004
MAKE_CHECK_ARGS: check
after_script: after_script:
- cd build - cd build
- gcovr --xml-pretty --exclude-unreachable-branches --print-summary - gcovr --xml-pretty --exclude-unreachable-branches --print-summary
@@ -495,12 +496,8 @@ gcov:
coverage: /^\s*lines:\s*\d+.\d+\%/ coverage: /^\s*lines:\s*\d+.\d+\%/
artifacts: artifacts:
name: ${CI_JOB_NAME}-${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHA} name: ${CI_JOB_NAME}-${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHA}
when: always
expire_in: 2 days expire_in: 2 days
paths:
- build/meson-logs/testlog.txt
reports: reports:
junit: build/meson-logs/testlog.junit.xml
coverage_report: coverage_report:
coverage_format: cobertura coverage_format: cobertura
path: build/coverage.xml path: build/coverage.xml
@@ -531,7 +528,7 @@ build-tci:
variables: variables:
IMAGE: debian-all-test-cross IMAGE: debian-all-test-cross
script: script:
- TARGETS="aarch64 arm hppa m68k microblaze ppc64 s390x x86_64" - TARGETS="aarch64 alpha arm hppa m68k microblaze ppc64 s390x x86_64"
- mkdir build - mkdir build
- cd build - cd build
- ../configure --enable-tcg-interpreter --disable-docs --disable-gtk --disable-vnc - ../configure --enable-tcg-interpreter --disable-docs --disable-gtk --disable-vnc
@@ -548,6 +545,18 @@ build-tci:
- QTEST_QEMU_BINARY="./qemu-system-s390x" ./tests/qtest/pxe-test -m slow - QTEST_QEMU_BINARY="./qemu-system-s390x" ./tests/qtest/pxe-test -m slow
- make check-tcg - make check-tcg
# Alternate coroutines implementations are only really of interest to KVM users
# However we can't test against KVM on Gitlab-CI so we can only run unit tests
build-coroutine-sigaltstack:
extends: .native_build_job_template
needs:
job: amd64-ubuntu2004-container
variables:
IMAGE: ubuntu2004
CONFIGURE_ARGS: --with-coroutine=sigaltstack --disable-tcg
--enable-trace-backends=ftrace
MAKE_CHECK_ARGS: check-unit
# Check our reduced build configurations # Check our reduced build configurations
build-without-defaults: build-without-defaults:
extends: .native_build_job_template extends: .native_build_job_template
@@ -564,12 +573,12 @@ build-without-defaults:
--disable-strip --disable-strip
TARGETS: avr-softmmu mips64-softmmu s390x-softmmu sh4-softmmu TARGETS: avr-softmmu mips64-softmmu s390x-softmmu sh4-softmmu
sparc64-softmmu hexagon-linux-user i386-linux-user s390x-linux-user sparc64-softmmu hexagon-linux-user i386-linux-user s390x-linux-user
MAKE_CHECK_ARGS: check MAKE_CHECK_ARGS: check-unit check-qtest-avr check-qtest-mips64
build-libvhost-user: build-libvhost-user:
extends: .base_job_template extends: .base_job_template
stage: build stage: build
image: $CI_REGISTRY_IMAGE/qemu/fedora:$QEMU_CI_CONTAINER_TAG image: $CI_REGISTRY_IMAGE/qemu/fedora:latest
needs: needs:
job: amd64-fedora-container job: amd64-fedora-container
script: script:
@@ -581,9 +590,7 @@ build-libvhost-user:
# No targets are built here, just tools, docs, and unit tests. This # No targets are built here, just tools, docs, and unit tests. This
# also feeds into the eventual documentation deployment steps later # also feeds into the eventual documentation deployment steps later
build-tools-and-docs-debian: build-tools-and-docs-debian:
extends: extends: .native_build_job_template
- .native_build_job_template
- .native_build_artifact_template
needs: needs:
job: amd64-debian-container job: amd64-debian-container
# when running on 'master' we use pre-existing container # when running on 'master' we use pre-existing container
@@ -593,6 +600,10 @@ build-tools-and-docs-debian:
MAKE_CHECK_ARGS: check-unit ctags TAGS cscope MAKE_CHECK_ARGS: check-unit ctags TAGS cscope
CONFIGURE_ARGS: --disable-system --disable-user --enable-docs --enable-tools CONFIGURE_ARGS: --disable-system --disable-user --enable-docs --enable-tools
QEMU_JOB_PUBLISH: 1 QEMU_JOB_PUBLISH: 1
artifacts:
expire_in: 2 days
paths:
- build
# Prepare for GitLab pages deployment. Anything copied into the # Prepare for GitLab pages deployment. Anything copied into the
# "public" directory will be deployed to $USER.gitlab.io/$PROJECT # "public" directory will be deployed to $USER.gitlab.io/$PROJECT
@@ -609,7 +620,7 @@ build-tools-and-docs-debian:
# of what topic branch they're currently using # of what topic branch they're currently using
pages: pages:
extends: .base_job_template extends: .base_job_template
image: $CI_REGISTRY_IMAGE/qemu/debian-amd64:$QEMU_CI_CONTAINER_TAG image: $CI_REGISTRY_IMAGE/qemu/debian-amd64:latest
stage: test stage: test
needs: needs:
- job: build-tools-and-docs-debian - job: build-tools-and-docs-debian
@@ -624,7 +635,6 @@ pages:
- make -C build install DESTDIR=$(pwd)/temp-install - make -C build install DESTDIR=$(pwd)/temp-install
- mv temp-install/usr/local/share/doc/qemu/* public/ - mv temp-install/usr/local/share/doc/qemu/* public/
artifacts: artifacts:
when: on_success
paths: paths:
- public - public
variables: variables:

View File

@@ -15,10 +15,8 @@
stage: build stage: build
image: registry.gitlab.com/libvirt/libvirt-ci/cirrus-run:master image: registry.gitlab.com/libvirt/libvirt-ci/cirrus-run:master
needs: [] needs: []
# 20 mins larger than "timeout_in" in cirrus/build.yml
# as there's often a 5-10 minute delay before Cirrus CI
# actually starts the task
timeout: 80m timeout: 80m
allow_failure: true
script: script:
- source .gitlab-ci.d/cirrus/$NAME.vars - source .gitlab-ci.d/cirrus/$NAME.vars
- sed -e "s|[@]CI_REPOSITORY_URL@|$CI_REPOSITORY_URL|g" - sed -e "s|[@]CI_REPOSITORY_URL@|$CI_REPOSITORY_URL|g"
@@ -46,13 +44,26 @@
variables: variables:
QEMU_JOB_CIRRUS: 1 QEMU_JOB_CIRRUS: 1
x64-freebsd-12-build:
extends: .cirrus_build_job
variables:
NAME: freebsd-12
CIRRUS_VM_INSTANCE_TYPE: freebsd_instance
CIRRUS_VM_IMAGE_SELECTOR: image_family
CIRRUS_VM_IMAGE_NAME: freebsd-12-4
CIRRUS_VM_CPUS: 8
CIRRUS_VM_RAM: 8G
UPDATE_COMMAND: pkg update; pkg upgrade -y
INSTALL_COMMAND: pkg install -y
TEST_TARGETS: check
x64-freebsd-13-build: x64-freebsd-13-build:
extends: .cirrus_build_job extends: .cirrus_build_job
variables: variables:
NAME: freebsd-13 NAME: freebsd-13
CIRRUS_VM_INSTANCE_TYPE: freebsd_instance CIRRUS_VM_INSTANCE_TYPE: freebsd_instance
CIRRUS_VM_IMAGE_SELECTOR: image_family CIRRUS_VM_IMAGE_SELECTOR: image_family
CIRRUS_VM_IMAGE_NAME: freebsd-13-2 CIRRUS_VM_IMAGE_NAME: freebsd-13-1
CIRRUS_VM_CPUS: 8 CIRRUS_VM_CPUS: 8
CIRRUS_VM_RAM: 8G CIRRUS_VM_RAM: 8G
UPDATE_COMMAND: pkg update; pkg upgrade -y UPDATE_COMMAND: pkg update; pkg upgrade -y

View File

@@ -16,8 +16,6 @@ env:
TEST_TARGETS: "@TEST_TARGETS@" TEST_TARGETS: "@TEST_TARGETS@"
build_task: build_task:
# A little shorter than GitLab timeout in ../cirrus.yml
timeout_in: 60m
install_script: install_script:
- @UPDATE_COMMAND@ - @UPDATE_COMMAND@
- @INSTALL_COMMAND@ @PKGS@ - @INSTALL_COMMAND@ @PKGS@
@@ -34,9 +32,6 @@ build_task:
- $MAKE -j$(sysctl -n hw.ncpu) - $MAKE -j$(sysctl -n hw.ncpu)
- for TARGET in $TEST_TARGETS ; - for TARGET in $TEST_TARGETS ;
do do
$MAKE -j$(sysctl -n hw.ncpu) $TARGET V=1 ; $MAKE -j$(sysctl -n hw.ncpu) $TARGET V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;
done done
always:
build_result_artifacts:
path: build/meson-logs/*log.txt
type: text/plain

View File

@@ -0,0 +1,16 @@
# THIS FILE WAS AUTO-GENERATED
#
# $ lcitool variables freebsd-12 qemu
#
# https://gitlab.com/libvirt/libvirt-ci
CCACHE='/usr/local/bin/ccache'
CPAN_PKGS=''
CROSS_PKGS=''
MAKE='/usr/local/bin/gmake'
NINJA='/usr/local/bin/ninja'
PACKAGING_COMMAND='pkg'
PIP3='/usr/local/bin/pip-3.8'
PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio spice-protocol tesseract usbredir virglrenderer vte3 zstd'
PYPI_PKGS=''
PYTHON='/usr/local/bin/python3'

View File

@@ -11,6 +11,6 @@ MAKE='/usr/local/bin/gmake'
NINJA='/usr/local/bin/ninja' NINJA='/usr/local/bin/ninja'
PACKAGING_COMMAND='pkg' PACKAGING_COMMAND='pkg'
PIP3='/usr/local/bin/pip-3.8' PIP3='/usr/local/bin/pip-3.8'
PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson mtools ncurses nettle ninja opencv pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-tomli py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio socat spice-protocol tesseract usbredir virglrenderer vte3 xorriso zstd' PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio spice-protocol tesseract usbredir virglrenderer vte3 zstd'
PYPI_PKGS='' PYPI_PKGS=''
PYTHON='/usr/local/bin/python3' PYTHON='/usr/local/bin/python3'

View File

@@ -15,7 +15,7 @@ env:
folder: $HOME/.cache/qemu-vm folder: $HOME/.cache/qemu-vm
install_script: install_script:
- dnf update -y - dnf update -y
- dnf install -y git make openssh-clients qemu-img qemu-system-x86 wget meson - dnf install -y git make openssh-clients qemu-img qemu-system-x86 wget
clone_script: clone_script:
- git clone --depth 100 "$CI_REPOSITORY_URL" . - git clone --depth 100 "$CI_REPOSITORY_URL" .
- git fetch origin "$CI_COMMIT_REF_NAME" - git fetch origin "$CI_COMMIT_REF_NAME"

View File

@@ -11,6 +11,6 @@ MAKE='/opt/homebrew/bin/gmake'
NINJA='/opt/homebrew/bin/ninja' NINJA='/opt/homebrew/bin/ninja'
PACKAGING_COMMAND='brew' PACKAGING_COMMAND='brew'
PIP3='/opt/homebrew/bin/pip3' PIP3='/opt/homebrew/bin/pip3'
PKGS='bash bc bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo json-c libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson mtools ncurses nettle ninja pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy socat sparse spice-protocol swtpm tesseract usbredir vde vte3 xorriso zlib zstd' PKGS='bash bc bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo json-c libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson ncurses nettle ninja pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy sparse spice-protocol tesseract usbredir vde vte3 zlib zstd'
PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme tomli' PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme'
PYTHON='/opt/homebrew/bin/python3' PYTHON='/opt/homebrew/bin/python3'

View File

@@ -1,3 +1,9 @@
alpha-debian-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-alpha-cross
amd64-debian-cross-container: amd64-debian-cross-container:
extends: .container_job_template extends: .container_job_template
stage: containers stage: containers
@@ -10,12 +16,6 @@ amd64-debian-user-cross-container:
variables: variables:
NAME: debian-all-test-cross NAME: debian-all-test-cross
amd64-debian-legacy-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-legacy-test-cross
arm64-debian-cross-container: arm64-debian-cross-container:
extends: .container_job_template extends: .container_job_template
stage: containers stage: containers
@@ -40,11 +40,23 @@ hexagon-cross-container:
variables: variables:
NAME: debian-hexagon-cross NAME: debian-hexagon-cross
loongarch-debian-cross-container: hppa-debian-cross-container:
extends: .container_job_template extends: .container_job_template
stage: containers stage: containers
variables: variables:
NAME: debian-loongarch-cross NAME: debian-hppa-cross
m68k-debian-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-m68k-cross
mips64-debian-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-mips64-cross
mips64el-debian-cross-container: mips64el-debian-cross-container:
extends: .container_job_template extends: .container_job_template
@@ -52,12 +64,24 @@ mips64el-debian-cross-container:
variables: variables:
NAME: debian-mips64el-cross NAME: debian-mips64el-cross
mips-debian-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-mips-cross
mipsel-debian-cross-container: mipsel-debian-cross-container:
extends: .container_job_template extends: .container_job_template
stage: containers stage: containers
variables: variables:
NAME: debian-mipsel-cross NAME: debian-mipsel-cross
powerpc-test-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-powerpc-test-cross
ppc64el-debian-cross-container: ppc64el-debian-cross-container:
extends: .container_job_template extends: .container_job_template
stage: containers stage: containers
@@ -71,7 +95,13 @@ riscv64-debian-cross-container:
allow_failure: true allow_failure: true
variables: variables:
NAME: debian-riscv64-cross NAME: debian-riscv64-cross
QEMU_JOB_OPTIONAL: 1
# we can however build TCG tests using a non-sid base
riscv64-debian-test-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-riscv64-test-cross
s390x-debian-cross-container: s390x-debian-cross-container:
extends: .container_job_template extends: .container_job_template
@@ -79,6 +109,18 @@ s390x-debian-cross-container:
variables: variables:
NAME: debian-s390x-cross NAME: debian-s390x-cross
sh4-debian-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-sh4-cross
sparc64-debian-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-sparc64-cross
tricore-debian-cross-container: tricore-debian-cross-container:
extends: .container_job_template extends: .container_job_template
stage: containers stage: containers

View File

@@ -1,21 +1,22 @@
.container_job_template: .container_job_template:
extends: .base_job_template extends: .base_job_template
image: docker:latest image: docker:stable
stage: containers stage: containers
services: services:
- docker:dind - docker:dind
before_script: before_script:
- export TAG="$CI_REGISTRY_IMAGE/qemu/$NAME:$QEMU_CI_CONTAINER_TAG" - export TAG="$CI_REGISTRY_IMAGE/qemu/$NAME:latest"
# Always ':latest' because we always use upstream as a common cache source - export COMMON_TAG="$CI_REGISTRY/qemu-project/qemu/$NAME:latest"
- export COMMON_TAG="$CI_REGISTRY/qemu-project/qemu/qemu/$NAME:latest" - apk add python3
- docker info
- docker login $CI_REGISTRY -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" - docker login $CI_REGISTRY -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD"
- until docker info; do sleep 1; done
script: script:
- echo "TAG:$TAG" - echo "TAG:$TAG"
- echo "COMMON_TAG:$COMMON_TAG" - echo "COMMON_TAG:$COMMON_TAG"
- docker build --tag "$TAG" --cache-from "$TAG" --cache-from "$COMMON_TAG" - ./tests/docker/docker.py --engine docker build
--build-arg BUILDKIT_INLINE_CACHE=1 -t "qemu/$NAME" -f "tests/docker/dockerfiles/$NAME.docker"
-f "tests/docker/dockerfiles/$NAME.docker" "." -r $CI_REGISTRY/qemu-project/qemu
- docker tag "qemu/$NAME" "$TAG"
- docker push "$TAG" - docker push "$TAG"
after_script: after_script:
- docker logout - docker logout

View File

@@ -13,10 +13,10 @@ amd64-debian-container:
variables: variables:
NAME: debian-amd64 NAME: debian-amd64
amd64-ubuntu2204-container: amd64-ubuntu2004-container:
extends: .container_job_template extends: .container_job_template
variables: variables:
NAME: ubuntu2204 NAME: ubuntu2004
amd64-opensuse-leap-container: amd64-opensuse-leap-container:
extends: .container_job_template extends: .container_job_template

View File

@@ -1,24 +1,13 @@
.cross_system_build_job: .cross_system_build_job:
extends: .base_job_template extends: .base_job_template
stage: build stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
cache:
paths:
- ccache
key: "$CI_JOB_NAME"
when: always
timeout: 80m timeout: 80m
script: script:
- export CCACHE_BASEDIR="$(pwd)"
- export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
- export CCACHE_MAXSIZE="500M"
- export PATH="$CCACHE_WRAPPERSDIR:$PATH"
- mkdir build - mkdir build
- cd build - cd build
- ccache --zero-stats - ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
- ../configure --enable-werror --disable-docs --enable-fdt=system --disable-user --target-list-exclude="arm-softmmu cris-softmmu
--disable-user $QEMU_CONFIGURE_OPTS $EXTRA_CONFIGURE_OPTS
--target-list-exclude="arm-softmmu cris-softmmu
i386-softmmu microblaze-softmmu mips-softmmu mipsel-softmmu i386-softmmu microblaze-softmmu mips-softmmu mipsel-softmmu
mips64-softmmu ppc-softmmu riscv32-softmmu sh4-softmmu mips64-softmmu ppc-softmmu riscv32-softmmu sh4-softmmu
sparc-softmmu xtensa-softmmu $CROSS_SKIP_TARGETS" sparc-softmmu xtensa-softmmu $CROSS_SKIP_TARGETS"
@@ -28,7 +17,6 @@
version="$(git describe --match v[0-9]* 2>/dev/null || git rev-parse --short HEAD)"; version="$(git describe --match v[0-9]* 2>/dev/null || git rev-parse --short HEAD)";
mv -v qemu-setup*.exe qemu-setup-${version}.exe; mv -v qemu-setup*.exe qemu-setup-${version}.exe;
fi fi
- ccache --show-stats
# Job to cross-build specific accelerators. # Job to cross-build specific accelerators.
# #
@@ -38,17 +26,9 @@
.cross_accel_build_job: .cross_accel_build_job:
extends: .base_job_template extends: .base_job_template
stage: build stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
timeout: 30m timeout: 30m
cache:
paths:
- ccache/
key: "$CI_JOB_NAME"
script: script:
- export CCACHE_BASEDIR="$(pwd)"
- export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
- export CCACHE_MAXSIZE="500M"
- export PATH="$CCACHE_WRAPPERSDIR:$PATH"
- mkdir build - mkdir build
- cd build - cd build
- ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS - ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
@@ -58,15 +38,8 @@
.cross_user_build_job: .cross_user_build_job:
extends: .base_job_template extends: .base_job_template
stage: build stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
cache:
paths:
- ccache/
key: "$CI_JOB_NAME"
script: script:
- export CCACHE_BASEDIR="$(pwd)"
- export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
- export CCACHE_MAXSIZE="500M"
- mkdir build - mkdir build
- cd build - cd build
- ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS - ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
@@ -75,15 +48,3 @@
nios2-linux-user or1k-linux-user ppc-linux-user sparc-linux-user nios2-linux-user or1k-linux-user ppc-linux-user sparc-linux-user
xtensa-linux-user $CROSS_SKIP_TARGETS" xtensa-linux-user $CROSS_SKIP_TARGETS"
- make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS - make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS
# We can still run some tests on some of our cross build jobs. They can add this
# template to their extends to save the build logs and test results
.cross_test_artifacts:
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
when: always
expire_in: 7 days
paths:
- build/meson-logs/testlog.txt
reports:
junit: build/meson-logs/testlog.junit.xml

View File

@@ -1,6 +1,13 @@
include: include:
- local: '/.gitlab-ci.d/crossbuild-template.yml' - local: '/.gitlab-ci.d/crossbuild-template.yml'
cross-armel-system:
extends: .cross_system_build_job
needs:
job: armel-debian-cross-container
variables:
IMAGE: debian-armel-cross
cross-armel-user: cross-armel-user:
extends: .cross_user_build_job extends: .cross_user_build_job
needs: needs:
@@ -8,6 +15,13 @@ cross-armel-user:
variables: variables:
IMAGE: debian-armel-cross IMAGE: debian-armel-cross
cross-armhf-system:
extends: .cross_system_build_job
needs:
job: armhf-debian-cross-container
variables:
IMAGE: debian-armhf-cross
cross-armhf-user: cross-armhf-user:
extends: .cross_user_build_job extends: .cross_user_build_job
needs: needs:
@@ -29,18 +43,16 @@ cross-arm64-user:
variables: variables:
IMAGE: debian-arm64-cross IMAGE: debian-arm64-cross
cross-arm64-kvm-only: cross-i386-system:
extends: .cross_accel_build_job extends: .cross_system_build_job
needs: needs:
job: arm64-debian-cross-container job: i386-fedora-cross-container
variables: variables:
IMAGE: debian-arm64-cross IMAGE: fedora-i386-cross
EXTRA_CONFIGURE_OPTS: --disable-tcg --without-default-features MAKE_CHECK_ARGS: check-qtest
cross-i386-user: cross-i386-user:
extends: extends: .cross_user_build_job
- .cross_user_build_job
- .cross_test_artifacts
needs: needs:
job: i386-fedora-cross-container job: i386-fedora-cross-container
variables: variables:
@@ -48,16 +60,14 @@ cross-i386-user:
MAKE_CHECK_ARGS: check MAKE_CHECK_ARGS: check
cross-i386-tci: cross-i386-tci:
extends: extends: .cross_accel_build_job
- .cross_accel_build_job
- .cross_test_artifacts
timeout: 60m timeout: 60m
needs: needs:
job: i386-fedora-cross-container job: i386-fedora-cross-container
variables: variables:
IMAGE: fedora-i386-cross IMAGE: fedora-i386-cross
ACCEL: tcg-interpreter ACCEL: tcg-interpreter
EXTRA_CONFIGURE_OPTS: --target-list=i386-softmmu,i386-linux-user,aarch64-softmmu,aarch64-linux-user,ppc-softmmu,ppc-linux-user --disable-plugins EXTRA_CONFIGURE_OPTS: --target-list=i386-softmmu,i386-linux-user,aarch64-softmmu,aarch64-linux-user,ppc-softmmu,ppc-linux-user
MAKE_CHECK_ARGS: check check-tcg MAKE_CHECK_ARGS: check check-tcg
cross-mipsel-system: cross-mipsel-system:
@@ -149,7 +159,7 @@ cross-s390x-kvm-only:
job: s390x-debian-cross-container job: s390x-debian-cross-container
variables: variables:
IMAGE: debian-s390x-cross IMAGE: debian-s390x-cross
EXTRA_CONFIGURE_OPTS: --disable-tcg --enable-trace-backends=ftrace EXTRA_CONFIGURE_OPTS: --disable-tcg
cross-mips64el-kvm-only: cross-mips64el-kvm-only:
extends: .cross_accel_build_job extends: .cross_accel_build_job
@@ -165,11 +175,9 @@ cross-win32-system:
job: win32-fedora-cross-container job: win32-fedora-cross-container
variables: variables:
IMAGE: fedora-win32-cross IMAGE: fedora-win32-cross
EXTRA_CONFIGURE_OPTS: --enable-fdt=internal --disable-plugins
CROSS_SKIP_TARGETS: alpha-softmmu avr-softmmu hppa-softmmu m68k-softmmu CROSS_SKIP_TARGETS: alpha-softmmu avr-softmmu hppa-softmmu m68k-softmmu
microblazeel-softmmu mips64el-softmmu nios2-softmmu microblazeel-softmmu mips64el-softmmu nios2-softmmu
artifacts: artifacts:
when: on_success
paths: paths:
- build/qemu-setup*.exe - build/qemu-setup*.exe
@@ -179,13 +187,11 @@ cross-win64-system:
job: win64-fedora-cross-container job: win64-fedora-cross-container
variables: variables:
IMAGE: fedora-win64-cross IMAGE: fedora-win64-cross
EXTRA_CONFIGURE_OPTS: --enable-fdt=internal --disable-plugins
CROSS_SKIP_TARGETS: alpha-softmmu avr-softmmu hppa-softmmu CROSS_SKIP_TARGETS: alpha-softmmu avr-softmmu hppa-softmmu
m68k-softmmu microblazeel-softmmu nios2-softmmu m68k-softmmu microblazeel-softmmu nios2-softmmu
or1k-softmmu rx-softmmu sh4eb-softmmu sparc64-softmmu or1k-softmmu rx-softmmu sh4eb-softmmu sparc64-softmmu
tricore-softmmu xtensaeb-softmmu tricore-softmmu xtensaeb-softmmu
artifacts: artifacts:
when: on_success
paths: paths:
- build/qemu-setup*.exe - build/qemu-setup*.exe

View File

@@ -15,15 +15,12 @@ variables:
# All custom runners can extend this template to upload the testlog # All custom runners can extend this template to upload the testlog
# data as an artifact and also feed the junit report # data as an artifact and also feed the junit report
.custom_runner_template: .custom_artifacts_template:
extends: .base_job_template
artifacts: artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG" name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
expire_in: 7 days expire_in: 7 days
when: always
paths: paths:
- build/build.ninja - build/meson-logs/testlog.txt
- build/meson-logs
reports: reports:
junit: build/meson-logs/testlog.junit.xml junit: build/meson-logs/testlog.junit.xml

View File

@@ -1,9 +1,4 @@
# All centos-stream-8 jobs should run successfully in an environment
# setup by the scripts/ci/setup/stream/8/build-environment.yml task
# "Installation of extra packages to build QEMU"
centos-stream-8-x86_64: centos-stream-8-x86_64:
extends: .custom_runner_template
allow_failure: true allow_failure: true
needs: [] needs: []
stage: build stage: build
@@ -13,6 +8,15 @@ centos-stream-8-x86_64:
rules: rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
- if: "$CENTOS_STREAM_8_x86_64_RUNNER_AVAILABLE" - if: "$CENTOS_STREAM_8_x86_64_RUNNER_AVAILABLE"
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
when: on_failure
expire_in: 7 days
paths:
- build/tests/results/latest/results.xml
- build/tests/results/latest/test-results
reports:
junit: build/tests/results/latest/results.xml
before_script: before_script:
- JOBS=$(expr $(nproc) + 1) - JOBS=$(expr $(nproc) + 1)
script: script:
@@ -21,4 +25,6 @@ centos-stream-8-x86_64:
- ../scripts/ci/org.centos/stream/8/x86_64/configure - ../scripts/ci/org.centos/stream/8/x86_64/configure
|| { cat config.log meson-logs/meson-log.txt; exit 1; } || { cat config.log meson-logs/meson-log.txt; exit 1; }
- make -j"$JOBS" - make -j"$JOBS"
- make NINJA=":" check check-avocado - make NINJA=":" check
|| { cat meson-logs/testlog.txt; exit 1; } ;
- ../scripts/ci/org.centos/stream/8/x86_64/test-avocado

View File

@@ -3,7 +3,7 @@
# "Install basic packages to build QEMU on Ubuntu 20.04/20.04" # "Install basic packages to build QEMU on Ubuntu 20.04/20.04"
ubuntu-20.04-s390x-all-linux-static: ubuntu-20.04-s390x-all-linux-static:
extends: .custom_runner_template extends: .custom_artifacts_template
needs: [] needs: []
stage: build stage: build
tags: tags:
@@ -24,7 +24,7 @@ ubuntu-20.04-s390x-all-linux-static:
- make --output-sync -j`nproc` check - make --output-sync -j`nproc` check
ubuntu-20.04-s390x-all: ubuntu-20.04-s390x-all:
extends: .custom_runner_template extends: .custom_artifacts_template
needs: [] needs: []
stage: build stage: build
tags: tags:
@@ -43,7 +43,7 @@ ubuntu-20.04-s390x-all:
- make --output-sync -j`nproc` check - make --output-sync -j`nproc` check
ubuntu-20.04-s390x-alldbg: ubuntu-20.04-s390x-alldbg:
extends: .custom_runner_template extends: .custom_artifacts_template
needs: [] needs: []
stage: build stage: build
tags: tags:
@@ -66,7 +66,7 @@ ubuntu-20.04-s390x-alldbg:
- make --output-sync -j`nproc` check - make --output-sync -j`nproc` check
ubuntu-20.04-s390x-clang: ubuntu-20.04-s390x-clang:
extends: .custom_runner_template extends: .custom_artifacts_template
needs: [] needs: []
stage: build stage: build
tags: tags:
@@ -108,7 +108,7 @@ ubuntu-20.04-s390x-tci:
- make --output-sync -j`nproc` - make --output-sync -j`nproc`
ubuntu-20.04-s390x-notcg: ubuntu-20.04-s390x-notcg:
extends: .custom_runner_template extends: .custom_artifacts_template
needs: [] needs: []
stage: build stage: build
tags: tags:

View File

@@ -1,9 +1,9 @@
# All ubuntu-22.04 jobs should run successfully in an environment # All ubuntu-22.04 jobs should run successfully in an environment
# setup by the scripts/ci/setup/qemu/build-environment.yml task # setup by the scripts/ci/setup/qemu/build-environment.yml task
# "Install basic packages to build QEMU on Ubuntu 22.04" # "Install basic packages to build QEMU on Ubuntu 20.04"
ubuntu-22.04-aarch32-all: ubuntu-22.04-aarch32-all:
extends: .custom_runner_template extends: .custom_artifacts_template
needs: [] needs: []
stage: build stage: build
tags: tags:

View File

@@ -1,9 +1,9 @@
# All ubuntu-22.04 jobs should run successfully in an environment # All ubuntu-20.04 jobs should run successfully in an environment
# setup by the scripts/ci/setup/qemu/build-environment.yml task # setup by the scripts/ci/setup/qemu/build-environment.yml task
# "Install basic packages to build QEMU on Ubuntu 22.04" # "Install basic packages to build QEMU on Ubuntu 20.04"
ubuntu-22.04-aarch64-all-linux-static: ubuntu-22.04-aarch64-all-linux-static:
extends: .custom_runner_template extends: .custom_artifacts_template
needs: [] needs: []
stage: build stage: build
tags: tags:
@@ -24,7 +24,7 @@ ubuntu-22.04-aarch64-all-linux-static:
- make --output-sync -j`nproc --ignore=40` check - make --output-sync -j`nproc --ignore=40` check
ubuntu-22.04-aarch64-all: ubuntu-22.04-aarch64-all:
extends: .custom_runner_template extends: .custom_artifacts_template
needs: [] needs: []
stage: build stage: build
tags: tags:
@@ -45,30 +45,8 @@ ubuntu-22.04-aarch64-all:
- make --output-sync -j`nproc --ignore=40` - make --output-sync -j`nproc --ignore=40`
- make --output-sync -j`nproc --ignore=40` check - make --output-sync -j`nproc --ignore=40` check
ubuntu-22.04-aarch64-without-defaults:
extends: .custom_runner_template
needs: []
stage: build
tags:
- ubuntu_22.04
- aarch64
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
allow_failure: true
- if: "$AARCH64_RUNNER_AVAILABLE"
when: manual
allow_failure: true
script:
- mkdir build
- cd build
- ../configure --disable-user --without-default-devices --without-default-features
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc --ignore=40`
- make --output-sync -j`nproc --ignore=40` check
ubuntu-22.04-aarch64-alldbg: ubuntu-22.04-aarch64-alldbg:
extends: .custom_runner_template extends: .custom_artifacts_template
needs: [] needs: []
stage: build stage: build
tags: tags:
@@ -87,7 +65,7 @@ ubuntu-22.04-aarch64-alldbg:
- make --output-sync -j`nproc --ignore=40` check - make --output-sync -j`nproc --ignore=40` check
ubuntu-22.04-aarch64-clang: ubuntu-22.04-aarch64-clang:
extends: .custom_runner_template extends: .custom_artifacts_template
needs: [] needs: []
stage: build stage: build
tags: tags:
@@ -129,7 +107,7 @@ ubuntu-22.04-aarch64-tci:
- make --output-sync -j`nproc --ignore=40` - make --output-sync -j`nproc --ignore=40`
ubuntu-22.04-aarch64-notcg: ubuntu-22.04-aarch64-notcg:
extends: .custom_runner_template extends: .custom_artifacts_template
needs: [] needs: []
stage: build stage: build
tags: tags:
@@ -145,7 +123,7 @@ ubuntu-22.04-aarch64-notcg:
script: script:
- mkdir build - mkdir build
- cd build - cd build
- ../configure --disable-tcg --with-devices-aarch64=minimal - ../configure --disable-tcg
|| { cat config.log meson-logs/meson-log.txt; exit 1; } || { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc --ignore=40` - make --output-sync -j`nproc --ignore=40`
- make --output-sync -j`nproc --ignore=40` check - make --output-sync -j`nproc --ignore=40` check

85
.gitlab-ci.d/edk2.yml Normal file
View File

@@ -0,0 +1,85 @@
# All jobs needing docker-edk2 must use the same rules it uses.
.edk2_job_rules:
rules:
# Forks don't get pipelines unless QEMU_CI=1 or QEMU_CI=2 is set
- if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"'
when: never
# In forks, if QEMU_CI=1 is set, then create manual job
# if any of the files affecting the build are touched
- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project"'
changes:
- .gitlab-ci.d/edk2.yml
- .gitlab-ci.d/edk2/Dockerfile
- roms/edk2/*
when: manual
# In forks, if QEMU_CI=1 is set, then create manual job
# if the branch/tag starts with 'edk2'
- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_REF_NAME =~ /^edk2/'
when: manual
# In forks, if QEMU_CI=1 is set, then create manual job
# if last commit msg contains 'EDK2' (case insensitive)
- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_MESSAGE =~ /edk2/i'
when: manual
# Run if any files affecting the build output are touched
- changes:
- .gitlab-ci.d/edk2.yml
- .gitlab-ci.d/edk2/Dockerfile
- roms/edk2/*
when: on_success
# Run if the branch/tag starts with 'edk2'
- if: '$CI_COMMIT_REF_NAME =~ /^edk2/'
when: on_success
# Run if last commit msg contains 'EDK2' (case insensitive)
- if: '$CI_COMMIT_MESSAGE =~ /edk2/i'
when: on_success
docker-edk2:
extends: .edk2_job_rules
stage: containers
image: docker:19.03.1
services:
- docker:19.03.1-dind
variables:
GIT_DEPTH: 3
IMAGE_TAG: $CI_REGISTRY_IMAGE:edk2-cross-build
# We don't use TLS
DOCKER_HOST: tcp://docker:2375
DOCKER_TLS_CERTDIR: ""
before_script:
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
script:
- docker pull $IMAGE_TAG || true
- docker build --cache-from $IMAGE_TAG --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
--tag $IMAGE_TAG .gitlab-ci.d/edk2
- docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
- docker push $IMAGE_TAG
build-edk2:
extends: .edk2_job_rules
stage: build
needs: ['docker-edk2']
artifacts:
paths: # 'artifacts.zip' will contains the following files:
- pc-bios/edk2*bz2
- pc-bios/edk2-licenses.txt
- edk2-stdout.log
- edk2-stderr.log
image: $CI_REGISTRY_IMAGE:edk2-cross-build
variables:
GIT_DEPTH: 3
script: # Clone the required submodules and build EDK2
- git submodule update --init roms/edk2
- git -C roms/edk2 submodule update --init --
ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3
BaseTools/Source/C/BrotliCompress/brotli
CryptoPkg/Library/OpensslLib/openssl
MdeModulePkg/Library/BrotliCustomDecompressLib/brotli
- export JOBS=$(($(getconf _NPROCESSORS_ONLN) + 1))
- echo "=== Using ${JOBS} simultaneous jobs ==="
- make -j${JOBS} -C roms efi 2>&1 1>edk2-stdout.log | tee -a edk2-stderr.log >&2

View File

@@ -0,0 +1,27 @@
#
# Docker image to cross-compile EDK2 firmware binaries
#
FROM ubuntu:18.04
MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org>
# Install packages required to build EDK2
RUN apt update \
&& \
\
DEBIAN_FRONTEND=noninteractive \
apt install --assume-yes --no-install-recommends \
build-essential \
ca-certificates \
dos2unix \
gcc-aarch64-linux-gnu \
gcc-arm-linux-gnueabi \
git \
iasl \
make \
nasm \
python3 \
uuid-dev \
&& \
\
rm -rf /var/lib/apt/lists/*

View File

@@ -42,15 +42,17 @@
docker-opensbi: docker-opensbi:
extends: .opensbi_job_rules extends: .opensbi_job_rules
stage: containers stage: containers
image: docker:latest image: docker:19.03.1
services: services:
- docker:dind - docker:19.03.1-dind
variables: variables:
GIT_DEPTH: 3 GIT_DEPTH: 3
IMAGE_TAG: $CI_REGISTRY_IMAGE:opensbi-cross-build IMAGE_TAG: $CI_REGISTRY_IMAGE:opensbi-cross-build
# We don't use TLS
DOCKER_HOST: tcp://docker:2375
DOCKER_TLS_CERTDIR: ""
before_script: before_script:
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
- until docker info; do sleep 1; done
script: script:
- docker pull $IMAGE_TAG || true - docker pull $IMAGE_TAG || true
- docker build --cache-from $IMAGE_TAG --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA - docker build --cache-from $IMAGE_TAG --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
@@ -63,7 +65,6 @@ build-opensbi:
stage: build stage: build
needs: ['docker-opensbi'] needs: ['docker-opensbi']
artifacts: artifacts:
when: on_success
paths: # 'artifacts.zip' will contains the following files: paths: # 'artifacts.zip' will contains the following files:
- pc-bios/opensbi-riscv32-generic-fw_dynamic.bin - pc-bios/opensbi-riscv32-generic-fw_dynamic.bin
- pc-bios/opensbi-riscv64-generic-fw_dynamic.bin - pc-bios/opensbi-riscv64-generic-fw_dynamic.bin

View File

@@ -15,7 +15,6 @@ RUN apt update \
ca-certificates \ ca-certificates \
git \ git \
make \ make \
python3 \
wget \ wget \
&& \ && \
\ \

View File

@@ -1,16 +1,10 @@
# This file contains the set of jobs run by the QEMU project: # This file contains the set of jobs run by the QEMU project:
# https://gitlab.com/qemu-project/qemu/-/pipelines # https://gitlab.com/qemu-project/qemu/-/pipelines
variables:
RUNNER_TAG: ""
default:
tags:
- $RUNNER_TAG
include: include:
- local: '/.gitlab-ci.d/base.yml' - local: '/.gitlab-ci.d/base.yml'
- local: '/.gitlab-ci.d/stages.yml' - local: '/.gitlab-ci.d/stages.yml'
- local: '/.gitlab-ci.d/edk2.yml'
- local: '/.gitlab-ci.d/opensbi.yml' - local: '/.gitlab-ci.d/opensbi.yml'
- local: '/.gitlab-ci.d/containers.yml' - local: '/.gitlab-ci.d/containers.yml'
- local: '/.gitlab-ci.d/crossbuilds.yml' - local: '/.gitlab-ci.d/crossbuilds.yml'

View File

@@ -23,12 +23,12 @@ check-dco:
before_script: before_script:
- apk -U add git - apk -U add git
check-python-minreqs: check-python-pipenv:
extends: .base_job_template extends: .base_job_template
stage: test stage: test
image: $CI_REGISTRY_IMAGE/qemu/python:$QEMU_CI_CONTAINER_TAG image: $CI_REGISTRY_IMAGE/qemu/python:latest
script: script:
- make -C python check-minreqs - make -C python check-pipenv
variables: variables:
GIT_DEPTH: 1 GIT_DEPTH: 1
needs: needs:
@@ -37,7 +37,7 @@ check-python-minreqs:
check-python-tox: check-python-tox:
extends: .base_job_template extends: .base_job_template
stage: test stage: test
image: $CI_REGISTRY_IMAGE/qemu/python:$QEMU_CI_CONTAINER_TAG image: $CI_REGISTRY_IMAGE/qemu/python:latest
script: script:
- make -C python check-tox - make -C python check-tox
variables: variables:

View File

@@ -5,60 +5,21 @@
- windows - windows
- windows-1809 - windows-1809
cache: cache:
key: "$CI_JOB_NAME" key: "${CI_JOB_NAME}-cache"
paths: paths:
- msys64/var/cache - ${CI_PROJECT_DIR}/msys64/var/cache
- ccache
when: always
needs: [] needs: []
stage: build stage: build
timeout: 100m timeout: 80m
variables:
# This feature doesn't (currently) work with PowerShell, it stops
# the echo'ing of commands being run and doesn't show any timing
FF_SCRIPT_SECTIONS: 0
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
expire_in: 7 days
paths:
- build/meson-logs/testlog.txt
reports:
junit: "build/meson-logs/testlog.junit.xml"
before_script: before_script:
- Write-Output "Acquiring msys2.exe installer at $(Get-Date -Format u)"
- If ( !(Test-Path -Path msys64\var\cache ) ) { - If ( !(Test-Path -Path msys64\var\cache ) ) {
mkdir msys64\var\cache mkdir msys64\var\cache
} }
- Invoke-WebRequest - If ( !(Test-Path -Path msys64\var\cache\msys2.exe ) ) {
"https://repo.msys2.org/distrib/msys2-x86_64-latest.sfx.exe.sig"
-outfile "msys2.exe.sig"
- if ( Test-Path -Path msys64\var\cache\msys2.exe.sig ) {
Write-Output "Cached installer sig" ;
if ( ((Get-FileHash msys2.exe.sig).Hash -ne (Get-FileHash msys64\var\cache\msys2.exe.sig).Hash) ) {
Write-Output "Mis-matched installer sig, new installer download required" ;
Remove-Item -Path msys64\var\cache\msys2.exe.sig ;
if ( Test-Path -Path msys64\var\cache\msys2.exe ) {
Remove-Item -Path msys64\var\cache\msys2.exe
}
} else {
Write-Output "Matched installer sig, cached installer still valid"
}
} else {
Write-Output "No cached installer sig, new installer download required" ;
if ( Test-Path -Path msys64\var\cache\msys2.exe ) {
Remove-Item -Path msys64\var\cache\msys2.exe
}
}
- if ( !(Test-Path -Path msys64\var\cache\msys2.exe ) ) {
Write-Output "Fetching latest installer" ;
Invoke-WebRequest Invoke-WebRequest
"https://repo.msys2.org/distrib/msys2-x86_64-latest.sfx.exe" "https://github.com/msys2/msys2-installer/releases/download/2022-06-03/msys2-base-x86_64-20220603.sfx.exe"
-outfile "msys64\var\cache\msys2.exe" ; -outfile "msys64\var\cache\msys2.exe"
Copy-Item -Path msys2.exe.sig -Destination msys64\var\cache\msys2.exe.sig
} else {
Write-Output "Using cached installer"
} }
- Write-Output "Invoking msys2.exe installer at $(Get-Date -Format u)"
- msys64\var\cache\msys2.exe -y - msys64\var\cache\msys2.exe -y
- ((Get-Content -path .\msys64\etc\\post-install\\07-pacman-key.post -Raw) - ((Get-Content -path .\msys64\etc\\post-install\\07-pacman-key.post -Raw)
-replace '--refresh-keys', '--version') | -replace '--refresh-keys', '--version') |
@@ -67,75 +28,92 @@
- .\msys64\usr\bin\bash -lc 'pacman --noconfirm -Syuu' # Core update - .\msys64\usr\bin\bash -lc 'pacman --noconfirm -Syuu' # Core update
- .\msys64\usr\bin\bash -lc 'pacman --noconfirm -Syuu' # Normal update - .\msys64\usr\bin\bash -lc 'pacman --noconfirm -Syuu' # Normal update
- taskkill /F /FI "MODULES eq msys-2.0.dll" - taskkill /F /FI "MODULES eq msys-2.0.dll"
script:
- Write-Output "Installing mingw packages at $(Get-Date -Format u)"
- .\msys64\usr\bin\bash -lc "pacman -Sy --noconfirm --needed
bison diffutils flex
git grep make sed
$MINGW_TARGET-binutils
$MINGW_TARGET-capstone
$MINGW_TARGET-ccache
$MINGW_TARGET-curl
$MINGW_TARGET-cyrus-sasl
$MINGW_TARGET-dtc
$MINGW_TARGET-gcc
$MINGW_TARGET-glib2
$MINGW_TARGET-gnutls
$MINGW_TARGET-gtk3
$MINGW_TARGET-libgcrypt
$MINGW_TARGET-libjpeg-turbo
$MINGW_TARGET-libnfs
$MINGW_TARGET-libpng
$MINGW_TARGET-libssh
$MINGW_TARGET-libtasn1
$MINGW_TARGET-libusb
$MINGW_TARGET-lzo2
$MINGW_TARGET-nettle
$MINGW_TARGET-ninja
$MINGW_TARGET-pixman
$MINGW_TARGET-pkgconf
$MINGW_TARGET-python
$MINGW_TARGET-SDL2
$MINGW_TARGET-SDL2_image
$MINGW_TARGET-snappy
$MINGW_TARGET-spice
$MINGW_TARGET-usbredir
$MINGW_TARGET-zstd "
- Write-Output "Running build at $(Get-Date -Format u)"
- $env:CHERE_INVOKING = 'yes' # Preserve the current working directory
- $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink
- $env:CCACHE_BASEDIR = "$env:CI_PROJECT_DIR"
- $env:CCACHE_DIR = "$env:CCACHE_BASEDIR/ccache"
- $env:CCACHE_MAXSIZE = "500M"
- $env:CCACHE_DEPEND = 1 # cache misses are too expensive with preprocessor mode
- $env:CC = "ccache gcc"
- mkdir build
- cd build
- ..\msys64\usr\bin\bash -lc "ccache --zero-stats"
- ..\msys64\usr\bin\bash -lc "../configure --enable-fdt=system $CONFIGURE_ARGS"
- ..\msys64\usr\bin\bash -lc "make"
- ..\msys64\usr\bin\bash -lc "make check MTESTARGS='$TEST_ARGS' || { cat meson-logs/testlog.txt; exit 1; } ;"
- ..\msys64\usr\bin\bash -lc "ccache --show-stats"
- Write-Output "Finished build at $(Get-Date -Format u)"
msys2-64bit: msys2-64bit:
extends: .shared_msys2_builder extends: .shared_msys2_builder
variables: script:
MINGW_TARGET: mingw-w64-x86_64 - .\msys64\usr\bin\bash -lc "pacman -Sy --noconfirm --needed
MSYSTEM: MINGW64 bison diffutils flex
# do not remove "--without-default-devices"! git grep make sed
mingw-w64-x86_64-capstone
mingw-w64-x86_64-curl
mingw-w64-x86_64-cyrus-sasl
mingw-w64-x86_64-gcc
mingw-w64-x86_64-glib2
mingw-w64-x86_64-gnutls
mingw-w64-x86_64-gtk3
mingw-w64-x86_64-libgcrypt
mingw-w64-x86_64-libjpeg-turbo
mingw-w64-x86_64-libnfs
mingw-w64-x86_64-libpng
mingw-w64-x86_64-libssh
mingw-w64-x86_64-libtasn1
mingw-w64-x86_64-libusb
mingw-w64-x86_64-lzo2
mingw-w64-x86_64-nettle
mingw-w64-x86_64-ninja
mingw-w64-x86_64-pixman
mingw-w64-x86_64-pkgconf
mingw-w64-x86_64-python
mingw-w64-x86_64-SDL2
mingw-w64-x86_64-SDL2_image
mingw-w64-x86_64-snappy
mingw-w64-x86_64-usbredir
mingw-w64-x86_64-zstd "
- $env:CHERE_INVOKING = 'yes' # Preserve the current working directory
- $env:MSYSTEM = 'MINGW64' # Start a 64-bit MinGW environment
- $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink
- mkdir output
- cd output
# Note: do not remove "--without-default-devices"!
# commit 9f8e6cad65a6 ("gitlab-ci: Speed up the msys2-64bit job by using --without-default-devices" # commit 9f8e6cad65a6 ("gitlab-ci: Speed up the msys2-64bit job by using --without-default-devices"
# changed to compile QEMU with the --without-default-devices switch # changed to compile QEMU with the --without-default-devices switch
# for the msys2 64-bit job, due to the build could not complete within # for the msys2 64-bit job, due to the build could not complete within
CONFIGURE_ARGS: --target-list=x86_64-softmmu --without-default-devices -Ddebug=false -Doptimization=0 # the project timeout.
- ..\msys64\usr\bin\bash -lc '../configure --target-list=x86_64-softmmu
--without-default-devices'
- ..\msys64\usr\bin\bash -lc 'make'
# qTests don't run successfully with "--without-default-devices", # qTests don't run successfully with "--without-default-devices",
# so let's exclude the qtests from CI for now. # so let's exclude the qtests from CI for now.
TEST_ARGS: --no-suite qtest - ..\msys64\usr\bin\bash -lc 'make check MTESTARGS=\"--no-suite qtest\" || { cat meson-logs/testlog.txt; exit 1; } ;'
msys2-32bit: msys2-32bit:
extends: .shared_msys2_builder extends: .shared_msys2_builder
variables: script:
MINGW_TARGET: mingw-w64-i686 - .\msys64\usr\bin\bash -lc "pacman -Sy --noconfirm --needed
MSYSTEM: MINGW32 bison diffutils flex
CONFIGURE_ARGS: --target-list=ppc64-softmmu -Ddebug=false -Doptimization=0 git grep make sed
TEST_ARGS: --no-suite qtest mingw-w64-i686-capstone
mingw-w64-i686-curl
mingw-w64-i686-cyrus-sasl
mingw-w64-i686-gcc
mingw-w64-i686-glib2
mingw-w64-i686-gnutls
mingw-w64-i686-gtk3
mingw-w64-i686-libgcrypt
mingw-w64-i686-libjpeg-turbo
mingw-w64-i686-libnfs
mingw-w64-i686-libpng
mingw-w64-i686-libssh
mingw-w64-i686-libtasn1
mingw-w64-i686-libusb
mingw-w64-i686-lzo2
mingw-w64-i686-nettle
mingw-w64-i686-ninja
mingw-w64-i686-pixman
mingw-w64-i686-pkgconf
mingw-w64-i686-python
mingw-w64-i686-SDL2
mingw-w64-i686-SDL2_image
mingw-w64-i686-snappy
mingw-w64-i686-usbredir
mingw-w64-i686-zstd "
- $env:CHERE_INVOKING = 'yes' # Preserve the current working directory
- $env:MSYSTEM = 'MINGW32' # Start a 32-bit MinGW environment
- $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink
- mkdir output
- cd output
- ..\msys64\usr\bin\bash -lc '../configure --target-list=ppc64-softmmu'
- ..\msys64\usr\bin\bash -lc 'make'
- ..\msys64\usr\bin\bash -lc 'make check MTESTARGS=\"--no-suite qtest\" ||
{ cat meson-logs/testlog.txt; exit 1; }'

21
.gitmodules vendored
View File

@@ -13,6 +13,12 @@
[submodule "roms/qemu-palcode"] [submodule "roms/qemu-palcode"]
path = roms/qemu-palcode path = roms/qemu-palcode
url = https://gitlab.com/qemu-project/qemu-palcode.git url = https://gitlab.com/qemu-project/qemu-palcode.git
[submodule "roms/sgabios"]
path = roms/sgabios
url = https://gitlab.com/qemu-project/sgabios.git
[submodule "dtc"]
path = dtc
url = https://gitlab.com/qemu-project/dtc.git
[submodule "roms/u-boot"] [submodule "roms/u-boot"]
path = roms/u-boot path = roms/u-boot
url = https://gitlab.com/qemu-project/u-boot.git url = https://gitlab.com/qemu-project/u-boot.git
@@ -22,12 +28,21 @@
[submodule "roms/QemuMacDrivers"] [submodule "roms/QemuMacDrivers"]
path = roms/QemuMacDrivers path = roms/QemuMacDrivers
url = https://gitlab.com/qemu-project/QemuMacDrivers.git url = https://gitlab.com/qemu-project/QemuMacDrivers.git
[submodule "ui/keycodemapdb"]
path = ui/keycodemapdb
url = https://gitlab.com/qemu-project/keycodemapdb.git
[submodule "roms/seabios-hppa"] [submodule "roms/seabios-hppa"]
path = roms/seabios-hppa path = roms/seabios-hppa
url = https://gitlab.com/qemu-project/seabios-hppa.git url = https://gitlab.com/qemu-project/seabios-hppa.git
[submodule "roms/u-boot-sam460ex"] [submodule "roms/u-boot-sam460ex"]
path = roms/u-boot-sam460ex path = roms/u-boot-sam460ex
url = https://gitlab.com/qemu-project/u-boot-sam460ex.git url = https://gitlab.com/qemu-project/u-boot-sam460ex.git
[submodule "tests/fp/berkeley-testfloat-3"]
path = tests/fp/berkeley-testfloat-3
url = https://gitlab.com/qemu-project/berkeley-testfloat-3.git
[submodule "tests/fp/berkeley-softfloat-3"]
path = tests/fp/berkeley-softfloat-3
url = https://gitlab.com/qemu-project/berkeley-softfloat-3.git
[submodule "roms/edk2"] [submodule "roms/edk2"]
path = roms/edk2 path = roms/edk2
url = https://gitlab.com/qemu-project/edk2.git url = https://gitlab.com/qemu-project/edk2.git
@@ -37,9 +52,15 @@
[submodule "roms/qboot"] [submodule "roms/qboot"]
path = roms/qboot path = roms/qboot
url = https://gitlab.com/qemu-project/qboot.git url = https://gitlab.com/qemu-project/qboot.git
[submodule "meson"]
path = meson
url = https://gitlab.com/qemu-project/meson.git
[submodule "roms/vbootrom"] [submodule "roms/vbootrom"]
path = roms/vbootrom path = roms/vbootrom
url = https://gitlab.com/qemu-project/vbootrom.git url = https://gitlab.com/qemu-project/vbootrom.git
[submodule "tests/lcitool/libvirt-ci"] [submodule "tests/lcitool/libvirt-ci"]
path = tests/lcitool/libvirt-ci path = tests/lcitool/libvirt-ci
url = https://gitlab.com/libvirt/libvirt-ci.git url = https://gitlab.com/libvirt/libvirt-ci.git
[submodule "subprojects/libvfio-user"]
path = subprojects/libvfio-user
url = https://gitlab.com/qemu-project/libvfio-user.git

View File

@@ -30,38 +30,22 @@ malc <av1474@comtv.ru> malc <malc@c046a42c-6fe2-441c-8c8c-71466251a162>
# Corrupted Author fields # Corrupted Author fields
Aaron Larson <alarson@ddci.com> alarson@ddci.com Aaron Larson <alarson@ddci.com> alarson@ddci.com
Andreas Färber <andreas.faerber@web.de> Andreas Färber <andreas.faerber> Andreas Färber <andreas.faerber@web.de> Andreas Färber <andreas.faerber>
fanwenjie <fanwj@mail.ustc.edu.cn> fanwj@mail.ustc.edu.cn <fanwj@mail.ustc.edu.cn>
Jason Wang <jasowang@redhat.com> Jason Wang <jasowang> Jason Wang <jasowang@redhat.com> Jason Wang <jasowang>
Marek Dolata <mkdolata@us.ibm.com> mkdolata@us.ibm.com <mkdolata@us.ibm.com> Marek Dolata <mkdolata@us.ibm.com> mkdolata@us.ibm.com <mkdolata@us.ibm.com>
Michael Ellerman <mpe@ellerman.id.au> michael@ozlabs.org <michael@ozlabs.org> Michael Ellerman <mpe@ellerman.id.au> michael@ozlabs.org <michael@ozlabs.org>
Nick Hudson <hnick@vmware.com> hnick@vmware.com <hnick@vmware.com> Nick Hudson <hnick@vmware.com> hnick@vmware.com <hnick@vmware.com>
Timothée Cocault <timothee.cocault@gmail.com> timothee.cocault@gmail.com <timothee.cocault@gmail.com>
# There is also a: # There is also a:
# (no author) <(no author)@c046a42c-6fe2-441c-8c8c-71466251a162> # (no author) <(no author)@c046a42c-6fe2-441c-8c8c-71466251a162>
# for the cvs2svn initialization commit e63c3dc74bf. # for the cvs2svn initialization commit e63c3dc74bf.
# Next, translate a few commits where mailman rewrote the From: line due # Next, translate a few commits where mailman rewrote the From: line due
# to strict SPF and DMARC. Usually, our build process should be flagging # to strict SPF, although we prefer to avoid adding more entries like that.
# commits like these before maintainer merges; if you find the need to add
# a line here, please also report a bug against the part of the build
# process that let the mis-attribution slip through in the first place.
#
# If the mailing list munges your emails, use:
# git config sendemail.from '"Your Name" <your.email@example.com>'
# the use of "" in that line will differ from the typically unquoted
# 'git config user.name', which in turn is sufficient for 'git send-email'
# to add an extra From: line in the body of your email that takes
# precedence over any munged From: in the mail's headers.
# See https://lists.openembedded.org/g/openembedded-core/message/166515
# and https://lists.gnu.org/archive/html/qemu-devel/2023-09/msg06784.html
Ed Swierk <eswierk@skyportsystems.com> Ed Swierk via Qemu-devel <qemu-devel@nongnu.org> Ed Swierk <eswierk@skyportsystems.com> Ed Swierk via Qemu-devel <qemu-devel@nongnu.org>
Ian McKellar <ianloic@google.com> Ian McKellar via Qemu-devel <qemu-devel@nongnu.org> Ian McKellar <ianloic@google.com> Ian McKellar via Qemu-devel <qemu-devel@nongnu.org>
Julia Suvorova <jusual@mail.ru> Julia Suvorova via Qemu-devel <qemu-devel@nongnu.org> Julia Suvorova <jusual@mail.ru> Julia Suvorova via Qemu-devel <qemu-devel@nongnu.org>
Justin Terry (VM) <juterry@microsoft.com> Justin Terry (VM) via Qemu-devel <qemu-devel@nongnu.org> Justin Terry (VM) <juterry@microsoft.com> Justin Terry (VM) via Qemu-devel <qemu-devel@nongnu.org>
Stefan Weil <sw@weilnetz.de> Stefan Weil via <qemu-devel@nongnu.org> Stefan Weil <sw@weilnetz.de> Stefan Weil via <qemu-devel@nongnu.org>
Andrey Drobyshev <andrey.drobyshev@virtuozzo.com> Andrey Drobyshev via <qemu-block@nongnu.org>
BALATON Zoltan <balaton@eik.bme.hu> BALATON Zoltan via <qemu-ppc@nongnu.org>
# Next, replace old addresses by a more recent one. # Next, replace old addresses by a more recent one.
Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> <aleksandar.markovic@mips.com> Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> <aleksandar.markovic@mips.com>
@@ -70,10 +54,8 @@ Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> <amarkovic@wavecomp.com>
Aleksandar Rikalo <aleksandar.rikalo@syrmia.com> <arikalo@wavecomp.com> Aleksandar Rikalo <aleksandar.rikalo@syrmia.com> <arikalo@wavecomp.com>
Aleksandar Rikalo <aleksandar.rikalo@syrmia.com> <aleksandar.rikalo@rt-rk.com> Aleksandar Rikalo <aleksandar.rikalo@syrmia.com> <aleksandar.rikalo@rt-rk.com>
Alexander Graf <agraf@csgraf.de> <agraf@suse.de> Alexander Graf <agraf@csgraf.de> <agraf@suse.de>
Ani Sinha <anisinha@redhat.com> <ani@anisinha.ca>
Anthony Liguori <anthony@codemonkey.ws> Anthony Liguori <aliguori@us.ibm.com> Anthony Liguori <anthony@codemonkey.ws> Anthony Liguori <aliguori@us.ibm.com>
Christian Borntraeger <borntraeger@linux.ibm.com> <borntraeger@de.ibm.com> Christian Borntraeger <borntraeger@linux.ibm.com> <borntraeger@de.ibm.com>
Damien Hedde <damien.hedde@dahe.fr> <damien.hedde@greensocs.com>
Filip Bozuta <filip.bozuta@syrmia.com> <filip.bozuta@rt-rk.com.com> Filip Bozuta <filip.bozuta@syrmia.com> <filip.bozuta@rt-rk.com.com>
Frederic Konrad <konrad.frederic@yahoo.fr> <fred.konrad@greensocs.com> Frederic Konrad <konrad.frederic@yahoo.fr> <fred.konrad@greensocs.com>
Frederic Konrad <konrad.frederic@yahoo.fr> <konrad@adacore.com> Frederic Konrad <konrad.frederic@yahoo.fr> <konrad@adacore.com>
@@ -83,9 +65,6 @@ Huacai Chen <chenhuacai@kernel.org> <chenhuacai@loongson.cn>
James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com> James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
Leif Lindholm <quic_llindhol@quicinc.com> <leif.lindholm@linaro.org> Leif Lindholm <quic_llindhol@quicinc.com> <leif.lindholm@linaro.org>
Leif Lindholm <quic_llindhol@quicinc.com> <leif@nuviainc.com> Leif Lindholm <quic_llindhol@quicinc.com> <leif@nuviainc.com>
Luc Michel <luc@lmichel.fr> <luc.michel@git.antfield.fr>
Luc Michel <luc@lmichel.fr> <luc.michel@greensocs.com>
Luc Michel <luc@lmichel.fr> <lmichel@kalray.eu>
Radoslaw Biernacki <rad@semihalf.com> <radoslaw.biernacki@linaro.org> Radoslaw Biernacki <rad@semihalf.com> <radoslaw.biernacki@linaro.org>
Paul Brook <paul@nowt.org> <paul@codesourcery.com> Paul Brook <paul@nowt.org> <paul@codesourcery.com>
Paul Burton <paulburton@kernel.org> <paul.burton@mips.com> Paul Burton <paulburton@kernel.org> <paul.burton@mips.com>
@@ -95,9 +74,7 @@ Paul Burton <paulburton@kernel.org> <pburton@wavecomp.com>
Philippe Mathieu-Daudé <philmd@linaro.org> <f4bug@amsat.org> Philippe Mathieu-Daudé <philmd@linaro.org> <f4bug@amsat.org>
Philippe Mathieu-Daudé <philmd@linaro.org> <philmd@redhat.com> Philippe Mathieu-Daudé <philmd@linaro.org> <philmd@redhat.com>
Philippe Mathieu-Daudé <philmd@linaro.org> <philmd@fungible.com> Philippe Mathieu-Daudé <philmd@linaro.org> <philmd@fungible.com>
Roman Bolshakov <rbolshakov@ddn.com> <r.bolshakov@yadro.com>
Stefan Brankovic <stefan.brankovic@syrmia.com> <stefan.brankovic@rt-rk.com.com> Stefan Brankovic <stefan.brankovic@syrmia.com> <stefan.brankovic@rt-rk.com.com>
Taylor Simpson <ltaylorsimpson@gmail.com> <tsimpson@quicinc.com>
Yongbok Kim <yongbok.kim@mips.com> <yongbok.kim@imgtec.com> Yongbok Kim <yongbok.kim@mips.com> <yongbok.kim@imgtec.com>
# Also list preferred name forms where people have changed their # Also list preferred name forms where people have changed their

View File

@@ -34,7 +34,7 @@ env:
- BASE_CONFIG="--disable-docs --disable-tools" - BASE_CONFIG="--disable-docs --disable-tools"
- TEST_BUILD_CMD="" - TEST_BUILD_CMD=""
- TEST_CMD="make check V=1" - TEST_CMD="make check V=1"
# This is broadly a list of "mainline" system targets which have support across the major distros # This is broadly a list of "mainline" softmmu targets which have support across the major distros
- MAIN_SOFTMMU_TARGETS="aarch64-softmmu,mips64-softmmu,ppc64-softmmu,riscv64-softmmu,s390x-softmmu,x86_64-softmmu" - MAIN_SOFTMMU_TARGETS="aarch64-softmmu,mips64-softmmu,ppc64-softmmu,riscv64-softmmu,s390x-softmmu,x86_64-softmmu"
- CCACHE_SLOPPINESS="include_file_ctime,include_file_mtime" - CCACHE_SLOPPINESS="include_file_ctime,include_file_mtime"
- CCACHE_MAXSIZE=1G - CCACHE_MAXSIZE=1G
@@ -197,7 +197,7 @@ jobs:
$(exit $BUILD_RC); $(exit $BUILD_RC);
fi fi
- name: "[s390x] GCC (other-system)" - name: "[s390x] GCC (other-softmmu)"
arch: s390x arch: s390x
dist: focal dist: focal
addons: addons:
@@ -237,15 +237,13 @@ jobs:
- libglib2.0-dev - libglib2.0-dev
- libgnutls28-dev - libgnutls28-dev
- ninja-build - ninja-build
- flex
- bison
env: env:
- CONFIG="--disable-containers --disable-system" - CONFIG="--disable-containers --disable-system"
- name: "[s390x] Clang (disable-tcg)" - name: "[s390x] Clang (disable-tcg)"
arch: s390x arch: s390x
dist: focal dist: focal
compiler: clang-10 compiler: clang
addons: addons:
apt_packages: apt_packages:
- libaio-dev - libaio-dev
@@ -271,7 +269,6 @@ jobs:
- libvdeplug-dev - libvdeplug-dev
- libvte-2.91-dev - libvte-2.91-dev
- ninja-build - ninja-build
- clang-10
env: env:
- TEST_CMD="make check-unit" - TEST_CMD="make check-unit"
- CONFIG="--disable-containers --disable-tcg --enable-kvm --disable-tools - CONFIG="--disable-containers --disable-tcg --enable-kvm --disable-tools

View File

@@ -11,9 +11,6 @@ config OPENGL
config X11 config X11
bool bool
config PIXMAN
bool
config SPICE config SPICE
bool bool
@@ -49,6 +46,3 @@ config FUZZ
config VFIO_USER_SERVER_ALLOWED config VFIO_USER_SERVER_ALLOWED
bool bool
imply VFIO_USER_SERVER imply VFIO_USER_SERVER
config HV_BALLOON_POSSIBLE
bool

File diff suppressed because it is too large Load Diff

View File

@@ -26,9 +26,9 @@ quiet-command-run = $(if $(V),,$(if $2,printf " %-7s %s\n" $2 $3 && ))$1
quiet-@ = $(if $(V),,@) quiet-@ = $(if $(V),,@)
quiet-command = $(quiet-@)$(call quiet-command-run,$1,$2,$3) quiet-command = $(quiet-@)$(call quiet-command-run,$1,$2,$3)
UNCHECKED_GOALS := TAGS gtags cscope ctags dist \ UNCHECKED_GOALS := %clean TAGS cscope ctags dist \
help check-help print-% \ help check-help print-% \
docker docker-% lcitool-refresh vm-help vm-test vm-build-% docker docker-% vm-help vm-test vm-build-%
all: all:
.PHONY: all clean distclean recurse-all dist msi FORCE .PHONY: all clean distclean recurse-all dist msi FORCE
@@ -45,6 +45,18 @@ include config-host.mak
include Makefile.prereqs include Makefile.prereqs
Makefile.prereqs: config-host.mak Makefile.prereqs: config-host.mak
git-submodule-update:
.git-submodule-status: git-submodule-update config-host.mak
Makefile: .git-submodule-status
.PHONY: git-submodule-update
git-submodule-update:
ifneq ($(GIT_SUBMODULES_ACTION),ignore)
$(call quiet-command, \
(GIT="$(GIT)" "$(SRC_PATH)/scripts/git-submodule.sh" $(GIT_SUBMODULES_ACTION) $(GIT_SUBMODULES)), \
"GIT","$(GIT_SUBMODULES)")
endif
# 0. ensure the build tree is okay # 0. ensure the build tree is okay
# Check that we're not trying to do an out-of-tree build from # Check that we're not trying to do an out-of-tree build from
@@ -83,17 +95,16 @@ config-host.mak: $(SRC_PATH)/configure $(SRC_PATH)/scripts/meson-buildoptions.sh
@if test -f meson-private/coredata.dat; then \ @if test -f meson-private/coredata.dat; then \
./config.status --skip-meson; \ ./config.status --skip-meson; \
else \ else \
./config.status; \ ./config.status && touch build.ninja.stamp; \
fi fi
# 2. meson.stamp exists if meson has run at least once (so ninja reconfigure # 2. meson.stamp exists if meson has run at least once (so ninja reconfigure
# works), but otherwise never needs to be updated # works), but otherwise never needs to be updated
meson-private/coredata.dat: meson.stamp meson-private/coredata.dat: meson.stamp
meson.stamp: config-host.mak meson.stamp: config-host.mak
@touch meson.stamp @touch meson.stamp
# 3. ensure meson-generated build files are up-to-date # 3. ensure generated build files are up-to-date
ifneq ($(NINJA),) ifneq ($(NINJA),)
Makefile.ninja: build.ninja Makefile.ninja: build.ninja
@@ -104,23 +115,15 @@ Makefile.ninja: build.ninja
$(NINJA) -t query build.ninja | sed -n '1,/^ input:/d; /^ outputs:/q; s/$$/ \\/p'; \ $(NINJA) -t query build.ninja | sed -n '1,/^ input:/d; /^ outputs:/q; s/$$/ \\/p'; \
} > $@.tmp && mv $@.tmp $@ } > $@.tmp && mv $@.tmp $@
-include Makefile.ninja -include Makefile.ninja
endif
ifneq ($(MESON),) # A separate rule is needed for Makefile dependencies to avoid -n
# The path to meson always points to pyvenv/bin/meson, but the absolute
# paths could change. In that case, force a regeneration of build.ninja.
# Note that this invocation of $(NINJA), just like when Make rebuilds
# Makefiles, does not include -n.
build.ninja: build.ninja.stamp build.ninja: build.ninja.stamp
$(build-files): $(build-files):
build.ninja.stamp: meson.stamp $(build-files) build.ninja.stamp: meson.stamp $(build-files)
@if test "$$(cat build.ninja.stamp)" = "$(MESON)" && test -n "$(NINJA)"; then \ $(NINJA) $(if $V,-v,) build.ninja && touch $@
$(NINJA) build.ninja; \ endif
else \
echo "$(MESON) setup --reconfigure $(SRC_PATH)"; \
$(MESON) setup --reconfigure $(SRC_PATH); \
fi && echo "$(MESON)" > $@
ifneq ($(MESON),)
Makefile.mtest: build.ninja scripts/mtest2make.py Makefile.mtest: build.ninja scripts/mtest2make.py
$(MESON) introspect --targets --tests --benchmarks | $(PYTHON) scripts/mtest2make.py > $@ $(MESON) introspect --targets --tests --benchmarks | $(PYTHON) scripts/mtest2make.py > $@
-include Makefile.mtest -include Makefile.mtest
@@ -164,9 +167,19 @@ ifneq ($(filter $(ninja-targets), $(ninja-cmd-goals)),)
endif endif
endif endif
ifeq ($(CONFIG_PLUGIN),y)
.PHONY: plugins
plugins:
$(call quiet-command,\
$(MAKE) $(SUBDIR_MAKEFLAGS) -C contrib/plugins V="$(V)", \
"BUILD", "example plugins")
endif # $(CONFIG_PLUGIN)
else # config-host.mak does not exist else # config-host.mak does not exist
config-host.mak:
ifneq ($(filter-out $(UNCHECKED_GOALS),$(MAKECMDGOALS)),$(if $(MAKECMDGOALS),,fail)) ifneq ($(filter-out $(UNCHECKED_GOALS),$(MAKECMDGOALS)),$(if $(MAKECMDGOALS),,fail))
$(error Please call configure before running make) @echo "Please call configure before running make!"
@exit 1
endif endif
endif # config-host.mak does not exist endif # config-host.mak does not exist
@@ -176,20 +189,15 @@ include $(SRC_PATH)/tests/Makefile.include
all: recurse-all all: recurse-all
SUBDIR_RULES=$(foreach t, all clean distclean, $(addsuffix /$(t), $(SUBDIRS))) ROMS_RULES=$(foreach t, all clean distclean, $(addsuffix /$(t), $(ROMS)))
.PHONY: $(SUBDIR_RULES) .PHONY: $(ROMS_RULES)
$(SUBDIR_RULES): $(ROMS_RULES):
$(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) -C $(dir $@) V="$(V)" TARGET_DIR="$(dir $@)" $(notdir $@),) $(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) -C $(dir $@) V="$(V)" TARGET_DIR="$(dir $@)" $(notdir $@),)
ifneq ($(filter contrib/plugins, $(SUBDIRS)),)
.PHONY: plugins
plugins: contrib/plugins/all
endif
.PHONY: recurse-all recurse-clean .PHONY: recurse-all recurse-clean
recurse-all: $(addsuffix /all, $(SUBDIRS)) recurse-all: $(addsuffix /all, $(ROMS))
recurse-clean: $(addsuffix /clean, $(SUBDIRS)) recurse-clean: $(addsuffix /clean, $(ROMS))
recurse-distclean: $(addsuffix /distclean, $(SUBDIRS)) recurse-distclean: $(addsuffix /distclean, $(ROMS))
###################################################################### ######################################################################
@@ -212,7 +220,7 @@ qemu-%.tar.bz2:
distclean: clean recurse-distclean distclean: clean recurse-distclean
-$(quiet-@)test -f build.ninja && $(NINJA) $(NINJAFLAGS) -t clean -g || : -$(quiet-@)test -f build.ninja && $(NINJA) $(NINJAFLAGS) -t clean -g || :
rm -f config-host.mak Makefile.prereqs rm -f config-host.mak Makefile.prereqs qemu-bundle
rm -f tests/tcg/*/config-target.mak tests/tcg/config-host.mak rm -f tests/tcg/*/config-target.mak tests/tcg/config-host.mak
rm -f config.status rm -f config.status
rm -f roms/seabios/config.mak rm -f roms/seabios/config.mak
@@ -222,7 +230,7 @@ distclean: clean recurse-distclean
rm -f Makefile.ninja Makefile.mtest build.ninja.stamp meson.stamp rm -f Makefile.ninja Makefile.mtest build.ninja.stamp meson.stamp
rm -f config.log rm -f config.log
rm -f linux-headers/asm rm -f linux-headers/asm
rm -Rf .sdk qemu-bundle rm -Rf .sdk
find-src-path = find "$(SRC_PATH)" -path "$(SRC_PATH)/meson" -prune -o \ find-src-path = find "$(SRC_PATH)" -path "$(SRC_PATH)/meson" -prune -o \
-type l -prune -o \( -name "*.[chsS]" -o -name "*.[ch].inc" \) -type l -prune -o \( -name "*.[chsS]" -o -name "*.[ch].inc" \)
@@ -283,13 +291,6 @@ include $(SRC_PATH)/tests/vm/Makefile.include
print-help-run = printf " %-30s - %s\\n" "$1" "$2" print-help-run = printf " %-30s - %s\\n" "$1" "$2"
print-help = @$(call print-help-run,$1,$2) print-help = @$(call print-help-run,$1,$2)
.PHONY: update-linux-vdso
update-linux-vdso:
@for m in $(SRC_PATH)/linux-user/*/Makefile.vdso; do \
$(MAKE) $(SUBDIR_MAKEFLAGS) -C $$(dirname $$m) -f Makefile.vdso \
SRC_PATH=$(SRC_PATH) BUILD_DIR=$(BUILD_DIR); \
done
.PHONY: help .PHONY: help
help: help:
@echo 'Generic targets:' @echo 'Generic targets:'
@@ -300,7 +301,7 @@ help:
$(call print-help,cscope,Generate cscope index) $(call print-help,cscope,Generate cscope index)
$(call print-help,sparse,Run sparse on the QEMU source) $(call print-help,sparse,Run sparse on the QEMU source)
@echo '' @echo ''
ifneq ($(filter contrib/plugins, $(SUBDIRS)),) ifeq ($(CONFIG_PLUGIN),y)
@echo 'Plugin targets:' @echo 'Plugin targets:'
$(call print-help,plugins,Build the example TCG plugins) $(call print-help,plugins,Build the example TCG plugins)
@echo '' @echo ''
@@ -310,9 +311,6 @@ endif
$(call print-help,distclean,Remove all generated files) $(call print-help,distclean,Remove all generated files)
$(call print-help,dist,Build a distributable tarball) $(call print-help,dist,Build a distributable tarball)
@echo '' @echo ''
@echo 'Linux-user targets:'
$(call print-help,update-linux-vdso,Build linux-user vdso images)
@echo ''
@echo 'Test targets:' @echo 'Test targets:'
$(call print-help,check,Run all tests (check-help for details)) $(call print-help,check,Run all tests (check-help for details))
$(call print-help,bench,Run all benchmarks) $(call print-help,bench,Run all benchmarks)
@@ -323,7 +321,7 @@ endif
@echo 'Documentation targets:' @echo 'Documentation targets:'
$(call print-help,html man,Build documentation in specified format) $(call print-help,html man,Build documentation in specified format)
@echo '' @echo ''
ifneq ($(filter msi, $(ninja-targets)),) ifdef CONFIG_WIN32
@echo 'Windows targets:' @echo 'Windows targets:'
$(call print-help,installer,Build NSIS-based installer for QEMU) $(call print-help,installer,Build NSIS-based installer for QEMU)
$(call print-help,msi,Build MSI-based installer for qemu-ga) $(call print-help,msi,Build MSI-based installer for qemu-ga)

View File

@@ -1 +1 @@
8.1.91 7.2.50

View File

@@ -4,6 +4,9 @@ config WHPX
config NVMM config NVMM
bool bool
config HAX
bool
config HVF config HVF
bool bool

View File

@@ -30,7 +30,7 @@
#include "hw/core/accel-cpu.h" #include "hw/core/accel-cpu.h"
#ifndef CONFIG_USER_ONLY #ifndef CONFIG_USER_ONLY
#include "accel-system.h" #include "accel-softmmu.h"
#endif /* !CONFIG_USER_ONLY */ #endif /* !CONFIG_USER_ONLY */
static const TypeInfo accel_type = { static const TypeInfo accel_type = {
@@ -119,37 +119,16 @@ void accel_cpu_instance_init(CPUState *cpu)
} }
} }
bool accel_cpu_common_realize(CPUState *cpu, Error **errp) bool accel_cpu_realizefn(CPUState *cpu, Error **errp)
{ {
CPUClass *cc = CPU_GET_CLASS(cpu); CPUClass *cc = CPU_GET_CLASS(cpu);
AccelState *accel = current_accel();
AccelClass *acc = ACCEL_GET_CLASS(accel);
/* target specific realization */ if (cc->accel_cpu && cc->accel_cpu->cpu_realizefn) {
if (cc->accel_cpu && cc->accel_cpu->cpu_target_realize return cc->accel_cpu->cpu_realizefn(cpu, errp);
&& !cc->accel_cpu->cpu_target_realize(cpu, errp)) {
return false;
} }
/* generic realization */
if (acc->cpu_common_realize && !acc->cpu_common_realize(cpu, errp)) {
return false;
}
return true; return true;
} }
void accel_cpu_common_unrealize(CPUState *cpu)
{
AccelState *accel = current_accel();
AccelClass *acc = ACCEL_GET_CLASS(accel);
/* generic unrealization */
if (acc->cpu_common_unrealize) {
acc->cpu_common_unrealize(cpu);
}
}
int accel_supported_gdbstub_sstep_flags(void) int accel_supported_gdbstub_sstep_flags(void)
{ {
AccelState *accel = current_accel(); AccelState *accel = current_accel();

View File

@@ -27,8 +27,8 @@
#include "qemu/accel.h" #include "qemu/accel.h"
#include "hw/boards.h" #include "hw/boards.h"
#include "sysemu/cpus.h" #include "sysemu/cpus.h"
#include "qemu/error-report.h"
#include "accel-system.h" #include "accel-softmmu.h"
int accel_init_machine(AccelState *accel, MachineState *ms) int accel_init_machine(AccelState *accel, MachineState *ms)
{ {
@@ -99,8 +99,8 @@ static const TypeInfo accel_ops_type_info = {
.class_size = sizeof(AccelOpsClass), .class_size = sizeof(AccelOpsClass),
}; };
static void accel_system_register_types(void) static void accel_softmmu_register_types(void)
{ {
type_register_static(&accel_ops_type_info); type_register_static(&accel_ops_type_info);
} }
type_init(accel_system_register_types); type_init(accel_softmmu_register_types);

View File

@@ -7,9 +7,9 @@
* See the COPYING file in the top-level directory. * See the COPYING file in the top-level directory.
*/ */
#ifndef ACCEL_SYSTEM_H #ifndef ACCEL_SOFTMMU_H
#define ACCEL_SYSTEM_H #define ACCEL_SOFTMMU_H
void accel_init_ops_interfaces(AccelClass *ac); void accel_init_ops_interfaces(AccelClass *ac);
#endif /* ACCEL_SYSTEM_H */ #endif /* ACCEL_SOFTMMU_H */

View File

@@ -27,7 +27,7 @@ static void *dummy_cpu_thread_fn(void *arg)
qemu_mutex_lock_iothread(); qemu_mutex_lock_iothread();
qemu_thread_get_self(cpu->thread); qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id(); cpu->thread_id = qemu_get_thread_id();
cpu->neg.can_do_io = true; cpu->can_do_io = 1;
current_cpu = cpu; current_cpu = cpu;
#ifndef _WIN32 #ifndef _WIN32

View File

@@ -52,7 +52,6 @@
#include "qemu/main-loop.h" #include "qemu/main-loop.h"
#include "exec/address-spaces.h" #include "exec/address-spaces.h"
#include "exec/exec-all.h" #include "exec/exec-all.h"
#include "exec/gdbstub.h"
#include "sysemu/cpus.h" #include "sysemu/cpus.h"
#include "sysemu/hvf.h" #include "sysemu/hvf.h"
#include "sysemu/hvf_int.h" #include "sysemu/hvf_int.h"
@@ -304,7 +303,7 @@ static void hvf_region_del(MemoryListener *listener,
static MemoryListener hvf_memory_listener = { static MemoryListener hvf_memory_listener = {
.name = "hvf", .name = "hvf",
.priority = MEMORY_LISTENER_PRIORITY_ACCEL, .priority = 10,
.region_add = hvf_region_add, .region_add = hvf_region_add,
.region_del = hvf_region_del, .region_del = hvf_region_del,
.log_start = hvf_log_start, .log_start = hvf_log_start,
@@ -335,26 +334,18 @@ static int hvf_accel_init(MachineState *ms)
s->slots[x].slot_id = x; s->slots[x].slot_id = x;
} }
QTAILQ_INIT(&s->hvf_sw_breakpoints);
hvf_state = s; hvf_state = s;
memory_listener_register(&hvf_memory_listener, &address_space_memory); memory_listener_register(&hvf_memory_listener, &address_space_memory);
return hvf_arch_init(); return hvf_arch_init();
} }
static inline int hvf_gdbstub_sstep_flags(void)
{
return SSTEP_ENABLE | SSTEP_NOIRQ;
}
static void hvf_accel_class_init(ObjectClass *oc, void *data) static void hvf_accel_class_init(ObjectClass *oc, void *data)
{ {
AccelClass *ac = ACCEL_CLASS(oc); AccelClass *ac = ACCEL_CLASS(oc);
ac->name = "HVF"; ac->name = "HVF";
ac->init_machine = hvf_accel_init; ac->init_machine = hvf_accel_init;
ac->allowed = &hvf_allowed; ac->allowed = &hvf_allowed;
ac->gdbstub_supported_sstep_flags = hvf_gdbstub_sstep_flags;
} }
static const TypeInfo hvf_accel_type = { static const TypeInfo hvf_accel_type = {
@@ -372,19 +363,19 @@ type_init(hvf_type_init);
static void hvf_vcpu_destroy(CPUState *cpu) static void hvf_vcpu_destroy(CPUState *cpu)
{ {
hv_return_t ret = hv_vcpu_destroy(cpu->accel->fd); hv_return_t ret = hv_vcpu_destroy(cpu->hvf->fd);
assert_hvf_ok(ret); assert_hvf_ok(ret);
hvf_arch_vcpu_destroy(cpu); hvf_arch_vcpu_destroy(cpu);
g_free(cpu->accel); g_free(cpu->hvf);
cpu->accel = NULL; cpu->hvf = NULL;
} }
static int hvf_init_vcpu(CPUState *cpu) static int hvf_init_vcpu(CPUState *cpu)
{ {
int r; int r;
cpu->accel = g_new0(AccelCPUState, 1); cpu->hvf = g_malloc0(sizeof(*cpu->hvf));
/* init cpu signals */ /* init cpu signals */
struct sigaction sigact; struct sigaction sigact;
@@ -393,20 +384,17 @@ static int hvf_init_vcpu(CPUState *cpu)
sigact.sa_handler = dummy_signal; sigact.sa_handler = dummy_signal;
sigaction(SIG_IPI, &sigact, NULL); sigaction(SIG_IPI, &sigact, NULL);
pthread_sigmask(SIG_BLOCK, NULL, &cpu->accel->unblock_ipi_mask); pthread_sigmask(SIG_BLOCK, NULL, &cpu->hvf->unblock_ipi_mask);
sigdelset(&cpu->accel->unblock_ipi_mask, SIG_IPI); sigdelset(&cpu->hvf->unblock_ipi_mask, SIG_IPI);
#ifdef __aarch64__ #ifdef __aarch64__
r = hv_vcpu_create(&cpu->accel->fd, r = hv_vcpu_create(&cpu->hvf->fd, (hv_vcpu_exit_t **)&cpu->hvf->exit, NULL);
(hv_vcpu_exit_t **)&cpu->accel->exit, NULL);
#else #else
r = hv_vcpu_create((hv_vcpuid_t *)&cpu->accel->fd, HV_VCPU_DEFAULT); r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf->fd, HV_VCPU_DEFAULT);
#endif #endif
cpu->vcpu_dirty = 1; cpu->vcpu_dirty = 1;
assert_hvf_ok(r); assert_hvf_ok(r);
cpu->accel->guest_debug_enabled = false;
return hvf_arch_init_vcpu(cpu); return hvf_arch_init_vcpu(cpu);
} }
@@ -428,7 +416,7 @@ static void *hvf_cpu_thread_fn(void *arg)
qemu_thread_get_self(cpu->thread); qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id(); cpu->thread_id = qemu_get_thread_id();
cpu->neg.can_do_io = true; cpu->can_do_io = 1;
current_cpu = cpu; current_cpu = cpu;
hvf_init_vcpu(cpu); hvf_init_vcpu(cpu);
@@ -474,108 +462,6 @@ static void hvf_start_vcpu_thread(CPUState *cpu)
cpu, QEMU_THREAD_JOINABLE); cpu, QEMU_THREAD_JOINABLE);
} }
static int hvf_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
{
struct hvf_sw_breakpoint *bp;
int err;
if (type == GDB_BREAKPOINT_SW) {
bp = hvf_find_sw_breakpoint(cpu, addr);
if (bp) {
bp->use_count++;
return 0;
}
bp = g_new(struct hvf_sw_breakpoint, 1);
bp->pc = addr;
bp->use_count = 1;
err = hvf_arch_insert_sw_breakpoint(cpu, bp);
if (err) {
g_free(bp);
return err;
}
QTAILQ_INSERT_HEAD(&hvf_state->hvf_sw_breakpoints, bp, entry);
} else {
err = hvf_arch_insert_hw_breakpoint(addr, len, type);
if (err) {
return err;
}
}
CPU_FOREACH(cpu) {
err = hvf_update_guest_debug(cpu);
if (err) {
return err;
}
}
return 0;
}
static int hvf_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
{
struct hvf_sw_breakpoint *bp;
int err;
if (type == GDB_BREAKPOINT_SW) {
bp = hvf_find_sw_breakpoint(cpu, addr);
if (!bp) {
return -ENOENT;
}
if (bp->use_count > 1) {
bp->use_count--;
return 0;
}
err = hvf_arch_remove_sw_breakpoint(cpu, bp);
if (err) {
return err;
}
QTAILQ_REMOVE(&hvf_state->hvf_sw_breakpoints, bp, entry);
g_free(bp);
} else {
err = hvf_arch_remove_hw_breakpoint(addr, len, type);
if (err) {
return err;
}
}
CPU_FOREACH(cpu) {
err = hvf_update_guest_debug(cpu);
if (err) {
return err;
}
}
return 0;
}
static void hvf_remove_all_breakpoints(CPUState *cpu)
{
struct hvf_sw_breakpoint *bp, *next;
CPUState *tmpcpu;
QTAILQ_FOREACH_SAFE(bp, &hvf_state->hvf_sw_breakpoints, entry, next) {
if (hvf_arch_remove_sw_breakpoint(cpu, bp) != 0) {
/* Try harder to find a CPU that currently sees the breakpoint. */
CPU_FOREACH(tmpcpu)
{
if (hvf_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
break;
}
}
}
QTAILQ_REMOVE(&hvf_state->hvf_sw_breakpoints, bp, entry);
g_free(bp);
}
hvf_arch_remove_all_hw_breakpoints();
CPU_FOREACH(cpu) {
hvf_update_guest_debug(cpu);
}
}
static void hvf_accel_ops_class_init(ObjectClass *oc, void *data) static void hvf_accel_ops_class_init(ObjectClass *oc, void *data)
{ {
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc); AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
@@ -587,12 +473,6 @@ static void hvf_accel_ops_class_init(ObjectClass *oc, void *data)
ops->synchronize_post_init = hvf_cpu_synchronize_post_init; ops->synchronize_post_init = hvf_cpu_synchronize_post_init;
ops->synchronize_state = hvf_cpu_synchronize_state; ops->synchronize_state = hvf_cpu_synchronize_state;
ops->synchronize_pre_loadvm = hvf_cpu_synchronize_pre_loadvm; ops->synchronize_pre_loadvm = hvf_cpu_synchronize_pre_loadvm;
ops->insert_breakpoint = hvf_insert_breakpoint;
ops->remove_breakpoint = hvf_remove_breakpoint;
ops->remove_all_breakpoints = hvf_remove_all_breakpoints;
ops->update_guest_debug = hvf_update_guest_debug;
ops->supports_guest_debug = hvf_arch_supports_guest_debug;
}; };
static const TypeInfo hvf_accel_ops_type = { static const TypeInfo hvf_accel_ops_type = {
.name = ACCEL_OPS_NAME("hvf"), .name = ACCEL_OPS_NAME("hvf"),

View File

@@ -38,38 +38,9 @@ void assert_hvf_ok(hv_return_t ret)
case HV_UNSUPPORTED: case HV_UNSUPPORTED:
error_report("Error: HV_UNSUPPORTED"); error_report("Error: HV_UNSUPPORTED");
break; break;
#if defined(MAC_OS_VERSION_11_0) && \
MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_VERSION_11_0
case HV_DENIED:
error_report("Error: HV_DENIED");
break;
#endif
default: default:
error_report("Unknown Error"); error_report("Unknown Error");
} }
abort(); abort();
} }
struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu, vaddr pc)
{
struct hvf_sw_breakpoint *bp;
QTAILQ_FOREACH(bp, &hvf_state->hvf_sw_breakpoints, entry) {
if (bp->pc == pc) {
return bp;
}
}
return NULL;
}
int hvf_sw_breakpoints_active(CPUState *cpu)
{
return !QTAILQ_EMPTY(&hvf_state->hvf_sw_breakpoints);
}
int hvf_update_guest_debug(CPUState *cpu)
{
hvf_arch_update_guest_debug(cpu);
return 0;
}

View File

@@ -36,7 +36,7 @@ static void *kvm_vcpu_thread_fn(void *arg)
qemu_mutex_lock_iothread(); qemu_mutex_lock_iothread();
qemu_thread_get_self(cpu->thread); qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id(); cpu->thread_id = qemu_get_thread_id();
cpu->neg.can_do_io = true; cpu->can_do_io = 1;
current_cpu = cpu; current_cpu = cpu;
r = kvm_init_vcpu(cpu, &error_fatal); r = kvm_init_vcpu(cpu, &error_fatal);
@@ -86,13 +86,6 @@ static bool kvm_cpus_are_resettable(void)
return !kvm_enabled() || kvm_cpu_check_are_resettable(); return !kvm_enabled() || kvm_cpu_check_are_resettable();
} }
#ifdef KVM_CAP_SET_GUEST_DEBUG
static int kvm_update_guest_debug_ops(CPUState *cpu)
{
return kvm_update_guest_debug(cpu, 0);
}
#endif
static void kvm_accel_ops_class_init(ObjectClass *oc, void *data) static void kvm_accel_ops_class_init(ObjectClass *oc, void *data)
{ {
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc); AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
@@ -106,7 +99,6 @@ static void kvm_accel_ops_class_init(ObjectClass *oc, void *data)
ops->synchronize_pre_loadvm = kvm_cpu_synchronize_pre_loadvm; ops->synchronize_pre_loadvm = kvm_cpu_synchronize_pre_loadvm;
#ifdef KVM_CAP_SET_GUEST_DEBUG #ifdef KVM_CAP_SET_GUEST_DEBUG
ops->update_guest_debug = kvm_update_guest_debug_ops;
ops->supports_guest_debug = kvm_supports_guest_debug; ops->supports_guest_debug = kvm_supports_guest_debug;
ops->insert_breakpoint = kvm_insert_breakpoint; ops->insert_breakpoint = kvm_insert_breakpoint;
ops->remove_breakpoint = kvm_remove_breakpoint; ops->remove_breakpoint = kvm_remove_breakpoint;

View File

@@ -90,6 +90,8 @@ bool kvm_kernel_irqchip;
bool kvm_split_irqchip; bool kvm_split_irqchip;
bool kvm_async_interrupts_allowed; bool kvm_async_interrupts_allowed;
bool kvm_halt_in_kernel_allowed; bool kvm_halt_in_kernel_allowed;
bool kvm_eventfds_allowed;
bool kvm_irqfds_allowed;
bool kvm_resamplefds_allowed; bool kvm_resamplefds_allowed;
bool kvm_msi_via_irqfd_allowed; bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed; bool kvm_gsi_routing_allowed;
@@ -97,6 +99,8 @@ bool kvm_gsi_direct_mapping;
bool kvm_allowed; bool kvm_allowed;
bool kvm_readonly_mem_allowed; bool kvm_readonly_mem_allowed;
bool kvm_vm_attributes_allowed; bool kvm_vm_attributes_allowed;
bool kvm_direct_msi_allowed;
bool kvm_ioeventfd_any_length_allowed;
bool kvm_msi_use_devid; bool kvm_msi_use_devid;
bool kvm_has_guest_debug; bool kvm_has_guest_debug;
static int kvm_sstep_flags; static int kvm_sstep_flags;
@@ -107,9 +111,6 @@ static const KVMCapabilityInfo kvm_required_capabilites[] = {
KVM_CAP_INFO(USER_MEMORY), KVM_CAP_INFO(USER_MEMORY),
KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS), KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS), KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
KVM_CAP_INFO(INTERNAL_ERROR_DATA),
KVM_CAP_INFO(IOEVENTFD),
KVM_CAP_INFO(IOEVENTFD_ANY_LENGTH),
KVM_CAP_LAST_INFO KVM_CAP_LAST_INFO
}; };
@@ -173,31 +174,13 @@ void kvm_resample_fd_notify(int gsi)
} }
} }
unsigned int kvm_get_max_memslots(void) int kvm_get_max_memslots(void)
{ {
KVMState *s = KVM_STATE(current_accel()); KVMState *s = KVM_STATE(current_accel());
return s->nr_slots; return s->nr_slots;
} }
unsigned int kvm_get_free_memslots(void)
{
unsigned int used_slots = 0;
KVMState *s = kvm_state;
int i;
kvm_slots_lock();
for (i = 0; i < s->nr_as; i++) {
if (!s->as[i].ml) {
continue;
}
used_slots = MAX(used_slots, s->as[i].ml->nr_used_slots);
}
kvm_slots_unlock();
return s->nr_slots - used_slots;
}
/* Called with KVMMemoryListener.slots_lock held */ /* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml) static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
{ {
@@ -213,6 +196,19 @@ static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
return NULL; return NULL;
} }
bool kvm_has_free_slot(MachineState *ms)
{
KVMState *s = KVM_STATE(ms->accelerator);
bool result;
KVMMemoryListener *kml = &s->memory_listener;
kvm_slots_lock();
result = !!kvm_get_free_slot(kml);
kvm_slots_unlock();
return result;
}
/* Called with KVMMemoryListener.slots_lock held */ /* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml) static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
{ {
@@ -454,8 +450,6 @@ int kvm_init_vcpu(CPUState *cpu, Error **errp)
"kvm_init_vcpu: kvm_arch_init_vcpu failed (%lu)", "kvm_init_vcpu: kvm_arch_init_vcpu failed (%lu)",
kvm_arch_vcpu_id(cpu)); kvm_arch_vcpu_id(cpu));
} }
cpu->kvm_vcpu_stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);
err: err:
return ret; return ret;
} }
@@ -691,15 +685,6 @@ static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu)
uint32_t ring_size = s->kvm_dirty_ring_size; uint32_t ring_size = s->kvm_dirty_ring_size;
uint32_t count = 0, fetch = cpu->kvm_fetch_index; uint32_t count = 0, fetch = cpu->kvm_fetch_index;
/*
* It's possible that we race with vcpu creation code where the vcpu is
* put onto the vcpus list but not yet initialized the dirty ring
* structures. If so, skip it.
*/
if (!cpu->created) {
return 0;
}
assert(dirty_gfns && ring_size); assert(dirty_gfns && ring_size);
trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index); trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index);
@@ -1105,6 +1090,12 @@ static void kvm_coalesce_pio_del(MemoryListener *listener,
} }
} }
static MemoryListener kvm_coalesced_pio_listener = {
.name = "kvm-coalesced-pio",
.coalesced_io_add = kvm_coalesce_pio_add,
.coalesced_io_del = kvm_coalesce_pio_del,
};
int kvm_check_extension(KVMState *s, unsigned int extension) int kvm_check_extension(KVMState *s, unsigned int extension)
{ {
int ret; int ret;
@@ -1246,6 +1237,43 @@ static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
} }
static int kvm_check_many_ioeventfds(void)
{
/* Userspace can use ioeventfd for io notification. This requires a host
* that supports eventfd(2) and an I/O thread; since eventfd does not
* support SIGIO it cannot interrupt the vcpu.
*
* Older kernels have a 6 device limit on the KVM io bus. Find out so we
* can avoid creating too many ioeventfds.
*/
#if defined(CONFIG_EVENTFD)
int ioeventfds[7];
int i, ret = 0;
for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
if (ioeventfds[i] < 0) {
break;
}
ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true);
if (ret < 0) {
close(ioeventfds[i]);
break;
}
}
/* Decide whether many devices are supported or not */
ret = i == ARRAY_SIZE(ioeventfds);
while (i-- > 0) {
kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true);
close(ioeventfds[i]);
}
return ret;
#else
return 0;
#endif
}
static const KVMCapabilityInfo * static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list) kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{ {
@@ -1324,10 +1352,6 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
*/ */
if (kvm_state->kvm_dirty_ring_size) { if (kvm_state->kvm_dirty_ring_size) {
kvm_dirty_ring_reap_locked(kvm_state, NULL); kvm_dirty_ring_reap_locked(kvm_state, NULL);
if (kvm_state->kvm_dirty_ring_with_bitmap) {
kvm_slot_sync_dirty_pages(mem);
kvm_slot_get_dirty_log(kvm_state, mem);
}
} else { } else {
kvm_slot_get_dirty_log(kvm_state, mem); kvm_slot_get_dirty_log(kvm_state, mem);
} }
@@ -1347,7 +1371,6 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
} }
start_addr += slot_size; start_addr += slot_size;
size -= slot_size; size -= slot_size;
kml->nr_used_slots--;
} while (size); } while (size);
return; return;
} }
@@ -1373,7 +1396,6 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
ram_start_offset += slot_size; ram_start_offset += slot_size;
ram += slot_size; ram += slot_size;
size -= slot_size; size -= slot_size;
kml->nr_used_slots++;
} while (size); } while (size);
} }
@@ -1416,74 +1438,13 @@ static void *kvm_dirty_ring_reaper_thread(void *data)
return NULL; return NULL;
} }
static void kvm_dirty_ring_reaper_init(KVMState *s) static int kvm_dirty_ring_reaper_init(KVMState *s)
{ {
struct KVMDirtyRingReaper *r = &s->reaper; struct KVMDirtyRingReaper *r = &s->reaper;
qemu_thread_create(&r->reaper_thr, "kvm-reaper", qemu_thread_create(&r->reaper_thr, "kvm-reaper",
kvm_dirty_ring_reaper_thread, kvm_dirty_ring_reaper_thread,
s, QEMU_THREAD_JOINABLE); s, QEMU_THREAD_JOINABLE);
}
static int kvm_dirty_ring_init(KVMState *s)
{
uint32_t ring_size = s->kvm_dirty_ring_size;
uint64_t ring_bytes = ring_size * sizeof(struct kvm_dirty_gfn);
unsigned int capability = KVM_CAP_DIRTY_LOG_RING;
int ret;
s->kvm_dirty_ring_size = 0;
s->kvm_dirty_ring_bytes = 0;
/* Bail if the dirty ring size isn't specified */
if (!ring_size) {
return 0;
}
/*
* Read the max supported pages. Fall back to dirty logging mode
* if the dirty ring isn't supported.
*/
ret = kvm_vm_check_extension(s, capability);
if (ret <= 0) {
capability = KVM_CAP_DIRTY_LOG_RING_ACQ_REL;
ret = kvm_vm_check_extension(s, capability);
}
if (ret <= 0) {
warn_report("KVM dirty ring not available, using bitmap method");
return 0;
}
if (ring_bytes > ret) {
error_report("KVM dirty ring size %" PRIu32 " too big "
"(maximum is %ld). Please use a smaller value.",
ring_size, (long)ret / sizeof(struct kvm_dirty_gfn));
return -EINVAL;
}
ret = kvm_vm_enable_cap(s, capability, 0, ring_bytes);
if (ret) {
error_report("Enabling of KVM dirty ring failed: %s. "
"Suggested minimum value is 1024.", strerror(-ret));
return -EIO;
}
/* Enable the backup bitmap if it is supported */
ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP);
if (ret > 0) {
ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP, 0);
if (ret) {
error_report("Enabling of KVM dirty ring's backup bitmap failed: "
"%s. ", strerror(-ret));
return -EIO;
}
s->kvm_dirty_ring_with_bitmap = true;
}
s->kvm_dirty_ring_size = ring_size;
s->kvm_dirty_ring_bytes = ring_bytes;
return 0; return 0;
} }
@@ -1593,7 +1554,7 @@ static void kvm_log_sync(MemoryListener *listener,
kvm_slots_unlock(); kvm_slots_unlock();
} }
static void kvm_log_sync_global(MemoryListener *l, bool last_stage) static void kvm_log_sync_global(MemoryListener *l)
{ {
KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener); KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener);
KVMState *s = kvm_state; KVMState *s = kvm_state;
@@ -1612,12 +1573,6 @@ static void kvm_log_sync_global(MemoryListener *l, bool last_stage)
mem = &kml->slots[i]; mem = &kml->slots[i];
if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) { if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
kvm_slot_sync_dirty_pages(mem); kvm_slot_sync_dirty_pages(mem);
if (s->kvm_dirty_ring_with_bitmap && last_stage &&
kvm_slot_get_dirty_log(s, mem)) {
kvm_slot_sync_dirty_pages(mem);
}
/* /*
* This is not needed by KVM_GET_DIRTY_LOG because the * This is not needed by KVM_GET_DIRTY_LOG because the
* ioctl will unconditionally overwrite the whole region. * ioctl will unconditionally overwrite the whole region.
@@ -1738,7 +1693,7 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
kml->listener.commit = kvm_region_commit; kml->listener.commit = kvm_region_commit;
kml->listener.log_start = kvm_log_start; kml->listener.log_start = kvm_log_start;
kml->listener.log_stop = kvm_log_stop; kml->listener.log_stop = kvm_log_stop;
kml->listener.priority = MEMORY_LISTENER_PRIORITY_ACCEL; kml->listener.priority = 10;
kml->listener.name = name; kml->listener.name = name;
if (s->kvm_dirty_ring_size) { if (s->kvm_dirty_ring_size) {
@@ -1761,11 +1716,9 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
static MemoryListener kvm_io_listener = { static MemoryListener kvm_io_listener = {
.name = "kvm-io", .name = "kvm-io",
.coalesced_io_add = kvm_coalesce_pio_add,
.coalesced_io_del = kvm_coalesce_pio_del,
.eventfd_add = kvm_io_ioeventfd_add, .eventfd_add = kvm_io_ioeventfd_add,
.eventfd_del = kvm_io_ioeventfd_del, .eventfd_del = kvm_io_ioeventfd_del,
.priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND, .priority = 10,
}; };
int kvm_set_irq(KVMState *s, int irq, int level) int kvm_set_irq(KVMState *s, int irq, int level)
@@ -1804,7 +1757,7 @@ static void clear_gsi(KVMState *s, unsigned int gsi)
void kvm_init_irq_routing(KVMState *s) void kvm_init_irq_routing(KVMState *s)
{ {
int gsi_count; int gsi_count, i;
gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1; gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
if (gsi_count > 0) { if (gsi_count > 0) {
@@ -1816,6 +1769,12 @@ void kvm_init_irq_routing(KVMState *s)
s->irq_routes = g_malloc0(sizeof(*s->irq_routes)); s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
s->nr_allocated_irq_routes = 0; s->nr_allocated_irq_routes = 0;
if (!kvm_direct_msi_allowed) {
for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
QTAILQ_INIT(&s->msi_hashtab[i]);
}
}
kvm_arch_init_irq_routing(s); kvm_arch_init_irq_routing(s);
} }
@@ -1935,10 +1894,41 @@ void kvm_irqchip_change_notify(void)
notifier_list_notify(&kvm_irqchip_change_notifiers, NULL); notifier_list_notify(&kvm_irqchip_change_notifiers, NULL);
} }
static unsigned int kvm_hash_msi(uint32_t data)
{
/* This is optimized for IA32 MSI layout. However, no other arch shall
* repeat the mistake of not providing a direct MSI injection API. */
return data & 0xff;
}
static void kvm_flush_dynamic_msi_routes(KVMState *s)
{
KVMMSIRoute *route, *next;
unsigned int hash;
for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
kvm_irqchip_release_virq(s, route->kroute.gsi);
QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
g_free(route);
}
}
}
static int kvm_irqchip_get_virq(KVMState *s) static int kvm_irqchip_get_virq(KVMState *s)
{ {
int next_virq; int next_virq;
/*
* PIC and IOAPIC share the first 16 GSI numbers, thus the available
* GSI numbers are more than the number of IRQ route. Allocating a GSI
* number can succeed even though a new route entry cannot be added.
* When this happens, flush dynamic MSI entries to free IRQ route entries.
*/
if (!kvm_direct_msi_allowed && s->irq_routes->nr == s->gsi_count) {
kvm_flush_dynamic_msi_routes(s);
}
/* Return the lowest unused GSI in the bitmap */ /* Return the lowest unused GSI in the bitmap */
next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count); next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
if (next_virq >= s->gsi_count) { if (next_virq >= s->gsi_count) {
@@ -1948,10 +1938,27 @@ static int kvm_irqchip_get_virq(KVMState *s)
} }
} }
static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
{
unsigned int hash = kvm_hash_msi(msg.data);
KVMMSIRoute *route;
QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) {
if (route->kroute.u.msi.address_lo == (uint32_t)msg.address &&
route->kroute.u.msi.address_hi == (msg.address >> 32) &&
route->kroute.u.msi.data == le32_to_cpu(msg.data)) {
return route;
}
}
return NULL;
}
int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg) int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{ {
struct kvm_msi msi; struct kvm_msi msi;
KVMMSIRoute *route;
if (kvm_direct_msi_allowed) {
msi.address_lo = (uint32_t)msg.address; msi.address_lo = (uint32_t)msg.address;
msi.address_hi = msg.address >> 32; msi.address_hi = msg.address >> 32;
msi.data = le32_to_cpu(msg.data); msi.data = le32_to_cpu(msg.data);
@@ -1961,6 +1968,35 @@ int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi); return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
} }
route = kvm_lookup_msi_route(s, msg);
if (!route) {
int virq;
virq = kvm_irqchip_get_virq(s);
if (virq < 0) {
return virq;
}
route = g_new0(KVMMSIRoute, 1);
route->kroute.gsi = virq;
route->kroute.type = KVM_IRQ_ROUTING_MSI;
route->kroute.flags = 0;
route->kroute.u.msi.address_lo = (uint32_t)msg.address;
route->kroute.u.msi.address_hi = msg.address >> 32;
route->kroute.u.msi.data = le32_to_cpu(msg.data);
kvm_add_routing_entry(s, &route->kroute);
kvm_irqchip_commit_routes(s);
QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
entry);
}
assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);
return kvm_set_irq(s, route->kroute.gsi, 1);
}
int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev) int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
{ {
struct kvm_irq_routing_entry kroute = {}; struct kvm_irq_routing_entry kroute = {};
@@ -2085,6 +2121,10 @@ static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
} }
} }
if (!kvm_irqfds_enabled()) {
return -ENOSYS;
}
return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd); return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
} }
@@ -2245,11 +2285,6 @@ static void kvm_irqchip_create(KVMState *s)
return; return;
} }
if (kvm_check_extension(s, KVM_CAP_IRQFD) <= 0) {
fprintf(stderr, "kvm: irqfd not implemented\n");
exit(1);
}
/* First probe and see if there's a arch-specific hook to create the /* First probe and see if there's a arch-specific hook to create the
* in-kernel irqchip for us */ * in-kernel irqchip for us */
ret = kvm_arch_irqchip_create(s); ret = kvm_arch_irqchip_create(s);
@@ -2326,19 +2361,19 @@ static int kvm_init(MachineState *ms)
static const char upgrade_note[] = static const char upgrade_note[] =
"Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n" "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
"(see http://sourceforge.net/projects/kvm).\n"; "(see http://sourceforge.net/projects/kvm).\n";
const struct { struct {
const char *name; const char *name;
int num; int num;
} num_cpus[] = { } num_cpus[] = {
{ "SMP", ms->smp.cpus }, { "SMP", ms->smp.cpus },
{ "hotpluggable", ms->smp.max_cpus }, { "hotpluggable", ms->smp.max_cpus },
{ /* end of list */ } { NULL, }
}, *nc = num_cpus; }, *nc = num_cpus;
int soft_vcpus_limit, hard_vcpus_limit; int soft_vcpus_limit, hard_vcpus_limit;
KVMState *s; KVMState *s;
const KVMCapabilityInfo *missing_cap; const KVMCapabilityInfo *missing_cap;
int ret; int ret;
int type; int type = 0;
uint64_t dirty_log_manual_caps; uint64_t dirty_log_manual_caps;
qemu_mutex_init(&kml_slots_lock); qemu_mutex_init(&kml_slots_lock);
@@ -2403,13 +2438,6 @@ static int kvm_init(MachineState *ms)
type = mc->kvm_type(ms, kvm_type); type = mc->kvm_type(ms, kvm_type);
} else if (mc->kvm_type) { } else if (mc->kvm_type) {
type = mc->kvm_type(ms, NULL); type = mc->kvm_type(ms, NULL);
} else {
type = kvm_arch_get_default_type(ms);
}
if (type < 0) {
ret = -EINVAL;
goto err;
} }
do { do {
@@ -2484,11 +2512,37 @@ static int kvm_init(MachineState *ms)
* Enable KVM dirty ring if supported, otherwise fall back to * Enable KVM dirty ring if supported, otherwise fall back to
* dirty logging mode * dirty logging mode
*/ */
ret = kvm_dirty_ring_init(s); if (s->kvm_dirty_ring_size > 0) {
if (ret < 0) { uint64_t ring_bytes;
ring_bytes = s->kvm_dirty_ring_size * sizeof(struct kvm_dirty_gfn);
/* Read the max supported pages */
ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING);
if (ret > 0) {
if (ring_bytes > ret) {
error_report("KVM dirty ring size %" PRIu32 " too big "
"(maximum is %ld). Please use a smaller value.",
s->kvm_dirty_ring_size,
(long)ret / sizeof(struct kvm_dirty_gfn));
ret = -EINVAL;
goto err; goto err;
} }
ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING, 0, ring_bytes);
if (ret) {
error_report("Enabling of KVM dirty ring failed: %s. "
"Suggested minimum value is 1024.", strerror(-ret));
goto err;
}
s->kvm_dirty_ring_bytes = ring_bytes;
} else {
warn_report("KVM dirty ring not available, using bitmap method");
s->kvm_dirty_ring_size = 0;
}
}
/* /*
* KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when dirty ring is * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when dirty ring is
* enabled. More importantly, KVM_DIRTY_LOG_INITIALLY_SET will assume no * enabled. More importantly, KVM_DIRTY_LOG_INITIALLY_SET will assume no
@@ -2524,8 +2578,22 @@ static int kvm_init(MachineState *ms)
#ifdef KVM_CAP_VCPU_EVENTS #ifdef KVM_CAP_VCPU_EVENTS
s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS); s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif #endif
s->robust_singlestep =
kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);
#ifdef KVM_CAP_DEBUGREGS
s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
#endif
s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE); s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE);
#ifdef KVM_CAP_IRQ_ROUTING
kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
#endif
s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);
s->irq_set_ioctl = KVM_IRQ_LINE; s->irq_set_ioctl = KVM_IRQ_LINE;
if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) { if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
s->irq_set_ioctl = KVM_IRQ_LINE_STATUS; s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
@@ -2534,12 +2602,21 @@ static int kvm_init(MachineState *ms)
kvm_readonly_mem_allowed = kvm_readonly_mem_allowed =
(kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0); (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
kvm_eventfds_allowed =
(kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);
kvm_irqfds_allowed =
(kvm_check_extension(s, KVM_CAP_IRQFD) > 0);
kvm_resamplefds_allowed = kvm_resamplefds_allowed =
(kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0); (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);
kvm_vm_attributes_allowed = kvm_vm_attributes_allowed =
(kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0); (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);
kvm_ioeventfd_any_length_allowed =
(kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0);
#ifdef KVM_CAP_SET_GUEST_DEBUG #ifdef KVM_CAP_SET_GUEST_DEBUG
kvm_has_guest_debug = kvm_has_guest_debug =
(kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG) > 0); (kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG) > 0);
@@ -2576,15 +2653,23 @@ static int kvm_init(MachineState *ms)
kvm_irqchip_create(s); kvm_irqchip_create(s);
} }
if (kvm_eventfds_allowed) {
s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add; s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del; s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
}
s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region; s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region; s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;
kvm_memory_listener_register(s, &s->memory_listener, kvm_memory_listener_register(s, &s->memory_listener,
&address_space_memory, 0, "kvm-memory"); &address_space_memory, 0, "kvm-memory");
if (kvm_eventfds_allowed) {
memory_listener_register(&kvm_io_listener, memory_listener_register(&kvm_io_listener,
&address_space_io); &address_space_io);
}
memory_listener_register(&kvm_coalesced_pio_listener,
&address_space_io);
s->many_ioeventfds = kvm_check_many_ioeventfds();
s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU); s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
if (!s->sync_mmu) { if (!s->sync_mmu) {
@@ -2593,7 +2678,10 @@ static int kvm_init(MachineState *ms)
} }
if (s->kvm_dirty_ring_size) { if (s->kvm_dirty_ring_size) {
kvm_dirty_ring_reaper_init(s); ret = kvm_dirty_ring_reaper_init(s);
if (ret) {
goto err;
}
} }
if (kvm_check_extension(kvm_state, KVM_CAP_BINARY_STATS_FD)) { if (kvm_check_extension(kvm_state, KVM_CAP_BINARY_STATS_FD)) {
@@ -2611,7 +2699,6 @@ err:
if (s->fd != -1) { if (s->fd != -1) {
close(s->fd); close(s->fd);
} }
g_free(s->as);
g_free(s->memory_listener.slots); g_free(s->memory_listener.slots);
return ret; return ret;
@@ -2638,15 +2725,17 @@ static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direc
static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run) static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
{ {
int i;
fprintf(stderr, "KVM internal error. Suberror: %d\n", fprintf(stderr, "KVM internal error. Suberror: %d\n",
run->internal.suberror); run->internal.suberror);
if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
int i;
for (i = 0; i < run->internal.ndata; ++i) { for (i = 0; i < run->internal.ndata; ++i) {
fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n", fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n",
i, (uint64_t)run->internal.data[i]); i, (uint64_t)run->internal.data[i]);
} }
}
if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) { if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
fprintf(stderr, "emulation failure\n"); fprintf(stderr, "emulation failure\n");
if (!kvm_arch_stop_on_emulation_error(cpu)) { if (!kvm_arch_stop_on_emulation_error(cpu)) {
@@ -2664,7 +2753,7 @@ void kvm_flush_coalesced_mmio_buffer(void)
{ {
KVMState *s = kvm_state; KVMState *s = kvm_state;
if (!s || s->coalesced_flush_in_progress) { if (s->coalesced_flush_in_progress) {
return; return;
} }
@@ -2700,13 +2789,7 @@ bool kvm_cpu_check_are_resettable(void)
static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg) static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{ {
if (!cpu->vcpu_dirty) { if (!cpu->vcpu_dirty) {
int ret = kvm_arch_get_registers(cpu); kvm_arch_get_registers(cpu);
if (ret) {
error_report("Failed to get registers: %s", strerror(-ret));
cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
vm_stop(RUN_STATE_INTERNAL_ERROR);
}
cpu->vcpu_dirty = true; cpu->vcpu_dirty = true;
} }
} }
@@ -2720,13 +2803,7 @@ void kvm_cpu_synchronize_state(CPUState *cpu)
static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg) static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
{ {
int ret = kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE); kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
if (ret) {
error_report("Failed to put registers after reset: %s", strerror(-ret));
cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
vm_stop(RUN_STATE_INTERNAL_ERROR);
}
cpu->vcpu_dirty = false; cpu->vcpu_dirty = false;
} }
@@ -2737,12 +2814,7 @@ void kvm_cpu_synchronize_post_reset(CPUState *cpu)
static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg) static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
{ {
int ret = kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE); kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
if (ret) {
error_report("Failed to put registers after init: %s", strerror(-ret));
exit(1);
}
cpu->vcpu_dirty = false; cpu->vcpu_dirty = false;
} }
@@ -2835,14 +2907,7 @@ int kvm_cpu_exec(CPUState *cpu)
MemTxAttrs attrs; MemTxAttrs attrs;
if (cpu->vcpu_dirty) { if (cpu->vcpu_dirty) {
ret = kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE); kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
if (ret) {
error_report("Failed to put registers after init: %s",
strerror(-ret));
ret = -1;
break;
}
cpu->vcpu_dirty = false; cpu->vcpu_dirty = false;
} }
@@ -3139,11 +3204,29 @@ int kvm_has_vcpu_events(void)
return kvm_state->vcpu_events; return kvm_state->vcpu_events;
} }
int kvm_has_robust_singlestep(void)
{
return kvm_state->robust_singlestep;
}
int kvm_has_debugregs(void)
{
return kvm_state->debugregs;
}
int kvm_max_nested_state_length(void) int kvm_max_nested_state_length(void)
{ {
return kvm_state->max_nested_state_len; return kvm_state->max_nested_state_len;
} }
int kvm_has_many_ioeventfds(void)
{
if (!kvm_enabled()) {
return 0;
}
return kvm_state->many_ioeventfds;
}
int kvm_has_gsi_routing(void) int kvm_has_gsi_routing(void)
{ {
#ifdef KVM_CAP_IRQ_ROUTING #ifdef KVM_CAP_IRQ_ROUTING
@@ -3153,13 +3236,19 @@ int kvm_has_gsi_routing(void)
#endif #endif
} }
int kvm_has_intx_set_mask(void)
{
return kvm_state->intx_set_mask;
}
bool kvm_arm_supports_user_irq(void) bool kvm_arm_supports_user_irq(void)
{ {
return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ); return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
} }
#ifdef KVM_CAP_SET_GUEST_DEBUG #ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu, vaddr pc) struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
target_ulong pc)
{ {
struct kvm_sw_breakpoint *bp; struct kvm_sw_breakpoint *bp;
@@ -3216,7 +3305,7 @@ bool kvm_supports_guest_debug(void)
return kvm_has_guest_debug; return kvm_has_guest_debug;
} }
int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len) int kvm_insert_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len)
{ {
struct kvm_sw_breakpoint *bp; struct kvm_sw_breakpoint *bp;
int err; int err;
@@ -3254,7 +3343,7 @@ int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
return 0; return 0;
} }
int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len) int kvm_remove_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len)
{ {
struct kvm_sw_breakpoint *bp; struct kvm_sw_breakpoint *bp;
int err; int err;
@@ -3612,13 +3701,8 @@ static void kvm_accel_instance_init(Object *obj)
s->kernel_irqchip_split = ON_OFF_AUTO_AUTO; s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
/* KVM dirty ring is by default off */ /* KVM dirty ring is by default off */
s->kvm_dirty_ring_size = 0; s->kvm_dirty_ring_size = 0;
s->kvm_dirty_ring_with_bitmap = false;
s->kvm_eager_split_size = 0;
s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN; s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN;
s->notify_window = 0; s->notify_window = 0;
s->xen_version = 0;
s->xen_gnttab_max_frames = 64;
s->xen_evtchn_max_pirq = 256;
} }
/** /**
@@ -3863,7 +3947,7 @@ static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd
/* Read stats header */ /* Read stats header */
kvm_stats_header = &descriptors->kvm_stats_header; kvm_stats_header = &descriptors->kvm_stats_header;
ret = pread(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header), 0); ret = read(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header));
if (ret != sizeof(*kvm_stats_header)) { if (ret != sizeof(*kvm_stats_header)) {
error_setg(errp, "KVM stats: failed to read stats header: " error_setg(errp, "KVM stats: failed to read stats header: "
"expected %zu actual %zu", "expected %zu actual %zu",
@@ -3894,8 +3978,7 @@ static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd
} }
static void query_stats(StatsResultList **result, StatsTarget target, static void query_stats(StatsResultList **result, StatsTarget target,
strList *names, int stats_fd, CPUState *cpu, strList *names, int stats_fd, Error **errp)
Error **errp)
{ {
struct kvm_stats_desc *kvm_stats_desc; struct kvm_stats_desc *kvm_stats_desc;
struct kvm_stats_header *kvm_stats_header; struct kvm_stats_header *kvm_stats_header;
@@ -3953,7 +4036,7 @@ static void query_stats(StatsResultList **result, StatsTarget target,
break; break;
case STATS_TARGET_VCPU: case STATS_TARGET_VCPU:
add_stats_entry(result, STATS_PROVIDER_KVM, add_stats_entry(result, STATS_PROVIDER_KVM,
cpu->parent_obj.canonical_path, current_cpu->parent_obj.canonical_path,
stats_list); stats_list);
break; break;
default: default:
@@ -3990,9 +4073,10 @@ static void query_stats_schema(StatsSchemaList **result, StatsTarget target,
add_stats_schema(result, STATS_PROVIDER_KVM, target, stats_list); add_stats_schema(result, STATS_PROVIDER_KVM, target, stats_list);
} }
static void query_stats_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args) static void query_stats_vcpu(CPUState *cpu, run_on_cpu_data data)
{ {
int stats_fd = cpu->kvm_vcpu_stats_fd; StatsArgs *kvm_stats_args = (StatsArgs *) data.host_ptr;
int stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);
Error *local_err = NULL; Error *local_err = NULL;
if (stats_fd == -1) { if (stats_fd == -1) {
@@ -4001,13 +4085,14 @@ static void query_stats_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
return; return;
} }
query_stats(kvm_stats_args->result.stats, STATS_TARGET_VCPU, query_stats(kvm_stats_args->result.stats, STATS_TARGET_VCPU,
kvm_stats_args->names, stats_fd, cpu, kvm_stats_args->names, stats_fd, kvm_stats_args->errp);
kvm_stats_args->errp); close(stats_fd);
} }
static void query_stats_schema_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args) static void query_stats_schema_vcpu(CPUState *cpu, run_on_cpu_data data)
{ {
int stats_fd = cpu->kvm_vcpu_stats_fd; StatsArgs *kvm_stats_args = (StatsArgs *) data.host_ptr;
int stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);
Error *local_err = NULL; Error *local_err = NULL;
if (stats_fd == -1) { if (stats_fd == -1) {
@@ -4017,6 +4102,7 @@ static void query_stats_schema_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
} }
query_stats_schema(kvm_stats_args->result.schema, STATS_TARGET_VCPU, stats_fd, query_stats_schema(kvm_stats_args->result.schema, STATS_TARGET_VCPU, stats_fd,
kvm_stats_args->errp); kvm_stats_args->errp);
close(stats_fd);
} }
static void query_stats_cb(StatsResultList **result, StatsTarget target, static void query_stats_cb(StatsResultList **result, StatsTarget target,
@@ -4034,7 +4120,7 @@ static void query_stats_cb(StatsResultList **result, StatsTarget target,
error_setg_errno(errp, errno, "KVM stats: ioctl failed"); error_setg_errno(errp, errno, "KVM stats: ioctl failed");
return; return;
} }
query_stats(result, target, names, stats_fd, NULL, errp); query_stats(result, target, names, stats_fd, errp);
close(stats_fd); close(stats_fd);
break; break;
} }
@@ -4048,7 +4134,7 @@ static void query_stats_cb(StatsResultList **result, StatsTarget target,
if (!apply_str_list_filter(cpu->parent_obj.canonical_path, targets)) { if (!apply_str_list_filter(cpu->parent_obj.canonical_path, targets)) {
continue; continue;
} }
query_stats_vcpu(cpu, &stats_args); run_on_cpu(cpu, query_stats_vcpu, RUN_ON_CPU_HOST_PTR(&stats_args));
} }
break; break;
} }
@@ -4074,6 +4160,6 @@ void query_stats_schemas_cb(StatsSchemaList **result, Error **errp)
if (first_cpu) { if (first_cpu) {
stats_args.result.schema = result; stats_args.result.schema = result;
stats_args.errp = errp; stats_args.errp = errp;
query_stats_schema_vcpu(first_cpu, &stats_args); run_on_cpu(first_cpu, query_stats_schema_vcpu, RUN_ON_CPU_HOST_PTR(&stats_args));
} }
} }

View File

@@ -19,8 +19,8 @@ void kvm_cpu_synchronize_post_reset(CPUState *cpu);
void kvm_cpu_synchronize_post_init(CPUState *cpu); void kvm_cpu_synchronize_post_init(CPUState *cpu);
void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu); void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu);
bool kvm_supports_guest_debug(void); bool kvm_supports_guest_debug(void);
int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len); int kvm_insert_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len);
int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len); int kvm_remove_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len);
void kvm_remove_all_breakpoints(CPUState *cpu); void kvm_remove_all_breakpoints(CPUState *cpu);
#endif /* KVM_CPUS_H */ #endif /* KVM_CPUS_H */

View File

@@ -1,5 +1,5 @@
specific_ss.add(files('accel-target.c')) specific_ss.add(files('accel-common.c', 'accel-blocker.c'))
system_ss.add(files('accel-system.c', 'accel-blocker.c')) softmmu_ss.add(files('accel-softmmu.c'))
user_ss.add(files('accel-user.c')) user_ss.add(files('accel-user.c'))
subdir('tcg') subdir('tcg')
@@ -12,4 +12,4 @@ if have_system
endif endif
# qtest # qtest
system_ss.add(files('dummy-cpus.c')) softmmu_ss.add(files('dummy-cpus.c'))

View File

@@ -1 +1 @@
qtest_module_ss.add(when: ['CONFIG_SYSTEM_ONLY'], if_true: files('qtest.c')) qtest_module_ss.add(when: ['CONFIG_SOFTMMU'], if_true: files('qtest.c'))

24
accel/stubs/hax-stub.c Normal file
View File

@@ -0,0 +1,24 @@
/*
* QEMU HAXM support
*
* Copyright (c) 2015, Intel Corporation
*
* Copyright 2016 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* See the COPYING file in the top-level directory.
*
*/
#include "qemu/osdep.h"
#include "sysemu/hax.h"
bool hax_allowed;
int hax_sync_vcpus(void)
{
return 0;
}

View File

@@ -17,12 +17,15 @@
KVMState *kvm_state; KVMState *kvm_state;
bool kvm_kernel_irqchip; bool kvm_kernel_irqchip;
bool kvm_async_interrupts_allowed; bool kvm_async_interrupts_allowed;
bool kvm_eventfds_allowed;
bool kvm_irqfds_allowed;
bool kvm_resamplefds_allowed; bool kvm_resamplefds_allowed;
bool kvm_msi_via_irqfd_allowed; bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed; bool kvm_gsi_routing_allowed;
bool kvm_gsi_direct_mapping; bool kvm_gsi_direct_mapping;
bool kvm_allowed; bool kvm_allowed;
bool kvm_readonly_mem_allowed; bool kvm_readonly_mem_allowed;
bool kvm_ioeventfd_any_length_allowed;
bool kvm_msi_use_devid; bool kvm_msi_use_devid;
void kvm_flush_coalesced_mmio_buffer(void) void kvm_flush_coalesced_mmio_buffer(void)
@@ -38,6 +41,11 @@ bool kvm_has_sync_mmu(void)
return false; return false;
} }
int kvm_has_many_ioeventfds(void)
{
return 0;
}
int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr) int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{ {
return 1; return 1;
@@ -83,6 +91,11 @@ void kvm_irqchip_change_notify(void)
{ {
} }
int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
{
return -ENOSYS;
}
int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n, int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
EventNotifier *rn, int virq) EventNotifier *rn, int virq)
{ {
@@ -95,14 +108,9 @@ int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
return -ENOSYS; return -ENOSYS;
} }
unsigned int kvm_get_max_memslots(void) bool kvm_has_free_slot(MachineState *ms)
{ {
return 0; return false;
}
unsigned int kvm_get_free_memslots(void)
{
return 0;
} }
void kvm_init_cpu_signals(CPUState *cpu) void kvm_init_cpu_signals(CPUState *cpu)

View File

@@ -1,6 +1,7 @@
system_stubs_ss = ss.source_set() sysemu_stubs_ss = ss.source_set()
system_stubs_ss.add(when: 'CONFIG_XEN', if_false: files('xen-stub.c')) sysemu_stubs_ss.add(when: 'CONFIG_HAX', if_false: files('hax-stub.c'))
system_stubs_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c')) sysemu_stubs_ss.add(when: 'CONFIG_XEN', if_false: files('xen-stub.c'))
system_stubs_ss.add(when: 'CONFIG_TCG', if_false: files('tcg-stub.c')) sysemu_stubs_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c'))
sysemu_stubs_ss.add(when: 'CONFIG_TCG', if_false: files('tcg-stub.c'))
specific_ss.add_all(when: ['CONFIG_SYSTEM_ONLY'], if_true: system_stubs_ss) specific_ss.add_all(when: ['CONFIG_SOFTMMU'], if_true: sysemu_stubs_ss)

View File

@@ -11,25 +11,28 @@
*/ */
#include "qemu/osdep.h" #include "qemu/osdep.h"
#include "exec/tb-flush.h"
#include "exec/exec-all.h" #include "exec/exec-all.h"
void tb_flush(CPUState *cpu) void tb_flush(CPUState *cpu)
{ {
} }
void tlb_set_dirty(CPUState *cpu, vaddr vaddr) void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{ {
} }
int probe_access_flags(CPUArchState *env, vaddr addr, int size, void tcg_flush_jmp_cache(CPUState *cpu)
{
}
int probe_access_flags(CPUArchState *env, target_ulong addr,
MMUAccessType access_type, int mmu_idx, MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr) bool nonfault, void **phost, uintptr_t retaddr)
{ {
g_assert_not_reached(); g_assert_not_reached();
} }
void *probe_access(CPUArchState *env, vaddr addr, int size, void *probe_access(CPUArchState *env, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{ {
/* Handled by hardware accelerator. */ /* Handled by hardware accelerator. */

View File

@@ -13,12 +13,26 @@
* See the COPYING file in the top-level directory. * See the COPYING file in the top-level directory.
*/ */
static void atomic_trace_rmw_post(CPUArchState *env, uint64_t addr, static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr,
MemOpIdx oi) MemOpIdx oi)
{ {
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_RW); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_RW);
} }
#if HAVE_ATOMIC128
static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr,
MemOpIdx oi)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
}
static void atomic_trace_st_post(CPUArchState *env, target_ulong addr,
MemOpIdx oi)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
#endif
/* /*
* Atomic helpers callable from TCG. * Atomic helpers callable from TCG.
* These have a common interface and all defer to cpu_atomic_* * These have a common interface and all defer to cpu_atomic_*
@@ -26,7 +40,7 @@ static void atomic_trace_rmw_post(CPUArchState *env, uint64_t addr,
*/ */
#define CMPXCHG_HELPER(OP, TYPE) \ #define CMPXCHG_HELPER(OP, TYPE) \
TYPE HELPER(atomic_##OP)(CPUArchState *env, uint64_t addr, \ TYPE HELPER(atomic_##OP)(CPUArchState *env, target_ulong addr, \
TYPE oldv, TYPE newv, uint32_t oi) \ TYPE oldv, TYPE newv, uint32_t oi) \
{ return cpu_atomic_##OP##_mmu(env, addr, oldv, newv, oi, GETPC()); } { return cpu_atomic_##OP##_mmu(env, addr, oldv, newv, oi, GETPC()); }
@@ -41,23 +55,43 @@ CMPXCHG_HELPER(cmpxchgq_be, uint64_t)
CMPXCHG_HELPER(cmpxchgq_le, uint64_t) CMPXCHG_HELPER(cmpxchgq_le, uint64_t)
#endif #endif
#if HAVE_CMPXCHG128 #ifdef CONFIG_CMPXCHG128
CMPXCHG_HELPER(cmpxchgo_be, Int128) CMPXCHG_HELPER(cmpxchgo_be, Int128)
CMPXCHG_HELPER(cmpxchgo_le, Int128) CMPXCHG_HELPER(cmpxchgo_le, Int128)
#endif #endif
#undef CMPXCHG_HELPER #undef CMPXCHG_HELPER
Int128 HELPER(nonatomic_cmpxchgo)(CPUArchState *env, uint64_t addr, Int128 HELPER(nonatomic_cmpxchgo_be)(CPUArchState *env, target_ulong addr,
Int128 cmpv, Int128 newv, uint32_t oi) Int128 cmpv, Int128 newv, uint32_t oi)
{ {
#if TCG_TARGET_REG_BITS == 32 #if TCG_TARGET_REG_BITS == 32
uintptr_t ra = GETPC(); uintptr_t ra = GETPC();
Int128 oldv; Int128 oldv;
oldv = cpu_ld16_mmu(env, addr, oi, ra); oldv = cpu_ld16_be_mmu(env, addr, oi, ra);
if (int128_eq(oldv, cmpv)) { if (int128_eq(oldv, cmpv)) {
cpu_st16_mmu(env, addr, newv, oi, ra); cpu_st16_be_mmu(env, addr, newv, oi, ra);
} else {
/* Even with comparison failure, still need a write cycle. */
probe_write(env, addr, 16, get_mmuidx(oi), ra);
}
return oldv;
#else
g_assert_not_reached();
#endif
}
Int128 HELPER(nonatomic_cmpxchgo_le)(CPUArchState *env, target_ulong addr,
Int128 cmpv, Int128 newv, uint32_t oi)
{
#if TCG_TARGET_REG_BITS == 32
uintptr_t ra = GETPC();
Int128 oldv;
oldv = cpu_ld16_le_mmu(env, addr, oi, ra);
if (int128_eq(oldv, cmpv)) {
cpu_st16_le_mmu(env, addr, newv, oi, ra);
} else { } else {
/* Even with comparison failure, still need a write cycle. */ /* Even with comparison failure, still need a write cycle. */
probe_write(env, addr, 16, get_mmuidx(oi), ra); probe_write(env, addr, 16, get_mmuidx(oi), ra);
@@ -69,7 +103,7 @@ Int128 HELPER(nonatomic_cmpxchgo)(CPUArchState *env, uint64_t addr,
} }
#define ATOMIC_HELPER(OP, TYPE) \ #define ATOMIC_HELPER(OP, TYPE) \
TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, uint64_t addr, \ TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, target_ulong addr, \
TYPE val, uint32_t oi) \ TYPE val, uint32_t oi) \
{ return glue(glue(cpu_atomic_,OP),_mmu)(env, addr, val, oi, GETPC()); } { return glue(glue(cpu_atomic_,OP),_mmu)(env, addr, val, oi, GETPC()); }

View File

@@ -69,12 +69,12 @@
# define END _le # define END _le
#endif #endif
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE cmpv, ABI_TYPE newv, ABI_TYPE cmpv, ABI_TYPE newv,
MemOpIdx oi, uintptr_t retaddr) MemOpIdx oi, uintptr_t retaddr)
{ {
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
DATA_SIZE, retaddr); PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE ret; DATA_TYPE ret;
#if DATA_SIZE == 16 #if DATA_SIZE == 16
@@ -87,12 +87,38 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
return ret; return ret;
} }
#if DATA_SIZE < 16 #if DATA_SIZE >= 16
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val, #if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr) MemOpIdx oi, uintptr_t retaddr)
{ {
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
DATA_SIZE, retaddr); PAGE_READ, retaddr);
DATA_TYPE val;
val = atomic16_read(haddr);
ATOMIC_MMU_CLEANUP;
atomic_trace_ld_post(env, addr, oi);
return val;
}
void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_WRITE, retaddr);
atomic16_set(haddr, val);
ATOMIC_MMU_CLEANUP;
atomic_trace_st_post(env, addr, oi);
}
#endif
#else
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE ret; DATA_TYPE ret;
ret = qatomic_xchg__nocheck(haddr, val); ret = qatomic_xchg__nocheck(haddr, val);
@@ -102,11 +128,12 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
} }
#define GEN_ATOMIC_HELPER(X) \ #define GEN_ATOMIC_HELPER(X) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \ ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \ { \
DATA_TYPE *haddr, ret; \ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \ PAGE_READ | PAGE_WRITE, retaddr); \
DATA_TYPE ret; \
ret = qatomic_##X(haddr, val); \ ret = qatomic_##X(haddr, val); \
ATOMIC_MMU_CLEANUP; \ ATOMIC_MMU_CLEANUP; \
atomic_trace_rmw_post(env, addr, oi); \ atomic_trace_rmw_post(env, addr, oi); \
@@ -133,11 +160,12 @@ GEN_ATOMIC_HELPER(xor_fetch)
* of CF_PARALLEL's value, we'll trace just a read and a write. * of CF_PARALLEL's value, we'll trace just a read and a write.
*/ */
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \ #define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \ ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \ { \
XDATA_TYPE *haddr, cmp, old, new, val = xval; \ XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \ PAGE_READ | PAGE_WRITE, retaddr); \
XDATA_TYPE cmp, old, new, val = xval; \
smp_mb(); \ smp_mb(); \
cmp = qatomic_read__nocheck(haddr); \ cmp = qatomic_read__nocheck(haddr); \
do { \ do { \
@@ -160,7 +188,7 @@ GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new) GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
#undef GEN_ATOMIC_HELPER_FN #undef GEN_ATOMIC_HELPER_FN
#endif /* DATA SIZE < 16 */ #endif /* DATA SIZE >= 16 */
#undef END #undef END
@@ -174,12 +202,12 @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
# define END _be # define END _be
#endif #endif
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE cmpv, ABI_TYPE newv, ABI_TYPE cmpv, ABI_TYPE newv,
MemOpIdx oi, uintptr_t retaddr) MemOpIdx oi, uintptr_t retaddr)
{ {
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
DATA_SIZE, retaddr); PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE ret; DATA_TYPE ret;
#if DATA_SIZE == 16 #if DATA_SIZE == 16
@@ -192,12 +220,39 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
return BSWAP(ret); return BSWAP(ret);
} }
#if DATA_SIZE < 16 #if DATA_SIZE >= 16
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val, #if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr) MemOpIdx oi, uintptr_t retaddr)
{ {
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
DATA_SIZE, retaddr); PAGE_READ, retaddr);
DATA_TYPE val;
val = atomic16_read(haddr);
ATOMIC_MMU_CLEANUP;
atomic_trace_ld_post(env, addr, oi);
return BSWAP(val);
}
void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_WRITE, retaddr);
val = BSWAP(val);
atomic16_set(haddr, val);
ATOMIC_MMU_CLEANUP;
atomic_trace_st_post(env, addr, oi);
}
#endif
#else
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
ABI_TYPE ret; ABI_TYPE ret;
ret = qatomic_xchg__nocheck(haddr, BSWAP(val)); ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
@@ -207,11 +262,12 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
} }
#define GEN_ATOMIC_HELPER(X) \ #define GEN_ATOMIC_HELPER(X) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \ ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \ { \
DATA_TYPE *haddr, ret; \ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \ PAGE_READ | PAGE_WRITE, retaddr); \
DATA_TYPE ret; \
ret = qatomic_##X(haddr, BSWAP(val)); \ ret = qatomic_##X(haddr, BSWAP(val)); \
ATOMIC_MMU_CLEANUP; \ ATOMIC_MMU_CLEANUP; \
atomic_trace_rmw_post(env, addr, oi); \ atomic_trace_rmw_post(env, addr, oi); \
@@ -235,11 +291,12 @@ GEN_ATOMIC_HELPER(xor_fetch)
* of CF_PARALLEL's value, we'll trace just a read and a write. * of CF_PARALLEL's value, we'll trace just a read and a write.
*/ */
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \ #define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \ ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \ { \
XDATA_TYPE *haddr, ldo, ldn, old, new, val = xval; \ XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \ PAGE_READ | PAGE_WRITE, retaddr); \
XDATA_TYPE ldo, ldn, old, new, val = xval; \
smp_mb(); \ smp_mb(); \
ldn = qatomic_read__nocheck(haddr); \ ldn = qatomic_read__nocheck(haddr); \
do { \ do { \
@@ -269,7 +326,7 @@ GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
#undef ADD #undef ADD
#undef GEN_ATOMIC_HELPER_FN #undef GEN_ATOMIC_HELPER_FN
#endif /* DATA_SIZE < 16 */ #endif /* DATA_SIZE >= 16 */
#undef END #undef END
#endif /* DATA_SIZE > 1 */ #endif /* DATA_SIZE > 1 */

View File

@@ -20,8 +20,7 @@
#include "qemu/osdep.h" #include "qemu/osdep.h"
#include "sysemu/cpus.h" #include "sysemu/cpus.h"
#include "sysemu/tcg.h" #include "sysemu/tcg.h"
#include "qemu/plugin.h" #include "exec/exec-all.h"
#include "internal-common.h"
bool tcg_allowed; bool tcg_allowed;
@@ -32,12 +31,40 @@ void cpu_loop_exit_noexc(CPUState *cpu)
cpu_loop_exit(cpu); cpu_loop_exit(cpu);
} }
#if defined(CONFIG_SOFTMMU)
void cpu_reloading_memory_map(void)
{
if (qemu_in_vcpu_thread() && current_cpu->running) {
/* The guest can in theory prolong the RCU critical section as long
* as it feels like. The major problem with this is that because it
* can do multiple reconfigurations of the memory map within the
* critical section, we could potentially accumulate an unbounded
* collection of memory data structures awaiting reclamation.
*
* Because the only thing we're currently protecting with RCU is the
* memory data structures, it's sufficient to break the critical section
* in this callback, which we know will get called every time the
* memory map is rearranged.
*
* (If we add anything else in the system that uses RCU to protect
* its data structures, we will need to implement some other mechanism
* to force TCG CPUs to exit the critical section, at which point this
* part of this callback might become unnecessary.)
*
* This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
* only protects cpu->as->dispatch. Since we know our caller is about
* to reload it, it's safe to split the critical section.
*/
rcu_read_unlock();
rcu_read_lock();
}
}
#endif
void cpu_loop_exit(CPUState *cpu) void cpu_loop_exit(CPUState *cpu)
{ {
/* Undo the setting in cpu_tb_exec. */ /* Undo the setting in cpu_tb_exec. */
cpu->neg.can_do_io = true; cpu->can_do_io = 1;
/* Undo any setting in generated code. */
qemu_plugin_disable_mem_helpers(cpu);
siglongjmp(cpu->jmp_env, 1); siglongjmp(cpu->jmp_env, 1);
} }
@@ -51,8 +78,6 @@ void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc) void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc)
{ {
/* Prevent looping if already executing in a serial context. */
g_assert(!cpu_in_serial_context(cpu));
cpu->exception_index = EXCP_ATOMIC; cpu->exception_index = EXCP_ATOMIC;
cpu_loop_exit_restore(cpu, pc); cpu_loop_exit_restore(cpu, pc);
} }

View File

@@ -20,6 +20,7 @@
#include "qemu/osdep.h" #include "qemu/osdep.h"
#include "qemu/qemu-print.h" #include "qemu/qemu-print.h"
#include "qapi/error.h" #include "qapi/error.h"
#include "qapi/qapi-commands-machine.h"
#include "qapi/type-helpers.h" #include "qapi/type-helpers.h"
#include "hw/core/tcg-cpu-ops.h" #include "hw/core/tcg-cpu-ops.h"
#include "trace.h" #include "trace.h"
@@ -27,6 +28,7 @@
#include "exec/exec-all.h" #include "exec/exec-all.h"
#include "tcg/tcg.h" #include "tcg/tcg.h"
#include "qemu/atomic.h" #include "qemu/atomic.h"
#include "qemu/timer.h"
#include "qemu/rcu.h" #include "qemu/rcu.h"
#include "exec/log.h" #include "exec/log.h"
#include "qemu/main-loop.h" #include "qemu/main-loop.h"
@@ -36,14 +38,13 @@
#include "sysemu/cpus.h" #include "sysemu/cpus.h"
#include "exec/cpu-all.h" #include "exec/cpu-all.h"
#include "sysemu/cpu-timers.h" #include "sysemu/cpu-timers.h"
#include "exec/replay-core.h" #include "sysemu/replay.h"
#include "sysemu/tcg.h" #include "sysemu/tcg.h"
#include "exec/helper-proto-common.h" #include "exec/helper-proto.h"
#include "tb-jmp-cache.h" #include "tb-jmp-cache.h"
#include "tb-hash.h" #include "tb-hash.h"
#include "tb-context.h" #include "tb-context.h"
#include "internal-common.h" #include "internal.h"
#include "internal-target.h"
/* -icount align implementation. */ /* -icount align implementation. */
@@ -63,8 +64,8 @@ typedef struct SyncClocks {
#define MAX_DELAY_PRINT_RATE 2000000000LL #define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100 #define MAX_NB_PRINTS 100
int64_t max_delay; static int64_t max_delay;
int64_t max_advance; static int64_t max_advance;
static void align_clocks(SyncClocks *sc, CPUState *cpu) static void align_clocks(SyncClocks *sc, CPUState *cpu)
{ {
@@ -74,7 +75,7 @@ static void align_clocks(SyncClocks *sc, CPUState *cpu)
return; return;
} }
cpu_icount = cpu->icount_extra + cpu->neg.icount_decr.u16.low; cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount); sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
sc->last_cpu_icount = cpu_icount; sc->last_cpu_icount = cpu_icount;
@@ -125,7 +126,7 @@ static void init_delay_params(SyncClocks *sc, CPUState *cpu)
sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT); sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock; sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
sc->last_cpu_icount sc->last_cpu_icount
= cpu->icount_extra + cpu->neg.icount_decr.u16.low; = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
if (sc->diff_clk < max_delay) { if (sc->diff_clk < max_delay) {
max_delay = sc->diff_clk; max_delay = sc->diff_clk;
} }
@@ -160,7 +161,7 @@ uint32_t curr_cflags(CPUState *cpu)
*/ */
if (unlikely(cpu->singlestep_enabled)) { if (unlikely(cpu->singlestep_enabled)) {
cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1; cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
} else if (qatomic_read(&one_insn_per_tb)) { } else if (singlestep) {
cflags |= CF_NO_GOTO_TB | 1; cflags |= CF_NO_GOTO_TB | 1;
} else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
cflags |= CF_NO_GOTO_TB; cflags |= CF_NO_GOTO_TB;
@@ -170,12 +171,13 @@ uint32_t curr_cflags(CPUState *cpu)
} }
struct tb_desc { struct tb_desc {
vaddr pc; target_ulong pc;
uint64_t cs_base; target_ulong cs_base;
CPUArchState *env; CPUArchState *env;
tb_page_addr_t page_addr0; tb_page_addr_t page_addr0;
uint32_t flags; uint32_t flags;
uint32_t cflags; uint32_t cflags;
uint32_t trace_vcpu_dstate;
}; };
static bool tb_lookup_cmp(const void *p, const void *d) static bool tb_lookup_cmp(const void *p, const void *d)
@@ -183,10 +185,11 @@ static bool tb_lookup_cmp(const void *p, const void *d)
const TranslationBlock *tb = p; const TranslationBlock *tb = p;
const struct tb_desc *desc = d; const struct tb_desc *desc = d;
if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->pc) && if ((TARGET_TB_PCREL || tb_pc(tb) == desc->pc) &&
tb_page_addr0(tb) == desc->page_addr0 && tb_page_addr0(tb) == desc->page_addr0 &&
tb->cs_base == desc->cs_base && tb->cs_base == desc->cs_base &&
tb->flags == desc->flags && tb->flags == desc->flags &&
tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
tb_cflags(tb) == desc->cflags) { tb_cflags(tb) == desc->cflags) {
/* check next page if needed */ /* check next page if needed */
tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb); tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb);
@@ -194,7 +197,7 @@ static bool tb_lookup_cmp(const void *p, const void *d)
return true; return true;
} else { } else {
tb_page_addr_t phys_page1; tb_page_addr_t phys_page1;
vaddr virt_page1; target_ulong virt_page1;
/* /*
* We know that the first page matched, and an otherwise valid TB * We know that the first page matched, and an otherwise valid TB
@@ -215,33 +218,34 @@ static bool tb_lookup_cmp(const void *p, const void *d)
return false; return false;
} }
static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc, static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
uint64_t cs_base, uint32_t flags, target_ulong cs_base, uint32_t flags,
uint32_t cflags) uint32_t cflags)
{ {
tb_page_addr_t phys_pc; tb_page_addr_t phys_pc;
struct tb_desc desc; struct tb_desc desc;
uint32_t h; uint32_t h;
desc.env = cpu_env(cpu); desc.env = cpu->env_ptr;
desc.cs_base = cs_base; desc.cs_base = cs_base;
desc.flags = flags; desc.flags = flags;
desc.cflags = cflags; desc.cflags = cflags;
desc.trace_vcpu_dstate = *cpu->trace_dstate;
desc.pc = pc; desc.pc = pc;
phys_pc = get_page_addr_code(desc.env, pc); phys_pc = get_page_addr_code(desc.env, pc);
if (phys_pc == -1) { if (phys_pc == -1) {
return NULL; return NULL;
} }
desc.page_addr0 = phys_pc; desc.page_addr0 = phys_pc;
h = tb_hash_func(phys_pc, (cflags & CF_PCREL ? 0 : pc), h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : pc),
flags, cs_base, cflags); flags, cflags, *cpu->trace_dstate);
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp); return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
} }
/* Might cause an exception, so have a longjmp destination ready */ /* Might cause an exception, so have a longjmp destination ready */
static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc, static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
uint64_t cs_base, uint32_t flags, target_ulong cs_base,
uint32_t cflags) uint32_t flags, uint32_t cflags)
{ {
TranslationBlock *tb; TranslationBlock *tb;
CPUJumpCache *jc; CPUJumpCache *jc;
@@ -252,15 +256,13 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
hash = tb_jmp_cache_hash_func(pc); hash = tb_jmp_cache_hash_func(pc);
jc = cpu->tb_jmp_cache; jc = cpu->tb_jmp_cache;
tb = tb_jmp_cache_get_tb(jc, hash);
if (cflags & CF_PCREL) {
/* Use acquire to ensure current load of pc from jc. */
tb = qatomic_load_acquire(&jc->array[hash].tb);
if (likely(tb && if (likely(tb &&
jc->array[hash].pc == pc && tb_jmp_cache_get_pc(jc, hash, tb) == pc &&
tb->cs_base == cs_base && tb->cs_base == cs_base &&
tb->flags == flags && tb->flags == flags &&
tb->trace_vcpu_dstate == *cpu->trace_dstate &&
tb_cflags(tb) == cflags)) { tb_cflags(tb) == cflags)) {
return tb; return tb;
} }
@@ -268,41 +270,21 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
if (tb == NULL) { if (tb == NULL) {
return NULL; return NULL;
} }
jc->array[hash].pc = pc; tb_jmp_cache_set(jc, hash, tb, pc);
/* Ensure pc is written first. */
qatomic_store_release(&jc->array[hash].tb, tb);
} else {
/* Use rcu_read to ensure current load of pc from *tb. */
tb = qatomic_rcu_read(&jc->array[hash].tb);
if (likely(tb &&
tb->pc == pc &&
tb->cs_base == cs_base &&
tb->flags == flags &&
tb_cflags(tb) == cflags)) {
return tb;
}
tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
return NULL;
}
/* Use the pc value already stored in tb->pc. */
qatomic_set(&jc->array[hash].tb, tb);
}
return tb; return tb;
} }
static void log_cpu_exec(vaddr pc, CPUState *cpu, static void log_cpu_exec(target_ulong pc, CPUState *cpu,
const TranslationBlock *tb) const TranslationBlock *tb)
{ {
if (qemu_log_in_addr_range(pc)) { if (qemu_log_in_addr_range(pc)) {
qemu_log_mask(CPU_LOG_EXEC, qemu_log_mask(CPU_LOG_EXEC,
"Trace %d: %p [%08" PRIx64 "Trace %d: %p [" TARGET_FMT_lx
"/%016" VADDR_PRIx "/%08x/%08x] %s\n", "/" TARGET_FMT_lx "/%08x/%08x] %s\n",
cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc, cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
tb->flags, tb->cflags, lookup_symbol(pc)); tb->flags, tb->cflags, lookup_symbol(pc));
#if defined(DEBUG_DISAS)
if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) { if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
FILE *logfile = qemu_log_trylock(); FILE *logfile = qemu_log_trylock();
if (logfile) { if (logfile) {
@@ -314,17 +296,15 @@ static void log_cpu_exec(vaddr pc, CPUState *cpu,
#if defined(TARGET_I386) #if defined(TARGET_I386)
flags |= CPU_DUMP_CCOP; flags |= CPU_DUMP_CCOP;
#endif #endif
if (qemu_loglevel_mask(CPU_LOG_TB_VPU)) {
flags |= CPU_DUMP_VPU;
}
cpu_dump_state(cpu, logfile, flags); cpu_dump_state(cpu, logfile, flags);
qemu_log_unlock(logfile); qemu_log_unlock(logfile);
} }
} }
#endif /* DEBUG_DISAS */
} }
} }
static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc, static bool check_for_breakpoints_slow(CPUState *cpu, target_ulong pc,
uint32_t *cflags) uint32_t *cflags)
{ {
CPUBreakpoint *bp; CPUBreakpoint *bp;
@@ -390,7 +370,7 @@ static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
return false; return false;
} }
static inline bool check_for_breakpoints(CPUState *cpu, vaddr pc, static inline bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
uint32_t *cflags) uint32_t *cflags)
{ {
return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) && return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) &&
@@ -409,8 +389,7 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
{ {
CPUState *cpu = env_cpu(env); CPUState *cpu = env_cpu(env);
TranslationBlock *tb; TranslationBlock *tb;
vaddr pc; target_ulong cs_base, pc;
uint64_t cs_base;
uint32_t flags, cflags; uint32_t flags, cflags;
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
@@ -445,7 +424,7 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
static inline TranslationBlock * QEMU_DISABLE_CFI static inline TranslationBlock * QEMU_DISABLE_CFI
cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit) cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
{ {
CPUArchState *env = cpu_env(cpu); CPUArchState *env = cpu->env_ptr;
uintptr_t ret; uintptr_t ret;
TranslationBlock *last_tb; TranslationBlock *last_tb;
const void *tb_ptr = itb->tc.ptr; const void *tb_ptr = itb->tc.ptr;
@@ -456,8 +435,7 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
qemu_thread_jit_execute(); qemu_thread_jit_execute();
ret = tcg_qemu_tb_exec(env, tb_ptr); ret = tcg_qemu_tb_exec(env, tb_ptr);
cpu->neg.can_do_io = true; cpu->can_do_io = 1;
qemu_plugin_disable_mem_helpers(cpu);
/* /*
* TODO: Delay swapping back to the read-write region of the TB * TODO: Delay swapping back to the read-write region of the TB
* until we actually need to modify the TB. The read-only copy, * until we actually need to modify the TB. The read-only copy,
@@ -481,15 +459,15 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
if (cc->tcg_ops->synchronize_from_tb) { if (cc->tcg_ops->synchronize_from_tb) {
cc->tcg_ops->synchronize_from_tb(cpu, last_tb); cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
} else { } else {
tcg_debug_assert(!(tb_cflags(last_tb) & CF_PCREL)); assert(!TARGET_TB_PCREL);
assert(cc->set_pc); assert(cc->set_pc);
cc->set_pc(cpu, last_tb->pc); cc->set_pc(cpu, tb_pc(last_tb));
} }
if (qemu_loglevel_mask(CPU_LOG_EXEC)) { if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
vaddr pc = log_pc(cpu, last_tb); target_ulong pc = log_pc(cpu, last_tb);
if (qemu_log_in_addr_range(pc)) { if (qemu_log_in_addr_range(pc)) {
qemu_log("Stopped execution of TB chain before %p [%016" qemu_log("Stopped execution of TB chain before %p ["
VADDR_PRIx "] %s\n", TARGET_FMT_lx "] %s\n",
last_tb->tc.ptr, pc, lookup_symbol(pc)); last_tb->tc.ptr, pc, lookup_symbol(pc));
} }
} }
@@ -525,51 +503,14 @@ static void cpu_exec_exit(CPUState *cpu)
if (cc->tcg_ops->cpu_exec_exit) { if (cc->tcg_ops->cpu_exec_exit) {
cc->tcg_ops->cpu_exec_exit(cpu); cc->tcg_ops->cpu_exec_exit(cpu);
} }
} QEMU_PLUGIN_ASSERT(cpu->plugin_mem_cbs == NULL);
static void cpu_exec_longjmp_cleanup(CPUState *cpu)
{
/* Non-buggy compilers preserve this; assert the correct value. */
g_assert(cpu == current_cpu);
#ifdef CONFIG_USER_ONLY
clear_helper_retaddr();
if (have_mmap_lock()) {
mmap_unlock();
}
#else
/*
* For softmmu, a tlb_fill fault during translation will land here,
* and we need to release any page locks held. In system mode we
* have one tcg_ctx per thread, so we know it was this cpu doing
* the translation.
*
* Alternative 1: Install a cleanup to be called via an exception
* handling safe longjmp. It seems plausible that all our hosts
* support such a thing. We'd have to properly register unwind info
* for the JIT for EH, rather that just for GDB.
*
* Alternative 2: Set and restore cpu->jmp_env in tb_gen_code to
* capture the cpu_loop_exit longjmp, perform the cleanup, and
* jump again to arrive here.
*/
if (tcg_ctx->gen_tb) {
tb_unlock_pages(tcg_ctx->gen_tb);
tcg_ctx->gen_tb = NULL;
}
#endif
if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread();
}
assert_no_pages_locked();
} }
void cpu_exec_step_atomic(CPUState *cpu) void cpu_exec_step_atomic(CPUState *cpu)
{ {
CPUArchState *env = cpu_env(cpu); CPUArchState *env = cpu->env_ptr;
TranslationBlock *tb; TranslationBlock *tb;
vaddr pc; target_ulong cs_base, pc;
uint64_t cs_base;
uint32_t flags, cflags; uint32_t flags, cflags;
int tb_exit; int tb_exit;
@@ -606,7 +547,17 @@ void cpu_exec_step_atomic(CPUState *cpu)
cpu_tb_exec(cpu, tb, &tb_exit); cpu_tb_exec(cpu, tb, &tb_exit);
cpu_exec_exit(cpu); cpu_exec_exit(cpu);
} else { } else {
cpu_exec_longjmp_cleanup(cpu); #ifndef CONFIG_SOFTMMU
clear_helper_retaddr();
if (have_mmap_lock()) {
mmap_unlock();
}
#endif
if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread();
}
assert_no_pages_locked();
qemu_plugin_disable_mem_helpers(cpu);
} }
/* /*
@@ -718,7 +669,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
if (cpu->exception_index < 0) { if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY #ifndef CONFIG_USER_ONLY
if (replay_has_exception() if (replay_has_exception()
&& cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0) { && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
/* Execute just one insn to trigger exception pending in the log */ /* Execute just one insn to trigger exception pending in the log */
cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT) cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
| CF_NOIRQ | 1; | CF_NOIRQ | 1;
@@ -808,7 +759,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
* Ensure zeroing happens before reading cpu->exit_request or * Ensure zeroing happens before reading cpu->exit_request or
* cpu->interrupt_request (see also smp_wmb in cpu_exit()) * cpu->interrupt_request (see also smp_wmb in cpu_exit())
*/ */
qatomic_set_mb(&cpu->neg.icount_decr.u16.high, 0); qatomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);
if (unlikely(qatomic_read(&cpu->interrupt_request))) { if (unlikely(qatomic_read(&cpu->interrupt_request))) {
int interrupt_request; int interrupt_request;
@@ -899,7 +850,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
if (unlikely(qatomic_read(&cpu->exit_request)) if (unlikely(qatomic_read(&cpu->exit_request))
|| (icount_enabled() || (icount_enabled()
&& (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT) && (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT)
&& cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0)) { && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
qatomic_set(&cpu->exit_request, 0); qatomic_set(&cpu->exit_request, 0);
if (cpu->exception_index == -1) { if (cpu->exception_index == -1) {
cpu->exception_index = EXCP_INTERRUPT; cpu->exception_index = EXCP_INTERRUPT;
@@ -911,8 +862,8 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
} }
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb, static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
vaddr pc, TranslationBlock **last_tb, target_ulong pc,
int *tb_exit) TranslationBlock **last_tb, int *tb_exit)
{ {
int32_t insns_left; int32_t insns_left;
@@ -924,7 +875,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
} }
*last_tb = NULL; *last_tb = NULL;
insns_left = qatomic_read(&cpu->neg.icount_decr.u32); insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32);
if (insns_left < 0) { if (insns_left < 0) {
/* Something asked us to stop executing chained TBs; just /* Something asked us to stop executing chained TBs; just
* continue round the main loop. Whatever requested the exit * continue round the main loop. Whatever requested the exit
@@ -943,7 +894,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
icount_update(cpu); icount_update(cpu);
/* Refill decrementer and continue execution. */ /* Refill decrementer and continue execution. */
insns_left = MIN(0xffff, cpu->icount_budget); insns_left = MIN(0xffff, cpu->icount_budget);
cpu->neg.icount_decr.u16.low = insns_left; cpu_neg(cpu)->icount_decr.u16.low = insns_left;
cpu->icount_extra = cpu->icount_budget - insns_left; cpu->icount_extra = cpu->icount_budget - insns_left;
/* /*
@@ -973,11 +924,10 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
while (!cpu_handle_interrupt(cpu, &last_tb)) { while (!cpu_handle_interrupt(cpu, &last_tb)) {
TranslationBlock *tb; TranslationBlock *tb;
vaddr pc; target_ulong cs_base, pc;
uint64_t cs_base;
uint32_t flags, cflags; uint32_t flags, cflags;
cpu_get_tb_cpu_state(cpu_env(cpu), &pc, &cs_base, &flags); cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);
/* /*
* When requested, use an exact setting for cflags for the next * When requested, use an exact setting for cflags for the next
@@ -999,27 +949,17 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
tb = tb_lookup(cpu, pc, cs_base, flags, cflags); tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) { if (tb == NULL) {
CPUJumpCache *jc;
uint32_t h; uint32_t h;
mmap_lock(); mmap_lock();
tb = tb_gen_code(cpu, pc, cs_base, flags, cflags); tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
mmap_unlock(); mmap_unlock();
/* /*
* We add the TB in the virtual pc hash table * We add the TB in the virtual pc hash table
* for the fast lookup * for the fast lookup
*/ */
h = tb_jmp_cache_hash_func(pc); h = tb_jmp_cache_hash_func(pc);
jc = cpu->tb_jmp_cache; tb_jmp_cache_set(cpu->tb_jmp_cache, h, tb, pc);
if (cflags & CF_PCREL) {
jc->array[h].pc = pc;
/* Ensure pc is written first. */
qatomic_store_release(&jc->array[h].tb, tb);
} else {
/* Use the pc value already stored in tb->pc. */
qatomic_set(&jc->array[h].tb, tb);
}
} }
#ifndef CONFIG_USER_ONLY #ifndef CONFIG_USER_ONLY
@@ -1040,6 +980,7 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit); cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);
QEMU_PLUGIN_ASSERT(cpu->plugin_mem_cbs == NULL);
/* Try to align the host and virtual clocks /* Try to align the host and virtual clocks
if the guest is in advance */ if the guest is in advance */
align_clocks(sc, cpu); align_clocks(sc, cpu);
@@ -1052,7 +993,21 @@ static int cpu_exec_setjmp(CPUState *cpu, SyncClocks *sc)
{ {
/* Prepare setjmp context for exception handling. */ /* Prepare setjmp context for exception handling. */
if (unlikely(sigsetjmp(cpu->jmp_env, 0) != 0)) { if (unlikely(sigsetjmp(cpu->jmp_env, 0) != 0)) {
cpu_exec_longjmp_cleanup(cpu); /* Non-buggy compilers preserve this; assert the correct value. */
g_assert(cpu == current_cpu);
#ifndef CONFIG_SOFTMMU
clear_helper_retaddr();
if (have_mmap_lock()) {
mmap_unlock();
}
#endif
if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread();
}
qemu_plugin_disable_mem_helpers(cpu);
assert_no_pages_locked();
} }
return cpu_exec_loop(cpu, sc); return cpu_exec_loop(cpu, sc);
@@ -1089,7 +1044,7 @@ int cpu_exec(CPUState *cpu)
return ret; return ret;
} }
bool tcg_exec_realizefn(CPUState *cpu, Error **errp) void tcg_exec_realizefn(CPUState *cpu, Error **errp)
{ {
static bool tcg_target_initialized; static bool tcg_target_initialized;
CPUClass *cc = CPU_GET_CLASS(cpu); CPUClass *cc = CPU_GET_CLASS(cpu);
@@ -1105,8 +1060,6 @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
tcg_iommu_init_notifier_list(cpu); tcg_iommu_init_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */ #endif /* !CONFIG_USER_ONLY */
/* qemu_plugin_vcpu_init_hook delayed until cpu_index assigned. */ /* qemu_plugin_vcpu_init_hook delayed until cpu_index assigned. */
return true;
} }
/* undo the initializations in reverse order */ /* undo the initializations in reverse order */
@@ -1119,3 +1072,86 @@ void tcg_exec_unrealizefn(CPUState *cpu)
tlb_destroy(cpu); tlb_destroy(cpu);
g_free_rcu(cpu->tb_jmp_cache, rcu); g_free_rcu(cpu->tb_jmp_cache, rcu);
} }
#ifndef CONFIG_USER_ONLY
static void dump_drift_info(GString *buf)
{
if (!icount_enabled()) {
return;
}
g_string_append_printf(buf, "Host - Guest clock %"PRIi64" ms\n",
(cpu_get_clock() - icount_get()) / SCALE_MS);
if (icount_align_option) {
g_string_append_printf(buf, "Max guest delay %"PRIi64" ms\n",
-max_delay / SCALE_MS);
g_string_append_printf(buf, "Max guest advance %"PRIi64" ms\n",
max_advance / SCALE_MS);
} else {
g_string_append_printf(buf, "Max guest delay NA\n");
g_string_append_printf(buf, "Max guest advance NA\n");
}
}
HumanReadableText *qmp_x_query_jit(Error **errp)
{
g_autoptr(GString) buf = g_string_new("");
if (!tcg_enabled()) {
error_setg(errp, "JIT information is only available with accel=tcg");
return NULL;
}
dump_exec_info(buf);
dump_drift_info(buf);
return human_readable_text_from_str(buf);
}
HumanReadableText *qmp_x_query_opcount(Error **errp)
{
g_autoptr(GString) buf = g_string_new("");
if (!tcg_enabled()) {
error_setg(errp, "Opcode count information is only available with accel=tcg");
return NULL;
}
tcg_dump_op_count(buf);
return human_readable_text_from_str(buf);
}
#ifdef CONFIG_PROFILER
int64_t dev_time;
HumanReadableText *qmp_x_query_profile(Error **errp)
{
g_autoptr(GString) buf = g_string_new("");
static int64_t last_cpu_exec_time;
int64_t cpu_exec_time;
int64_t delta;
cpu_exec_time = tcg_cpu_exec_time();
delta = cpu_exec_time - last_cpu_exec_time;
g_string_append_printf(buf, "async time %" PRId64 " (%0.3f)\n",
dev_time, dev_time / (double)NANOSECONDS_PER_SECOND);
g_string_append_printf(buf, "qemu time %" PRId64 " (%0.3f)\n",
delta, delta / (double)NANOSECONDS_PER_SECOND);
last_cpu_exec_time = cpu_exec_time;
dev_time = 0;
return human_readable_text_from_str(buf);
}
#else
HumanReadableText *qmp_x_query_profile(Error **errp)
{
error_setg(errp, "Internal profiler not compiled");
return NULL;
}
#endif
#endif /* !CONFIG_USER_ONLY */

File diff suppressed because it is too large Load Diff

14
accel/tcg/hmp.c Normal file
View File

@@ -0,0 +1,14 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-machine.h"
#include "exec/exec-all.h"
#include "monitor/monitor.h"
static void hmp_tcg_register(void)
{
monitor_register_hmp_info_hrt("jit", qmp_x_query_jit);
monitor_register_hmp_info_hrt("opcount", qmp_x_query_opcount);
}
type_init(hmp_tcg_register);

View File

@@ -1,26 +0,0 @@
/*
* Internal execution defines for qemu (target agnostic)
*
* Copyright (c) 2003 Fabrice Bellard
*
* SPDX-License-Identifier: LGPL-2.1-or-later
*/
#ifndef ACCEL_TCG_INTERNAL_COMMON_H
#define ACCEL_TCG_INTERNAL_COMMON_H
#include "exec/translation-block.h"
extern int64_t max_delay;
extern int64_t max_advance;
/*
* Return true if CS is not running in parallel with other cpus, either
* because there are no other cpus or we are within an exclusive context.
*/
static inline bool cpu_in_serial_context(CPUState *cs)
{
return !(cs->tcg_cflags & CF_PARALLEL) || cpu_in_exclusive_context(cs);
}
#endif

View File

@@ -1,132 +0,0 @@
/*
* Internal execution defines for qemu (target specific)
*
* Copyright (c) 2003 Fabrice Bellard
*
* SPDX-License-Identifier: LGPL-2.1-or-later
*/
#ifndef ACCEL_TCG_INTERNAL_TARGET_H
#define ACCEL_TCG_INTERNAL_TARGET_H
#include "exec/exec-all.h"
#include "exec/translate-all.h"
/*
* Access to the various translations structures need to be serialised
* via locks for consistency. In user-mode emulation access to the
* memory related structures are protected with mmap_lock.
* In !user-mode we use per-page locks.
*/
#ifdef CONFIG_USER_ONLY
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#else
#define assert_memory_lock()
#endif
#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void) { }
#endif
#ifdef CONFIG_USER_ONLY
static inline void page_table_config_init(void) { }
#else
void page_table_config_init(void);
#endif
#ifdef CONFIG_USER_ONLY
/*
* For user-only, page_protect sets the page read-only.
* Since most execution is already on read-only pages, and we'd need to
* account for other TBs on the same page, defer undoing any page protection
* until we receive the write fault.
*/
static inline void tb_lock_page0(tb_page_addr_t p0)
{
page_protect(p0);
}
static inline void tb_lock_page1(tb_page_addr_t p0, tb_page_addr_t p1)
{
page_protect(p1);
}
static inline void tb_unlock_page1(tb_page_addr_t p0, tb_page_addr_t p1) { }
static inline void tb_unlock_pages(TranslationBlock *tb) { }
#else
void tb_lock_page0(tb_page_addr_t);
void tb_lock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_pages(TranslationBlock *);
#endif
#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
unsigned size,
uintptr_t retaddr);
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
#endif /* CONFIG_SOFTMMU */
TranslationBlock *tb_gen_code(CPUState *cpu, vaddr pc,
uint64_t cs_base, uint32_t flags,
int cflags);
void page_init(void);
void tb_htable_init(void);
void tb_reset_jump(TranslationBlock *tb, int n);
TranslationBlock *tb_link_page(TranslationBlock *tb);
bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
uintptr_t host_pc);
bool tcg_exec_realizefn(CPUState *cpu, Error **errp);
void tcg_exec_unrealizefn(CPUState *cpu);
/*
 * Return the guest PC for logging purposes: with CF_PCREL the TB does
 * not carry an absolute PC, so read it from the CPU instead.
 */
static inline vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
{
    return (tb_cflags(tb) & CF_PCREL) ? cpu->cc->get_pc(cpu) : tb->pc;
}
/*
 * When true, translate a single guest instruction per TB.
 * NOTE(review): presumably set from the accelerator's "one-insn-per-tb"
 * option — confirm against the setter.
 */
extern bool one_insn_per_tb;

/**
 * tcg_req_mo:
 * @type: TCGBar
 *
 * Filter @type to the barrier that is required for the guest
 * memory ordering vs the host memory ordering. A non-zero
 * result indicates that some barrier is required.
 *
 * If TCG_GUEST_DEFAULT_MO is not defined, assume that the
 * guest requires strict ordering.
 *
 * This is a macro so that it's constant even without optimization.
 */
#ifdef TCG_GUEST_DEFAULT_MO
# define tcg_req_mo(type) \
    ((type) & TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO)
#else
# define tcg_req_mo(type) ((type) & ~TCG_TARGET_DEFAULT_MO)
#endif

/**
 * cpu_req_mo:
 * @type: TCGBar
 *
 * If tcg_req_mo indicates a barrier for @type is required
 * for the guest memory model, issue a host memory barrier.
 */
#define cpu_req_mo(type)          \
    do {                          \
        if (tcg_req_mo(type)) {   \
            smp_mb();             \
        }                         \
    } while (0)
#endif /* ACCEL_TCG_INTERNAL_H */

67
accel/tcg/internal.h Normal file
View File

@@ -0,0 +1,67 @@
/*
* Internal execution defines for qemu
*
* Copyright (c) 2003 Fabrice Bellard
*
* SPDX-License-Identifier: LGPL-2.1-or-later
*/
#ifndef ACCEL_TCG_INTERNAL_H
#define ACCEL_TCG_INTERNAL_H

#include "exec/exec-all.h"

/*
 * Access to the various translations structures need to be serialised
 * via locks for consistency. In user-mode emulation access to the
 * memory related structures are protected with mmap_lock.
 * In !user-mode we use per-page locks.
 */
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock()
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif

/* Debug-build-only check that no per-page locks remain held (system mode). */
#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void) { }
#endif

/* Page-table configuration setup is only required outside user-only mode. */
#ifdef CONFIG_USER_ONLY
static inline void page_table_config_init(void) { }
#else
void page_table_config_init(void);
#endif

#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
                                   unsigned size,
                                   uintptr_t retaddr);
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
#endif /* CONFIG_SOFTMMU */

/* Translate the guest block at pc/cs_base/flags into a new TB. */
TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc,
                              target_ulong cs_base, uint32_t flags,
                              int cflags);
void page_init(void);
void tb_htable_init(void);
void tb_reset_jump(TranslationBlock *tb, int n);
TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                               tb_page_addr_t phys_page2);
bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
/* Recover guest CPU state from the host return address within @tb. */
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                               uintptr_t host_pc);

/* Return the current PC from CPU, which may be cached in TB. */
static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
{
    /* Selected at compile time: PC-relative TBs do not store an absolute PC. */
#if TARGET_TB_PCREL
    return cpu->cc->get_pc(cpu);
#else
    return tb_pc(tb);
#endif
}

#endif /* ACCEL_TCG_INTERNAL_H */

File diff suppressed because it is too large Load Diff

View File

@@ -8,231 +8,6 @@
* This work is licensed under the terms of the GNU GPL, version 2 or later. * This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory. * See the COPYING file in the top-level directory.
*/ */
/*
 * Load helpers for tcg-ldst.h
 *
 * Each helper asserts that the MemOpIdx encodes the expected access size
 * and forwards to the size-specific do_ldN_mmu/do_stN_mmu worker.
 */

tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
    return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
}

tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
    return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
}

tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
    return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
}

uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
                        MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
    return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
}

/*
 * Provide signed versions of the load routines as well. We can of course
 * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
 */

/* Sign-extend via a cast of the corresponding unsigned helper. */
tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    return (int8_t)helper_ldub_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    return (int16_t)helper_lduw_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t retaddr)
{
    return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr);
}

Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
                       MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
    return do_ld16_mmu(env_cpu(env), addr, oi, retaddr);
}

/* Variant called directly from generated code; GETPC() supplies retaddr. */
Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, uint32_t oi)
{
    return helper_ld16_mmu(env, addr, oi, GETPC());
}

/*
 * Store helpers for tcg-ldst.h
 */

void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
    do_st1_mmu(env_cpu(env), addr, val, oi, ra);
}

void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
    do_st2_mmu(env_cpu(env), addr, val, oi, retaddr);
}

void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
    do_st4_mmu(env_cpu(env), addr, val, oi, retaddr);
}

void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
    do_st8_mmu(env_cpu(env), addr, val, oi, retaddr);
}

void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
                     MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
    do_st16_mmu(env_cpu(env), addr, val, oi, retaddr);
}

/* Variant called directly from generated code; GETPC() supplies retaddr. */
void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
{
    helper_st16_mmu(env, addr, val, oi, GETPC());
}
/*
 * Load helpers for cpu_ldst.h
 *
 * These mirror the tcg-ldst helpers above, but additionally notify any
 * loaded TCG plugins of the memory access after it completes.
 */

static void plugin_load_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
{
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
}

uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
{
    uint8_t ret;

    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB);
    ret = do_ld1_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
    /* Notify plugins only after the load has succeeded. */
    plugin_load_cb(env, addr, oi);
    return ret;
}

uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
                     MemOpIdx oi, uintptr_t ra)
{
    uint16_t ret;

    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
    ret = do_ld2_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
    plugin_load_cb(env, addr, oi);
    return ret;
}

uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
                     MemOpIdx oi, uintptr_t ra)
{
    uint32_t ret;

    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
    ret = do_ld4_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
    plugin_load_cb(env, addr, oi);
    return ret;
}

uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
                     MemOpIdx oi, uintptr_t ra)
{
    uint64_t ret;

    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
    ret = do_ld8_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
    plugin_load_cb(env, addr, oi);
    return ret;
}

Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    Int128 ret;

    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
    ret = do_ld16_mmu(env_cpu(env), addr, oi, ra);
    plugin_load_cb(env, addr, oi);
    return ret;
}

/*
 * Store helpers for cpu_ldst.h
 */

static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
{
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
/*
 * Store one byte and notify plugins.
 *
 * Call do_st1_mmu directly, with the same size assertion, to match the
 * structure of the sibling cpu_stw/stl/stq_mmu helpers below instead of
 * routing through helper_stb_mmu (which performed the identical call).
 */
void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                 MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
    do_st1_mmu(env_cpu(env), addr, val, oi, retaddr);
    plugin_store_cb(env, addr, oi);
}
/* Store helpers: assert the encoded size, store, then notify plugins. */
void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                 MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
    do_st2_mmu(env_cpu(env), addr, val, oi, retaddr);
    plugin_store_cb(env, addr, oi);
}

void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                 MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
    do_st4_mmu(env_cpu(env), addr, val, oi, retaddr);
    plugin_store_cb(env, addr, oi);
}

void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                 MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
    do_st8_mmu(env_cpu(env), addr, val, oi, retaddr);
    plugin_store_cb(env, addr, oi);
}

void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
                  MemOpIdx oi, uintptr_t retaddr)
{
    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
    do_st16_mmu(env_cpu(env), addr, val, oi, retaddr);
    plugin_store_cb(env, addr, oi);
}
/*
* Wrappers of the above
*/
uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra) int mmu_idx, uintptr_t ra)
@@ -251,7 +26,7 @@ uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra) int mmu_idx, uintptr_t ra)
{ {
MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx); MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
return cpu_ldw_mmu(env, addr, oi, ra); return cpu_ldw_be_mmu(env, addr, oi, ra);
} }
int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
@@ -264,21 +39,21 @@ uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra) int mmu_idx, uintptr_t ra)
{ {
MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx); MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
return cpu_ldl_mmu(env, addr, oi, ra); return cpu_ldl_be_mmu(env, addr, oi, ra);
} }
uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra) int mmu_idx, uintptr_t ra)
{ {
MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx); MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
return cpu_ldq_mmu(env, addr, oi, ra); return cpu_ldq_be_mmu(env, addr, oi, ra);
} }
uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra) int mmu_idx, uintptr_t ra)
{ {
MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx); MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
return cpu_ldw_mmu(env, addr, oi, ra); return cpu_ldw_le_mmu(env, addr, oi, ra);
} }
int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
@@ -291,14 +66,14 @@ uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra) int mmu_idx, uintptr_t ra)
{ {
MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx); MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
return cpu_ldl_mmu(env, addr, oi, ra); return cpu_ldl_le_mmu(env, addr, oi, ra);
} }
uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra) int mmu_idx, uintptr_t ra)
{ {
MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx); MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
return cpu_ldq_mmu(env, addr, oi, ra); return cpu_ldq_le_mmu(env, addr, oi, ra);
} }
void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
@@ -312,42 +87,42 @@ void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t ra) int mmu_idx, uintptr_t ra)
{ {
MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx); MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
cpu_stw_mmu(env, addr, val, oi, ra); cpu_stw_be_mmu(env, addr, val, oi, ra);
} }
void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t ra) int mmu_idx, uintptr_t ra)
{ {
MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx); MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
cpu_stl_mmu(env, addr, val, oi, ra); cpu_stl_be_mmu(env, addr, val, oi, ra);
} }
void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
int mmu_idx, uintptr_t ra) int mmu_idx, uintptr_t ra)
{ {
MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx); MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
cpu_stq_mmu(env, addr, val, oi, ra); cpu_stq_be_mmu(env, addr, val, oi, ra);
} }
void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t ra) int mmu_idx, uintptr_t ra)
{ {
MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx); MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
cpu_stw_mmu(env, addr, val, oi, ra); cpu_stw_le_mmu(env, addr, val, oi, ra);
} }
void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t ra) int mmu_idx, uintptr_t ra)
{ {
MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx); MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
cpu_stl_mmu(env, addr, val, oi, ra); cpu_stl_le_mmu(env, addr, val, oi, ra);
} }
void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
int mmu_idx, uintptr_t ra) int mmu_idx, uintptr_t ra)
{ {
MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx); MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
cpu_stq_mmu(env, addr, val, oi, ra); cpu_stq_le_mmu(env, addr, val, oi, ra);
} }
/*--------------------------*/ /*--------------------------*/

View File

@@ -1,9 +1,7 @@
tcg_ss = ss.source_set() tcg_ss = ss.source_set()
common_ss.add(when: 'CONFIG_TCG', if_true: files(
'cpu-exec-common.c',
))
tcg_ss.add(files( tcg_ss.add(files(
'tcg-all.c', 'tcg-all.c',
'cpu-exec-common.c',
'cpu-exec.c', 'cpu-exec.c',
'tb-maint.c', 'tb-maint.c',
'tcg-runtime-gvec.c', 'tcg-runtime-gvec.c',
@@ -12,24 +10,18 @@ tcg_ss.add(files(
'translator.c', 'translator.c',
)) ))
tcg_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-exec.c')) tcg_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-exec.c'))
tcg_ss.add(when: 'CONFIG_SYSTEM_ONLY', if_false: files('user-exec-stub.c')) tcg_ss.add(when: 'CONFIG_SOFTMMU', if_false: files('user-exec-stub.c'))
if get_option('plugins') tcg_ss.add(when: 'CONFIG_PLUGIN', if_true: [files('plugin-gen.c')])
tcg_ss.add(files('plugin-gen.c'))
endif
tcg_ss.add(when: libdw, if_true: files('debuginfo.c')) tcg_ss.add(when: libdw, if_true: files('debuginfo.c'))
tcg_ss.add(when: 'CONFIG_LINUX', if_true: files('perf.c')) tcg_ss.add(when: 'CONFIG_LINUX', if_true: files('perf.c'))
specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss) specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss)
specific_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files( specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files(
'cputlb.c', 'cputlb.c',
'hmp.c',
)) ))
system_ss.add(when: ['CONFIG_TCG'], if_true: files( tcg_module_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files(
'icount-common.c',
'monitor.c',
))
tcg_module_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files(
'tcg-accel-ops.c', 'tcg-accel-ops.c',
'tcg-accel-ops-mttcg.c', 'tcg-accel-ops-mttcg.c',
'tcg-accel-ops-icount.c', 'tcg-accel-ops-icount.c',

View File

@@ -1,244 +0,0 @@
/*
* SPDX-License-Identifier: LGPL-2.1-or-later
*
* QEMU TCG monitor
*
* Copyright (c) 2003-2005 Fabrice Bellard
*/
#include "qemu/osdep.h"
#include "qemu/accel.h"
#include "qemu/qht.h"
#include "qapi/error.h"
#include "qapi/type-helpers.h"
#include "qapi/qapi-commands-machine.h"
#include "monitor/monitor.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/tcg.h"
#include "tcg/tcg.h"
#include "internal-common.h"
#include "tb-context.h"
/*
 * Append icount clock-drift statistics to @buf.
 * No output is produced unless icount is enabled; the delay/advance
 * figures are only meaningful with icount_align_option.
 */
static void dump_drift_info(GString *buf)
{
    if (!icount_enabled()) {
        return;
    }

    g_string_append_printf(buf, "Host - Guest clock %"PRIi64" ms\n",
                           (cpu_get_clock() - icount_get()) / SCALE_MS);
    if (icount_align_option) {
        g_string_append_printf(buf, "Max guest delay %"PRIi64" ms\n",
                               -max_delay / SCALE_MS);
        g_string_append_printf(buf, "Max guest advance %"PRIi64" ms\n",
                               max_advance / SCALE_MS);
    } else {
        g_string_append_printf(buf, "Max guest delay NA\n");
        g_string_append_printf(buf, "Max guest advance NA\n");
    }
}
/* Append the current accelerator's settings to @buf. */
static void dump_accel_info(GString *buf)
{
    AccelState *accel = current_accel();
    bool one_insn_per_tb = object_property_get_bool(OBJECT(accel),
                                                    "one-insn-per-tb",
                                                    &error_fatal);

    /* Single formatted append producing the same two-line output. */
    g_string_append_printf(buf,
                           "Accelerator settings:\n"
                           "one-insn-per-tb: %s\n\n",
                           one_insn_per_tb ? "on" : "off");
}
/*
 * Append TB hash-table statistics to @buf: bucket utilisation plus
 * occupancy and chain-length histograms rendered by qdist_pr.
 * Does nothing when the table is empty (no head buckets).
 */
static void print_qht_statistics(struct qht_stats hst, GString *buf)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;

    if (!hst.head_buckets) {
        return;
    }
    g_string_append_printf(buf, "TB hash buckets %zu/%zu "
                           "(%0.2f%% head buckets used)\n",
                           hst.used_head_buckets, hst.head_buckets,
                           (double)hst.used_head_buckets /
                           hst.head_buckets * 100);

    /* Occupancy histogram, rendered as percentages. */
    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
    /* With a range of exactly 1, decimal labels would be noise. */
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    g_string_append_printf(buf, "TB hash occupancy %0.2f%% avg chain occ. "
                           "Histogram: %s\n",
                           qdist_avg(&hst.occupancy) * 100, hgram);
    g_free(hgram);

    /* Chain-length histogram, capped at 10 bins. */
    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        hgram_bins = 10;
    } else {
        /* Small range: let qdist pick bins and drop decimal/range labels. */
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    g_string_append_printf(buf, "TB hash avg chain %0.3f buckets. "
                           "Histogram: %s\n",
                           qdist_avg(&hst.chain), hgram);
    g_free(hgram);
}
/* Accumulator filled by tb_tree_stats_iter over every TB in the tree. */
struct tb_tree_stats {
    size_t nb_tbs;            /* number of TBs visited */
    size_t host_size;         /* total generated host-code bytes (tc.size) */
    size_t target_size;       /* total guest-code bytes (tb->size) */
    size_t max_target_size;   /* largest single TB in guest bytes */
    size_t direct_jmp_count;  /* TBs with at least one direct jump */
    size_t direct_jmp2_count; /* TBs with two direct jumps */
    size_t cross_page;        /* TBs spanning a second guest page */
};
/*
 * Per-TB visitor for tcg_tb_foreach: accumulate size and jump statistics
 * into the tb_tree_stats pointed to by @data. Always returns false so
 * the traversal continues over the whole tree.
 */
static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    struct tb_tree_stats *stats = data;

    stats->nb_tbs += 1;
    stats->host_size += tb->tc.size;
    stats->target_size += tb->size;

    if (stats->max_target_size < tb->size) {
        stats->max_target_size = tb->size;
    }
    /* A valid second page address means this TB crosses a page boundary. */
    if (tb->page_addr[1] != -1) {
        stats->cross_page += 1;
    }
    /* Count direct jumps; the second one implies the first exists. */
    if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {
        stats->direct_jmp_count += 1;
        if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
            stats->direct_jmp2_count += 1;
        }
    }
    return false;
}
/*
 * Sum the per-CPU TLB flush counters (full, partial, elided) across all
 * CPUs and return them through the three out-parameters.
 */
static void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t total_full = 0;
    size_t total_part = 0;
    size_t total_elide = 0;

    CPU_FOREACH(cpu) {
        total_full += qatomic_read(&cpu->neg.tlb.c.full_flush_count);
        total_part += qatomic_read(&cpu->neg.tlb.c.part_flush_count);
        total_elide += qatomic_read(&cpu->neg.tlb.c.elide_flush_count);
    }

    *pfull = total_full;
    *ppart = total_part;
    *pelide = total_elide;
}
/* Placeholder: appends a fixed notice that profiler data is unavailable. */
static void tcg_dump_info(GString *buf)
{
    g_string_append(buf, "[TCG profiler not compiled]\n");
}
/*
 * Append translation-buffer statistics to @buf: code-cache usage, TB
 * counts and sizes, jump statistics, hash-table stats, and TLB flush
 * counters.
 */
static void dump_exec_info(GString *buf)
{
    struct tb_tree_stats tst = {};
    struct qht_stats hst;
    size_t nb_tbs, flush_full, flush_part, flush_elide;

    tcg_tb_foreach(tb_tree_stats_iter, &tst);
    nb_tbs = tst.nb_tbs;
    /* XXX: avoid using doubles ? */
    g_string_append_printf(buf, "Translation buffer state:\n");
    /*
     * Report total code size including the padding and TB structs;
     * otherwise users might think "-accel tcg,tb-size" is not honoured.
     * For avg host size we use the precise numbers from tb_tree_stats though.
     */
    g_string_append_printf(buf, "gen code size %zu/%zu\n",
                           tcg_code_size(), tcg_code_capacity());
    g_string_append_printf(buf, "TB count %zu\n", nb_tbs);
    g_string_append_printf(buf, "TB avg target size %zu max=%zu bytes\n",
                           nb_tbs ? tst.target_size / nb_tbs : 0,
                           tst.max_target_size);
    g_string_append_printf(buf, "TB avg host size %zu bytes "
                           "(expansion ratio: %0.1f)\n",
                           nb_tbs ? tst.host_size / nb_tbs : 0,
                           tst.target_size ?
                           (double)tst.host_size / tst.target_size : 0);
    g_string_append_printf(buf, "cross page TB count %zu (%zu%%)\n",
                           tst.cross_page,
                           nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
    g_string_append_printf(buf, "direct jump count %zu (%zu%%) "
                           "(2 jumps=%zu %zu%%)\n",
                           tst.direct_jmp_count,
                           nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
                           tst.direct_jmp2_count,
                           nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);

    qht_statistics_init(&tb_ctx.htable, &hst);
    print_qht_statistics(hst, buf);
    qht_statistics_destroy(&hst);

    g_string_append_printf(buf, "\nStatistics:\n");
    g_string_append_printf(buf, "TB flush count %u\n",
                           qatomic_read(&tb_ctx.tb_flush_count));
    g_string_append_printf(buf, "TB invalidate count %u\n",
                           qatomic_read(&tb_ctx.tb_phys_invalidate_count));

    tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
    g_string_append_printf(buf, "TLB full flushes %zu\n", flush_full);
    g_string_append_printf(buf, "TLB partial flushes %zu\n", flush_part);
    g_string_append_printf(buf, "TLB elided flushes %zu\n", flush_elide);
    tcg_dump_info(buf);
}
/*
 * QMP handler for x-query-jit: return JIT statistics as human-readable
 * text, or set @errp and return NULL when TCG is not the accelerator.
 *
 * Defer the GString allocation until after the tcg_enabled() check so
 * the error path allocates nothing (g_autoptr cleanup ignores NULL).
 */
HumanReadableText *qmp_x_query_jit(Error **errp)
{
    g_autoptr(GString) buf = NULL;

    if (!tcg_enabled()) {
        error_setg(errp, "JIT information is only available with accel=tcg");
        return NULL;
    }

    buf = g_string_new("");
    dump_accel_info(buf);
    dump_exec_info(buf);
    dump_drift_info(buf);

    return human_readable_text_from_str(buf);
}
/* Placeholder: appends a fixed notice that opcode counts are unavailable. */
static void tcg_dump_op_count(GString *buf)
{
    g_string_append(buf, "[TCG profiler not compiled]\n");
}
/*
 * QMP handler for x-query-opcount: return opcode-count information, or
 * set @errp and return NULL when TCG is not the accelerator.
 *
 * As in qmp_x_query_jit, allocate the GString only after the
 * tcg_enabled() check so the error path performs no allocation.
 */
HumanReadableText *qmp_x_query_opcount(Error **errp)
{
    g_autoptr(GString) buf = NULL;

    if (!tcg_enabled()) {
        error_setg(errp,
                   "Opcode count information is only available with accel=tcg");
        return NULL;
    }

    buf = g_string_new("");
    tcg_dump_op_count(buf);

    return human_readable_text_from_str(buf);
}
/* Register the "info jit" and "info opcount" HMP commands at startup. */
static void hmp_tcg_register(void)
{
    monitor_register_hmp_info_hrt("jit", qmp_x_query_jit);
    monitor_register_hmp_info_hrt("opcount", qmp_x_query_opcount);
}

type_init(hmp_tcg_register);

View File

@@ -111,8 +111,6 @@ static void write_perfmap_entry(const void *start, size_t insn,
} }
static FILE *jitdump; static FILE *jitdump;
static size_t perf_marker_size;
static void *perf_marker = MAP_FAILED;
#define JITHEADER_MAGIC 0x4A695444 #define JITHEADER_MAGIC 0x4A695444
#define JITHEADER_VERSION 1 #define JITHEADER_VERSION 1
@@ -192,6 +190,7 @@ void perf_enable_jitdump(void)
{ {
struct jitheader header; struct jitheader header;
char jitdump_file[32]; char jitdump_file[32];
void *perf_marker;
if (!use_rt_clock) { if (!use_rt_clock) {
warn_report("CLOCK_MONOTONIC is not available, proceeding without jitdump"); warn_report("CLOCK_MONOTONIC is not available, proceeding without jitdump");
@@ -211,8 +210,7 @@ void perf_enable_jitdump(void)
* PERF_RECORD_MMAP or PERF_RECORD_MMAP2 event is of the form jit-%d.dump * PERF_RECORD_MMAP or PERF_RECORD_MMAP2 event is of the form jit-%d.dump
* and will process it as a jitdump file. * and will process it as a jitdump file.
*/ */
perf_marker_size = qemu_real_host_page_size(); perf_marker = mmap(NULL, qemu_real_host_page_size(), PROT_READ | PROT_EXEC,
perf_marker = mmap(NULL, perf_marker_size, PROT_READ | PROT_EXEC,
MAP_PRIVATE, fileno(jitdump), 0); MAP_PRIVATE, fileno(jitdump), 0);
if (perf_marker == MAP_FAILED) { if (perf_marker == MAP_FAILED) {
warn_report("Could not map %s: %s, proceeding without jitdump", warn_report("Could not map %s: %s, proceeding without jitdump",
@@ -313,8 +311,7 @@ void perf_report_code(uint64_t guest_pc, TranslationBlock *tb,
const void *start) const void *start)
{ {
struct debuginfo_query *q; struct debuginfo_query *q;
size_t insn, start_words; size_t insn;
uint64_t *gen_insn_data;
if (!perfmap && !jitdump) { if (!perfmap && !jitdump) {
return; return;
@@ -328,13 +325,10 @@ void perf_report_code(uint64_t guest_pc, TranslationBlock *tb,
debuginfo_lock(); debuginfo_lock();
/* Query debuginfo for each guest instruction. */ /* Query debuginfo for each guest instruction. */
gen_insn_data = tcg_ctx->gen_insn_data;
start_words = tcg_ctx->insn_start_words;
for (insn = 0; insn < tb->icount; insn++) { for (insn = 0; insn < tb->icount; insn++) {
/* FIXME: This replicates the restore_state_to_opc() logic. */ /* FIXME: This replicates the restore_state_to_opc() logic. */
q[insn].address = gen_insn_data[insn * start_words + 0]; q[insn].address = tcg_ctx->gen_insn_data[insn][0];
if (tb_cflags(tb) & CF_PCREL) { if (TARGET_TB_PCREL) {
q[insn].address |= (guest_pc & TARGET_PAGE_MASK); q[insn].address |= (guest_pc & TARGET_PAGE_MASK);
} else { } else {
#if defined(TARGET_I386) #if defined(TARGET_I386)
@@ -374,11 +368,6 @@ void perf_exit(void)
perfmap = NULL; perfmap = NULL;
} }
if (perf_marker != MAP_FAILED) {
munmap(perf_marker, perf_marker_size);
perf_marker = MAP_FAILED;
}
if (jitdump) { if (jitdump) {
fclose(jitdump); fclose(jitdump);
jitdump = NULL; jitdump = NULL;

View File

@@ -43,18 +43,11 @@
* CPU's index into a TCG temp, since the first callback did it already. * CPU's index into a TCG temp, since the first callback did it already.
*/ */
#include "qemu/osdep.h" #include "qemu/osdep.h"
#include "cpu.h"
#include "tcg/tcg.h" #include "tcg/tcg.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op.h" #include "tcg/tcg-op.h"
#include "exec/exec-all.h" #include "exec/exec-all.h"
#include "exec/plugin-gen.h" #include "exec/plugin-gen.h"
#include "exec/translator.h" #include "exec/translator.h"
#include "exec/helper-proto-common.h"
#define HELPER_H "accel/tcg/plugin-helpers.h"
#include "exec/helper-info.c.inc"
#undef HELPER_H
#ifdef CONFIG_SOFTMMU #ifdef CONFIG_SOFTMMU
# define CONFIG_SOFTMMU_GATE 1 # define CONFIG_SOFTMMU_GATE 1
@@ -98,13 +91,31 @@ void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
void *userdata) void *userdata)
{ } { }
/*
 * Emit the TCG ops for a plugin memory callback: load the vCPU index,
 * widen the guest address to 64 bits, and call the plugin helper.
 * The meminfo/udata constants emitted here act as placeholders that are
 * rewritten later when the callback is injected — TODO confirm against
 * the inject path.
 */
static void do_gen_mem_cb(TCGv vaddr, uint32_t info)
{
    TCGv_i32 cpu_index = tcg_temp_new_i32();
    TCGv_i32 meminfo = tcg_const_i32(info);
    TCGv_i64 vaddr64 = tcg_temp_new_i64();
    TCGv_ptr udata = tcg_const_ptr(NULL);

    /* cpu_env points at ArchCPU.env; rewind to CPUState to read cpu_index. */
    tcg_gen_ld_i32(cpu_index, cpu_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    tcg_gen_extu_tl_i64(vaddr64, vaddr);

    gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, vaddr64, udata);

    tcg_temp_free_ptr(udata);
    tcg_temp_free_i64(vaddr64);
    tcg_temp_free_i32(meminfo);
    tcg_temp_free_i32(cpu_index);
}
static void gen_empty_udata_cb(void) static void gen_empty_udata_cb(void)
{ {
TCGv_i32 cpu_index = tcg_temp_ebb_new_i32(); TCGv_i32 cpu_index = tcg_temp_new_i32();
TCGv_ptr udata = tcg_temp_ebb_new_ptr(); TCGv_ptr udata = tcg_const_ptr(NULL); /* will be overwritten later */
tcg_gen_movi_ptr(udata, 0); tcg_gen_ld_i32(cpu_index, cpu_env,
tcg_gen_ld_i32(cpu_index, tcg_env,
-offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index)); -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
gen_helper_plugin_vcpu_udata_cb(cpu_index, udata); gen_helper_plugin_vcpu_udata_cb(cpu_index, udata);
@@ -118,10 +129,9 @@ static void gen_empty_udata_cb(void)
*/ */
static void gen_empty_inline_cb(void) static void gen_empty_inline_cb(void)
{ {
TCGv_i64 val = tcg_temp_ebb_new_i64(); TCGv_i64 val = tcg_temp_new_i64();
TCGv_ptr ptr = tcg_temp_ebb_new_ptr(); TCGv_ptr ptr = tcg_const_ptr(NULL); /* overwritten later */
tcg_gen_movi_ptr(ptr, 0);
tcg_gen_ld_i64(val, ptr, 0); tcg_gen_ld_i64(val, ptr, 0);
/* pass an immediate != 0 so that it doesn't get optimized away */ /* pass an immediate != 0 so that it doesn't get optimized away */
tcg_gen_addi_i64(val, val, 0xdeadface); tcg_gen_addi_i64(val, val, 0xdeadface);
@@ -130,22 +140,9 @@ static void gen_empty_inline_cb(void)
tcg_temp_free_i64(val); tcg_temp_free_i64(val);
} }
static void gen_empty_mem_cb(TCGv_i64 addr, uint32_t info) static void gen_empty_mem_cb(TCGv addr, uint32_t info)
{ {
TCGv_i32 cpu_index = tcg_temp_ebb_new_i32(); do_gen_mem_cb(addr, info);
TCGv_i32 meminfo = tcg_temp_ebb_new_i32();
TCGv_ptr udata = tcg_temp_ebb_new_ptr();
tcg_gen_movi_i32(meminfo, info);
tcg_gen_movi_ptr(udata, 0);
tcg_gen_ld_i32(cpu_index, tcg_env,
-offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, addr, udata);
tcg_temp_free_ptr(udata);
tcg_temp_free_i32(meminfo);
tcg_temp_free_i32(cpu_index);
} }
/* /*
@@ -154,10 +151,10 @@ static void gen_empty_mem_cb(TCGv_i64 addr, uint32_t info)
*/ */
static void gen_empty_mem_helper(void) static void gen_empty_mem_helper(void)
{ {
TCGv_ptr ptr = tcg_temp_ebb_new_ptr(); TCGv_ptr ptr;
tcg_gen_movi_ptr(ptr, 0); ptr = tcg_const_ptr(NULL);
tcg_gen_st_ptr(ptr, tcg_env, offsetof(CPUState, plugin_mem_cbs) - tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
offsetof(ArchCPU, env)); offsetof(ArchCPU, env));
tcg_temp_free_ptr(ptr); tcg_temp_free_ptr(ptr);
} }
@@ -200,17 +197,35 @@ static void plugin_gen_empty_callback(enum plugin_gen_from from)
} }
} }
void plugin_gen_empty_mem_callback(TCGv_i64 addr, uint32_t info) union mem_gen_fn {
void (*mem_fn)(TCGv, uint32_t);
void (*inline_fn)(void);
};
static void gen_mem_wrapped(enum plugin_gen_cb type,
const union mem_gen_fn *f, TCGv addr,
uint32_t info, bool is_mem)
{ {
enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info); enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info);
gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_MEM, rw); gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, rw);
gen_empty_mem_cb(addr, info); if (is_mem) {
f->mem_fn(addr, info);
} else {
f->inline_fn();
}
tcg_gen_plugin_cb_end(); tcg_gen_plugin_cb_end();
}
gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_INLINE, rw); void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info)
gen_empty_inline_cb(); {
tcg_gen_plugin_cb_end(); union mem_gen_fn fn;
fn.mem_fn = gen_empty_mem_cb;
gen_mem_wrapped(PLUGIN_GEN_CB_MEM, &fn, addr, info, true);
fn.inline_fn = gen_empty_inline_cb;
gen_mem_wrapped(PLUGIN_GEN_CB_INLINE, &fn, 0, info, false);
} }
static TCGOp *find_op(TCGOp *op, TCGOpcode opc) static TCGOp *find_op(TCGOp *op, TCGOpcode opc)
@@ -260,6 +275,33 @@ static TCGOp *copy_op(TCGOp **begin_op, TCGOp *op, TCGOpcode opc)
return op; return op;
} }
/*
 * Copy the op(s) that zero-extend i32 to i64: a single extu op on 64-bit
 * hosts, or a mov pair (low word + zero high word) on 32-bit hosts.
 * Advances *begin_op past the copied template ops and returns the last
 * op emitted.
 */
static TCGOp *copy_extu_i32_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        /* mov_i32 w/ $0 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
    } else {
        /* extu_i32_i64 */
        op = copy_op(begin_op, op, INDEX_op_extu_i32_i64);
    }
    return op;
}
/*
 * Copy a 64-bit move: one mov_i64 on 64-bit hosts, or two mov_i32 ops
 * (the two halves) on 32-bit hosts. Advances *begin_op and returns the
 * last op emitted.
 */
static TCGOp *copy_mov_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
    } else {
        /* mov_i64 */
        op = copy_op(begin_op, op, INDEX_op_mov_i64);
    }
    return op;
}
static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr) static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr)
{ {
if (UINTPTR_MAX == UINT32_MAX) { if (UINTPTR_MAX == UINT32_MAX) {
@@ -274,6 +316,18 @@ static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr)
return op; return op;
} }
/*
 * Copy the widening of a target-long value to i64: a zero-extension when
 * the guest is 32-bit, otherwise a plain 64-bit move. Advances *begin_op
 * and returns the last op emitted.
 */
static TCGOp *copy_extu_tl_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TARGET_LONG_BITS == 32) {
        /* extu_i32_i64 */
        op = copy_extu_i32_i64(begin_op, op);
    } else {
        /* mov_i64 */
        op = copy_mov_i64(begin_op, op);
    }
    return op;
}
static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op) static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op)
{ {
if (TCG_TARGET_REG_BITS == 32) { if (TCG_TARGET_REG_BITS == 32) {
@@ -327,7 +381,8 @@ static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op)
return op; return op;
} }
static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *func, int *cb_idx) static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *empty_func,
void *func, int *cb_idx)
{ {
TCGOp *old_op; TCGOp *old_op;
int func_idx; int func_idx;
@@ -371,7 +426,8 @@ static TCGOp *append_udata_cb(const struct qemu_plugin_dyn_cb *cb,
} }
/* call */ /* call */
op = copy_call(&begin_op, op, cb->f.vcpu_udata, cb_idx); op = copy_call(&begin_op, op, HELPER(plugin_vcpu_udata_cb),
cb->f.vcpu_udata, cb_idx);
return op; return op;
} }
@@ -416,9 +472,13 @@ static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb,
tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32); tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
} }
/* extu_tl_i64 */
op = copy_extu_tl_i64(&begin_op, op);
if (type == PLUGIN_GEN_CB_MEM) { if (type == PLUGIN_GEN_CB_MEM) {
/* call */ /* call */
op = copy_call(&begin_op, op, cb->f.vcpu_udata, cb_idx); op = copy_call(&begin_op, op, HELPER(plugin_vcpu_mem_cb),
cb->f.vcpu_udata, cb_idx);
} }
return op; return op;
@@ -566,6 +626,8 @@ static void inject_mem_disable_helper(struct qemu_plugin_insn *plugin_insn,
/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */ /* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
void plugin_gen_disable_mem_helpers(void) void plugin_gen_disable_mem_helpers(void)
{ {
TCGv_ptr ptr;
/* /*
* We could emit the clearing unconditionally and be done. However, this can * We could emit the clearing unconditionally and be done. However, this can
* be wasteful if for instance plugins don't track memory accesses, or if * be wasteful if for instance plugins don't track memory accesses, or if
@@ -578,8 +640,10 @@ void plugin_gen_disable_mem_helpers(void)
if (!tcg_ctx->plugin_tb->mem_helper) { if (!tcg_ctx->plugin_tb->mem_helper) {
return; return;
} }
tcg_gen_st_ptr(tcg_constant_ptr(NULL), tcg_env, ptr = tcg_const_ptr(NULL);
offsetof(CPUState, plugin_mem_cbs) - offsetof(ArchCPU, env)); tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
offsetof(ArchCPU, env));
tcg_temp_free_ptr(ptr);
} }
static void plugin_gen_tb_udata(const struct qemu_plugin_tb *ptb, static void plugin_gen_tb_udata(const struct qemu_plugin_tb *ptb,
@@ -846,7 +910,7 @@ void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
} else { } else {
if (ptb->vaddr2 == -1) { if (ptb->vaddr2 == -1) {
ptb->vaddr2 = TARGET_PAGE_ALIGN(db->pc_first); ptb->vaddr2 = TARGET_PAGE_ALIGN(db->pc_first);
get_page_addr_code_hostp(cpu_env(cpu), ptb->vaddr2, &ptb->haddr2); get_page_addr_code_hostp(cpu->env_ptr, ptb->vaddr2, &ptb->haddr2);
} }
pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2; pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2;
} }
@@ -863,14 +927,10 @@ void plugin_gen_insn_end(void)
* do any clean-up here and make sure things are reset in * do any clean-up here and make sure things are reset in
* plugin_gen_tb_start. * plugin_gen_tb_start.
*/ */
void plugin_gen_tb_end(CPUState *cpu, size_t num_insns) void plugin_gen_tb_end(CPUState *cpu)
{ {
struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb; struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
/* translator may have removed instructions, update final count */
g_assert(num_insns <= ptb->n);
ptb->n = num_insns;
/* collect instrumentation requests */ /* collect instrumentation requests */
qemu_plugin_tb_trans_cb(cpu, ptb); qemu_plugin_tb_trans_cb(cpu, ptb);

View File

@@ -35,16 +35,16 @@
#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1) #define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE) #define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
static inline unsigned int tb_jmp_cache_hash_page(vaddr pc) static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{ {
vaddr tmp; target_ulong tmp;
tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)); tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK; return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
} }
static inline unsigned int tb_jmp_cache_hash_func(vaddr pc) static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{ {
vaddr tmp; target_ulong tmp;
tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)); tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK) return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
| (tmp & TB_JMP_ADDR_MASK)); | (tmp & TB_JMP_ADDR_MASK));
@@ -53,7 +53,7 @@ static inline unsigned int tb_jmp_cache_hash_func(vaddr pc)
#else #else
/* In user-mode we can get better hashing because we do not have a TLB */ /* In user-mode we can get better hashing because we do not have a TLB */
static inline unsigned int tb_jmp_cache_hash_func(vaddr pc) static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{ {
return (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1); return (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1);
} }
@@ -61,10 +61,10 @@ static inline unsigned int tb_jmp_cache_hash_func(vaddr pc)
#endif /* CONFIG_SOFTMMU */ #endif /* CONFIG_SOFTMMU */
static inline static inline
uint32_t tb_hash_func(tb_page_addr_t phys_pc, vaddr pc, uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc, uint32_t flags,
uint32_t flags, uint64_t flags2, uint32_t cf_mask) uint32_t cf_mask, uint32_t trace_vcpu_dstate)
{ {
return qemu_xxhash8(phys_pc, pc, flags2, flags, cf_mask); return qemu_xxhash7(phys_pc, pc, flags, cf_mask, trace_vcpu_dstate);
} }
#endif #endif

View File

@@ -14,15 +14,53 @@
/* /*
* Accessed in parallel; all accesses to 'tb' must be atomic. * Accessed in parallel; all accesses to 'tb' must be atomic.
* For CF_PCREL, accesses to 'pc' must be protected by a * For TARGET_TB_PCREL, accesses to 'pc' must be protected by
* load_acquire/store_release to 'tb'. * a load_acquire/store_release to 'tb'.
*/ */
struct CPUJumpCache { struct CPUJumpCache {
struct rcu_head rcu; struct rcu_head rcu;
struct { struct {
TranslationBlock *tb; TranslationBlock *tb;
vaddr pc; #if TARGET_TB_PCREL
target_ulong pc;
#endif
} array[TB_JMP_CACHE_SIZE]; } array[TB_JMP_CACHE_SIZE];
}; };
static inline TranslationBlock *
tb_jmp_cache_get_tb(CPUJumpCache *jc, uint32_t hash)
{
#if TARGET_TB_PCREL
/* Use acquire to ensure current load of pc from jc. */
return qatomic_load_acquire(&jc->array[hash].tb);
#else
/* Use rcu_read to ensure current load of pc from *tb. */
return qatomic_rcu_read(&jc->array[hash].tb);
#endif
}
static inline target_ulong
tb_jmp_cache_get_pc(CPUJumpCache *jc, uint32_t hash, TranslationBlock *tb)
{
#if TARGET_TB_PCREL
return jc->array[hash].pc;
#else
return tb_pc(tb);
#endif
}
static inline void
tb_jmp_cache_set(CPUJumpCache *jc, uint32_t hash,
TranslationBlock *tb, target_ulong pc)
{
#if TARGET_TB_PCREL
jc->array[hash].pc = pc;
/* Use store_release on tb to ensure pc is written first. */
qatomic_store_release(&jc->array[hash].tb, tb);
#else
/* Use the pc value already stored in tb->pc. */
qatomic_set(&jc->array[hash].tb, tb);
#endif
}
#endif /* ACCEL_TCG_TB_JMP_CACHE_H */ #endif /* ACCEL_TCG_TB_JMP_CACHE_H */

View File

@@ -1,5 +1,5 @@
/* /*
* Translation Block Maintenance * Translation Block Maintaince
* *
* Copyright (c) 2003 Fabrice Bellard * Copyright (c) 2003 Fabrice Bellard
* *
@@ -19,18 +19,15 @@
#include "qemu/osdep.h" #include "qemu/osdep.h"
#include "qemu/interval-tree.h" #include "qemu/interval-tree.h"
#include "qemu/qtree.h"
#include "exec/cputlb.h" #include "exec/cputlb.h"
#include "exec/log.h" #include "exec/log.h"
#include "exec/exec-all.h" #include "exec/exec-all.h"
#include "exec/tb-flush.h"
#include "exec/translate-all.h" #include "exec/translate-all.h"
#include "sysemu/tcg.h" #include "sysemu/tcg.h"
#include "tcg/tcg.h" #include "tcg/tcg.h"
#include "tb-hash.h" #include "tb-hash.h"
#include "tb-context.h" #include "tb-context.h"
#include "internal-common.h" #include "internal.h"
#include "internal-target.h"
/* List iterators for lists of tagged pointers in TranslationBlock. */ /* List iterators for lists of tagged pointers in TranslationBlock. */
@@ -47,10 +44,11 @@ static bool tb_cmp(const void *ap, const void *bp)
const TranslationBlock *a = ap; const TranslationBlock *a = ap;
const TranslationBlock *b = bp; const TranslationBlock *b = bp;
return ((tb_cflags(a) & CF_PCREL || a->pc == b->pc) && return ((TARGET_TB_PCREL || tb_pc(a) == tb_pc(b)) &&
a->cs_base == b->cs_base && a->cs_base == b->cs_base &&
a->flags == b->flags && a->flags == b->flags &&
(tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) && (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
tb_page_addr0(a) == tb_page_addr0(b) && tb_page_addr0(a) == tb_page_addr0(b) &&
tb_page_addr1(a) == tb_page_addr1(b)); tb_page_addr1(a) == tb_page_addr1(b));
} }
@@ -71,7 +69,17 @@ typedef struct PageDesc PageDesc;
*/ */
#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock()) #define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
static inline void tb_lock_pages(const TranslationBlock *tb) { } static inline void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
PageDesc **ret_p2, tb_page_addr_t phys2,
bool alloc)
{
*ret_p1 = NULL;
*ret_p2 = NULL;
}
static inline void page_unlock(PageDesc *pd) { }
static inline void page_lock_tb(const TranslationBlock *tb) { }
static inline void page_unlock_tb(const TranslationBlock *tb) { }
/* /*
* For user-only, since we are protecting all of memory with a single lock, * For user-only, since we are protecting all of memory with a single lock,
@@ -87,9 +95,9 @@ static void tb_remove_all(void)
} }
/* Call with mmap_lock held. */ /* Call with mmap_lock held. */
static void tb_record(TranslationBlock *tb) static void tb_record(TranslationBlock *tb, PageDesc *p1, PageDesc *p2)
{ {
vaddr addr; target_ulong addr;
int flags; int flags;
assert_memory_lock(); assert_memory_lock();
@@ -117,29 +125,29 @@ static void tb_remove(TranslationBlock *tb)
} }
/* TODO: For now, still shared with translate-all.c for system mode. */ /* TODO: For now, still shared with translate-all.c for system mode. */
#define PAGE_FOR_EACH_TB(start, last, pagedesc, T, N) \ #define PAGE_FOR_EACH_TB(start, end, pagedesc, T, N) \
for (T = foreach_tb_first(start, last), \ for (T = foreach_tb_first(start, end), \
N = foreach_tb_next(T, start, last); \ N = foreach_tb_next(T, start, end); \
T != NULL; \ T != NULL; \
T = N, N = foreach_tb_next(N, start, last)) T = N, N = foreach_tb_next(N, start, end))
typedef TranslationBlock *PageForEachNext; typedef TranslationBlock *PageForEachNext;
static PageForEachNext foreach_tb_first(tb_page_addr_t start, static PageForEachNext foreach_tb_first(tb_page_addr_t start,
tb_page_addr_t last) tb_page_addr_t end)
{ {
IntervalTreeNode *n = interval_tree_iter_first(&tb_root, start, last); IntervalTreeNode *n = interval_tree_iter_first(&tb_root, start, end - 1);
return n ? container_of(n, TranslationBlock, itree) : NULL; return n ? container_of(n, TranslationBlock, itree) : NULL;
} }
static PageForEachNext foreach_tb_next(PageForEachNext tb, static PageForEachNext foreach_tb_next(PageForEachNext tb,
tb_page_addr_t start, tb_page_addr_t start,
tb_page_addr_t last) tb_page_addr_t end)
{ {
IntervalTreeNode *n; IntervalTreeNode *n;
if (tb) { if (tb) {
n = interval_tree_iter_next(&tb->itree, start, last); n = interval_tree_iter_next(&tb->itree, start, end - 1);
if (n) { if (n) {
return container_of(n, TranslationBlock, itree); return container_of(n, TranslationBlock, itree);
} }
@@ -208,12 +216,13 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc)
{ {
PageDesc *pd; PageDesc *pd;
void **lp; void **lp;
int i;
/* Level 1. Always allocated. */ /* Level 1. Always allocated. */
lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1)); lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
/* Level 2..N-1. */ /* Level 2..N-1. */
for (int i = v_l2_levels; i > 0; i--) { for (i = v_l2_levels; i > 0; i--) {
void **p = qatomic_rcu_read(lp); void **p = qatomic_rcu_read(lp);
if (p == NULL) { if (p == NULL) {
@@ -304,12 +313,12 @@ struct page_entry {
* See also: page_collection_lock(). * See also: page_collection_lock().
*/ */
struct page_collection { struct page_collection {
QTree *tree; GTree *tree;
struct page_entry *max; struct page_entry *max;
}; };
typedef int PageForEachNext; typedef int PageForEachNext;
#define PAGE_FOR_EACH_TB(start, last, pagedesc, tb, n) \ #define PAGE_FOR_EACH_TB(start, end, pagedesc, tb, n) \
TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next) TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
#ifdef CONFIG_DEBUG_TCG #ifdef CONFIG_DEBUG_TCG
@@ -381,108 +390,12 @@ static void page_lock(PageDesc *pd)
qemu_spin_lock(&pd->lock); qemu_spin_lock(&pd->lock);
} }
/* Like qemu_spin_trylock, returns false on success */
static bool page_trylock(PageDesc *pd)
{
bool busy = qemu_spin_trylock(&pd->lock);
if (!busy) {
page_lock__debug(pd);
}
return busy;
}
static void page_unlock(PageDesc *pd) static void page_unlock(PageDesc *pd)
{ {
qemu_spin_unlock(&pd->lock); qemu_spin_unlock(&pd->lock);
page_unlock__debug(pd); page_unlock__debug(pd);
} }
void tb_lock_page0(tb_page_addr_t paddr)
{
page_lock(page_find_alloc(paddr >> TARGET_PAGE_BITS, true));
}
void tb_lock_page1(tb_page_addr_t paddr0, tb_page_addr_t paddr1)
{
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
PageDesc *pd0, *pd1;
if (pindex0 == pindex1) {
/* Identical pages, and the first page is already locked. */
return;
}
pd1 = page_find_alloc(pindex1, true);
if (pindex0 < pindex1) {
/* Correct locking order, we may block. */
page_lock(pd1);
return;
}
/* Incorrect locking order, we cannot block lest we deadlock. */
if (!page_trylock(pd1)) {
return;
}
/*
* Drop the lock on page0 and get both page locks in the right order.
* Restart translation via longjmp.
*/
pd0 = page_find_alloc(pindex0, false);
page_unlock(pd0);
page_lock(pd1);
page_lock(pd0);
siglongjmp(tcg_ctx->jmp_trans, -3);
}
void tb_unlock_page1(tb_page_addr_t paddr0, tb_page_addr_t paddr1)
{
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
if (pindex0 != pindex1) {
page_unlock(page_find_alloc(pindex1, false));
}
}
static void tb_lock_pages(TranslationBlock *tb)
{
tb_page_addr_t paddr0 = tb_page_addr0(tb);
tb_page_addr_t paddr1 = tb_page_addr1(tb);
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
if (unlikely(paddr0 == -1)) {
return;
}
if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
if (pindex0 < pindex1) {
page_lock(page_find_alloc(pindex0, true));
page_lock(page_find_alloc(pindex1, true));
return;
}
page_lock(page_find_alloc(pindex1, true));
}
page_lock(page_find_alloc(pindex0, true));
}
void tb_unlock_pages(TranslationBlock *tb)
{
tb_page_addr_t paddr0 = tb_page_addr0(tb);
tb_page_addr_t paddr1 = tb_page_addr1(tb);
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
if (unlikely(paddr0 == -1)) {
return;
}
if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
page_unlock(page_find_alloc(pindex1, false));
}
page_unlock(page_find_alloc(pindex0, false));
}
static inline struct page_entry * static inline struct page_entry *
page_entry_new(PageDesc *pd, tb_page_addr_t index) page_entry_new(PageDesc *pd, tb_page_addr_t index)
{ {
@@ -506,10 +419,13 @@ static void page_entry_destroy(gpointer p)
/* returns false on success */ /* returns false on success */
static bool page_entry_trylock(struct page_entry *pe) static bool page_entry_trylock(struct page_entry *pe)
{ {
bool busy = page_trylock(pe->pd); bool busy;
busy = qemu_spin_trylock(&pe->pd->lock);
if (!busy) { if (!busy) {
g_assert(!pe->locked); g_assert(!pe->locked);
pe->locked = true; pe->locked = true;
page_lock__debug(pe->pd);
} }
return busy; return busy;
} }
@@ -550,7 +466,7 @@ static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
struct page_entry *pe; struct page_entry *pe;
PageDesc *pd; PageDesc *pd;
pe = q_tree_lookup(set->tree, &index); pe = g_tree_lookup(set->tree, &index);
if (pe) { if (pe) {
return false; return false;
} }
@@ -561,7 +477,7 @@ static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
} }
pe = page_entry_new(pd, index); pe = page_entry_new(pd, index);
q_tree_insert(set->tree, &pe->index, pe); g_tree_insert(set->tree, &pe->index, pe);
/* /*
* If this is either (1) the first insertion or (2) a page whose index * If this is either (1) the first insertion or (2) a page whose index
@@ -593,30 +509,30 @@ static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
} }
/* /*
* Lock a range of pages ([@start,@last]) as well as the pages of all * Lock a range of pages ([@start,@end[) as well as the pages of all
* intersecting TBs. * intersecting TBs.
* Locking order: acquire locks in ascending order of page index. * Locking order: acquire locks in ascending order of page index.
*/ */
static struct page_collection *page_collection_lock(tb_page_addr_t start, static struct page_collection *page_collection_lock(tb_page_addr_t start,
tb_page_addr_t last) tb_page_addr_t end)
{ {
struct page_collection *set = g_malloc(sizeof(*set)); struct page_collection *set = g_malloc(sizeof(*set));
tb_page_addr_t index; tb_page_addr_t index;
PageDesc *pd; PageDesc *pd;
start >>= TARGET_PAGE_BITS; start >>= TARGET_PAGE_BITS;
last >>= TARGET_PAGE_BITS; end >>= TARGET_PAGE_BITS;
g_assert(start <= last); g_assert(start <= end);
set->tree = q_tree_new_full(tb_page_addr_cmp, NULL, NULL, set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
page_entry_destroy); page_entry_destroy);
set->max = NULL; set->max = NULL;
assert_no_pages_locked(); assert_no_pages_locked();
retry: retry:
q_tree_foreach(set->tree, page_entry_lock, NULL); g_tree_foreach(set->tree, page_entry_lock, NULL);
for (index = start; index <= last; index++) { for (index = start; index <= end; index++) {
TranslationBlock *tb; TranslationBlock *tb;
PageForEachNext n; PageForEachNext n;
@@ -625,7 +541,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start,
continue; continue;
} }
if (page_trylock_add(set, index << TARGET_PAGE_BITS)) { if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
q_tree_foreach(set->tree, page_entry_unlock, NULL); g_tree_foreach(set->tree, page_entry_unlock, NULL);
goto retry; goto retry;
} }
assert_page_locked(pd); assert_page_locked(pd);
@@ -634,7 +550,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start,
(tb_page_addr1(tb) != -1 && (tb_page_addr1(tb) != -1 &&
page_trylock_add(set, tb_page_addr1(tb)))) { page_trylock_add(set, tb_page_addr1(tb)))) {
/* drop all locks, and reacquire in order */ /* drop all locks, and reacquire in order */
q_tree_foreach(set->tree, page_entry_unlock, NULL); g_tree_foreach(set->tree, page_entry_unlock, NULL);
goto retry; goto retry;
} }
} }
@@ -645,7 +561,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start,
static void page_collection_unlock(struct page_collection *set) static void page_collection_unlock(struct page_collection *set)
{ {
/* entries are unlocked and freed via page_entry_destroy */ /* entries are unlocked and freed via page_entry_destroy */
q_tree_destroy(set->tree); g_tree_destroy(set->tree);
g_free(set); g_free(set);
} }
@@ -687,7 +603,8 @@ static void tb_remove_all(void)
* Add the tb in the target page and protect it if necessary. * Add the tb in the target page and protect it if necessary.
* Called with @p->lock held. * Called with @p->lock held.
*/ */
static void tb_page_add(PageDesc *p, TranslationBlock *tb, unsigned int n) static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
unsigned int n)
{ {
bool page_already_protected; bool page_already_protected;
@@ -707,21 +624,15 @@ static void tb_page_add(PageDesc *p, TranslationBlock *tb, unsigned int n)
} }
} }
static void tb_record(TranslationBlock *tb) static void tb_record(TranslationBlock *tb, PageDesc *p1, PageDesc *p2)
{ {
tb_page_addr_t paddr0 = tb_page_addr0(tb); tb_page_add(p1, tb, 0);
tb_page_addr_t paddr1 = tb_page_addr1(tb); if (unlikely(p2)) {
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS; tb_page_add(p2, tb, 1);
tb_page_addr_t pindex1 = paddr0 >> TARGET_PAGE_BITS;
assert(paddr0 != -1);
if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
tb_page_add(page_find_alloc(pindex1, false), tb, 1);
} }
tb_page_add(page_find_alloc(pindex0, false), tb, 0);
} }
static void tb_page_remove(PageDesc *pd, TranslationBlock *tb) static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
{ {
TranslationBlock *tb1; TranslationBlock *tb1;
uintptr_t *pprev; uintptr_t *pprev;
@@ -741,16 +652,74 @@ static void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
static void tb_remove(TranslationBlock *tb) static void tb_remove(TranslationBlock *tb)
{ {
tb_page_addr_t paddr0 = tb_page_addr0(tb); PageDesc *pd;
tb_page_addr_t paddr1 = tb_page_addr1(tb);
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
tb_page_addr_t pindex1 = paddr0 >> TARGET_PAGE_BITS;
assert(paddr0 != -1); pd = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
if (unlikely(paddr1 != -1) && pindex0 != pindex1) { tb_page_remove(pd, tb);
tb_page_remove(page_find_alloc(pindex1, false), tb); if (unlikely(tb->page_addr[1] != -1)) {
pd = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
tb_page_remove(pd, tb);
}
}
static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc)
{
PageDesc *p1, *p2;
tb_page_addr_t page1;
tb_page_addr_t page2;
assert_memory_lock();
g_assert(phys1 != -1);
page1 = phys1 >> TARGET_PAGE_BITS;
page2 = phys2 >> TARGET_PAGE_BITS;
p1 = page_find_alloc(page1, alloc);
if (ret_p1) {
*ret_p1 = p1;
}
if (likely(phys2 == -1)) {
page_lock(p1);
return;
} else if (page1 == page2) {
page_lock(p1);
if (ret_p2) {
*ret_p2 = p1;
}
return;
}
p2 = page_find_alloc(page2, alloc);
if (ret_p2) {
*ret_p2 = p2;
}
if (page1 < page2) {
page_lock(p1);
page_lock(p2);
} else {
page_lock(p2);
page_lock(p1);
}
}
/* lock the page(s) of a TB in the correct acquisition order */
static void page_lock_tb(const TranslationBlock *tb)
{
page_lock_pair(NULL, tb_page_addr0(tb), NULL, tb_page_addr1(tb), false);
}
static void page_unlock_tb(const TranslationBlock *tb)
{
PageDesc *p1 = page_find(tb_page_addr0(tb) >> TARGET_PAGE_BITS);
page_unlock(p1);
if (unlikely(tb_page_addr1(tb) != -1)) {
PageDesc *p2 = page_find(tb_page_addr1(tb) >> TARGET_PAGE_BITS);
if (p2 != p1) {
page_unlock(p2);
}
} }
tb_page_remove(page_find_alloc(pindex0, false), tb);
} }
#endif /* CONFIG_USER_ONLY */ #endif /* CONFIG_USER_ONLY */
@@ -775,7 +744,7 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
tcg_region_reset_all(); tcg_region_reset_all();
/* XXX: flush processor icache at this point if cache flush is expensive */ /* XXX: flush processor icache at this point if cache flush is expensive */
qatomic_inc(&tb_ctx.tb_flush_count); qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
done: done:
mmap_unlock(); mmap_unlock();
@@ -787,9 +756,9 @@ done:
void tb_flush(CPUState *cpu) void tb_flush(CPUState *cpu)
{ {
if (tcg_enabled()) { if (tcg_enabled()) {
unsigned tb_flush_count = qatomic_read(&tb_ctx.tb_flush_count); unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);
if (cpu_in_serial_context(cpu)) { if (cpu_in_exclusive_context(cpu)) {
do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count)); do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
} else { } else {
async_safe_run_on_cpu(cpu, do_tb_flush, async_safe_run_on_cpu(cpu, do_tb_flush,
@@ -878,13 +847,13 @@ static void tb_jmp_cache_inval_tb(TranslationBlock *tb)
{ {
CPUState *cpu; CPUState *cpu;
if (tb_cflags(tb) & CF_PCREL) { if (TARGET_TB_PCREL) {
/* A TB may be at any virtual address */ /* A TB may be at any virtual address */
CPU_FOREACH(cpu) { CPU_FOREACH(cpu) {
tcg_flush_jmp_cache(cpu); tcg_flush_jmp_cache(cpu);
} }
} else { } else {
uint32_t h = tb_jmp_cache_hash_func(tb->pc); uint32_t h = tb_jmp_cache_hash_func(tb_pc(tb));
CPU_FOREACH(cpu) { CPU_FOREACH(cpu) {
CPUJumpCache *jc = cpu->tb_jmp_cache; CPUJumpCache *jc = cpu->tb_jmp_cache;
@@ -916,8 +885,8 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
/* remove the TB from the hash list */ /* remove the TB from the hash list */
phys_pc = tb_page_addr0(tb); phys_pc = tb_page_addr0(tb);
h = tb_hash_func(phys_pc, (orig_cflags & CF_PCREL ? 0 : tb->pc), h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
tb->flags, tb->cs_base, orig_cflags); tb->flags, orig_cflags, tb->trace_vcpu_dstate);
if (!qht_remove(&tb_ctx.htable, tb, h)) { if (!qht_remove(&tb_ctx.htable, tb, h)) {
return; return;
} }
@@ -955,16 +924,18 @@ static void tb_phys_invalidate__locked(TranslationBlock *tb)
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{ {
if (page_addr == -1 && tb_page_addr0(tb) != -1) { if (page_addr == -1 && tb_page_addr0(tb) != -1) {
tb_lock_pages(tb); page_lock_tb(tb);
do_tb_phys_invalidate(tb, true); do_tb_phys_invalidate(tb, true);
tb_unlock_pages(tb); page_unlock_tb(tb);
} else { } else {
do_tb_phys_invalidate(tb, false); do_tb_phys_invalidate(tb, false);
} }
} }
/* /*
* Add a new TB and link it to the physical page tables. * Add a new TB and link it to the physical page tables. phys_page2 is
* (-1) to indicate that only one page contains the TB.
*
* Called with mmap_lock held for user-mode emulation. * Called with mmap_lock held for user-mode emulation.
* *
* Returns a pointer @tb, or a pointer to an existing TB that matches @tb. * Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
@@ -972,29 +943,43 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
* for the same block of guest code that @tb corresponds to. In that case, * for the same block of guest code that @tb corresponds to. In that case,
* the caller should discard the original @tb, and use instead the returned TB. * the caller should discard the original @tb, and use instead the returned TB.
*/ */
TranslationBlock *tb_link_page(TranslationBlock *tb) TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
tb_page_addr_t phys_page2)
{ {
PageDesc *p;
PageDesc *p2 = NULL;
void *existing_tb = NULL; void *existing_tb = NULL;
uint32_t h; uint32_t h;
assert_memory_lock(); assert_memory_lock();
tcg_debug_assert(!(tb->cflags & CF_INVALID)); tcg_debug_assert(!(tb->cflags & CF_INVALID));
tb_record(tb); /*
* Add the TB to the page list, acquiring first the pages's locks.
* We keep the locks held until after inserting the TB in the hash table,
* so that if the insertion fails we know for sure that the TBs are still
* in the page descriptors.
* Note that inserting into the hash table first isn't an option, since
* we can only insert TBs that are fully initialized.
*/
page_lock_pair(&p, phys_pc, &p2, phys_page2, true);
tb_record(tb, p, p2);
/* add in the hash table */ /* add in the hash table */
h = tb_hash_func(tb_page_addr0(tb), (tb->cflags & CF_PCREL ? 0 : tb->pc), h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
tb->flags, tb->cs_base, tb->cflags); tb->flags, tb->cflags, tb->trace_vcpu_dstate);
qht_insert(&tb_ctx.htable, tb, h, &existing_tb); qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
/* remove TB from the page(s) if we couldn't insert it */ /* remove TB from the page(s) if we couldn't insert it */
if (unlikely(existing_tb)) { if (unlikely(existing_tb)) {
tb_remove(tb); tb_remove(tb);
tb_unlock_pages(tb); tb = existing_tb;
return existing_tb;
} }
tb_unlock_pages(tb); if (p2 && p2 != p) {
page_unlock(p2);
}
page_unlock(p);
return tb; return tb;
} }
@@ -1004,14 +989,14 @@ TranslationBlock *tb_link_page(TranslationBlock *tb)
* Called with mmap_lock held for user-mode emulation. * Called with mmap_lock held for user-mode emulation.
* NOTE: this function must not be called while a TB is running. * NOTE: this function must not be called while a TB is running.
*/ */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last) void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{ {
TranslationBlock *tb; TranslationBlock *tb;
PageForEachNext n; PageForEachNext n;
assert_memory_lock(); assert_memory_lock();
PAGE_FOR_EACH_TB(start, last, unused, tb, n) { PAGE_FOR_EACH_TB(start, end, unused, tb, n) {
tb_phys_invalidate__locked(tb); tb_phys_invalidate__locked(tb);
} }
} }
@@ -1023,11 +1008,11 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
*/ */
void tb_invalidate_phys_page(tb_page_addr_t addr) void tb_invalidate_phys_page(tb_page_addr_t addr)
{ {
tb_page_addr_t start, last; tb_page_addr_t start, end;
start = addr & TARGET_PAGE_MASK; start = addr & TARGET_PAGE_MASK;
last = addr | ~TARGET_PAGE_MASK; end = start + TARGET_PAGE_SIZE;
tb_invalidate_phys_range(start, last); tb_invalidate_phys_range(start, end);
} }
/* /*
@@ -1043,7 +1028,6 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
bool current_tb_modified; bool current_tb_modified;
TranslationBlock *tb; TranslationBlock *tb;
PageForEachNext n; PageForEachNext n;
tb_page_addr_t last;
/* /*
* Without precise smc semantics, or when outside of a TB, * Without precise smc semantics, or when outside of a TB,
@@ -1060,11 +1044,10 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
assert_memory_lock(); assert_memory_lock();
current_tb = tcg_tb_lookup(pc); current_tb = tcg_tb_lookup(pc);
last = addr | ~TARGET_PAGE_MASK;
addr &= TARGET_PAGE_MASK; addr &= TARGET_PAGE_MASK;
current_tb_modified = false; current_tb_modified = false;
PAGE_FOR_EACH_TB(addr, last, unused, tb, n) { PAGE_FOR_EACH_TB(addr, addr + TARGET_PAGE_SIZE, unused, tb, n) {
if (current_tb == tb && if (current_tb == tb &&
(tb_cflags(current_tb) & CF_COUNT_MASK) != 1) { (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
/* /*
@@ -1096,36 +1079,34 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
static void static void
tb_invalidate_phys_page_range__locked(struct page_collection *pages, tb_invalidate_phys_page_range__locked(struct page_collection *pages,
PageDesc *p, tb_page_addr_t start, PageDesc *p, tb_page_addr_t start,
tb_page_addr_t last, tb_page_addr_t end,
uintptr_t retaddr) uintptr_t retaddr)
{ {
TranslationBlock *tb; TranslationBlock *tb;
tb_page_addr_t tb_start, tb_end;
PageForEachNext n; PageForEachNext n;
#ifdef TARGET_HAS_PRECISE_SMC #ifdef TARGET_HAS_PRECISE_SMC
bool current_tb_modified = false; bool current_tb_modified = false;
TranslationBlock *current_tb = retaddr ? tcg_tb_lookup(retaddr) : NULL; TranslationBlock *current_tb = retaddr ? tcg_tb_lookup(retaddr) : NULL;
#endif /* TARGET_HAS_PRECISE_SMC */ #endif /* TARGET_HAS_PRECISE_SMC */
/* Range may not cross a page. */
tcg_debug_assert(((start ^ last) & TARGET_PAGE_MASK) == 0);
/* /*
* We remove all the TBs in the range [start, last]. * We remove all the TBs in the range [start, end[.
* XXX: see if in some cases it could be faster to invalidate all the code * XXX: see if in some cases it could be faster to invalidate all the code
*/ */
PAGE_FOR_EACH_TB(start, last, p, tb, n) { PAGE_FOR_EACH_TB(start, end, p, tb, n) {
tb_page_addr_t tb_start, tb_last;
/* NOTE: this is subtle as a TB may span two physical pages */ /* NOTE: this is subtle as a TB may span two physical pages */
tb_start = tb_page_addr0(tb);
tb_last = tb_start + tb->size - 1;
if (n == 0) { if (n == 0) {
tb_last = MIN(tb_last, tb_start | ~TARGET_PAGE_MASK); /* NOTE: tb_end may be after the end of the page, but
it is not a problem */
tb_start = tb_page_addr0(tb);
tb_end = tb_start + tb->size;
} else { } else {
tb_start = tb_page_addr1(tb); tb_start = tb_page_addr1(tb);
tb_last = tb_start + (tb_last & ~TARGET_PAGE_MASK); tb_end = tb_start + ((tb_page_addr0(tb) + tb->size)
& ~TARGET_PAGE_MASK);
} }
if (!(tb_last < start || tb_start > last)) { if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC #ifdef TARGET_HAS_PRECISE_SMC
if (current_tb == tb && if (current_tb == tb &&
(tb_cflags(current_tb) & CF_COUNT_MASK) != 1) { (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
@@ -1167,7 +1148,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
void tb_invalidate_phys_page(tb_page_addr_t addr) void tb_invalidate_phys_page(tb_page_addr_t addr)
{ {
struct page_collection *pages; struct page_collection *pages;
tb_page_addr_t start, last; tb_page_addr_t start, end;
PageDesc *p; PageDesc *p;
p = page_find(addr >> TARGET_PAGE_BITS); p = page_find(addr >> TARGET_PAGE_BITS);
@@ -1176,40 +1157,36 @@ void tb_invalidate_phys_page(tb_page_addr_t addr)
} }
start = addr & TARGET_PAGE_MASK; start = addr & TARGET_PAGE_MASK;
last = addr | ~TARGET_PAGE_MASK; end = start + TARGET_PAGE_SIZE;
pages = page_collection_lock(start, last); pages = page_collection_lock(start, end);
tb_invalidate_phys_page_range__locked(pages, p, start, last, 0); tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
page_collection_unlock(pages); page_collection_unlock(pages);
} }
/* /*
* Invalidate all TBs which intersect with the target physical address range * Invalidate all TBs which intersect with the target physical address range
* [start;last]. NOTE: start and end may refer to *different* physical pages. * [start;end[. NOTE: start and end may refer to *different* physical pages.
* 'is_cpu_write_access' should be true if called from a real cpu write * 'is_cpu_write_access' should be true if called from a real cpu write
* access: the virtual CPU will exit the current TB if code is modified inside * access: the virtual CPU will exit the current TB if code is modified inside
* this TB. * this TB.
*/ */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last) void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{ {
struct page_collection *pages; struct page_collection *pages;
tb_page_addr_t index, index_last; tb_page_addr_t next;
pages = page_collection_lock(start, last); pages = page_collection_lock(start, end);
for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
index_last = last >> TARGET_PAGE_BITS; start < end;
for (index = start >> TARGET_PAGE_BITS; index <= index_last; index++) { start = next, next += TARGET_PAGE_SIZE) {
PageDesc *pd = page_find(index); PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
tb_page_addr_t page_start, page_last; tb_page_addr_t bound = MIN(next, end);
if (pd == NULL) { if (pd == NULL) {
continue; continue;
} }
assert_page_locked(pd); assert_page_locked(pd);
page_start = index << TARGET_PAGE_BITS; tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
page_last = page_start | ~TARGET_PAGE_MASK;
page_last = MIN(page_last, last);
tb_invalidate_phys_page_range__locked(pages, pd,
page_start, page_last, 0);
} }
page_collection_unlock(pages); page_collection_unlock(pages);
} }
@@ -1229,7 +1206,7 @@ static void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
} }
assert_page_locked(p); assert_page_locked(p);
tb_invalidate_phys_page_range__locked(pages, p, start, start + len - 1, ra); tb_invalidate_phys_page_range__locked(pages, p, start, start + len, ra);
} }
/* /*
@@ -1243,7 +1220,7 @@ void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
{ {
struct page_collection *pages; struct page_collection *pages;
pages = page_collection_lock(ram_addr, ram_addr + size - 1); pages = page_collection_lock(ram_addr, ram_addr + size);
tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr); tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr);
page_collection_unlock(pages); page_collection_unlock(pages);
} }

View File

@@ -89,20 +89,7 @@ void icount_handle_deadline(void)
} }
} }
/* Distribute the budget evenly across all CPUs */ void icount_prepare_for_run(CPUState *cpu)
int64_t icount_percpu_budget(int cpu_count)
{
int64_t limit = icount_get_limit();
int64_t timeslice = limit / cpu_count;
if (timeslice == 0) {
timeslice = limit;
}
return timeslice;
}
void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
{ {
int insns_left; int insns_left;
@@ -111,16 +98,16 @@ void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
* each vCPU execution. However u16.high can be raised * each vCPU execution. However u16.high can be raised
* asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
*/ */
g_assert(cpu->neg.icount_decr.u16.low == 0); g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
g_assert(cpu->icount_extra == 0); g_assert(cpu->icount_extra == 0);
replay_mutex_lock(); cpu->icount_budget = icount_get_limit();
cpu->icount_budget = MIN(icount_get_limit(), cpu_budget);
insns_left = MIN(0xffff, cpu->icount_budget); insns_left = MIN(0xffff, cpu->icount_budget);
cpu->neg.icount_decr.u16.low = insns_left; cpu_neg(cpu)->icount_decr.u16.low = insns_left;
cpu->icount_extra = cpu->icount_budget - insns_left; cpu->icount_extra = cpu->icount_budget - insns_left;
replay_mutex_lock();
if (cpu->icount_budget == 0) { if (cpu->icount_budget == 0) {
/* /*
* We're called without the iothread lock, so must take it while * We're called without the iothread lock, so must take it while
@@ -138,7 +125,7 @@ void icount_process_data(CPUState *cpu)
icount_update(cpu); icount_update(cpu);
/* Reset the counters */ /* Reset the counters */
cpu->neg.icount_decr.u16.low = 0; cpu_neg(cpu)->icount_decr.u16.low = 0;
cpu->icount_extra = 0; cpu->icount_extra = 0;
cpu->icount_budget = 0; cpu->icount_budget = 0;
@@ -153,7 +140,7 @@ void icount_handle_interrupt(CPUState *cpu, int mask)
tcg_handle_interrupt(cpu, mask); tcg_handle_interrupt(cpu, mask);
if (qemu_cpu_is_self(cpu) && if (qemu_cpu_is_self(cpu) &&
!cpu->neg.can_do_io !cpu->can_do_io
&& (mask & ~old_mask) != 0) { && (mask & ~old_mask) != 0) {
cpu_abort(cpu, "Raised interrupt while not in I/O function"); cpu_abort(cpu, "Raised interrupt while not in I/O function");
} }

View File

@@ -11,8 +11,7 @@
#define TCG_ACCEL_OPS_ICOUNT_H #define TCG_ACCEL_OPS_ICOUNT_H
void icount_handle_deadline(void); void icount_handle_deadline(void);
void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget); void icount_prepare_for_run(CPUState *cpu);
int64_t icount_percpu_budget(int cpu_count);
void icount_process_data(CPUState *cpu); void icount_process_data(CPUState *cpu);
void icount_handle_interrupt(CPUState *cpu, int mask); void icount_handle_interrupt(CPUState *cpu, int mask);

View File

@@ -32,7 +32,7 @@
#include "qemu/guest-random.h" #include "qemu/guest-random.h"
#include "exec/exec-all.h" #include "exec/exec-all.h"
#include "hw/boards.h" #include "hw/boards.h"
#include "tcg/startup.h"
#include "tcg-accel-ops.h" #include "tcg-accel-ops.h"
#include "tcg-accel-ops-mttcg.h" #include "tcg-accel-ops-mttcg.h"
@@ -80,7 +80,7 @@ static void *mttcg_cpu_thread_fn(void *arg)
qemu_thread_get_self(cpu->thread); qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id(); cpu->thread_id = qemu_get_thread_id();
cpu->neg.can_do_io = true; cpu->can_do_io = 1;
current_cpu = cpu; current_cpu = cpu;
cpu_thread_signal_created(cpu); cpu_thread_signal_created(cpu);
qemu_guest_random_seed_thread_part2(cpu->random_seed); qemu_guest_random_seed_thread_part2(cpu->random_seed);
@@ -100,9 +100,14 @@ static void *mttcg_cpu_thread_fn(void *arg)
break; break;
case EXCP_HALTED: case EXCP_HALTED:
/* /*
* Usually cpu->halted is set, but may have already been * during start-up the vCPU is reset and the thread is
* reset by another thread by the time we arrive here. * kicked several times. If we don't ensure we go back
* to sleep in the halted state we won't cleanly
* start-up when the vCPU is enabled.
*
* cpu->halted should ensure we sleep in wait_io_event
*/ */
g_assert(cpu->halted);
break; break;
case EXCP_ATOMIC: case EXCP_ATOMIC:
qemu_mutex_unlock_iothread(); qemu_mutex_unlock_iothread();
@@ -114,7 +119,7 @@ static void *mttcg_cpu_thread_fn(void *arg)
} }
} }
qatomic_set_mb(&cpu->exit_request, 0); qatomic_mb_set(&cpu->exit_request, 0);
qemu_wait_io_event(cpu); qemu_wait_io_event(cpu);
} while (!cpu->unplug || cpu_can_run(cpu)); } while (!cpu->unplug || cpu_can_run(cpu));
@@ -147,4 +152,8 @@ void mttcg_start_vcpu_thread(CPUState *cpu)
qemu_thread_create(cpu->thread, thread_name, mttcg_cpu_thread_fn, qemu_thread_create(cpu->thread, thread_name, mttcg_cpu_thread_fn,
cpu, QEMU_THREAD_JOINABLE); cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
} }

View File

@@ -24,7 +24,6 @@
*/ */
#include "qemu/osdep.h" #include "qemu/osdep.h"
#include "qemu/lockable.h"
#include "sysemu/tcg.h" #include "sysemu/tcg.h"
#include "sysemu/replay.h" #include "sysemu/replay.h"
#include "sysemu/cpu-timers.h" #include "sysemu/cpu-timers.h"
@@ -32,7 +31,7 @@
#include "qemu/notify.h" #include "qemu/notify.h"
#include "qemu/guest-random.h" #include "qemu/guest-random.h"
#include "exec/exec-all.h" #include "exec/exec-all.h"
#include "tcg/startup.h"
#include "tcg-accel-ops.h" #include "tcg-accel-ops.h"
#include "tcg-accel-ops-rr.h" #include "tcg-accel-ops-rr.h"
#include "tcg-accel-ops-icount.h" #include "tcg-accel-ops-icount.h"
@@ -72,13 +71,11 @@ static void rr_kick_next_cpu(void)
{ {
CPUState *cpu; CPUState *cpu;
do { do {
cpu = qatomic_read(&rr_current_cpu); cpu = qatomic_mb_read(&rr_current_cpu);
if (cpu) { if (cpu) {
cpu_exit(cpu); cpu_exit(cpu);
} }
/* Finish kicking this cpu before reading again. */ } while (cpu != qatomic_mb_read(&rr_current_cpu));
smp_mb();
} while (cpu != qatomic_read(&rr_current_cpu));
} }
static void rr_kick_thread(void *opaque) static void rr_kick_thread(void *opaque)
@@ -142,33 +139,6 @@ static void rr_force_rcu(Notifier *notify, void *data)
rr_kick_next_cpu(); rr_kick_next_cpu();
} }
/*
* Calculate the number of CPUs that we will process in a single iteration of
* the main CPU thread loop so that we can fairly distribute the instruction
* count across CPUs.
*
* The CPU count is cached based on the CPU list generation ID to avoid
* iterating the list every time.
*/
static int rr_cpu_count(void)
{
static unsigned int last_gen_id = ~0;
static int cpu_count;
CPUState *cpu;
QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
if (cpu_list_generation_id_get() != last_gen_id) {
cpu_count = 0;
CPU_FOREACH(cpu) {
++cpu_count;
}
last_gen_id = cpu_list_generation_id_get();
}
return cpu_count;
}
/* /*
* In the single-threaded case each vCPU is simulated in turn. If * In the single-threaded case each vCPU is simulated in turn. If
* there is more than a single vCPU we create a simple timer to kick * there is more than a single vCPU we create a simple timer to kick
@@ -192,7 +162,7 @@ static void *rr_cpu_thread_fn(void *arg)
qemu_thread_get_self(cpu->thread); qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id(); cpu->thread_id = qemu_get_thread_id();
cpu->neg.can_do_io = true; cpu->can_do_io = 1;
cpu_thread_signal_created(cpu); cpu_thread_signal_created(cpu);
qemu_guest_random_seed_thread_part2(cpu->random_seed); qemu_guest_random_seed_thread_part2(cpu->random_seed);
@@ -215,16 +185,11 @@ static void *rr_cpu_thread_fn(void *arg)
cpu->exit_request = 1; cpu->exit_request = 1;
while (1) { while (1) {
/* Only used for icount_enabled() */
int64_t cpu_budget = 0;
qemu_mutex_unlock_iothread(); qemu_mutex_unlock_iothread();
replay_mutex_lock(); replay_mutex_lock();
qemu_mutex_lock_iothread(); qemu_mutex_lock_iothread();
if (icount_enabled()) { if (icount_enabled()) {
int cpu_count = rr_cpu_count();
/* Account partial waits to QEMU_CLOCK_VIRTUAL. */ /* Account partial waits to QEMU_CLOCK_VIRTUAL. */
icount_account_warp_timer(); icount_account_warp_timer();
/* /*
@@ -232,8 +197,6 @@ static void *rr_cpu_thread_fn(void *arg)
* waking up the I/O thread and waiting for completion. * waking up the I/O thread and waiting for completion.
*/ */
icount_handle_deadline(); icount_handle_deadline();
cpu_budget = icount_percpu_budget(cpu_count);
} }
replay_mutex_unlock(); replay_mutex_unlock();
@@ -243,9 +206,8 @@ static void *rr_cpu_thread_fn(void *arg)
} }
while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) { while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) {
/* Store rr_current_cpu before evaluating cpu_can_run(). */
qatomic_set_mb(&rr_current_cpu, cpu);
qatomic_mb_set(&rr_current_cpu, cpu);
current_cpu = cpu; current_cpu = cpu;
qemu_clock_enable(QEMU_CLOCK_VIRTUAL, qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
@@ -256,7 +218,7 @@ static void *rr_cpu_thread_fn(void *arg)
qemu_mutex_unlock_iothread(); qemu_mutex_unlock_iothread();
if (icount_enabled()) { if (icount_enabled()) {
icount_prepare_for_run(cpu, cpu_budget); icount_prepare_for_run(cpu);
} }
r = tcg_cpus_exec(cpu); r = tcg_cpus_exec(cpu);
if (icount_enabled()) { if (icount_enabled()) {
@@ -283,11 +245,11 @@ static void *rr_cpu_thread_fn(void *arg)
cpu = CPU_NEXT(cpu); cpu = CPU_NEXT(cpu);
} /* while (cpu && !cpu->exit_request).. */ } /* while (cpu && !cpu->exit_request).. */
/* Does not need a memory barrier because a spurious wakeup is okay. */ /* Does not need qatomic_mb_set because a spurious wakeup is okay. */
qatomic_set(&rr_current_cpu, NULL); qatomic_set(&rr_current_cpu, NULL);
if (cpu && cpu->exit_request) { if (cpu && cpu->exit_request) {
qatomic_set_mb(&cpu->exit_request, 0); qatomic_mb_set(&cpu->exit_request, 0);
} }
if (icount_enabled() && all_cpu_threads_idle()) { if (icount_enabled() && all_cpu_threads_idle()) {
@@ -329,12 +291,15 @@ void rr_start_vcpu_thread(CPUState *cpu)
single_tcg_halt_cond = cpu->halt_cond; single_tcg_halt_cond = cpu->halt_cond;
single_tcg_cpu_thread = cpu->thread; single_tcg_cpu_thread = cpu->thread;
#ifdef _WIN32
cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
} else { } else {
/* we share the thread */ /* we share the thread */
cpu->thread = single_tcg_cpu_thread; cpu->thread = single_tcg_cpu_thread;
cpu->halt_cond = single_tcg_halt_cond; cpu->halt_cond = single_tcg_halt_cond;
cpu->thread_id = first_cpu->thread_id; cpu->thread_id = first_cpu->thread_id;
cpu->neg.can_do_io = 1; cpu->can_do_io = 1;
cpu->created = true; cpu->created = true;
} }
} }

View File

@@ -31,10 +31,8 @@
#include "sysemu/cpu-timers.h" #include "sysemu/cpu-timers.h"
#include "qemu/main-loop.h" #include "qemu/main-loop.h"
#include "qemu/guest-random.h" #include "qemu/guest-random.h"
#include "qemu/timer.h"
#include "exec/exec-all.h" #include "exec/exec-all.h"
#include "exec/hwaddr.h" #include "exec/hwaddr.h"
#include "exec/tb-flush.h"
#include "exec/gdbstub.h" #include "exec/gdbstub.h"
#include "tcg-accel-ops.h" #include "tcg-accel-ops.h"
@@ -46,21 +44,10 @@
void tcg_cpu_init_cflags(CPUState *cpu, bool parallel) void tcg_cpu_init_cflags(CPUState *cpu, bool parallel)
{ {
uint32_t cflags; uint32_t cflags = cpu->cluster_index << CF_CLUSTER_SHIFT;
/*
* Include the cluster number in the hash we use to look up TBs.
* This is important because a TB that is valid for one cluster at
* a given physical address and set of CPU flags is not necessarily
* valid for another:
* the two clusters may have different views of physical memory, or
* may have different CPU features (eg FPU present or absent).
*/
cflags = cpu->cluster_index << CF_CLUSTER_SHIFT;
cflags |= parallel ? CF_PARALLEL : 0; cflags |= parallel ? CF_PARALLEL : 0;
cflags |= icount_enabled() ? CF_USE_ICOUNT : 0; cflags |= icount_enabled() ? CF_USE_ICOUNT : 0;
cpu->tcg_cflags |= cflags; cpu->tcg_cflags = cflags;
} }
void tcg_cpus_destroy(CPUState *cpu) void tcg_cpus_destroy(CPUState *cpu)
@@ -71,20 +58,23 @@ void tcg_cpus_destroy(CPUState *cpu)
int tcg_cpus_exec(CPUState *cpu) int tcg_cpus_exec(CPUState *cpu)
{ {
int ret; int ret;
#ifdef CONFIG_PROFILER
int64_t ti;
#endif
assert(tcg_enabled()); assert(tcg_enabled());
#ifdef CONFIG_PROFILER
ti = profile_getclock();
#endif
cpu_exec_start(cpu); cpu_exec_start(cpu);
ret = cpu_exec(cpu); ret = cpu_exec(cpu);
cpu_exec_end(cpu); cpu_exec_end(cpu);
#ifdef CONFIG_PROFILER
qatomic_set(&tcg_ctx->prof.cpu_exec_time,
tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti);
#endif
return ret; return ret;
} }
static void tcg_cpu_reset_hold(CPUState *cpu)
{
tcg_flush_jmp_cache(cpu);
tlb_flush(cpu);
}
/* mask must never be zero, except for A20 change call */ /* mask must never be zero, except for A20 change call */
void tcg_handle_interrupt(CPUState *cpu, int mask) void tcg_handle_interrupt(CPUState *cpu, int mask)
{ {
@@ -99,7 +89,7 @@ void tcg_handle_interrupt(CPUState *cpu, int mask)
if (!qemu_cpu_is_self(cpu)) { if (!qemu_cpu_is_self(cpu)) {
qemu_cpu_kick(cpu); qemu_cpu_kick(cpu);
} else { } else {
qatomic_set(&cpu->neg.icount_decr.u16.high, -1); qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
} }
} }
@@ -126,7 +116,7 @@ static inline int xlat_gdb_type(CPUState *cpu, int gdbtype)
return cputype; return cputype;
} }
static int tcg_insert_breakpoint(CPUState *cs, int type, vaddr addr, vaddr len) static int tcg_insert_breakpoint(CPUState *cs, int type, hwaddr addr, hwaddr len)
{ {
CPUState *cpu; CPUState *cpu;
int err = 0; int err = 0;
@@ -157,7 +147,7 @@ static int tcg_insert_breakpoint(CPUState *cs, int type, vaddr addr, vaddr len)
} }
} }
static int tcg_remove_breakpoint(CPUState *cs, int type, vaddr addr, vaddr len) static int tcg_remove_breakpoint(CPUState *cs, int type, hwaddr addr, hwaddr len)
{ {
CPUState *cpu; CPUState *cpu;
int err = 0; int err = 0;
@@ -213,7 +203,6 @@ static void tcg_accel_ops_init(AccelOpsClass *ops)
} }
} }
ops->cpu_reset_hold = tcg_cpu_reset_hold;
ops->supports_guest_debug = tcg_supports_guest_debug; ops->supports_guest_debug = tcg_supports_guest_debug;
ops->insert_breakpoint = tcg_insert_breakpoint; ops->insert_breakpoint = tcg_insert_breakpoint;
ops->remove_breakpoint = tcg_remove_breakpoint; ops->remove_breakpoint = tcg_remove_breakpoint;

View File

@@ -25,26 +25,23 @@
#include "qemu/osdep.h" #include "qemu/osdep.h"
#include "sysemu/tcg.h" #include "sysemu/tcg.h"
#include "exec/replay-core.h" #include "sysemu/replay.h"
#include "sysemu/cpu-timers.h" #include "sysemu/cpu-timers.h"
#include "tcg/startup.h" #include "tcg/tcg.h"
#include "tcg/oversized-guest.h"
#include "qapi/error.h" #include "qapi/error.h"
#include "qemu/error-report.h" #include "qemu/error-report.h"
#include "qemu/accel.h" #include "qemu/accel.h"
#include "qemu/atomic.h"
#include "qapi/qapi-builtin-visit.h" #include "qapi/qapi-builtin-visit.h"
#include "qemu/units.h" #include "qemu/units.h"
#if !defined(CONFIG_USER_ONLY) #if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h" #include "hw/boards.h"
#endif #endif
#include "internal-target.h" #include "internal.h"
struct TCGState { struct TCGState {
AccelState parent_obj; AccelState parent_obj;
bool mttcg_enabled; bool mttcg_enabled;
bool one_insn_per_tb;
int splitwx_enabled; int splitwx_enabled;
unsigned long tb_size; unsigned long tb_size;
}; };
@@ -64,24 +61,38 @@ DECLARE_INSTANCE_CHECKER(TCGState, TCG_STATE,
* they can set the appropriate CONFIG flags in ${target}-softmmu.mak * they can set the appropriate CONFIG flags in ${target}-softmmu.mak
* *
* Once a guest architecture has been converted to the new primitives * Once a guest architecture has been converted to the new primitives
* there is one remaining limitation to check: * there are two remaining limitations to check.
*
* - The guest can't be oversized (e.g. 64 bit guest on 32 bit host) * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
* - The host must have a stronger memory order than the guest
*
* It may be possible in future to support strong guests on weak hosts
* but that will require tagging all load/stores in a guest with their
* implicit memory order requirements which would likely slow things
* down a lot.
*/ */
static bool check_tcg_memory_orders_compatible(void)
{
#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
#else
return false;
#endif
}
static bool default_mttcg_enabled(void) static bool default_mttcg_enabled(void)
{ {
if (icount_enabled() || TCG_OVERSIZED_GUEST) { if (icount_enabled() || TCG_OVERSIZED_GUEST) {
return false; return false;
} } else {
#ifdef TARGET_SUPPORTS_MTTCG #ifdef TARGET_SUPPORTS_MTTCG
# ifndef TCG_GUEST_DEFAULT_MO return check_tcg_memory_orders_compatible();
# error "TARGET_SUPPORTS_MTTCG without TCG_GUEST_DEFAULT_MO"
# endif
return true;
#else #else
return false; return false;
#endif #endif
} }
}
static void tcg_accel_instance_init(Object *obj) static void tcg_accel_instance_init(Object *obj)
{ {
@@ -98,7 +109,6 @@ static void tcg_accel_instance_init(Object *obj)
} }
bool mttcg_enabled; bool mttcg_enabled;
bool one_insn_per_tb;
static int tcg_init_machine(MachineState *ms) static int tcg_init_machine(MachineState *ms)
{ {
@@ -121,7 +131,7 @@ static int tcg_init_machine(MachineState *ms)
* There's no guest base to take into account, so go ahead and * There's no guest base to take into account, so go ahead and
* initialize the prologue now. * initialize the prologue now.
*/ */
tcg_prologue_init(); tcg_prologue_init(tcg_ctx);
#endif #endif
return 0; return 0;
@@ -148,6 +158,11 @@ static void tcg_set_thread(Object *obj, const char *value, Error **errp)
warn_report("Guest not yet converted to MTTCG - " warn_report("Guest not yet converted to MTTCG - "
"you may get unexpected results"); "you may get unexpected results");
#endif #endif
if (!check_tcg_memory_orders_compatible()) {
warn_report("Guest expects a stronger memory ordering "
"than the host provides");
error_printf("This may cause strange/hard to debug errors\n");
}
s->mttcg_enabled = true; s->mttcg_enabled = true;
} }
} else if (strcmp(value, "single") == 0) { } else if (strcmp(value, "single") == 0) {
@@ -193,20 +208,6 @@ static void tcg_set_splitwx(Object *obj, bool value, Error **errp)
s->splitwx_enabled = value; s->splitwx_enabled = value;
} }
static bool tcg_get_one_insn_per_tb(Object *obj, Error **errp)
{
TCGState *s = TCG_STATE(obj);
return s->one_insn_per_tb;
}
static void tcg_set_one_insn_per_tb(Object *obj, bool value, Error **errp)
{
TCGState *s = TCG_STATE(obj);
s->one_insn_per_tb = value;
/* Set the global also: this changes the behaviour */
qatomic_set(&one_insn_per_tb, value);
}
static int tcg_gdbstub_supported_sstep_flags(void) static int tcg_gdbstub_supported_sstep_flags(void)
{ {
/* /*
@@ -227,8 +228,6 @@ static void tcg_accel_class_init(ObjectClass *oc, void *data)
AccelClass *ac = ACCEL_CLASS(oc); AccelClass *ac = ACCEL_CLASS(oc);
ac->name = "tcg"; ac->name = "tcg";
ac->init_machine = tcg_init_machine; ac->init_machine = tcg_init_machine;
ac->cpu_common_realize = tcg_exec_realizefn;
ac->cpu_common_unrealize = tcg_exec_unrealizefn;
ac->allowed = &tcg_allowed; ac->allowed = &tcg_allowed;
ac->gdbstub_supported_sstep_flags = tcg_gdbstub_supported_sstep_flags; ac->gdbstub_supported_sstep_flags = tcg_gdbstub_supported_sstep_flags;
@@ -246,12 +245,6 @@ static void tcg_accel_class_init(ObjectClass *oc, void *data)
tcg_get_splitwx, tcg_set_splitwx); tcg_get_splitwx, tcg_set_splitwx);
object_class_property_set_description(oc, "split-wx", object_class_property_set_description(oc, "split-wx",
"Map jit pages into separate RW and RX regions"); "Map jit pages into separate RW and RX regions");
object_class_property_add_bool(oc, "one-insn-per-tb",
tcg_get_one_insn_per_tb,
tcg_set_one_insn_per_tb);
object_class_property_set_description(oc, "one-insn-per-tb",
"Only put one guest insn in each translation block");
} }
static const TypeInfo tcg_accel_type = { static const TypeInfo tcg_accel_type = {

View File

@@ -20,7 +20,7 @@
#include "qemu/osdep.h" #include "qemu/osdep.h"
#include "qemu/host-utils.h" #include "qemu/host-utils.h"
#include "cpu.h" #include "cpu.h"
#include "exec/helper-proto-common.h" #include "exec/helper-proto.h"
#include "tcg/tcg-gvec-desc.h" #include "tcg/tcg-gvec-desc.h"
@@ -550,17 +550,6 @@ void HELPER(gvec_ands)(void *d, void *a, uint64_t b, uint32_t desc)
clear_high(d, oprsz, desc); clear_high(d, oprsz, desc);
} }
void HELPER(gvec_andcs)(void *d, void *a, uint64_t b, uint32_t desc)
{
intptr_t oprsz = simd_oprsz(desc);
intptr_t i;
for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
*(uint64_t *)(d + i) = *(uint64_t *)(a + i) & ~b;
}
clear_high(d, oprsz, desc);
}
void HELPER(gvec_xors)(void *d, void *a, uint64_t b, uint32_t desc) void HELPER(gvec_xors)(void *d, void *a, uint64_t b, uint32_t desc)
{ {
intptr_t oprsz = simd_oprsz(desc); intptr_t oprsz = simd_oprsz(desc);
@@ -1042,32 +1031,6 @@ DO_CMP2(64)
#undef DO_CMP1 #undef DO_CMP1
#undef DO_CMP2 #undef DO_CMP2
#define DO_CMP1(NAME, TYPE, OP) \
void HELPER(NAME)(void *d, void *a, uint64_t b64, uint32_t desc) \
{ \
intptr_t oprsz = simd_oprsz(desc); \
TYPE inv = simd_data(desc), b = b64; \
for (intptr_t i = 0; i < oprsz; i += sizeof(TYPE)) { \
*(TYPE *)(d + i) = -((*(TYPE *)(a + i) OP b) ^ inv); \
} \
clear_high(d, oprsz, desc); \
}
#define DO_CMP2(SZ) \
DO_CMP1(gvec_eqs##SZ, uint##SZ##_t, ==) \
DO_CMP1(gvec_lts##SZ, int##SZ##_t, <) \
DO_CMP1(gvec_les##SZ, int##SZ##_t, <=) \
DO_CMP1(gvec_ltus##SZ, uint##SZ##_t, <) \
DO_CMP1(gvec_leus##SZ, uint##SZ##_t, <=)
DO_CMP2(8)
DO_CMP2(16)
DO_CMP2(32)
DO_CMP2(64)
#undef DO_CMP1
#undef DO_CMP2
void HELPER(gvec_ssadd8)(void *d, void *a, void *b, uint32_t desc) void HELPER(gvec_ssadd8)(void *d, void *a, void *b, uint32_t desc)
{ {
intptr_t oprsz = simd_oprsz(desc); intptr_t oprsz = simd_oprsz(desc);

View File

@@ -24,17 +24,13 @@
#include "qemu/osdep.h" #include "qemu/osdep.h"
#include "qemu/host-utils.h" #include "qemu/host-utils.h"
#include "cpu.h" #include "cpu.h"
#include "exec/helper-proto-common.h" #include "exec/helper-proto.h"
#include "exec/cpu_ldst.h" #include "exec/cpu_ldst.h"
#include "exec/exec-all.h" #include "exec/exec-all.h"
#include "disas/disas.h" #include "disas/disas.h"
#include "exec/log.h" #include "exec/log.h"
#include "tcg/tcg.h" #include "tcg/tcg.h"
#define HELPER_H "accel/tcg/tcg-runtime.h"
#include "exec/helper-info.c.inc"
#undef HELPER_H
/* 32-bit helpers */ /* 32-bit helpers */
int32_t HELPER(div_i32)(int32_t arg1, int32_t arg2) int32_t HELPER(div_i32)(int32_t arg1, int32_t arg2)

View File

@@ -39,63 +39,62 @@ DEF_HELPER_FLAGS_1(exit_atomic, TCG_CALL_NO_WG, noreturn, env)
DEF_HELPER_FLAGS_3(memset, TCG_CALL_NO_RWG, ptr, ptr, int, ptr) DEF_HELPER_FLAGS_3(memset, TCG_CALL_NO_RWG, ptr, ptr, int, ptr)
#endif /* IN_HELPER_PROTO */ #endif /* IN_HELPER_PROTO */
DEF_HELPER_FLAGS_3(ld_i128, TCG_CALL_NO_WG, i128, env, i64, i32)
DEF_HELPER_FLAGS_4(st_i128, TCG_CALL_NO_WG, void, env, i64, i128, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgb, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_5(atomic_cmpxchgb, TCG_CALL_NO_WG,
i32, env, i64, i32, i32, i32) i32, env, tl, i32, i32, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgw_be, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_5(atomic_cmpxchgw_be, TCG_CALL_NO_WG,
i32, env, i64, i32, i32, i32) i32, env, tl, i32, i32, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgw_le, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_5(atomic_cmpxchgw_le, TCG_CALL_NO_WG,
i32, env, i64, i32, i32, i32) i32, env, tl, i32, i32, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgl_be, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_5(atomic_cmpxchgl_be, TCG_CALL_NO_WG,
i32, env, i64, i32, i32, i32) i32, env, tl, i32, i32, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgl_le, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_5(atomic_cmpxchgl_le, TCG_CALL_NO_WG,
i32, env, i64, i32, i32, i32) i32, env, tl, i32, i32, i32)
#ifdef CONFIG_ATOMIC64 #ifdef CONFIG_ATOMIC64
DEF_HELPER_FLAGS_5(atomic_cmpxchgq_be, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_5(atomic_cmpxchgq_be, TCG_CALL_NO_WG,
i64, env, i64, i64, i64, i32) i64, env, tl, i64, i64, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG,
i64, env, i64, i64, i64, i32) i64, env, tl, i64, i64, i32)
#endif #endif
#if HAVE_CMPXCHG128 #ifdef CONFIG_CMPXCHG128
DEF_HELPER_FLAGS_5(atomic_cmpxchgo_be, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_5(atomic_cmpxchgo_be, TCG_CALL_NO_WG,
i128, env, i64, i128, i128, i32) i128, env, tl, i128, i128, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgo_le, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_5(atomic_cmpxchgo_le, TCG_CALL_NO_WG,
i128, env, i64, i128, i128, i32) i128, env, tl, i128, i128, i32)
#endif #endif
DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo_be, TCG_CALL_NO_WG,
i128, env, i64, i128, i128, i32) i128, env, tl, i128, i128, i32)
DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo_le, TCG_CALL_NO_WG,
i128, env, tl, i128, i128, i32)
#ifdef CONFIG_ATOMIC64 #ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPERS(NAME) \ #define GEN_ATOMIC_HELPERS(NAME) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \ TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \ TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \ TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \ TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \ TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_le), \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_le), \
TCG_CALL_NO_WG, i64, env, i64, i64, i32) \ TCG_CALL_NO_WG, i64, env, tl, i64, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_be), \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_be), \
TCG_CALL_NO_WG, i64, env, i64, i64, i32) TCG_CALL_NO_WG, i64, env, tl, i64, i32)
#else #else
#define GEN_ATOMIC_HELPERS(NAME) \ #define GEN_ATOMIC_HELPERS(NAME) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \ TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \ TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \ TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \ TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \
TCG_CALL_NO_WG, i32, env, i64, i32, i32) TCG_CALL_NO_WG, i32, env, tl, i32, i32)
#endif /* CONFIG_ATOMIC64 */ #endif /* CONFIG_ATOMIC64 */
GEN_ATOMIC_HELPERS(fetch_add) GEN_ATOMIC_HELPERS(fetch_add)
@@ -218,7 +217,6 @@ DEF_HELPER_FLAGS_4(gvec_nor, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_eqv, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_eqv, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ands, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(gvec_ands, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_andcs, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_xors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(gvec_xors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_ors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(gvec_ors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
@@ -297,29 +295,4 @@ DEF_HELPER_FLAGS_4(gvec_leu16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_leu32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_leu32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_leu64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_leu64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_eqs8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_eqs16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_eqs32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_eqs64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_lts8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_lts16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_lts32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_lts64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_les8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_les16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_les32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_les64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_ltus8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_ltus16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_ltus32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_ltus64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_leus8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_leus16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_leus32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_leus64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_5(gvec_bitsel, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_bitsel, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)

View File

@@ -19,6 +19,7 @@
#include "qemu/osdep.h" #include "qemu/osdep.h"
#define NO_CPU_IO_DEFS
#include "trace.h" #include "trace.h"
#include "disas/disas.h" #include "disas/disas.h"
#include "exec/exec-all.h" #include "exec/exec-all.h"
@@ -46,12 +47,11 @@
#include "exec/cputlb.h" #include "exec/cputlb.h"
#include "exec/translate-all.h" #include "exec/translate-all.h"
#include "exec/translator.h" #include "exec/translator.h"
#include "exec/tb-flush.h"
#include "qemu/bitmap.h" #include "qemu/bitmap.h"
#include "qemu/qemu-print.h" #include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h" #include "qemu/main-loop.h"
#include "qemu/cacheinfo.h" #include "qemu/cacheinfo.h"
#include "qemu/timer.h"
#include "exec/log.h" #include "exec/log.h"
#include "sysemu/cpus.h" #include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h" #include "sysemu/cpu-timers.h"
@@ -61,18 +61,19 @@
#include "tb-jmp-cache.h" #include "tb-jmp-cache.h"
#include "tb-hash.h" #include "tb-hash.h"
#include "tb-context.h" #include "tb-context.h"
#include "internal-common.h" #include "internal.h"
#include "internal-target.h"
#include "perf.h" #include "perf.h"
#include "tcg/insn-start-words.h"
/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
sizeof_field(TranslationBlock, trace_vcpu_dstate)
* BITS_PER_BYTE);
TBContext tb_ctx; TBContext tb_ctx;
/* /* Encode VAL as a signed leb128 sequence at P.
* Encode VAL as a signed leb128 sequence at P. Return P incremented past the encoded value. */
* Return P incremented past the encoded value. static uint8_t *encode_sleb128(uint8_t *p, target_long val)
*/
static uint8_t *encode_sleb128(uint8_t *p, int64_t val)
{ {
int more, byte; int more, byte;
@@ -90,23 +91,21 @@ static uint8_t *encode_sleb128(uint8_t *p, int64_t val)
return p; return p;
} }
/* /* Decode a signed leb128 sequence at *PP; increment *PP past the
* Decode a signed leb128 sequence at *PP; increment *PP past the decoded value. Return the decoded value. */
* decoded value. Return the decoded value. static target_long decode_sleb128(const uint8_t **pp)
*/
static int64_t decode_sleb128(const uint8_t **pp)
{ {
const uint8_t *p = *pp; const uint8_t *p = *pp;
int64_t val = 0; target_long val = 0;
int byte, shift = 0; int byte, shift = 0;
do { do {
byte = *p++; byte = *p++;
val |= (int64_t)(byte & 0x7f) << shift; val |= (target_ulong)(byte & 0x7f) << shift;
shift += 7; shift += 7;
} while (byte & 0x80); } while (byte & 0x80);
if (shift < TARGET_LONG_BITS && (byte & 0x40)) { if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
val |= -(int64_t)1 << shift; val |= -(target_ulong)1 << shift;
} }
*pp = p; *pp = p;
@@ -128,26 +127,22 @@ static int64_t decode_sleb128(const uint8_t **pp)
static int encode_search(TranslationBlock *tb, uint8_t *block) static int encode_search(TranslationBlock *tb, uint8_t *block)
{ {
uint8_t *highwater = tcg_ctx->code_gen_highwater; uint8_t *highwater = tcg_ctx->code_gen_highwater;
uint64_t *insn_data = tcg_ctx->gen_insn_data;
uint16_t *insn_end_off = tcg_ctx->gen_insn_end_off;
uint8_t *p = block; uint8_t *p = block;
int i, j, n; int i, j, n;
for (i = 0, n = tb->icount; i < n; ++i) { for (i = 0, n = tb->icount; i < n; ++i) {
uint64_t prev, curr; target_ulong prev;
for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
if (i == 0) { if (i == 0) {
prev = (!(tb_cflags(tb) & CF_PCREL) && j == 0 ? tb->pc : 0); prev = (!TARGET_TB_PCREL && j == 0 ? tb_pc(tb) : 0);
} else { } else {
prev = insn_data[(i - 1) * TARGET_INSN_START_WORDS + j]; prev = tcg_ctx->gen_insn_data[i - 1][j];
} }
curr = insn_data[i * TARGET_INSN_START_WORDS + j]; p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
p = encode_sleb128(p, curr - prev);
} }
prev = (i == 0 ? 0 : insn_end_off[i - 1]); prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
curr = insn_end_off[i]; p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);
p = encode_sleb128(p, curr - prev);
/* Test for (pending) buffer overflow. The assumption is that any /* Test for (pending) buffer overflow. The assumption is that any
one row beginning below the high water mark cannot overrun one row beginning below the high water mark cannot overrun
@@ -175,8 +170,8 @@ static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
} }
memset(data, 0, sizeof(uint64_t) * TARGET_INSN_START_WORDS); memset(data, 0, sizeof(uint64_t) * TARGET_INSN_START_WORDS);
if (!(tb_cflags(tb) & CF_PCREL)) { if (!TARGET_TB_PCREL) {
data[0] = tb->pc; data[0] = tb_pc(tb);
} }
/* /*
@@ -203,6 +198,10 @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
uintptr_t host_pc) uintptr_t host_pc)
{ {
uint64_t data[TARGET_INSN_START_WORDS]; uint64_t data[TARGET_INSN_START_WORDS];
#ifdef CONFIG_PROFILER
TCGProfile *prof = &tcg_ctx->prof;
int64_t ti = profile_getclock();
#endif
int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data); int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data);
if (insns_left < 0) { if (insns_left < 0) {
@@ -215,10 +214,16 @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
* Reset the cycle counter to the start of the block and * Reset the cycle counter to the start of the block and
* shift if to the number of actually executed instructions. * shift if to the number of actually executed instructions.
*/ */
cpu->neg.icount_decr.u16.low += insns_left; cpu_neg(cpu)->icount_decr.u16.low += insns_left;
} }
cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data); cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data);
#ifdef CONFIG_PROFILER
qatomic_set(&prof->restore_time,
prof->restore_time + profile_getclock() - ti);
qatomic_set(&prof->restore_count, prof->restore_count + 1);
#endif
} }
bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc) bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc)
@@ -265,7 +270,7 @@ void page_init(void)
* Return the size of the generated code, or negative on error. * Return the size of the generated code, or negative on error.
*/ */
static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb, static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
vaddr pc, void *host_pc, target_ulong pc, void *host_pc,
int *max_insns, int64_t *ti) int *max_insns, int64_t *ti)
{ {
int ret = sigsetjmp(tcg_ctx->jmp_trans, 0); int ret = sigsetjmp(tcg_ctx->jmp_trans, 0);
@@ -276,24 +281,34 @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
tcg_func_start(tcg_ctx); tcg_func_start(tcg_ctx);
tcg_ctx->cpu = env_cpu(env); tcg_ctx->cpu = env_cpu(env);
gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc); gen_intermediate_code(env_cpu(env), tb, *max_insns, pc, host_pc);
assert(tb->size != 0); assert(tb->size != 0);
tcg_ctx->cpu = NULL; tcg_ctx->cpu = NULL;
*max_insns = tb->icount; *max_insns = tb->icount;
#ifdef CONFIG_PROFILER
qatomic_set(&tcg_ctx->prof.tb_count, tcg_ctx->prof.tb_count + 1);
qatomic_set(&tcg_ctx->prof.interm_time,
tcg_ctx->prof.interm_time + profile_getclock() - *ti);
*ti = profile_getclock();
#endif
return tcg_gen_code(tcg_ctx, tb, pc); return tcg_gen_code(tcg_ctx, tb, pc);
} }
/* Called with mmap_lock held for user mode emulation. */ /* Called with mmap_lock held for user mode emulation. */
TranslationBlock *tb_gen_code(CPUState *cpu, TranslationBlock *tb_gen_code(CPUState *cpu,
vaddr pc, uint64_t cs_base, target_ulong pc, target_ulong cs_base,
uint32_t flags, int cflags) uint32_t flags, int cflags)
{ {
CPUArchState *env = cpu_env(cpu); CPUArchState *env = cpu->env_ptr;
TranslationBlock *tb, *existing_tb; TranslationBlock *tb, *existing_tb;
tb_page_addr_t phys_pc, phys_p2; tb_page_addr_t phys_pc;
tcg_insn_unit *gen_code_buf; tcg_insn_unit *gen_code_buf;
int gen_code_size, search_size, max_insns; int gen_code_size, search_size, max_insns;
#ifdef CONFIG_PROFILER
TCGProfile *prof = &tcg_ctx->prof;
#endif
int64_t ti; int64_t ti;
void *host_pc; void *host_pc;
@@ -304,7 +319,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
if (phys_pc == -1) { if (phys_pc == -1) {
/* Generate a one-shot TB with 1 insn in it */ /* Generate a one-shot TB with 1 insn in it */
cflags = (cflags & ~CF_COUNT_MASK) | 1; cflags = (cflags & ~CF_COUNT_MASK) | CF_LAST_IO | 1;
} }
max_insns = cflags & CF_COUNT_MASK; max_insns = cflags & CF_COUNT_MASK;
@@ -314,7 +329,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS); QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);
buffer_overflow: buffer_overflow:
assert_no_pages_locked();
tb = tcg_tb_alloc(tcg_ctx); tb = tcg_tb_alloc(tcg_ctx);
if (unlikely(!tb)) { if (unlikely(!tb)) {
/* flush must be done */ /* flush must be done */
@@ -327,33 +341,24 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
gen_code_buf = tcg_ctx->code_gen_ptr; gen_code_buf = tcg_ctx->code_gen_ptr;
tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf); tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
if (!(cflags & CF_PCREL)) { #if !TARGET_TB_PCREL
tb->pc = pc; tb->pc = pc;
} #endif
tb->cs_base = cs_base; tb->cs_base = cs_base;
tb->flags = flags; tb->flags = flags;
tb->cflags = cflags; tb->cflags = cflags;
tb->trace_vcpu_dstate = *cpu->trace_dstate;
tb_set_page_addr0(tb, phys_pc); tb_set_page_addr0(tb, phys_pc);
tb_set_page_addr1(tb, -1); tb_set_page_addr1(tb, -1);
if (phys_pc != -1) {
tb_lock_page0(phys_pc);
}
tcg_ctx->gen_tb = tb; tcg_ctx->gen_tb = tb;
tcg_ctx->addr_type = TARGET_LONG_BITS == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64; tb_overflow:
#ifdef CONFIG_SOFTMMU
tcg_ctx->page_bits = TARGET_PAGE_BITS; #ifdef CONFIG_PROFILER
tcg_ctx->page_mask = TARGET_PAGE_MASK; /* includes aborted translations because of exceptions */
tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS; qatomic_set(&prof->tb_count1, prof->tb_count1 + 1);
#endif ti = profile_getclock();
tcg_ctx->insn_start_words = TARGET_INSN_START_WORDS;
#ifdef TCG_GUEST_DEFAULT_MO
tcg_ctx->guest_mo = TCG_GUEST_DEFAULT_MO;
#else
tcg_ctx->guest_mo = TCG_MO_ALL;
#endif #endif
restart_translate:
trace_translate_block(tb, pc, tb->tc.ptr); trace_translate_block(tb, pc, tb->tc.ptr);
gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti); gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti);
@@ -372,8 +377,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT, qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
"Restarting code generation for " "Restarting code generation for "
"code_gen_buffer overflow\n"); "code_gen_buffer overflow\n");
tb_unlock_pages(tb);
tcg_ctx->gen_tb = NULL;
goto buffer_overflow; goto buffer_overflow;
case -2: case -2:
@@ -392,49 +395,32 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
"Restarting code generation with " "Restarting code generation with "
"smaller translation block (max %d insns)\n", "smaller translation block (max %d insns)\n",
max_insns); max_insns);
goto tb_overflow;
/*
* The half-sized TB may not cross pages.
* TODO: Fix all targets that cross pages except with
* the first insn, at which point this can't be reached.
*/
phys_p2 = tb_page_addr1(tb);
if (unlikely(phys_p2 != -1)) {
tb_unlock_page1(phys_pc, phys_p2);
tb_set_page_addr1(tb, -1);
}
goto restart_translate;
case -3:
/*
* We had a page lock ordering problem. In order to avoid
* deadlock we had to drop the lock on page0, which means
* that everything we translated so far is compromised.
* Restart with locks held on both pages.
*/
qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
"Restarting code generation with re-locked pages");
goto restart_translate;
default: default:
g_assert_not_reached(); g_assert_not_reached();
} }
} }
tcg_ctx->gen_tb = NULL;
search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size); search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
if (unlikely(search_size < 0)) { if (unlikely(search_size < 0)) {
tb_unlock_pages(tb);
goto buffer_overflow; goto buffer_overflow;
} }
tb->tc.size = gen_code_size; tb->tc.size = gen_code_size;
/* /*
* For CF_PCREL, attribute all executions of the generated code * For TARGET_TB_PCREL, attribute all executions of the generated
* to its first mapping. * code to its first mapping.
*/ */
perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf)); perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf));
#ifdef CONFIG_PROFILER
qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
qatomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
qatomic_set(&prof->search_out_len, prof->search_out_len + search_size);
#endif
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) && if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
qemu_log_in_addr_range(pc)) { qemu_log_in_addr_range(pc)) {
FILE *logfile = qemu_log_trylock(); FILE *logfile = qemu_log_trylock();
@@ -457,8 +443,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
/* Dump header and the first instruction */ /* Dump header and the first instruction */
fprintf(logfile, "OUT: [size=%d]\n", gen_code_size); fprintf(logfile, "OUT: [size=%d]\n", gen_code_size);
fprintf(logfile, fprintf(logfile,
" -- guest addr 0x%016" PRIx64 " + tb prologue\n", " -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n",
tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]); tcg_ctx->gen_insn_data[insn][0]);
chunk_start = tcg_ctx->gen_insn_end_off[insn]; chunk_start = tcg_ctx->gen_insn_end_off[insn];
disas(logfile, tb->tc.ptr, chunk_start); disas(logfile, tb->tc.ptr, chunk_start);
@@ -470,8 +456,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
while (insn < tb->icount) { while (insn < tb->icount) {
size_t chunk_end = tcg_ctx->gen_insn_end_off[insn]; size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
if (chunk_end > chunk_start) { if (chunk_end > chunk_start) {
fprintf(logfile, " -- guest addr 0x%016" PRIx64 "\n", fprintf(logfile, " -- guest addr 0x" TARGET_FMT_lx "\n",
tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]); tcg_ctx->gen_insn_data[insn][0]);
disas(logfile, tb->tc.ptr + chunk_start, disas(logfile, tb->tc.ptr + chunk_start,
chunk_end - chunk_start); chunk_end - chunk_start);
chunk_start = chunk_end; chunk_start = chunk_end;
@@ -507,6 +493,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
qemu_log_unlock(logfile); qemu_log_unlock(logfile);
} }
} }
#endif
qatomic_set(&tcg_ctx->code_gen_ptr, (void *) qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size, ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
@@ -534,7 +521,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
* before attempting to link to other TBs or add to the lookup table. * before attempting to link to other TBs or add to the lookup table.
*/ */
if (tb_page_addr0(tb) == -1) { if (tb_page_addr0(tb) == -1) {
assert_no_pages_locked();
return tb; return tb;
} }
@@ -549,9 +535,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
* No explicit memory barrier is required -- tb_link_page() makes the * No explicit memory barrier is required -- tb_link_page() makes the
* TB visible in a consistent state. * TB visible in a consistent state.
*/ */
existing_tb = tb_link_page(tb); existing_tb = tb_link_page(tb, tb_page_addr0(tb), tb_page_addr1(tb));
assert_no_pages_locked();
/* if the TB already exists, discard what we just translated */ /* if the TB already exists, discard what we just translated */
if (unlikely(existing_tb != tb)) { if (unlikely(existing_tb != tb)) {
uintptr_t orig_aligned = (uintptr_t)gen_code_buf; uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
@@ -579,16 +563,15 @@ void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
} else { } else {
/* The exception probably happened in a helper. The CPU state should /* The exception probably happened in a helper. The CPU state should
have been saved before calling it. Fetch the PC from there. */ have been saved before calling it. Fetch the PC from there. */
CPUArchState *env = cpu_env(cpu); CPUArchState *env = cpu->env_ptr;
vaddr pc; target_ulong pc, cs_base;
uint64_t cs_base;
tb_page_addr_t addr; tb_page_addr_t addr;
uint32_t flags; uint32_t flags;
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
addr = get_page_addr_code(env, pc); addr = get_page_addr_code(env, pc);
if (addr != -1) { if (addr != -1) {
tb_invalidate_phys_range(addr, addr); tb_invalidate_phys_range(addr, addr + 1);
} }
} }
} }
@@ -622,7 +605,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
cc = CPU_GET_CLASS(cpu); cc = CPU_GET_CLASS(cpu);
if (cc->tcg_ops->io_recompile_replay_branch && if (cc->tcg_ops->io_recompile_replay_branch &&
cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) { cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
cpu->neg.icount_decr.u16.low++; cpu_neg(cpu)->icount_decr.u16.low++;
n = 2; n = 2;
} }
@@ -632,26 +615,153 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
* operations only (which execute after completion) so we don't * operations only (which execute after completion) so we don't
* double instrument the instruction. * double instrument the instruction.
*/ */
cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | n; cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;
if (qemu_loglevel_mask(CPU_LOG_EXEC)) { if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
vaddr pc = log_pc(cpu, tb); target_ulong pc = log_pc(cpu, tb);
if (qemu_log_in_addr_range(pc)) { if (qemu_log_in_addr_range(pc)) {
qemu_log("cpu_io_recompile: rewound execution of TB to %016" qemu_log("cpu_io_recompile: rewound execution of TB to "
VADDR_PRIx "\n", pc); TARGET_FMT_lx "\n", pc);
} }
} }
cpu_loop_exit_noexc(cpu); cpu_loop_exit_noexc(cpu);
} }
static void print_qht_statistics(struct qht_stats hst, GString *buf)
{
uint32_t hgram_opts;
size_t hgram_bins;
char *hgram;
if (!hst.head_buckets) {
return;
}
g_string_append_printf(buf, "TB hash buckets %zu/%zu "
"(%0.2f%% head buckets used)\n",
hst.used_head_buckets, hst.head_buckets,
(double)hst.used_head_buckets /
hst.head_buckets * 100);
hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
hgram_opts |= QDIST_PR_NODECIMAL;
}
hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
g_string_append_printf(buf, "TB hash occupancy %0.2f%% avg chain occ. "
"Histogram: %s\n",
qdist_avg(&hst.occupancy) * 100, hgram);
g_free(hgram);
hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
if (hgram_bins > 10) {
hgram_bins = 10;
} else {
hgram_bins = 0;
hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
}
hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
g_string_append_printf(buf, "TB hash avg chain %0.3f buckets. "
"Histogram: %s\n",
qdist_avg(&hst.chain), hgram);
g_free(hgram);
}
struct tb_tree_stats {
size_t nb_tbs;
size_t host_size;
size_t target_size;
size_t max_target_size;
size_t direct_jmp_count;
size_t direct_jmp2_count;
size_t cross_page;
};
static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
{
const TranslationBlock *tb = value;
struct tb_tree_stats *tst = data;
tst->nb_tbs++;
tst->host_size += tb->tc.size;
tst->target_size += tb->size;
if (tb->size > tst->max_target_size) {
tst->max_target_size = tb->size;
}
if (tb_page_addr1(tb) != -1) {
tst->cross_page++;
}
if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {
tst->direct_jmp_count++;
if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
tst->direct_jmp2_count++;
}
}
return false;
}
void dump_exec_info(GString *buf)
{
struct tb_tree_stats tst = {};
struct qht_stats hst;
size_t nb_tbs, flush_full, flush_part, flush_elide;
tcg_tb_foreach(tb_tree_stats_iter, &tst);
nb_tbs = tst.nb_tbs;
/* XXX: avoid using doubles ? */
g_string_append_printf(buf, "Translation buffer state:\n");
/*
* Report total code size including the padding and TB structs;
* otherwise users might think "-accel tcg,tb-size" is not honoured.
* For avg host size we use the precise numbers from tb_tree_stats though.
*/
g_string_append_printf(buf, "gen code size %zu/%zu\n",
tcg_code_size(), tcg_code_capacity());
g_string_append_printf(buf, "TB count %zu\n", nb_tbs);
g_string_append_printf(buf, "TB avg target size %zu max=%zu bytes\n",
nb_tbs ? tst.target_size / nb_tbs : 0,
tst.max_target_size);
g_string_append_printf(buf, "TB avg host size %zu bytes "
"(expansion ratio: %0.1f)\n",
nb_tbs ? tst.host_size / nb_tbs : 0,
tst.target_size ?
(double)tst.host_size / tst.target_size : 0);
g_string_append_printf(buf, "cross page TB count %zu (%zu%%)\n",
tst.cross_page,
nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
g_string_append_printf(buf, "direct jump count %zu (%zu%%) "
"(2 jumps=%zu %zu%%)\n",
tst.direct_jmp_count,
nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
tst.direct_jmp2_count,
nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);
qht_statistics_init(&tb_ctx.htable, &hst);
print_qht_statistics(hst, buf);
qht_statistics_destroy(&hst);
g_string_append_printf(buf, "\nStatistics:\n");
g_string_append_printf(buf, "TB flush count %u\n",
qatomic_read(&tb_ctx.tb_flush_count));
g_string_append_printf(buf, "TB invalidate count %u\n",
qatomic_read(&tb_ctx.tb_phys_invalidate_count));
tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
g_string_append_printf(buf, "TLB full flushes %zu\n", flush_full);
g_string_append_printf(buf, "TLB partial flushes %zu\n", flush_part);
g_string_append_printf(buf, "TLB elided flushes %zu\n", flush_elide);
tcg_dump_info(buf);
}
#else /* CONFIG_USER_ONLY */ #else /* CONFIG_USER_ONLY */
void cpu_interrupt(CPUState *cpu, int mask) void cpu_interrupt(CPUState *cpu, int mask)
{ {
g_assert(qemu_mutex_iothread_locked()); g_assert(qemu_mutex_iothread_locked());
cpu->interrupt_request |= mask; cpu->interrupt_request |= mask;
qatomic_set(&cpu->neg.icount_decr.u16.high, -1); qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
} }
#endif /* CONFIG_USER_ONLY */ #endif /* CONFIG_USER_ONLY */
@@ -673,3 +783,11 @@ void tcg_flush_jmp_cache(CPUState *cpu)
qatomic_set(&jc->array[i].tb, NULL); qatomic_set(&jc->array[i].tb, NULL);
} }
} }
/* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
void tcg_flush_softmmu_tlb(CPUState *cs)
{
#ifdef CONFIG_SOFTMMU
tlb_flush(cs);
#endif
}

View File

@@ -8,111 +8,30 @@
*/ */
#include "qemu/osdep.h" #include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h" #include "qemu/error-report.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "exec/exec-all.h" #include "exec/exec-all.h"
#include "exec/gen-icount.h"
#include "exec/log.h"
#include "exec/translator.h" #include "exec/translator.h"
#include "exec/plugin-gen.h" #include "exec/plugin-gen.h"
#include "tcg/tcg-op-common.h" #include "sysemu/replay.h"
#include "internal-target.h"
static void set_can_do_io(DisasContextBase *db, bool val) /* Pairs with tcg_clear_temp_count.
To be called by #TranslatorOps.{translate_insn,tb_stop} if
(1) the target is sufficiently clean to support reporting,
(2) as and when all temporaries are known to be consumed.
For most targets, (2) is at the end of translate_insn. */
void translator_loop_temp_check(DisasContextBase *db)
{ {
if (db->saved_can_do_io != val) { if (tcg_check_temp_count()) {
db->saved_can_do_io = val; qemu_log("warning: TCG temporary leaks before "
TARGET_FMT_lx "\n", db->pc_next);
QEMU_BUILD_BUG_ON(sizeof_field(CPUState, neg.can_do_io) != 1);
tcg_gen_st8_i32(tcg_constant_i32(val), tcg_env,
offsetof(ArchCPU, parent_obj.neg.can_do_io) -
offsetof(ArchCPU, env));
} }
} }
bool translator_io_start(DisasContextBase *db) bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
{
set_can_do_io(db, true);
/*
* Ensure that this instruction will be the last in the TB.
* The target may override this to something more forceful.
*/
if (db->is_jmp == DISAS_NEXT) {
db->is_jmp = DISAS_TOO_MANY;
}
return true;
}
static TCGOp *gen_tb_start(DisasContextBase *db, uint32_t cflags)
{
TCGv_i32 count = NULL;
TCGOp *icount_start_insn = NULL;
if ((cflags & CF_USE_ICOUNT) || !(cflags & CF_NOIRQ)) {
count = tcg_temp_new_i32();
tcg_gen_ld_i32(count, tcg_env,
offsetof(ArchCPU, parent_obj.neg.icount_decr.u32)
- offsetof(ArchCPU, env));
}
if (cflags & CF_USE_ICOUNT) {
/*
* We emit a sub with a dummy immediate argument. Keep the insn index
* of the sub so that we later (when we know the actual insn count)
* can update the argument with the actual insn count.
*/
tcg_gen_sub_i32(count, count, tcg_constant_i32(0));
icount_start_insn = tcg_last_op();
}
/*
* Emit the check against icount_decr.u32 to see if we should exit
* unless we suppress the check with CF_NOIRQ. If we are using
* icount and have suppressed interruption the higher level code
* should have ensured we don't run more instructions than the
* budget.
*/
if (cflags & CF_NOIRQ) {
tcg_ctx->exitreq_label = NULL;
} else {
tcg_ctx->exitreq_label = gen_new_label();
tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, tcg_ctx->exitreq_label);
}
if (cflags & CF_USE_ICOUNT) {
tcg_gen_st16_i32(count, tcg_env,
offsetof(ArchCPU, parent_obj.neg.icount_decr.u16.low)
- offsetof(ArchCPU, env));
}
/*
* cpu->neg.can_do_io is set automatically here at the beginning of
* each translation block. The cost is minimal, plus it would be
* very easy to forget doing it in the translator.
*/
set_can_do_io(db, db->max_insns == 1);
return icount_start_insn;
}
static void gen_tb_end(const TranslationBlock *tb, uint32_t cflags,
TCGOp *icount_start_insn, int num_insns)
{
if (cflags & CF_USE_ICOUNT) {
/*
* Update the num_insn immediate parameter now that we know
* the actual insn count.
*/
tcg_set_insn_param(icount_start_insn, 2,
tcgv_i32_arg(tcg_constant_i32(num_insns)));
}
if (tcg_ctx->exitreq_label) {
gen_set_label(tcg_ctx->exitreq_label);
tcg_gen_exit_tb(tb, TB_EXIT_REQUESTED);
}
}
bool translator_use_goto_tb(DisasContextBase *db, vaddr dest)
{ {
/* Suppress goto_tb if requested. */ /* Suppress goto_tb if requested. */
if (tb_cflags(db->tb) & CF_NO_GOTO_TB) { if (tb_cflags(db->tb) & CF_NO_GOTO_TB) {
@@ -123,12 +42,11 @@ bool translator_use_goto_tb(DisasContextBase *db, vaddr dest)
return ((db->pc_first ^ dest) & TARGET_PAGE_MASK) == 0; return ((db->pc_first ^ dest) & TARGET_PAGE_MASK) == 0;
} }
void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns, void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
vaddr pc, void *host_pc, const TranslatorOps *ops, target_ulong pc, void *host_pc,
DisasContextBase *db) const TranslatorOps *ops, DisasContextBase *db)
{ {
uint32_t cflags = tb_cflags(tb); uint32_t cflags = tb_cflags(tb);
TCGOp *icount_start_insn;
bool plugin_enabled; bool plugin_enabled;
/* Initialize DisasContext */ /* Initialize DisasContext */
@@ -137,25 +55,30 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
db->pc_next = pc; db->pc_next = pc;
db->is_jmp = DISAS_NEXT; db->is_jmp = DISAS_NEXT;
db->num_insns = 0; db->num_insns = 0;
db->max_insns = *max_insns; db->max_insns = max_insns;
db->singlestep_enabled = cflags & CF_SINGLE_STEP; db->singlestep_enabled = cflags & CF_SINGLE_STEP;
db->saved_can_do_io = -1;
db->host_addr[0] = host_pc; db->host_addr[0] = host_pc;
db->host_addr[1] = NULL; db->host_addr[1] = NULL;
#ifdef CONFIG_USER_ONLY
page_protect(pc);
#endif
ops->init_disas_context(db, cpu); ops->init_disas_context(db, cpu);
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */ tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
/* Reset the temp count so that we can identify leaks */
tcg_clear_temp_count();
/* Start translating. */ /* Start translating. */
icount_start_insn = gen_tb_start(db, cflags); gen_tb_start(db->tb);
ops->tb_start(db, cpu); ops->tb_start(db, cpu);
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */ tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
plugin_enabled = plugin_gen_tb_start(cpu, db, cflags & CF_MEMI_ONLY); plugin_enabled = plugin_gen_tb_start(cpu, db, cflags & CF_MEMI_ONLY);
db->plugin_enabled = plugin_enabled;
while (true) { while (true) {
*max_insns = ++db->num_insns; db->num_insns++;
ops->insn_start(db, cpu); ops->insn_start(db, cpu);
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */ tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
@@ -163,17 +86,19 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
plugin_gen_insn_start(cpu, db); plugin_gen_insn_start(cpu, db);
} }
/* /* Disassemble one instruction. The translate_insn hook should
* Disassemble one instruction. The translate_insn hook should update db->pc_next and db->is_jmp to indicate what should be
* update db->pc_next and db->is_jmp to indicate what should be done next -- either exiting this loop or locate the start of
* done next -- either exiting this loop or locate the start of the next instruction. */
* the next instruction. if (db->num_insns == db->max_insns && (cflags & CF_LAST_IO)) {
*/
if (db->num_insns == db->max_insns) {
/* Accept I/O on the last instruction. */ /* Accept I/O on the last instruction. */
set_can_do_io(db, true); gen_io_start();
}
ops->translate_insn(db, cpu); ops->translate_insn(db, cpu);
} else {
/* we should only see CF_MEMI_ONLY for io_recompile */
tcg_debug_assert(!(cflags & CF_MEMI_ONLY));
ops->translate_insn(db, cpu);
}
/* /*
* We can't instrument after instructions that change control * We can't instrument after instructions that change control
@@ -203,16 +128,17 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
/* Emit code to exit the TB, as indicated by db->is_jmp. */ /* Emit code to exit the TB, as indicated by db->is_jmp. */
ops->tb_stop(db, cpu); ops->tb_stop(db, cpu);
gen_tb_end(tb, cflags, icount_start_insn, db->num_insns); gen_tb_end(db->tb, db->num_insns);
if (plugin_enabled) { if (plugin_enabled) {
plugin_gen_tb_end(cpu, db->num_insns); plugin_gen_tb_end(cpu);
} }
/* The disas_log hook may use these values rather than recompute. */ /* The disas_log hook may use these values rather than recompute. */
tb->size = db->pc_next - db->pc_first; tb->size = db->pc_next - db->pc_first;
tb->icount = db->num_insns; tb->icount = db->num_insns;
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
&& qemu_log_in_addr_range(db->pc_first)) { && qemu_log_in_addr_range(db->pc_first)) {
FILE *logfile = qemu_log_trylock(); FILE *logfile = qemu_log_trylock();
@@ -223,13 +149,14 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
qemu_log_unlock(logfile); qemu_log_unlock(logfile);
} }
} }
#endif
} }
static void *translator_access(CPUArchState *env, DisasContextBase *db, static void *translator_access(CPUArchState *env, DisasContextBase *db,
vaddr pc, size_t len) target_ulong pc, size_t len)
{ {
void *host; void *host;
vaddr base, end; target_ulong base, end;
TranslationBlock *tb; TranslationBlock *tb;
tb = db->tb; tb = db->tb;
@@ -247,36 +174,14 @@ static void *translator_access(CPUArchState *env, DisasContextBase *db,
host = db->host_addr[1]; host = db->host_addr[1];
base = TARGET_PAGE_ALIGN(db->pc_first); base = TARGET_PAGE_ALIGN(db->pc_first);
if (host == NULL) { if (host == NULL) {
tb_page_addr_t page0, old_page1, new_page1; tb_page_addr_t phys_page =
get_page_addr_code_hostp(env, base, &db->host_addr[1]);
new_page1 = get_page_addr_code_hostp(env, base, &db->host_addr[1]); /* We cannot handle MMIO as second page. */
assert(phys_page != -1);
/* tb_set_page_addr1(tb, phys_page);
* If the second page is MMIO, treat as if the first page #ifdef CONFIG_USER_ONLY
* was MMIO as well, so that we do not cache the TB. page_protect(end);
*/ #endif
if (unlikely(new_page1 == -1)) {
tb_unlock_pages(tb);
tb_set_page_addr0(tb, -1);
return NULL;
}
/*
* If this is not the first time around, and page1 matches,
* then we already have the page locked. Alternately, we're
* not doing anything to prevent the PTE from changing, so
* we might wind up with a different page, requiring us to
* re-do the locking.
*/
old_page1 = tb_page_addr1(tb);
if (likely(new_page1 != old_page1)) {
page0 = tb_page_addr0(tb);
if (unlikely(old_page1 != -1)) {
tb_unlock_page1(page0, old_page1);
}
tb_set_page_addr1(tb, new_page1);
tb_lock_page1(page0, new_page1);
}
host = db->host_addr[1]; host = db->host_addr[1];
} }
@@ -290,27 +195,6 @@ static void *translator_access(CPUArchState *env, DisasContextBase *db,
return host + (pc - base); return host + (pc - base);
} }
static void plugin_insn_append(abi_ptr pc, const void *from, size_t size)
{
#ifdef CONFIG_PLUGIN
struct qemu_plugin_insn *insn = tcg_ctx->plugin_insn;
abi_ptr off;
if (insn == NULL) {
return;
}
off = pc - insn->vaddr;
if (off < insn->data->len) {
g_byte_array_set_size(insn->data, off);
} else if (off > insn->data->len) {
/* we have an unexpected gap */
g_assert_not_reached();
}
insn->data = g_byte_array_append(insn->data, from, size);
#endif
}
uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, abi_ptr pc) uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
{ {
uint8_t ret; uint8_t ret;
@@ -369,8 +253,3 @@ uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
plugin_insn_append(pc, &plug, sizeof(ret)); plugin_insn_append(pc, &plug, sizeof(ret));
return ret; return ret;
} }
void translator_fake_ldb(uint8_t insn8, abi_ptr pc)
{
plugin_insn_append(pc, &insn8, sizeof(insn8));
}

View File

@@ -1,6 +1,8 @@
#include "qemu/osdep.h" #include "qemu/osdep.h"
#include "hw/core/cpu.h" #include "hw/core/cpu.h"
#include "exec/replay-core.h" #include "sysemu/replay.h"
bool enable_cpu_pm = false;
void cpu_resume(CPUState *cpu) void cpu_resume(CPUState *cpu)
{ {
@@ -14,10 +16,6 @@ void qemu_init_vcpu(CPUState *cpu)
{ {
} }
void cpu_exec_reset_hold(CPUState *cpu)
{
}
/* User mode emulation does not support record/replay yet. */ /* User mode emulation does not support record/replay yet. */
bool replay_exception(void) bool replay_exception(void)

View File

@@ -29,8 +29,7 @@
#include "qemu/atomic128.h" #include "qemu/atomic128.h"
#include "trace/trace-root.h" #include "trace/trace-root.h"
#include "tcg/tcg-ldst.h" #include "tcg/tcg-ldst.h"
#include "internal-common.h" #include "internal.h"
#include "internal-target.h"
__thread uintptr_t helper_retaddr; __thread uintptr_t helper_retaddr;
@@ -145,7 +144,7 @@ typedef struct PageFlagsNode {
static IntervalTreeRoot pageflags_root; static IntervalTreeRoot pageflags_root;
static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last) static PageFlagsNode *pageflags_find(target_ulong start, target_long last)
{ {
IntervalTreeNode *n; IntervalTreeNode *n;
@@ -154,7 +153,7 @@ static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
} }
static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start, static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
target_ulong last) target_long last)
{ {
IntervalTreeNode *n; IntervalTreeNode *n;
@@ -481,22 +480,24 @@ static bool pageflags_set_clear(target_ulong start, target_ulong last,
* The flag PAGE_WRITE_ORG is positioned automatically depending * The flag PAGE_WRITE_ORG is positioned automatically depending
* on PAGE_WRITE. The mmap_lock should already be held. * on PAGE_WRITE. The mmap_lock should already be held.
*/ */
void page_set_flags(target_ulong start, target_ulong last, int flags) void page_set_flags(target_ulong start, target_ulong end, int flags)
{ {
target_ulong last;
bool reset = false; bool reset = false;
bool inval_tb = false; bool inval_tb = false;
/* This function should never be called with addresses outside the /* This function should never be called with addresses outside the
guest address space. If this assert fires, it probably indicates guest address space. If this assert fires, it probably indicates
a missing call to h2g_valid. */ a missing call to h2g_valid. */
assert(start <= last); assert(start < end);
assert(last <= GUEST_ADDR_MAX); assert(end - 1 <= GUEST_ADDR_MAX);
/* Only set PAGE_ANON with new mappings. */ /* Only set PAGE_ANON with new mappings. */
assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET)); assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
assert_memory_lock(); assert_memory_lock();
start &= TARGET_PAGE_MASK; start = start & TARGET_PAGE_MASK;
last |= ~TARGET_PAGE_MASK; end = TARGET_PAGE_ALIGN(end);
last = end - 1;
if (!(flags & PAGE_VALID)) { if (!(flags & PAGE_VALID)) {
flags = 0; flags = 0;
@@ -509,7 +510,7 @@ void page_set_flags(target_ulong start, target_ulong last, int flags)
} }
if (!flags || reset) { if (!flags || reset) {
page_reset_target_data(start, last); page_reset_target_data(start, end);
inval_tb |= pageflags_unset(start, last); inval_tb |= pageflags_unset(start, last);
} }
if (flags) { if (flags) {
@@ -517,23 +518,23 @@ void page_set_flags(target_ulong start, target_ulong last, int flags)
~(reset ? 0 : PAGE_STICKY)); ~(reset ? 0 : PAGE_STICKY));
} }
if (inval_tb) { if (inval_tb) {
tb_invalidate_phys_range(start, last); tb_invalidate_phys_range(start, end);
} }
} }
bool page_check_range(target_ulong start, target_ulong len, int flags) int page_check_range(target_ulong start, target_ulong len, int flags)
{ {
target_ulong last; target_ulong last;
int locked; /* tri-state: =0: unlocked, +1: global, -1: local */ int locked; /* tri-state: =0: unlocked, +1: global, -1: local */
bool ret; int ret;
if (len == 0) { if (len == 0) {
return true; /* trivial length */ return 0; /* trivial length */
} }
last = start + len - 1; last = start + len - 1;
if (last < start) { if (last < start) {
return false; /* wrap around */ return -1; /* wrap around */
} }
locked = have_mmap_lock(); locked = have_mmap_lock();
@@ -552,33 +553,33 @@ bool page_check_range(target_ulong start, target_ulong len, int flags)
p = pageflags_find(start, last); p = pageflags_find(start, last);
} }
if (!p) { if (!p) {
ret = false; /* entire region invalid */ ret = -1; /* entire region invalid */
break; break;
} }
} }
if (start < p->itree.start) { if (start < p->itree.start) {
ret = false; /* initial bytes invalid */ ret = -1; /* initial bytes invalid */
break; break;
} }
missing = flags & ~p->flags; missing = flags & ~p->flags;
if (missing & ~PAGE_WRITE) { if (missing & PAGE_READ) {
ret = false; /* page doesn't match */ ret = -1; /* page not readable */
break; break;
} }
if (missing & PAGE_WRITE) { if (missing & PAGE_WRITE) {
if (!(p->flags & PAGE_WRITE_ORG)) { if (!(p->flags & PAGE_WRITE_ORG)) {
ret = false; /* page not writable */ ret = -1; /* page not writable */
break; break;
} }
/* Asking about writable, but has been protected: undo. */ /* Asking about writable, but has been protected: undo. */
if (!page_unprotect(start, 0)) { if (!page_unprotect(start, 0)) {
ret = false; ret = -1;
break; break;
} }
/* TODO: page_unprotect should take a range, not a single page. */ /* TODO: page_unprotect should take a range, not a single page. */
if (last - start < TARGET_PAGE_SIZE) { if (last - start < TARGET_PAGE_SIZE) {
ret = true; /* ok */ ret = 0; /* ok */
break; break;
} }
start += TARGET_PAGE_SIZE; start += TARGET_PAGE_SIZE;
@@ -586,7 +587,7 @@ bool page_check_range(target_ulong start, target_ulong len, int flags)
} }
if (last <= p->itree.last) { if (last <= p->itree.last) {
ret = true; /* ok */ ret = 0; /* ok */
break; break;
} }
start = p->itree.last + 1; start = p->itree.last + 1;
@@ -599,54 +600,6 @@ bool page_check_range(target_ulong start, target_ulong len, int flags)
return ret; return ret;
} }
bool page_check_range_empty(target_ulong start, target_ulong last)
{
assert(last >= start);
assert_memory_lock();
return pageflags_find(start, last) == NULL;
}
target_ulong page_find_range_empty(target_ulong min, target_ulong max,
target_ulong len, target_ulong align)
{
target_ulong len_m1, align_m1;
assert(min <= max);
assert(max <= GUEST_ADDR_MAX);
assert(len != 0);
assert(is_power_of_2(align));
assert_memory_lock();
len_m1 = len - 1;
align_m1 = align - 1;
/* Iteratively narrow the search region. */
while (1) {
PageFlagsNode *p;
/* Align min and double-check there's enough space remaining. */
min = (min + align_m1) & ~align_m1;
if (min > max) {
return -1;
}
if (len_m1 > max - min) {
return -1;
}
p = pageflags_find(min, min + len_m1);
if (p == NULL) {
/* Found! */
return min;
}
if (max <= p->itree.last) {
/* Existing allocation fills the remainder of the search region. */
return -1;
}
/* Skip across existing allocation. */
min = p->itree.last + 1;
}
}
void page_protect(tb_page_addr_t address) void page_protect(tb_page_addr_t address)
{ {
PageFlagsNode *p; PageFlagsNode *p;
@@ -770,7 +723,7 @@ int page_unprotect(target_ulong address, uintptr_t pc)
return current_tb_invalidated ? 2 : 1; return current_tb_invalidated ? 2 : 1;
} }
static int probe_access_internal(CPUArchState *env, vaddr addr, static int probe_access_internal(CPUArchState *env, target_ulong addr,
int fault_size, MMUAccessType access_type, int fault_size, MMUAccessType access_type,
bool nonfault, uintptr_t ra) bool nonfault, uintptr_t ra)
{ {
@@ -794,10 +747,6 @@ static int probe_access_internal(CPUArchState *env, vaddr addr,
if (guest_addr_valid_untagged(addr)) { if (guest_addr_valid_untagged(addr)) {
int page_flags = page_get_flags(addr); int page_flags = page_get_flags(addr);
if (page_flags & acc_flag) { if (page_flags & acc_flag) {
if ((acc_flag == PAGE_READ || acc_flag == PAGE_WRITE)
&& cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
return TLB_MMIO;
}
return 0; /* success */ return 0; /* success */
} }
maperr = !(page_flags & PAGE_VALID); maperr = !(page_flags & PAGE_VALID);
@@ -812,31 +761,30 @@ static int probe_access_internal(CPUArchState *env, vaddr addr,
cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra); cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
} }
int probe_access_flags(CPUArchState *env, vaddr addr, int size, int probe_access_flags(CPUArchState *env, target_ulong addr,
MMUAccessType access_type, int mmu_idx, MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t ra) bool nonfault, void **phost, uintptr_t ra)
{ {
int flags; int flags;
g_assert(-(addr | TARGET_PAGE_MASK) >= size); flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra);
flags = probe_access_internal(env, addr, size, access_type, nonfault, ra); *phost = flags ? NULL : g2h(env_cpu(env), addr);
*phost = (flags & TLB_INVALID_MASK) ? NULL : g2h(env_cpu(env), addr);
return flags; return flags;
} }
void *probe_access(CPUArchState *env, vaddr addr, int size, void *probe_access(CPUArchState *env, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t ra) MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{ {
int flags; int flags;
g_assert(-(addr | TARGET_PAGE_MASK) >= size); g_assert(-(addr | TARGET_PAGE_MASK) >= size);
flags = probe_access_internal(env, addr, size, access_type, false, ra); flags = probe_access_internal(env, addr, size, access_type, false, ra);
g_assert((flags & ~TLB_MMIO) == 0); g_assert(flags == 0);
return size ? g2h(env_cpu(env), addr) : NULL; return size ? g2h(env_cpu(env), addr) : NULL;
} }
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr, tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
void **hostp) void **hostp)
{ {
int flags; int flags;
@@ -867,14 +815,15 @@ typedef struct TargetPageDataNode {
static IntervalTreeRoot targetdata_root; static IntervalTreeRoot targetdata_root;
void page_reset_target_data(target_ulong start, target_ulong last) void page_reset_target_data(target_ulong start, target_ulong end)
{ {
IntervalTreeNode *n, *next; IntervalTreeNode *n, *next;
target_ulong last;
assert_memory_lock(); assert_memory_lock();
start &= TARGET_PAGE_MASK; start = start & TARGET_PAGE_MASK;
last |= ~TARGET_PAGE_MASK; last = TARGET_PAGE_ALIGN(end) - 1;
for (n = interval_tree_iter_first(&targetdata_root, start, last), for (n = interval_tree_iter_first(&targetdata_root, start, last),
next = n ? interval_tree_iter_next(n, start, last) : NULL; next = n ? interval_tree_iter_next(n, start, last) : NULL;
@@ -937,188 +886,299 @@ void *page_get_target_data(target_ulong address)
return t->data[(page - region) >> TARGET_PAGE_BITS]; return t->data[(page - region) >> TARGET_PAGE_BITS];
} }
#else #else
void page_reset_target_data(target_ulong start, target_ulong last) { } void page_reset_target_data(target_ulong start, target_ulong end) { }
#endif /* TARGET_PAGE_DATA_SIZE */ #endif /* TARGET_PAGE_DATA_SIZE */
/* The system-mode versions of these helpers are in cputlb.c. */ /* The softmmu versions of these helpers are in cputlb.c. */
static void *cpu_mmu_lookup(CPUState *cpu, vaddr addr, /*
MemOp mop, uintptr_t ra, MMUAccessType type) * Verify that we have passed the correct MemOp to the correct function.
*
* We could present one function to target code, and dispatch based on
* the MemOp, but so far we have worked hard to avoid an indirect function
* call along the memory path.
*/
static void validate_memop(MemOpIdx oi, MemOp expected)
{ {
#ifdef CONFIG_DEBUG_TCG
MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
assert(have == expected);
#endif
}
void helper_unaligned_ld(CPUArchState *env, target_ulong addr)
{
cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC());
}
void helper_unaligned_st(CPUArchState *env, target_ulong addr)
{
cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC());
}
static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t ra, MMUAccessType type)
{
MemOp mop = get_memop(oi);
int a_bits = get_alignment_bits(mop); int a_bits = get_alignment_bits(mop);
void *ret; void *ret;
/* Enforce guest required alignment. */ /* Enforce guest required alignment. */
if (unlikely(addr & ((1 << a_bits) - 1))) { if (unlikely(addr & ((1 << a_bits) - 1))) {
cpu_loop_exit_sigbus(cpu, addr, type, ra); cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
} }
ret = g2h(cpu, addr); ret = g2h(env_cpu(env), addr);
set_helper_retaddr(ra); set_helper_retaddr(ra);
return ret; return ret;
} }
#include "ldst_atomicity.c.inc" uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType access_type)
{ {
void *haddr; void *haddr;
uint8_t ret; uint8_t ret;
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); validate_memop(oi, MO_UB);
haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, access_type); haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = ldub_p(haddr); ret = ldub_p(haddr);
clear_helper_retaddr(); clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret; return ret;
} }
static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
uintptr_t ra, MMUAccessType access_type) MemOpIdx oi, uintptr_t ra)
{ {
void *haddr; void *haddr;
uint16_t ret; uint16_t ret;
MemOp mop = get_memop(oi);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); validate_memop(oi, MO_BEUW);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type); haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = load_atom_2(cpu, ra, haddr, mop); ret = lduw_be_p(haddr);
clear_helper_retaddr(); clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
if (mop & MO_BSWAP) {
ret = bswap16(ret);
}
return ret; return ret;
} }
static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
uintptr_t ra, MMUAccessType access_type) MemOpIdx oi, uintptr_t ra)
{ {
void *haddr; void *haddr;
uint32_t ret; uint32_t ret;
MemOp mop = get_memop(oi);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); validate_memop(oi, MO_BEUL);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type); haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = load_atom_4(cpu, ra, haddr, mop); ret = ldl_be_p(haddr);
clear_helper_retaddr(); clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
if (mop & MO_BSWAP) {
ret = bswap32(ret);
}
return ret; return ret;
} }
static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
uintptr_t ra, MMUAccessType access_type) MemOpIdx oi, uintptr_t ra)
{ {
void *haddr; void *haddr;
uint64_t ret; uint64_t ret;
MemOp mop = get_memop(oi);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); validate_memop(oi, MO_BEUQ);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type); haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = load_atom_8(cpu, ra, haddr, mop); ret = ldq_be_p(haddr);
clear_helper_retaddr(); clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
if (mop & MO_BSWAP) {
ret = bswap64(ret);
}
return ret; return ret;
} }
static Int128 do_ld16_mmu(CPUState *cpu, abi_ptr addr, uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
uint16_t ret;
validate_memop(oi, MO_LEUW);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = lduw_le_p(haddr);
clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}
uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
uint32_t ret;
validate_memop(oi, MO_LEUL);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = ldl_le_p(haddr);
clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}
uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
uint64_t ret;
validate_memop(oi, MO_LEUQ);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = ldq_le_p(haddr);
clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}
Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra) MemOpIdx oi, uintptr_t ra)
{ {
void *haddr; void *haddr;
Int128 ret; Int128 ret;
MemOp mop = get_memop(oi);
tcg_debug_assert((mop & MO_SIZE) == MO_128); validate_memop(oi, MO_128 | MO_BE);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_LOAD); memcpy(&ret, haddr, 16);
ret = load_atom_16(cpu, ra, haddr, mop);
clear_helper_retaddr(); clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
if (mop & MO_BSWAP) { if (!HOST_BIG_ENDIAN) {
ret = bswap128(ret); ret = bswap128(ret);
} }
return ret; return ret;
} }
static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val, Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
Int128 ret;
validate_memop(oi, MO_128 | MO_LE);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
memcpy(&ret, haddr, 16);
clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
if (HOST_BIG_ENDIAN) {
ret = bswap128(ret);
}
return ret;
}
void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
MemOpIdx oi, uintptr_t ra) MemOpIdx oi, uintptr_t ra)
{ {
void *haddr; void *haddr;
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); validate_memop(oi, MO_UB);
haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, MMU_DATA_STORE); haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
stb_p(haddr, val); stb_p(haddr, val);
clear_helper_retaddr(); clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
} }
static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val, void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
MemOpIdx oi, uintptr_t ra) MemOpIdx oi, uintptr_t ra)
{ {
void *haddr; void *haddr;
MemOp mop = get_memop(oi);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); validate_memop(oi, MO_BEUW);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE); haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
stw_be_p(haddr, val);
if (mop & MO_BSWAP) {
val = bswap16(val);
}
store_atom_2(cpu, ra, haddr, mop, val);
clear_helper_retaddr(); clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
} }
static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val, void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
MemOpIdx oi, uintptr_t ra) MemOpIdx oi, uintptr_t ra)
{ {
void *haddr; void *haddr;
MemOp mop = get_memop(oi);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); validate_memop(oi, MO_BEUL);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE); haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
stl_be_p(haddr, val);
if (mop & MO_BSWAP) {
val = bswap32(val);
}
store_atom_4(cpu, ra, haddr, mop, val);
clear_helper_retaddr(); clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
} }
static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val, void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
MemOpIdx oi, uintptr_t ra) MemOpIdx oi, uintptr_t ra)
{ {
void *haddr; void *haddr;
MemOp mop = get_memop(oi);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); validate_memop(oi, MO_BEUQ);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE); haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
stq_be_p(haddr, val);
if (mop & MO_BSWAP) {
val = bswap64(val);
}
store_atom_8(cpu, ra, haddr, mop, val);
clear_helper_retaddr(); clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
} }
static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val, void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
MemOpIdx oi, uintptr_t ra) MemOpIdx oi, uintptr_t ra)
{ {
void *haddr; void *haddr;
MemOpIdx mop = get_memop(oi);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); validate_memop(oi, MO_LEUW);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE); haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
stw_le_p(haddr, val);
clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
if (mop & MO_BSWAP) { void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
validate_memop(oi, MO_LEUL);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
stl_le_p(haddr, val);
clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
validate_memop(oi, MO_LEUQ);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
stq_le_p(haddr, val);
clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr,
Int128 val, MemOpIdx oi, uintptr_t ra)
{
void *haddr;
validate_memop(oi, MO_128 | MO_BE);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
if (!HOST_BIG_ENDIAN) {
val = bswap128(val); val = bswap128(val);
} }
store_atom_16(cpu, ra, haddr, mop, val); memcpy(haddr, &val, 16);
clear_helper_retaddr(); clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr,
Int128 val, MemOpIdx oi, uintptr_t ra)
{
void *haddr;
validate_memop(oi, MO_128 | MO_LE);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
if (HOST_BIG_ENDIAN) {
val = bswap128(val);
}
memcpy(haddr, &val, 16);
clear_helper_retaddr();
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
} }
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr) uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
@@ -1161,70 +1221,16 @@ uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
return ret; return ret;
} }
uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
uint8_t ret;
haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
ret = ldub_p(haddr);
clear_helper_retaddr();
return ret;
}
uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
uint16_t ret;
haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
ret = lduw_p(haddr);
clear_helper_retaddr();
if (get_memop(oi) & MO_BSWAP) {
ret = bswap16(ret);
}
return ret;
}
uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
uint32_t ret;
haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
ret = ldl_p(haddr);
clear_helper_retaddr();
if (get_memop(oi) & MO_BSWAP) {
ret = bswap32(ret);
}
return ret;
}
uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
uint64_t ret;
haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
ret = ldq_p(haddr);
clear_helper_retaddr();
if (get_memop(oi) & MO_BSWAP) {
ret = bswap64(ret);
}
return ret;
}
#include "ldst_common.c.inc" #include "ldst_common.c.inc"
/* /*
* Do not allow unaligned operations to proceed. Return the host address. * Do not allow unaligned operations to proceed. Return the host address.
*
* @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
*/ */
static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
int size, uintptr_t retaddr) MemOpIdx oi, int size, int prot,
uintptr_t retaddr)
{ {
MemOp mop = get_memop(oi); MemOp mop = get_memop(oi);
int a_bits = get_alignment_bits(mop); int a_bits = get_alignment_bits(mop);
@@ -1232,15 +1238,16 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
/* Enforce guest required alignment. */ /* Enforce guest required alignment. */
if (unlikely(addr & ((1 << a_bits) - 1))) { if (unlikely(addr & ((1 << a_bits) - 1))) {
cpu_loop_exit_sigbus(cpu, addr, MMU_DATA_STORE, retaddr); MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr);
} }
/* Enforce qemu required alignment. */ /* Enforce qemu required alignment. */
if (unlikely(addr & (size - 1))) { if (unlikely(addr & (size - 1))) {
cpu_loop_exit_atomic(cpu, retaddr); cpu_loop_exit_atomic(env_cpu(env), retaddr);
} }
ret = g2h(cpu, addr); ret = g2h(env_cpu(env), addr);
set_helper_retaddr(retaddr); set_helper_retaddr(retaddr);
return ret; return ret;
} }
@@ -1270,7 +1277,7 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
#include "atomic_template.h" #include "atomic_template.h"
#endif #endif
#if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128 #if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#define DATA_SIZE 16 #define DATA_SIZE 16
#include "atomic_template.h" #include "atomic_template.h"
#endif #endif

View File

@@ -12,7 +12,6 @@
#include "qemu/error-report.h" #include "qemu/error-report.h"
#include "qemu/module.h" #include "qemu/module.h"
#include "qapi/error.h" #include "qapi/error.h"
#include "hw/xen/xen_native.h"
#include "hw/xen/xen-legacy-backend.h" #include "hw/xen/xen-legacy-backend.h"
#include "hw/xen/xen_pt.h" #include "hw/xen/xen_pt.h"
#include "chardev/char.h" #include "chardev/char.h"
@@ -24,18 +23,99 @@
#include "migration/global_state.h" #include "migration/global_state.h"
#include "hw/boards.h" #include "hw/boards.h"
//#define DEBUG_XEN
#ifdef DEBUG_XEN
#define DPRINTF(fmt, ...) \
do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
do { } while (0)
#endif
bool xen_allowed; bool xen_allowed;
xc_interface *xen_xc; xc_interface *xen_xc;
xenforeignmemory_handle *xen_fmem; xenforeignmemory_handle *xen_fmem;
xendevicemodel_handle *xen_dmod; xendevicemodel_handle *xen_dmod;
static void xenstore_record_dm_state(const char *state) static int store_dev_info(int domid, Chardev *cs, const char *string)
{
struct xs_handle *xs = NULL;
char *path = NULL;
char *newpath = NULL;
char *pts = NULL;
int ret = -1;
/* Only continue if we're talking to a pty. */
if (!CHARDEV_IS_PTY(cs)) {
return 0;
}
pts = cs->filename + 4;
/* We now have everything we need to set the xenstore entry. */
xs = xs_open(0);
if (xs == NULL) {
fprintf(stderr, "Could not contact XenStore\n");
goto out;
}
path = xs_get_domain_path(xs, domid);
if (path == NULL) {
fprintf(stderr, "xs_get_domain_path() error\n");
goto out;
}
newpath = realloc(path, (strlen(path) + strlen(string) +
strlen("/tty") + 1));
if (newpath == NULL) {
fprintf(stderr, "realloc error\n");
goto out;
}
path = newpath;
strcat(path, string);
strcat(path, "/tty");
if (!xs_write(xs, XBT_NULL, path, pts, strlen(pts))) {
fprintf(stderr, "xs_write for '%s' fail", string);
goto out;
}
ret = 0;
out:
free(path);
xs_close(xs);
return ret;
}
void xenstore_store_pv_console_info(int i, Chardev *chr)
{
if (i == 0) {
store_dev_info(xen_domid, chr, "/console");
} else {
char buf[32];
snprintf(buf, sizeof(buf), "/device/console/%d", i);
store_dev_info(xen_domid, chr, buf);
}
}
static void xenstore_record_dm_state(struct xs_handle *xs, const char *state)
{ {
char path[50]; char path[50];
if (xs == NULL) {
error_report("xenstore connection not initialized");
exit(1);
}
snprintf(path, sizeof (path), "device-model/%u/state", xen_domid); snprintf(path, sizeof (path), "device-model/%u/state", xen_domid);
if (!qemu_xen_xs_write(xenstore, XBT_NULL, path, state, strlen(state))) { /*
* This call may fail when running restricted so don't make it fatal in
* that case. Toolstacks should instead use QMP to listen for state changes.
*/
if (!xs_write(xs, XBT_NULL, path, state, strlen(state)) &&
!xen_domid_restrict) {
error_report("error recording dm state"); error_report("error recording dm state");
exit(1); exit(1);
} }
@@ -47,7 +127,7 @@ static void xen_change_state_handler(void *opaque, bool running,
{ {
if (running) { if (running) {
/* record state running */ /* record state running */
xenstore_record_dm_state("running"); xenstore_record_dm_state(xenstore, "running");
} }
} }
@@ -96,21 +176,11 @@ static int xen_init(MachineState *ms)
xc_interface_close(xen_xc); xc_interface_close(xen_xc);
return -1; return -1;
} }
/*
* The XenStore write would fail when running restricted so don't attempt
* it in that case. Toolstacks should instead use QMP to listen for state
* changes.
*/
if (!xen_domid_restrict) {
qemu_add_vm_change_state_handler(xen_change_state_handler, NULL); qemu_add_vm_change_state_handler(xen_change_state_handler, NULL);
}
/* /*
* opt out of system RAM being allocated by generic code * opt out of system RAM being allocated by generic code
*/ */
mc->default_ram_id = NULL; mc->default_ram_id = NULL;
xen_mode = XEN_ATTACH;
return 0; return 0;
} }

View File

@@ -222,7 +222,11 @@ static int alsa_poll_helper (snd_pcm_t *handle, struct pollhlp *hlp, int mask)
return -1; return -1;
} }
pfds = g_new0(struct pollfd, count); pfds = audio_calloc ("alsa_poll_helper", count, sizeof (*pfds));
if (!pfds) {
dolog ("Could not initialize poll mode\n");
return -1;
}
err = snd_pcm_poll_descriptors (handle, pfds, count); err = snd_pcm_poll_descriptors (handle, pfds, count);
if (err < 0) { if (err < 0) {
@@ -904,7 +908,7 @@ static void alsa_init_per_direction(AudiodevAlsaPerDirectionOptions *apdo)
} }
} }
static void *alsa_audio_init(Audiodev *dev, Error **errp) static void *alsa_audio_init(Audiodev *dev)
{ {
AudiodevAlsaOptions *aopts; AudiodevAlsaOptions *aopts;
assert(dev->driver == AUDIODEV_DRIVER_ALSA); assert(dev->driver == AUDIODEV_DRIVER_ALSA);
@@ -913,23 +917,28 @@ static void *alsa_audio_init(Audiodev *dev, Error **errp)
alsa_init_per_direction(aopts->in); alsa_init_per_direction(aopts->in);
alsa_init_per_direction(aopts->out); alsa_init_per_direction(aopts->out);
/* don't set has_* so alsa_open can identify it wasn't set by the user */ /*
* need to define them, as otherwise alsa produces no sound
* doesn't set has_* so alsa_open can identify it wasn't set by the user
*/
if (!dev->u.alsa.out->has_period_length) { if (!dev->u.alsa.out->has_period_length) {
/* 256 frames assuming 44100Hz */ /* 1024 frames assuming 44100Hz */
dev->u.alsa.out->period_length = 5805; dev->u.alsa.out->period_length = 1024 * 1000000 / 44100;
} }
if (!dev->u.alsa.out->has_buffer_length) { if (!dev->u.alsa.out->has_buffer_length) {
/* 4096 frames assuming 44100Hz */ /* 4096 frames assuming 44100Hz */
dev->u.alsa.out->buffer_length = 92880; dev->u.alsa.out->buffer_length = 4096ll * 1000000 / 44100;
} }
/*
* OptsVisitor sets unspecified optional fields to zero, but do not depend
* on it...
*/
if (!dev->u.alsa.in->has_period_length) { if (!dev->u.alsa.in->has_period_length) {
/* 256 frames assuming 44100Hz */ dev->u.alsa.in->period_length = 0;
dev->u.alsa.in->period_length = 5805;
} }
if (!dev->u.alsa.in->has_buffer_length) { if (!dev->u.alsa.in->has_buffer_length) {
/* 4096 frames assuming 44100Hz */ dev->u.alsa.in->buffer_length = 0;
dev->u.alsa.in->buffer_length = 92880;
} }
return dev; return dev;
@@ -960,6 +969,7 @@ static struct audio_driver alsa_audio_driver = {
.init = alsa_audio_init, .init = alsa_audio_init,
.fini = alsa_audio_fini, .fini = alsa_audio_fini,
.pcm_ops = &alsa_pcm_ops, .pcm_ops = &alsa_pcm_ops,
.can_be_default = 1,
.max_voices_out = INT_MAX, .max_voices_out = INT_MAX,
.max_voices_in = INT_MAX, .max_voices_in = INT_MAX,
.voice_size_out = sizeof (ALSAVoiceOut), .voice_size_out = sizeof (ALSAVoiceOut),

View File

@@ -26,7 +26,6 @@
#include "audio/audio.h" #include "audio/audio.h"
#include "monitor/hmp.h" #include "monitor/hmp.h"
#include "monitor/monitor.h" #include "monitor/monitor.h"
#include "qapi/error.h"
#include "qapi/qmp/qdict.h" #include "qapi/qmp/qdict.h"
static QLIST_HEAD (capture_list_head, CaptureState) capture_head; static QLIST_HEAD (capture_list_head, CaptureState) capture_head;
@@ -66,11 +65,10 @@ void hmp_wavcapture(Monitor *mon, const QDict *qdict)
int nchannels = qdict_get_try_int(qdict, "nchannels", 2); int nchannels = qdict_get_try_int(qdict, "nchannels", 2);
const char *audiodev = qdict_get_str(qdict, "audiodev"); const char *audiodev = qdict_get_str(qdict, "audiodev");
CaptureState *s; CaptureState *s;
Error *local_err = NULL; AudioState *as = audio_state_by_name(audiodev);
AudioState *as = audio_state_by_name(audiodev, &local_err);
if (!as) { if (!as) {
error_report_err(local_err); monitor_printf(mon, "Audiodev '%s' not found\n", audiodev);
return; return;
} }

File diff suppressed because it is too large Load Diff

View File

@@ -94,7 +94,7 @@ typedef struct QEMUAudioTimeStamp {
void AUD_vlog (const char *cap, const char *fmt, va_list ap) G_GNUC_PRINTF(2, 0); void AUD_vlog (const char *cap, const char *fmt, va_list ap) G_GNUC_PRINTF(2, 0);
void AUD_log (const char *cap, const char *fmt, ...) G_GNUC_PRINTF(2, 3); void AUD_log (const char *cap, const char *fmt, ...) G_GNUC_PRINTF(2, 3);
bool AUD_register_card (const char *name, QEMUSoundCard *card, Error **errp); void AUD_register_card (const char *name, QEMUSoundCard *card);
void AUD_remove_card (QEMUSoundCard *card); void AUD_remove_card (QEMUSoundCard *card);
CaptureVoiceOut *AUD_add_capture( CaptureVoiceOut *AUD_add_capture(
AudioState *s, AudioState *s,
@@ -169,14 +169,12 @@ void audio_sample_from_uint64(void *samples, int pos,
uint64_t left, uint64_t right); uint64_t left, uint64_t right);
void audio_define(Audiodev *audio); void audio_define(Audiodev *audio);
void audio_define_default(Audiodev *dev, Error **errp);
void audio_parse_option(const char *opt); void audio_parse_option(const char *opt);
void audio_create_default_audiodevs(void); bool audio_init_audiodevs(void);
void audio_init_audiodevs(void);
void audio_help(void); void audio_help(void);
void audio_legacy_help(void);
AudioState *audio_state_by_name(const char *name, Error **errp); AudioState *audio_state_by_name(const char *name);
AudioState *audio_get_default_audio_state(Error **errp);
const char *audio_get_id(QEMUSoundCard *card); const char *audio_get_id(QEMUSoundCard *card);
#define DEFINE_AUDIO_PROPERTIES(_s, _f) \ #define DEFINE_AUDIO_PROPERTIES(_s, _f) \

View File

@@ -58,7 +58,7 @@ typedef struct SWVoiceCap SWVoiceCap;
typedef struct STSampleBuffer { typedef struct STSampleBuffer {
size_t pos, size; size_t pos, size;
st_sample *buffer; st_sample samples[];
} STSampleBuffer; } STSampleBuffer;
typedef struct HWVoiceOut { typedef struct HWVoiceOut {
@@ -71,7 +71,7 @@ typedef struct HWVoiceOut {
f_sample *clip; f_sample *clip;
uint64_t ts_helper; uint64_t ts_helper;
STSampleBuffer mix_buf; STSampleBuffer *mix_buf;
void *buf_emul; void *buf_emul;
size_t pos_emul, pending_emul, size_emul; size_t pos_emul, pending_emul, size_emul;
@@ -93,7 +93,7 @@ typedef struct HWVoiceIn {
size_t total_samples_captured; size_t total_samples_captured;
uint64_t ts_helper; uint64_t ts_helper;
STSampleBuffer conv_buf; STSampleBuffer *conv_buf;
void *buf_emul; void *buf_emul;
size_t pos_emul, pending_emul, size_emul; size_t pos_emul, pending_emul, size_emul;
@@ -108,7 +108,8 @@ struct SWVoiceOut {
AudioState *s; AudioState *s;
struct audio_pcm_info info; struct audio_pcm_info info;
t_sample *conv; t_sample *conv;
STSampleBuffer resample_buf; int64_t ratio;
struct st_sample *buf;
void *rate; void *rate;
size_t total_hw_samples_mixed; size_t total_hw_samples_mixed;
int active; int active;
@@ -125,9 +126,10 @@ struct SWVoiceIn {
AudioState *s; AudioState *s;
int active; int active;
struct audio_pcm_info info; struct audio_pcm_info info;
int64_t ratio;
void *rate; void *rate;
size_t total_hw_samples_acquired; size_t total_hw_samples_acquired;
STSampleBuffer resample_buf; struct st_sample *buf;
f_sample *clip; f_sample *clip;
HWVoiceIn *hw; HWVoiceIn *hw;
char *name; char *name;
@@ -140,16 +142,17 @@ typedef struct audio_driver audio_driver;
struct audio_driver { struct audio_driver {
const char *name; const char *name;
const char *descr; const char *descr;
void *(*init) (Audiodev *, Error **); void *(*init) (Audiodev *);
void (*fini) (void *); void (*fini) (void *);
#ifdef CONFIG_GIO #ifdef CONFIG_GIO
void (*set_dbus_server) (AudioState *s, GDBusObjectManagerServer *manager, bool p2p); void (*set_dbus_server) (AudioState *s, GDBusObjectManagerServer *manager);
#endif #endif
struct audio_pcm_ops *pcm_ops; struct audio_pcm_ops *pcm_ops;
int can_be_default;
int max_voices_out; int max_voices_out;
int max_voices_in; int max_voices_in;
size_t voice_size_out; int voice_size_out;
size_t voice_size_in; int voice_size_in;
QLIST_ENTRY(audio_driver) next; QLIST_ENTRY(audio_driver) next;
}; };
@@ -242,11 +245,13 @@ extern const struct mixeng_volume nominal_volume;
extern const char *audio_prio_list[]; extern const char *audio_prio_list[];
void audio_driver_register(audio_driver *drv); void audio_driver_register(audio_driver *drv);
audio_driver *audio_driver_lookup(const char *name);
void audio_pcm_init_info (struct audio_pcm_info *info, struct audsettings *as); void audio_pcm_init_info (struct audio_pcm_info *info, struct audsettings *as);
void audio_pcm_info_clear_buf (struct audio_pcm_info *info, void *buf, int len); void audio_pcm_info_clear_buf (struct audio_pcm_info *info, void *buf, int len);
int audio_bug (const char *funcname, int cond); int audio_bug (const char *funcname, int cond);
void *audio_calloc (const char *funcname, int nmemb, size_t size);
void audio_run(AudioState *s, const char *msg); void audio_run(AudioState *s, const char *msg);
@@ -289,12 +294,18 @@ static inline size_t audio_ring_posb(size_t pos, size_t dist, size_t len)
#define ldebug(fmt, ...) (void)0 #define ldebug(fmt, ...) (void)0
#endif #endif
#define AUDIO_STRINGIFY_(n) #n
#define AUDIO_STRINGIFY(n) AUDIO_STRINGIFY_(n)
typedef struct AudiodevListEntry { typedef struct AudiodevListEntry {
Audiodev *dev; Audiodev *dev;
QSIMPLEQ_ENTRY(AudiodevListEntry) next; QSIMPLEQ_ENTRY(AudiodevListEntry) next;
} AudiodevListEntry; } AudiodevListEntry;
typedef QSIMPLEQ_HEAD(, AudiodevListEntry) AudiodevListHead; typedef QSIMPLEQ_HEAD(, AudiodevListEntry) AudiodevListHead;
AudiodevListHead audio_handle_legacy_opts(void);
void audio_free_audiodev_list(AudiodevListHead *head);
void audio_create_pdos(Audiodev *dev); void audio_create_pdos(Audiodev *dev);
AudiodevPerDirectionOptions *audio_get_pdo_in(Audiodev *dev); AudiodevPerDirectionOptions *audio_get_pdo_in(Audiodev *dev);

591
audio/audio_legacy.c Normal file
View File

@@ -0,0 +1,591 @@
/*
* QEMU Audio subsystem: legacy configuration handling
*
* Copyright (c) 2015-2019 Zoltán Kővágó <DirtY.iCE.hu@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "qemu/osdep.h"
#include "audio.h"
#include "audio_int.h"
#include "qemu/cutils.h"
#include "qemu/timer.h"
#include "qapi/error.h"
#include "qapi/qapi-visit-audio.h"
#include "qapi/visitor-impl.h"
#define AUDIO_CAP "audio-legacy"
#include "audio_int.h"
/*
 * Parse @str as an unsigned decimal number that must fit in 32 bits.
 * Legacy configuration treats bad input as fatal: log and exit(1).
 */
static uint32_t toui32(const char *str)
{
    unsigned long long ret;
    bool bad = parse_uint_full(str, &ret, 10) != 0 || ret > UINT32_MAX;

    if (bad) {
        dolog("Invalid integer value `%s'\n", str);
        exit(1);
    }
    return (uint32_t)ret;
}
/* helper functions to convert env variables */
/* Read env var @env as a boolean (any non-zero integer => true). */
static void get_bool(const char *env, bool *dst, bool *has_dst)
{
    const char *val = getenv(env);

    if (!val) {
        return;
    }
    *dst = (toui32(val) != 0);
    *has_dst = true;
}
/* Read env var @env as an unsigned 32-bit integer. */
static void get_int(const char *env, uint32_t *dst, bool *has_dst)
{
    const char *val = getenv(env);

    if (!val) {
        return;
    }
    *dst = toui32(val);
    *has_dst = true;
}
/* Read env var @env into *dst, replacing (and freeing) any old value. */
static void get_str(const char *env, char **dst)
{
    const char *val = getenv(env);

    if (!val) {
        return;
    }
    g_free(*dst);
    *dst = g_strdup(val);
}
/*
 * Read env var @env as an audio format name, matched case-insensitively
 * against the AudioFormat QAPI enum string table.  An unrecognized
 * value is fatal (legacy behaviour).
 */
static void get_fmt(const char *env, AudioFormat *dst, bool *has_dst)
{
    const char *val = getenv(env);
    if (val) {
        size_t i;
        /*
         * Bug fix: the loop condition must bound i.  The previous
         * condition was the constant `AudioFormat_lookup.size`, so an
         * unknown format name read past the end of the lookup array
         * (undefined behaviour) instead of reaching the error path.
         */
        for (i = 0; i < AudioFormat_lookup.size; ++i) {
            if (strcasecmp(val, AudioFormat_lookup.array[i]) == 0) {
                *dst = i;
                *has_dst = true;
                return;
            }
        }

        dolog("Invalid audio format `%s'\n", val);
        exit(1);
    }
}
#if defined(CONFIG_AUDIO_ALSA) || defined(CONFIG_AUDIO_DSOUND)
/* Read env var @env holding milliseconds and store it as microseconds. */
static void get_millis_to_usecs(const char *env, uint32_t *dst, bool *has_dst)
{
    const char *val = getenv(env);

    if (!val) {
        return;
    }
    *dst = toui32(val) * 1000;
    *has_dst = true;
}
#endif
#if defined(CONFIG_AUDIO_ALSA) || defined(CONFIG_AUDIO_COREAUDIO) || \
defined(CONFIG_AUDIO_PA) || defined(CONFIG_AUDIO_SDL) || \
defined(CONFIG_AUDIO_DSOUND) || defined(CONFIG_AUDIO_OSS)
/*
 * Convert a frame count to microseconds at the configured sample rate
 * (default 44100 Hz), rounding to nearest.
 *
 * Bug fix: the multiplication is done in 64 bits.  `frames * 1000000`
 * in uint32_t arithmetic overflows for frame counts above ~4294, which
 * environment-supplied buffer sizes can exceed.
 */
static uint32_t frames_to_usecs(uint32_t frames,
                                AudiodevPerDirectionOptions *pdo)
{
    uint32_t freq = pdo->has_frequency ? pdo->frequency : 44100;
    return ((uint64_t)frames * 1000000 + freq / 2) / freq;
}
#endif
#ifdef CONFIG_AUDIO_COREAUDIO
/* Read env var @env as a frame count and store it as microseconds,
 * using the sample rate configured in @pdo. */
static void get_frames_to_usecs(const char *env, uint32_t *dst, bool *has_dst,
                                AudiodevPerDirectionOptions *pdo)
{
    const char *val = getenv(env);
    if (val) {
        *dst = frames_to_usecs(toui32(val), pdo);
        *has_dst = true;
    }
}
#endif
#if defined(CONFIG_AUDIO_PA) || defined(CONFIG_AUDIO_SDL) || \
defined(CONFIG_AUDIO_DSOUND) || defined(CONFIG_AUDIO_OSS)
/*
 * Convert an interleaved sample count to microseconds, assuming the
 * channel count configured in @pdo (default 2).
 * NOTE(review): a user-supplied channels value of 0 would divide by
 * zero here -- TODO confirm that is rejected before this point.
 */
static uint32_t samples_to_usecs(uint32_t samples,
                                 AudiodevPerDirectionOptions *pdo)
{
    uint32_t channels = pdo->has_channels ? pdo->channels : 2;
    return frames_to_usecs(samples / channels, pdo);
}
#endif
#if defined(CONFIG_AUDIO_PA) || defined(CONFIG_AUDIO_SDL)
/* Read env var @env as an interleaved sample count and store it as
 * microseconds, using the rate/channels configured in @pdo. */
static void get_samples_to_usecs(const char *env, uint32_t *dst, bool *has_dst,
                                 AudiodevPerDirectionOptions *pdo)
{
    const char *val = getenv(env);
    if (val) {
        *dst = samples_to_usecs(toui32(val), pdo);
        *has_dst = true;
    }
}
#endif
#if defined(CONFIG_AUDIO_DSOUND) || defined(CONFIG_AUDIO_OSS)
/* Convert a byte count to microseconds using the sample format
 * configured in @pdo (default S16) plus its rate and channel count. */
static uint32_t bytes_to_usecs(uint32_t bytes, AudiodevPerDirectionOptions *pdo)
{
    AudioFormat fmt = pdo->has_format ? pdo->format : AUDIO_FORMAT_S16;
    uint32_t bytes_per_sample = audioformat_bytes_per_sample(fmt);
    return samples_to_usecs(bytes / bytes_per_sample, pdo);
}
/* Read env var @env as a byte count and store it as microseconds,
 * using the format/rate/channels configured in @pdo. */
static void get_bytes_to_usecs(const char *env, uint32_t *dst, bool *has_dst,
                               AudiodevPerDirectionOptions *pdo)
{
    const char *val = getenv(env);
    if (val) {
        *dst = bytes_to_usecs(toui32(val), pdo);
        *has_dst = true;
    }
}
#endif
/* backend specific functions */
#ifdef CONFIG_AUDIO_ALSA
/* ALSA */
/*
 * Translate the legacy per-direction ALSA environment variables
 * (QEMU_ALSA_<prefix>TRY_POLL/DEV/SIZE_IN_USEC/PERIOD_SIZE/BUFFER_SIZE)
 * into @apdo.  Sizes are stored in microseconds; unless SIZE_IN_USEC
 * was set, PERIOD_SIZE/BUFFER_SIZE are interpreted as frame counts and
 * converted.
 *
 * NOTE(review): buf is 64 bytes and holds prefix + suffix with no bound
 * check; callers pass short literal prefixes -- TODO confirm no longer
 * prefix is ever introduced.
 */
static void handle_alsa_per_direction(
    AudiodevAlsaPerDirectionOptions *apdo, const char *prefix)
{
    char buf[64];
    size_t len = strlen(prefix);
    bool size_in_usecs = false;
    bool dummy;

    memcpy(buf, prefix, len);
    strcpy(buf + len, "TRY_POLL");
    get_bool(buf, &apdo->try_poll, &apdo->has_try_poll);

    strcpy(buf + len, "DEV");
    get_str(buf, &apdo->dev);

    strcpy(buf + len, "SIZE_IN_USEC");
    get_bool(buf, &size_in_usecs, &dummy);

    strcpy(buf + len, "PERIOD_SIZE");
    get_int(buf, &apdo->period_length, &apdo->has_period_length);
    if (apdo->has_period_length && !size_in_usecs) {
        /* value was a frame count, not microseconds: convert */
        apdo->period_length = frames_to_usecs(
            apdo->period_length,
            qapi_AudiodevAlsaPerDirectionOptions_base(apdo));
    }

    strcpy(buf + len, "BUFFER_SIZE");
    get_int(buf, &apdo->buffer_length, &apdo->has_buffer_length);
    if (apdo->has_buffer_length && !size_in_usecs) {
        /* same conversion as the period length above */
        apdo->buffer_length = frames_to_usecs(
            apdo->buffer_length,
            qapi_AudiodevAlsaPerDirectionOptions_base(apdo));
    }
}
/* Translate all legacy QEMU_ALSA_* environment variables into @dev. */
static void handle_alsa(Audiodev *dev)
{
    AudiodevAlsaOptions *aopt = &dev->u.alsa;
    handle_alsa_per_direction(aopt->in, "QEMU_ALSA_ADC_");
    handle_alsa_per_direction(aopt->out, "QEMU_ALSA_DAC_");

    /* threshold env var is in milliseconds, stored as microseconds */
    get_millis_to_usecs("QEMU_ALSA_THRESHOLD",
                        &aopt->threshold, &aopt->has_threshold);
}
#endif
#ifdef CONFIG_AUDIO_COREAUDIO
/* coreaudio */
/* Translate the legacy QEMU_COREAUDIO_* environment variables into
 * @dev (output direction only). */
static void handle_coreaudio(Audiodev *dev)
{
    /* buffer size env var is a frame count, stored as microseconds */
    get_frames_to_usecs(
        "QEMU_COREAUDIO_BUFFER_SIZE",
        &dev->u.coreaudio.out->buffer_length,
        &dev->u.coreaudio.out->has_buffer_length,
        qapi_AudiodevCoreaudioPerDirectionOptions_base(dev->u.coreaudio.out));
    get_int("QEMU_COREAUDIO_BUFFER_COUNT",
            &dev->u.coreaudio.out->buffer_count,
            &dev->u.coreaudio.out->has_buffer_count);
}
#endif
#ifdef CONFIG_AUDIO_DSOUND
/* dsound */
/* Translate the legacy QEMU_DSOUND_* environment variables into @dev.
 * Buffer size env vars are byte counts, stored as microseconds. */
static void handle_dsound(Audiodev *dev)
{
    get_millis_to_usecs("QEMU_DSOUND_LATENCY_MILLIS",
                        &dev->u.dsound.latency, &dev->u.dsound.has_latency);
    get_bytes_to_usecs("QEMU_DSOUND_BUFSIZE_OUT",
                       &dev->u.dsound.out->buffer_length,
                       &dev->u.dsound.out->has_buffer_length,
                       dev->u.dsound.out);
    get_bytes_to_usecs("QEMU_DSOUND_BUFSIZE_IN",
                       &dev->u.dsound.in->buffer_length,
                       &dev->u.dsound.in->has_buffer_length,
                       dev->u.dsound.in);
}
#endif
#ifdef CONFIG_AUDIO_OSS
/* OSS */
/* Translate the legacy per-direction OSS environment variables into
 * @opdo; fragment size is a byte count, stored as microseconds. */
static void handle_oss_per_direction(
    AudiodevOssPerDirectionOptions *opdo, const char *try_poll_env,
    const char *dev_env)
{
    get_bool(try_poll_env, &opdo->try_poll, &opdo->has_try_poll);
    get_str(dev_env, &opdo->dev);

    get_bytes_to_usecs("QEMU_OSS_FRAGSIZE",
                       &opdo->buffer_length, &opdo->has_buffer_length,
                       qapi_AudiodevOssPerDirectionOptions_base(opdo));
    get_int("QEMU_OSS_NFRAGS", &opdo->buffer_count,
            &opdo->has_buffer_count);
}
/* Translate all legacy QEMU_OSS_* environment variables into @dev. */
static void handle_oss(Audiodev *dev)
{
    AudiodevOssOptions *oopt = &dev->u.oss;
    handle_oss_per_direction(oopt->in, "QEMU_AUDIO_ADC_TRY_POLL",
                             "QEMU_OSS_ADC_DEV");
    handle_oss_per_direction(oopt->out, "QEMU_AUDIO_DAC_TRY_POLL",
                             "QEMU_OSS_DAC_DEV");

    get_bool("QEMU_OSS_MMAP", &oopt->try_mmap, &oopt->has_try_mmap);
    get_bool("QEMU_OSS_EXCLUSIVE", &oopt->exclusive, &oopt->has_exclusive);
    get_int("QEMU_OSS_POLICY", &oopt->dsp_policy, &oopt->has_dsp_policy);
}
#endif
#ifdef CONFIG_AUDIO_PA
/* pulseaudio */
/* Map one legacy PulseAudio env var to the per-direction stream name. */
static void handle_pa_per_direction(
    AudiodevPaPerDirectionOptions *ppdo, const char *env)
{
    get_str(env, &ppdo->name);
}
/* Translate the legacy QEMU_PA_* environment variables into @dev.
 * QEMU_PA_SAMPLES applies to both directions; it is a sample count,
 * stored as microseconds. */
static void handle_pa(Audiodev *dev)
{
    handle_pa_per_direction(dev->u.pa.in, "QEMU_PA_SOURCE");
    handle_pa_per_direction(dev->u.pa.out, "QEMU_PA_SINK");

    get_samples_to_usecs(
        "QEMU_PA_SAMPLES", &dev->u.pa.in->buffer_length,
        &dev->u.pa.in->has_buffer_length,
        qapi_AudiodevPaPerDirectionOptions_base(dev->u.pa.in));
    get_samples_to_usecs(
        "QEMU_PA_SAMPLES", &dev->u.pa.out->buffer_length,
        &dev->u.pa.out->has_buffer_length,
        qapi_AudiodevPaPerDirectionOptions_base(dev->u.pa.out));

    get_str("QEMU_PA_SERVER", &dev->u.pa.server);
}
#endif
#ifdef CONFIG_AUDIO_SDL
/* SDL */
/* Translate the legacy QEMU_SDL_SAMPLES env var into @dev. */
static void handle_sdl(Audiodev *dev)
{
    /* SDL is output only */
    get_samples_to_usecs("QEMU_SDL_SAMPLES", &dev->u.sdl.out->buffer_length,
                         &dev->u.sdl.out->has_buffer_length,
                         qapi_AudiodevSdlPerDirectionOptions_base(dev->u.sdl.out));
}
#endif
/* wav */
/* Translate the legacy QEMU_WAV_* environment variables into @dev
 * (wav backend is output only). */
static void handle_wav(Audiodev *dev)
{
    get_int("QEMU_WAV_FREQUENCY",
            &dev->u.wav.out->frequency, &dev->u.wav.out->has_frequency);
    get_fmt("QEMU_WAV_FORMAT", &dev->u.wav.out->format,
            &dev->u.wav.out->has_format);
    get_int("QEMU_WAV_DAC_FIXED_CHANNELS",
            &dev->u.wav.out->channels, &dev->u.wav.out->has_channels);
    get_str("QEMU_WAV_PATH", &dev->u.wav.path);
}
/* general */
/*
 * Translate the backend-independent legacy per-direction env vars
 * (<prefix>FIXED_SETTINGS/FIXED_FREQ/FIXED_FMT/FIXED_CHANNELS/VOICES)
 * into @pdo.
 *
 * NOTE(review): buf is 64 bytes and holds prefix + suffix with no
 * bound check; callers pass short literal prefixes -- TODO confirm.
 */
static void handle_per_direction(
    AudiodevPerDirectionOptions *pdo, const char *prefix)
{
    char buf[64];
    size_t len = strlen(prefix);
    memcpy(buf, prefix, len);

    strcpy(buf + len, "FIXED_SETTINGS");
    get_bool(buf, &pdo->fixed_settings, &pdo->has_fixed_settings);

    strcpy(buf + len, "FIXED_FREQ");
    get_int(buf, &pdo->frequency, &pdo->has_frequency);

    strcpy(buf + len, "FIXED_FMT");
    get_fmt(buf, &pdo->format, &pdo->has_format);

    strcpy(buf + len, "FIXED_CHANNELS");
    get_int(buf, &pdo->channels, &pdo->has_channels);

    strcpy(buf + len, "VOICES");
    get_int(buf, &pdo->voices, &pdo->has_voices);
}
/*
 * Build one Audiodev list entry for driver @drvname from the legacy
 * QEMU_AUDIO_* / backend-specific environment variables.
 *
 * @drvname must name a valid AudiodevDriver enum value (enforced by
 * error_abort).  The caller owns the returned entry.
 */
static AudiodevListEntry *legacy_opt(const char *drvname)
{
    AudiodevListEntry *e = g_new0(AudiodevListEntry, 1);
    e->dev = g_new0(Audiodev, 1);
    e->dev->id = g_strdup(drvname);
    e->dev->driver = qapi_enum_parse(
        &AudiodevDriver_lookup, drvname, -1, &error_abort);

    audio_create_pdos(e->dev);

    /* generic options shared by every backend */
    handle_per_direction(audio_get_pdo_in(e->dev), "QEMU_AUDIO_ADC_");
    handle_per_direction(audio_get_pdo_out(e->dev), "QEMU_AUDIO_DAC_");

    /* Original description: Timer period in HZ (0 - use lowest possible) */
    get_int("QEMU_AUDIO_TIMER_PERIOD",
            &e->dev->timer_period, &e->dev->has_timer_period);
    if (e->dev->has_timer_period && e->dev->timer_period) {
        /* convert a frequency in Hz into a period in microseconds */
        e->dev->timer_period = NANOSECONDS_PER_SECOND / 1000 /
                               e->dev->timer_period;
    }

    /* backend-specific env vars, compiled in per CONFIG_AUDIO_* */
    switch (e->dev->driver) {
#ifdef CONFIG_AUDIO_ALSA
    case AUDIODEV_DRIVER_ALSA:
        handle_alsa(e->dev);
        break;
#endif

#ifdef CONFIG_AUDIO_COREAUDIO
    case AUDIODEV_DRIVER_COREAUDIO:
        handle_coreaudio(e->dev);
        break;
#endif

#ifdef CONFIG_AUDIO_DSOUND
    case AUDIODEV_DRIVER_DSOUND:
        handle_dsound(e->dev);
        break;
#endif

#ifdef CONFIG_AUDIO_OSS
    case AUDIODEV_DRIVER_OSS:
        handle_oss(e->dev);
        break;
#endif

#ifdef CONFIG_AUDIO_PA
    case AUDIODEV_DRIVER_PA:
        handle_pa(e->dev);
        break;
#endif

#ifdef CONFIG_AUDIO_SDL
    case AUDIODEV_DRIVER_SDL:
        handle_sdl(e->dev);
        break;
#endif

    case AUDIODEV_DRIVER_WAV:
        handle_wav(e->dev);
        break;

    default:
        /* drivers without legacy env vars need no extra handling */
        break;
    }

    return e;
}
/*
 * Build the audiodev list implied by the legacy environment variables.
 *
 * If QEMU_AUDIO_DRV names a driver, the list holds that single entry
 * (an unknown name is fatal).  Otherwise one entry is generated for
 * every driver in audio_prio_list that is built in and flagged
 * can_be_default; an empty result is an internal error.
 */
AudiodevListHead audio_handle_legacy_opts(void)
{
    const char *drvname = getenv("QEMU_AUDIO_DRV");
    AudiodevListHead head = QSIMPLEQ_HEAD_INITIALIZER(head);

    if (drvname) {
        AudiodevListEntry *e;
        audio_driver *driver = audio_driver_lookup(drvname);
        if (!driver) {
            dolog("Unknown audio driver `%s'\n", drvname);
            exit(1);
        }
        e = legacy_opt(drvname);
        QSIMPLEQ_INSERT_TAIL(&head, e, next);
    } else {
        for (int i = 0; audio_prio_list[i]; i++) {
            audio_driver *driver = audio_driver_lookup(audio_prio_list[i]);

            if (driver && driver->can_be_default) {
                AudiodevListEntry *e = legacy_opt(driver->name);
                QSIMPLEQ_INSERT_TAIL(&head, e, next);
            }
        }
        if (QSIMPLEQ_EMPTY(&head)) {
            dolog("Internal error: no default audio driver available\n");
            exit(1);
        }
    }

    return head;
}
/* visitor to print -audiodev option */
typedef struct {
    Visitor visitor;  /* base class; must stay first so casts work */

    bool comma;       /* a value was already printed: prefix next with ',' */
    GList *path;      /* stack of struct names forming the "a.b." key prefix */
} LegacyPrintVisitor;
/* Push @name onto the path so nested keys print as "parent.child=". */
static bool lv_start_struct(Visitor *v, const char *name, void **obj,
                            size_t size, Error **errp)
{
    LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v;
    lv->path = g_list_append(lv->path, g_strdup(name));
    return true;
}
/* Pop the innermost struct name pushed by lv_start_struct(). */
static void lv_end_struct(Visitor *v, void **obj)
{
    LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v;
    lv->path = g_list_delete_link(lv->path, g_list_last(lv->path));
}
/*
 * Emit the "outer.inner.name=" key prefix for the next value, preceded
 * by a comma for every value after the first.
 */
static void lv_print_key(Visitor *v, const char *name)
{
    LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v;
    GList *e;

    if (!lv->comma) {
        lv->comma = true;
    } else {
        putchar(',');
    }

    for (e = lv->path; e != NULL; e = e->next) {
        if (e->data != NULL) {
            printf("%s.", (const char *) e->data);
        }
    }
    printf("%s=", name);
}
/* Print a signed integer option as "key=value". */
static bool lv_type_int64(Visitor *v, const char *name, int64_t *obj,
                          Error **errp)
{
    lv_print_key(v, name);
    printf("%" PRIi64, *obj);
    return true;
}
/* Print an unsigned integer option as "key=value". */
static bool lv_type_uint64(Visitor *v, const char *name, uint64_t *obj,
                           Error **errp)
{
    lv_print_key(v, name);
    printf("%" PRIu64, *obj);
    return true;
}
/* Print a boolean option as "key=on" or "key=off". */
static bool lv_type_bool(Visitor *v, const char *name, bool *obj, Error **errp)
{
    lv_print_key(v, name);
    printf("%s", *obj ? "on" : "off");
    return true;
}
/*
 * Print a string option as "key=value", doubling any ',' characters so
 * the emitted -audiodev option parses back correctly.
 */
static bool lv_type_str(Visitor *v, const char *name, char **obj, Error **errp)
{
    const char *p = *obj;

    lv_print_key(v, name);

    for (; *p; p++) {
        if (*p == ',') {
            putchar(',');
        }
        putchar(*p);
    }
    return true;
}
/* Sanity check: every start_struct must have been matched by an
 * end_struct, leaving the path stack empty. */
static void lv_complete(Visitor *v, void *opaque)
{
    LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v;
    assert(lv->path == NULL);
}
/* Release the visitor and any path elements left after an early abort. */
static void lv_free(Visitor *v)
{
    LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v;

    g_list_free_full(lv->path, g_free);
    g_free(lv);
}
/*
 * Create an output visitor that prints an Audiodev as an equivalent
 * -audiodev command line option string on stdout.
 * The caller releases it with visit_free().
 */
static Visitor *legacy_visitor_new(void)
{
    LegacyPrintVisitor *lv = g_new0(LegacyPrintVisitor, 1);

    lv->visitor.start_struct = lv_start_struct;
    lv->visitor.end_struct = lv_end_struct;
    /* lists not supported */
    lv->visitor.type_int64 = lv_type_int64;
    lv->visitor.type_uint64 = lv_type_uint64;
    lv->visitor.type_bool = lv_type_bool;
    lv->visitor.type_str = lv_type_str;

    lv->visitor.type = VISITOR_OUTPUT;
    lv->visitor.complete = lv_complete;
    lv->visitor.free = lv_free;

    return &lv->visitor;
}
/*
 * Print the deprecation notice for environment-variable configuration
 * and the -audiodev option(s) equivalent to the current environment.
 */
void audio_legacy_help(void)
{
    AudiodevListHead head;
    AudiodevListEntry *e;

    printf("Environment variable based configuration deprecated.\n");
    printf("Please use the new -audiodev option.\n");

    head = audio_handle_legacy_opts();
    printf("\nEquivalent -audiodev to your current environment variables:\n");
    if (!getenv("QEMU_AUDIO_DRV")) {
        printf("(Since you didn't specify QEMU_AUDIO_DRV, I'll list all "
               "possibilities)\n");
    }

    QSIMPLEQ_FOREACH(e, &head, next) {
        Visitor *v;
        Audiodev *dev = e->dev;
        printf("-audiodev ");

        v = legacy_visitor_new();
        visit_type_Audiodev(v, NULL, &dev, &error_abort);
        visit_free(v);

        printf("\n");
    }
    audio_free_audiodev_list(&head);
}

View File

@@ -37,12 +37,11 @@
#endif #endif
static void glue(audio_init_nb_voices_, TYPE)(AudioState *s, static void glue(audio_init_nb_voices_, TYPE)(AudioState *s,
struct audio_driver *drv, int min_voices) struct audio_driver *drv)
{ {
int max_voices = glue (drv->max_voices_, TYPE); int max_voices = glue (drv->max_voices_, TYPE);
size_t voice_size = glue(drv->voice_size_, TYPE); int voice_size = glue (drv->voice_size_, TYPE);
glue (s->nb_hw_voices_, TYPE) = glue(audio_get_pdo_, TYPE)(s->dev)->voices;
if (glue (s->nb_hw_voices_, TYPE) > max_voices) { if (glue (s->nb_hw_voices_, TYPE) > max_voices) {
if (!max_voices) { if (!max_voices) {
#ifdef DAC #ifdef DAC
@@ -57,12 +56,6 @@ static void glue(audio_init_nb_voices_, TYPE)(AudioState *s,
glue (s->nb_hw_voices_, TYPE) = max_voices; glue (s->nb_hw_voices_, TYPE) = max_voices;
} }
if (glue (s->nb_hw_voices_, TYPE) < min_voices) {
dolog ("Bogus number of " NAME " voices %d, setting to %d\n",
glue (s->nb_hw_voices_, TYPE),
min_voices);
}
if (audio_bug(__func__, !voice_size && max_voices)) { if (audio_bug(__func__, !voice_size && max_voices)) {
dolog ("drv=`%s' voice_size=0 max_voices=%d\n", dolog ("drv=`%s' voice_size=0 max_voices=%d\n",
drv->name, max_voices); drv->name, max_voices);
@@ -70,7 +63,7 @@ static void glue(audio_init_nb_voices_, TYPE)(AudioState *s,
} }
if (audio_bug(__func__, voice_size && !max_voices)) { if (audio_bug(__func__, voice_size && !max_voices)) {
dolog("drv=`%s' voice_size=%zu max_voices=0\n", dolog ("drv=`%s' voice_size=%d max_voices=0\n",
drv->name, voice_size); drv->name, voice_size);
} }
} }
@@ -78,9 +71,8 @@ static void glue(audio_init_nb_voices_, TYPE)(AudioState *s,
static void glue (audio_pcm_hw_free_resources_, TYPE) (HW *hw) static void glue (audio_pcm_hw_free_resources_, TYPE) (HW *hw)
{ {
g_free(hw->buf_emul); g_free(hw->buf_emul);
g_free(HWBUF.buffer); g_free (HWBUF);
HWBUF.buffer = NULL; HWBUF = NULL;
HWBUF.size = 0;
} }
static void glue(audio_pcm_hw_alloc_resources_, TYPE)(HW *hw) static void glue(audio_pcm_hw_alloc_resources_, TYPE)(HW *hw)
@@ -91,67 +83,56 @@ static void glue(audio_pcm_hw_alloc_resources_, TYPE)(HW *hw)
dolog("Attempted to allocate empty buffer\n"); dolog("Attempted to allocate empty buffer\n");
} }
HWBUF.buffer = g_new0(st_sample, samples); HWBUF = g_malloc0(sizeof(STSampleBuffer) + sizeof(st_sample) * samples);
HWBUF.size = samples; HWBUF->size = samples;
HWBUF.pos = 0;
} else { } else {
HWBUF.buffer = NULL; HWBUF = NULL;
HWBUF.size = 0;
} }
} }
static void glue (audio_pcm_sw_free_resources_, TYPE) (SW *sw) static void glue (audio_pcm_sw_free_resources_, TYPE) (SW *sw)
{ {
g_free(sw->resample_buf.buffer); g_free (sw->buf);
sw->resample_buf.buffer = NULL;
sw->resample_buf.size = 0;
if (sw->rate) { if (sw->rate) {
st_rate_stop (sw->rate); st_rate_stop (sw->rate);
} }
sw->buf = NULL;
sw->rate = NULL; sw->rate = NULL;
} }
static int glue (audio_pcm_sw_alloc_resources_, TYPE) (SW *sw) static int glue (audio_pcm_sw_alloc_resources_, TYPE) (SW *sw)
{ {
HW *hw = sw->hw; int samples;
uint64_t samples;
if (!glue(audio_get_pdo_, TYPE)(sw->s->dev)->mixing_engine) { if (!glue(audio_get_pdo_, TYPE)(sw->s->dev)->mixing_engine) {
return 0; return 0;
} }
samples = muldiv64(HWBUF.size, sw->info.freq, hw->info.freq); #ifdef DAC
if (samples == 0) { samples = ((int64_t) sw->HWBUF->size << 32) / sw->ratio;
uint64_t f_fe_min; #else
uint64_t f_be = (uint32_t)hw->info.freq; samples = (int64_t)sw->HWBUF->size * sw->ratio >> 32;
#endif
/* f_fe_min = ceil(1 [frames] * f_be [Hz] / size_be [frames]) */ sw->buf = audio_calloc(__func__, samples, sizeof(struct st_sample));
f_fe_min = (f_be + HWBUF.size - 1) / HWBUF.size; if (!sw->buf) {
qemu_log_mask(LOG_UNIMP, dolog ("Could not allocate buffer for `%s' (%d samples)\n",
AUDIO_CAP ": The guest selected a " NAME " sample rate" SW_NAME (sw), samples);
" of %d Hz for %s. Only sample rates >= %" PRIu64 " Hz"
" are supported.\n",
sw->info.freq, sw->name, f_fe_min);
return -1; return -1;
} }
/*
* Allocate one additional audio frame that is needed for upsampling
* if the resample buffer size is small. For large buffer sizes take
* care of overflows and truncation.
*/
samples = samples < SIZE_MAX ? samples + 1 : SIZE_MAX;
sw->resample_buf.buffer = g_new0(st_sample, samples);
sw->resample_buf.size = samples;
sw->resample_buf.pos = 0;
#ifdef DAC #ifdef DAC
sw->rate = st_rate_start(sw->info.freq, hw->info.freq); sw->rate = st_rate_start (sw->info.freq, sw->hw->info.freq);
#else #else
sw->rate = st_rate_start(hw->info.freq, sw->info.freq); sw->rate = st_rate_start (sw->hw->info.freq, sw->info.freq);
#endif #endif
if (!sw->rate) {
g_free (sw->buf);
sw->buf = NULL;
return -1;
}
return 0; return 0;
} }
@@ -168,8 +149,11 @@ static int glue (audio_pcm_sw_init_, TYPE) (
sw->hw = hw; sw->hw = hw;
sw->active = 0; sw->active = 0;
#ifdef DAC #ifdef DAC
sw->ratio = ((int64_t) sw->hw->info.freq << 32) / sw->info.freq;
sw->total_hw_samples_mixed = 0; sw->total_hw_samples_mixed = 0;
sw->empty = 1; sw->empty = 1;
#else
sw->ratio = ((int64_t) sw->info.freq << 32) / sw->hw->info.freq;
#endif #endif
if (sw->info.is_float) { if (sw->info.is_float) {
@@ -280,11 +264,13 @@ static HW *glue(audio_pcm_hw_add_new_, TYPE)(AudioState *s,
return NULL; return NULL;
} }
/* hw = audio_calloc(__func__, 1, glue(drv->voice_size_, TYPE));
* Since glue(s->nb_hw_voices_, TYPE) is != 0, glue(drv->voice_size_, TYPE) if (!hw) {
* is guaranteed to be != 0. See the audio_init_nb_voices_* functions. dolog ("Can not allocate voice `%s' size %d\n",
*/ drv->name, glue (drv->voice_size_, TYPE));
hw = g_malloc0(glue(drv->voice_size_, TYPE)); return NULL;
}
hw->s = s; hw->s = s;
hw->pcm_ops = drv->pcm_ops; hw->pcm_ops = drv->pcm_ops;
@@ -369,10 +355,6 @@ AudiodevPerDirectionOptions *glue(audio_get_pdo_, TYPE)(Audiodev *dev)
case AUDIODEV_DRIVER_PA: case AUDIODEV_DRIVER_PA:
return qapi_AudiodevPaPerDirectionOptions_base(dev->u.pa.TYPE); return qapi_AudiodevPaPerDirectionOptions_base(dev->u.pa.TYPE);
#endif #endif
#ifdef CONFIG_AUDIO_PIPEWIRE
case AUDIODEV_DRIVER_PIPEWIRE:
return qapi_AudiodevPipewirePerDirectionOptions_base(dev->u.pipewire.TYPE);
#endif
#ifdef CONFIG_AUDIO_SDL #ifdef CONFIG_AUDIO_SDL
case AUDIODEV_DRIVER_SDL: case AUDIODEV_DRIVER_SDL:
return qapi_AudiodevSdlPerDirectionOptions_base(dev->u.sdl.TYPE); return qapi_AudiodevSdlPerDirectionOptions_base(dev->u.sdl.TYPE);
@@ -436,28 +418,33 @@ static SW *glue(audio_pcm_create_voice_pair_, TYPE)(
hw_as = *as; hw_as = *as;
} }
sw = g_new0(SW, 1); sw = audio_calloc(__func__, 1, sizeof(*sw));
if (!sw) {
dolog ("Could not allocate soft voice `%s' (%zu bytes)\n",
sw_name ? sw_name : "unknown", sizeof (*sw));
goto err1;
}
sw->s = s; sw->s = s;
hw = glue(audio_pcm_hw_add_, TYPE)(s, &hw_as); hw = glue(audio_pcm_hw_add_, TYPE)(s, &hw_as);
if (!hw) { if (!hw) {
dolog("Could not create a backend for voice `%s'\n", sw_name); goto err2;
goto err1;
} }
glue (audio_pcm_hw_add_sw_, TYPE) (hw, sw); glue (audio_pcm_hw_add_sw_, TYPE) (hw, sw);
if (glue (audio_pcm_sw_init_, TYPE) (sw, hw, sw_name, as)) { if (glue (audio_pcm_sw_init_, TYPE) (sw, hw, sw_name, as)) {
goto err2; goto err3;
} }
return sw; return sw;
err2: err3:
glue (audio_pcm_hw_del_sw_, TYPE) (sw); glue (audio_pcm_hw_del_sw_, TYPE) (sw);
glue (audio_pcm_hw_gc_, TYPE) (&hw); glue (audio_pcm_hw_gc_, TYPE) (&hw);
err1: err2:
g_free (sw); g_free (sw);
err1:
return NULL; return NULL;
} }
@@ -528,7 +515,7 @@ SW *glue (AUD_open_, TYPE) (
HW *hw = sw->hw; HW *hw = sw->hw;
if (!hw) { if (!hw) {
dolog("Internal logic error: voice `%s' has no backend\n", dolog ("Internal logic error voice `%s' has no hardware store\n",
SW_NAME (sw)); SW_NAME (sw));
goto fail; goto fail;
} }
@@ -540,6 +527,7 @@ SW *glue (AUD_open_, TYPE) (
} else { } else {
sw = glue(audio_pcm_create_voice_pair_, TYPE)(s, name, as); sw = glue(audio_pcm_create_voice_pair_, TYPE)(s, name, as);
if (!sw) { if (!sw) {
dolog ("Failed to create voice `%s'\n", name);
return NULL; return NULL;
} }
} }

View File

@@ -644,7 +644,7 @@ static void coreaudio_enable_out(HWVoiceOut *hw, bool enable)
update_device_playback_state(core); update_device_playback_state(core);
} }
static void *coreaudio_audio_init(Audiodev *dev, Error **errp) static void *coreaudio_audio_init(Audiodev *dev)
{ {
return dev; return dev;
} }
@@ -673,6 +673,7 @@ static struct audio_driver coreaudio_audio_driver = {
.init = coreaudio_audio_init, .init = coreaudio_audio_init,
.fini = coreaudio_audio_fini, .fini = coreaudio_audio_fini,
.pcm_ops = &coreaudio_pcm_ops, .pcm_ops = &coreaudio_pcm_ops,
.can_be_default = 1,
.max_voices_out = 1, .max_voices_out = 1,
.max_voices_in = 0, .max_voices_in = 0,
.voice_size_out = sizeof (coreaudioVoiceOut), .voice_size_out = sizeof (coreaudioVoiceOut),

View File

@@ -29,11 +29,7 @@
#include "qemu/timer.h" #include "qemu/timer.h"
#include "qemu/dbus.h" #include "qemu/dbus.h"
#ifdef G_OS_UNIX
#include <gio/gunixfdlist.h> #include <gio/gunixfdlist.h>
#endif
#include "ui/dbus.h"
#include "ui/dbus-display1.h" #include "ui/dbus-display1.h"
#define AUDIO_CAP "dbus" #define AUDIO_CAP "dbus"
@@ -47,7 +43,6 @@
typedef struct DBusAudio { typedef struct DBusAudio {
GDBusObjectManagerServer *server; GDBusObjectManagerServer *server;
bool p2p;
GDBusObjectSkeleton *audio; GDBusObjectSkeleton *audio;
QemuDBusDisplay1Audio *iface; QemuDBusDisplay1Audio *iface;
GHashTable *out_listeners; GHashTable *out_listeners;
@@ -395,7 +390,7 @@ dbus_enable_in(HWVoiceIn *hw, bool enable)
} }
static void * static void *
dbus_audio_init(Audiodev *dev, Error **errp) dbus_audio_init(Audiodev *dev)
{ {
DBusAudio *da = g_new0(DBusAudio, 1); DBusAudio *da = g_new0(DBusAudio, 1);
@@ -448,15 +443,12 @@ listener_in_vanished_cb(GDBusConnection *connection,
static gboolean static gboolean
dbus_audio_register_listener(AudioState *s, dbus_audio_register_listener(AudioState *s,
GDBusMethodInvocation *invocation, GDBusMethodInvocation *invocation,
#ifdef G_OS_UNIX
GUnixFDList *fd_list, GUnixFDList *fd_list,
#endif
GVariant *arg_listener, GVariant *arg_listener,
bool out) bool out)
{ {
DBusAudio *da = s->drv_opaque; DBusAudio *da = s->drv_opaque;
const char *sender = const char *sender = g_dbus_method_invocation_get_sender(invocation);
da->p2p ? "p2p" : g_dbus_method_invocation_get_sender(invocation);
g_autoptr(GDBusConnection) listener_conn = NULL; g_autoptr(GDBusConnection) listener_conn = NULL;
g_autoptr(GError) err = NULL; g_autoptr(GError) err = NULL;
g_autoptr(GSocket) socket = NULL; g_autoptr(GSocket) socket = NULL;
@@ -477,11 +469,6 @@ dbus_audio_register_listener(AudioState *s,
return DBUS_METHOD_INVOCATION_HANDLED; return DBUS_METHOD_INVOCATION_HANDLED;
} }
#ifdef G_OS_WIN32
if (!dbus_win32_import_socket(invocation, arg_listener, &fd)) {
return DBUS_METHOD_INVOCATION_HANDLED;
}
#else
fd = g_unix_fd_list_get(fd_list, g_variant_get_handle(arg_listener), &err); fd = g_unix_fd_list_get(fd_list, g_variant_get_handle(arg_listener), &err);
if (err) { if (err) {
g_dbus_method_invocation_return_error(invocation, g_dbus_method_invocation_return_error(invocation,
@@ -491,7 +478,6 @@ dbus_audio_register_listener(AudioState *s,
err->message); err->message);
return DBUS_METHOD_INVOCATION_HANDLED; return DBUS_METHOD_INVOCATION_HANDLED;
} }
#endif
socket = g_socket_new_from_fd(fd, &err); socket = g_socket_new_from_fd(fd, &err);
if (err) { if (err) {
@@ -500,28 +486,15 @@ dbus_audio_register_listener(AudioState *s,
DBUS_DISPLAY_ERROR_FAILED, DBUS_DISPLAY_ERROR_FAILED,
"Couldn't make a socket: %s", "Couldn't make a socket: %s",
err->message); err->message);
#ifdef G_OS_WIN32
closesocket(fd);
#else
close(fd);
#endif
return DBUS_METHOD_INVOCATION_HANDLED; return DBUS_METHOD_INVOCATION_HANDLED;
} }
socket_conn = g_socket_connection_factory_create_connection(socket); socket_conn = g_socket_connection_factory_create_connection(socket);
if (out) { if (out) {
qemu_dbus_display1_audio_complete_register_out_listener( qemu_dbus_display1_audio_complete_register_out_listener(
da->iface, invocation da->iface, invocation, NULL);
#ifdef G_OS_UNIX
, NULL
#endif
);
} else { } else {
qemu_dbus_display1_audio_complete_register_in_listener( qemu_dbus_display1_audio_complete_register_in_listener(
da->iface, invocation da->iface, invocation, NULL);
#ifdef G_OS_UNIX
, NULL
#endif
);
} }
listener_conn = listener_conn =
@@ -599,36 +572,26 @@ dbus_audio_register_listener(AudioState *s,
static gboolean static gboolean
dbus_audio_register_out_listener(AudioState *s, dbus_audio_register_out_listener(AudioState *s,
GDBusMethodInvocation *invocation, GDBusMethodInvocation *invocation,
#ifdef G_OS_UNIX
GUnixFDList *fd_list, GUnixFDList *fd_list,
#endif
GVariant *arg_listener) GVariant *arg_listener)
{ {
return dbus_audio_register_listener(s, invocation, return dbus_audio_register_listener(s, invocation,
#ifdef G_OS_UNIX fd_list, arg_listener, true);
fd_list,
#endif
arg_listener, true);
} }
static gboolean static gboolean
dbus_audio_register_in_listener(AudioState *s, dbus_audio_register_in_listener(AudioState *s,
GDBusMethodInvocation *invocation, GDBusMethodInvocation *invocation,
#ifdef G_OS_UNIX
GUnixFDList *fd_list, GUnixFDList *fd_list,
#endif
GVariant *arg_listener) GVariant *arg_listener)
{ {
return dbus_audio_register_listener(s, invocation, return dbus_audio_register_listener(s, invocation,
#ifdef G_OS_UNIX fd_list, arg_listener, false);
fd_list,
#endif
arg_listener, false);
} }
static void static void
dbus_audio_set_server(AudioState *s, GDBusObjectManagerServer *server, bool p2p) dbus_audio_set_server(AudioState *s, GDBusObjectManagerServer *server)
{ {
DBusAudio *da = s->drv_opaque; DBusAudio *da = s->drv_opaque;
@@ -636,7 +599,6 @@ dbus_audio_set_server(AudioState *s, GDBusObjectManagerServer *server, bool p2p)
g_assert(!da->server); g_assert(!da->server);
da->server = g_object_ref(server); da->server = g_object_ref(server);
da->p2p = p2p;
da->audio = g_dbus_object_skeleton_new(DBUS_DISPLAY1_AUDIO_PATH); da->audio = g_dbus_object_skeleton_new(DBUS_DISPLAY1_AUDIO_PATH);
da->iface = qemu_dbus_display1_audio_skeleton_new(); da->iface = qemu_dbus_display1_audio_skeleton_new();
@@ -676,6 +638,7 @@ static struct audio_driver dbus_audio_driver = {
.fini = dbus_audio_fini, .fini = dbus_audio_fini,
.set_dbus_server = dbus_audio_set_server, .set_dbus_server = dbus_audio_set_server,
.pcm_ops = &dbus_pcm_ops, .pcm_ops = &dbus_pcm_ops,
.can_be_default = 1,
.max_voices_out = INT_MAX, .max_voices_out = INT_MAX,
.max_voices_in = INT_MAX, .max_voices_in = INT_MAX,
.voice_size_out = sizeof(DBusVoiceOut), .voice_size_out = sizeof(DBusVoiceOut),

View File

@@ -619,7 +619,7 @@ static void dsound_audio_fini (void *opaque)
g_free(s); g_free(s);
} }
static void *dsound_audio_init(Audiodev *dev, Error **errp) static void *dsound_audio_init(Audiodev *dev)
{ {
int err; int err;
HRESULT hr; HRESULT hr;
@@ -721,6 +721,7 @@ static struct audio_driver dsound_audio_driver = {
.init = dsound_audio_init, .init = dsound_audio_init,
.fini = dsound_audio_fini, .fini = dsound_audio_fini,
.pcm_ops = &dsound_pcm_ops, .pcm_ops = &dsound_pcm_ops,
.can_be_default = 1,
.max_voices_out = INT_MAX, .max_voices_out = INT_MAX,
.max_voices_in = 1, .max_voices_in = 1,
.voice_size_out = sizeof (DSoundVoiceOut), .voice_size_out = sizeof (DSoundVoiceOut),

View File

@@ -70,9 +70,6 @@ typedef struct QJackClient {
int buffersize; int buffersize;
jack_port_t **port; jack_port_t **port;
QJackBuffer fifo; QJackBuffer fifo;
/* Used as workspace by qjack_process() */
float **process_buffers;
} }
QJackClient; QJackClient;
@@ -270,21 +267,22 @@ static int qjack_process(jack_nframes_t nframes, void *arg)
} }
/* get the buffers for the ports */ /* get the buffers for the ports */
float *buffers[c->nchannels];
for (int i = 0; i < c->nchannels; ++i) { for (int i = 0; i < c->nchannels; ++i) {
c->process_buffers[i] = jack_port_get_buffer(c->port[i], nframes); buffers[i] = jack_port_get_buffer(c->port[i], nframes);
} }
if (c->out) { if (c->out) {
if (likely(c->enabled)) { if (likely(c->enabled)) {
qjack_buffer_read_l(&c->fifo, c->process_buffers, nframes); qjack_buffer_read_l(&c->fifo, buffers, nframes);
} else { } else {
for (int i = 0; i < c->nchannels; ++i) { for (int i = 0; i < c->nchannels; ++i) {
memset(c->process_buffers[i], 0, nframes * sizeof(float)); memset(buffers[i], 0, nframes * sizeof(float));
} }
} }
} else { } else {
if (likely(c->enabled)) { if (likely(c->enabled)) {
qjack_buffer_write_l(&c->fifo, c->process_buffers, nframes); qjack_buffer_write_l(&c->fifo, buffers, nframes);
} }
} }
@@ -402,8 +400,7 @@ static void qjack_client_connect_ports(QJackClient *c)
static int qjack_client_init(QJackClient *c) static int qjack_client_init(QJackClient *c)
{ {
jack_status_t status; jack_status_t status;
int client_name_len = jack_client_name_size(); /* includes NUL */ char client_name[jack_client_name_size()];
g_autofree char *client_name = g_new(char, client_name_len);
jack_options_t options = JackNullOption; jack_options_t options = JackNullOption;
if (c->state == QJACK_STATE_RUNNING) { if (c->state == QJACK_STATE_RUNNING) {
@@ -412,7 +409,7 @@ static int qjack_client_init(QJackClient *c)
c->connect_ports = true; c->connect_ports = true;
snprintf(client_name, client_name_len, "%s-%s", snprintf(client_name, sizeof(client_name), "%s-%s",
c->out ? "out" : "in", c->out ? "out" : "in",
c->opt->client_name ? c->opt->client_name : audio_application_name()); c->opt->client_name ? c->opt->client_name : audio_application_name());
@@ -450,9 +447,6 @@ static int qjack_client_init(QJackClient *c)
jack_get_client_name(c->client)); jack_get_client_name(c->client));
} }
/* Allocate working buffer for process callback */
c->process_buffers = g_new(float *, c->nchannels);
jack_set_process_callback(c->client, qjack_process , c); jack_set_process_callback(c->client, qjack_process , c);
jack_set_port_registration_callback(c->client, qjack_port_registration, c); jack_set_port_registration_callback(c->client, qjack_port_registration, c);
jack_set_xrun_callback(c->client, qjack_xrun, c); jack_set_xrun_callback(c->client, qjack_xrun, c);
@@ -584,7 +578,6 @@ static void qjack_client_fini_locked(QJackClient *c)
qjack_buffer_free(&c->fifo); qjack_buffer_free(&c->fifo);
g_free(c->port); g_free(c->port);
g_free(c->process_buffers);
c->state = QJACK_STATE_DISCONNECTED; c->state = QJACK_STATE_DISCONNECTED;
/* fallthrough */ /* fallthrough */
@@ -645,7 +638,7 @@ static int qjack_thread_creator(jack_native_thread_t *thread,
} }
#endif #endif
static void *qjack_init(Audiodev *dev, Error **errp) static void *qjack_init(Audiodev *dev)
{ {
assert(dev->driver == AUDIODEV_DRIVER_JACK); assert(dev->driver == AUDIODEV_DRIVER_JACK);
return dev; return dev;
@@ -676,6 +669,7 @@ static struct audio_driver jack_driver = {
.init = qjack_init, .init = qjack_init,
.fini = qjack_fini, .fini = qjack_fini,
.pcm_ops = &jack_pcm_ops, .pcm_ops = &jack_pcm_ops,
.can_be_default = 1,
.max_voices_out = INT_MAX, .max_voices_out = INT_MAX,
.max_voices_in = INT_MAX, .max_voices_in = INT_MAX,
.voice_size_out = sizeof(QJackOut), .voice_size_out = sizeof(QJackOut),

View File

@@ -1,14 +1,15 @@
system_ss.add([spice_headers, files('audio.c')]) softmmu_ss.add([spice_headers, files('audio.c')])
system_ss.add(files( softmmu_ss.add(files(
'audio-hmp-cmds.c', 'audio-hmp-cmds.c',
'audio_legacy.c',
'mixeng.c', 'mixeng.c',
'noaudio.c', 'noaudio.c',
'wavaudio.c', 'wavaudio.c',
'wavcapture.c', 'wavcapture.c',
)) ))
system_ss.add(when: coreaudio, if_true: files('coreaudio.m')) softmmu_ss.add(when: coreaudio, if_true: files('coreaudio.m'))
system_ss.add(when: dsound, if_true: files('dsoundaudio.c', 'audio_win_int.c')) softmmu_ss.add(when: dsound, if_true: files('dsoundaudio.c', 'audio_win_int.c'))
audio_modules = {} audio_modules = {}
foreach m : [ foreach m : [
@@ -18,7 +19,6 @@ foreach m : [
['sdl', sdl, files('sdlaudio.c')], ['sdl', sdl, files('sdlaudio.c')],
['jack', jack, files('jackaudio.c')], ['jack', jack, files('jackaudio.c')],
['sndio', sndio, files('sndioaudio.c')], ['sndio', sndio, files('sndioaudio.c')],
['pipewire', pipewire, files('pwaudio.c')],
['spice', spice, files('spiceaudio.c')] ['spice', spice, files('spiceaudio.c')]
] ]
if m[1].found() if m[1].found()
@@ -30,7 +30,7 @@ endforeach
if dbus_display if dbus_display
module_ss = ss.source_set() module_ss = ss.source_set()
module_ss.add(when: [gio, pixman], if_true: files('dbusaudio.c')) module_ss.add(when: gio, if_true: files('dbusaudio.c'))
audio_modules += {'dbus': module_ss} audio_modules += {'dbus': module_ss}
endif endif

View File

@@ -414,7 +414,12 @@ struct rate {
*/ */
void *st_rate_start (int inrate, int outrate) void *st_rate_start (int inrate, int outrate)
{ {
struct rate *rate = g_new0(struct rate, 1); struct rate *rate = audio_calloc(__func__, 1, sizeof(*rate));
if (!rate) {
dolog ("Could not allocate resampler (%zu bytes)\n", sizeof (*rate));
return NULL;
}
rate->opos = 0; rate->opos = 0;
@@ -440,86 +445,6 @@ void st_rate_stop (void *opaque)
g_free (opaque); g_free (opaque);
} }
/**
* st_rate_frames_out() - returns the number of frames the resampling code
* generates from frames_in frames
*
* @opaque: pointer to struct rate
* @frames_in: number of frames
*
* When upsampling, there may be more than one correct result. In this case,
* the function returns the maximum number of output frames the resampling
* code can generate.
*/
uint32_t st_rate_frames_out(void *opaque, uint32_t frames_in)
{
struct rate *rate = opaque;
uint64_t opos_end, opos_delta;
uint32_t ipos_end;
uint32_t frames_out;
if (rate->opos_inc == 1ULL << 32) {
return frames_in;
}
/* no output frame without at least one input frame */
if (!frames_in) {
return 0;
}
/* last frame read was at rate->ipos - 1 */
ipos_end = rate->ipos - 1 + frames_in;
opos_end = (uint64_t)ipos_end << 32;
/* last frame written was at rate->opos - rate->opos_inc */
if (opos_end + rate->opos_inc <= rate->opos) {
return 0;
}
opos_delta = opos_end - rate->opos + rate->opos_inc;
frames_out = opos_delta / rate->opos_inc;
return opos_delta % rate->opos_inc ? frames_out : frames_out - 1;
}
/**
* st_rate_frames_in() - returns the number of frames needed to
* get frames_out frames after resampling
*
* @opaque: pointer to struct rate
* @frames_out: number of frames
*
* When downsampling, there may be more than one correct result. In this
* case, the function returns the maximum number of input frames needed.
*/
uint32_t st_rate_frames_in(void *opaque, uint32_t frames_out)
{
struct rate *rate = opaque;
uint64_t opos_start, opos_end;
uint32_t ipos_start, ipos_end;
if (rate->opos_inc == 1ULL << 32) {
return frames_out;
}
if (frames_out) {
opos_start = rate->opos;
ipos_start = rate->ipos;
} else {
uint64_t offset;
/* add offset = ceil(opos_inc) to opos and ipos to avoid an underflow */
offset = (rate->opos_inc + (1ULL << 32) - 1) & ~((1ULL << 32) - 1);
opos_start = rate->opos + offset;
ipos_start = rate->ipos + (offset >> 32);
}
/* last frame written was at opos_start - rate->opos_inc */
opos_end = opos_start - rate->opos_inc + rate->opos_inc * frames_out;
ipos_end = (opos_end >> 32) + 1;
/* last frame read was at ipos_start - 1 */
return ipos_end + 1 > ipos_start ? ipos_end + 1 - ipos_start : 0;
}
void mixeng_clear (struct st_sample *buf, int len) void mixeng_clear (struct st_sample *buf, int len)
{ {
memset (buf, 0, len * sizeof (struct st_sample)); memset (buf, 0, len * sizeof (struct st_sample));

View File

@@ -38,7 +38,7 @@ typedef struct st_sample st_sample;
typedef void (t_sample) (struct st_sample *dst, const void *src, int samples); typedef void (t_sample) (struct st_sample *dst, const void *src, int samples);
typedef void (f_sample) (void *dst, const struct st_sample *src, int samples); typedef void (f_sample) (void *dst, const struct st_sample *src, int samples);
/* indices: [stereo][signed][swap endianness][8, 16 or 32-bits] */ /* indices: [stereo][signed][swap endiannes][8, 16 or 32-bits] */
extern t_sample *mixeng_conv[2][2][2][3]; extern t_sample *mixeng_conv[2][2][2][3];
extern f_sample *mixeng_clip[2][2][2][3]; extern f_sample *mixeng_clip[2][2][2][3];
@@ -52,8 +52,6 @@ void st_rate_flow(void *opaque, st_sample *ibuf, st_sample *obuf,
void st_rate_flow_mix(void *opaque, st_sample *ibuf, st_sample *obuf, void st_rate_flow_mix(void *opaque, st_sample *ibuf, st_sample *obuf,
size_t *isamp, size_t *osamp); size_t *isamp, size_t *osamp);
void st_rate_stop (void *opaque); void st_rate_stop (void *opaque);
uint32_t st_rate_frames_out(void *opaque, uint32_t frames_in);
uint32_t st_rate_frames_in(void *opaque, uint32_t frames_out);
void mixeng_clear (struct st_sample *buf, int len); void mixeng_clear (struct st_sample *buf, int len);
void mixeng_volume (struct st_sample *buf, int len, struct mixeng_volume *vol); void mixeng_volume (struct st_sample *buf, int len, struct mixeng_volume *vol);

View File

@@ -104,7 +104,7 @@ static void no_enable_in(HWVoiceIn *hw, bool enable)
} }
} }
static void *no_audio_init(Audiodev *dev, Error **errp) static void *no_audio_init(Audiodev *dev)
{ {
return &no_audio_init; return &no_audio_init;
} }
@@ -135,6 +135,7 @@ static struct audio_driver no_audio_driver = {
.init = no_audio_init, .init = no_audio_init,
.fini = no_audio_fini, .fini = no_audio_fini,
.pcm_ops = &no_pcm_ops, .pcm_ops = &no_pcm_ops,
.can_be_default = 1,
.max_voices_out = INT_MAX, .max_voices_out = INT_MAX,
.max_voices_in = INT_MAX, .max_voices_in = INT_MAX,
.voice_size_out = sizeof (NoVoiceOut), .voice_size_out = sizeof (NoVoiceOut),

View File

@@ -28,7 +28,6 @@
#include "qemu/main-loop.h" #include "qemu/main-loop.h"
#include "qemu/module.h" #include "qemu/module.h"
#include "qemu/host-utils.h" #include "qemu/host-utils.h"
#include "qapi/error.h"
#include "audio.h" #include "audio.h"
#include "trace.h" #include "trace.h"
@@ -549,6 +548,7 @@ static int oss_init_out(HWVoiceOut *hw, struct audsettings *as,
hw->size_emul); hw->size_emul);
hw->buf_emul = NULL; hw->buf_emul = NULL;
} else { } else {
int err;
int trig = 0; int trig = 0;
if (ioctl (fd, SNDCTL_DSP_SETTRIGGER, &trig) < 0) { if (ioctl (fd, SNDCTL_DSP_SETTRIGGER, &trig) < 0) {
oss_logerr (errno, "SNDCTL_DSP_SETTRIGGER 0 failed\n"); oss_logerr (errno, "SNDCTL_DSP_SETTRIGGER 0 failed\n");
@@ -736,7 +736,7 @@ static void oss_init_per_direction(AudiodevOssPerDirectionOptions *opdo)
} }
} }
static void *oss_audio_init(Audiodev *dev, Error **errp) static void *oss_audio_init(Audiodev *dev)
{ {
AudiodevOssOptions *oopts; AudiodevOssOptions *oopts;
assert(dev->driver == AUDIODEV_DRIVER_OSS); assert(dev->driver == AUDIODEV_DRIVER_OSS);
@@ -745,12 +745,8 @@ static void *oss_audio_init(Audiodev *dev, Error **errp)
oss_init_per_direction(oopts->in); oss_init_per_direction(oopts->in);
oss_init_per_direction(oopts->out); oss_init_per_direction(oopts->out);
if (access(oopts->in->dev ?: "/dev/dsp", R_OK | W_OK) < 0) { if (access(oopts->in->dev ?: "/dev/dsp", R_OK | W_OK) < 0 ||
error_setg_errno(errp, errno, "%s not accessible", oopts->in->dev ?: "/dev/dsp"); access(oopts->out->dev ?: "/dev/dsp", R_OK | W_OK) < 0) {
return NULL;
}
if (access(oopts->out->dev ?: "/dev/dsp", R_OK | W_OK) < 0) {
error_setg_errno(errp, errno, "%s not accessible", oopts->out->dev ?: "/dev/dsp");
return NULL; return NULL;
} }
return dev; return dev;
@@ -783,6 +779,7 @@ static struct audio_driver oss_audio_driver = {
.init = oss_audio_init, .init = oss_audio_init,
.fini = oss_audio_fini, .fini = oss_audio_fini,
.pcm_ops = &oss_pcm_ops, .pcm_ops = &oss_pcm_ops,
.can_be_default = 1,
.max_voices_out = INT_MAX, .max_voices_out = INT_MAX,
.max_voices_in = INT_MAX, .max_voices_in = INT_MAX,
.voice_size_out = sizeof (OSSVoiceOut), .voice_size_out = sizeof (OSSVoiceOut),

View File

@@ -3,7 +3,7 @@
#include "qemu/osdep.h" #include "qemu/osdep.h"
#include "qemu/module.h" #include "qemu/module.h"
#include "audio.h" #include "audio.h"
#include "qapi/error.h" #include "qapi/opts-visitor.h"
#include <pulse/pulseaudio.h> #include <pulse/pulseaudio.h>
@@ -818,7 +818,7 @@ fail:
return NULL; return NULL;
} }
static void *qpa_audio_init(Audiodev *dev, Error **errp) static void *qpa_audio_init(Audiodev *dev)
{ {
paaudio *g; paaudio *g;
AudiodevPaOptions *popts = &dev->u.pa; AudiodevPaOptions *popts = &dev->u.pa;
@@ -834,12 +834,10 @@ static void *qpa_audio_init(Audiodev *dev, Error **errp)
runtime = getenv("XDG_RUNTIME_DIR"); runtime = getenv("XDG_RUNTIME_DIR");
if (!runtime) { if (!runtime) {
error_setg(errp, "XDG_RUNTIME_DIR not set");
return NULL; return NULL;
} }
snprintf(pidfile, sizeof(pidfile), "%s/pulse/pid", runtime); snprintf(pidfile, sizeof(pidfile), "%s/pulse/pid", runtime);
if (stat(pidfile, &st) != 0) { if (stat(pidfile, &st) != 0) {
error_setg_errno(errp, errno, "could not stat pidfile %s", pidfile);
return NULL; return NULL;
} }
} }
@@ -869,7 +867,6 @@ static void *qpa_audio_init(Audiodev *dev, Error **errp)
} }
if (!g->conn) { if (!g->conn) {
g_free(g); g_free(g);
error_setg(errp, "could not connect to PulseAudio server");
return NULL; return NULL;
} }
@@ -931,6 +928,7 @@ static struct audio_driver pa_audio_driver = {
.init = qpa_audio_init, .init = qpa_audio_init,
.fini = qpa_audio_fini, .fini = qpa_audio_fini,
.pcm_ops = &qpa_pcm_ops, .pcm_ops = &qpa_pcm_ops,
.can_be_default = 1,
.max_voices_out = INT_MAX, .max_voices_out = INT_MAX,
.max_voices_in = INT_MAX, .max_voices_in = INT_MAX,
.voice_size_out = sizeof (PAVoiceOut), .voice_size_out = sizeof (PAVoiceOut),

Some files were not shown because too many files have changed in this diff Show More