Compare commits (74 commits)

dabb4183d1, ff692a15bb, 134253a4fe, e49884a909, 9d622451fd, 668aeea0ec,
fae9449998, 02bd13ae3a, c84bcff3d3, 5c4f2f1b60, ba3c7bf178, 6e260100d0,
9ff3fe63fc, 0f7ca2bf2c, eb134d1d58, a7002f15c8, d001f222e3, 84d839e499,
a0b89ba845, a91defe16b, 81d13aa5e0, 302ac06ab9, adc49750d2, a9144eed6c,
0de5117819, db8051ad59, 1e029102e6, c283a4bc76, c0ad2a9191, d2a811dd7d,
cb898262a4, 45a67df841, 69a6ea7c4b, 0b1b5a4204, eb82a80f51, 8ad637881f,
21b54a683d, 36cd9bc8e2, 117f33c9a7, e347aa89dd, b858c53ef6, 7ceebe3f90,
950882af67, e09f912550, 80a2c1b5fe, 3148fe1ac8, 4b59b5bd14, 488ad8b302,
f91d0db71e, f5301431e8, 3b02d0db4a, e0deae4f49, 38a598aee3, 2197a94cb4,
8322e5300f, 4dc5df865c, a458252c16, f6227dd60d, 2daa9e4d7e, 6944823a6f,
af08c70ef5, 975f12aa52, 5477a21350, 168f193c5b, 61ef050639, f0c5a78029,
9448a0fa11, 8c3cf36260, f528cfc3fa, bb47b5bc2e, 134a1a3320, ac7f07ebc8,
3ed99d232c, 6f7b9f7b6f
.gitattributes (vendored): 5 lines changed
@@ -2,8 +2,3 @@
*.h.inc diff=c
*.m diff=objc
*.py diff=python
*.rs diff=rust
*.rs.inc diff=rust
Cargo.lock diff=toml merge=binary

*.patch -text -whitespace
@@ -1,33 +1,15 @@
variables:
# On stable branches this is changed by later rules. Should also
# be overridden per pipeline if running pipelines concurrently
# for different branches in contributor forks.
QEMU_CI_CONTAINER_TAG: latest

# For purposes of CI rules, upstream is the gitlab.com/qemu-project
# namespace. When testing CI, it might be usefult to override this
# to point to a fork repo
QEMU_CI_UPSTREAM: qemu-project

# The order of rules defined here is critically important.
# They are evaluated in order and first match wins.
#
# Thus we group them into a number of stages, ordered from
# most restrictive to least restrictive
#
# For pipelines running for stable "staging-X.Y" branches
# we must override QEMU_CI_CONTAINER_TAG
#
.base_job_template:
variables:
# Each script line from will be in a collapsible section in the job output
# and show the duration of each line.
FF_SCRIPT_SECTIONS: 1
# The project has a fairly fat GIT repo so we try and avoid bringing in things
# we don't need. The --filter options avoid blobs and tree references we aren't going to use
# and we also avoid fetching tags.
GIT_FETCH_EXTRA_FLAGS: --filter=blob:none --filter=tree:0 --no-tags --prune --quiet

interruptible: true
@@ -37,72 +19,48 @@ variables:
# want jobs to run
#############################################################

# Never run jobs upstream on stable branch, staging branch jobs already ran
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /^stable-/'
when: never

# Never run jobs upstream on tags, staging branch jobs already ran
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_TAG'
when: never

# Scheduled runs on mainline don't get pipelines except for the special Coverity job
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_PIPELINE_SOURCE == "schedule"'
when: never

# Cirrus jobs can't run unless the creds / target repo are set
- if: '$QEMU_JOB_CIRRUS && ($CIRRUS_GITHUB_REPO == null || $CIRRUS_API_TOKEN == null)'
when: never

# Publishing jobs should only run on the default branch in upstream
- if: '$QEMU_JOB_PUBLISH == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH'
- if: '$QEMU_JOB_PUBLISH == "1" && $CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH'
when: never

# Non-publishing jobs should only run on staging branches in upstream
- if: '$QEMU_JOB_PUBLISH != "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH !~ /staging/'
- if: '$QEMU_JOB_PUBLISH != "1" && $CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH !~ /staging/'
when: never

# Jobs only intended for forks should always be skipped on upstream
- if: '$QEMU_JOB_ONLY_FORKS == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM'
- if: '$QEMU_JOB_ONLY_FORKS == "1" && $CI_PROJECT_NAMESPACE == "qemu-project"'
when: never

# Forks don't get pipelines unless QEMU_CI=1 or QEMU_CI=2 is set
- if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
- if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"'
when: never

# Avocado jobs don't run in forks unless $QEMU_CI_AVOCADO_TESTING is set
- if: '$QEMU_JOB_AVOCADO && $QEMU_CI_AVOCADO_TESTING != "1" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
- if: '$QEMU_JOB_AVOCADO && $QEMU_CI_AVOCADO_TESTING != "1" && $CI_PROJECT_NAMESPACE != "qemu-project"'
when: never


#############################################################
# Stage 2: fine tune execution of jobs in specific scenarios
# where the catch all logic is inappropriate
# where the catch all logic is inapprorpaite
#############################################################

# Optional jobs should not be run unless manually triggered
- if: '$QEMU_JOB_OPTIONAL && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
when: manual
allow_failure: true
variables:
QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG

- if: '$QEMU_JOB_OPTIONAL'
when: manual
allow_failure: true

# Skipped jobs should not be run unless manually triggered
- if: '$QEMU_JOB_SKIPPED && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
when: manual
allow_failure: true
variables:
QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG

- if: '$QEMU_JOB_SKIPPED'
when: manual
allow_failure: true

# Avocado jobs can be manually start in forks if $QEMU_CI_AVOCADO_TESTING is unset
- if: '$QEMU_JOB_AVOCADO && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
- if: '$QEMU_JOB_AVOCADO && $CI_PROJECT_NAMESPACE != "qemu-project"'
when: manual
allow_failure: true
@@ -114,23 +72,8 @@ variables:

# Forks pipeline jobs don't start automatically unless
# QEMU_CI=2 is set
- if: '$QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
when: manual

# Upstream pipeline jobs start automatically unless told not to
# by setting QEMU_CI=1
- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
when: manual
variables:
QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG

- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM'
- if: '$QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"'
when: manual

# Jobs can run if any jobs they depend on were successful
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
when: on_success
variables:
QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG

- when: on_success
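Editor's note, not part of the diff: the comments above rely on GitLab CI evaluating `rules:` entries top to bottom, with the first matching entry deciding the job's fate. A minimal sketch of that behaviour, using a hypothetical job name and variables rather than anything from this diff:

```yaml
# Illustrative only: first-match-wins ordering of GitLab CI rules
example-job:
  script:
    - echo "running"
  rules:
    # 1. Most restrictive condition first: never run on tags.
    - if: '$CI_COMMIT_TAG'
      when: never
    # 2. Then a narrower opt-in: manual start outside the upstream namespace.
    - if: '$CI_PROJECT_NAMESPACE != "qemu-project"'
      when: manual
      allow_failure: true
    # 3. Catch-all fallback: run automatically when nothing above matched.
    - when: on_success
```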
@@ -1,53 +1,34 @@
.native_build_job_template:
extends: .base_job_template
stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
cache:
paths:
- ccache
key: "$CI_JOB_NAME"
when: always
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
before_script:
- source scripts/ci/gitlab-ci-section
- section_start setup "Pre-script setup"
- JOBS=$(expr $(nproc) + 1)
- cat /packages.txt
- section_end setup
script:
- export CCACHE_BASEDIR="$(pwd)"
- export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
- export CCACHE_MAXSIZE="500M"
- export PATH="$CCACHE_WRAPPERSDIR:$PATH"
- du -sh .git
- if test -n "$LD_JOBS";
then
scripts/git-submodule.sh update meson ;
fi
- mkdir build
- cd build
- ccache --zero-stats
- section_start configure "Running configure"
- ../configure --enable-werror --disable-docs --enable-fdt=system
${TARGETS:+--target-list="$TARGETS"}
${LD_JOBS:+--meson=git} ${TARGETS:+--target-list="$TARGETS"}
$CONFIGURE_ARGS ||
{ cat config.log meson-logs/meson-log.txt && exit 1; }
- if test -n "$LD_JOBS";
then
pyvenv/bin/meson configure . -Dbackend_max_links="$LD_JOBS" ;
../meson/meson.py configure . -Dbackend_max_links="$LD_JOBS" ;
fi || exit 1;
- section_end configure
- section_start build "Building QEMU"
- $MAKE -j"$JOBS"
- section_end build
- section_start test "Running tests"
- make -j"$JOBS"
- if test -n "$MAKE_CHECK_ARGS";
then
$MAKE -j"$JOBS" $MAKE_CHECK_ARGS ;
make -j"$JOBS" $MAKE_CHECK_ARGS ;
fi
- section_end test
- ccache --show-stats

# We jump some hoops in common_test_job_template to avoid
# rebuilding all the object files we skip in the artifacts
.native_build_artifact_template:
artifacts:
when: on_success
expire_in: 2 days
paths:
- build
@@ -55,57 +36,47 @@
exclude:
- build/**/*.p
- build/**/*.a.p
- build/**/*.fa.p
- build/**/*.c.o
- build/**/*.c.o.d
- build/**/*.fa

.common_test_job_template:
extends: .base_job_template
stage: test
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
script:
- source scripts/ci/gitlab-ci-section
- section_start buildenv "Setting up to run tests"
- scripts/git-submodule.sh update roms/SLOF
- build/pyvenv/bin/meson subprojects download $(cd build/subprojects && echo *)
- scripts/git-submodule.sh update
$(sed -n '/GIT_SUBMODULES=/ s/.*=// p' build/config-host.mak)
- cd build
- find . -type f -exec touch {} +
# Avoid recompiling by hiding ninja with NINJA=":"
# We also have to pre-cache the functional tests manually in this case
- if [ "x${QEMU_TEST_CACHE_DIR}" != "x" ]; then
$MAKE precache-functional ;
fi
- section_end buildenv
- section_start test "Running tests"
- $MAKE NINJA=":" $MAKE_CHECK_ARGS
- section_end test
- make NINJA=":" $MAKE_CHECK_ARGS

.native_test_job_template:
extends: .common_test_job_template
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
when: always
expire_in: 7 days
paths:
- build/meson-logs/testlog.txt
reports:
junit: build/meson-logs/testlog.junit.xml

.functional_test_job_template:
.avocado_test_job_template:
extends: .common_test_job_template
cache:
key: "${CI_JOB_NAME}-cache"
paths:
- ${CI_PROJECT_DIR}/avocado-cache
- ${CI_PROJECT_DIR}/functional-cache
policy: pull-push
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
when: always
when: on_failure
expire_in: 7 days
paths:
- build/tests/results/latest/results.xml
- build/tests/results/latest/test-results
- build/tests/functional/*/*/*.log
reports:
junit: build/tests/results/latest/results.xml
before_script:
@@ -116,13 +87,11 @@
- echo -e '[job.output.testlogs]\nstatuses = ["FAIL", "INTERRUPT"]'
>> ~/.config/avocado/avocado.conf
- if [ -d ${CI_PROJECT_DIR}/avocado-cache ]; then
du -chs ${CI_PROJECT_DIR}/*-cache ;
du -chs ${CI_PROJECT_DIR}/avocado-cache ;
fi
- export AVOCADO_ALLOW_UNTRUSTED_CODE=1
- export QEMU_TEST_ALLOW_UNTRUSTED_CODE=1
- export QEMU_TEST_CACHE_DIR=${CI_PROJECT_DIR}/functional-cache
after_script:
- cd build
- du -chs ${CI_PROJECT_DIR}/*-cache
- du -chs ${CI_PROJECT_DIR}/avocado-cache
variables:
QEMU_JOB_AVOCADO: 1
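Editor's note, not part of the diff: the concrete jobs that follow consume the templates above by setting a handful of variables. A minimal sketch of that pattern, using a hypothetical job and container name (the real jobs, e.g. build-system-alpine, appear below):

```yaml
# Illustrative only: a job consuming .native_build_job_template
build-system-example:
  extends: .native_build_job_template
  needs:
    job: amd64-example-container    # assumed container-build job providing the image
  variables:
    IMAGE: example                  # container image the build runs in
    TARGETS: x86_64-softmmu         # expands into ../configure --target-list="..."
    CONFIGURE_ARGS: --enable-docs   # extra flags appended to the configure line
    MAKE_CHECK_ARGS: check-build    # run by the template's "Running tests" section
```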
@@ -22,15 +22,14 @@ check-system-alpine:
|
||||
IMAGE: alpine
|
||||
MAKE_CHECK_ARGS: check-unit check-qtest
|
||||
|
||||
functional-system-alpine:
|
||||
extends: .functional_test_job_template
|
||||
avocado-system-alpine:
|
||||
extends: .avocado_test_job_template
|
||||
needs:
|
||||
- job: build-system-alpine
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: alpine
|
||||
MAKE_CHECK_ARGS: check-avocado check-functional
|
||||
AVOCADO_TAGS: arch:avr arch:loongarch64 arch:mips64 arch:mipsel
|
||||
MAKE_CHECK_ARGS: check-avocado
|
||||
|
||||
build-system-ubuntu:
|
||||
extends:
|
||||
@@ -41,7 +40,8 @@ build-system-ubuntu:
|
||||
variables:
|
||||
IMAGE: ubuntu2204
|
||||
CONFIGURE_ARGS: --enable-docs
|
||||
TARGETS: alpha-softmmu microblazeel-softmmu mips64el-softmmu
|
||||
TARGETS: alpha-softmmu cris-softmmu hppa-softmmu
|
||||
microblazeel-softmmu mips64el-softmmu
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
|
||||
check-system-ubuntu:
|
||||
@@ -53,15 +53,14 @@ check-system-ubuntu:
|
||||
IMAGE: ubuntu2204
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
functional-system-ubuntu:
|
||||
extends: .functional_test_job_template
|
||||
avocado-system-ubuntu:
|
||||
extends: .avocado_test_job_template
|
||||
needs:
|
||||
- job: build-system-ubuntu
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: ubuntu2204
|
||||
MAKE_CHECK_ARGS: check-avocado check-functional
|
||||
AVOCADO_TAGS: arch:alpha arch:microblazeel arch:mips64el
|
||||
MAKE_CHECK_ARGS: check-avocado
|
||||
|
||||
build-system-debian:
|
||||
extends:
|
||||
@@ -70,10 +69,10 @@ build-system-debian:
|
||||
needs:
|
||||
job: amd64-debian-container
|
||||
variables:
|
||||
IMAGE: debian
|
||||
IMAGE: debian-amd64
|
||||
CONFIGURE_ARGS: --with-coroutine=sigaltstack
|
||||
TARGETS: arm-softmmu i386-softmmu riscv64-softmmu sh4eb-softmmu
|
||||
sparc-softmmu xtensa-softmmu
|
||||
sparc-softmmu xtensaeb-softmmu
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
|
||||
check-system-debian:
|
||||
@@ -82,18 +81,17 @@ check-system-debian:
|
||||
- job: build-system-debian
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: debian
|
||||
IMAGE: debian-amd64
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
functional-system-debian:
|
||||
extends: .functional_test_job_template
|
||||
avocado-system-debian:
|
||||
extends: .avocado_test_job_template
|
||||
needs:
|
||||
- job: build-system-debian
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: debian
|
||||
MAKE_CHECK_ARGS: check-avocado check-functional
|
||||
AVOCADO_TAGS: arch:arm arch:i386 arch:riscv64 arch:sh4 arch:sparc arch:xtensa
|
||||
IMAGE: debian-amd64
|
||||
MAKE_CHECK_ARGS: check-avocado
|
||||
|
||||
crash-test-debian:
|
||||
extends: .native_test_job_template
|
||||
@@ -101,11 +99,11 @@ crash-test-debian:
|
||||
- job: build-system-debian
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: debian
|
||||
IMAGE: debian-amd64
|
||||
script:
|
||||
- cd build
|
||||
- make NINJA=":" check-venv
|
||||
- pyvenv/bin/python3 scripts/device-crash-test -q --tcg-only ./qemu-system-i386
|
||||
- make check-venv
|
||||
- tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-i386
|
||||
|
||||
build-system-fedora:
|
||||
extends:
|
||||
@@ -115,24 +113,11 @@ build-system-fedora:
|
||||
job: amd64-fedora-container
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
CONFIGURE_ARGS: --disable-gcrypt --enable-nettle --enable-docs --enable-crypto-afalg --enable-rust
|
||||
TARGETS: microblaze-softmmu mips-softmmu
|
||||
CONFIGURE_ARGS: --disable-gcrypt --enable-nettle --enable-docs
|
||||
TARGETS: tricore-softmmu microblaze-softmmu mips-softmmu
|
||||
xtensa-softmmu m68k-softmmu riscv32-softmmu ppc-softmmu sparc64-softmmu
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
|
||||
build-system-fedora-rust-nightly:
|
||||
extends:
|
||||
- .native_build_job_template
|
||||
- .native_build_artifact_template
|
||||
needs:
|
||||
job: amd64-fedora-rust-nightly-container
|
||||
variables:
|
||||
IMAGE: fedora-rust-nightly
|
||||
CONFIGURE_ARGS: --disable-docs --enable-rust --enable-strict-rust-lints
|
||||
TARGETS: aarch64-softmmu
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
allow_failure: true
|
||||
|
||||
check-system-fedora:
|
||||
extends: .native_test_job_template
|
||||
needs:
|
||||
@@ -142,16 +127,14 @@ check-system-fedora:
|
||||
IMAGE: fedora
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
functional-system-fedora:
|
||||
extends: .functional_test_job_template
|
||||
avocado-system-fedora:
|
||||
extends: .avocado_test_job_template
|
||||
needs:
|
||||
- job: build-system-fedora
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
MAKE_CHECK_ARGS: check-avocado check-functional
|
||||
AVOCADO_TAGS: arch:microblaze arch:mips arch:xtensa arch:m68k
|
||||
arch:riscv32 arch:ppc arch:sparc64
|
||||
MAKE_CHECK_ARGS: check-avocado
|
||||
|
||||
crash-test-fedora:
|
||||
extends: .native_test_job_template
|
||||
@@ -162,119 +145,41 @@ crash-test-fedora:
|
||||
IMAGE: fedora
|
||||
script:
|
||||
- cd build
|
||||
- make NINJA=":" check-venv
|
||||
- pyvenv/bin/python3 scripts/device-crash-test -q ./qemu-system-ppc
|
||||
- pyvenv/bin/python3 scripts/device-crash-test -q ./qemu-system-riscv32
|
||||
- make check-venv
|
||||
- tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-ppc
|
||||
- tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-riscv32
|
||||
|
||||
build-system-centos:
|
||||
extends:
|
||||
- .native_build_job_template
|
||||
- .native_build_artifact_template
|
||||
needs:
|
||||
job: amd64-centos9-container
|
||||
job: amd64-centos8-container
|
||||
variables:
|
||||
IMAGE: centos9
|
||||
IMAGE: centos8
|
||||
CONFIGURE_ARGS: --disable-nettle --enable-gcrypt --enable-vfio-user-server
|
||||
--enable-modules --enable-trace-backends=dtrace --enable-docs
|
||||
TARGETS: ppc64-softmmu or1k-softmmu s390x-softmmu
|
||||
x86_64-softmmu rx-softmmu sh4-softmmu
|
||||
x86_64-softmmu rx-softmmu sh4-softmmu nios2-softmmu
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
|
||||
# Previous QEMU release. Used for cross-version migration tests.
|
||||
build-previous-qemu:
|
||||
extends: .native_build_job_template
|
||||
artifacts:
|
||||
when: on_success
|
||||
expire_in: 2 days
|
||||
paths:
|
||||
- build-previous
|
||||
exclude:
|
||||
- build-previous/**/*.p
|
||||
- build-previous/**/*.a.p
|
||||
- build-previous/**/*.c.o
|
||||
- build-previous/**/*.c.o.d
|
||||
needs:
|
||||
job: amd64-opensuse-leap-container
|
||||
variables:
|
||||
IMAGE: opensuse-leap
|
||||
TARGETS: x86_64-softmmu aarch64-softmmu
|
||||
# Override the default flags as we need more to grab the old version
|
||||
GIT_FETCH_EXTRA_FLAGS: --prune --quiet
|
||||
before_script:
|
||||
- source scripts/ci/gitlab-ci-section
|
||||
- export QEMU_PREV_VERSION="$(sed 's/\([0-9.]*\)\.[0-9]*/v\1.0/' VERSION)"
|
||||
- git remote add upstream https://gitlab.com/qemu-project/qemu
|
||||
- git fetch upstream refs/tags/$QEMU_PREV_VERSION:refs/tags/$QEMU_PREV_VERSION
|
||||
- git checkout $QEMU_PREV_VERSION
|
||||
after_script:
|
||||
- mv build build-previous
|
||||
|
||||
.migration-compat-common:
|
||||
extends: .common_test_job_template
|
||||
needs:
|
||||
- job: build-previous-qemu
|
||||
- job: build-system-opensuse
|
||||
# The old QEMU could have bugs unrelated to migration that are
|
||||
# already fixed in the current development branch, so this test
|
||||
# might fail.
|
||||
allow_failure: true
|
||||
variables:
|
||||
IMAGE: opensuse-leap
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
script:
|
||||
# Use the migration-tests from the older QEMU tree. This avoids
|
||||
# testing an old QEMU against new features/tests that it is not
|
||||
# compatible with.
|
||||
- cd build-previous
|
||||
# Don't allow python-based tests to run. The
|
||||
# vmstate-checker-script test has a race that causes it to fail
|
||||
# sometimes. It cannot be fixed it because this job runs the test
|
||||
# from the old QEMU version. The test will be removed on master,
|
||||
# but this job will only see the change in the next release.
|
||||
#
|
||||
# TODO: remove this line after 9.2 release
|
||||
- unset PYTHON
|
||||
# old to new
|
||||
- QTEST_QEMU_BINARY_SRC=./qemu-system-${TARGET}
|
||||
QTEST_QEMU_BINARY=../build/qemu-system-${TARGET} ./tests/qtest/migration-test
|
||||
# new to old
|
||||
- QTEST_QEMU_BINARY_DST=./qemu-system-${TARGET}
|
||||
QTEST_QEMU_BINARY=../build/qemu-system-${TARGET} ./tests/qtest/migration-test
|
||||
|
||||
# This job needs to be disabled until we can have an aarch64 CPU model that
|
||||
# will both (1) support both KVM and TCG, and (2) provide a stable ABI.
|
||||
# Currently only "-cpu max" can provide (1), however it doesn't guarantee
|
||||
# (2). Mark this test skipped until later.
|
||||
migration-compat-aarch64:
|
||||
extends: .migration-compat-common
|
||||
variables:
|
||||
TARGET: aarch64
|
||||
QEMU_JOB_SKIPPED: 1
|
||||
|
||||
migration-compat-x86_64:
|
||||
extends: .migration-compat-common
|
||||
variables:
|
||||
TARGET: x86_64
|
||||
|
||||
check-system-centos:
|
||||
extends: .native_test_job_template
|
||||
needs:
|
||||
- job: build-system-centos
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: centos9
|
||||
IMAGE: centos8
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
functional-system-centos:
|
||||
extends: .functional_test_job_template
|
||||
avocado-system-centos:
|
||||
extends: .avocado_test_job_template
|
||||
needs:
|
||||
- job: build-system-centos
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: centos9
|
||||
MAKE_CHECK_ARGS: check-avocado check-functional
|
||||
AVOCADO_TAGS: arch:ppc64 arch:or1k arch:s390x arch:x86_64 arch:rx
|
||||
arch:sh4
|
||||
IMAGE: centos8
|
||||
MAKE_CHECK_ARGS: check-avocado
|
||||
|
||||
build-system-opensuse:
|
||||
extends:
|
||||
@@ -296,46 +201,15 @@ check-system-opensuse:
|
||||
IMAGE: opensuse-leap
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
functional-system-opensuse:
|
||||
extends: .functional_test_job_template
|
||||
avocado-system-opensuse:
|
||||
extends: .avocado_test_job_template
|
||||
needs:
|
||||
- job: build-system-opensuse
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: opensuse-leap
|
||||
MAKE_CHECK_ARGS: check-avocado check-functional
|
||||
AVOCADO_TAGS: arch:s390x arch:x86_64 arch:aarch64
|
||||
MAKE_CHECK_ARGS: check-avocado
|
||||
|
||||
#
|
||||
# Flaky tests. We don't run these by default and they are allow fail
|
||||
# but often the CI system is the only way to trigger the failures.
|
||||
#
|
||||
|
||||
build-system-flaky:
|
||||
extends:
|
||||
- .native_build_job_template
|
||||
- .native_build_artifact_template
|
||||
needs:
|
||||
job: amd64-debian-container
|
||||
variables:
|
||||
IMAGE: debian
|
||||
QEMU_JOB_OPTIONAL: 1
|
||||
TARGETS: aarch64-softmmu arm-softmmu mips64el-softmmu
|
||||
ppc64-softmmu rx-softmmu s390x-softmmu sh4-softmmu x86_64-softmmu
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
|
||||
functional-system-flaky:
|
||||
extends: .functional_test_job_template
|
||||
needs:
|
||||
- job: build-system-flaky
|
||||
artifacts: true
|
||||
allow_failure: true
|
||||
variables:
|
||||
IMAGE: debian
|
||||
MAKE_CHECK_ARGS: check-avocado check-functional
|
||||
QEMU_JOB_OPTIONAL: 1
|
||||
QEMU_TEST_FLAKY_TESTS: 1
|
||||
AVOCADO_TAGS: flaky
|
||||
|
||||
# This jobs explicitly disable TCG (--disable-tcg), KVM is detected by
|
||||
# the configure script. The container doesn't contain Xen headers so
|
||||
@@ -347,9 +221,9 @@ functional-system-flaky:
|
||||
build-tcg-disabled:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
job: amd64-centos9-container
|
||||
job: amd64-centos8-container
|
||||
variables:
|
||||
IMAGE: centos9
|
||||
IMAGE: centos8
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
@@ -362,13 +236,11 @@ build-tcg-disabled:
|
||||
- cd tests/qemu-iotests/
|
||||
- ./check -raw 001 002 003 004 005 008 009 010 011 012 021 025 032 033 048
|
||||
052 063 077 086 101 104 106 113 148 150 151 152 157 159 160 163
|
||||
170 171 184 192 194 208 221 226 227 236 253 277 image-fleecing
|
||||
170 171 183 184 192 194 208 221 226 227 236 253 277 image-fleecing
|
||||
- ./check -qcow2 028 051 056 057 058 065 068 082 085 091 095 096 102 122
|
||||
124 132 139 142 144 145 151 152 155 157 165 194 196 200 202
|
||||
208 209 216 218 227 234 246 247 248 250 254 255 257 258
|
||||
260 261 262 263 264 270 272 273 277 279 image-fleecing
|
||||
- cd ../..
|
||||
- make distclean
|
||||
|
||||
build-user:
|
||||
extends: .native_build_job_template
|
||||
@@ -377,7 +249,6 @@ build-user:
|
||||
variables:
|
||||
IMAGE: debian-all-test-cross
|
||||
CONFIGURE_ARGS: --disable-tools --disable-system
|
||||
--target-list-exclude=alpha-linux-user,sh4-linux-user
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
build-user-static:
|
||||
@@ -387,18 +258,6 @@ build-user-static:
|
||||
variables:
|
||||
IMAGE: debian-all-test-cross
|
||||
CONFIGURE_ARGS: --disable-tools --disable-system --static
|
||||
--target-list-exclude=alpha-linux-user,sh4-linux-user
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
# targets stuck on older compilers
|
||||
build-legacy:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
job: amd64-debian-legacy-cross-container
|
||||
variables:
|
||||
IMAGE: debian-legacy-test-cross
|
||||
TARGETS: alpha-linux-user alpha-softmmu sh4-linux-user
|
||||
CONFIGURE_ARGS: --disable-tools
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
build-user-hexagon:
|
||||
@@ -411,9 +270,7 @@ build-user-hexagon:
|
||||
CONFIGURE_ARGS: --disable-tools --disable-docs --enable-debug-tcg
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
# Build the softmmu targets we have check-tcg tests and compilers in
|
||||
# our omnibus all-test-cross container. Those targets that haven't got
|
||||
# Debian cross compiler support need to use special containers.
|
||||
# Only build the softmmu targets we have check-tcg tests for
|
||||
build-some-softmmu:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
@@ -421,18 +278,7 @@ build-some-softmmu:
|
||||
variables:
|
||||
IMAGE: debian-all-test-cross
|
||||
CONFIGURE_ARGS: --disable-tools --enable-debug
|
||||
TARGETS: arm-softmmu aarch64-softmmu i386-softmmu riscv64-softmmu
|
||||
s390x-softmmu x86_64-softmmu
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
build-loongarch64:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
job: loongarch-debian-cross-container
|
||||
variables:
|
||||
IMAGE: debian-loongarch-cross
|
||||
CONFIGURE_ARGS: --disable-tools --enable-debug
|
||||
TARGETS: loongarch64-linux-user loongarch64-softmmu
|
||||
TARGETS: xtensa-softmmu arm-softmmu aarch64-softmmu alpha-softmmu
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
# We build tricore in a very minimal tricore only container
|
||||
@@ -452,8 +298,8 @@ clang-system:
|
||||
job: amd64-fedora-container
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-ubsan
|
||||
--extra-cflags=-fno-sanitize-recover=undefined
|
||||
CONFIGURE_ARGS: --cc=clang --cxx=clang++
|
||||
--extra-cflags=-fsanitize=undefined --extra-cflags=-fno-sanitize-recover=undefined
|
||||
TARGETS: alpha-softmmu arm-softmmu m68k-softmmu mips64-softmmu s390x-softmmu
|
||||
MAKE_CHECK_ARGS: check-qtest check-tcg
|
||||
|
||||
@@ -464,9 +310,9 @@ clang-user:
|
||||
timeout: 70m
|
||||
variables:
|
||||
IMAGE: debian-all-test-cross
|
||||
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --disable-system --enable-ubsan
|
||||
--target-list-exclude=alpha-linux-user,microblazeel-linux-user,aarch64_be-linux-user,i386-linux-user,m68k-linux-user,mipsn32el-linux-user,xtensaeb-linux-user
|
||||
--extra-cflags=-fno-sanitize-recover=undefined
|
||||
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --disable-system
|
||||
--target-list-exclude=microblazeel-linux-user,aarch64_be-linux-user,i386-linux-user,m68k-linux-user,mipsn32el-linux-user,xtensaeb-linux-user
|
||||
--extra-cflags=-fsanitize=undefined --extra-cflags=-fno-sanitize-recover=undefined
|
||||
MAKE_CHECK_ARGS: check-unit check-tcg
|
||||
|
||||
# Set LD_JOBS=1 because this requires LTO and ld consumes a large amount of memory.
|
||||
@@ -507,14 +353,14 @@ check-cfi-aarch64:
|
||||
IMAGE: fedora
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
functional-cfi-aarch64:
|
||||
extends: .functional_test_job_template
|
||||
avocado-cfi-aarch64:
|
||||
extends: .avocado_test_job_template
|
||||
needs:
|
||||
- job: build-cfi-aarch64
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
MAKE_CHECK_ARGS: check-avocado check-functional
|
||||
MAKE_CHECK_ARGS: check-avocado
|
||||
|
||||
build-cfi-ppc64-s390x:
|
||||
extends:
|
||||
@@ -545,14 +391,14 @@ check-cfi-ppc64-s390x:
|
||||
IMAGE: fedora
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
functional-cfi-ppc64-s390x:
|
||||
extends: .functional_test_job_template
|
||||
avocado-cfi-ppc64-s390x:
|
||||
extends: .avocado_test_job_template
|
||||
needs:
|
||||
- job: build-cfi-ppc64-s390x
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
MAKE_CHECK_ARGS: check-avocado check-functional
|
||||
MAKE_CHECK_ARGS: check-avocado
|
||||
|
||||
build-cfi-x86_64:
|
||||
extends:
|
||||
@@ -579,14 +425,14 @@ check-cfi-x86_64:
|
||||
IMAGE: fedora
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
functional-cfi-x86_64:
|
||||
extends: .functional_test_job_template
|
||||
avocado-cfi-x86_64:
|
||||
extends: .avocado_test_job_template
|
||||
needs:
|
||||
- job: build-cfi-x86_64
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
MAKE_CHECK_ARGS: check-avocado check-functional
|
||||
MAKE_CHECK_ARGS: check-avocado
|
||||
|
||||
tsan-build:
|
||||
extends: .native_build_job_template
|
||||
@@ -597,9 +443,6 @@ tsan-build:
|
||||
CONFIGURE_ARGS: --enable-tsan --cc=clang --cxx=clang++
|
||||
--enable-trace-backends=ust --disable-slirp
|
||||
TARGETS: x86_64-softmmu ppc64-softmmu riscv64-softmmu x86_64-linux-user
|
||||
# Remove when we switch to a distro with clang >= 18
|
||||
# https://github.com/google/sanitizers/issues/1716
|
||||
MAKE: setarch -R make
|
||||
|
||||
# gcov is a GCC features
|
||||
gcov:
|
||||
@@ -611,7 +454,7 @@ gcov:
|
||||
IMAGE: ubuntu2204
|
||||
CONFIGURE_ARGS: --enable-gcov
|
||||
TARGETS: aarch64-softmmu ppc64-softmmu s390x-softmmu x86_64-softmmu
|
||||
MAKE_CHECK_ARGS: check-unit check-softfloat
|
||||
MAKE_CHECK_ARGS: check
|
||||
after_script:
|
||||
- cd build
|
||||
- gcovr --xml-pretty --exclude-unreachable-branches --print-summary
|
||||
@@ -619,12 +462,8 @@ gcov:
|
||||
coverage: /^\s*lines:\s*\d+.\d+\%/
|
||||
artifacts:
|
||||
name: ${CI_JOB_NAME}-${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHA}
|
||||
when: always
|
||||
expire_in: 2 days
|
||||
paths:
|
||||
- build/meson-logs/testlog.txt
|
||||
reports:
|
||||
junit: build/meson-logs/testlog.junit.xml
|
||||
coverage_report:
|
||||
coverage_format: cobertura
|
||||
path: build/coverage.xml
|
||||
@@ -641,15 +480,12 @@ build-oss-fuzz:
|
||||
- CC="clang" CXX="clang++" CFLAGS="-fsanitize=address"
|
||||
./scripts/oss-fuzz/build.sh
|
||||
- export ASAN_OPTIONS="fast_unwind_on_malloc=0"
|
||||
- failures=0
|
||||
- for fuzzer in $(find ./build-oss-fuzz/DEST_DIR/ -executable -type f
|
||||
| grep -v slirp); do
|
||||
grep "LLVMFuzzerTestOneInput" ${fuzzer} > /dev/null 2>&1 || continue ;
|
||||
echo Testing ${fuzzer} ... ;
|
||||
"${fuzzer}" -runs=1 -seed=1 || { echo "FAILED:"" ${fuzzer} exit code is $?"; failures=$(($failures+1)); };
|
||||
"${fuzzer}" -runs=1 -seed=1 || exit 1 ;
|
||||
done
|
||||
- echo "Number of failures:"" $failures"
|
||||
- test $failures = 0
|
||||
|
||||
build-tci:
|
||||
extends: .native_build_job_template
|
||||
@@ -658,10 +494,10 @@ build-tci:
|
||||
variables:
|
||||
IMAGE: debian-all-test-cross
|
||||
script:
|
||||
- TARGETS="aarch64 arm hppa m68k microblaze ppc64 s390x x86_64"
|
||||
- TARGETS="aarch64 alpha arm hppa m68k microblaze ppc64 s390x x86_64"
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --enable-tcg-interpreter --disable-kvm --disable-docs --disable-gtk --disable-vnc
|
||||
- ../configure --enable-tcg-interpreter --disable-docs --disable-gtk --disable-vnc
|
||||
--target-list="$(for tg in $TARGETS; do echo -n ${tg}'-softmmu '; done)"
|
||||
|| { cat config.log meson-logs/meson-log.txt && exit 1; }
|
||||
- make -j"$JOBS"
|
||||
@@ -679,9 +515,9 @@ build-tci:
|
||||
build-without-defaults:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
job: amd64-centos9-container
|
||||
job: amd64-centos8-container
|
||||
variables:
|
||||
IMAGE: centos9
|
||||
IMAGE: centos8
|
||||
CONFIGURE_ARGS:
|
||||
--without-default-devices
|
||||
--without-default-features
|
||||
@@ -689,13 +525,14 @@ build-without-defaults:
|
||||
--disable-pie
|
||||
--disable-qom-cast-debug
|
||||
--disable-strip
|
||||
--target-list-exclude=aarch64-softmmu,microblaze-softmmu,mips64-softmmu,mipsel-softmmu,ppc64-softmmu,sh4el-softmmu,xtensa-softmmu,x86_64-softmmu
|
||||
MAKE_CHECK_ARGS: check
|
||||
TARGETS: avr-softmmu mips64-softmmu s390x-softmmu sh4-softmmu
|
||||
sparc64-softmmu hexagon-linux-user i386-linux-user s390x-linux-user
|
||||
MAKE_CHECK_ARGS: check-unit check-qtest-avr check-qtest-mips64
|
||||
|
||||
build-libvhost-user:
|
||||
extends: .base_job_template
|
||||
stage: build
|
||||
image: $CI_REGISTRY_IMAGE/qemu/fedora:$QEMU_CI_CONTAINER_TAG
|
||||
image: $CI_REGISTRY_IMAGE/qemu/fedora:latest
|
||||
needs:
|
||||
job: amd64-fedora-container
|
||||
script:
|
||||
@@ -715,7 +552,7 @@ build-tools-and-docs-debian:
|
||||
# when running on 'master' we use pre-existing container
|
||||
optional: true
|
||||
variables:
|
||||
IMAGE: debian
|
||||
IMAGE: debian-amd64
|
||||
MAKE_CHECK_ARGS: check-unit ctags TAGS cscope
|
||||
CONFIGURE_ARGS: --disable-system --disable-user --enable-docs --enable-tools
|
||||
QEMU_JOB_PUBLISH: 1
|
||||
@@ -735,7 +572,7 @@ build-tools-and-docs-debian:
|
||||
# of what topic branch they're currently using
|
||||
pages:
|
||||
extends: .base_job_template
|
||||
image: $CI_REGISTRY_IMAGE/qemu/debian:$QEMU_CI_CONTAINER_TAG
|
||||
image: $CI_REGISTRY_IMAGE/qemu/debian-amd64:latest
|
||||
stage: test
|
||||
needs:
|
||||
- job: build-tools-and-docs-debian
|
||||
@@ -743,55 +580,14 @@ pages:
|
||||
- mkdir -p public
|
||||
# HTML-ised source tree
|
||||
- make gtags
|
||||
# We unset variables to work around a bug in some htags versions
|
||||
# which causes it to fail when the environment is large
|
||||
- CI_COMMIT_MESSAGE= CI_COMMIT_TAG_MESSAGE= htags
|
||||
-anT --tree-view=filetree -m qemu_init
|
||||
- htags -anT --tree-view=filetree -m qemu_init
|
||||
-t "Welcome to the QEMU sourcecode"
|
||||
- mv HTML public/src
|
||||
# Project documentation
|
||||
- make -C build install DESTDIR=$(pwd)/temp-install
|
||||
- mv temp-install/usr/local/share/doc/qemu/* public/
|
||||
artifacts:
|
||||
when: on_success
|
||||
paths:
|
||||
- public
|
||||
variables:
|
||||
QEMU_JOB_PUBLISH: 1
|
||||
|
||||
coverity:
|
||||
image: $CI_REGISTRY_IMAGE/qemu/fedora:$QEMU_CI_CONTAINER_TAG
|
||||
stage: build
|
||||
allow_failure: true
|
||||
timeout: 3h
|
||||
needs:
|
||||
- job: amd64-fedora-container
|
||||
optional: true
|
||||
before_script:
|
||||
- dnf install -y curl wget
|
||||
script:
|
||||
# would be nice to cancel the job if over quota (https://gitlab.com/gitlab-org/gitlab/-/issues/256089)
|
||||
# for example:
|
||||
# curl --request POST --header "PRIVATE-TOKEN: $CI_JOB_TOKEN" "${CI_SERVER_URL}/api/v4/projects/${CI_PROJECT_ID}/jobs/${CI_JOB_ID}/cancel
|
||||
- 'scripts/coverity-scan/run-coverity-scan --check-upload-only || { exitcode=$?; if test $exitcode = 1; then
|
||||
exit 0;
|
||||
else
|
||||
exit $exitcode;
|
||||
fi; };
|
||||
scripts/coverity-scan/run-coverity-scan --update-tools-only > update-tools.log 2>&1 || { cat update-tools.log; exit 1; };
|
||||
scripts/coverity-scan/run-coverity-scan --no-update-tools'
|
||||
rules:
|
||||
- if: '$COVERITY_TOKEN == null'
|
||||
when: never
|
||||
- if: '$COVERITY_EMAIL == null'
|
||||
when: never
|
||||
# Never included on upstream pipelines, except for schedules
|
||||
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_PIPELINE_SOURCE == "schedule"'
|
||||
when: on_success
|
||||
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM'
|
||||
when: never
|
||||
# Forks don't get any pipeline unless QEMU_CI=1 or QEMU_CI=2 is set
|
||||
- if: '$QEMU_CI != "1" && $QEMU_CI != "2"'
|
||||
when: never
|
||||
# Always manual on forks even if $QEMU_CI == "2"
|
||||
- when: manual
|
||||
|
@@ -19,9 +19,10 @@ cwd = os.getcwd()
reponame = os.path.basename(cwd)
repourl = "https://gitlab.com/%s/%s.git" % (namespace, reponame)

print(f"adding upstream git repo @ {repourl}")
subprocess.check_call(["git", "remote", "add", "check-dco", repourl])
subprocess.check_call(["git", "fetch", "check-dco", "master"])
subprocess.check_call(["git", "fetch", "check-dco", "master"],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)

ancestor = subprocess.check_output(["git", "merge-base",
"check-dco/master", "HEAD"],

@@ -78,10 +79,7 @@ of Origin 1.1 (DCO):

To indicate acceptance of the DCO every commit must have a tag

Signed-off-by: YOUR NAME <EMAIL>

where "YOUR NAME" is your commonly known identity in the context
of the community.
Signed-off-by: REAL NAME <EMAIL>

This can be achieved by passing the "-s" flag to the "git commit" command.
@@ -19,12 +19,13 @@ cwd = os.getcwd()
reponame = os.path.basename(cwd)
repourl = "https://gitlab.com/%s/%s.git" % (namespace, reponame)

print(f"adding upstream git repo @ {repourl}")
# GitLab CI environment does not give us any direct info about the
# base for the user's branch. We thus need to figure out a common
# ancestor between the user's branch and current git master.
subprocess.check_call(["git", "remote", "add", "check-patch", repourl])
subprocess.check_call(["git", "fetch", "check-patch", "master"])
subprocess.check_call(["git", "fetch", "check-patch", "master"],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)

ancestor = subprocess.check_output(["git", "merge-base",
"check-patch/master", "HEAD"],
@@ -13,12 +13,10 @@
|
||||
.cirrus_build_job:
|
||||
extends: .base_job_template
|
||||
stage: build
|
||||
image: registry.gitlab.com/libvirt/libvirt-ci/cirrus-run:latest
|
||||
image: registry.gitlab.com/libvirt/libvirt-ci/cirrus-run:master
|
||||
needs: []
|
||||
# 20 mins larger than "timeout_in" in cirrus/build.yml
|
||||
# as there's often a 5-10 minute delay before Cirrus CI
|
||||
# actually starts the task
|
||||
timeout: 80m
|
||||
allow_failure: true
|
||||
script:
|
||||
- source .gitlab-ci.d/cirrus/$NAME.vars
|
||||
- sed -e "s|[@]CI_REPOSITORY_URL@|$CI_REPOSITORY_URL|g"
|
||||
@@ -46,30 +44,80 @@
|
||||
variables:
|
||||
QEMU_JOB_CIRRUS: 1
|
||||
|
||||
x64-freebsd-14-build:
|
||||
x64-freebsd-12-build:
|
||||
extends: .cirrus_build_job
|
||||
variables:
|
||||
NAME: freebsd-14
|
||||
NAME: freebsd-12
|
||||
CIRRUS_VM_INSTANCE_TYPE: freebsd_instance
|
||||
CIRRUS_VM_IMAGE_SELECTOR: image_family
|
||||
CIRRUS_VM_IMAGE_NAME: freebsd-14-2
|
||||
CIRRUS_VM_IMAGE_NAME: freebsd-12-4
|
||||
CIRRUS_VM_CPUS: 8
|
||||
CIRRUS_VM_RAM: 8G
|
||||
UPDATE_COMMAND: pkg update; pkg upgrade -y
|
||||
INSTALL_COMMAND: pkg install -y
|
||||
CONFIGURE_ARGS: --target-list-exclude=arm-softmmu,i386-softmmu,microblaze-softmmu,mips64el-softmmu,mipsel-softmmu,mips-softmmu,ppc-softmmu,sh4eb-softmmu,xtensa-softmmu
|
||||
TEST_TARGETS: check
|
||||
|
||||
aarch64-macos-build:
|
||||
x64-freebsd-13-build:
|
||||
extends: .cirrus_build_job
|
||||
variables:
|
||||
NAME: macos-14
|
||||
NAME: freebsd-13
|
||||
CIRRUS_VM_INSTANCE_TYPE: freebsd_instance
|
||||
CIRRUS_VM_IMAGE_SELECTOR: image_family
|
||||
CIRRUS_VM_IMAGE_NAME: freebsd-13-1
|
||||
CIRRUS_VM_CPUS: 8
|
||||
CIRRUS_VM_RAM: 8G
|
||||
UPDATE_COMMAND: pkg update; pkg upgrade -y
|
||||
INSTALL_COMMAND: pkg install -y
|
||||
TEST_TARGETS: check
|
||||
|
||||
aarch64-macos-12-base-build:
|
||||
extends: .cirrus_build_job
|
||||
variables:
|
||||
NAME: macos-12
|
||||
CIRRUS_VM_INSTANCE_TYPE: macos_instance
|
||||
CIRRUS_VM_IMAGE_SELECTOR: image
|
||||
CIRRUS_VM_IMAGE_NAME: ghcr.io/cirruslabs/macos-runner:sonoma
|
||||
CIRRUS_VM_IMAGE_NAME: ghcr.io/cirruslabs/macos-monterey-base:latest
|
||||
CIRRUS_VM_CPUS: 12
|
||||
CIRRUS_VM_RAM: 24G
|
||||
UPDATE_COMMAND: brew update
|
||||
INSTALL_COMMAND: brew install
|
||||
PATH_EXTRA: /opt/homebrew/ccache/libexec:/opt/homebrew/gettext/bin
|
||||
PKG_CONFIG_PATH: /opt/homebrew/curl/lib/pkgconfig:/opt/homebrew/ncurses/lib/pkgconfig:/opt/homebrew/readline/lib/pkgconfig
|
||||
CONFIGURE_ARGS: --target-list-exclude=arm-softmmu,i386-softmmu,microblazeel-softmmu,mips64-softmmu,mipsel-softmmu,mips-softmmu,ppc-softmmu,sh4-softmmu,xtensaeb-softmmu
|
||||
TEST_TARGETS: check-unit check-block check-qapi-schema check-softfloat check-qtest-x86_64
|
||||
|
||||
|
||||
# The following jobs run VM-based tests via KVM on a Linux-based Cirrus-CI job
|
||||
.cirrus_kvm_job:
|
||||
extends: .base_job_template
|
||||
stage: build
|
||||
image: registry.gitlab.com/libvirt/libvirt-ci/cirrus-run:master
|
||||
needs: []
|
||||
timeout: 80m
|
||||
script:
|
||||
- sed -e "s|[@]CI_REPOSITORY_URL@|$CI_REPOSITORY_URL|g"
|
||||
-e "s|[@]CI_COMMIT_REF_NAME@|$CI_COMMIT_REF_NAME|g"
|
||||
-e "s|[@]CI_COMMIT_SHA@|$CI_COMMIT_SHA|g"
|
||||
-e "s|[@]NAME@|$NAME|g"
|
||||
-e "s|[@]CONFIGURE_ARGS@|$CONFIGURE_ARGS|g"
|
||||
-e "s|[@]TEST_TARGETS@|$TEST_TARGETS|g"
|
||||
<.gitlab-ci.d/cirrus/kvm-build.yml >.gitlab-ci.d/cirrus/$NAME.yml
|
||||
- cat .gitlab-ci.d/cirrus/$NAME.yml
|
||||
- cirrus-run -v --show-build-log always .gitlab-ci.d/cirrus/$NAME.yml
|
||||
variables:
|
||||
QEMU_JOB_CIRRUS: 1
|
||||
QEMU_JOB_OPTIONAL: 1
|
||||
|
||||
|
||||
x86-netbsd:
|
||||
extends: .cirrus_kvm_job
|
||||
variables:
|
||||
NAME: netbsd
|
||||
CONFIGURE_ARGS: --target-list=x86_64-softmmu,ppc64-softmmu,aarch64-softmmu
|
||||
TEST_TARGETS: check
|
||||
|
||||
x86-openbsd:
|
||||
extends: .cirrus_kvm_job
|
||||
variables:
|
||||
NAME: openbsd
|
||||
CONFIGURE_ARGS: --target-list=i386-softmmu,riscv64-softmmu,mips64-softmmu
|
||||
TEST_TARGETS: check
|
||||
|
@@ -16,17 +16,15 @@ env:
|
||||
TEST_TARGETS: "@TEST_TARGETS@"
|
||||
|
||||
build_task:
|
||||
# A little shorter than GitLab timeout in ../cirrus.yml
|
||||
timeout_in: 60m
|
||||
install_script:
|
||||
- @UPDATE_COMMAND@
|
||||
- @INSTALL_COMMAND@ @PKGS@
|
||||
- if test -n "@PYPI_PKGS@" ; then PYLIB=$(@PYTHON@ -c 'import sysconfig; print(sysconfig.get_path("stdlib"))'); rm -f $PYLIB/EXTERNALLY-MANAGED; @PIP3@ install @PYPI_PKGS@ ; fi
|
||||
- if test -n "@PYPI_PKGS@" ; then @PIP3@ install @PYPI_PKGS@ ; fi
|
||||
clone_script:
|
||||
- git clone --depth 100 "$CI_REPOSITORY_URL" .
|
||||
- git fetch origin "$CI_COMMIT_REF_NAME"
|
||||
- git reset --hard "$CI_COMMIT_SHA"
|
||||
step_script:
|
||||
build_script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --enable-werror $CONFIGURE_ARGS
|
||||
|
.gitlab-ci.d/cirrus/freebsd-12.vars (new file, 16 lines)
@@ -0,0 +1,16 @@
|
||||
# THIS FILE WAS AUTO-GENERATED
|
||||
#
|
||||
# $ lcitool variables freebsd-12 qemu
|
||||
#
|
||||
# https://gitlab.com/libvirt/libvirt-ci
|
||||
|
||||
CCACHE='/usr/local/bin/ccache'
|
||||
CPAN_PKGS=''
|
||||
CROSS_PKGS=''
|
||||
MAKE='/usr/local/bin/gmake'
|
||||
NINJA='/usr/local/bin/ninja'
|
||||
PACKAGING_COMMAND='pkg'
|
||||
PIP3='/usr/local/bin/pip-3.8'
|
||||
PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio socat spice-protocol tesseract usbredir virglrenderer vte3 zstd'
|
||||
PYPI_PKGS=''
|
||||
PYTHON='/usr/local/bin/python3'
|
.gitlab-ci.d/cirrus/freebsd-13.vars (new file, 16 lines)
@@ -0,0 +1,16 @@
|
||||
# THIS FILE WAS AUTO-GENERATED
|
||||
#
|
||||
# $ lcitool variables freebsd-13 qemu
|
||||
#
|
||||
# https://gitlab.com/libvirt/libvirt-ci
|
||||
|
||||
CCACHE='/usr/local/bin/ccache'
|
||||
CPAN_PKGS=''
|
||||
CROSS_PKGS=''
|
||||
MAKE='/usr/local/bin/gmake'
|
||||
NINJA='/usr/local/bin/ninja'
|
||||
PACKAGING_COMMAND='pkg'
|
||||
PIP3='/usr/local/bin/pip-3.8'
|
||||
PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio socat spice-protocol tesseract usbredir virglrenderer vte3 zstd'
|
||||
PYPI_PKGS=''
|
||||
PYTHON='/usr/local/bin/python3'
|
@@ -1,16 +0,0 @@
|
||||
# THIS FILE WAS AUTO-GENERATED
|
||||
#
|
||||
# $ lcitool variables freebsd-14 qemu
|
||||
#
|
||||
# https://gitlab.com/libvirt/libvirt-ci
|
||||
|
||||
CCACHE='/usr/local/bin/ccache'
|
||||
CPAN_PKGS=''
|
||||
CROSS_PKGS=''
|
||||
MAKE='/usr/local/bin/gmake'
|
||||
NINJA='/usr/local/bin/ninja'
|
||||
PACKAGING_COMMAND='pkg'
|
||||
PIP3='/usr/local/bin/pip'
|
||||
PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk-vnc gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson mtools ncurses nettle ninja opencv pixman pkgconf png py311-numpy py311-pillow py311-pip py311-pyyaml py311-sphinx py311-sphinx_rtd_theme py311-tomli python3 rpm2cpio rust rust-bindgen-cli sdl2 sdl2_image snappy sndio socat spice-protocol tesseract usbredir virglrenderer vte3 xorriso zstd'
|
||||
PYPI_PKGS=''
|
||||
PYTHON='/usr/local/bin/python3'
|
.gitlab-ci.d/cirrus/kvm-build.yml (new file, 31 lines)
@@ -0,0 +1,31 @@
|
||||
container:
|
||||
image: fedora:35
|
||||
cpu: 4
|
||||
memory: 8Gb
|
||||
kvm: true
|
||||
|
||||
env:
|
||||
CIRRUS_CLONE_DEPTH: 1
|
||||
CI_REPOSITORY_URL: "@CI_REPOSITORY_URL@"
|
||||
CI_COMMIT_REF_NAME: "@CI_COMMIT_REF_NAME@"
|
||||
CI_COMMIT_SHA: "@CI_COMMIT_SHA@"
|
||||
|
||||
@NAME@_task:
|
||||
@NAME@_vm_cache:
|
||||
folder: $HOME/.cache/qemu-vm
|
||||
install_script:
|
||||
- dnf update -y
|
||||
- dnf install -y git make openssh-clients qemu-img qemu-system-x86 wget
|
||||
clone_script:
|
||||
- git clone --depth 100 "$CI_REPOSITORY_URL" .
|
||||
- git fetch origin "$CI_COMMIT_REF_NAME"
|
||||
- git reset --hard "$CI_COMMIT_SHA"
|
||||
build_script:
|
||||
- if [ -f $HOME/.cache/qemu-vm/images/@NAME@.img ]; then
|
||||
make vm-build-@NAME@ J=$(getconf _NPROCESSORS_ONLN)
|
||||
EXTRA_CONFIGURE_OPTS="@CONFIGURE_ARGS@"
|
||||
BUILD_TARGET="@TEST_TARGETS@" ;
|
||||
else
|
||||
make vm-build-@NAME@ J=$(getconf _NPROCESSORS_ONLN) BUILD_TARGET=help
|
||||
EXTRA_CONFIGURE_OPTS="--disable-system --disable-user --disable-tools" ;
|
||||
fi
|
.gitlab-ci.d/cirrus/macos-12.vars (new file, 16 lines)
@@ -0,0 +1,16 @@
|
||||
# THIS FILE WAS AUTO-GENERATED
|
||||
#
|
||||
# $ lcitool variables macos-12 qemu
|
||||
#
|
||||
# https://gitlab.com/libvirt/libvirt-ci
|
||||
|
||||
CCACHE='/opt/homebrew/bin/ccache'
|
||||
CPAN_PKGS=''
|
||||
CROSS_PKGS=''
|
||||
MAKE='/opt/homebrew/bin/gmake'
|
||||
NINJA='/opt/homebrew/bin/ninja'
|
||||
PACKAGING_COMMAND='brew'
|
||||
PIP3='/opt/homebrew/bin/pip3'
|
||||
PKGS='bash bc bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo json-c libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson ncurses nettle ninja pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy socat sparse spice-protocol tesseract usbredir vde vte3 zlib zstd'
|
||||
PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme'
|
||||
PYTHON='/opt/homebrew/bin/python3'
|
@@ -1,16 +0,0 @@
|
||||
# THIS FILE WAS AUTO-GENERATED
|
||||
#
|
||||
# $ lcitool variables macos-14 qemu
|
||||
#
|
||||
# https://gitlab.com/libvirt/libvirt-ci
|
||||
|
||||
CCACHE='/opt/homebrew/bin/ccache'
|
||||
CPAN_PKGS=''
|
||||
CROSS_PKGS=''
|
||||
MAKE='/opt/homebrew/bin/gmake'
|
||||
NINJA='/opt/homebrew/bin/ninja'
|
||||
PACKAGING_COMMAND='brew'
|
||||
PIP3='/opt/homebrew/bin/pip3'
|
||||
PKGS='bash bc bindgen bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 gtk-vnc jemalloc jpeg-turbo json-c libcbor libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson mtools ncurses nettle ninja pixman pkg-config python3 rpm2cpio rust sdl2 sdl2_image snappy socat sparse spice-protocol swtpm tesseract usbredir vde vte3 xorriso zlib zstd'
|
||||
PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme tomli'
|
||||
PYTHON='/opt/homebrew/bin/python3'
|
@@ -1,10 +1,10 @@
|
||||
include:
|
||||
- local: '/.gitlab-ci.d/container-template.yml'
|
||||
|
||||
amd64-centos9-container:
|
||||
amd64-centos8-container:
|
||||
extends: .container_job_template
|
||||
variables:
|
||||
NAME: centos9
|
||||
NAME: centos8
|
||||
|
||||
amd64-fedora-container:
|
||||
extends: .container_job_template
|
||||
|
@@ -1,3 +1,9 @@
|
||||
alpha-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-alpha-cross
|
||||
|
||||
amd64-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
@@ -10,18 +16,18 @@ amd64-debian-user-cross-container:
|
||||
variables:
|
||||
NAME: debian-all-test-cross
|
||||
|
||||
amd64-debian-legacy-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-legacy-test-cross
|
||||
|
||||
arm64-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-arm64-cross
|
||||
|
||||
armel-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-armel-cross
|
||||
|
||||
armhf-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
@@ -34,17 +40,23 @@ hexagon-cross-container:
|
||||
variables:
|
||||
NAME: debian-hexagon-cross
|
||||
|
||||
loongarch-debian-cross-container:
|
||||
hppa-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-loongarch-cross
|
||||
NAME: debian-hppa-cross
|
||||
|
||||
i686-debian-cross-container:
|
||||
m68k-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-i686-cross
|
||||
NAME: debian-m68k-cross
|
||||
|
||||
mips64-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-mips64-cross
|
||||
|
||||
mips64el-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
@@ -52,12 +64,24 @@ mips64el-debian-cross-container:
|
||||
variables:
|
||||
NAME: debian-mips64el-cross
|
||||
|
||||
mips-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-mips-cross
|
||||
|
||||
mipsel-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-mipsel-cross
|
||||
|
||||
powerpc-test-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-powerpc-test-cross
|
||||
|
||||
ppc64el-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
@@ -71,7 +95,13 @@ riscv64-debian-cross-container:
|
||||
allow_failure: true
|
||||
variables:
|
||||
NAME: debian-riscv64-cross
|
||||
QEMU_JOB_OPTIONAL: 1
|
||||
|
||||
# we can however build TCG tests using a non-sid base
|
||||
riscv64-debian-test-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-riscv64-test-cross
|
||||
|
||||
s390x-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
@@ -79,6 +109,18 @@ s390x-debian-cross-container:
|
||||
variables:
|
||||
NAME: debian-s390x-cross
|
||||
|
||||
sh4-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-sh4-cross
|
||||
|
||||
sparc64-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian-sparc64-cross
|
||||
|
||||
tricore-debian-cross-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
@@ -90,6 +132,21 @@ xtensa-debian-cross-container:
|
||||
variables:
|
||||
NAME: debian-xtensa-cross
|
||||
|
||||
cris-fedora-cross-container:
|
||||
extends: .container_job_template
|
||||
variables:
|
||||
NAME: fedora-cris-cross
|
||||
|
||||
i386-fedora-cross-container:
|
||||
extends: .container_job_template
|
||||
variables:
|
||||
NAME: fedora-i386-cross
|
||||
|
||||
win32-fedora-cross-container:
|
||||
extends: .container_job_template
|
||||
variables:
|
||||
NAME: fedora-win32-cross
|
||||
|
||||
win64-fedora-cross-container:
|
||||
extends: .container_job_template
|
||||
variables:
|
||||
|
@@ -1,15 +1,15 @@
|
||||
.container_job_template:
|
||||
extends: .base_job_template
|
||||
image: docker:latest
|
||||
image: docker:stable
|
||||
stage: containers
|
||||
services:
|
||||
- docker:dind
|
||||
before_script:
|
||||
- export TAG="$CI_REGISTRY_IMAGE/qemu/$NAME:$QEMU_CI_CONTAINER_TAG"
|
||||
# Always ':latest' because we always use upstream as a common cache source
|
||||
- export TAG="$CI_REGISTRY_IMAGE/qemu/$NAME:latest"
|
||||
- export COMMON_TAG="$CI_REGISTRY/qemu-project/qemu/qemu/$NAME:latest"
|
||||
- apk add python3
|
||||
- docker info
|
||||
- docker login $CI_REGISTRY -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD"
|
||||
- until docker info; do sleep 1; done
|
||||
script:
|
||||
- echo "TAG:$TAG"
|
||||
- echo "COMMON_TAG:$COMMON_TAG"
|
||||
|
@@ -11,7 +11,7 @@ amd64-debian-container:
|
||||
extends: .container_job_template
|
||||
stage: containers
|
||||
variables:
|
||||
NAME: debian
|
||||
NAME: debian-amd64
|
||||
|
||||
amd64-ubuntu2204-container:
|
||||
extends: .container_job_template
|
||||
@@ -27,9 +27,3 @@ python-container:
|
||||
extends: .container_job_template
|
||||
variables:
|
||||
NAME: python
|
||||
|
||||
amd64-fedora-rust-nightly-container:
|
||||
extends: .container_job_template
|
||||
variables:
|
||||
NAME: fedora-rust-nightly
|
||||
allow_failure: true
|
||||
|
@@ -1,52 +1,23 @@
.cross_system_build_job:
  extends: .base_job_template
  stage: build
  image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
  cache:
    paths:
      - ccache
    key: "$CI_JOB_NAME"
    when: always
  image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
  timeout: 80m
  before_script:
    - source scripts/ci/gitlab-ci-section
    - section_start setup "Pre-script setup"
    - JOBS=$(expr $(nproc) + 1)
    - cat /packages.txt
    - section_end setup
  script:
    - export CCACHE_BASEDIR="$(pwd)"
    - export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
    - export CCACHE_MAXSIZE="500M"
    - export PATH="$CCACHE_WRAPPERSDIR:$PATH"
    - mkdir build
    - cd build
    - ccache --zero-stats
    - section_start configure "Running configure"
    - ../configure --enable-werror --disable-docs --enable-fdt=system
      --disable-user $QEMU_CONFIGURE_OPTS $EXTRA_CONFIGURE_OPTS
      --target-list-exclude="arm-softmmu
      --target-list-exclude="arm-softmmu cris-softmmu
      i386-softmmu microblaze-softmmu mips-softmmu mipsel-softmmu
      mips64-softmmu ppc-softmmu riscv32-softmmu sh4-softmmu
      sparc-softmmu xtensa-softmmu $CROSS_SKIP_TARGETS"
    - section_end configure
    - section_start build "Building QEMU"
    - make -j"$JOBS" all check-build
    - section_end build
    - section_start test "Running tests"
    - if test -n "$MAKE_CHECK_ARGS";
      then
        $MAKE -j"$JOBS" $MAKE_CHECK_ARGS ;
      fi
    - section_end test
    - section_start installer "Building the installer"
    - make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS
    - if grep -q "EXESUF=.exe" config-host.mak;
      then make installer;
      version="$(git describe --match v[0-9]* 2>/dev/null || git rev-parse --short HEAD)";
      mv -v qemu-setup*.exe qemu-setup-${version}.exe;
      fi
    - section_end installer
    - ccache --show-stats

# Job to cross-build specific accelerators.
#
@@ -56,76 +27,34 @@
.cross_accel_build_job:
  extends: .base_job_template
  stage: build
  image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
  timeout: 60m
  cache:
    paths:
      - ccache/
    key: "$CI_JOB_NAME"
  before_script:
    - source scripts/ci/gitlab-ci-section
    - JOBS=$(expr $(nproc) + 1)
  image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
  timeout: 30m
  script:
    - export CCACHE_BASEDIR="$(pwd)"
    - export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
    - export CCACHE_MAXSIZE="500M"
    - export PATH="$CCACHE_WRAPPERSDIR:$PATH"
    - mkdir build
    - cd build
    - section_start configure "Running configure"
    - ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
      --disable-tools --enable-${ACCEL:-kvm} $EXTRA_CONFIGURE_OPTS
    - section_end configure
    - section_start build "Building QEMU"
    - make -j"$JOBS" all check-build
    - section_end build
    - section_start test "Running tests"
    - if test -n "$MAKE_CHECK_ARGS";
      then
        $MAKE -j"$JOBS" $MAKE_CHECK_ARGS ;
      fi
    - section_end test
    - make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS

.cross_user_build_job:
  extends: .base_job_template
  stage: build
  image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
  cache:
    paths:
      - ccache/
    key: "$CI_JOB_NAME"
  before_script:
    - source scripts/ci/gitlab-ci-section
    - JOBS=$(expr $(nproc) + 1)
  image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
  script:
    - export CCACHE_BASEDIR="$(pwd)"
    - export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
    - export CCACHE_MAXSIZE="500M"
    - mkdir build
    - cd build
    - section_start configure "Running configure"
    - ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
      --disable-system --target-list-exclude="aarch64_be-linux-user
      alpha-linux-user m68k-linux-user microblazeel-linux-user
      or1k-linux-user ppc-linux-user sparc-linux-user
      alpha-linux-user cris-linux-user m68k-linux-user microblazeel-linux-user
      nios2-linux-user or1k-linux-user ppc-linux-user sparc-linux-user
      xtensa-linux-user $CROSS_SKIP_TARGETS"
    - section_end configure
    - section_start build "Building QEMU"
    - make -j"$JOBS" all check-build
    - section_end build
    - section_start test "Running tests"
    - if test -n "$MAKE_CHECK_ARGS";
      then
        $MAKE -j"$JOBS" $MAKE_CHECK_ARGS ;
      fi
    - section_end test
    - make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS

# We can still run some tests on some of our cross build jobs. They can add this
# template to their extends to save the build logs and test results
.cross_test_artifacts:
  artifacts:
    name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
    when: always
    expire_in: 7 days
    paths:
      - build/meson-logs/testlog.txt

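The three build templates above share the same ccache arrangement. As a minimal sketch of what that setup amounts to outside of CI (the wrapper directory is an assumption: $CCACHE_WRAPPERSDIR is expected to be provided by the container image, with /usr/lib/ccache as a common fallback on Debian-style images):

# Hedged illustration only; paths and variables not shown in the diff are assumptions.
export CCACHE_BASEDIR="$(pwd)"                         # make cache hits independent of the checkout path
export CCACHE_DIR="$CCACHE_BASEDIR/ccache"             # keep the cache inside the tree so the CI cache: rule can save it
export CCACHE_MAXSIZE=500M
export PATH="${CCACHE_WRAPPERSDIR:-/usr/lib/ccache}:$PATH"   # compiler names now resolve to ccache wrappers

ccache --zero-stats
command -v gcc                                         # should point at the wrapper, not the real compiler
make -j"$(nproc)"
ccache --show-stats                                    # compare hits and misses after the build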
@@ -1,6 +1,13 @@
include:
  - local: '/.gitlab-ci.d/crossbuild-template.yml'

cross-armel-user:
  extends: .cross_user_build_job
  needs:
    job: armel-debian-cross-container
  variables:
    IMAGE: debian-armel-cross

cross-armhf-user:
  extends: .cross_user_build_job
  needs:
@@ -22,51 +29,28 @@ cross-arm64-user:
  variables:
    IMAGE: debian-arm64-cross

cross-arm64-kvm-only:
  extends: .cross_accel_build_job
  needs:
    job: arm64-debian-cross-container
  variables:
    IMAGE: debian-arm64-cross
    EXTRA_CONFIGURE_OPTS: --disable-tcg --without-default-features

cross-i686-system:
  extends:
    - .cross_system_build_job
    - .cross_test_artifacts
  needs:
    job: i686-debian-cross-container
  variables:
    IMAGE: debian-i686-cross
    EXTRA_CONFIGURE_OPTS: --disable-kvm
    MAKE_CHECK_ARGS: check-qtest

cross-i686-user:
cross-i386-user:
  extends:
    - .cross_user_build_job
    - .cross_test_artifacts
  needs:
    job: i686-debian-cross-container
    job: i386-fedora-cross-container
  variables:
    IMAGE: debian-i686-cross
    IMAGE: fedora-i386-cross
    MAKE_CHECK_ARGS: check

cross-i686-tci:
cross-i386-tci:
  extends:
    - .cross_accel_build_job
    - .cross_test_artifacts
  timeout: 60m
  needs:
    job: i686-debian-cross-container
    job: i386-fedora-cross-container
  variables:
    IMAGE: debian-i686-cross
    IMAGE: fedora-i386-cross
    ACCEL: tcg-interpreter
    EXTRA_CONFIGURE_OPTS: --target-list=i386-softmmu,i386-linux-user,aarch64-softmmu,aarch64-linux-user,ppc-softmmu,ppc-linux-user --disable-plugins --disable-kvm
    # Force tests to run with reduced parallelism, to see whether this
    # reduces the flakiness of this CI job. The CI
    # environment by default shows us 8 CPUs and so we
    # would otherwise be using a parallelism of 9.
    MAKE_CHECK_ARGS: check check-tcg -j2
    EXTRA_CONFIGURE_OPTS: --target-list=i386-softmmu,i386-linux-user,aarch64-softmmu,aarch64-linux-user,ppc-softmmu,ppc-linux-user
    MAKE_CHECK_ARGS: check check-tcg

cross-mipsel-system:
  extends: .cross_system_build_job
@@ -167,19 +151,31 @@ cross-mips64el-kvm-only:
    IMAGE: debian-mips64el-cross
    EXTRA_CONFIGURE_OPTS: --disable-tcg --target-list=mips64el-softmmu

cross-win32-system:
  extends: .cross_system_build_job
  needs:
    job: win32-fedora-cross-container
  variables:
    IMAGE: fedora-win32-cross
    EXTRA_CONFIGURE_OPTS: --enable-fdt=internal
    CROSS_SKIP_TARGETS: alpha-softmmu avr-softmmu hppa-softmmu m68k-softmmu
      microblazeel-softmmu mips64el-softmmu nios2-softmmu
  artifacts:
    paths:
      - build/qemu-setup*.exe

cross-win64-system:
  extends: .cross_system_build_job
  needs:
    job: win64-fedora-cross-container
  variables:
    IMAGE: fedora-win64-cross
    EXTRA_CONFIGURE_OPTS: --enable-fdt=internal --disable-plugins
    EXTRA_CONFIGURE_OPTS: --enable-fdt=internal
    CROSS_SKIP_TARGETS: alpha-softmmu avr-softmmu hppa-softmmu
      m68k-softmmu microblazeel-softmmu
      m68k-softmmu microblazeel-softmmu nios2-softmmu
      or1k-softmmu rx-softmmu sh4eb-softmmu sparc64-softmmu
      tricore-softmmu xtensaeb-softmmu
  artifacts:
    when: on_success
    paths:
      - build/qemu-setup*.exe

@@ -10,25 +10,23 @@
# gitlab-runner. To avoid problems that gitlab-runner can cause while
# reusing the GIT repository, let's enable the clone strategy, which
# guarantees a fresh repository on each job run.
variables:
  GIT_STRATEGY: clone

# All custom runners can extend this template to upload the testlog
# data as an artifact and also feed the junit report
.custom_runner_template:
  extends: .base_job_template
  variables:
    GIT_STRATEGY: clone
    GIT_FETCH_EXTRA_FLAGS: --no-tags --prune --quiet
  artifacts:
    name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
    expire_in: 7 days
    when: always
    paths:
      - build/build.ninja
      - build/meson-logs
      - build/meson-logs/testlog.txt
    reports:
      junit: build/meson-logs/testlog.junit.xml

include:
  - local: '/.gitlab-ci.d/custom-runners/ubuntu-22.04-s390x.yml'
  - local: '/.gitlab-ci.d/custom-runners/ubuntu-20.04-s390x.yml'
  - local: '/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml'
  - local: '/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch32.yml'
  - local: '/.gitlab-ci.d/custom-runners/centos-stream-8-x86_64.yml'

24 .gitlab-ci.d/custom-runners/centos-stream-8-x86_64.yml (new file)
@@ -0,0 +1,24 @@
# All centos-stream-8 jobs should run successfully in an environment
# setup by the scripts/ci/setup/stream/8/build-environment.yml task
# "Installation of extra packages to build QEMU"

centos-stream-8-x86_64:
  extends: .custom_runner_template
  allow_failure: true
  needs: []
  stage: build
  tags:
    - centos_stream_8
    - x86_64
  rules:
    - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
    - if: "$CENTOS_STREAM_8_x86_64_RUNNER_AVAILABLE"
  before_script:
    - JOBS=$(expr $(nproc) + 1)
  script:
    - mkdir build
    - cd build
    - ../scripts/ci/org.centos/stream/8/x86_64/configure
      || { cat config.log meson-logs/meson-log.txt; exit 1; }
    - make -j"$JOBS"
    - make NINJA=":" check check-avocado

@@ -1,32 +1,34 @@
|
||||
# All ubuntu-22.04 jobs should run successfully in an environment
|
||||
# setup by the scripts/ci/setup/ubuntu/build-environment.yml task
|
||||
# "Install basic packages to build QEMU on Ubuntu 22.04"
|
||||
# All ubuntu-20.04 jobs should run successfully in an environment
|
||||
# setup by the scripts/ci/setup/build-environment.yml task
|
||||
# "Install basic packages to build QEMU on Ubuntu 20.04/20.04"
|
||||
|
||||
ubuntu-22.04-s390x-all-linux:
|
||||
ubuntu-20.04-s390x-all-linux-static:
|
||||
extends: .custom_runner_template
|
||||
needs: []
|
||||
stage: build
|
||||
tags:
|
||||
- ubuntu_22.04
|
||||
- ubuntu_20.04
|
||||
- s390x
|
||||
rules:
|
||||
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
|
||||
- if: "$S390X_RUNNER_AVAILABLE"
|
||||
script:
|
||||
# --disable-libssh is needed because of https://bugs.launchpad.net/qemu/+bug/1838763
|
||||
# --disable-glusterfs is needed because there's no static version of those libs in distro supplied packages
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --enable-debug --disable-system --disable-tools --disable-docs
|
||||
- ../configure --enable-debug --static --disable-system --disable-glusterfs --disable-libssh
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc`
|
||||
- make --output-sync check-tcg
|
||||
- make --output-sync -j`nproc` check
|
||||
|
||||
ubuntu-22.04-s390x-all-system:
|
||||
ubuntu-20.04-s390x-all:
|
||||
extends: .custom_runner_template
|
||||
needs: []
|
||||
stage: build
|
||||
tags:
|
||||
- ubuntu_22.04
|
||||
- ubuntu_20.04
|
||||
- s390x
|
||||
timeout: 75m
|
||||
rules:
|
||||
@@ -35,17 +37,17 @@ ubuntu-22.04-s390x-all-system:
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --disable-user
|
||||
- ../configure --disable-libssh
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc`
|
||||
- make --output-sync -j`nproc` check
|
||||
|
||||
ubuntu-22.04-s390x-alldbg:
|
||||
ubuntu-20.04-s390x-alldbg:
|
||||
extends: .custom_runner_template
|
||||
needs: []
|
||||
stage: build
|
||||
tags:
|
||||
- ubuntu_22.04
|
||||
- ubuntu_20.04
|
||||
- s390x
|
||||
rules:
|
||||
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
|
||||
@@ -57,18 +59,18 @@ ubuntu-22.04-s390x-alldbg:
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --enable-debug
|
||||
- ../configure --enable-debug --disable-libssh
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make clean
|
||||
- make --output-sync -j`nproc`
|
||||
- make --output-sync -j`nproc` check
|
||||
|
||||
ubuntu-22.04-s390x-clang:
|
||||
ubuntu-20.04-s390x-clang:
|
||||
extends: .custom_runner_template
|
||||
needs: []
|
||||
stage: build
|
||||
tags:
|
||||
- ubuntu_22.04
|
||||
- ubuntu_20.04
|
||||
- s390x
|
||||
rules:
|
||||
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
|
||||
@@ -80,16 +82,16 @@ ubuntu-22.04-s390x-clang:
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --cc=clang --cxx=clang++ --enable-ubsan
|
||||
- ../configure --disable-libssh --cc=clang --cxx=clang++ --enable-sanitizers
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc`
|
||||
- make --output-sync -j`nproc` check
|
||||
|
||||
ubuntu-22.04-s390x-tci:
|
||||
ubuntu-20.04-s390x-tci:
|
||||
needs: []
|
||||
stage: build
|
||||
tags:
|
||||
- ubuntu_22.04
|
||||
- ubuntu_20.04
|
||||
- s390x
|
||||
rules:
|
||||
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
|
||||
@@ -101,16 +103,16 @@ ubuntu-22.04-s390x-tci:
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --enable-tcg-interpreter
|
||||
- ../configure --disable-libssh --enable-tcg-interpreter
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc`
|
||||
|
||||
ubuntu-22.04-s390x-notcg:
|
||||
ubuntu-20.04-s390x-notcg:
|
||||
extends: .custom_runner_template
|
||||
needs: []
|
||||
stage: build
|
||||
tags:
|
||||
- ubuntu_22.04
|
||||
- ubuntu_20.04
|
||||
- s390x
|
||||
rules:
|
||||
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
|
||||
@@ -122,7 +124,7 @@ ubuntu-22.04-s390x-notcg:
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --disable-tcg
|
||||
- ../configure --disable-libssh --disable-tcg
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc`
|
||||
- make --output-sync -j`nproc` check
|
@@ -1,6 +1,6 @@
|
||||
# All ubuntu-22.04 jobs should run successfully in an environment
|
||||
# setup by the scripts/ci/setup/ubuntu/build-environment.yml task
|
||||
# "Install basic packages to build QEMU on Ubuntu 22.04"
|
||||
# setup by the scripts/ci/setup/qemu/build-environment.yml task
|
||||
# "Install basic packages to build QEMU on Ubuntu 20.04"
|
||||
|
||||
ubuntu-22.04-aarch32-all:
|
||||
extends: .custom_runner_template
|
||||
|
@@ -1,6 +1,6 @@
|
||||
# All ubuntu-22.04 jobs should run successfully in an environment
|
||||
# setup by the scripts/ci/setup/ubuntu/build-environment.yml task
|
||||
# "Install basic packages to build QEMU on Ubuntu 22.04"
|
||||
# All ubuntu-20.04 jobs should run successfully in an environment
|
||||
# setup by the scripts/ci/setup/qemu/build-environment.yml task
|
||||
# "Install basic packages to build QEMU on Ubuntu 20.04"
|
||||
|
||||
ubuntu-22.04-aarch64-all-linux-static:
|
||||
extends: .custom_runner_template
|
||||
@@ -45,28 +45,6 @@ ubuntu-22.04-aarch64-all:
|
||||
- make --output-sync -j`nproc --ignore=40`
|
||||
- make --output-sync -j`nproc --ignore=40` check
|
||||
|
||||
ubuntu-22.04-aarch64-without-defaults:
|
||||
extends: .custom_runner_template
|
||||
needs: []
|
||||
stage: build
|
||||
tags:
|
||||
- ubuntu_22.04
|
||||
- aarch64
|
||||
rules:
|
||||
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
|
||||
when: manual
|
||||
allow_failure: true
|
||||
- if: "$AARCH64_RUNNER_AVAILABLE"
|
||||
when: manual
|
||||
allow_failure: true
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --disable-user --without-default-devices --without-default-features
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc --ignore=40`
|
||||
- make --output-sync -j`nproc --ignore=40` check
|
||||
|
||||
ubuntu-22.04-aarch64-alldbg:
|
||||
extends: .custom_runner_template
|
||||
needs: []
|
||||
@@ -103,7 +81,7 @@ ubuntu-22.04-aarch64-clang:
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --disable-libssh --cc=clang --cxx=clang++ --enable-ubsan
|
||||
- ../configure --disable-libssh --cc=clang --cxx=clang++ --enable-sanitizers
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc --ignore=40`
|
||||
- make --output-sync -j`nproc --ignore=40` check
|
||||
@@ -145,7 +123,7 @@ ubuntu-22.04-aarch64-notcg:
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --disable-tcg --with-devices-aarch64=minimal
|
||||
- ../configure --disable-tcg
|
||||
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
|
||||
- make --output-sync -j`nproc --ignore=40`
|
||||
- make --output-sync -j`nproc --ignore=40` check
|
||||
|
@@ -24,10 +24,6 @@
|
||||
- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_MESSAGE =~ /opensbi/i'
|
||||
when: manual
|
||||
|
||||
# Scheduled runs on mainline don't get pipelines except for the special Coverity job
|
||||
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_PIPELINE_SOURCE == "schedule"'
|
||||
when: never
|
||||
|
||||
# Run if any files affecting the build output are touched
|
||||
- changes:
|
||||
- .gitlab-ci.d/opensbi.yml
|
||||
@@ -46,15 +42,17 @@
|
||||
docker-opensbi:
|
||||
extends: .opensbi_job_rules
|
||||
stage: containers
|
||||
image: docker:latest
|
||||
image: docker:stable
|
||||
services:
|
||||
- docker:dind
|
||||
- docker:stable-dind
|
||||
variables:
|
||||
GIT_DEPTH: 3
|
||||
IMAGE_TAG: $CI_REGISTRY_IMAGE:opensbi-cross-build
|
||||
# We don't use TLS
|
||||
DOCKER_HOST: tcp://docker:2375
|
||||
DOCKER_TLS_CERTDIR: ""
|
||||
before_script:
|
||||
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
|
||||
- until docker info; do sleep 1; done
|
||||
script:
|
||||
- docker pull $IMAGE_TAG || true
|
||||
- docker build --cache-from $IMAGE_TAG --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
|
||||
@@ -67,7 +65,6 @@ build-opensbi:
|
||||
stage: build
|
||||
needs: ['docker-opensbi']
|
||||
artifacts:
|
||||
when: on_success
|
||||
paths: # 'artifacts.zip' will contains the following files:
|
||||
- pc-bios/opensbi-riscv32-generic-fw_dynamic.bin
|
||||
- pc-bios/opensbi-riscv64-generic-fw_dynamic.bin
|
||||
|
@@ -1,13 +1,6 @@
|
||||
# This file contains the set of jobs run by the QEMU project:
|
||||
# https://gitlab.com/qemu-project/qemu/-/pipelines
|
||||
|
||||
variables:
|
||||
RUNNER_TAG: ""
|
||||
|
||||
default:
|
||||
tags:
|
||||
- $RUNNER_TAG
|
||||
|
||||
include:
|
||||
- local: '/.gitlab-ci.d/base.yml'
|
||||
- local: '/.gitlab-ci.d/stages.yml'
|
||||
|
@@ -26,7 +26,7 @@ check-dco:
|
||||
check-python-minreqs:
|
||||
extends: .base_job_template
|
||||
stage: test
|
||||
image: $CI_REGISTRY_IMAGE/qemu/python:$QEMU_CI_CONTAINER_TAG
|
||||
image: $CI_REGISTRY_IMAGE/qemu/python:latest
|
||||
script:
|
||||
- make -C python check-minreqs
|
||||
variables:
|
||||
@@ -37,7 +37,7 @@ check-python-minreqs:
|
||||
check-python-tox:
|
||||
extends: .base_job_template
|
||||
stage: test
|
||||
image: $CI_REGISTRY_IMAGE/qemu/python:$QEMU_CI_CONTAINER_TAG
|
||||
image: $CI_REGISTRY_IMAGE/qemu/python:latest
|
||||
script:
|
||||
- make -C python check-tox
|
||||
variables:
|
||||
|
@@ -1,67 +1,25 @@
|
||||
msys2-64bit:
|
||||
.shared_msys2_builder:
|
||||
extends: .base_job_template
|
||||
tags:
|
||||
- saas-windows-medium-amd64
|
||||
- shared-windows
|
||||
- windows
|
||||
- windows-1809
|
||||
cache:
|
||||
key: "$CI_JOB_NAME"
|
||||
key: "${CI_JOB_NAME}-cache"
|
||||
paths:
|
||||
- msys64/var/cache
|
||||
- ccache
|
||||
when: always
|
||||
- ${CI_PROJECT_DIR}/msys64/var/cache
|
||||
needs: []
|
||||
stage: build
|
||||
timeout: 100m
|
||||
variables:
|
||||
# Select the "64 bit, gcc and MSVCRT" MSYS2 environment
|
||||
MSYSTEM: MINGW64
|
||||
# This feature doesn't (currently) work with PowerShell, it stops
|
||||
# the echo'ing of commands being run and doesn't show any timing
|
||||
FF_SCRIPT_SECTIONS: 0
|
||||
CONFIGURE_ARGS: --disable-system --enable-tools -Ddebug=false -Doptimization=0
|
||||
# The Windows git is a bit older so override the default
|
||||
GIT_FETCH_EXTRA_FLAGS: --no-tags --prune --quiet
|
||||
artifacts:
|
||||
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
|
||||
expire_in: 7 days
|
||||
paths:
|
||||
- build/meson-logs/testlog.txt
|
||||
reports:
|
||||
junit: "build/meson-logs/testlog.junit.xml"
|
||||
timeout: 80m
|
||||
before_script:
|
||||
- Write-Output "Acquiring msys2.exe installer at $(Get-Date -Format u)"
|
||||
- If ( !(Test-Path -Path msys64\var\cache ) ) {
|
||||
mkdir msys64\var\cache
|
||||
}
|
||||
- Invoke-WebRequest
|
||||
"https://repo.msys2.org/distrib/msys2-x86_64-latest.sfx.exe.sig"
|
||||
-outfile "msys2.exe.sig"
|
||||
- if ( Test-Path -Path msys64\var\cache\msys2.exe.sig ) {
|
||||
Write-Output "Cached installer sig" ;
|
||||
if ( ((Get-FileHash msys2.exe.sig).Hash -ne (Get-FileHash msys64\var\cache\msys2.exe.sig).Hash) ) {
|
||||
Write-Output "Mis-matched installer sig, new installer download required" ;
|
||||
Remove-Item -Path msys64\var\cache\msys2.exe.sig ;
|
||||
if ( Test-Path -Path msys64\var\cache\msys2.exe ) {
|
||||
Remove-Item -Path msys64\var\cache\msys2.exe
|
||||
}
|
||||
} else {
|
||||
Write-Output "Matched installer sig, cached installer still valid"
|
||||
}
|
||||
} else {
|
||||
Write-Output "No cached installer sig, new installer download required" ;
|
||||
if ( Test-Path -Path msys64\var\cache\msys2.exe ) {
|
||||
Remove-Item -Path msys64\var\cache\msys2.exe
|
||||
}
|
||||
}
|
||||
- if ( !(Test-Path -Path msys64\var\cache\msys2.exe ) ) {
|
||||
Write-Output "Fetching latest installer" ;
|
||||
- If ( !(Test-Path -Path msys64\var\cache\msys2.exe ) ) {
|
||||
Invoke-WebRequest
|
||||
"https://repo.msys2.org/distrib/msys2-x86_64-latest.sfx.exe"
|
||||
-outfile "msys64\var\cache\msys2.exe" ;
|
||||
Copy-Item -Path msys2.exe.sig -Destination msys64\var\cache\msys2.exe.sig
|
||||
} else {
|
||||
Write-Output "Using cached installer"
|
||||
"https://github.com/msys2/msys2-installer/releases/download/2022-06-03/msys2-base-x86_64-20220603.sfx.exe"
|
||||
-outfile "msys64\var\cache\msys2.exe"
|
||||
}
|
||||
- Write-Output "Invoking msys2.exe installer at $(Get-Date -Format u)"
|
||||
- msys64\var\cache\msys2.exe -y
|
||||
- ((Get-Content -path .\msys64\etc\\post-install\\07-pacman-key.post -Raw)
|
||||
-replace '--refresh-keys', '--version') |
|
||||
@@ -70,37 +28,97 @@ msys2-64bit:
|
||||
- .\msys64\usr\bin\bash -lc 'pacman --noconfirm -Syuu' # Core update
|
||||
- .\msys64\usr\bin\bash -lc 'pacman --noconfirm -Syuu' # Normal update
|
||||
- taskkill /F /FI "MODULES eq msys-2.0.dll"
|
||||
|
||||
msys2-64bit:
|
||||
extends: .shared_msys2_builder
|
||||
script:
|
||||
- Write-Output "Installing mingw packages at $(Get-Date -Format u)"
|
||||
- .\msys64\usr\bin\bash -lc "pacman -Sy --noconfirm --needed
|
||||
bison diffutils flex
|
||||
git grep make sed
|
||||
mingw-w64-x86_64-binutils
|
||||
mingw-w64-x86_64-ccache
|
||||
mingw-w64-x86_64-capstone
|
||||
mingw-w64-x86_64-curl
|
||||
mingw-w64-x86_64-cyrus-sasl
|
||||
mingw-w64-x86_64-dtc
|
||||
mingw-w64-x86_64-gcc
|
||||
mingw-w64-x86_64-glib2
|
||||
mingw-w64-x86_64-gnutls
|
||||
mingw-w64-x86_64-gtk3
|
||||
mingw-w64-x86_64-libgcrypt
|
||||
mingw-w64-x86_64-libjpeg-turbo
|
||||
mingw-w64-x86_64-libnfs
|
||||
mingw-w64-x86_64-libpng
|
||||
mingw-w64-x86_64-libssh
|
||||
mingw-w64-x86_64-libtasn1
|
||||
mingw-w64-x86_64-libusb
|
||||
mingw-w64-x86_64-lzo2
|
||||
mingw-w64-x86_64-nettle
|
||||
mingw-w64-x86_64-ninja
|
||||
mingw-w64-x86_64-pixman
|
||||
mingw-w64-x86_64-pkgconf
|
||||
mingw-w64-x86_64-python
|
||||
mingw-w64-x86_64-zstd"
|
||||
- Write-Output "Running build at $(Get-Date -Format u)"
|
||||
- $env:JOBS = $(.\msys64\usr\bin\bash -lc nproc)
|
||||
mingw-w64-x86_64-SDL2
|
||||
mingw-w64-x86_64-SDL2_image
|
||||
mingw-w64-x86_64-snappy
|
||||
mingw-w64-x86_64-spice
|
||||
mingw-w64-x86_64-usbredir
|
||||
mingw-w64-x86_64-zstd "
|
||||
- $env:CHERE_INVOKING = 'yes' # Preserve the current working directory
|
||||
- $env:MSYSTEM = 'MINGW64' # Start a 64-bit MinGW environment
|
||||
- $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink
|
||||
- $env:CCACHE_BASEDIR = "$env:CI_PROJECT_DIR"
|
||||
- $env:CCACHE_DIR = "$env:CCACHE_BASEDIR/ccache"
|
||||
- $env:CCACHE_MAXSIZE = "500M"
|
||||
- $env:CCACHE_DEPEND = 1 # cache misses are too expensive with preprocessor mode
|
||||
- $env:CC = "ccache gcc"
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ..\msys64\usr\bin\bash -lc "ccache --zero-stats"
|
||||
- ..\msys64\usr\bin\bash -lc "../configure $CONFIGURE_ARGS"
|
||||
- ..\msys64\usr\bin\bash -lc "make -j$env:JOBS"
|
||||
- ..\msys64\usr\bin\bash -lc "make check MTESTARGS='$TEST_ARGS' || { cat meson-logs/testlog.txt; exit 1; } ;"
|
||||
- ..\msys64\usr\bin\bash -lc "ccache --show-stats"
|
||||
- Write-Output "Finished build at $(Get-Date -Format u)"
|
||||
- mkdir output
|
||||
- cd output
|
||||
# Note: do not remove "--without-default-devices"!
|
||||
# commit 9f8e6cad65a6 ("gitlab-ci: Speed up the msys2-64bit job by using --without-default-devices"
|
||||
# changed to compile QEMU with the --without-default-devices switch
|
||||
# for the msys2 64-bit job, due to the build could not complete within
|
||||
# the project timeout.
|
||||
- ..\msys64\usr\bin\bash -lc '../configure --target-list=x86_64-softmmu
|
||||
--without-default-devices --enable-fdt=system'
|
||||
- ..\msys64\usr\bin\bash -lc 'make'
|
||||
# qTests don't run successfully with "--without-default-devices",
|
||||
# so let's exclude the qtests from CI for now.
|
||||
- ..\msys64\usr\bin\bash -lc 'make check MTESTARGS=\"--no-suite qtest\" || { cat meson-logs/testlog.txt; exit 1; } ;'
|
||||
|
||||
msys2-32bit:
|
||||
extends: .shared_msys2_builder
|
||||
script:
|
||||
- .\msys64\usr\bin\bash -lc "pacman -Sy --noconfirm --needed
|
||||
bison diffutils flex
|
||||
git grep make sed
|
||||
mingw-w64-i686-capstone
|
||||
mingw-w64-i686-curl
|
||||
mingw-w64-i686-cyrus-sasl
|
||||
mingw-w64-i686-dtc
|
||||
mingw-w64-i686-gcc
|
||||
mingw-w64-i686-glib2
|
||||
mingw-w64-i686-gnutls
|
||||
mingw-w64-i686-gtk3
|
||||
mingw-w64-i686-libgcrypt
|
||||
mingw-w64-i686-libjpeg-turbo
|
||||
mingw-w64-i686-libnfs
|
||||
mingw-w64-i686-libpng
|
||||
mingw-w64-i686-libssh
|
||||
mingw-w64-i686-libtasn1
|
||||
mingw-w64-i686-libusb
|
||||
mingw-w64-i686-lzo2
|
||||
mingw-w64-i686-nettle
|
||||
mingw-w64-i686-ninja
|
||||
mingw-w64-i686-pixman
|
||||
mingw-w64-i686-pkgconf
|
||||
mingw-w64-i686-python
|
||||
mingw-w64-i686-SDL2
|
||||
mingw-w64-i686-SDL2_image
|
||||
mingw-w64-i686-snappy
|
||||
mingw-w64-i686-spice
|
||||
mingw-w64-i686-usbredir
|
||||
mingw-w64-i686-zstd "
|
||||
- $env:CHERE_INVOKING = 'yes' # Preserve the current working directory
|
||||
- $env:MSYSTEM = 'MINGW32' # Start a 32-bit MinGW environment
|
||||
- $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink
|
||||
- mkdir output
|
||||
- cd output
|
||||
- ..\msys64\usr\bin\bash -lc '../configure --target-list=ppc64-softmmu
|
||||
--enable-fdt=system'
|
||||
- ..\msys64\usr\bin\bash -lc 'make'
|
||||
- ..\msys64\usr\bin\bash -lc 'make check MTESTARGS=\"--no-suite qtest\" ||
|
||||
{ cat meson-logs/testlog.txt; exit 1; }'
|
||||
|
18 .gitmodules (vendored)
@@ -13,6 +13,9 @@
[submodule "roms/qemu-palcode"]
    path = roms/qemu-palcode
    url = https://gitlab.com/qemu-project/qemu-palcode.git
[submodule "dtc"]
    path = dtc
    url = https://gitlab.com/qemu-project/dtc.git
[submodule "roms/u-boot"]
    path = roms/u-boot
    url = https://gitlab.com/qemu-project/u-boot.git
@@ -22,12 +25,21 @@
[submodule "roms/QemuMacDrivers"]
    path = roms/QemuMacDrivers
    url = https://gitlab.com/qemu-project/QemuMacDrivers.git
[submodule "ui/keycodemapdb"]
    path = ui/keycodemapdb
    url = https://gitlab.com/qemu-project/keycodemapdb.git
[submodule "roms/seabios-hppa"]
    path = roms/seabios-hppa
    url = https://gitlab.com/qemu-project/seabios-hppa.git
[submodule "roms/u-boot-sam460ex"]
    path = roms/u-boot-sam460ex
    url = https://gitlab.com/qemu-project/u-boot-sam460ex.git
[submodule "tests/fp/berkeley-testfloat-3"]
    path = tests/fp/berkeley-testfloat-3
    url = https://gitlab.com/qemu-project/berkeley-testfloat-3.git
[submodule "tests/fp/berkeley-softfloat-3"]
    path = tests/fp/berkeley-softfloat-3
    url = https://gitlab.com/qemu-project/berkeley-softfloat-3.git
[submodule "roms/edk2"]
    path = roms/edk2
    url = https://gitlab.com/qemu-project/edk2.git
@@ -37,9 +49,15 @@
[submodule "roms/qboot"]
    path = roms/qboot
    url = https://gitlab.com/qemu-project/qboot.git
[submodule "meson"]
    path = meson
    url = https://gitlab.com/qemu-project/meson.git
[submodule "roms/vbootrom"]
    path = roms/vbootrom
    url = https://gitlab.com/qemu-project/vbootrom.git
[submodule "tests/lcitool/libvirt-ci"]
    path = tests/lcitool/libvirt-ci
    url = https://gitlab.com/libvirt/libvirt-ci.git
[submodule "subprojects/libvfio-user"]
    path = subprojects/libvfio-user
    url = https://gitlab.com/qemu-project/libvfio-user.git

32 .mailmap
@@ -30,41 +30,22 @@ malc <av1474@comtv.ru> malc <malc@c046a42c-6fe2-441c-8c8c-71466251a162>
|
||||
# Corrupted Author fields
|
||||
Aaron Larson <alarson@ddci.com> alarson@ddci.com
|
||||
Andreas Färber <andreas.faerber@web.de> Andreas Färber <andreas.faerber>
|
||||
fanwenjie <fanwj@mail.ustc.edu.cn> fanwj@mail.ustc.edu.cn <fanwj@mail.ustc.edu.cn>
|
||||
Jason Wang <jasowang@redhat.com> Jason Wang <jasowang>
|
||||
Marek Dolata <mkdolata@us.ibm.com> mkdolata@us.ibm.com <mkdolata@us.ibm.com>
|
||||
Michael Ellerman <mpe@ellerman.id.au> michael@ozlabs.org <michael@ozlabs.org>
|
||||
Nick Hudson <hnick@vmware.com> hnick@vmware.com <hnick@vmware.com>
|
||||
Timothée Cocault <timothee.cocault@gmail.com> timothee.cocault@gmail.com <timothee.cocault@gmail.com>
|
||||
Stefan Weil <sw@weilnetz.de> <weil@mail.berlios.de>
|
||||
Stefan Weil <sw@weilnetz.de> Stefan Weil <stefan@kiwi.(none)>
|
||||
|
||||
# There is also a:
|
||||
# (no author) <(no author)@c046a42c-6fe2-441c-8c8c-71466251a162>
|
||||
# for the cvs2svn initialization commit e63c3dc74bf.
|
||||
|
||||
# Next, translate a few commits where mailman rewrote the From: line due
|
||||
# to strict SPF and DMARC. Usually, our build process should be flagging
|
||||
# commits like these before maintainer merges; if you find the need to add
|
||||
# a line here, please also report a bug against the part of the build
|
||||
# process that let the mis-attribution slip through in the first place.
|
||||
#
|
||||
# If the mailing list munges your emails, use:
|
||||
# git config sendemail.from '"Your Name" <your.email@example.com>'
|
||||
# the use of "" in that line will differ from the typically unquoted
|
||||
# 'git config user.name', which in turn is sufficient for 'git send-email'
|
||||
# to add an extra From: line in the body of your email that takes
|
||||
# precedence over any munged From: in the mail's headers.
|
||||
# See https://lists.openembedded.org/g/openembedded-core/message/166515
|
||||
# and https://lists.gnu.org/archive/html/qemu-devel/2023-09/msg06784.html
|
||||
# to strict SPF, although we prefer to avoid adding more entries like that.
|
||||
Ed Swierk <eswierk@skyportsystems.com> Ed Swierk via Qemu-devel <qemu-devel@nongnu.org>
|
||||
Ian McKellar <ianloic@google.com> Ian McKellar via Qemu-devel <qemu-devel@nongnu.org>
|
||||
Julia Suvorova <jusual@mail.ru> Julia Suvorova via Qemu-devel <qemu-devel@nongnu.org>
|
||||
Justin Terry (VM) <juterry@microsoft.com> Justin Terry (VM) via Qemu-devel <qemu-devel@nongnu.org>
|
||||
Stefan Weil <sw@weilnetz.de> Stefan Weil via <qemu-devel@nongnu.org>
|
||||
Stefan Weil <sw@weilnetz.de> Stefan Weil via <qemu-trivial@nongnu.org>
|
||||
Andrey Drobyshev <andrey.drobyshev@virtuozzo.com> Andrey Drobyshev via <qemu-block@nongnu.org>
|
||||
BALATON Zoltan <balaton@eik.bme.hu> BALATON Zoltan via <qemu-ppc@nongnu.org>
|
||||
|
||||
# Next, replace old addresses by a more recent one.
|
||||
Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> <aleksandar.markovic@mips.com>
|
||||
@@ -73,10 +54,7 @@ Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> <amarkovic@wavecomp.com>
|
||||
Aleksandar Rikalo <aleksandar.rikalo@syrmia.com> <arikalo@wavecomp.com>
|
||||
Aleksandar Rikalo <aleksandar.rikalo@syrmia.com> <aleksandar.rikalo@rt-rk.com>
|
||||
Alexander Graf <agraf@csgraf.de> <agraf@suse.de>
|
||||
Ani Sinha <anisinha@redhat.com> <ani@anisinha.ca>
|
||||
Anthony Liguori <anthony@codemonkey.ws> Anthony Liguori <aliguori@us.ibm.com>
|
||||
Brian Cain <brian.cain@oss.qualcomm.com> <bcain@quicinc.com>
|
||||
Brian Cain <brian.cain@oss.qualcomm.com> <quic_bcain@quicinc.com>
|
||||
Christian Borntraeger <borntraeger@linux.ibm.com> <borntraeger@de.ibm.com>
|
||||
Damien Hedde <damien.hedde@dahe.fr> <damien.hedde@greensocs.com>
|
||||
Filip Bozuta <filip.bozuta@syrmia.com> <filip.bozuta@rt-rk.com.com>
|
||||
@@ -86,12 +64,8 @@ Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com>
|
||||
Huacai Chen <chenhuacai@kernel.org> <chenhc@lemote.com>
|
||||
Huacai Chen <chenhuacai@kernel.org> <chenhuacai@loongson.cn>
|
||||
James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
|
||||
Juan Quintela <quintela@trasno.org> <quintela@redhat.com>
|
||||
Leif Lindholm <quic_llindhol@quicinc.com> <leif.lindholm@linaro.org>
|
||||
Leif Lindholm <quic_llindhol@quicinc.com> <leif@nuviainc.com>
|
||||
Luc Michel <luc@lmichel.fr> <luc.michel@git.antfield.fr>
|
||||
Luc Michel <luc@lmichel.fr> <luc.michel@greensocs.com>
|
||||
Luc Michel <luc@lmichel.fr> <lmichel@kalray.eu>
|
||||
Radoslaw Biernacki <rad@semihalf.com> <radoslaw.biernacki@linaro.org>
|
||||
Paul Brook <paul@nowt.org> <paul@codesourcery.com>
|
||||
Paul Burton <paulburton@kernel.org> <paul.burton@mips.com>
|
||||
@@ -101,11 +75,7 @@ Paul Burton <paulburton@kernel.org> <pburton@wavecomp.com>
|
||||
Philippe Mathieu-Daudé <philmd@linaro.org> <f4bug@amsat.org>
|
||||
Philippe Mathieu-Daudé <philmd@linaro.org> <philmd@redhat.com>
|
||||
Philippe Mathieu-Daudé <philmd@linaro.org> <philmd@fungible.com>
|
||||
Roman Bolshakov <rbolshakov@ddn.com> <r.bolshakov@yadro.com>
|
||||
Sriram Yagnaraman <sriram.yagnaraman@ericsson.com> <sriram.yagnaraman@est.tech>
|
||||
Stefan Brankovic <stefan.brankovic@syrmia.com> <stefan.brankovic@rt-rk.com.com>
|
||||
Stefan Weil <sw@weilnetz.de> Stefan Weil <stefan@weilnetz.de>
|
||||
Taylor Simpson <ltaylorsimpson@gmail.com> <tsimpson@quicinc.com>
|
||||
Yongbok Kim <yongbok.kim@mips.com> <yongbok.kim@imgtec.com>
|
||||
|
||||
# Also list preferred name forms where people have changed their
|
||||
|
@@ -5,21 +5,16 @@
# Required
version: 2

# Set the version of Python and other tools you might need
build:
  os: ubuntu-22.04
  tools:
    python: "3.11"

# Build documentation in the docs/ directory with Sphinx
sphinx:
  configuration: docs/conf.py

# We recommend specifying your dependencies to enable reproducible builds:
# https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
python:
  install:
    - requirements: docs/requirements.txt

# We want all the document formats
formats: all

# For consistency, we require that QEMU's Sphinx extensions
# run with at least the same minimum version of Python that
# we require for other Python in our codebase (our conf.py
# enforces this, and some code needs it.)
python:
  version: 3.6

45 .travis.yml
@@ -1,5 +1,5 @@
|
||||
os: linux
|
||||
dist: jammy
|
||||
dist: focal
|
||||
language: c
|
||||
compiler:
|
||||
- gcc
|
||||
@@ -7,11 +7,13 @@ cache:
|
||||
# There is one cache per branch and compiler version.
|
||||
# characteristics of each job are used to identify the cache:
|
||||
# - OS name (currently only linux)
|
||||
# - OS distribution (e.g. "jammy" for Linux)
|
||||
# - OS distribution (for Linux, bionic or focal)
|
||||
# - Names and values of visible environment variables set in .travis.yml or Settings panel
|
||||
timeout: 1200
|
||||
ccache: true
|
||||
pip: true
|
||||
directories:
|
||||
- $HOME/avocado/data/cache
|
||||
|
||||
|
||||
# The channel name "irc.oftc.net#qemu" is encrypted against qemu/qemu
|
||||
@@ -32,8 +34,8 @@ env:
|
||||
- BASE_CONFIG="--disable-docs --disable-tools"
|
||||
- TEST_BUILD_CMD=""
|
||||
- TEST_CMD="make check V=1"
|
||||
# This is broadly a list of "mainline" system targets which have support across the major distros
|
||||
- MAIN_SYSTEM_TARGETS="aarch64-softmmu,mips64-softmmu,ppc64-softmmu,riscv64-softmmu,s390x-softmmu,x86_64-softmmu"
|
||||
# This is broadly a list of "mainline" softmmu targets which have support across the major distros
|
||||
- MAIN_SOFTMMU_TARGETS="aarch64-softmmu,mips64-softmmu,ppc64-softmmu,riscv64-softmmu,s390x-softmmu,x86_64-softmmu"
|
||||
- CCACHE_SLOPPINESS="include_file_ctime,include_file_mtime"
|
||||
- CCACHE_MAXSIZE=1G
|
||||
- G_MESSAGES_DEBUG=error
|
||||
@@ -81,6 +83,7 @@ jobs:
|
||||
|
||||
- name: "[aarch64] GCC check-tcg"
|
||||
arch: arm64
|
||||
dist: focal
|
||||
addons:
|
||||
apt_packages:
|
||||
- libaio-dev
|
||||
@@ -106,17 +109,17 @@ jobs:
|
||||
- libvdeplug-dev
|
||||
- libvte-2.91-dev
|
||||
- ninja-build
|
||||
- python3-tomli
|
||||
# Tests dependencies
|
||||
- genisoimage
|
||||
env:
|
||||
- TEST_CMD="make check check-tcg V=1"
|
||||
- CONFIG="--disable-containers --enable-fdt=system
|
||||
--target-list=${MAIN_SYSTEM_TARGETS} --cxx=/bin/false"
|
||||
--target-list=${MAIN_SOFTMMU_TARGETS} --cxx=/bin/false"
|
||||
- UNRELIABLE=true
|
||||
|
||||
- name: "[ppc64] Clang check-tcg"
|
||||
- name: "[ppc64] GCC check-tcg"
|
||||
arch: ppc64le
|
||||
compiler: clang
|
||||
dist: focal
|
||||
addons:
|
||||
apt_packages:
|
||||
- libaio-dev
|
||||
@@ -142,7 +145,6 @@ jobs:
|
||||
- libvdeplug-dev
|
||||
- libvte-2.91-dev
|
||||
- ninja-build
|
||||
- python3-tomli
|
||||
# Tests dependencies
|
||||
- genisoimage
|
||||
env:
|
||||
@@ -152,6 +154,7 @@ jobs:
|
||||
|
||||
- name: "[s390x] GCC check-tcg"
|
||||
arch: s390x
|
||||
dist: focal
|
||||
addons:
|
||||
apt_packages:
|
||||
- libaio-dev
|
||||
@@ -177,13 +180,13 @@ jobs:
|
||||
- libvdeplug-dev
|
||||
- libvte-2.91-dev
|
||||
- ninja-build
|
||||
- python3-tomli
|
||||
# Tests dependencies
|
||||
- genisoimage
|
||||
env:
|
||||
- TEST_CMD="make check check-tcg V=1"
|
||||
- CONFIG="--disable-containers
|
||||
--target-list=hppa-softmmu,mips64-softmmu,ppc64-softmmu,riscv64-softmmu,s390x-softmmu,x86_64-softmmu"
|
||||
- CONFIG="--disable-containers --enable-fdt=system
|
||||
--target-list=${MAIN_SOFTMMU_TARGETS},s390x-linux-user"
|
||||
- UNRELIABLE=true
|
||||
script:
|
||||
- BUILD_RC=0 && make -j${JOBS} || BUILD_RC=$?
|
||||
- |
|
||||
@@ -194,9 +197,9 @@ jobs:
|
||||
$(exit $BUILD_RC);
|
||||
fi
|
||||
|
||||
- name: "[s390x] Clang (other-system)"
|
||||
- name: "[s390x] GCC (other-softmmu)"
|
||||
arch: s390x
|
||||
compiler: clang
|
||||
dist: focal
|
||||
addons:
|
||||
apt_packages:
|
||||
- libaio-dev
|
||||
@@ -217,31 +220,29 @@ jobs:
|
||||
- libsnappy-dev
|
||||
- libzstd-dev
|
||||
- nettle-dev
|
||||
- xfslibs-dev
|
||||
- ninja-build
|
||||
- python3-tomli
|
||||
# Tests dependencies
|
||||
- genisoimage
|
||||
env:
|
||||
- CONFIG="--disable-containers --audio-drv-list=sdl --disable-user
|
||||
--target-list=arm-softmmu,avr-softmmu,microblaze-softmmu,sh4eb-softmmu,sparc64-softmmu,xtensaeb-softmmu"
|
||||
- CONFIG="--disable-containers --enable-fdt=system --audio-drv-list=sdl
|
||||
--disable-user --target-list-exclude=${MAIN_SOFTMMU_TARGETS}"
|
||||
|
||||
- name: "[s390x] GCC (user)"
|
||||
arch: s390x
|
||||
dist: focal
|
||||
addons:
|
||||
apt_packages:
|
||||
- libgcrypt20-dev
|
||||
- libglib2.0-dev
|
||||
- libgnutls28-dev
|
||||
- ninja-build
|
||||
- flex
|
||||
- bison
|
||||
- python3-tomli
|
||||
env:
|
||||
- TEST_CMD="make check check-tcg V=1"
|
||||
- CONFIG="--disable-containers --disable-system"
|
||||
|
||||
- name: "[s390x] Clang (disable-tcg)"
|
||||
arch: s390x
|
||||
dist: focal
|
||||
compiler: clang
|
||||
addons:
|
||||
apt_packages:
|
||||
@@ -268,8 +269,8 @@ jobs:
|
||||
- libvdeplug-dev
|
||||
- libvte-2.91-dev
|
||||
- ninja-build
|
||||
- python3-tomli
|
||||
env:
|
||||
- TEST_CMD="make check-unit"
|
||||
- CONFIG="--disable-containers --disable-tcg --enable-kvm --disable-tools
|
||||
--enable-fdt=system --host-cc=clang --cxx=clang++"
|
||||
- UNRELIABLE=true
|
||||
|
1 Kconfig
@@ -4,4 +4,3 @@ source accel/Kconfig
source target/Kconfig
source hw/Kconfig
source semihosting/Kconfig
source rust/Kconfig

21 Kconfig.host
@@ -5,21 +5,12 @@
config LINUX
    bool

config LIBCBOR
    bool

config GNUTLS
    bool

config OPENGL
    bool

config X11
    bool

config PIXMAN
    bool

config SPICE
    bool

@@ -29,9 +20,6 @@ config IVSHMEM
config TPM
    bool

config FDT
    bool

config VHOST_USER
    bool

@@ -44,6 +32,9 @@ config VHOST_KERNEL
config VIRTFS
    bool

config PVRDMA
    bool

config MULTIPROCESS_ALLOWED
    bool
    imply MULTIPROCESS
@@ -55,9 +46,3 @@ config FUZZ
config VFIO_USER_SERVER_ALLOWED
    bool
    imply VFIO_USER_SERVER

config HV_BALLOON_POSSIBLE
    bool

config HAVE_RUST
    bool

912 MAINTAINERS (diff suppressed: file too large)
91 Makefile
@@ -26,9 +26,9 @@ quiet-command-run = $(if $(V),,$(if $2,printf " %-7s %s\n" $2 $3 && ))$1
|
||||
quiet-@ = $(if $(V),,@)
|
||||
quiet-command = $(quiet-@)$(call quiet-command-run,$1,$2,$3)
|
||||
|
||||
UNCHECKED_GOALS := TAGS gtags cscope ctags dist \
|
||||
UNCHECKED_GOALS := %clean TAGS cscope ctags dist \
|
||||
help check-help print-% \
|
||||
docker docker-% lcitool-refresh vm-help vm-test vm-build-%
|
||||
docker docker-% vm-help vm-test vm-build-%
|
||||
|
||||
all:
|
||||
.PHONY: all clean distclean recurse-all dist msi FORCE
|
||||
@@ -45,6 +45,18 @@ include config-host.mak
|
||||
include Makefile.prereqs
|
||||
Makefile.prereqs: config-host.mak
|
||||
|
||||
git-submodule-update:
|
||||
.git-submodule-status: git-submodule-update config-host.mak
|
||||
Makefile: .git-submodule-status
|
||||
|
||||
.PHONY: git-submodule-update
|
||||
git-submodule-update:
|
||||
ifneq ($(GIT_SUBMODULES_ACTION),ignore)
|
||||
$(call quiet-command, \
|
||||
(GIT="$(GIT)" "$(SRC_PATH)/scripts/git-submodule.sh" $(GIT_SUBMODULES_ACTION) $(GIT_SUBMODULES)), \
|
||||
"GIT","$(GIT_SUBMODULES)")
|
||||
endif
|
||||
|
||||
# 0. ensure the build tree is okay
|
||||
|
||||
# Check that we're not trying to do an out-of-tree build from
|
||||
@@ -78,23 +90,21 @@ x := $(shell rm -rf meson-private meson-info meson-logs)
|
||||
endif
|
||||
|
||||
# 1. ensure config-host.mak is up-to-date
|
||||
config-host.mak: $(SRC_PATH)/configure $(SRC_PATH)/scripts/meson-buildoptions.sh \
|
||||
$(SRC_PATH)/pythondeps.toml $(SRC_PATH)/VERSION
|
||||
config-host.mak: $(SRC_PATH)/configure $(SRC_PATH)/scripts/meson-buildoptions.sh $(SRC_PATH)/VERSION
|
||||
@echo config-host.mak is out-of-date, running configure
|
||||
@if test -f meson-private/coredata.dat; then \
|
||||
./config.status --skip-meson; \
|
||||
else \
|
||||
./config.status; \
|
||||
./config.status && touch build.ninja.stamp; \
|
||||
fi
|
||||
|
||||
# 2. meson.stamp exists if meson has run at least once (so ninja reconfigure
|
||||
# works), but otherwise never needs to be updated
|
||||
|
||||
meson-private/coredata.dat: meson.stamp
|
||||
meson.stamp: config-host.mak
|
||||
@touch meson.stamp
|
||||
|
||||
# 3. ensure meson-generated build files are up-to-date
|
||||
# 3. ensure generated build files are up-to-date
|
||||
|
||||
ifneq ($(NINJA),)
|
||||
Makefile.ninja: build.ninja
|
||||
@@ -105,23 +115,15 @@ Makefile.ninja: build.ninja
|
||||
$(NINJA) -t query build.ninja | sed -n '1,/^ input:/d; /^ outputs:/q; s/$$/ \\/p'; \
|
||||
} > $@.tmp && mv $@.tmp $@
|
||||
-include Makefile.ninja
|
||||
endif
|
||||
|
||||
ifneq ($(MESON),)
|
||||
# The path to meson always points to pyvenv/bin/meson, but the absolute
|
||||
# paths could change. In that case, force a regeneration of build.ninja.
|
||||
# Note that this invocation of $(NINJA), just like when Make rebuilds
|
||||
# Makefiles, does not include -n.
|
||||
# A separate rule is needed for Makefile dependencies to avoid -n
|
||||
build.ninja: build.ninja.stamp
|
||||
$(build-files):
|
||||
build.ninja.stamp: meson.stamp $(build-files)
|
||||
@if test "$$(cat build.ninja.stamp)" = "$(MESON)" && test -n "$(NINJA)"; then \
|
||||
$(NINJA) build.ninja; \
|
||||
else \
|
||||
echo "$(MESON) setup --reconfigure $(SRC_PATH)"; \
|
||||
$(MESON) setup --reconfigure $(SRC_PATH); \
|
||||
fi && echo "$(MESON)" > $@
|
||||
$(NINJA) $(if $V,-v,) build.ninja && touch $@
|
||||
endif
|
||||
|
||||
ifneq ($(MESON),)
|
||||
Makefile.mtest: build.ninja scripts/mtest2make.py
|
||||
$(MESON) introspect --targets --tests --benchmarks | $(PYTHON) scripts/mtest2make.py > $@
|
||||
-include Makefile.mtest
|
||||
@@ -142,13 +144,8 @@ MAKE.n = $(findstring n,$(firstword $(filter-out --%,$(MAKEFLAGS))))
|
||||
MAKE.k = $(findstring k,$(firstword $(filter-out --%,$(MAKEFLAGS))))
|
||||
MAKE.q = $(findstring q,$(firstword $(filter-out --%,$(MAKEFLAGS))))
|
||||
MAKE.nq = $(if $(word 2, $(MAKE.n) $(MAKE.q)),nq)
|
||||
NINJAFLAGS = \
|
||||
$(if $V,-v) \
|
||||
$(if $(MAKE.n), -n) \
|
||||
$(if $(MAKE.k), -k0) \
|
||||
$(filter-out -j, \
|
||||
$(or $(filter -l% -j%, $(MAKEFLAGS)), \
|
||||
$(if $(filter --jobserver-auth=%, $(MAKEFLAGS)),, -j1))) \
|
||||
NINJAFLAGS = $(if $V,-v) $(if $(MAKE.n), -n) $(if $(MAKE.k), -k0) \
|
||||
$(filter-out -j, $(lastword -j1 $(filter -l% -j%, $(MAKEFLAGS)))) \
|
||||
-d keepdepfile
|
||||
ninja-cmd-goals = $(or $(MAKECMDGOALS), all)
|
||||
ninja-cmd-goals += $(foreach g, $(MAKECMDGOALS), $(.ninja-goals.$g))
|
||||
@@ -170,9 +167,19 @@ ifneq ($(filter $(ninja-targets), $(ninja-cmd-goals)),)
|
||||
endif
|
||||
endif
|
||||
|
||||
ifeq ($(CONFIG_PLUGIN),y)
|
||||
.PHONY: plugins
|
||||
plugins:
|
||||
$(call quiet-command,\
|
||||
$(MAKE) $(SUBDIR_MAKEFLAGS) -C contrib/plugins V="$(V)", \
|
||||
"BUILD", "example plugins")
|
||||
endif # $(CONFIG_PLUGIN)
|
||||
|
||||
else # config-host.mak does not exist
|
||||
config-host.mak:
|
||||
ifneq ($(filter-out $(UNCHECKED_GOALS),$(MAKECMDGOALS)),$(if $(MAKECMDGOALS),,fail))
|
||||
$(error Please call configure before running make)
|
||||
@echo "Please call configure before running make!"
|
||||
@exit 1
|
||||
endif
|
||||
endif # config-host.mak does not exist
|
||||
|
||||
@@ -182,15 +189,15 @@ include $(SRC_PATH)/tests/Makefile.include
|
||||
|
||||
all: recurse-all
|
||||
|
||||
SUBDIR_RULES=$(foreach t, all clean distclean, $(addsuffix /$(t), $(SUBDIRS)))
|
||||
.PHONY: $(SUBDIR_RULES)
|
||||
$(SUBDIR_RULES):
|
||||
ROMS_RULES=$(foreach t, all clean distclean, $(addsuffix /$(t), $(ROMS)))
|
||||
.PHONY: $(ROMS_RULES)
|
||||
$(ROMS_RULES):
|
||||
$(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) -C $(dir $@) V="$(V)" TARGET_DIR="$(dir $@)" $(notdir $@),)
|
||||
|
||||
.PHONY: recurse-all recurse-clean
|
||||
recurse-all: $(addsuffix /all, $(SUBDIRS))
|
||||
recurse-clean: $(addsuffix /clean, $(SUBDIRS))
|
||||
recurse-distclean: $(addsuffix /distclean, $(SUBDIRS))
|
||||
recurse-all: $(addsuffix /all, $(ROMS))
|
||||
recurse-clean: $(addsuffix /clean, $(ROMS))
|
||||
recurse-distclean: $(addsuffix /distclean, $(ROMS))
|
||||
|
||||
######################################################################
|
||||
|
||||
@@ -203,7 +210,6 @@ clean: recurse-clean
|
||||
! -path ./roms/edk2/ArmPkg/Library/GccLto/liblto-arm.a \
|
||||
-exec rm {} +
|
||||
rm -f TAGS cscope.* *~ */*~
|
||||
@$(MAKE) -Ctests/qemu-iotests clean
|
||||
|
||||
VERSION = $(shell cat $(SRC_PATH)/VERSION)
|
||||
|
||||
@@ -285,13 +291,6 @@ include $(SRC_PATH)/tests/vm/Makefile.include
|
||||
print-help-run = printf " %-30s - %s\\n" "$1" "$2"
|
||||
print-help = @$(call print-help-run,$1,$2)
|
||||
|
||||
.PHONY: update-linux-vdso
|
||||
update-linux-vdso:
|
||||
@for m in $(SRC_PATH)/linux-user/*/Makefile.vdso; do \
|
||||
$(MAKE) $(SUBDIR_MAKEFLAGS) -C $$(dirname $$m) -f Makefile.vdso \
|
||||
SRC_PATH=$(SRC_PATH) BUILD_DIR=$(BUILD_DIR); \
|
||||
done
|
||||
|
||||
.PHONY: help
|
||||
help:
|
||||
@echo 'Generic targets:'
|
||||
@@ -302,14 +301,16 @@ help:
|
||||
$(call print-help,cscope,Generate cscope index)
|
||||
$(call print-help,sparse,Run sparse on the QEMU source)
|
||||
@echo ''
|
||||
ifeq ($(CONFIG_PLUGIN),y)
|
||||
@echo 'Plugin targets:'
|
||||
$(call print-help,plugins,Build the example TCG plugins)
|
||||
@echo ''
|
||||
endif
|
||||
@echo 'Cleaning targets:'
|
||||
$(call print-help,clean,Remove most generated files but keep the config)
|
||||
$(call print-help,distclean,Remove all generated files)
|
||||
$(call print-help,dist,Build a distributable tarball)
|
||||
@echo ''
|
||||
@echo 'Linux-user targets:'
|
||||
$(call print-help,update-linux-vdso,Build linux-user vdso images)
|
||||
@echo ''
|
||||
@echo 'Test targets:'
|
||||
$(call print-help,check,Run all tests (check-help for details))
|
||||
$(call print-help,bench,Run all benchmarks)
|
||||
@@ -320,7 +321,7 @@ help:
|
||||
@echo 'Documentation targets:'
|
||||
$(call print-help,html man,Build documentation in specified format)
|
||||
@echo ''
|
||||
ifneq ($(filter msi, $(ninja-targets)),)
|
||||
ifdef CONFIG_WIN32
|
||||
@echo 'Windows targets:'
|
||||
$(call print-help,installer,Build NSIS-based installer for QEMU)
|
||||
$(call print-help,msi,Build MSI-based installer for qemu-ga)
|
||||
|
@@ -82,7 +82,7 @@ guidelines set out in the `style section
the Developers Guide.

Additional information on submitting patches can be found online via
the QEMU website:
the QEMU website

* `<https://wiki.qemu.org/Contribute/SubmitAPatch>`_
* `<https://wiki.qemu.org/Contribute/TrivialPatches>`_
@@ -102,7 +102,7 @@ requires a working 'git send-email' setup, and by default doesn't
automate everything, so you may want to go through the above steps
manually for once.

For installation instructions, please go to:
For installation instructions, please go to

* `<https://github.com/stefanha/git-publish>`_

@@ -159,7 +159,7 @@ Contact
=======

The QEMU community can be contacted in a number of ways, with the two
main methods being email and IRC:
main methods being email and IRC

* `<mailto:qemu-devel@nongnu.org>`_
* `<https://lists.nongnu.org/mailman/listinfo/qemu-devel>`_

@@ -4,6 +4,9 @@ config WHPX
|
||||
config NVMM
|
||||
bool
|
||||
|
||||
config HAX
|
||||
bool
|
||||
|
||||
config HVF
|
||||
bool
|
||||
|
||||
@@ -16,4 +19,3 @@ config KVM
|
||||
config XEN
|
||||
bool
|
||||
select FSDEV_9P if VIRTFS
|
||||
select XEN_BUS
|
||||
|
@@ -25,7 +25,6 @@
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/lockcnt.h"
|
||||
#include "qemu/thread.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "hw/core/cpu.h"
|
||||
@@ -42,7 +41,7 @@ void accel_blocker_init(void)
|
||||
|
||||
void accel_ioctl_begin(void)
|
||||
{
|
||||
if (likely(bql_locked())) {
|
||||
if (likely(qemu_mutex_iothread_locked())) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -52,7 +51,7 @@ void accel_ioctl_begin(void)
|
||||
|
||||
void accel_ioctl_end(void)
|
||||
{
|
||||
if (likely(bql_locked())) {
|
||||
if (likely(qemu_mutex_iothread_locked())) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -63,7 +62,7 @@ void accel_ioctl_end(void)
|
||||
|
||||
void accel_cpu_ioctl_begin(CPUState *cpu)
|
||||
{
|
||||
if (unlikely(bql_locked())) {
|
||||
if (unlikely(qemu_mutex_iothread_locked())) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -73,7 +72,7 @@ void accel_cpu_ioctl_begin(CPUState *cpu)
|
||||
|
||||
void accel_cpu_ioctl_end(CPUState *cpu)
|
||||
{
|
||||
if (unlikely(bql_locked())) {
|
||||
if (unlikely(qemu_mutex_iothread_locked())) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -106,7 +105,7 @@ void accel_ioctl_inhibit_begin(void)
|
||||
* We allow to inhibit only when holding the BQL, so we can identify
|
||||
* when an inhibitor wants to issue an ioctl easily.
|
||||
*/
|
||||
g_assert(bql_locked());
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
|
||||
/* Block further invocations of the ioctls outside the BQL. */
|
||||
CPU_FOREACH(cpu) {
|
||||
|
@@ -30,7 +30,7 @@
|
||||
#include "hw/core/accel-cpu.h"
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
#include "accel-system.h"
|
||||
#include "accel-softmmu.h"
|
||||
#endif /* !CONFIG_USER_ONLY */
|
||||
|
||||
static const TypeInfo accel_type = {
|
||||
@@ -104,7 +104,7 @@ static void accel_init_cpu_interfaces(AccelClass *ac)
|
||||
void accel_init_interfaces(AccelClass *ac)
|
||||
{
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
accel_system_init_ops_interfaces(ac);
|
||||
accel_init_ops_interfaces(ac);
|
||||
#endif /* !CONFIG_USER_ONLY */
|
||||
|
||||
accel_init_cpu_interfaces(ac);
|
||||
@@ -119,37 +119,16 @@ void accel_cpu_instance_init(CPUState *cpu)
|
||||
}
|
||||
}
|
||||
|
||||
bool accel_cpu_common_realize(CPUState *cpu, Error **errp)
|
||||
bool accel_cpu_realizefn(CPUState *cpu, Error **errp)
|
||||
{
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
AccelState *accel = current_accel();
|
||||
AccelClass *acc = ACCEL_GET_CLASS(accel);
|
||||
|
||||
/* target specific realization */
|
||||
if (cc->accel_cpu && cc->accel_cpu->cpu_target_realize
|
||||
&& !cc->accel_cpu->cpu_target_realize(cpu, errp)) {
|
||||
return false;
|
||||
if (cc->accel_cpu && cc->accel_cpu->cpu_realizefn) {
|
||||
return cc->accel_cpu->cpu_realizefn(cpu, errp);
|
||||
}
|
||||
|
||||
/* generic realization */
|
||||
if (acc->cpu_common_realize && !acc->cpu_common_realize(cpu, errp)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void accel_cpu_common_unrealize(CPUState *cpu)
|
||||
{
|
||||
AccelState *accel = current_accel();
|
||||
AccelClass *acc = ACCEL_GET_CLASS(accel);
|
||||
|
||||
/* generic unrealization */
|
||||
if (acc->cpu_common_unrealize) {
|
||||
acc->cpu_common_unrealize(cpu);
|
||||
}
|
||||
}
|
||||
|
||||
int accel_supported_gdbstub_sstep_flags(void)
|
||||
{
|
||||
AccelState *accel = current_accel();
|
@@ -28,7 +28,7 @@
#include "hw/boards.h"
#include "sysemu/cpus.h"
#include "qemu/error-report.h"
#include "accel-system.h"
#include "accel-softmmu.h"
int accel_init_machine(AccelState *accel, MachineState *ms)
{
@@ -62,7 +62,7 @@ void accel_setup_post(MachineState *ms)
}
/* initialize the arch-independent accel operation interfaces */
void accel_system_init_ops_interfaces(AccelClass *ac)
void accel_init_ops_interfaces(AccelClass *ac)
{
    const char *ac_name;
    char *ops_name;
@@ -73,17 +73,19 @@ void accel_system_init_ops_interfaces(AccelClass *ac)
    g_assert(ac_name != NULL);
    ops_name = g_strdup_printf("%s" ACCEL_OPS_SUFFIX, ac_name);
    ops = ACCEL_OPS_CLASS(module_object_class_by_name(ops_name));
    oc = module_object_class_by_name(ops_name);
    if (!oc) {
        error_report("fatal: could not load module for type '%s'", ops_name);
        exit(1);
    }
    g_free(ops_name);
    ops = ACCEL_OPS_CLASS(oc);
    /*
     * all accelerators need to define ops, providing at least a mandatory
     * non-NULL create_vcpu_thread operation.
     */
    ops = ACCEL_OPS_CLASS(oc);
    g_assert(ops != NULL);
    if (ops->ops_init) {
        ops->ops_init(ops);
    }
@@ -97,8 +99,8 @@ static const TypeInfo accel_ops_type_info = {
    .class_size = sizeof(AccelOpsClass),
};
static void accel_system_register_types(void)
static void accel_softmmu_register_types(void)
{
    type_register_static(&accel_ops_type_info);
}
type_init(accel_system_register_types);
type_init(accel_softmmu_register_types);
@@ -7,9 +7,9 @@
 * See the COPYING file in the top-level directory.
 */
#ifndef ACCEL_SYSTEM_H
#define ACCEL_SYSTEM_H
#ifndef ACCEL_SOFTMMU_H
#define ACCEL_SOFTMMU_H
void accel_system_init_ops_interfaces(AccelClass *ac);
void accel_init_ops_interfaces(AccelClass *ac);
#endif /* ACCEL_SYSTEM_H */
#endif /* ACCEL_SOFTMMU_H */
@@ -24,9 +24,10 @@ static void *dummy_cpu_thread_fn(void *arg)
    rcu_register_thread();
    bql_lock();
    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;
#ifndef _WIN32
@@ -42,7 +43,7 @@ static void *dummy_cpu_thread_fn(void *arg)
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    do {
        bql_unlock();
        qemu_mutex_unlock_iothread();
#ifndef _WIN32
        do {
            int sig;
@@ -55,11 +56,11 @@ static void *dummy_cpu_thread_fn(void *arg)
#else
        qemu_sem_wait(&cpu->sem);
#endif
        bql_lock();
        qemu_mutex_lock_iothread();
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug);
    bql_unlock();
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}
@@ -68,6 +69,9 @@ void dummy_start_vcpu_thread(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];
    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, dummy_cpu_thread_fn, cpu,
@@ -52,8 +52,6 @@
|
||||
#include "qemu/main-loop.h"
|
||||
#include "exec/address-spaces.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "gdbstub/enums.h"
|
||||
#include "hw/boards.h"
|
||||
#include "sysemu/cpus.h"
|
||||
#include "sysemu/hvf.h"
|
||||
#include "sysemu/hvf_int.h"
|
||||
@@ -62,6 +60,10 @@
|
||||
|
||||
HVFState *hvf_state;
|
||||
|
||||
#ifdef __aarch64__
|
||||
#define HV_VM_DEFAULT NULL
|
||||
#endif
|
||||
|
||||
/* Memory slots */
|
||||
|
||||
hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size)
|
||||
@@ -201,15 +203,15 @@ static void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
|
||||
|
||||
static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
|
||||
{
|
||||
if (!cpu->accel->dirty) {
|
||||
if (!cpu->vcpu_dirty) {
|
||||
hvf_get_registers(cpu);
|
||||
cpu->accel->dirty = true;
|
||||
cpu->vcpu_dirty = true;
|
||||
}
|
||||
}
|
||||
|
||||
static void hvf_cpu_synchronize_state(CPUState *cpu)
|
||||
{
|
||||
if (!cpu->accel->dirty) {
|
||||
if (!cpu->vcpu_dirty) {
|
||||
run_on_cpu(cpu, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);
|
||||
}
|
||||
}
|
||||
@@ -218,7 +220,7 @@ static void do_hvf_cpu_synchronize_set_dirty(CPUState *cpu,
|
||||
run_on_cpu_data arg)
|
||||
{
|
||||
/* QEMU state is the reference, push it to HVF now and on next entry */
|
||||
cpu->accel->dirty = true;
|
||||
cpu->vcpu_dirty = true;
|
||||
}
|
||||
|
||||
static void hvf_cpu_synchronize_post_reset(CPUState *cpu)
|
||||
@@ -301,7 +303,7 @@ static void hvf_region_del(MemoryListener *listener,
|
||||
|
||||
static MemoryListener hvf_memory_listener = {
|
||||
.name = "hvf",
|
||||
.priority = MEMORY_LISTENER_PRIORITY_ACCEL,
|
||||
.priority = 10,
|
||||
.region_add = hvf_region_add,
|
||||
.region_del = hvf_region_del,
|
||||
.log_start = hvf_log_start,
|
||||
@@ -320,17 +322,8 @@ static int hvf_accel_init(MachineState *ms)
|
||||
int x;
|
||||
hv_return_t ret;
|
||||
HVFState *s;
|
||||
int pa_range = 36;
|
||||
MachineClass *mc = MACHINE_GET_CLASS(ms);
|
||||
|
||||
if (mc->hvf_get_physical_address_range) {
|
||||
pa_range = mc->hvf_get_physical_address_range(ms);
|
||||
if (pa_range < 0) {
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
ret = hvf_arch_vm_create(ms, (uint32_t)pa_range);
|
||||
ret = hv_vm_create(HV_VM_DEFAULT);
|
||||
assert_hvf_ok(ret);
|
||||
|
||||
s = g_new0(HVFState, 1);
|
||||
@@ -341,26 +334,18 @@ static int hvf_accel_init(MachineState *ms)
|
||||
s->slots[x].slot_id = x;
|
||||
}
|
||||
|
||||
QTAILQ_INIT(&s->hvf_sw_breakpoints);
|
||||
|
||||
hvf_state = s;
|
||||
memory_listener_register(&hvf_memory_listener, &address_space_memory);
|
||||
|
||||
return hvf_arch_init();
|
||||
}
|
||||
|
||||
static inline int hvf_gdbstub_sstep_flags(void)
|
||||
{
|
||||
return SSTEP_ENABLE | SSTEP_NOIRQ;
|
||||
}
|
||||
|
||||
static void hvf_accel_class_init(ObjectClass *oc, void *data)
|
||||
{
|
||||
AccelClass *ac = ACCEL_CLASS(oc);
|
||||
ac->name = "HVF";
|
||||
ac->init_machine = hvf_accel_init;
|
||||
ac->allowed = &hvf_allowed;
|
||||
ac->gdbstub_supported_sstep_flags = hvf_gdbstub_sstep_flags;
|
||||
}
|
||||
|
||||
static const TypeInfo hvf_accel_type = {
|
||||
@@ -378,19 +363,19 @@ type_init(hvf_type_init);
|
||||
|
||||
static void hvf_vcpu_destroy(CPUState *cpu)
|
||||
{
|
||||
hv_return_t ret = hv_vcpu_destroy(cpu->accel->fd);
|
||||
hv_return_t ret = hv_vcpu_destroy(cpu->hvf->fd);
|
||||
assert_hvf_ok(ret);
|
||||
|
||||
hvf_arch_vcpu_destroy(cpu);
|
||||
g_free(cpu->accel);
|
||||
cpu->accel = NULL;
|
||||
g_free(cpu->hvf);
|
||||
cpu->hvf = NULL;
|
||||
}
|
||||
|
||||
static int hvf_init_vcpu(CPUState *cpu)
|
||||
{
|
||||
int r;
|
||||
|
||||
cpu->accel = g_new0(AccelCPUState, 1);
|
||||
cpu->hvf = g_malloc0(sizeof(*cpu->hvf));
|
||||
|
||||
/* init cpu signals */
|
||||
struct sigaction sigact;
|
||||
@@ -399,20 +384,17 @@ static int hvf_init_vcpu(CPUState *cpu)
|
||||
sigact.sa_handler = dummy_signal;
|
||||
sigaction(SIG_IPI, &sigact, NULL);
|
||||
|
||||
pthread_sigmask(SIG_BLOCK, NULL, &cpu->accel->unblock_ipi_mask);
|
||||
sigdelset(&cpu->accel->unblock_ipi_mask, SIG_IPI);
|
||||
pthread_sigmask(SIG_BLOCK, NULL, &cpu->hvf->unblock_ipi_mask);
|
||||
sigdelset(&cpu->hvf->unblock_ipi_mask, SIG_IPI);
|
||||
|
||||
#ifdef __aarch64__
|
||||
r = hv_vcpu_create(&cpu->accel->fd,
|
||||
(hv_vcpu_exit_t **)&cpu->accel->exit, NULL);
|
||||
r = hv_vcpu_create(&cpu->hvf->fd, (hv_vcpu_exit_t **)&cpu->hvf->exit, NULL);
|
||||
#else
|
||||
r = hv_vcpu_create(&cpu->accel->fd, HV_VCPU_DEFAULT);
|
||||
r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf->fd, HV_VCPU_DEFAULT);
|
||||
#endif
|
||||
cpu->accel->dirty = true;
|
||||
cpu->vcpu_dirty = 1;
|
||||
assert_hvf_ok(r);
|
||||
|
||||
cpu->accel->guest_debug_enabled = false;
|
||||
|
||||
return hvf_arch_init_vcpu(cpu);
|
||||
}
|
||||
|
||||
@@ -430,10 +412,11 @@ static void *hvf_cpu_thread_fn(void *arg)
|
||||
|
||||
rcu_register_thread();
|
||||
|
||||
bql_lock();
|
||||
qemu_mutex_lock_iothread();
|
||||
qemu_thread_get_self(cpu->thread);
|
||||
|
||||
cpu->thread_id = qemu_get_thread_id();
|
||||
cpu->can_do_io = 1;
|
||||
current_cpu = cpu;
|
||||
|
||||
hvf_init_vcpu(cpu);
|
||||
@@ -454,7 +437,7 @@ static void *hvf_cpu_thread_fn(void *arg)
|
||||
|
||||
hvf_vcpu_destroy(cpu);
|
||||
cpu_thread_signal_destroyed(cpu);
|
||||
bql_unlock();
|
||||
qemu_mutex_unlock_iothread();
|
||||
rcu_unregister_thread();
|
||||
return NULL;
|
||||
}
|
||||
@@ -469,114 +452,16 @@ static void hvf_start_vcpu_thread(CPUState *cpu)
|
||||
*/
|
||||
assert(hvf_enabled());
|
||||
|
||||
cpu->thread = g_malloc0(sizeof(QemuThread));
|
||||
cpu->halt_cond = g_malloc0(sizeof(QemuCond));
|
||||
qemu_cond_init(cpu->halt_cond);
|
||||
|
||||
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
|
||||
cpu->cpu_index);
|
||||
qemu_thread_create(cpu->thread, thread_name, hvf_cpu_thread_fn,
|
||||
cpu, QEMU_THREAD_JOINABLE);
|
||||
}
|
||||
|
||||
static int hvf_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
|
||||
{
|
||||
struct hvf_sw_breakpoint *bp;
|
||||
int err;
|
||||
|
||||
if (type == GDB_BREAKPOINT_SW) {
|
||||
bp = hvf_find_sw_breakpoint(cpu, addr);
|
||||
if (bp) {
|
||||
bp->use_count++;
|
||||
return 0;
|
||||
}
|
||||
|
||||
bp = g_new(struct hvf_sw_breakpoint, 1);
|
||||
bp->pc = addr;
|
||||
bp->use_count = 1;
|
||||
err = hvf_arch_insert_sw_breakpoint(cpu, bp);
|
||||
if (err) {
|
||||
g_free(bp);
|
||||
return err;
|
||||
}
|
||||
|
||||
QTAILQ_INSERT_HEAD(&hvf_state->hvf_sw_breakpoints, bp, entry);
|
||||
} else {
|
||||
err = hvf_arch_insert_hw_breakpoint(addr, len, type);
|
||||
if (err) {
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
CPU_FOREACH(cpu) {
|
||||
err = hvf_update_guest_debug(cpu);
|
||||
if (err) {
|
||||
return err;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hvf_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
|
||||
{
|
||||
struct hvf_sw_breakpoint *bp;
|
||||
int err;
|
||||
|
||||
if (type == GDB_BREAKPOINT_SW) {
|
||||
bp = hvf_find_sw_breakpoint(cpu, addr);
|
||||
if (!bp) {
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
if (bp->use_count > 1) {
|
||||
bp->use_count--;
|
||||
return 0;
|
||||
}
|
||||
|
||||
err = hvf_arch_remove_sw_breakpoint(cpu, bp);
|
||||
if (err) {
|
||||
return err;
|
||||
}
|
||||
|
||||
QTAILQ_REMOVE(&hvf_state->hvf_sw_breakpoints, bp, entry);
|
||||
g_free(bp);
|
||||
} else {
|
||||
err = hvf_arch_remove_hw_breakpoint(addr, len, type);
|
||||
if (err) {
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
CPU_FOREACH(cpu) {
|
||||
err = hvf_update_guest_debug(cpu);
|
||||
if (err) {
|
||||
return err;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void hvf_remove_all_breakpoints(CPUState *cpu)
|
||||
{
|
||||
struct hvf_sw_breakpoint *bp, *next;
|
||||
CPUState *tmpcpu;
|
||||
|
||||
QTAILQ_FOREACH_SAFE(bp, &hvf_state->hvf_sw_breakpoints, entry, next) {
|
||||
if (hvf_arch_remove_sw_breakpoint(cpu, bp) != 0) {
|
||||
/* Try harder to find a CPU that currently sees the breakpoint. */
|
||||
CPU_FOREACH(tmpcpu)
|
||||
{
|
||||
if (hvf_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
QTAILQ_REMOVE(&hvf_state->hvf_sw_breakpoints, bp, entry);
|
||||
g_free(bp);
|
||||
}
|
||||
hvf_arch_remove_all_hw_breakpoints();
|
||||
|
||||
CPU_FOREACH(cpu) {
|
||||
hvf_update_guest_debug(cpu);
|
||||
}
|
||||
}
|
||||
|
||||
static void hvf_accel_ops_class_init(ObjectClass *oc, void *data)
|
||||
{
|
||||
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
|
||||
@@ -588,12 +473,6 @@ static void hvf_accel_ops_class_init(ObjectClass *oc, void *data)
|
||||
ops->synchronize_post_init = hvf_cpu_synchronize_post_init;
|
||||
ops->synchronize_state = hvf_cpu_synchronize_state;
|
||||
ops->synchronize_pre_loadvm = hvf_cpu_synchronize_pre_loadvm;
|
||||
|
||||
ops->insert_breakpoint = hvf_insert_breakpoint;
|
||||
ops->remove_breakpoint = hvf_remove_breakpoint;
|
||||
ops->remove_all_breakpoints = hvf_remove_all_breakpoints;
|
||||
ops->update_guest_debug = hvf_update_guest_debug;
|
||||
ops->supports_guest_debug = hvf_arch_supports_guest_debug;
|
||||
};
|
||||
static const TypeInfo hvf_accel_ops_type = {
|
||||
.name = ACCEL_OPS_NAME("hvf"),
|
||||
|
@@ -13,53 +13,34 @@
|
||||
#include "sysemu/hvf.h"
|
||||
#include "sysemu/hvf_int.h"
|
||||
|
||||
const char *hvf_return_string(hv_return_t ret)
|
||||
{
|
||||
switch (ret) {
|
||||
case HV_SUCCESS: return "HV_SUCCESS";
|
||||
case HV_ERROR: return "HV_ERROR";
|
||||
case HV_BUSY: return "HV_BUSY";
|
||||
case HV_BAD_ARGUMENT: return "HV_BAD_ARGUMENT";
|
||||
case HV_NO_RESOURCES: return "HV_NO_RESOURCES";
|
||||
case HV_NO_DEVICE: return "HV_NO_DEVICE";
|
||||
case HV_UNSUPPORTED: return "HV_UNSUPPORTED";
|
||||
case HV_DENIED: return "HV_DENIED";
|
||||
default: return "[unknown hv_return value]";
|
||||
}
|
||||
}
|
||||
|
||||
void assert_hvf_ok_impl(hv_return_t ret, const char *file, unsigned int line,
|
||||
const char *exp)
|
||||
void assert_hvf_ok(hv_return_t ret)
|
||||
{
|
||||
if (ret == HV_SUCCESS) {
|
||||
return;
|
||||
}
|
||||
|
||||
error_report("Error: %s = %s (0x%x, at %s:%u)",
|
||||
exp, hvf_return_string(ret), ret, file, line);
|
||||
switch (ret) {
|
||||
case HV_ERROR:
|
||||
error_report("Error: HV_ERROR");
|
||||
break;
|
||||
case HV_BUSY:
|
||||
error_report("Error: HV_BUSY");
|
||||
break;
|
||||
case HV_BAD_ARGUMENT:
|
||||
error_report("Error: HV_BAD_ARGUMENT");
|
||||
break;
|
||||
case HV_NO_RESOURCES:
|
||||
error_report("Error: HV_NO_RESOURCES");
|
||||
break;
|
||||
case HV_NO_DEVICE:
|
||||
error_report("Error: HV_NO_DEVICE");
|
||||
break;
|
||||
case HV_UNSUPPORTED:
|
||||
error_report("Error: HV_UNSUPPORTED");
|
||||
break;
|
||||
default:
|
||||
error_report("Unknown Error");
|
||||
}
|
||||
|
||||
abort();
|
||||
}
|
||||
|
||||
struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu, vaddr pc)
|
||||
{
|
||||
struct hvf_sw_breakpoint *bp;
|
||||
|
||||
QTAILQ_FOREACH(bp, &hvf_state->hvf_sw_breakpoints, entry) {
|
||||
if (bp->pc == pc) {
|
||||
return bp;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int hvf_sw_breakpoints_active(CPUState *cpu)
|
||||
{
|
||||
return !QTAILQ_EMPTY(&hvf_state->hvf_sw_breakpoints);
|
||||
}
|
||||
|
||||
int hvf_update_guest_debug(CPUState *cpu)
|
||||
{
|
||||
hvf_arch_update_guest_debug(cpu);
|
||||
return 0;
|
||||
}
|
||||
|
@@ -33,9 +33,10 @@ static void *kvm_vcpu_thread_fn(void *arg)
    rcu_register_thread();
    bql_lock();
    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;
    r = kvm_init_vcpu(cpu, &error_fatal);
@@ -57,7 +58,7 @@ static void *kvm_vcpu_thread_fn(void *arg)
    kvm_destroy_vcpu(cpu);
    cpu_thread_signal_destroyed(cpu);
    bql_unlock();
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}
@@ -66,6 +67,9 @@ static void kvm_start_vcpu_thread(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];
    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, kvm_vcpu_thread_fn,
@@ -79,10 +83,10 @@ static bool kvm_vcpu_thread_is_idle(CPUState *cpu)
static bool kvm_cpus_are_resettable(void)
{
    return !kvm_enabled() || !kvm_state->guest_state_protected;
    return !kvm_enabled() || kvm_cpu_check_are_resettable();
}
#ifdef TARGET_KVM_HAVE_GUEST_DEBUG
#ifdef KVM_CAP_SET_GUEST_DEBUG
static int kvm_update_guest_debug_ops(CPUState *cpu)
{
    return kvm_update_guest_debug(cpu, 0);
@@ -101,7 +105,7 @@ static void kvm_accel_ops_class_init(ObjectClass *oc, void *data)
    ops->synchronize_state = kvm_cpu_synchronize_state;
    ops->synchronize_pre_loadvm = kvm_cpu_synchronize_pre_loadvm;
#ifdef TARGET_KVM_HAVE_GUEST_DEBUG
#ifdef KVM_CAP_SET_GUEST_DEBUG
    ops->update_guest_debug = kvm_update_guest_debug_ops;
    ops->supports_guest_debug = kvm_supports_guest_debug;
    ops->insert_breakpoint = kvm_insert_breakpoint;
1459 accel/kvm/kvm-all.c
File diff suppressed because it is too large
@@ -22,4 +22,5 @@ bool kvm_supports_guest_debug(void);
int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len);
int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len);
void kvm_remove_all_breakpoints(CPUState *cpu);
#endif /* KVM_CPUS_H */
@@ -1,25 +1,21 @@
|
||||
# See docs/devel/tracing.rst for syntax documentation.
|
||||
|
||||
# kvm-all.c
|
||||
kvm_ioctl(unsigned long type, void *arg) "type 0x%lx, arg %p"
|
||||
kvm_vm_ioctl(unsigned long type, void *arg) "type 0x%lx, arg %p"
|
||||
kvm_vcpu_ioctl(int cpu_index, unsigned long type, void *arg) "cpu_index %d, type 0x%lx, arg %p"
|
||||
kvm_ioctl(int type, void *arg) "type 0x%x, arg %p"
|
||||
kvm_vm_ioctl(int type, void *arg) "type 0x%x, arg %p"
|
||||
kvm_vcpu_ioctl(int cpu_index, int type, void *arg) "cpu_index %d, type 0x%x, arg %p"
|
||||
kvm_run_exit(int cpu_index, uint32_t reason) "cpu_index %d, reason %d"
|
||||
kvm_device_ioctl(int fd, unsigned long type, void *arg) "dev fd %d, type 0x%lx, arg %p"
|
||||
kvm_device_ioctl(int fd, int type, void *arg) "dev fd %d, type 0x%x, arg %p"
|
||||
kvm_failed_reg_get(uint64_t id, const char *msg) "Warning: Unable to retrieve ONEREG %" PRIu64 " from KVM: %s"
|
||||
kvm_failed_reg_set(uint64_t id, const char *msg) "Warning: Unable to set ONEREG %" PRIu64 " to KVM: %s"
|
||||
kvm_init_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
|
||||
kvm_create_vcpu(int cpu_index, unsigned long arch_cpu_id, int kvm_fd) "index: %d, id: %lu, kvm fd: %d"
|
||||
kvm_destroy_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
|
||||
kvm_park_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
|
||||
kvm_unpark_vcpu(unsigned long arch_cpu_id, const char *msg) "id: %lu %s"
|
||||
kvm_irqchip_commit_routes(void) ""
|
||||
kvm_irqchip_add_msi_route(char *name, int vector, int virq) "dev %s vector %d virq %d"
|
||||
kvm_irqchip_update_msi_route(int virq) "Updating MSI route virq=%d"
|
||||
kvm_irqchip_release_virq(int virq) "virq %d"
|
||||
kvm_set_ioeventfd_mmio(int fd, uint64_t addr, uint32_t val, bool assign, uint32_t size, bool datamatch) "fd: %d @0x%" PRIx64 " val=0x%x assign: %d size: %d match: %d"
|
||||
kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint32_t val, bool assign, uint32_t size, bool datamatch) "fd: %d @0x%x val=0x%x assign: %d size: %d match: %d"
|
||||
kvm_set_user_memory(uint16_t as, uint16_t slot, uint32_t flags, uint64_t guest_phys_addr, uint64_t memory_size, uint64_t userspace_addr, uint32_t fd, uint64_t fd_offset, int ret) "AddrSpace#%d Slot#%d flags=0x%x gpa=0x%"PRIx64 " size=0x%"PRIx64 " ua=0x%"PRIx64 " guest_memfd=%d" " guest_memfd_offset=0x%" PRIx64 " ret=%d"
|
||||
kvm_set_user_memory(uint32_t slot, uint32_t flags, uint64_t guest_phys_addr, uint64_t memory_size, uint64_t userspace_addr, int ret) "Slot#%d flags=0x%x gpa=0x%"PRIx64 " size=0x%"PRIx64 " ua=0x%"PRIx64 " ret=%d"
|
||||
kvm_clear_dirty_log(uint32_t slot, uint64_t start, uint32_t size) "slot#%"PRId32" start 0x%"PRIx64" size 0x%"PRIx32
|
||||
kvm_resample_fd_notify(int gsi) "gsi %d"
|
||||
kvm_dirty_ring_full(int id) "vcpu %d"
|
||||
@@ -29,11 +25,4 @@ kvm_dirty_ring_reaper(const char *s) "%s"
|
||||
kvm_dirty_ring_reap(uint64_t count, int64_t t) "reaped %"PRIu64" pages (took %"PRIi64" us)"
|
||||
kvm_dirty_ring_reaper_kick(const char *reason) "%s"
|
||||
kvm_dirty_ring_flush(int finished) "%d"
|
||||
kvm_failed_get_vcpu_mmap_size(void) ""
|
||||
kvm_cpu_exec(void) ""
|
||||
kvm_interrupt_exit_request(void) ""
|
||||
kvm_io_window_exit(void) ""
|
||||
kvm_run_exit_system_event(int cpu_index, uint32_t event_type) "cpu_index %d, system_even_type %"PRIu32
|
||||
kvm_convert_memory(uint64_t start, uint64_t size, const char *msg) "start 0x%" PRIx64 " size 0x%" PRIx64 " %s"
|
||||
kvm_memory_fault(uint64_t start, uint64_t size, uint64_t flags) "start 0x%" PRIx64 " size 0x%" PRIx64 " flags 0x%" PRIx64
|
||||
kvm_slots_grow(unsigned int old, unsigned int new) "%u -> %u"
|
||||
|
||||
|
@@ -1,5 +1,5 @@
specific_ss.add(files('accel-target.c'))
system_ss.add(files('accel-system.c', 'accel-blocker.c'))
specific_ss.add(files('accel-common.c', 'accel-blocker.c'))
softmmu_ss.add(files('accel-softmmu.c'))
user_ss.add(files('accel-user.c'))
subdir('tcg')
@@ -12,4 +12,4 @@ if have_system
endif
# qtest
system_ss.add(files('dummy-cpus.c'))
softmmu_ss.add(files('dummy-cpus.c'))
@@ -1 +1 @@
qtest_module_ss.add(when: ['CONFIG_SYSTEM_ONLY'], if_true: files('qtest.c'))
qtest_module_ss.add(when: ['CONFIG_SOFTMMU'], if_true: files('qtest.c'))
@@ -24,18 +24,6 @@
|
||||
#include "qemu/main-loop.h"
|
||||
#include "hw/core/cpu.h"
|
||||
|
||||
static int64_t qtest_clock_counter;
|
||||
|
||||
static int64_t qtest_get_virtual_clock(void)
|
||||
{
|
||||
return qatomic_read_i64(&qtest_clock_counter);
|
||||
}
|
||||
|
||||
static void qtest_set_virtual_clock(int64_t count)
|
||||
{
|
||||
qatomic_set_i64(&qtest_clock_counter, count);
|
||||
}
|
||||
|
||||
static int qtest_init_accel(MachineState *ms)
|
||||
{
|
||||
return 0;
|
||||
@@ -64,7 +52,6 @@ static void qtest_accel_ops_class_init(ObjectClass *oc, void *data)
|
||||
|
||||
ops->create_vcpu_thread = dummy_start_vcpu_thread;
|
||||
ops->get_virtual_clock = qtest_get_virtual_clock;
|
||||
ops->set_virtual_clock = qtest_set_virtual_clock;
|
||||
};
|
||||
|
||||
static const TypeInfo qtest_accel_ops_type = {
|
||||
|
24 accel/stubs/hax-stub.c Normal file
@@ -0,0 +1,24 @@
/*
 * QEMU HAXM support
 *
 * Copyright (c) 2015, Intel Corporation
 *
 * Copyright 2016 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include "sysemu/hax.h"
bool hax_allowed;
int hax_sync_vcpus(void)
{
    return 0;
}
|
@@ -17,12 +17,15 @@
|
||||
KVMState *kvm_state;
|
||||
bool kvm_kernel_irqchip;
|
||||
bool kvm_async_interrupts_allowed;
|
||||
bool kvm_eventfds_allowed;
|
||||
bool kvm_irqfds_allowed;
|
||||
bool kvm_resamplefds_allowed;
|
||||
bool kvm_msi_via_irqfd_allowed;
|
||||
bool kvm_gsi_routing_allowed;
|
||||
bool kvm_gsi_direct_mapping;
|
||||
bool kvm_allowed;
|
||||
bool kvm_readonly_mem_allowed;
|
||||
bool kvm_ioeventfd_any_length_allowed;
|
||||
bool kvm_msi_use_devid;
|
||||
|
||||
void kvm_flush_coalesced_mmio_buffer(void)
|
||||
@@ -38,6 +41,11 @@ bool kvm_has_sync_mmu(void)
|
||||
return false;
|
||||
}
|
||||
|
||||
int kvm_has_many_ioeventfds(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
|
||||
{
|
||||
return 1;
|
||||
@@ -83,6 +91,11 @@ void kvm_irqchip_change_notify(void)
|
||||
{
|
||||
}
|
||||
|
||||
int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
|
||||
EventNotifier *rn, int virq)
|
||||
{
|
||||
@@ -95,14 +108,9 @@ int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
unsigned int kvm_get_max_memslots(void)
|
||||
bool kvm_has_free_slot(MachineState *ms)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
unsigned int kvm_get_free_memslots(void)
|
||||
{
|
||||
return 0;
|
||||
return false;
|
||||
}
|
||||
|
||||
void kvm_init_cpu_signals(CPUState *cpu)
|
||||
@@ -124,13 +132,3 @@ uint32_t kvm_dirty_ring_size(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool kvm_hwpoisoned_mem(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
int kvm_create_guest_memfd(uint64_t size, uint64_t flags, Error **errp)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
@@ -1,6 +1,7 @@
system_stubs_ss = ss.source_set()
system_stubs_ss.add(when: 'CONFIG_XEN', if_false: files('xen-stub.c'))
system_stubs_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c'))
system_stubs_ss.add(when: 'CONFIG_TCG', if_false: files('tcg-stub.c'))
sysemu_stubs_ss = ss.source_set()
sysemu_stubs_ss.add(when: 'CONFIG_HAX', if_false: files('hax-stub.c'))
sysemu_stubs_ss.add(when: 'CONFIG_XEN', if_false: files('xen-stub.c'))
sysemu_stubs_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c'))
sysemu_stubs_ss.add(when: 'CONFIG_TCG', if_false: files('tcg-stub.c'))
specific_ss.add_all(when: ['CONFIG_SYSTEM_ONLY'], if_true: system_stubs_ss)
specific_ss.add_all(when: ['CONFIG_SOFTMMU'], if_true: sysemu_stubs_ss)
@@ -18,6 +18,28 @@ void tb_flush(CPUState *cpu)
|
||||
{
|
||||
}
|
||||
|
||||
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
|
||||
{
|
||||
}
|
||||
|
||||
void tcg_flush_jmp_cache(CPUState *cpu)
|
||||
{
|
||||
}
|
||||
|
||||
int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
|
||||
MMUAccessType access_type, int mmu_idx,
|
||||
bool nonfault, void **phost, uintptr_t retaddr)
|
||||
{
|
||||
g_assert_not_reached();
|
||||
}
|
||||
|
||||
void *probe_access(CPUArchState *env, target_ulong addr, int size,
|
||||
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
|
||||
{
|
||||
/* Handled by hardware accelerator. */
|
||||
g_assert_not_reached();
|
||||
}
|
||||
|
||||
G_NORETURN void cpu_loop_exit(CPUState *cpu)
|
||||
{
|
||||
g_assert_not_reached();
|
||||
|
@@ -13,23 +13,26 @@
|
||||
* See the COPYING file in the top-level directory.
|
||||
*/
|
||||
|
||||
static void atomic_trace_rmw_post(CPUArchState *env, uint64_t addr,
|
||||
uint64_t read_value_low,
|
||||
uint64_t read_value_high,
|
||||
uint64_t write_value_low,
|
||||
uint64_t write_value_high,
|
||||
static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr,
|
||||
MemOpIdx oi)
|
||||
{
|
||||
if (cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr,
|
||||
read_value_low, read_value_high,
|
||||
oi, QEMU_PLUGIN_MEM_R);
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr,
|
||||
write_value_low, write_value_high,
|
||||
oi, QEMU_PLUGIN_MEM_W);
|
||||
}
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_RW);
|
||||
}
|
||||
|
||||
#if HAVE_ATOMIC128
|
||||
static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr,
|
||||
MemOpIdx oi)
|
||||
{
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
|
||||
}
|
||||
|
||||
static void atomic_trace_st_post(CPUArchState *env, target_ulong addr,
|
||||
MemOpIdx oi)
|
||||
{
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Atomic helpers callable from TCG.
|
||||
* These have a common interface and all defer to cpu_atomic_*
|
||||
@@ -37,7 +40,7 @@ static void atomic_trace_rmw_post(CPUArchState *env, uint64_t addr,
|
||||
*/
|
||||
|
||||
#define CMPXCHG_HELPER(OP, TYPE) \
|
||||
TYPE HELPER(atomic_##OP)(CPUArchState *env, uint64_t addr, \
|
||||
TYPE HELPER(atomic_##OP)(CPUArchState *env, target_ulong addr, \
|
||||
TYPE oldv, TYPE newv, uint32_t oi) \
|
||||
{ return cpu_atomic_##OP##_mmu(env, addr, oldv, newv, oi, GETPC()); }
|
||||
|
||||
@@ -52,23 +55,43 @@ CMPXCHG_HELPER(cmpxchgq_be, uint64_t)
|
||||
CMPXCHG_HELPER(cmpxchgq_le, uint64_t)
|
||||
#endif
|
||||
|
||||
#if HAVE_CMPXCHG128
|
||||
#ifdef CONFIG_CMPXCHG128
|
||||
CMPXCHG_HELPER(cmpxchgo_be, Int128)
|
||||
CMPXCHG_HELPER(cmpxchgo_le, Int128)
|
||||
#endif
|
||||
|
||||
#undef CMPXCHG_HELPER
|
||||
|
||||
Int128 HELPER(nonatomic_cmpxchgo)(CPUArchState *env, uint64_t addr,
|
||||
Int128 cmpv, Int128 newv, uint32_t oi)
|
||||
Int128 HELPER(nonatomic_cmpxchgo_be)(CPUArchState *env, target_ulong addr,
|
||||
Int128 cmpv, Int128 newv, uint32_t oi)
|
||||
{
|
||||
#if TCG_TARGET_REG_BITS == 32
|
||||
uintptr_t ra = GETPC();
|
||||
Int128 oldv;
|
||||
|
||||
oldv = cpu_ld16_mmu(env, addr, oi, ra);
|
||||
oldv = cpu_ld16_be_mmu(env, addr, oi, ra);
|
||||
if (int128_eq(oldv, cmpv)) {
|
||||
cpu_st16_mmu(env, addr, newv, oi, ra);
|
||||
cpu_st16_be_mmu(env, addr, newv, oi, ra);
|
||||
} else {
|
||||
/* Even with comparison failure, still need a write cycle. */
|
||||
probe_write(env, addr, 16, get_mmuidx(oi), ra);
|
||||
}
|
||||
return oldv;
|
||||
#else
|
||||
g_assert_not_reached();
|
||||
#endif
|
||||
}
|
||||
|
||||
Int128 HELPER(nonatomic_cmpxchgo_le)(CPUArchState *env, target_ulong addr,
|
||||
Int128 cmpv, Int128 newv, uint32_t oi)
|
||||
{
|
||||
#if TCG_TARGET_REG_BITS == 32
|
||||
uintptr_t ra = GETPC();
|
||||
Int128 oldv;
|
||||
|
||||
oldv = cpu_ld16_le_mmu(env, addr, oi, ra);
|
||||
if (int128_eq(oldv, cmpv)) {
|
||||
cpu_st16_le_mmu(env, addr, newv, oi, ra);
|
||||
} else {
|
||||
/* Even with comparison failure, still need a write cycle. */
|
||||
probe_write(env, addr, 16, get_mmuidx(oi), ra);
|
||||
@@ -80,7 +103,7 @@ Int128 HELPER(nonatomic_cmpxchgo)(CPUArchState *env, uint64_t addr,
|
||||
}
|
||||
|
||||
#define ATOMIC_HELPER(OP, TYPE) \
|
||||
TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, uint64_t addr, \
|
||||
TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, target_ulong addr, \
|
||||
TYPE val, uint32_t oi) \
|
||||
{ return glue(glue(cpu_atomic_,OP),_mmu)(env, addr, val, oi, GETPC()); }
|
||||
|
||||
|
@@ -53,14 +53,6 @@
|
||||
# error unsupported data size
|
||||
#endif
|
||||
|
||||
#if DATA_SIZE == 16
|
||||
# define VALUE_LOW(val) int128_getlo(val)
|
||||
# define VALUE_HIGH(val) int128_gethi(val)
|
||||
#else
|
||||
# define VALUE_LOW(val) val
|
||||
# define VALUE_HIGH(val) 0
|
||||
#endif
|
||||
|
||||
#if DATA_SIZE >= 4
|
||||
# define ABI_TYPE DATA_TYPE
|
||||
#else
|
||||
@@ -77,12 +69,12 @@
|
||||
# define END _le
|
||||
#endif
|
||||
|
||||
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
|
||||
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
|
||||
ABI_TYPE cmpv, ABI_TYPE newv,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
|
||||
DATA_SIZE, retaddr);
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
|
||||
PAGE_READ | PAGE_WRITE, retaddr);
|
||||
DATA_TYPE ret;
|
||||
|
||||
#if DATA_SIZE == 16
|
||||
@@ -91,48 +83,60 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
|
||||
ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
|
||||
#endif
|
||||
ATOMIC_MMU_CLEANUP;
|
||||
atomic_trace_rmw_post(env, addr,
|
||||
VALUE_LOW(ret),
|
||||
VALUE_HIGH(ret),
|
||||
VALUE_LOW(newv),
|
||||
VALUE_HIGH(newv),
|
||||
oi);
|
||||
atomic_trace_rmw_post(env, addr, oi);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#if DATA_SIZE < 16
|
||||
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
|
||||
#if DATA_SIZE >= 16
|
||||
#if HAVE_ATOMIC128
|
||||
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
|
||||
PAGE_READ, retaddr);
|
||||
DATA_TYPE val;
|
||||
|
||||
val = atomic16_read(haddr);
|
||||
ATOMIC_MMU_CLEANUP;
|
||||
atomic_trace_ld_post(env, addr, oi);
|
||||
return val;
|
||||
}
|
||||
|
||||
void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
|
||||
PAGE_WRITE, retaddr);
|
||||
|
||||
atomic16_set(haddr, val);
|
||||
ATOMIC_MMU_CLEANUP;
|
||||
atomic_trace_st_post(env, addr, oi);
|
||||
}
|
||||
#endif
|
||||
#else
|
||||
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
|
||||
DATA_SIZE, retaddr);
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
|
||||
PAGE_READ | PAGE_WRITE, retaddr);
|
||||
DATA_TYPE ret;
|
||||
|
||||
ret = qatomic_xchg__nocheck(haddr, val);
|
||||
ATOMIC_MMU_CLEANUP;
|
||||
atomic_trace_rmw_post(env, addr,
|
||||
VALUE_LOW(ret),
|
||||
VALUE_HIGH(ret),
|
||||
VALUE_LOW(val),
|
||||
VALUE_HIGH(val),
|
||||
oi);
|
||||
atomic_trace_rmw_post(env, addr, oi);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define GEN_ATOMIC_HELPER(X) \
|
||||
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
|
||||
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
|
||||
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
|
||||
{ \
|
||||
DATA_TYPE *haddr, ret; \
|
||||
haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
|
||||
PAGE_READ | PAGE_WRITE, retaddr); \
|
||||
DATA_TYPE ret; \
|
||||
ret = qatomic_##X(haddr, val); \
|
||||
ATOMIC_MMU_CLEANUP; \
|
||||
atomic_trace_rmw_post(env, addr, \
|
||||
VALUE_LOW(ret), \
|
||||
VALUE_HIGH(ret), \
|
||||
VALUE_LOW(val), \
|
||||
VALUE_HIGH(val), \
|
||||
oi); \
|
||||
atomic_trace_rmw_post(env, addr, oi); \
|
||||
return ret; \
|
||||
}
|
||||
|
||||
@@ -156,11 +160,12 @@ GEN_ATOMIC_HELPER(xor_fetch)
|
||||
* of CF_PARALLEL's value, we'll trace just a read and a write.
|
||||
*/
|
||||
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
|
||||
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
|
||||
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
|
||||
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
|
||||
{ \
|
||||
XDATA_TYPE *haddr, cmp, old, new, val = xval; \
|
||||
haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
|
||||
XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
|
||||
PAGE_READ | PAGE_WRITE, retaddr); \
|
||||
XDATA_TYPE cmp, old, new, val = xval; \
|
||||
smp_mb(); \
|
||||
cmp = qatomic_read__nocheck(haddr); \
|
||||
do { \
|
||||
@@ -168,12 +173,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
|
||||
cmp = qatomic_cmpxchg__nocheck(haddr, old, new); \
|
||||
} while (cmp != old); \
|
||||
ATOMIC_MMU_CLEANUP; \
|
||||
atomic_trace_rmw_post(env, addr, \
|
||||
VALUE_LOW(old), \
|
||||
VALUE_HIGH(old), \
|
||||
VALUE_LOW(xval), \
|
||||
VALUE_HIGH(xval), \
|
||||
oi); \
|
||||
atomic_trace_rmw_post(env, addr, oi); \
|
||||
return RET; \
|
||||
}
|
||||
|
||||
@@ -188,7 +188,7 @@ GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
|
||||
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
|
||||
|
||||
#undef GEN_ATOMIC_HELPER_FN
|
||||
#endif /* DATA SIZE < 16 */
|
||||
#endif /* DATA SIZE >= 16 */
|
||||
|
||||
#undef END
|
||||
|
||||
@@ -202,12 +202,12 @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
|
||||
# define END _be
|
||||
#endif
|
||||
|
||||
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
|
||||
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
|
||||
ABI_TYPE cmpv, ABI_TYPE newv,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
|
||||
DATA_SIZE, retaddr);
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
|
||||
PAGE_READ | PAGE_WRITE, retaddr);
|
||||
DATA_TYPE ret;
|
||||
|
||||
#if DATA_SIZE == 16
|
||||
@@ -216,48 +216,61 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
|
||||
ret = qatomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
|
||||
#endif
|
||||
ATOMIC_MMU_CLEANUP;
|
||||
atomic_trace_rmw_post(env, addr,
|
||||
VALUE_LOW(ret),
|
||||
VALUE_HIGH(ret),
|
||||
VALUE_LOW(newv),
|
||||
VALUE_HIGH(newv),
|
||||
oi);
|
||||
atomic_trace_rmw_post(env, addr, oi);
|
||||
return BSWAP(ret);
|
||||
}
|
||||
|
||||
#if DATA_SIZE < 16
|
||||
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
|
||||
#if DATA_SIZE >= 16
|
||||
#if HAVE_ATOMIC128
|
||||
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
|
||||
PAGE_READ, retaddr);
|
||||
DATA_TYPE val;
|
||||
|
||||
val = atomic16_read(haddr);
|
||||
ATOMIC_MMU_CLEANUP;
|
||||
atomic_trace_ld_post(env, addr, oi);
|
||||
return BSWAP(val);
|
||||
}
|
||||
|
||||
void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
|
||||
PAGE_WRITE, retaddr);
|
||||
|
||||
val = BSWAP(val);
|
||||
atomic16_set(haddr, val);
|
||||
ATOMIC_MMU_CLEANUP;
|
||||
atomic_trace_st_post(env, addr, oi);
|
||||
}
|
||||
#endif
|
||||
#else
|
||||
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
|
||||
DATA_SIZE, retaddr);
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
|
||||
PAGE_READ | PAGE_WRITE, retaddr);
|
||||
ABI_TYPE ret;
|
||||
|
||||
ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
|
||||
ATOMIC_MMU_CLEANUP;
|
||||
atomic_trace_rmw_post(env, addr,
|
||||
VALUE_LOW(ret),
|
||||
VALUE_HIGH(ret),
|
||||
VALUE_LOW(val),
|
||||
VALUE_HIGH(val),
|
||||
oi);
|
||||
atomic_trace_rmw_post(env, addr, oi);
|
||||
return BSWAP(ret);
|
||||
}
|
||||
|
||||
#define GEN_ATOMIC_HELPER(X) \
|
||||
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
|
||||
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
|
||||
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
|
||||
{ \
|
||||
DATA_TYPE *haddr, ret; \
|
||||
haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
|
||||
PAGE_READ | PAGE_WRITE, retaddr); \
|
||||
DATA_TYPE ret; \
|
||||
ret = qatomic_##X(haddr, BSWAP(val)); \
|
||||
ATOMIC_MMU_CLEANUP; \
|
||||
atomic_trace_rmw_post(env, addr, \
|
||||
VALUE_LOW(ret), \
|
||||
VALUE_HIGH(ret), \
|
||||
VALUE_LOW(val), \
|
||||
VALUE_HIGH(val), \
|
||||
oi); \
|
||||
atomic_trace_rmw_post(env, addr, oi); \
|
||||
return BSWAP(ret); \
|
||||
}
|
||||
|
||||
@@ -278,11 +291,12 @@ GEN_ATOMIC_HELPER(xor_fetch)
|
||||
* of CF_PARALLEL's value, we'll trace just a read and a write.
|
||||
*/
|
||||
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
|
||||
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
|
||||
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
|
||||
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
|
||||
{ \
|
||||
XDATA_TYPE *haddr, ldo, ldn, old, new, val = xval; \
|
||||
haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
|
||||
XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
|
||||
PAGE_READ | PAGE_WRITE, retaddr); \
|
||||
XDATA_TYPE ldo, ldn, old, new, val = xval; \
|
||||
smp_mb(); \
|
||||
ldn = qatomic_read__nocheck(haddr); \
|
||||
do { \
|
||||
@@ -290,12 +304,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
|
||||
ldn = qatomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new)); \
|
||||
} while (ldo != ldn); \
|
||||
ATOMIC_MMU_CLEANUP; \
|
||||
atomic_trace_rmw_post(env, addr, \
|
||||
VALUE_LOW(old), \
|
||||
VALUE_HIGH(old), \
|
||||
VALUE_LOW(xval), \
|
||||
VALUE_HIGH(xval), \
|
||||
oi); \
|
||||
atomic_trace_rmw_post(env, addr, oi); \
|
||||
return RET; \
|
||||
}
|
||||
|
||||
@@ -317,7 +326,7 @@ GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
|
||||
#undef ADD
|
||||
|
||||
#undef GEN_ATOMIC_HELPER_FN
|
||||
#endif /* DATA_SIZE < 16 */
|
||||
#endif /* DATA_SIZE >= 16 */
|
||||
|
||||
#undef END
|
||||
#endif /* DATA_SIZE > 1 */
|
||||
@@ -329,5 +338,3 @@ GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
|
||||
#undef SUFFIX
|
||||
#undef DATA_SIZE
|
||||
#undef SHIFT
|
||||
#undef VALUE_LOW
|
||||
#undef VALUE_HIGH
|
||||
|
@@ -20,8 +20,8 @@
|
||||
#include "qemu/osdep.h"
|
||||
#include "sysemu/cpus.h"
|
||||
#include "sysemu/tcg.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "qemu/plugin.h"
|
||||
#include "internal-common.h"
|
||||
|
||||
bool tcg_allowed;
|
||||
|
||||
@@ -32,10 +32,40 @@ void cpu_loop_exit_noexc(CPUState *cpu)
|
||||
cpu_loop_exit(cpu);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_SOFTMMU)
|
||||
void cpu_reloading_memory_map(void)
|
||||
{
|
||||
if (qemu_in_vcpu_thread() && current_cpu->running) {
|
||||
/* The guest can in theory prolong the RCU critical section as long
|
||||
* as it feels like. The major problem with this is that because it
|
||||
* can do multiple reconfigurations of the memory map within the
|
||||
* critical section, we could potentially accumulate an unbounded
|
||||
* collection of memory data structures awaiting reclamation.
|
||||
*
|
||||
* Because the only thing we're currently protecting with RCU is the
|
||||
* memory data structures, it's sufficient to break the critical section
|
||||
* in this callback, which we know will get called every time the
|
||||
* memory map is rearranged.
|
||||
*
|
||||
* (If we add anything else in the system that uses RCU to protect
|
||||
* its data structures, we will need to implement some other mechanism
|
||||
* to force TCG CPUs to exit the critical section, at which point this
|
||||
* part of this callback might become unnecessary.)
|
||||
*
|
||||
* This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
|
||||
* only protects cpu->as->dispatch. Since we know our caller is about
|
||||
* to reload it, it's safe to split the critical section.
|
||||
*/
|
||||
rcu_read_unlock();
|
||||
rcu_read_lock();
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
void cpu_loop_exit(CPUState *cpu)
|
||||
{
|
||||
/* Undo the setting in cpu_tb_exec. */
|
||||
cpu->neg.can_do_io = true;
|
||||
cpu->can_do_io = 1;
|
||||
/* Undo any setting in generated code. */
|
||||
qemu_plugin_disable_mem_helpers(cpu);
|
||||
siglongjmp(cpu->jmp_env, 1);
|
||||
@@ -51,8 +81,6 @@ void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
|
||||
|
||||
void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc)
|
||||
{
|
||||
/* Prevent looping if already executing in a serial context. */
|
||||
g_assert(!cpu_in_serial_context(cpu));
|
||||
cpu->exception_index = EXCP_ATOMIC;
|
||||
cpu_loop_exit_restore(cpu, pc);
|
||||
}
|
||||
|
@@ -30,17 +30,19 @@
|
||||
#include "qemu/rcu.h"
|
||||
#include "exec/log.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
|
||||
#include "hw/i386/apic.h"
|
||||
#endif
|
||||
#include "sysemu/cpus.h"
|
||||
#include "exec/cpu-all.h"
|
||||
#include "sysemu/cpu-timers.h"
|
||||
#include "exec/replay-core.h"
|
||||
#include "sysemu/tcg.h"
|
||||
#include "exec/helper-proto-common.h"
|
||||
#include "exec/helper-proto.h"
|
||||
#include "tb-jmp-cache.h"
|
||||
#include "tb-hash.h"
|
||||
#include "tb-context.h"
|
||||
#include "internal-common.h"
|
||||
#include "internal-target.h"
|
||||
#include "internal.h"
|
||||
|
||||
/* -icount align implementation. */
|
||||
|
||||
@@ -71,7 +73,7 @@ static void align_clocks(SyncClocks *sc, CPUState *cpu)
|
||||
return;
|
||||
}
|
||||
|
||||
cpu_icount = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
|
||||
cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
|
||||
sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
|
||||
sc->last_cpu_icount = cpu_icount;
|
||||
|
||||
@@ -122,7 +124,7 @@ static void init_delay_params(SyncClocks *sc, CPUState *cpu)
|
||||
sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
|
||||
sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
|
||||
sc->last_cpu_icount
|
||||
= cpu->icount_extra + cpu->neg.icount_decr.u16.low;
|
||||
= cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
|
||||
if (sc->diff_clk < max_delay) {
|
||||
max_delay = sc->diff_clk;
|
||||
}
|
||||
@@ -144,16 +146,6 @@ static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
|
||||
}
|
||||
#endif /* CONFIG USER ONLY */
|
||||
|
||||
bool tcg_cflags_has(CPUState *cpu, uint32_t flags)
|
||||
{
|
||||
return cpu->tcg_cflags & flags;
|
||||
}
|
||||
|
||||
void tcg_cflags_set(CPUState *cpu, uint32_t flags)
|
||||
{
|
||||
cpu->tcg_cflags |= flags;
|
||||
}
|
||||
|
||||
uint32_t curr_cflags(CPUState *cpu)
|
||||
{
|
||||
uint32_t cflags = cpu->tcg_cflags;
|
||||
@@ -167,7 +159,7 @@ uint32_t curr_cflags(CPUState *cpu)
|
||||
*/
|
||||
if (unlikely(cpu->singlestep_enabled)) {
|
||||
cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
|
||||
} else if (qatomic_read(&one_insn_per_tb)) {
|
||||
} else if (singlestep) {
|
||||
cflags |= CF_NO_GOTO_TB | 1;
|
||||
} else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
|
||||
cflags |= CF_NO_GOTO_TB;
|
||||
@@ -177,12 +169,13 @@ uint32_t curr_cflags(CPUState *cpu)
|
||||
}
|
||||
|
||||
struct tb_desc {
|
||||
vaddr pc;
|
||||
uint64_t cs_base;
|
||||
target_ulong pc;
|
||||
target_ulong cs_base;
|
||||
CPUArchState *env;
|
||||
tb_page_addr_t page_addr0;
|
||||
uint32_t flags;
|
||||
uint32_t cflags;
|
||||
uint32_t trace_vcpu_dstate;
|
||||
};
|
||||
|
||||
static bool tb_lookup_cmp(const void *p, const void *d)
|
||||
@@ -194,6 +187,7 @@ static bool tb_lookup_cmp(const void *p, const void *d)
|
||||
tb_page_addr0(tb) == desc->page_addr0 &&
|
||||
tb->cs_base == desc->cs_base &&
|
||||
tb->flags == desc->flags &&
|
||||
tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
|
||||
tb_cflags(tb) == desc->cflags) {
|
||||
/* check next page if needed */
|
||||
tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb);
|
||||
@@ -201,7 +195,7 @@ static bool tb_lookup_cmp(const void *p, const void *d)
|
||||
return true;
|
||||
} else {
|
||||
tb_page_addr_t phys_page1;
|
||||
vaddr virt_page1;
|
||||
target_ulong virt_page1;
|
||||
|
||||
/*
|
||||
* We know that the first page matched, and an otherwise valid TB
|
||||
@@ -222,18 +216,19 @@ static bool tb_lookup_cmp(const void *p, const void *d)
|
||||
return false;
|
||||
}
|
||||
|
||||
static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
|
||||
uint64_t cs_base, uint32_t flags,
|
||||
static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
|
||||
target_ulong cs_base, uint32_t flags,
|
||||
uint32_t cflags)
|
||||
{
|
||||
tb_page_addr_t phys_pc;
|
||||
struct tb_desc desc;
|
||||
uint32_t h;
|
||||
|
||||
desc.env = cpu_env(cpu);
|
||||
desc.env = cpu->env_ptr;
|
||||
desc.cs_base = cs_base;
|
||||
desc.flags = flags;
|
||||
desc.cflags = cflags;
|
||||
desc.trace_vcpu_dstate = *cpu->trace_dstate;
|
||||
desc.pc = pc;
|
||||
phys_pc = get_page_addr_code(desc.env, pc);
|
||||
if (phys_pc == -1) {
|
||||
@@ -241,14 +236,14 @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
|
||||
}
|
||||
desc.page_addr0 = phys_pc;
|
||||
h = tb_hash_func(phys_pc, (cflags & CF_PCREL ? 0 : pc),
|
||||
flags, cs_base, cflags);
|
||||
flags, cflags, *cpu->trace_dstate);
|
||||
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
|
||||
}
|
||||
|
||||
/* Might cause an exception, so have a longjmp destination ready */
|
||||
static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
|
||||
uint64_t cs_base, uint32_t flags,
|
||||
uint32_t cflags)
|
||||
static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
|
||||
target_ulong cs_base,
|
||||
uint32_t flags, uint32_t cflags)
|
||||
{
|
||||
TranslationBlock *tb;
|
||||
CPUJumpCache *jc;
|
||||
@@ -260,42 +255,59 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
|
||||
hash = tb_jmp_cache_hash_func(pc);
|
||||
jc = cpu->tb_jmp_cache;
|
||||
|
||||
tb = qatomic_read(&jc->array[hash].tb);
|
||||
if (likely(tb &&
|
||||
jc->array[hash].pc == pc &&
|
||||
tb->cs_base == cs_base &&
|
||||
tb->flags == flags &&
|
||||
tb_cflags(tb) == cflags)) {
|
||||
goto hit;
|
||||
if (cflags & CF_PCREL) {
|
||||
/* Use acquire to ensure current load of pc from jc. */
|
||||
tb = qatomic_load_acquire(&jc->array[hash].tb);
|
||||
|
||||
if (likely(tb &&
|
||||
jc->array[hash].pc == pc &&
|
||||
tb->cs_base == cs_base &&
|
||||
tb->flags == flags &&
|
||||
tb->trace_vcpu_dstate == *cpu->trace_dstate &&
|
||||
tb_cflags(tb) == cflags)) {
|
||||
return tb;
|
||||
}
|
||||
tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
|
||||
if (tb == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
jc->array[hash].pc = pc;
|
||||
/* Ensure pc is written first. */
|
||||
qatomic_store_release(&jc->array[hash].tb, tb);
|
||||
} else {
|
||||
/* Use rcu_read to ensure current load of pc from *tb. */
|
||||
tb = qatomic_rcu_read(&jc->array[hash].tb);
|
||||
|
||||
if (likely(tb &&
|
||||
tb->pc == pc &&
|
||||
tb->cs_base == cs_base &&
|
||||
tb->flags == flags &&
|
||||
tb->trace_vcpu_dstate == *cpu->trace_dstate &&
|
||||
tb_cflags(tb) == cflags)) {
|
||||
return tb;
|
||||
}
|
||||
tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
|
||||
if (tb == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
/* Use the pc value already stored in tb->pc. */
|
||||
qatomic_set(&jc->array[hash].tb, tb);
|
||||
}
|
||||
|
||||
tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
|
||||
if (tb == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
jc->array[hash].pc = pc;
|
||||
qatomic_set(&jc->array[hash].tb, tb);
|
||||
|
||||
hit:
|
||||
/*
|
||||
* As long as tb is not NULL, the contents are consistent. Therefore,
|
||||
* the virtual PC has to match for non-CF_PCREL translations.
|
||||
*/
|
||||
assert((tb_cflags(tb) & CF_PCREL) || tb->pc == pc);
|
||||
return tb;
|
||||
}
|
||||
|
||||
static void log_cpu_exec(vaddr pc, CPUState *cpu,
|
||||
static void log_cpu_exec(target_ulong pc, CPUState *cpu,
|
||||
const TranslationBlock *tb)
|
||||
{
|
||||
if (qemu_log_in_addr_range(pc)) {
|
||||
qemu_log_mask(CPU_LOG_EXEC,
|
||||
"Trace %d: %p [%08" PRIx64
|
||||
"/%016" VADDR_PRIx "/%08x/%08x] %s\n",
|
||||
"Trace %d: %p [" TARGET_FMT_lx
|
||||
"/" TARGET_FMT_lx "/%08x/%08x] %s\n",
|
||||
cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
|
||||
tb->flags, tb->cflags, lookup_symbol(pc));
|
||||
|
||||
#if defined(DEBUG_DISAS)
|
||||
if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
|
||||
FILE *logfile = qemu_log_trylock();
|
||||
if (logfile) {
|
||||
@@ -307,17 +319,15 @@ static void log_cpu_exec(vaddr pc, CPUState *cpu,
|
||||
#if defined(TARGET_I386)
|
||||
flags |= CPU_DUMP_CCOP;
|
||||
#endif
|
||||
if (qemu_loglevel_mask(CPU_LOG_TB_VPU)) {
|
||||
flags |= CPU_DUMP_VPU;
|
||||
}
|
||||
cpu_dump_state(cpu, logfile, flags);
|
||||
qemu_log_unlock(logfile);
|
||||
}
|
||||
}
|
||||
#endif /* DEBUG_DISAS */
|
||||
}
|
||||
}
|
||||
|
||||
static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
|
||||
static bool check_for_breakpoints_slow(CPUState *cpu, target_ulong pc,
|
||||
uint32_t *cflags)
|
||||
{
|
||||
CPUBreakpoint *bp;
|
||||
@@ -350,9 +360,9 @@ static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
g_assert_not_reached();
|
||||
#else
|
||||
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
|
||||
assert(tcg_ops->debug_check_breakpoint);
|
||||
match_bp = tcg_ops->debug_check_breakpoint(cpu);
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
assert(cc->tcg_ops->debug_check_breakpoint);
|
||||
match_bp = cc->tcg_ops->debug_check_breakpoint(cpu);
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -378,12 +388,12 @@ static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
|
||||
* breakpoints are removed.
|
||||
*/
|
||||
if (match_page) {
|
||||
*cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | CF_BP_PAGE | 1;
|
||||
*cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool check_for_breakpoints(CPUState *cpu, vaddr pc,
|
||||
static inline bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
|
||||
uint32_t *cflags)
|
||||
{
|
||||
return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) &&
|
||||
@@ -402,18 +412,9 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
|
||||
{
|
||||
CPUState *cpu = env_cpu(env);
|
||||
TranslationBlock *tb;
|
||||
vaddr pc;
|
||||
uint64_t cs_base;
|
||||
target_ulong cs_base, pc;
|
||||
uint32_t flags, cflags;
|
||||
|
||||
/*
|
||||
* By definition we've just finished a TB, so I/O is OK.
|
||||
* Avoid the possibility of calling cpu_io_recompile() if
|
||||
* a page table walk triggered by tb_lookup() calling
|
||||
* probe_access_internal() happens to touch an MMIO device.
|
||||
* The next TB, if we chain to it, will clear the flag again.
|
||||
*/
|
||||
cpu->neg.can_do_io = true;
|
||||
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
|
||||
|
||||
cflags = curr_cflags(cpu);
|
||||
@@ -446,6 +447,7 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
|
||||
static inline TranslationBlock * QEMU_DISABLE_CFI
|
||||
cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
|
||||
{
|
||||
CPUArchState *env = cpu->env_ptr;
|
||||
uintptr_t ret;
|
||||
TranslationBlock *last_tb;
|
||||
const void *tb_ptr = itb->tc.ptr;
|
||||
@@ -455,8 +457,8 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
|
||||
}
|
||||
|
||||
qemu_thread_jit_execute();
|
||||
ret = tcg_qemu_tb_exec(cpu_env(cpu), tb_ptr);
|
||||
cpu->neg.can_do_io = true;
|
||||
ret = tcg_qemu_tb_exec(env, tb_ptr);
|
||||
cpu->can_do_io = 1;
|
||||
qemu_plugin_disable_mem_helpers(cpu);
|
||||
/*
|
||||
* TODO: Delay swapping back to the read-write region of the TB
|
||||
@@ -476,21 +478,20 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
|
||||
* counter hit zero); we must restore the guest PC to the address
|
||||
* of the start of the TB.
|
||||
*/
|
||||
CPUClass *cc = cpu->cc;
|
||||
const TCGCPUOps *tcg_ops = cc->tcg_ops;
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
if (tcg_ops->synchronize_from_tb) {
|
||||
tcg_ops->synchronize_from_tb(cpu, last_tb);
|
||||
if (cc->tcg_ops->synchronize_from_tb) {
|
||||
cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
|
||||
} else {
|
||||
tcg_debug_assert(!(tb_cflags(last_tb) & CF_PCREL));
|
||||
assert(cc->set_pc);
|
||||
cc->set_pc(cpu, last_tb->pc);
|
||||
}
|
||||
if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
|
||||
vaddr pc = log_pc(cpu, last_tb);
|
||||
target_ulong pc = log_pc(cpu, last_tb);
|
||||
if (qemu_log_in_addr_range(pc)) {
|
||||
qemu_log("Stopped execution of TB chain before %p [%016"
|
||||
VADDR_PRIx "] %s\n",
|
||||
qemu_log("Stopped execution of TB chain before %p ["
|
||||
TARGET_FMT_lx "] %s\n",
|
||||
last_tb->tc.ptr, pc, lookup_symbol(pc));
|
||||
}
|
||||
}
|
||||
@@ -512,65 +513,27 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
|
||||
|
||||
static void cpu_exec_enter(CPUState *cpu)
|
||||
{
|
||||
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
if (tcg_ops->cpu_exec_enter) {
|
||||
tcg_ops->cpu_exec_enter(cpu);
|
||||
if (cc->tcg_ops->cpu_exec_enter) {
|
||||
cc->tcg_ops->cpu_exec_enter(cpu);
|
||||
}
|
||||
}
|
||||
|
||||
static void cpu_exec_exit(CPUState *cpu)
|
||||
{
|
||||
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
if (tcg_ops->cpu_exec_exit) {
|
||||
tcg_ops->cpu_exec_exit(cpu);
|
||||
if (cc->tcg_ops->cpu_exec_exit) {
|
||||
cc->tcg_ops->cpu_exec_exit(cpu);
|
||||
}
|
||||
}
|
||||
|
||||
static void cpu_exec_longjmp_cleanup(CPUState *cpu)
|
||||
{
|
||||
/* Non-buggy compilers preserve this; assert the correct value. */
|
||||
g_assert(cpu == current_cpu);
|
||||
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
clear_helper_retaddr();
|
||||
if (have_mmap_lock()) {
|
||||
mmap_unlock();
|
||||
}
|
||||
#else
|
||||
/*
|
||||
* For softmmu, a tlb_fill fault during translation will land here,
|
||||
* and we need to release any page locks held. In system mode we
|
||||
* have one tcg_ctx per thread, so we know it was this cpu doing
|
||||
* the translation.
|
||||
*
|
||||
* Alternative 1: Install a cleanup to be called via an exception
|
||||
* handling safe longjmp. It seems plausible that all our hosts
|
||||
* support such a thing. We'd have to properly register unwind info
|
||||
* for the JIT for EH, rather that just for GDB.
|
||||
*
|
||||
* Alternative 2: Set and restore cpu->jmp_env in tb_gen_code to
|
||||
* capture the cpu_loop_exit longjmp, perform the cleanup, and
|
||||
* jump again to arrive here.
|
||||
*/
|
||||
if (tcg_ctx->gen_tb) {
|
||||
tb_unlock_pages(tcg_ctx->gen_tb);
|
||||
tcg_ctx->gen_tb = NULL;
|
||||
}
|
||||
#endif
|
||||
if (bql_locked()) {
|
||||
bql_unlock();
|
||||
}
|
||||
assert_no_pages_locked();
|
||||
}
|
||||
|
||||
void cpu_exec_step_atomic(CPUState *cpu)
|
||||
{
|
||||
CPUArchState *env = cpu_env(cpu);
|
||||
CPUArchState *env = cpu->env_ptr;
|
||||
TranslationBlock *tb;
|
||||
vaddr pc;
|
||||
uint64_t cs_base;
|
||||
target_ulong cs_base, pc;
|
||||
uint32_t flags, cflags;
|
||||
int tb_exit;
|
||||
|
||||
@@ -607,7 +570,16 @@ void cpu_exec_step_atomic(CPUState *cpu)
|
||||
cpu_tb_exec(cpu, tb, &tb_exit);
|
||||
cpu_exec_exit(cpu);
|
||||
} else {
|
||||
cpu_exec_longjmp_cleanup(cpu);
|
||||
#ifndef CONFIG_SOFTMMU
|
||||
clear_helper_retaddr();
|
||||
if (have_mmap_lock()) {
|
||||
mmap_unlock();
|
||||
}
|
||||
#endif
|
||||
if (qemu_mutex_iothread_locked()) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
}
|
||||
assert_no_pages_locked();
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -678,10 +650,16 @@ static inline bool cpu_handle_halt(CPUState *cpu)
|
||||
{
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
if (cpu->halted) {
|
||||
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
|
||||
bool leave_halt = tcg_ops->cpu_exec_halt(cpu);
|
||||
|
||||
if (!leave_halt) {
|
||||
#if defined(TARGET_I386)
|
||||
if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
|
||||
X86CPU *x86_cpu = X86_CPU(cpu);
|
||||
qemu_mutex_lock_iothread();
|
||||
apic_poll_irq(x86_cpu->apic_state);
|
||||
cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
|
||||
qemu_mutex_unlock_iothread();
|
||||
}
|
||||
#endif /* TARGET_I386 */
|
||||
if (!cpu_has_work(cpu)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -694,7 +672,7 @@ static inline bool cpu_handle_halt(CPUState *cpu)
|
||||
|
||||
static inline void cpu_handle_debug_exception(CPUState *cpu)
|
||||
{
|
||||
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
CPUWatchpoint *wp;
|
||||
|
||||
if (!cpu->watchpoint_hit) {
|
||||
@@ -703,8 +681,8 @@ static inline void cpu_handle_debug_exception(CPUState *cpu)
|
||||
}
|
||||
}
|
||||
|
||||
if (tcg_ops->debug_excp_handler) {
|
||||
tcg_ops->debug_excp_handler(cpu);
|
||||
if (cc->tcg_ops->debug_excp_handler) {
|
||||
cc->tcg_ops->debug_excp_handler(cpu);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -713,7 +691,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
|
||||
if (cpu->exception_index < 0) {
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
if (replay_has_exception()
|
||||
&& cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0) {
|
||||
&& cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
|
||||
/* Execute just one insn to trigger exception pending in the log */
|
||||
cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
|
||||
| CF_NOIRQ | 1;
|
||||
@@ -721,7 +699,6 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
|
||||
#endif
|
||||
return false;
|
||||
}
|
||||
|
||||
if (cpu->exception_index >= EXCP_INTERRUPT) {
|
||||
/* exit request from the cpu execution loop */
|
||||
*ret = cpu->exception_index;
|
||||
@@ -730,59 +707,62 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
|
||||
}
|
||||
cpu->exception_index = -1;
|
||||
return true;
|
||||
}
|
||||
|
||||
} else {
|
||||
#if defined(CONFIG_USER_ONLY)
|
||||
/*
|
||||
* If user mode only, we simulate a fake exception which will be
|
||||
* handled outside the cpu execution loop.
|
||||
*/
|
||||
/* if user mode only, we simulate a fake exception
|
||||
which will be handled outside the cpu execution
|
||||
loop */
|
||||
#if defined(TARGET_I386)
|
||||
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
|
||||
tcg_ops->fake_user_interrupt(cpu);
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
cc->tcg_ops->fake_user_interrupt(cpu);
|
||||
#endif /* TARGET_I386 */
|
||||
*ret = cpu->exception_index;
|
||||
cpu->exception_index = -1;
|
||||
return true;
|
||||
#else
|
||||
if (replay_exception()) {
|
||||
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
|
||||
|
||||
bql_lock();
|
||||
tcg_ops->do_interrupt(cpu);
|
||||
bql_unlock();
|
||||
*ret = cpu->exception_index;
|
||||
cpu->exception_index = -1;
|
||||
return true;
|
||||
#else
|
||||
if (replay_exception()) {
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
qemu_mutex_lock_iothread();
|
||||
cc->tcg_ops->do_interrupt(cpu);
|
||||
qemu_mutex_unlock_iothread();
|
||||
cpu->exception_index = -1;
|
||||
|
||||
if (unlikely(cpu->singlestep_enabled)) {
|
||||
/*
|
||||
* After processing the exception, ensure an EXCP_DEBUG is
|
||||
* raised when single-stepping so that GDB doesn't miss the
|
||||
* next instruction.
|
||||
*/
|
||||
*ret = EXCP_DEBUG;
|
||||
cpu_handle_debug_exception(cpu);
|
||||
if (unlikely(cpu->singlestep_enabled)) {
|
||||
/*
|
||||
* After processing the exception, ensure an EXCP_DEBUG is
|
||||
* raised when single-stepping so that GDB doesn't miss the
|
||||
* next instruction.
|
||||
*/
|
||||
*ret = EXCP_DEBUG;
|
||||
cpu_handle_debug_exception(cpu);
|
||||
return true;
|
||||
}
|
||||
} else if (!replay_has_interrupt()) {
|
||||
/* give a chance to iothread in replay mode */
|
||||
*ret = EXCP_INTERRUPT;
|
||||
return true;
|
||||
}
|
||||
} else if (!replay_has_interrupt()) {
|
||||
/* give a chance to iothread in replay mode */
|
||||
*ret = EXCP_INTERRUPT;
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool icount_exit_request(CPUState *cpu)
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
/*
|
||||
* CPU_INTERRUPT_POLL is a virtual event which gets converted into a
|
||||
* "real" interrupt event later. It does not need to be recorded for
|
||||
* replay purposes.
|
||||
*/
|
||||
static inline bool need_replay_interrupt(int interrupt_request)
|
||||
{
|
||||
if (!icount_enabled()) {
|
||||
return false;
|
||||
}
|
||||
if (cpu->cflags_next_tb != -1 && !(cpu->cflags_next_tb & CF_USE_ICOUNT)) {
|
||||
return false;
|
||||
}
|
||||
return cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0;
|
||||
#if defined(TARGET_I386)
|
||||
return !(interrupt_request & CPU_INTERRUPT_POLL);
|
||||
#else
|
||||
return true;
|
||||
#endif
|
||||
}
|
||||
#endif /* !CONFIG_USER_ONLY */
|
||||
|
||||
static inline bool cpu_handle_interrupt(CPUState *cpu,
|
||||
TranslationBlock **last_tb)
|
||||
@@ -801,11 +781,11 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
|
||||
* Ensure zeroing happens before reading cpu->exit_request or
|
||||
* cpu->interrupt_request (see also smp_wmb in cpu_exit())
|
||||
*/
|
||||
qatomic_set_mb(&cpu->neg.icount_decr.u16.high, 0);
|
||||
qatomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);
|
||||
|
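For context, the barrier pairing referred to in the comment above (the smp_wmb() in cpu_exit()) works roughly as sketched below. This is an illustrative reconstruction of the producer side, not a quote of cpu_exit() from this patch series, so treat the function name and exact field accesses as assumptions:

/* Sketch of the producer side assumed by the comment above (illustrative). */
static void cpu_exit_sketch(CPUState *cpu)
{
    qatomic_set(&cpu->exit_request, 1);
    /* Publish exit_request before the icount_decr store that cpu_exec polls. */
    smp_wmb();
    qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
}

The qatomic_set_mb() above is the matching consumer-side full barrier: it zeroes the high half first, then reads exit_request/interrupt_request, so a concurrent cpu_exit() is never missed.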
||||
if (unlikely(qatomic_read(&cpu->interrupt_request))) {
|
||||
int interrupt_request;
|
||||
bql_lock();
|
||||
qemu_mutex_lock_iothread();
|
||||
interrupt_request = cpu->interrupt_request;
|
||||
if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
|
||||
/* Mask out external interrupts for this step. */
|
||||
@@ -814,7 +794,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
|
||||
if (interrupt_request & CPU_INTERRUPT_DEBUG) {
|
||||
cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
|
||||
cpu->exception_index = EXCP_DEBUG;
|
||||
bql_unlock();
|
||||
qemu_mutex_unlock_iothread();
|
||||
return true;
|
||||
}
|
||||
#if !defined(CONFIG_USER_ONLY)
|
||||
@@ -825,7 +805,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
|
||||
cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
|
||||
cpu->halted = 1;
|
||||
cpu->exception_index = EXCP_HLT;
|
||||
bql_unlock();
|
||||
qemu_mutex_unlock_iothread();
|
||||
return true;
|
||||
}
|
||||
#if defined(TARGET_I386)
|
||||
@@ -836,14 +816,14 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
|
||||
cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
|
||||
do_cpu_init(x86_cpu);
|
||||
cpu->exception_index = EXCP_HALTED;
|
||||
bql_unlock();
|
||||
qemu_mutex_unlock_iothread();
|
||||
return true;
|
||||
}
|
||||
#else
|
||||
else if (interrupt_request & CPU_INTERRUPT_RESET) {
|
||||
replay_interrupt();
|
||||
cpu_reset(cpu);
|
||||
bql_unlock();
|
||||
qemu_mutex_unlock_iothread();
|
||||
return true;
|
||||
}
|
||||
#endif /* !TARGET_I386 */
|
||||
@@ -852,11 +832,11 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
|
||||
True when it is, and we should restart on a new TB,
|
||||
and via longjmp via cpu_loop_exit. */
|
||||
else {
|
||||
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
if (tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
|
||||
if (!tcg_ops->need_replay_interrupt ||
|
||||
tcg_ops->need_replay_interrupt(interrupt_request)) {
|
||||
if (cc->tcg_ops->cpu_exec_interrupt &&
|
||||
cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
|
||||
if (need_replay_interrupt(interrupt_request)) {
|
||||
replay_interrupt();
|
||||
}
|
||||
/*
|
||||
@@ -866,7 +846,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
|
||||
*/
|
||||
if (unlikely(cpu->singlestep_enabled)) {
|
||||
cpu->exception_index = EXCP_DEBUG;
|
||||
bql_unlock();
|
||||
qemu_mutex_unlock_iothread();
|
||||
return true;
|
||||
}
|
||||
cpu->exception_index = -1;
|
||||
@@ -885,11 +865,14 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
|
||||
}
|
||||
|
||||
/* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
|
||||
bql_unlock();
|
||||
qemu_mutex_unlock_iothread();
|
||||
}
|
||||
|
||||
/* Finally, check if we need to exit to the main loop. */
|
||||
if (unlikely(qatomic_read(&cpu->exit_request)) || icount_exit_request(cpu)) {
|
||||
if (unlikely(qatomic_read(&cpu->exit_request))
|
||||
|| (icount_enabled()
|
||||
&& (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT)
|
||||
&& cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
|
||||
qatomic_set(&cpu->exit_request, 0);
|
||||
if (cpu->exception_index == -1) {
|
||||
cpu->exception_index = EXCP_INTERRUPT;
|
||||
@@ -901,9 +884,11 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
|
||||
}
|
||||
|
||||
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
|
||||
vaddr pc, TranslationBlock **last_tb,
|
||||
int *tb_exit)
|
||||
target_ulong pc,
|
||||
TranslationBlock **last_tb, int *tb_exit)
|
||||
{
|
||||
int32_t insns_left;
|
||||
|
||||
trace_exec_tb(tb, pc);
|
||||
tb = cpu_tb_exec(cpu, tb, tb_exit);
|
||||
if (*tb_exit != TB_EXIT_REQUESTED) {
|
||||
@@ -912,7 +897,8 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
|
||||
}
|
||||
|
||||
*last_tb = NULL;
|
||||
if (cpu_loop_exit_requested(cpu)) {
|
||||
insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32);
|
||||
if (insns_left < 0) {
|
||||
/* Something asked us to stop executing chained TBs; just
|
||||
* continue round the main loop. Whatever requested the exit
|
||||
* will also have set something else (eg exit_request or
|
||||
@@ -929,8 +915,8 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
/* Ensure global icount has gone forward */
icount_update(cpu);
/* Refill decrementer and continue execution. */
int32_t insns_left = MIN(0xffff, cpu->icount_budget);
cpu->neg.icount_decr.u16.low = insns_left;
insns_left = MIN(0xffff, cpu->icount_budget);
cpu_neg(cpu)->icount_decr.u16.low = insns_left;
cpu->icount_extra = cpu->icount_budget - insns_left;

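A quick worked example of the refill above, with an illustrative, made-up budget of 100000 instructions: the decrementer's low half is only 16 bits, so it is reloaded with at most 0xffff and the rest of the budget is parked in icount_extra until the next refill:

/* Illustrative numbers only; cpu->icount_budget == 100000 is assumed. */
insns_left = MIN(0xffff, 100000);            /* 65535 */
cpu->neg.icount_decr.u16.low = insns_left;   /* decrementer reloaded */
cpu->icount_extra = 100000 - insns_left;     /* 34465 left for later refills */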
/*
|
||||
@@ -960,11 +946,10 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
|
||||
|
||||
while (!cpu_handle_interrupt(cpu, &last_tb)) {
|
||||
TranslationBlock *tb;
|
||||
vaddr pc;
|
||||
uint64_t cs_base;
|
||||
target_ulong cs_base, pc;
|
||||
uint32_t flags, cflags;
|
||||
|
||||
cpu_get_tb_cpu_state(cpu_env(cpu), &pc, &cs_base, &flags);
|
||||
cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);
|
||||
|
||||
/*
|
||||
* When requested, use an exact setting for cflags for the next
|
||||
@@ -999,8 +984,14 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
|
||||
*/
|
||||
h = tb_jmp_cache_hash_func(pc);
jc = cpu->tb_jmp_cache;
jc->array[h].pc = pc;
qatomic_set(&jc->array[h].tb, tb);
if (cflags & CF_PCREL) {
    jc->array[h].pc = pc;
    /* Ensure pc is written first. */
    qatomic_store_release(&jc->array[h].tb, tb);
} else {
    /* Use the pc value already stored in tb->pc. */
    qatomic_set(&jc->array[h].tb, tb);
}
}

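The release store above pairs with an acquire load on the lookup side. A minimal sketch of what that reader might look like is shown below; the helper name and the simplified validity check are assumptions for illustration, not code from this patch:

/* Sketch of the matching reader for the CF_PCREL case (illustrative). */
static TranslationBlock *tb_jmp_cache_lookup_sketch(CPUJumpCache *jc,
                                                    uint32_t h, vaddr pc)
{
    /*
     * The acquire pairs with qatomic_store_release() above: a non-NULL
     * tb guarantees that the pc written before it is already visible.
     */
    TranslationBlock *tb = qatomic_load_acquire(&jc->array[h].tb);

    if (tb && jc->array[h].pc == pc) {
        return tb;      /* hit: pc is valid because tb was published after it */
    }
    return NULL;        /* miss: fall back to the hash table lookup */
}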
#ifndef CONFIG_USER_ONLY
|
||||
@@ -1033,7 +1024,20 @@ static int cpu_exec_setjmp(CPUState *cpu, SyncClocks *sc)
|
||||
{
|
||||
/* Prepare setjmp context for exception handling. */
|
||||
if (unlikely(sigsetjmp(cpu->jmp_env, 0) != 0)) {
|
||||
cpu_exec_longjmp_cleanup(cpu);
|
||||
/* Non-buggy compilers preserve this; assert the correct value. */
|
||||
g_assert(cpu == current_cpu);
|
||||
|
||||
#ifndef CONFIG_SOFTMMU
|
||||
clear_helper_retaddr();
|
||||
if (have_mmap_lock()) {
|
||||
mmap_unlock();
|
||||
}
|
||||
#endif
|
||||
if (qemu_mutex_iothread_locked()) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
}
|
||||
|
||||
assert_no_pages_locked();
|
||||
}
|
||||
|
||||
return cpu_exec_loop(cpu, sc);
|
||||
@@ -1051,7 +1055,7 @@ int cpu_exec(CPUState *cpu)
|
||||
return EXCP_HALTED;
|
||||
}
|
||||
|
||||
RCU_READ_LOCK_GUARD();
|
||||
rcu_read_lock();
|
||||
cpu_exec_enter(cpu);
|
||||
|
||||
/*
|
||||
@@ -1065,20 +1069,18 @@ int cpu_exec(CPUState *cpu)
|
||||
ret = cpu_exec_setjmp(cpu, &sc);
|
||||
|
||||
cpu_exec_exit(cpu);
|
||||
rcu_read_unlock();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
|
||||
void tcg_exec_realizefn(CPUState *cpu, Error **errp)
|
||||
{
|
||||
static bool tcg_target_initialized;
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
if (!tcg_target_initialized) {
|
||||
/* Check mandatory TCGCPUOps handlers */
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
assert(cpu->cc->tcg_ops->cpu_exec_halt);
|
||||
assert(cpu->cc->tcg_ops->cpu_exec_interrupt);
|
||||
#endif /* !CONFIG_USER_ONLY */
|
||||
cpu->cc->tcg_ops->initialize();
|
||||
cc->tcg_ops->initialize();
|
||||
tcg_target_initialized = true;
|
||||
}
|
||||
|
||||
@@ -1088,8 +1090,6 @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
|
||||
tcg_iommu_init_notifier_list(cpu);
|
||||
#endif /* !CONFIG_USER_ONLY */
|
||||
/* qemu_plugin_vcpu_init_hook delayed until cpu_index assigned. */
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/* undo the initializations in reverse order */
|
||||
|
accel/tcg/cputlb.c (2954 lines changed): file diff suppressed because it is too large
@@ -6,10 +6,11 @@
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/lockable.h"
|
||||
#include "tcg/debuginfo.h"
|
||||
|
||||
#include <elfutils/libdwfl.h>
|
||||
|
||||
#include "debuginfo.h"
|
||||
|
||||
static QemuMutex lock;
|
||||
static Dwfl *dwfl;
|
||||
static const Dwfl_Callbacks dwfl_callbacks = {
|
@@ -4,8 +4,8 @@
|
||||
* SPDX-License-Identifier: GPL-2.0-or-later
|
||||
*/
|
||||
|
||||
#ifndef TCG_DEBUGINFO_H
|
||||
#define TCG_DEBUGINFO_H
|
||||
#ifndef ACCEL_TCG_DEBUGINFO_H
|
||||
#define ACCEL_TCG_DEBUGINFO_H
|
||||
|
||||
#include "qemu/bitops.h"
|
||||
|
@@ -1,59 +0,0 @@
/*
 * Internal execution defines for qemu (target agnostic)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * SPDX-License-Identifier: LGPL-2.1-or-later
 */

#ifndef ACCEL_TCG_INTERNAL_COMMON_H
#define ACCEL_TCG_INTERNAL_COMMON_H

#include "exec/cpu-common.h"
#include "exec/translation-block.h"

extern int64_t max_delay;
extern int64_t max_advance;

extern bool one_insn_per_tb;

/*
 * Return true if CS is not running in parallel with other cpus, either
 * because there are no other cpus or we are within an exclusive context.
 */
static inline bool cpu_in_serial_context(CPUState *cs)
{
    return !tcg_cflags_has(cs, CF_PARALLEL) || cpu_in_exclusive_context(cs);
}

/**
 * cpu_plugin_mem_cbs_enabled() - are plugin memory callbacks enabled?
 * @cs: CPUState pointer
 *
 * The memory callbacks are installed if a plugin has instrumented an
 * instruction for memory. This can be useful to know if you want to
 * force a slow path for a series of memory accesses.
 */
static inline bool cpu_plugin_mem_cbs_enabled(const CPUState *cpu)
{
#ifdef CONFIG_PLUGIN
    return !!cpu->neg.plugin_mem_cbs;
#else
    return false;
#endif
}

TranslationBlock *tb_gen_code(CPUState *cpu, vaddr pc,
                              uint64_t cs_base, uint32_t flags,
                              int cflags);
void page_init(void);
void tb_htable_init(void);
void tb_reset_jump(TranslationBlock *tb, int n);
TranslationBlock *tb_link_page(TranslationBlock *tb);
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                               uintptr_t host_pc);

bool tcg_exec_realizefn(CPUState *cpu, Error **errp);
void tcg_exec_unrealizefn(CPUState *cpu);

#endif
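To illustrate how the two inline predicates above are meant to be used, here is a minimal, hypothetical caller; only cpu_plugin_mem_cbs_enabled() and cpu_in_serial_context() come from the header above, the surrounding function and helper names are made up:

/* Hypothetical caller (illustrative only; helper names are assumptions). */
static void guest_block_copy_sketch(CPUState *cs, vaddr dst, vaddr src, size_t len)
{
    if (cpu_plugin_mem_cbs_enabled(cs)) {
        /* A plugin instrumented memory: emit one callback per access. */
        copy_byte_by_byte_with_callbacks(cs, dst, src, len);
        return;
    }
    /*
     * cpu_in_serial_context(cs) could additionally be checked here to skip
     * locking when no other vCPU can run concurrently.
     */
    copy_as_single_block(cs, dst, src, len);
}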
@@ -1,118 +0,0 @@
|
||||
/*
|
||||
* Internal execution defines for qemu (target specific)
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* SPDX-License-Identifier: LGPL-2.1-or-later
|
||||
*/
|
||||
|
||||
#ifndef ACCEL_TCG_INTERNAL_TARGET_H
|
||||
#define ACCEL_TCG_INTERNAL_TARGET_H
|
||||
|
||||
#include "exec/exec-all.h"
|
||||
#include "exec/translate-all.h"
|
||||
|
||||
/*
|
||||
* Access to the various translations structures need to be serialised
|
||||
* via locks for consistency. In user-mode emulation access to the
|
||||
* memory related structures are protected with mmap_lock.
|
||||
* In !user-mode we use per-page locks.
|
||||
*/
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
|
||||
#else
|
||||
#define assert_memory_lock()
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG)
|
||||
void assert_no_pages_locked(void);
|
||||
#else
|
||||
static inline void assert_no_pages_locked(void) { }
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
static inline void page_table_config_init(void) { }
|
||||
#else
|
||||
void page_table_config_init(void);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
/*
|
||||
* For user-only, page_protect sets the page read-only.
|
||||
* Since most execution is already on read-only pages, and we'd need to
|
||||
* account for other TBs on the same page, defer undoing any page protection
|
||||
* until we receive the write fault.
|
||||
*/
|
||||
static inline void tb_lock_page0(tb_page_addr_t p0)
|
||||
{
|
||||
page_protect(p0);
|
||||
}
|
||||
|
||||
static inline void tb_lock_page1(tb_page_addr_t p0, tb_page_addr_t p1)
|
||||
{
|
||||
page_protect(p1);
|
||||
}
|
||||
|
||||
static inline void tb_unlock_page1(tb_page_addr_t p0, tb_page_addr_t p1) { }
|
||||
static inline void tb_unlock_pages(TranslationBlock *tb) { }
|
||||
#else
|
||||
void tb_lock_page0(tb_page_addr_t);
|
||||
void tb_lock_page1(tb_page_addr_t, tb_page_addr_t);
|
||||
void tb_unlock_page1(tb_page_addr_t, tb_page_addr_t);
|
||||
void tb_unlock_pages(TranslationBlock *);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SOFTMMU
|
||||
void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
|
||||
unsigned size,
|
||||
uintptr_t retaddr);
|
||||
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
|
||||
#endif /* CONFIG_SOFTMMU */
|
||||
|
||||
bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
|
||||
|
||||
/* Return the current PC from CPU, which may be cached in TB. */
static inline vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
{
    if (tb_cflags(tb) & CF_PCREL) {
        return cpu->cc->get_pc(cpu);
    } else {
        return tb->pc;
    }
}

/**
 * tcg_req_mo:
 * @type: TCGBar
 *
 * Filter @type to the barrier that is required for the guest
 * memory ordering vs the host memory ordering.  A non-zero
 * result indicates that some barrier is required.
 *
 * If TCG_GUEST_DEFAULT_MO is not defined, assume that the
 * guest requires strict ordering.
 *
 * This is a macro so that it's constant even without optimization.
 */
#ifdef TCG_GUEST_DEFAULT_MO
# define tcg_req_mo(type) \
    ((type) & TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO)
#else
# define tcg_req_mo(type) ((type) & ~TCG_TARGET_DEFAULT_MO)
#endif

/**
 * cpu_req_mo:
 * @type: TCGBar
 *
 * If tcg_req_mo indicates a barrier for @type is required
 * for the guest memory model, issue a host memory barrier.
 */
#define cpu_req_mo(type)            \
    do {                            \
        if (tcg_req_mo(type)) {     \
            smp_mb();               \
        }                           \
    } while (0)

#endif /* ACCEL_TCG_INTERNAL_H */
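As a usage illustration of cpu_req_mo() above: a guest load path would typically request the barriers that must precede a load, expressed with the TCG_MO_* bits of TCGBar, before touching memory. The sketch below is illustrative and not a quote from cputlb.c; the helper it calls is assumed:

/* Illustrative only: how a load path might use cpu_req_mo(). */
static uint32_t guest_ldl_sketch(CPUArchState *env, vaddr addr, uintptr_t ra)
{
    /*
     * Order this load against earlier guest loads and stores when the
     * guest memory model is stronger than the host's; otherwise the
     * macro expands to nothing.
     */
    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    return do_guest_load_4(env, addr, ra);   /* assumed helper */
}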
accel/tcg/internal.h (new file, 70 lines)
@@ -0,0 +1,70 @@
/*
|
||||
* Internal execution defines for qemu
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* SPDX-License-Identifier: LGPL-2.1-or-later
|
||||
*/
|
||||
|
||||
#ifndef ACCEL_TCG_INTERNAL_H
|
||||
#define ACCEL_TCG_INTERNAL_H
|
||||
|
||||
#include "exec/exec-all.h"
|
||||
|
||||
/*
|
||||
* Access to the various translations structures need to be serialised
|
||||
* via locks for consistency. In user-mode emulation access to the
|
||||
* memory related structures are protected with mmap_lock.
|
||||
* In !user-mode we use per-page locks.
|
||||
*/
|
||||
#ifdef CONFIG_SOFTMMU
|
||||
#define assert_memory_lock()
|
||||
#else
|
||||
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG)
|
||||
void assert_no_pages_locked(void);
|
||||
#else
|
||||
static inline void assert_no_pages_locked(void) { }
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
static inline void page_table_config_init(void) { }
|
||||
#else
|
||||
void page_table_config_init(void);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SOFTMMU
|
||||
void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
|
||||
unsigned size,
|
||||
uintptr_t retaddr);
|
||||
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
|
||||
#endif /* CONFIG_SOFTMMU */
|
||||
|
||||
TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc,
|
||||
target_ulong cs_base, uint32_t flags,
|
||||
int cflags);
|
||||
void page_init(void);
|
||||
void tb_htable_init(void);
|
||||
void tb_reset_jump(TranslationBlock *tb, int n);
|
||||
TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
|
||||
tb_page_addr_t phys_page2);
|
||||
bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
|
||||
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
|
||||
uintptr_t host_pc);
|
||||
|
||||
/* Return the current PC from CPU, which may be cached in TB. */
|
||||
static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
|
||||
{
|
||||
if (tb_cflags(tb) & CF_PCREL) {
|
||||
return cpu->cc->get_pc(cpu);
|
||||
} else {
|
||||
return tb->pc;
|
||||
}
|
||||
}
|
||||
|
||||
extern int64_t max_delay;
|
||||
extern int64_t max_advance;
|
||||
|
||||
#endif /* ACCEL_TCG_INTERNAL_H */
|
File diff suppressed because it is too large
@@ -8,245 +8,6 @@
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*/
|
||||
/*
|
||||
* Load helpers for tcg-ldst.h
|
||||
*/
|
||||
|
||||
tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
|
||||
return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
|
||||
}
|
||||
|
||||
tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
|
||||
return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
|
||||
}
|
||||
|
||||
tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
|
||||
return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
|
||||
}
|
||||
|
||||
uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
|
||||
return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
|
||||
}
|
||||
|
||||
/*
|
||||
* Provide signed versions of the load routines as well. We can of course
|
||||
* avoid this for 64-bit data, or for 32-bit data on 32-bit host.
|
||||
*/
|
||||
|
||||
tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
return (int8_t)helper_ldub_mmu(env, addr, oi, retaddr);
|
||||
}
|
||||
|
||||
tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
return (int16_t)helper_lduw_mmu(env, addr, oi, retaddr);
|
||||
}
|
||||
|
||||
tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr);
|
||||
}
|
||||
|
||||
Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
|
||||
return do_ld16_mmu(env_cpu(env), addr, oi, retaddr);
|
||||
}
|
||||
|
||||
Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, uint32_t oi)
|
||||
{
|
||||
return helper_ld16_mmu(env, addr, oi, GETPC());
|
||||
}
|
||||
|
||||
/*
|
||||
* Store helpers for tcg-ldst.h
|
||||
*/
|
||||
|
||||
void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
|
||||
do_st1_mmu(env_cpu(env), addr, val, oi, ra);
|
||||
}
|
||||
|
||||
void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
|
||||
do_st2_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
}
|
||||
|
||||
void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
|
||||
do_st4_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
}
|
||||
|
||||
void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
|
||||
do_st8_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
}
|
||||
|
||||
void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
|
||||
do_st16_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
}
|
||||
|
||||
void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
|
||||
{
|
||||
helper_st16_mmu(env, addr, val, oi, GETPC());
|
||||
}
|
||||
|
||||
/*
|
||||
* Load helpers for cpu_ldst.h
|
||||
*/
|
||||
|
||||
static void plugin_load_cb(CPUArchState *env, abi_ptr addr,
|
||||
uint64_t value_low,
|
||||
uint64_t value_high,
|
||||
MemOpIdx oi)
|
||||
{
|
||||
if (cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr,
|
||||
value_low, value_high,
|
||||
oi, QEMU_PLUGIN_MEM_R);
|
||||
}
|
||||
}
|
||||
|
||||
uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
uint8_t ret;
|
||||
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB);
|
||||
ret = do_ld1_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
|
||||
plugin_load_cb(env, addr, ret, 0, oi);
|
||||
return ret;
|
||||
}
|
||||
|
||||
uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
uint16_t ret;
|
||||
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
|
||||
ret = do_ld2_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
|
||||
plugin_load_cb(env, addr, ret, 0, oi);
|
||||
return ret;
|
||||
}
|
||||
|
||||
uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
uint32_t ret;
|
||||
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
|
||||
ret = do_ld4_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
|
||||
plugin_load_cb(env, addr, ret, 0, oi);
|
||||
return ret;
|
||||
}
|
||||
|
||||
uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
uint64_t ret;
|
||||
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
|
||||
ret = do_ld8_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
|
||||
plugin_load_cb(env, addr, ret, 0, oi);
|
||||
return ret;
|
||||
}
|
||||
|
||||
Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    Int128 ret;

    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
    ret = do_ld16_mmu(env_cpu(env), addr, oi, ra);
    plugin_load_cb(env, addr, int128_getlo(ret), int128_gethi(ret), oi);
    return ret;
}

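For readers unfamiliar with these entry points, the loaders above are driven by a MemOp plus an MMU index packed into a MemOpIdx via make_memop_idx(). A hedged example of calling cpu_ldl_mmu() from target helper code might look like this; the surrounding function and the choice of MMU index are assumptions, not part of this patch:

/* Illustrative caller (not part of this patch). */
static uint32_t load_guest_word_sketch(CPUArchState *env, abi_ptr addr,
                                       int mmu_idx, uintptr_t ra)
{
    /* 32-bit little-endian access, unaligned permitted, for mmu_idx. */
    MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
    return cpu_ldl_mmu(env, addr, oi, ra);
}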
|
||||
/*
|
||||
* Store helpers for cpu_ldst.h
|
||||
*/
|
||||
|
||||
static void plugin_store_cb(CPUArchState *env, abi_ptr addr,
|
||||
uint64_t value_low,
|
||||
uint64_t value_high,
|
||||
MemOpIdx oi)
|
||||
{
|
||||
if (cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr,
|
||||
value_low, value_high,
|
||||
oi, QEMU_PLUGIN_MEM_W);
|
||||
}
|
||||
}
|
||||
|
||||
void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
helper_stb_mmu(env, addr, val, oi, retaddr);
|
||||
plugin_store_cb(env, addr, val, 0, oi);
|
||||
}
|
||||
|
||||
void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
|
||||
do_st2_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
plugin_store_cb(env, addr, val, 0, oi);
|
||||
}
|
||||
|
||||
void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
|
||||
do_st4_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
plugin_store_cb(env, addr, val, 0, oi);
|
||||
}
|
||||
|
||||
void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
|
||||
do_st8_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
plugin_store_cb(env, addr, val, 0, oi);
|
||||
}
|
||||
|
||||
void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
|
||||
do_st16_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
plugin_store_cb(env, addr, int128_getlo(val), int128_gethi(val), oi);
|
||||
}
|
||||
|
||||
/*
|
||||
* Wrappers of the above
|
||||
*/
|
||||
|
||||
uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
@@ -265,7 +26,7 @@ uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
|
||||
return cpu_ldw_mmu(env, addr, oi, ra);
|
||||
return cpu_ldw_be_mmu(env, addr, oi, ra);
|
||||
}
|
||||
|
||||
int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
@@ -278,21 +39,21 @@ uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
|
||||
return cpu_ldl_mmu(env, addr, oi, ra);
|
||||
return cpu_ldl_be_mmu(env, addr, oi, ra);
|
||||
}
|
||||
|
||||
uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
|
||||
return cpu_ldq_mmu(env, addr, oi, ra);
|
||||
return cpu_ldq_be_mmu(env, addr, oi, ra);
|
||||
}
|
||||
|
||||
uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
|
||||
return cpu_ldw_mmu(env, addr, oi, ra);
|
||||
return cpu_ldw_le_mmu(env, addr, oi, ra);
|
||||
}
|
||||
|
||||
int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
@@ -305,14 +66,14 @@ uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
|
||||
return cpu_ldl_mmu(env, addr, oi, ra);
|
||||
return cpu_ldl_le_mmu(env, addr, oi, ra);
|
||||
}
|
||||
|
||||
uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
|
||||
return cpu_ldq_mmu(env, addr, oi, ra);
|
||||
return cpu_ldq_le_mmu(env, addr, oi, ra);
|
||||
}
|
||||
|
||||
void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
|
||||
@@ -326,50 +87,49 @@ void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
|
||||
cpu_stw_mmu(env, addr, val, oi, ra);
|
||||
cpu_stw_be_mmu(env, addr, val, oi, ra);
|
||||
}
|
||||
|
||||
void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
|
||||
cpu_stl_mmu(env, addr, val, oi, ra);
|
||||
cpu_stl_be_mmu(env, addr, val, oi, ra);
|
||||
}
|
||||
|
||||
void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
|
||||
cpu_stq_mmu(env, addr, val, oi, ra);
|
||||
cpu_stq_be_mmu(env, addr, val, oi, ra);
|
||||
}
|
||||
|
||||
void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
|
||||
cpu_stw_mmu(env, addr, val, oi, ra);
|
||||
cpu_stw_le_mmu(env, addr, val, oi, ra);
|
||||
}
|
||||
|
||||
void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
|
||||
cpu_stl_mmu(env, addr, val, oi, ra);
|
||||
cpu_stl_le_mmu(env, addr, val, oi, ra);
|
||||
}
|
||||
|
||||
void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
|
||||
cpu_stq_mmu(env, addr, val, oi, ra);
|
||||
cpu_stq_le_mmu(env, addr, val, oi, ra);
|
||||
}
|
||||
|
||||
/*--------------------------*/
|
||||
|
||||
uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
return cpu_ldub_mmuidx_ra(env, addr, mmu_index, ra);
|
||||
return cpu_ldub_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
@@ -379,8 +139,7 @@ int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
|
||||
uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
return cpu_lduw_be_mmuidx_ra(env, addr, mmu_index, ra);
|
||||
return cpu_lduw_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
@@ -390,20 +149,17 @@ int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
|
||||
uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
return cpu_ldl_be_mmuidx_ra(env, addr, mmu_index, ra);
|
||||
return cpu_ldl_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
return cpu_ldq_be_mmuidx_ra(env, addr, mmu_index, ra);
|
||||
return cpu_ldq_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
return cpu_lduw_le_mmuidx_ra(env, addr, mmu_index, ra);
|
||||
return cpu_lduw_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
@@ -413,63 +169,54 @@ int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
|
||||
uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
return cpu_ldl_le_mmuidx_ra(env, addr, mmu_index, ra);
|
||||
return cpu_ldl_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
return cpu_ldq_le_mmuidx_ra(env, addr, mmu_index, ra);
|
||||
return cpu_ldq_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
void cpu_stb_data_ra(CPUArchState *env, abi_ptr addr,
|
||||
uint32_t val, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
cpu_stb_mmuidx_ra(env, addr, val, mmu_index, ra);
|
||||
cpu_stb_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr addr,
|
||||
uint32_t val, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
cpu_stw_be_mmuidx_ra(env, addr, val, mmu_index, ra);
|
||||
cpu_stw_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr addr,
|
||||
uint32_t val, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
cpu_stl_be_mmuidx_ra(env, addr, val, mmu_index, ra);
|
||||
cpu_stl_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr addr,
|
||||
uint64_t val, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
cpu_stq_be_mmuidx_ra(env, addr, val, mmu_index, ra);
|
||||
cpu_stq_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr addr,
|
||||
uint32_t val, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
cpu_stw_le_mmuidx_ra(env, addr, val, mmu_index, ra);
|
||||
cpu_stw_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr addr,
|
||||
uint32_t val, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
cpu_stl_le_mmuidx_ra(env, addr, val, mmu_index, ra);
|
||||
cpu_stl_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr addr,
|
||||
uint64_t val, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
cpu_stq_le_mmuidx_ra(env, addr, val, mmu_index, ra);
|
||||
cpu_stq_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
|
||||
}
|
||||
|
||||
/*--------------------------*/
|
||||
|
@@ -1,9 +1,7 @@
|
||||
common_ss.add(when: 'CONFIG_TCG', if_true: files(
|
||||
'cpu-exec-common.c',
|
||||
))
|
||||
tcg_specific_ss = ss.source_set()
|
||||
tcg_specific_ss.add(files(
|
||||
tcg_ss = ss.source_set()
|
||||
tcg_ss.add(files(
|
||||
'tcg-all.c',
|
||||
'cpu-exec-common.c',
|
||||
'cpu-exec.c',
|
||||
'tb-maint.c',
|
||||
'tcg-runtime-gvec.c',
|
||||
@@ -11,24 +9,19 @@ tcg_specific_ss.add(files(
|
||||
'translate-all.c',
|
||||
'translator.c',
|
||||
))
|
||||
tcg_specific_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-exec.c'))
|
||||
tcg_specific_ss.add(when: 'CONFIG_SYSTEM_ONLY', if_false: files('user-exec-stub.c'))
|
||||
if get_option('plugins')
|
||||
tcg_specific_ss.add(files('plugin-gen.c'))
|
||||
endif
|
||||
specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_specific_ss)
|
||||
tcg_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-exec.c'))
|
||||
tcg_ss.add(when: 'CONFIG_SOFTMMU', if_false: files('user-exec-stub.c'))
|
||||
tcg_ss.add(when: 'CONFIG_PLUGIN', if_true: [files('plugin-gen.c')])
|
||||
tcg_ss.add(when: libdw, if_true: files('debuginfo.c'))
|
||||
tcg_ss.add(when: 'CONFIG_LINUX', if_true: files('perf.c'))
|
||||
specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss)
|
||||
|
||||
specific_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files(
|
||||
specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files(
|
||||
'cputlb.c',
|
||||
'watchpoint.c',
|
||||
))
|
||||
|
||||
system_ss.add(when: ['CONFIG_TCG'], if_true: files(
|
||||
'icount-common.c',
|
||||
'monitor.c',
|
||||
))
|
||||
|
||||
tcg_module_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files(
|
||||
tcg_module_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files(
|
||||
'tcg-accel-ops.c',
|
||||
'tcg-accel-ops-mttcg.c',
|
||||
'tcg-accel-ops-icount.c',
|
||||
|
@@ -7,8 +7,6 @@
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/accel.h"
|
||||
#include "qemu/qht.h"
|
||||
#include "qapi/error.h"
|
||||
#include "qapi/type-helpers.h"
|
||||
#include "qapi/qapi-commands-machine.h"
|
||||
@@ -16,9 +14,7 @@
|
||||
#include "sysemu/cpus.h"
|
||||
#include "sysemu/cpu-timers.h"
|
||||
#include "sysemu/tcg.h"
|
||||
#include "tcg/tcg.h"
|
||||
#include "internal-common.h"
|
||||
#include "tb-context.h"
|
||||
#include "internal.h"
|
||||
|
||||
|
||||
static void dump_drift_info(GString *buf)
|
||||
@@ -40,165 +36,6 @@ static void dump_drift_info(GString *buf)
|
||||
}
|
||||
}
|
||||
|
||||
static void dump_accel_info(GString *buf)
|
||||
{
|
||||
AccelState *accel = current_accel();
|
||||
bool one_insn_per_tb = object_property_get_bool(OBJECT(accel),
|
||||
"one-insn-per-tb",
|
||||
&error_fatal);
|
||||
|
||||
g_string_append_printf(buf, "Accelerator settings:\n");
|
||||
g_string_append_printf(buf, "one-insn-per-tb: %s\n\n",
|
||||
one_insn_per_tb ? "on" : "off");
|
||||
}
|
||||
|
||||
static void print_qht_statistics(struct qht_stats hst, GString *buf)
|
||||
{
|
||||
uint32_t hgram_opts;
|
||||
size_t hgram_bins;
|
||||
char *hgram;
|
||||
|
||||
if (!hst.head_buckets) {
|
||||
return;
|
||||
}
|
||||
g_string_append_printf(buf, "TB hash buckets %zu/%zu "
|
||||
"(%0.2f%% head buckets used)\n",
|
||||
hst.used_head_buckets, hst.head_buckets,
|
||||
(double)hst.used_head_buckets /
|
||||
hst.head_buckets * 100);
|
||||
|
||||
hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
|
||||
hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
|
||||
if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
|
||||
hgram_opts |= QDIST_PR_NODECIMAL;
|
||||
}
|
||||
hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
|
||||
g_string_append_printf(buf, "TB hash occupancy %0.2f%% avg chain occ. "
|
||||
"Histogram: %s\n",
|
||||
qdist_avg(&hst.occupancy) * 100, hgram);
|
||||
g_free(hgram);
|
||||
|
||||
hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
|
||||
hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
|
||||
if (hgram_bins > 10) {
|
||||
hgram_bins = 10;
|
||||
} else {
|
||||
hgram_bins = 0;
|
||||
hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
|
||||
}
|
||||
hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
|
||||
g_string_append_printf(buf, "TB hash avg chain %0.3f buckets. "
|
||||
"Histogram: %s\n",
|
||||
qdist_avg(&hst.chain), hgram);
|
||||
g_free(hgram);
|
||||
}
|
||||
|
||||
struct tb_tree_stats {
|
||||
size_t nb_tbs;
|
||||
size_t host_size;
|
||||
size_t target_size;
|
||||
size_t max_target_size;
|
||||
size_t direct_jmp_count;
|
||||
size_t direct_jmp2_count;
|
||||
size_t cross_page;
|
||||
};
|
||||
|
||||
static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
|
||||
{
|
||||
const TranslationBlock *tb = value;
|
||||
struct tb_tree_stats *tst = data;
|
||||
|
||||
tst->nb_tbs++;
|
||||
tst->host_size += tb->tc.size;
|
||||
tst->target_size += tb->size;
|
||||
if (tb->size > tst->max_target_size) {
|
||||
tst->max_target_size = tb->size;
|
||||
}
|
||||
if (tb->page_addr[1] != -1) {
|
||||
tst->cross_page++;
|
||||
}
|
||||
if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {
|
||||
tst->direct_jmp_count++;
|
||||
if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
|
||||
tst->direct_jmp2_count++;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
|
||||
{
|
||||
CPUState *cpu;
|
||||
size_t full = 0, part = 0, elide = 0;
|
||||
|
||||
CPU_FOREACH(cpu) {
|
||||
full += qatomic_read(&cpu->neg.tlb.c.full_flush_count);
|
||||
part += qatomic_read(&cpu->neg.tlb.c.part_flush_count);
|
||||
elide += qatomic_read(&cpu->neg.tlb.c.elide_flush_count);
|
||||
}
|
||||
*pfull = full;
|
||||
*ppart = part;
|
||||
*pelide = elide;
|
||||
}
|
||||
|
||||
static void tcg_dump_info(GString *buf)
|
||||
{
|
||||
g_string_append_printf(buf, "[TCG profiler not compiled]\n");
|
||||
}
|
||||
|
||||
static void dump_exec_info(GString *buf)
|
||||
{
|
||||
struct tb_tree_stats tst = {};
|
||||
struct qht_stats hst;
|
||||
size_t nb_tbs, flush_full, flush_part, flush_elide;
|
||||
|
||||
tcg_tb_foreach(tb_tree_stats_iter, &tst);
|
||||
nb_tbs = tst.nb_tbs;
|
||||
/* XXX: avoid using doubles ? */
|
||||
g_string_append_printf(buf, "Translation buffer state:\n");
|
||||
/*
|
||||
* Report total code size including the padding and TB structs;
|
||||
* otherwise users might think "-accel tcg,tb-size" is not honoured.
|
||||
* For avg host size we use the precise numbers from tb_tree_stats though.
|
||||
*/
|
||||
g_string_append_printf(buf, "gen code size %zu/%zu\n",
|
||||
tcg_code_size(), tcg_code_capacity());
|
||||
g_string_append_printf(buf, "TB count %zu\n", nb_tbs);
|
||||
g_string_append_printf(buf, "TB avg target size %zu max=%zu bytes\n",
|
||||
nb_tbs ? tst.target_size / nb_tbs : 0,
|
||||
tst.max_target_size);
|
||||
g_string_append_printf(buf, "TB avg host size %zu bytes "
|
||||
"(expansion ratio: %0.1f)\n",
|
||||
nb_tbs ? tst.host_size / nb_tbs : 0,
|
||||
tst.target_size ?
|
||||
(double)tst.host_size / tst.target_size : 0);
|
||||
g_string_append_printf(buf, "cross page TB count %zu (%zu%%)\n",
|
||||
tst.cross_page,
|
||||
nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
|
||||
g_string_append_printf(buf, "direct jump count %zu (%zu%%) "
|
||||
"(2 jumps=%zu %zu%%)\n",
|
||||
tst.direct_jmp_count,
|
||||
nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
|
||||
tst.direct_jmp2_count,
|
||||
nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);
|
||||
|
||||
qht_statistics_init(&tb_ctx.htable, &hst);
|
||||
print_qht_statistics(hst, buf);
|
||||
qht_statistics_destroy(&hst);
|
||||
|
||||
g_string_append_printf(buf, "\nStatistics:\n");
|
||||
g_string_append_printf(buf, "TB flush count %u\n",
|
||||
qatomic_read(&tb_ctx.tb_flush_count));
|
||||
g_string_append_printf(buf, "TB invalidate count %u\n",
|
||||
qatomic_read(&tb_ctx.tb_phys_invalidate_count));
|
||||
|
||||
tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
|
||||
g_string_append_printf(buf, "TLB full flushes %zu\n", flush_full);
|
||||
g_string_append_printf(buf, "TLB partial flushes %zu\n", flush_part);
|
||||
g_string_append_printf(buf, "TLB elided flushes %zu\n", flush_elide);
|
||||
tcg_dump_info(buf);
|
||||
}
|
||||
|
||||
HumanReadableText *qmp_x_query_jit(Error **errp)
|
||||
{
|
||||
g_autoptr(GString) buf = g_string_new("");
|
||||
@@ -208,18 +45,12 @@ HumanReadableText *qmp_x_query_jit(Error **errp)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
dump_accel_info(buf);
|
||||
dump_exec_info(buf);
|
||||
dump_drift_info(buf);
|
||||
|
||||
return human_readable_text_from_str(buf);
|
||||
}
|
||||
|
||||
static void tcg_dump_op_count(GString *buf)
|
||||
{
|
||||
g_string_append_printf(buf, "[TCG profiler not compiled]\n");
|
||||
}
|
||||
|
||||
HumanReadableText *qmp_x_query_opcount(Error **errp)
|
||||
{
|
||||
g_autoptr(GString) buf = g_string_new("");
|
||||
@@ -235,6 +66,37 @@ HumanReadableText *qmp_x_query_opcount(Error **errp)
|
||||
return human_readable_text_from_str(buf);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PROFILER
|
||||
|
||||
int64_t dev_time;
|
||||
|
||||
HumanReadableText *qmp_x_query_profile(Error **errp)
|
||||
{
|
||||
g_autoptr(GString) buf = g_string_new("");
|
||||
static int64_t last_cpu_exec_time;
|
||||
int64_t cpu_exec_time;
|
||||
int64_t delta;
|
||||
|
||||
cpu_exec_time = tcg_cpu_exec_time();
|
||||
delta = cpu_exec_time - last_cpu_exec_time;
|
||||
|
||||
g_string_append_printf(buf, "async time %" PRId64 " (%0.3f)\n",
|
||||
dev_time, dev_time / (double)NANOSECONDS_PER_SECOND);
|
||||
g_string_append_printf(buf, "qemu time %" PRId64 " (%0.3f)\n",
|
||||
delta, delta / (double)NANOSECONDS_PER_SECOND);
|
||||
last_cpu_exec_time = cpu_exec_time;
|
||||
dev_time = 0;
|
||||
|
||||
return human_readable_text_from_str(buf);
|
||||
}
|
||||
#else
|
||||
HumanReadableText *qmp_x_query_profile(Error **errp)
|
||||
{
|
||||
error_setg(errp, "Internal profiler not compiled");
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
static void hmp_tcg_register(void)
|
||||
{
|
||||
monitor_register_hmp_info_hrt("jit", qmp_x_query_jit);
|
||||
|
@@ -10,13 +10,13 @@
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "elf.h"
|
||||
#include "exec/target_page.h"
|
||||
#include "exec/translation-block.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "qemu/timer.h"
|
||||
#include "tcg/debuginfo.h"
|
||||
#include "tcg/perf.h"
|
||||
#include "tcg/tcg.h"
|
||||
|
||||
#include "debuginfo.h"
|
||||
#include "perf.h"
|
||||
|
||||
static FILE *safe_fopen_w(const char *path)
|
||||
{
|
||||
int saved_errno;
|
||||
@@ -111,8 +111,6 @@ static void write_perfmap_entry(const void *start, size_t insn,
|
||||
}
|
||||
|
||||
static FILE *jitdump;
|
||||
static size_t perf_marker_size;
|
||||
static void *perf_marker = MAP_FAILED;
|
||||
|
||||
#define JITHEADER_MAGIC 0x4A695444
|
||||
#define JITHEADER_VERSION 1
|
||||
@@ -192,6 +190,7 @@ void perf_enable_jitdump(void)
|
||||
{
|
||||
struct jitheader header;
|
||||
char jitdump_file[32];
|
||||
void *perf_marker;
|
||||
|
||||
if (!use_rt_clock) {
|
||||
warn_report("CLOCK_MONOTONIC is not available, proceeding without jitdump");
|
||||
@@ -211,8 +210,7 @@ void perf_enable_jitdump(void)
|
||||
* PERF_RECORD_MMAP or PERF_RECORD_MMAP2 event is of the form jit-%d.dump
|
||||
* and will process it as a jitdump file.
|
||||
*/
|
||||
perf_marker_size = qemu_real_host_page_size();
|
||||
perf_marker = mmap(NULL, perf_marker_size, PROT_READ | PROT_EXEC,
|
||||
perf_marker = mmap(NULL, qemu_real_host_page_size(), PROT_READ | PROT_EXEC,
|
||||
MAP_PRIVATE, fileno(jitdump), 0);
|
||||
if (perf_marker == MAP_FAILED) {
|
||||
warn_report("Could not map %s: %s, proceeding without jitdump",
|
||||
@@ -313,8 +311,7 @@ void perf_report_code(uint64_t guest_pc, TranslationBlock *tb,
|
||||
const void *start)
|
||||
{
|
||||
struct debuginfo_query *q;
|
||||
size_t insn, start_words;
|
||||
uint64_t *gen_insn_data;
|
||||
size_t insn;
|
||||
|
||||
if (!perfmap && !jitdump) {
|
||||
return;
|
||||
@@ -328,14 +325,15 @@ void perf_report_code(uint64_t guest_pc, TranslationBlock *tb,
|
||||
debuginfo_lock();
|
||||
|
||||
/* Query debuginfo for each guest instruction. */
|
||||
gen_insn_data = tcg_ctx->gen_insn_data;
|
||||
start_words = tcg_ctx->insn_start_words;
|
||||
|
||||
for (insn = 0; insn < tb->icount; insn++) {
|
||||
/* FIXME: This replicates the restore_state_to_opc() logic. */
|
||||
q[insn].address = gen_insn_data[insn * start_words + 0];
|
||||
q[insn].address = tcg_ctx->gen_insn_data[insn][0];
|
||||
if (tb_cflags(tb) & CF_PCREL) {
|
||||
q[insn].address |= (guest_pc & qemu_target_page_mask());
|
||||
q[insn].address |= (guest_pc & TARGET_PAGE_MASK);
|
||||
} else {
|
||||
#if defined(TARGET_I386)
|
||||
q[insn].address -= tb->cs_base;
|
||||
#endif
|
||||
}
|
||||
q[insn].flags = DEBUGINFO_SYMBOL | (jitdump ? DEBUGINFO_LINE : 0);
|
||||
}
|
||||
@@ -370,11 +368,6 @@ void perf_exit(void)
|
||||
perfmap = NULL;
|
||||
}
|
||||
|
||||
if (perf_marker != MAP_FAILED) {
|
||||
munmap(perf_marker, perf_marker_size);
|
||||
perf_marker = MAP_FAILED;
|
||||
}
|
||||
|
||||
if (jitdump) {
|
||||
fclose(jitdump);
|
||||
jitdump = NULL;
|
@@ -4,8 +4,8 @@
|
||||
* SPDX-License-Identifier: GPL-2.0-or-later
|
||||
*/
|
||||
|
||||
#ifndef TCG_PERF_H
|
||||
#define TCG_PERF_H
|
||||
#ifndef ACCEL_TCG_PERF_H
|
||||
#define ACCEL_TCG_PERF_H
|
||||
|
||||
#if defined(CONFIG_TCG) && defined(CONFIG_LINUX)
|
||||
/* Start writing perf-<pid>.map. */
|
File diff suppressed because it is too large
accel/tcg/plugin-helpers.h (new file, 4 lines)
@@ -0,0 +1,4 @@
|
||||
#ifdef CONFIG_PLUGIN
|
||||
DEF_HELPER_FLAGS_2(plugin_vcpu_udata_cb, TCG_CALL_NO_RWG | TCG_CALL_PLUGIN, void, i32, ptr)
|
||||
DEF_HELPER_FLAGS_4(plugin_vcpu_mem_cb, TCG_CALL_NO_RWG | TCG_CALL_PLUGIN, void, i32, i32, i64, ptr)
|
||||
#endif
|
@@ -35,16 +35,16 @@
|
||||
#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
|
||||
#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
|
||||
|
||||
static inline unsigned int tb_jmp_cache_hash_page(vaddr pc)
|
||||
static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
|
||||
{
|
||||
vaddr tmp;
|
||||
target_ulong tmp;
|
||||
tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
|
||||
return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
|
||||
}
|
||||
|
||||
static inline unsigned int tb_jmp_cache_hash_func(vaddr pc)
|
||||
static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
|
||||
{
|
||||
vaddr tmp;
|
||||
target_ulong tmp;
|
||||
tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
|
||||
return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
|
||||
| (tmp & TB_JMP_ADDR_MASK));
|
||||
@@ -53,7 +53,7 @@ static inline unsigned int tb_jmp_cache_hash_func(vaddr pc)
|
||||
#else
|
||||
|
||||
/* In user-mode we can get better hashing because we do not have a TLB */
|
||||
static inline unsigned int tb_jmp_cache_hash_func(vaddr pc)
|
||||
static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
|
||||
{
|
||||
return (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1);
|
||||
}
|
||||
@@ -61,10 +61,10 @@ static inline unsigned int tb_jmp_cache_hash_func(vaddr pc)
|
||||
#endif /* CONFIG_SOFTMMU */
|
||||
|
||||
static inline
|
||||
uint32_t tb_hash_func(tb_page_addr_t phys_pc, vaddr pc,
|
||||
uint32_t flags, uint64_t flags2, uint32_t cf_mask)
|
||||
uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc, uint32_t flags,
|
||||
uint32_t cf_mask, uint32_t trace_vcpu_dstate)
|
||||
{
|
||||
return qemu_xxhash8(phys_pc, pc, flags2, flags, cf_mask);
|
||||
return qemu_xxhash7(phys_pc, pc, flags, cf_mask, trace_vcpu_dstate);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
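For context, a minimal sketch of how a lookup-side caller computes the same hash with the new tb_hash_func() signature: cs_base rides in the 64-bit flags2 slot and trace_vcpu_dstate is no longer an input. Illustrative only, not part of this diff; the helper name example_tb_lookup_hash is invented, and the argument order mirrors the tb-maint.c call sites further down.

    static inline uint32_t example_tb_lookup_hash(const TranslationBlock *tb)
    {
        /* pc contributes 0 for PC-relative TBs, exactly as at the call sites. */
        return tb_hash_func(tb_page_addr0(tb),
                            (tb_cflags(tb) & CF_PCREL ? 0 : tb->pc),
                            tb->flags, tb->cs_base, tb_cflags(tb));
    }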
@@ -9,25 +9,20 @@
|
||||
#ifndef ACCEL_TCG_TB_JMP_CACHE_H
|
||||
#define ACCEL_TCG_TB_JMP_CACHE_H
|
||||
|
||||
#include "qemu/rcu.h"
|
||||
#include "exec/cpu-common.h"
|
||||
|
||||
#define TB_JMP_CACHE_BITS 12
|
||||
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
|
||||
|
||||
/*
|
||||
* Invalidated in parallel; all accesses to 'tb' must be atomic.
|
||||
* A valid entry is read/written by a single CPU, therefore there is
|
||||
* no need for qatomic_rcu_read() and pc is always consistent with a
|
||||
* non-NULL value of 'tb'. Strictly speaking pc is only needed for
|
||||
* CF_PCREL, but it's used always for simplicity.
|
||||
* Accessed in parallel; all accesses to 'tb' must be atomic.
|
||||
* For CF_PCREL, accesses to 'pc' must be protected by a
|
||||
* load_acquire/store_release to 'tb'.
|
||||
*/
|
||||
typedef struct CPUJumpCache {
|
||||
struct CPUJumpCache {
|
||||
struct rcu_head rcu;
|
||||
struct {
|
||||
TranslationBlock *tb;
|
||||
vaddr pc;
|
||||
target_ulong pc;
|
||||
} array[TB_JMP_CACHE_SIZE];
|
||||
} CPUJumpCache;
|
||||
};
|
||||
|
||||
#endif /* ACCEL_TCG_TB_JMP_CACHE_H */
|
||||
|
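A minimal sketch of the load-acquire/store-release pairing that the older comment above describes for CF_PCREL entries. Illustrative only; the helper names jc_lookup/jc_insert are invented, and qatomic_load_acquire/qatomic_store_release are the primitives used elsewhere in this tree.

    static inline TranslationBlock *jc_lookup(CPUJumpCache *jc, unsigned int hash, vaddr pc)
    {
        /* Acquire pairs with the store_release in jc_insert(): a non-NULL
         * 'tb' guarantees the 'pc' written before it is visible. */
        TranslationBlock *tb = qatomic_load_acquire(&jc->array[hash].tb);
        return (tb && jc->array[hash].pc == pc) ? tb : NULL;
    }

    static inline void jc_insert(CPUJumpCache *jc, unsigned int hash,
                                 vaddr pc, TranslationBlock *tb)
    {
        jc->array[hash].pc = pc;
        /* Publish 'tb' only after 'pc' is in place. */
        qatomic_store_release(&jc->array[hash].tb, tb);
    }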
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Translation Block Maintenance
|
||||
* Translation Block Maintaince
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
@@ -23,15 +23,13 @@
|
||||
#include "exec/cputlb.h"
|
||||
#include "exec/log.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "exec/page-protection.h"
|
||||
#include "exec/tb-flush.h"
|
||||
#include "exec/translate-all.h"
|
||||
#include "sysemu/tcg.h"
|
||||
#include "tcg/tcg.h"
|
||||
#include "tb-hash.h"
|
||||
#include "tb-context.h"
|
||||
#include "internal-common.h"
|
||||
#include "internal-target.h"
|
||||
#include "internal.h"
|
||||
|
||||
|
||||
/* List iterators for lists of tagged pointers in TranslationBlock. */
|
||||
@@ -52,6 +50,7 @@ static bool tb_cmp(const void *ap, const void *bp)
|
||||
a->cs_base == b->cs_base &&
|
||||
a->flags == b->flags &&
|
||||
(tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
|
||||
a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
|
||||
tb_page_addr0(a) == tb_page_addr0(b) &&
|
||||
tb_page_addr1(a) == tb_page_addr1(b));
|
||||
}
|
||||
@@ -72,7 +71,17 @@ typedef struct PageDesc PageDesc;
|
||||
*/
|
||||
#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
|
||||
|
||||
static inline void tb_lock_pages(const TranslationBlock *tb) { }
|
||||
static inline void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
|
||||
PageDesc **ret_p2, tb_page_addr_t phys2,
|
||||
bool alloc)
|
||||
{
|
||||
*ret_p1 = NULL;
|
||||
*ret_p2 = NULL;
|
||||
}
|
||||
|
||||
static inline void page_unlock(PageDesc *pd) { }
|
||||
static inline void page_lock_tb(const TranslationBlock *tb) { }
|
||||
static inline void page_unlock_tb(const TranslationBlock *tb) { }
|
||||
|
||||
/*
|
||||
* For user-only, since we are protecting all of memory with a single lock,
|
||||
@@ -88,9 +97,9 @@ static void tb_remove_all(void)
|
||||
}
|
||||
|
||||
/* Call with mmap_lock held. */
|
||||
static void tb_record(TranslationBlock *tb)
|
||||
static void tb_record(TranslationBlock *tb, PageDesc *p1, PageDesc *p2)
|
||||
{
|
||||
vaddr addr;
|
||||
target_ulong addr;
|
||||
int flags;
|
||||
|
||||
assert_memory_lock();
|
||||
@@ -209,12 +218,13 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc)
|
||||
{
|
||||
PageDesc *pd;
|
||||
void **lp;
|
||||
int i;
|
||||
|
||||
/* Level 1. Always allocated. */
|
||||
lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
|
||||
|
||||
/* Level 2..N-1. */
|
||||
for (int i = v_l2_levels; i > 0; i--) {
|
||||
for (i = v_l2_levels; i > 0; i--) {
|
||||
void **p = qatomic_rcu_read(lp);
|
||||
|
||||
if (p == NULL) {
|
||||
@@ -382,108 +392,12 @@ static void page_lock(PageDesc *pd)
|
||||
qemu_spin_lock(&pd->lock);
|
||||
}
|
||||
|
||||
/* Like qemu_spin_trylock, returns false on success */
|
||||
static bool page_trylock(PageDesc *pd)
|
||||
{
|
||||
bool busy = qemu_spin_trylock(&pd->lock);
|
||||
if (!busy) {
|
||||
page_lock__debug(pd);
|
||||
}
|
||||
return busy;
|
||||
}
|
||||
|
||||
static void page_unlock(PageDesc *pd)
|
||||
{
|
||||
qemu_spin_unlock(&pd->lock);
|
||||
page_unlock__debug(pd);
|
||||
}
|
||||
|
||||
void tb_lock_page0(tb_page_addr_t paddr)
|
||||
{
|
||||
page_lock(page_find_alloc(paddr >> TARGET_PAGE_BITS, true));
|
||||
}
|
||||
|
||||
void tb_lock_page1(tb_page_addr_t paddr0, tb_page_addr_t paddr1)
|
||||
{
|
||||
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
|
||||
tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
|
||||
PageDesc *pd0, *pd1;
|
||||
|
||||
if (pindex0 == pindex1) {
|
||||
/* Identical pages, and the first page is already locked. */
|
||||
return;
|
||||
}
|
||||
|
||||
pd1 = page_find_alloc(pindex1, true);
|
||||
if (pindex0 < pindex1) {
|
||||
/* Correct locking order, we may block. */
|
||||
page_lock(pd1);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Incorrect locking order, we cannot block lest we deadlock. */
|
||||
if (!page_trylock(pd1)) {
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Drop the lock on page0 and get both page locks in the right order.
|
||||
* Restart translation via longjmp.
|
||||
*/
|
||||
pd0 = page_find_alloc(pindex0, false);
|
||||
page_unlock(pd0);
|
||||
page_lock(pd1);
|
||||
page_lock(pd0);
|
||||
siglongjmp(tcg_ctx->jmp_trans, -3);
|
||||
}
|
||||
|
||||
void tb_unlock_page1(tb_page_addr_t paddr0, tb_page_addr_t paddr1)
|
||||
{
|
||||
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
|
||||
tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
|
||||
|
||||
if (pindex0 != pindex1) {
|
||||
page_unlock(page_find_alloc(pindex1, false));
|
||||
}
|
||||
}
|
||||
|
||||
static void tb_lock_pages(TranslationBlock *tb)
|
||||
{
|
||||
tb_page_addr_t paddr0 = tb_page_addr0(tb);
|
||||
tb_page_addr_t paddr1 = tb_page_addr1(tb);
|
||||
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
|
||||
tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
|
||||
|
||||
if (unlikely(paddr0 == -1)) {
|
||||
return;
|
||||
}
|
||||
if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
|
||||
if (pindex0 < pindex1) {
|
||||
page_lock(page_find_alloc(pindex0, true));
|
||||
page_lock(page_find_alloc(pindex1, true));
|
||||
return;
|
||||
}
|
||||
page_lock(page_find_alloc(pindex1, true));
|
||||
}
|
||||
page_lock(page_find_alloc(pindex0, true));
|
||||
}
|
||||
|
||||
void tb_unlock_pages(TranslationBlock *tb)
|
||||
{
|
||||
tb_page_addr_t paddr0 = tb_page_addr0(tb);
|
||||
tb_page_addr_t paddr1 = tb_page_addr1(tb);
|
||||
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
|
||||
tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
|
||||
|
||||
if (unlikely(paddr0 == -1)) {
|
||||
return;
|
||||
}
|
||||
if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
|
||||
page_unlock(page_find_alloc(pindex1, false));
|
||||
}
|
||||
page_unlock(page_find_alloc(pindex0, false));
|
||||
}
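Putting the new page-lock API together, a rough usage sketch from the translator side. Illustrative only, not part of this diff; the function example_translate_one is invented, and the flow compresses what the tb_gen_code() hunk further down actually does, where a -3 siglongjmp from tb_lock_page1() restarts translation with both locks taken in address order.

    static void example_translate_one(TranslationBlock *tb, tb_page_addr_t phys_pc)
    {
        tb_lock_page0(phys_pc);           /* page0 stays locked for the whole translation */

        /* ... generate code; if an insn crosses into a second page ... */
        if (tb_page_addr1(tb) != -1) {
            tb_lock_page1(phys_pc, tb_page_addr1(tb));   /* may siglongjmp(-3) to restart */
        }

        /* ... insert the TB into the hash table, then release both pages. */
        tb_unlock_pages(tb);
    }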
|
||||
|
||||
static inline struct page_entry *
|
||||
page_entry_new(PageDesc *pd, tb_page_addr_t index)
|
||||
{
|
||||
@@ -507,10 +421,13 @@ static void page_entry_destroy(gpointer p)
|
||||
/* returns false on success */
|
||||
static bool page_entry_trylock(struct page_entry *pe)
|
||||
{
|
||||
bool busy = page_trylock(pe->pd);
|
||||
bool busy;
|
||||
|
||||
busy = qemu_spin_trylock(&pe->pd->lock);
|
||||
if (!busy) {
|
||||
g_assert(!pe->locked);
|
||||
pe->locked = true;
|
||||
page_lock__debug(pe->pd);
|
||||
}
|
||||
return busy;
|
||||
}
|
||||
@@ -688,7 +605,8 @@ static void tb_remove_all(void)
|
||||
* Add the tb in the target page and protect it if necessary.
|
||||
* Called with @p->lock held.
|
||||
*/
|
||||
static void tb_page_add(PageDesc *p, TranslationBlock *tb, unsigned int n)
|
||||
static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
|
||||
unsigned int n)
|
||||
{
|
||||
bool page_already_protected;
|
||||
|
||||
@@ -708,21 +626,15 @@ static void tb_page_add(PageDesc *p, TranslationBlock *tb, unsigned int n)
|
||||
}
|
||||
}
|
||||
|
||||
static void tb_record(TranslationBlock *tb)
|
||||
static void tb_record(TranslationBlock *tb, PageDesc *p1, PageDesc *p2)
|
||||
{
|
||||
tb_page_addr_t paddr0 = tb_page_addr0(tb);
|
||||
tb_page_addr_t paddr1 = tb_page_addr1(tb);
|
||||
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
|
||||
tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
|
||||
|
||||
assert(paddr0 != -1);
|
||||
if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
|
||||
tb_page_add(page_find_alloc(pindex1, false), tb, 1);
|
||||
tb_page_add(p1, tb, 0);
|
||||
if (unlikely(p2)) {
|
||||
tb_page_add(p2, tb, 1);
|
||||
}
|
||||
tb_page_add(page_find_alloc(pindex0, false), tb, 0);
|
||||
}
|
||||
|
||||
static void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
|
||||
static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
|
||||
{
|
||||
TranslationBlock *tb1;
|
||||
uintptr_t *pprev;
|
||||
@@ -742,16 +654,74 @@ static void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
|
||||
|
||||
static void tb_remove(TranslationBlock *tb)
|
||||
{
|
||||
tb_page_addr_t paddr0 = tb_page_addr0(tb);
|
||||
tb_page_addr_t paddr1 = tb_page_addr1(tb);
|
||||
tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
|
||||
tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
|
||||
PageDesc *pd;
|
||||
|
||||
assert(paddr0 != -1);
|
||||
if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
|
||||
tb_page_remove(page_find_alloc(pindex1, false), tb);
|
||||
pd = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
|
||||
tb_page_remove(pd, tb);
|
||||
if (unlikely(tb->page_addr[1] != -1)) {
|
||||
pd = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
|
||||
tb_page_remove(pd, tb);
|
||||
}
|
||||
}
|
||||
|
||||
static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
|
||||
PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc)
|
||||
{
|
||||
PageDesc *p1, *p2;
|
||||
tb_page_addr_t page1;
|
||||
tb_page_addr_t page2;
|
||||
|
||||
assert_memory_lock();
|
||||
g_assert(phys1 != -1);
|
||||
|
||||
page1 = phys1 >> TARGET_PAGE_BITS;
|
||||
page2 = phys2 >> TARGET_PAGE_BITS;
|
||||
|
||||
p1 = page_find_alloc(page1, alloc);
|
||||
if (ret_p1) {
|
||||
*ret_p1 = p1;
|
||||
}
|
||||
if (likely(phys2 == -1)) {
|
||||
page_lock(p1);
|
||||
return;
|
||||
} else if (page1 == page2) {
|
||||
page_lock(p1);
|
||||
if (ret_p2) {
|
||||
*ret_p2 = p1;
|
||||
}
|
||||
return;
|
||||
}
|
||||
p2 = page_find_alloc(page2, alloc);
|
||||
if (ret_p2) {
|
||||
*ret_p2 = p2;
|
||||
}
|
||||
if (page1 < page2) {
|
||||
page_lock(p1);
|
||||
page_lock(p2);
|
||||
} else {
|
||||
page_lock(p2);
|
||||
page_lock(p1);
|
||||
}
|
||||
}
|
||||
|
||||
/* lock the page(s) of a TB in the correct acquisition order */
|
||||
static void page_lock_tb(const TranslationBlock *tb)
|
||||
{
|
||||
page_lock_pair(NULL, tb_page_addr0(tb), NULL, tb_page_addr1(tb), false);
|
||||
}
|
||||
|
||||
static void page_unlock_tb(const TranslationBlock *tb)
|
||||
{
|
||||
PageDesc *p1 = page_find(tb_page_addr0(tb) >> TARGET_PAGE_BITS);
|
||||
|
||||
page_unlock(p1);
|
||||
if (unlikely(tb_page_addr1(tb) != -1)) {
|
||||
PageDesc *p2 = page_find(tb_page_addr1(tb) >> TARGET_PAGE_BITS);
|
||||
|
||||
if (p2 != p1) {
|
||||
page_unlock(p2);
|
||||
}
|
||||
}
|
||||
tb_page_remove(page_find_alloc(pindex0, false), tb);
|
||||
}
|
||||
#endif /* CONFIG_USER_ONLY */
|
||||
|
||||
@@ -776,7 +746,7 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
|
||||
|
||||
tcg_region_reset_all();
|
||||
/* XXX: flush processor icache at this point if cache flush is expensive */
|
||||
qatomic_inc(&tb_ctx.tb_flush_count);
|
||||
qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
|
||||
|
||||
done:
|
||||
mmap_unlock();
|
||||
@@ -788,9 +758,9 @@ done:
|
||||
void tb_flush(CPUState *cpu)
|
||||
{
|
||||
if (tcg_enabled()) {
|
||||
unsigned tb_flush_count = qatomic_read(&tb_ctx.tb_flush_count);
|
||||
unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);
|
||||
|
||||
if (cpu_in_serial_context(cpu)) {
|
||||
if (cpu_in_exclusive_context(cpu)) {
|
||||
do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
|
||||
} else {
|
||||
async_safe_run_on_cpu(cpu, do_tb_flush,
|
||||
@@ -918,7 +888,7 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
|
||||
/* remove the TB from the hash list */
|
||||
phys_pc = tb_page_addr0(tb);
|
||||
h = tb_hash_func(phys_pc, (orig_cflags & CF_PCREL ? 0 : tb->pc),
|
||||
tb->flags, tb->cs_base, orig_cflags);
|
||||
tb->flags, orig_cflags, tb->trace_vcpu_dstate);
|
||||
if (!qht_remove(&tb_ctx.htable, tb, h)) {
|
||||
return;
|
||||
}
|
||||
@@ -956,16 +926,18 @@ static void tb_phys_invalidate__locked(TranslationBlock *tb)
|
||||
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
|
||||
{
|
||||
if (page_addr == -1 && tb_page_addr0(tb) != -1) {
|
||||
tb_lock_pages(tb);
|
||||
page_lock_tb(tb);
|
||||
do_tb_phys_invalidate(tb, true);
|
||||
tb_unlock_pages(tb);
|
||||
page_unlock_tb(tb);
|
||||
} else {
|
||||
do_tb_phys_invalidate(tb, false);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Add a new TB and link it to the physical page tables.
|
||||
* Add a new TB and link it to the physical page tables. phys_page2 is
|
||||
* (-1) to indicate that only one page contains the TB.
|
||||
*
|
||||
* Called with mmap_lock held for user-mode emulation.
|
||||
*
|
||||
* Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
|
||||
@@ -973,29 +945,43 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
|
||||
* for the same block of guest code that @tb corresponds to. In that case,
|
||||
* the caller should discard the original @tb, and use instead the returned TB.
|
||||
*/
|
||||
TranslationBlock *tb_link_page(TranslationBlock *tb)
|
||||
TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
|
||||
tb_page_addr_t phys_page2)
|
||||
{
|
||||
PageDesc *p;
|
||||
PageDesc *p2 = NULL;
|
||||
void *existing_tb = NULL;
|
||||
uint32_t h;
|
||||
|
||||
assert_memory_lock();
|
||||
tcg_debug_assert(!(tb->cflags & CF_INVALID));
|
||||
|
||||
tb_record(tb);
|
||||
/*
|
||||
* Add the TB to the page list, acquiring first the pages's locks.
|
||||
* We keep the locks held until after inserting the TB in the hash table,
|
||||
* so that if the insertion fails we know for sure that the TBs are still
|
||||
* in the page descriptors.
|
||||
* Note that inserting into the hash table first isn't an option, since
|
||||
* we can only insert TBs that are fully initialized.
|
||||
*/
|
||||
page_lock_pair(&p, phys_pc, &p2, phys_page2, true);
|
||||
tb_record(tb, p, p2);
|
||||
|
||||
/* add in the hash table */
|
||||
h = tb_hash_func(tb_page_addr0(tb), (tb->cflags & CF_PCREL ? 0 : tb->pc),
|
||||
tb->flags, tb->cs_base, tb->cflags);
|
||||
h = tb_hash_func(phys_pc, (tb->cflags & CF_PCREL ? 0 : tb->pc),
|
||||
tb->flags, tb->cflags, tb->trace_vcpu_dstate);
|
||||
qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
|
||||
|
||||
/* remove TB from the page(s) if we couldn't insert it */
|
||||
if (unlikely(existing_tb)) {
|
||||
tb_remove(tb);
|
||||
tb_unlock_pages(tb);
|
||||
return existing_tb;
|
||||
tb = existing_tb;
|
||||
}
|
||||
|
||||
tb_unlock_pages(tb);
|
||||
if (p2 && p2 != p) {
|
||||
page_unlock(p2);
|
||||
}
|
||||
page_unlock(p);
|
||||
return tb;
|
||||
}
|
||||
|
||||
@@ -1022,7 +1008,7 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
|
||||
* Called with mmap_lock held for user-mode emulation
|
||||
* NOTE: this function must not be called while a TB is running.
|
||||
*/
|
||||
static void tb_invalidate_phys_page(tb_page_addr_t addr)
|
||||
void tb_invalidate_phys_page(tb_page_addr_t addr)
|
||||
{
|
||||
tb_page_addr_t start, last;
|
||||
|
||||
@@ -1107,9 +1093,6 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
|
||||
TranslationBlock *current_tb = retaddr ? tcg_tb_lookup(retaddr) : NULL;
|
||||
#endif /* TARGET_HAS_PRECISE_SMC */
|
||||
|
||||
/* Range may not cross a page. */
|
||||
tcg_debug_assert(((start ^ last) & TARGET_PAGE_MASK) == 0);
|
||||
|
||||
/*
|
||||
* We remove all the TBs in the range [start, last].
|
||||
* XXX: see if in some cases it could be faster to invalidate all the code
|
||||
@@ -1161,6 +1144,28 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* Invalidate all TBs which intersect with the target physical
|
||||
* address page @addr.
|
||||
*/
|
||||
void tb_invalidate_phys_page(tb_page_addr_t addr)
|
||||
{
|
||||
struct page_collection *pages;
|
||||
tb_page_addr_t start, last;
|
||||
PageDesc *p;
|
||||
|
||||
p = page_find(addr >> TARGET_PAGE_BITS);
|
||||
if (p == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
start = addr & TARGET_PAGE_MASK;
|
||||
last = addr | ~TARGET_PAGE_MASK;
|
||||
pages = page_collection_lock(start, last);
|
||||
tb_invalidate_phys_page_range__locked(pages, p, start, last, 0);
|
||||
page_collection_unlock(pages);
|
||||
}
|
||||
|
||||
/*
|
||||
* Invalidate all TBs which intersect with the target physical address range
|
||||
* [start;last]. NOTE: start and end may refer to *different* physical pages.
|
||||
@@ -1178,17 +1183,15 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
|
||||
index_last = last >> TARGET_PAGE_BITS;
|
||||
for (index = start >> TARGET_PAGE_BITS; index <= index_last; index++) {
|
||||
PageDesc *pd = page_find(index);
|
||||
tb_page_addr_t page_start, page_last;
|
||||
tb_page_addr_t bound;
|
||||
|
||||
if (pd == NULL) {
|
||||
continue;
|
||||
}
|
||||
assert_page_locked(pd);
|
||||
page_start = index << TARGET_PAGE_BITS;
|
||||
page_last = page_start | ~TARGET_PAGE_MASK;
|
||||
page_last = MIN(page_last, last);
|
||||
tb_invalidate_phys_page_range__locked(pages, pd,
|
||||
page_start, page_last, 0);
|
||||
bound = (index << TARGET_PAGE_BITS) | ~TARGET_PAGE_MASK;
|
||||
bound = MIN(bound, last);
|
||||
tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
|
||||
}
|
||||
page_collection_unlock(pages);
|
||||
}
|
||||
|
@@ -89,20 +89,7 @@ void icount_handle_deadline(void)
|
||||
}
|
||||
}
|
||||
|
||||
/* Distribute the budget evenly across all CPUs */
|
||||
int64_t icount_percpu_budget(int cpu_count)
|
||||
{
|
||||
int64_t limit = icount_get_limit();
|
||||
int64_t timeslice = limit / cpu_count;
|
||||
|
||||
if (timeslice == 0) {
|
||||
timeslice = limit;
|
||||
}
|
||||
|
||||
return timeslice;
|
||||
}
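A small worked example of the even split above. Illustrative only; example_timeslice is an invented stand-in for the same arithmetic.

    static int64_t example_timeslice(int64_t limit, int n_cpus)
    {
        int64_t slice = limit / n_cpus;
        return slice ? slice : limit;   /* never hand out a zero budget */
    }
    /* example_timeslice(10000, 4) == 2500; example_timeslice(3, 8) == 3 */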
|
||||
|
||||
void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
|
||||
void icount_prepare_for_run(CPUState *cpu)
|
||||
{
|
||||
int insns_left;
|
||||
|
||||
@@ -111,24 +98,24 @@ void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
|
||||
* each vCPU execution. However u16.high can be raised
|
||||
* asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
|
||||
*/
|
||||
g_assert(cpu->neg.icount_decr.u16.low == 0);
|
||||
g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
|
||||
g_assert(cpu->icount_extra == 0);
|
||||
|
||||
cpu->icount_budget = icount_get_limit();
|
||||
insns_left = MIN(0xffff, cpu->icount_budget);
|
||||
cpu_neg(cpu)->icount_decr.u16.low = insns_left;
|
||||
cpu->icount_extra = cpu->icount_budget - insns_left;
|
||||
|
||||
replay_mutex_lock();
|
||||
|
||||
cpu->icount_budget = MIN(icount_get_limit(), cpu_budget);
|
||||
insns_left = MIN(0xffff, cpu->icount_budget);
|
||||
cpu->neg.icount_decr.u16.low = insns_left;
|
||||
cpu->icount_extra = cpu->icount_budget - insns_left;
|
||||
|
||||
if (cpu->icount_budget == 0) {
|
||||
/*
|
||||
* We're called without the BQL, so must take it while
|
||||
* We're called without the iothread lock, so must take it while
|
||||
* we're calling timer handlers.
|
||||
*/
|
||||
bql_lock();
|
||||
qemu_mutex_lock_iothread();
|
||||
icount_notify_aio_contexts();
|
||||
bql_unlock();
|
||||
qemu_mutex_unlock_iothread();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -138,7 +125,7 @@ void icount_process_data(CPUState *cpu)
|
||||
icount_update(cpu);
|
||||
|
||||
/* Reset the counters */
|
||||
cpu->neg.icount_decr.u16.low = 0;
|
||||
cpu_neg(cpu)->icount_decr.u16.low = 0;
|
||||
cpu->icount_extra = 0;
|
||||
cpu->icount_budget = 0;
|
||||
|
||||
@@ -153,7 +140,7 @@ void icount_handle_interrupt(CPUState *cpu, int mask)
|
||||
|
||||
tcg_handle_interrupt(cpu, mask);
|
||||
if (qemu_cpu_is_self(cpu) &&
|
||||
!cpu->neg.can_do_io
|
||||
!cpu->can_do_io
|
||||
&& (mask & ~old_mask) != 0) {
|
||||
cpu_abort(cpu, "Raised interrupt while not in I/O function");
|
||||
}
|
||||
|
@@ -11,8 +11,7 @@
#define TCG_ACCEL_OPS_ICOUNT_H

void icount_handle_deadline(void);
void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget);
int64_t icount_percpu_budget(int cpu_count);
void icount_prepare_for_run(CPUState *cpu);
void icount_process_data(CPUState *cpu);

void icount_handle_interrupt(CPUState *cpu, int mask);

@@ -32,7 +32,7 @@
|
||||
#include "qemu/guest-random.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "hw/boards.h"
|
||||
#include "tcg/startup.h"
|
||||
|
||||
#include "tcg-accel-ops.h"
|
||||
#include "tcg-accel-ops-mttcg.h"
|
||||
|
||||
@@ -76,11 +76,11 @@ static void *mttcg_cpu_thread_fn(void *arg)
|
||||
rcu_add_force_rcu_notifier(&force_rcu.notifier);
|
||||
tcg_register_thread();
|
||||
|
||||
bql_lock();
|
||||
qemu_mutex_lock_iothread();
|
||||
qemu_thread_get_self(cpu->thread);
|
||||
|
||||
cpu->thread_id = qemu_get_thread_id();
|
||||
cpu->neg.can_do_io = true;
|
||||
cpu->can_do_io = 1;
|
||||
current_cpu = cpu;
|
||||
cpu_thread_signal_created(cpu);
|
||||
qemu_guest_random_seed_thread_part2(cpu->random_seed);
|
||||
@@ -91,35 +91,40 @@ static void *mttcg_cpu_thread_fn(void *arg)
|
||||
do {
|
||||
if (cpu_can_run(cpu)) {
|
||||
int r;
|
||||
bql_unlock();
|
||||
r = tcg_cpu_exec(cpu);
|
||||
bql_lock();
|
||||
qemu_mutex_unlock_iothread();
|
||||
r = tcg_cpus_exec(cpu);
|
||||
qemu_mutex_lock_iothread();
|
||||
switch (r) {
|
||||
case EXCP_DEBUG:
|
||||
cpu_handle_guest_debug(cpu);
|
||||
break;
|
||||
case EXCP_HALTED:
|
||||
/*
|
||||
* Usually cpu->halted is set, but may have already been
|
||||
* reset by another thread by the time we arrive here.
|
||||
* during start-up the vCPU is reset and the thread is
|
||||
* kicked several times. If we don't ensure we go back
|
||||
* to sleep in the halted state we won't cleanly
|
||||
* start-up when the vCPU is enabled.
|
||||
*
|
||||
* cpu->halted should ensure we sleep in wait_io_event
|
||||
*/
|
||||
g_assert(cpu->halted);
|
||||
break;
|
||||
case EXCP_ATOMIC:
|
||||
bql_unlock();
|
||||
qemu_mutex_unlock_iothread();
|
||||
cpu_exec_step_atomic(cpu);
|
||||
bql_lock();
|
||||
qemu_mutex_lock_iothread();
|
||||
default:
|
||||
/* Ignore everything else? */
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
qatomic_set_mb(&cpu->exit_request, 0);
|
||||
qatomic_mb_set(&cpu->exit_request, 0);
|
||||
qemu_wait_io_event(cpu);
|
||||
} while (!cpu->unplug || cpu_can_run(cpu));
|
||||
|
||||
tcg_cpu_destroy(cpu);
|
||||
bql_unlock();
|
||||
tcg_cpus_destroy(cpu);
|
||||
qemu_mutex_unlock_iothread();
|
||||
rcu_remove_force_rcu_notifier(&force_rcu.notifier);
|
||||
rcu_unregister_thread();
|
||||
return NULL;
|
||||
@@ -137,10 +142,18 @@ void mttcg_start_vcpu_thread(CPUState *cpu)
|
||||
g_assert(tcg_enabled());
|
||||
tcg_cpu_init_cflags(cpu, current_machine->smp.max_cpus > 1);
|
||||
|
||||
cpu->thread = g_new0(QemuThread, 1);
|
||||
cpu->halt_cond = g_malloc0(sizeof(QemuCond));
|
||||
qemu_cond_init(cpu->halt_cond);
|
||||
|
||||
/* create a thread per vCPU with TCG (MTTCG) */
|
||||
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
|
||||
cpu->cpu_index);
|
||||
|
||||
qemu_thread_create(cpu->thread, thread_name, mttcg_cpu_thread_fn,
|
||||
cpu, QEMU_THREAD_JOINABLE);
|
||||
|
||||
#ifdef _WIN32
|
||||
cpu->hThread = qemu_thread_get_handle(cpu->thread);
|
||||
#endif
|
||||
}
|
||||
|
@@ -24,7 +24,6 @@
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/lockable.h"
|
||||
#include "sysemu/tcg.h"
|
||||
#include "sysemu/replay.h"
|
||||
#include "sysemu/cpu-timers.h"
|
||||
@@ -32,7 +31,7 @@
|
||||
#include "qemu/notify.h"
|
||||
#include "qemu/guest-random.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "tcg/startup.h"
|
||||
|
||||
#include "tcg-accel-ops.h"
|
||||
#include "tcg-accel-ops-rr.h"
|
||||
#include "tcg-accel-ops-icount.h"
|
||||
@@ -72,13 +71,11 @@ static void rr_kick_next_cpu(void)
|
||||
{
|
||||
CPUState *cpu;
|
||||
do {
|
||||
cpu = qatomic_read(&rr_current_cpu);
|
||||
cpu = qatomic_mb_read(&rr_current_cpu);
|
||||
if (cpu) {
|
||||
cpu_exit(cpu);
|
||||
}
|
||||
/* Finish kicking this cpu before reading again. */
|
||||
smp_mb();
|
||||
} while (cpu != qatomic_read(&rr_current_cpu));
|
||||
} while (cpu != qatomic_mb_read(&rr_current_cpu));
|
||||
}
|
||||
|
||||
static void rr_kick_thread(void *opaque)
|
||||
@@ -111,7 +108,7 @@ static void rr_wait_io_event(void)
|
||||
|
||||
while (all_cpu_threads_idle()) {
|
||||
rr_stop_kick_timer();
|
||||
qemu_cond_wait_bql(first_cpu->halt_cond);
|
||||
qemu_cond_wait_iothread(first_cpu->halt_cond);
|
||||
}
|
||||
|
||||
rr_start_kick_timer();
|
||||
@@ -131,7 +128,7 @@ static void rr_deal_with_unplugged_cpus(void)
|
||||
|
||||
CPU_FOREACH(cpu) {
|
||||
if (cpu->unplug && !cpu_can_run(cpu)) {
|
||||
tcg_cpu_destroy(cpu);
|
||||
tcg_cpus_destroy(cpu);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -142,33 +139,6 @@ static void rr_force_rcu(Notifier *notify, void *data)
|
||||
rr_kick_next_cpu();
|
||||
}
|
||||
|
||||
/*
|
||||
* Calculate the number of CPUs that we will process in a single iteration of
|
||||
* the main CPU thread loop so that we can fairly distribute the instruction
|
||||
* count across CPUs.
|
||||
*
|
||||
* The CPU count is cached based on the CPU list generation ID to avoid
|
||||
* iterating the list every time.
|
||||
*/
|
||||
static int rr_cpu_count(void)
|
||||
{
|
||||
static unsigned int last_gen_id = ~0;
|
||||
static int cpu_count;
|
||||
CPUState *cpu;
|
||||
|
||||
QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
|
||||
|
||||
if (cpu_list_generation_id_get() != last_gen_id) {
|
||||
cpu_count = 0;
|
||||
CPU_FOREACH(cpu) {
|
||||
++cpu_count;
|
||||
}
|
||||
last_gen_id = cpu_list_generation_id_get();
|
||||
}
|
||||
|
||||
return cpu_count;
|
||||
}
|
||||
|
||||
/*
|
||||
* In the single-threaded case each vCPU is simulated in turn. If
|
||||
* there is more than a single vCPU we create a simple timer to kick
|
||||
@@ -188,17 +158,17 @@ static void *rr_cpu_thread_fn(void *arg)
|
||||
rcu_add_force_rcu_notifier(&force_rcu);
|
||||
tcg_register_thread();
|
||||
|
||||
bql_lock();
|
||||
qemu_mutex_lock_iothread();
|
||||
qemu_thread_get_self(cpu->thread);
|
||||
|
||||
cpu->thread_id = qemu_get_thread_id();
|
||||
cpu->neg.can_do_io = true;
|
||||
cpu->can_do_io = 1;
|
||||
cpu_thread_signal_created(cpu);
|
||||
qemu_guest_random_seed_thread_part2(cpu->random_seed);
|
||||
|
||||
/* wait for initial kick-off after machine start */
|
||||
while (first_cpu->stopped) {
|
||||
qemu_cond_wait_bql(first_cpu->halt_cond);
|
||||
qemu_cond_wait_iothread(first_cpu->halt_cond);
|
||||
|
||||
/* process any pending work */
|
||||
CPU_FOREACH(cpu) {
|
||||
@@ -215,16 +185,11 @@ static void *rr_cpu_thread_fn(void *arg)
|
||||
cpu->exit_request = 1;
|
||||
|
||||
while (1) {
|
||||
/* Only used for icount_enabled() */
|
||||
int64_t cpu_budget = 0;
|
||||
|
||||
bql_unlock();
|
||||
qemu_mutex_unlock_iothread();
|
||||
replay_mutex_lock();
|
||||
bql_lock();
|
||||
qemu_mutex_lock_iothread();
|
||||
|
||||
if (icount_enabled()) {
|
||||
int cpu_count = rr_cpu_count();
|
||||
|
||||
/* Account partial waits to QEMU_CLOCK_VIRTUAL. */
|
||||
icount_account_warp_timer();
|
||||
/*
|
||||
@@ -232,8 +197,6 @@ static void *rr_cpu_thread_fn(void *arg)
|
||||
* waking up the I/O thread and waiting for completion.
|
||||
*/
|
||||
icount_handle_deadline();
|
||||
|
||||
cpu_budget = icount_percpu_budget(cpu_count);
|
||||
}
|
||||
|
||||
replay_mutex_unlock();
|
||||
@@ -243,9 +206,8 @@ static void *rr_cpu_thread_fn(void *arg)
|
||||
}
|
||||
|
||||
while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) {
|
||||
/* Store rr_current_cpu before evaluating cpu_can_run(). */
|
||||
qatomic_set_mb(&rr_current_cpu, cpu);
|
||||
|
||||
qatomic_mb_set(&rr_current_cpu, cpu);
|
||||
current_cpu = cpu;
|
||||
|
||||
qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
|
||||
@@ -254,23 +216,23 @@ static void *rr_cpu_thread_fn(void *arg)
|
||||
if (cpu_can_run(cpu)) {
|
||||
int r;
|
||||
|
||||
bql_unlock();
|
||||
qemu_mutex_unlock_iothread();
|
||||
if (icount_enabled()) {
|
||||
icount_prepare_for_run(cpu, cpu_budget);
|
||||
icount_prepare_for_run(cpu);
|
||||
}
|
||||
r = tcg_cpu_exec(cpu);
|
||||
r = tcg_cpus_exec(cpu);
|
||||
if (icount_enabled()) {
|
||||
icount_process_data(cpu);
|
||||
}
|
||||
bql_lock();
|
||||
qemu_mutex_lock_iothread();
|
||||
|
||||
if (r == EXCP_DEBUG) {
|
||||
cpu_handle_guest_debug(cpu);
|
||||
break;
|
||||
} else if (r == EXCP_ATOMIC) {
|
||||
bql_unlock();
|
||||
qemu_mutex_unlock_iothread();
|
||||
cpu_exec_step_atomic(cpu);
|
||||
bql_lock();
|
||||
qemu_mutex_lock_iothread();
|
||||
break;
|
||||
}
|
||||
} else if (cpu->stop) {
|
||||
@@ -283,11 +245,11 @@ static void *rr_cpu_thread_fn(void *arg)
|
||||
cpu = CPU_NEXT(cpu);
|
||||
} /* while (cpu && !cpu->exit_request).. */
|
||||
|
||||
/* Does not need a memory barrier because a spurious wakeup is okay. */
|
||||
/* Does not need qatomic_mb_set because a spurious wakeup is okay. */
|
||||
qatomic_set(&rr_current_cpu, NULL);
|
||||
|
||||
if (cpu && cpu->exit_request) {
|
||||
qatomic_set_mb(&cpu->exit_request, 0);
|
||||
qatomic_mb_set(&cpu->exit_request, 0);
|
||||
}
|
||||
|
||||
if (icount_enabled() && all_cpu_threads_idle()) {
|
||||
@@ -302,7 +264,9 @@ static void *rr_cpu_thread_fn(void *arg)
|
||||
rr_deal_with_unplugged_cpus();
|
||||
}
|
||||
|
||||
g_assert_not_reached();
|
||||
rcu_remove_force_rcu_notifier(&force_rcu);
|
||||
rcu_unregister_thread();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void rr_start_vcpu_thread(CPUState *cpu)
|
||||
@@ -315,25 +279,27 @@ void rr_start_vcpu_thread(CPUState *cpu)
|
||||
tcg_cpu_init_cflags(cpu, false);
|
||||
|
||||
if (!single_tcg_cpu_thread) {
|
||||
single_tcg_halt_cond = cpu->halt_cond;
|
||||
single_tcg_cpu_thread = cpu->thread;
|
||||
cpu->thread = g_new0(QemuThread, 1);
|
||||
cpu->halt_cond = g_new0(QemuCond, 1);
|
||||
qemu_cond_init(cpu->halt_cond);
|
||||
|
||||
/* share a single thread for all cpus with TCG */
|
||||
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
|
||||
qemu_thread_create(cpu->thread, thread_name,
|
||||
rr_cpu_thread_fn,
|
||||
cpu, QEMU_THREAD_JOINABLE);
|
||||
|
||||
single_tcg_halt_cond = cpu->halt_cond;
|
||||
single_tcg_cpu_thread = cpu->thread;
|
||||
#ifdef _WIN32
|
||||
cpu->hThread = qemu_thread_get_handle(cpu->thread);
|
||||
#endif
|
||||
} else {
|
||||
/* we share the thread, dump spare data */
|
||||
g_free(cpu->thread);
|
||||
qemu_cond_destroy(cpu->halt_cond);
|
||||
g_free(cpu->halt_cond);
|
||||
/* we share the thread */
|
||||
cpu->thread = single_tcg_cpu_thread;
|
||||
cpu->halt_cond = single_tcg_halt_cond;
|
||||
|
||||
/* copy the stuff done at start of rr_cpu_thread_fn */
|
||||
cpu->thread_id = first_cpu->thread_id;
|
||||
cpu->neg.can_do_io = 1;
|
||||
cpu->can_do_io = 1;
|
||||
cpu->created = true;
|
||||
}
|
||||
}
|
||||
|
@@ -34,10 +34,7 @@
|
||||
#include "qemu/timer.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "exec/hwaddr.h"
|
||||
#include "exec/tb-flush.h"
|
||||
#include "gdbstub/enums.h"
|
||||
|
||||
#include "hw/core/cpu.h"
|
||||
#include "exec/gdbstub.h"
|
||||
|
||||
#include "tcg-accel-ops.h"
|
||||
#include "tcg-accel-ops-mttcg.h"
|
||||
@@ -62,35 +59,38 @@ void tcg_cpu_init_cflags(CPUState *cpu, bool parallel)
|
||||
|
||||
cflags |= parallel ? CF_PARALLEL : 0;
|
||||
cflags |= icount_enabled() ? CF_USE_ICOUNT : 0;
|
||||
tcg_cflags_set(cpu, cflags);
|
||||
cpu->tcg_cflags |= cflags;
|
||||
}
|
||||
|
||||
void tcg_cpu_destroy(CPUState *cpu)
|
||||
void tcg_cpus_destroy(CPUState *cpu)
|
||||
{
|
||||
cpu_thread_signal_destroyed(cpu);
|
||||
}
|
||||
|
||||
int tcg_cpu_exec(CPUState *cpu)
|
||||
int tcg_cpus_exec(CPUState *cpu)
|
||||
{
|
||||
int ret;
|
||||
#ifdef CONFIG_PROFILER
|
||||
int64_t ti;
|
||||
#endif
|
||||
assert(tcg_enabled());
|
||||
#ifdef CONFIG_PROFILER
|
||||
ti = profile_getclock();
|
||||
#endif
|
||||
cpu_exec_start(cpu);
|
||||
ret = cpu_exec(cpu);
|
||||
cpu_exec_end(cpu);
|
||||
#ifdef CONFIG_PROFILER
|
||||
qatomic_set(&tcg_ctx->prof.cpu_exec_time,
|
||||
tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti);
|
||||
#endif
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void tcg_cpu_reset_hold(CPUState *cpu)
|
||||
{
|
||||
tcg_flush_jmp_cache(cpu);
|
||||
|
||||
tlb_flush(cpu);
|
||||
}
|
||||
|
||||
/* mask must never be zero, except for A20 change call */
|
||||
void tcg_handle_interrupt(CPUState *cpu, int mask)
|
||||
{
|
||||
g_assert(bql_locked());
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
|
||||
cpu->interrupt_request |= mask;
|
||||
|
||||
@@ -101,7 +101,7 @@ void tcg_handle_interrupt(CPUState *cpu, int mask)
|
||||
if (!qemu_cpu_is_self(cpu)) {
|
||||
qemu_cpu_kick(cpu);
|
||||
} else {
|
||||
qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
|
||||
qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -215,7 +215,6 @@ static void tcg_accel_ops_init(AccelOpsClass *ops)
|
||||
}
|
||||
}
|
||||
|
||||
ops->cpu_reset_hold = tcg_cpu_reset_hold;
|
||||
ops->supports_guest_debug = tcg_supports_guest_debug;
|
||||
ops->insert_breakpoint = tcg_insert_breakpoint;
|
||||
ops->remove_breakpoint = tcg_remove_breakpoint;
|
||||
|
@@ -14,8 +14,8 @@

#include "sysemu/cpus.h"

void tcg_cpu_destroy(CPUState *cpu);
int tcg_cpu_exec(CPUState *cpu);
void tcg_cpus_destroy(CPUState *cpu);
int tcg_cpus_exec(CPUState *cpu);
void tcg_handle_interrupt(CPUState *cpu, int mask);
void tcg_cpu_init_cflags(CPUState *cpu, bool parallel);


@@ -27,24 +27,21 @@
|
||||
#include "sysemu/tcg.h"
|
||||
#include "exec/replay-core.h"
|
||||
#include "sysemu/cpu-timers.h"
|
||||
#include "tcg/startup.h"
|
||||
#include "tcg/oversized-guest.h"
|
||||
#include "tcg/tcg.h"
|
||||
#include "qapi/error.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "qemu/accel.h"
|
||||
#include "qemu/atomic.h"
|
||||
#include "qapi/qapi-builtin-visit.h"
|
||||
#include "qemu/units.h"
|
||||
#if !defined(CONFIG_USER_ONLY)
|
||||
#include "hw/boards.h"
|
||||
#endif
|
||||
#include "internal-common.h"
|
||||
#include "internal.h"
|
||||
|
||||
struct TCGState {
|
||||
AccelState parent_obj;
|
||||
|
||||
bool mttcg_enabled;
|
||||
bool one_insn_per_tb;
|
||||
int splitwx_enabled;
|
||||
unsigned long tb_size;
|
||||
};
|
||||
@@ -64,23 +61,37 @@ DECLARE_INSTANCE_CHECKER(TCGState, TCG_STATE,
|
||||
* they can set the appropriate CONFIG flags in ${target}-softmmu.mak
|
||||
*
|
||||
* Once a guest architecture has been converted to the new primitives
|
||||
* there is one remaining limitation to check:
|
||||
* - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
|
||||
* there are two remaining limitations to check.
|
||||
*
|
||||
* - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
|
||||
* - The host must have a stronger memory order than the guest
|
||||
*
|
||||
* It may be possible in future to support strong guests on weak hosts
|
||||
* but that will require tagging all load/stores in a guest with their
|
||||
* implicit memory order requirements which would likely slow things
|
||||
* down a lot.
|
||||
*/
|
||||
|
||||
static bool check_tcg_memory_orders_compatible(void)
|
||||
{
|
||||
#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
|
||||
return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
static bool default_mttcg_enabled(void)
|
||||
{
|
||||
if (icount_enabled() || TCG_OVERSIZED_GUEST) {
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
#ifdef TARGET_SUPPORTS_MTTCG
|
||||
# ifndef TCG_GUEST_DEFAULT_MO
|
||||
# error "TARGET_SUPPORTS_MTTCG without TCG_GUEST_DEFAULT_MO"
|
||||
# endif
|
||||
return true;
|
||||
return check_tcg_memory_orders_compatible();
|
||||
#else
|
||||
return false;
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
}
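For reference, the subset test behind check_tcg_memory_orders_compatible() above, written out as a stand-alone sketch. Illustrative only; example_mo_subset is invented.

    static bool example_mo_subset(unsigned guest_mo, unsigned host_mo)
    {
        /* Compatible iff every ordering the guest requires is already
         * guaranteed by the host backend. */
        return (guest_mo & ~host_mo) == 0;
    }
    /* A strongly-ordered guest on a weakly-ordered host fails the test, so the
     * older code keeps MTTCG off by default and only warns (see the
     * tcg_set_thread() hunk below) when the user forces thread=multi. */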
|
||||
|
||||
static void tcg_accel_instance_init(Object *obj)
|
||||
@@ -98,7 +109,6 @@ static void tcg_accel_instance_init(Object *obj)
|
||||
}
|
||||
|
||||
bool mttcg_enabled;
|
||||
bool one_insn_per_tb;
|
||||
|
||||
static int tcg_init_machine(MachineState *ms)
|
||||
{
|
||||
@@ -121,7 +131,7 @@ static int tcg_init_machine(MachineState *ms)
|
||||
* There's no guest base to take into account, so go ahead and
|
||||
* initialize the prologue now.
|
||||
*/
|
||||
tcg_prologue_init();
|
||||
tcg_prologue_init(tcg_ctx);
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
@@ -148,6 +158,11 @@ static void tcg_set_thread(Object *obj, const char *value, Error **errp)
|
||||
warn_report("Guest not yet converted to MTTCG - "
|
||||
"you may get unexpected results");
|
||||
#endif
|
||||
if (!check_tcg_memory_orders_compatible()) {
|
||||
warn_report("Guest expects a stronger memory ordering "
|
||||
"than the host provides");
|
||||
error_printf("This may cause strange/hard to debug errors\n");
|
||||
}
|
||||
s->mttcg_enabled = true;
|
||||
}
|
||||
} else if (strcmp(value, "single") == 0) {
|
||||
@@ -193,20 +208,6 @@ static void tcg_set_splitwx(Object *obj, bool value, Error **errp)
|
||||
s->splitwx_enabled = value;
|
||||
}
|
||||
|
||||
static bool tcg_get_one_insn_per_tb(Object *obj, Error **errp)
|
||||
{
|
||||
TCGState *s = TCG_STATE(obj);
|
||||
return s->one_insn_per_tb;
|
||||
}
|
||||
|
||||
static void tcg_set_one_insn_per_tb(Object *obj, bool value, Error **errp)
|
||||
{
|
||||
TCGState *s = TCG_STATE(obj);
|
||||
s->one_insn_per_tb = value;
|
||||
/* Set the global also: this changes the behaviour */
|
||||
qatomic_set(&one_insn_per_tb, value);
|
||||
}
|
||||
|
||||
static int tcg_gdbstub_supported_sstep_flags(void)
|
||||
{
|
||||
/*
|
||||
@@ -227,8 +228,6 @@ static void tcg_accel_class_init(ObjectClass *oc, void *data)
|
||||
AccelClass *ac = ACCEL_CLASS(oc);
|
||||
ac->name = "tcg";
|
||||
ac->init_machine = tcg_init_machine;
|
||||
ac->cpu_common_realize = tcg_exec_realizefn;
|
||||
ac->cpu_common_unrealize = tcg_exec_unrealizefn;
|
||||
ac->allowed = &tcg_allowed;
|
||||
ac->gdbstub_supported_sstep_flags = tcg_gdbstub_supported_sstep_flags;
|
||||
|
||||
@@ -246,12 +245,6 @@ static void tcg_accel_class_init(ObjectClass *oc, void *data)
|
||||
tcg_get_splitwx, tcg_set_splitwx);
|
||||
object_class_property_set_description(oc, "split-wx",
|
||||
"Map jit pages into separate RW and RX regions");
|
||||
|
||||
object_class_property_add_bool(oc, "one-insn-per-tb",
|
||||
tcg_get_one_insn_per_tb,
|
||||
tcg_set_one_insn_per_tb);
|
||||
object_class_property_set_description(oc, "one-insn-per-tb",
|
||||
"Only put one guest insn in each translation block");
|
||||
}
|
||||
|
||||
static const TypeInfo tcg_accel_type = {
|
||||
|
@@ -20,7 +20,7 @@
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/host-utils.h"
|
||||
#include "cpu.h"
|
||||
#include "exec/helper-proto-common.h"
|
||||
#include "exec/helper-proto.h"
|
||||
#include "tcg/tcg-gvec-desc.h"
|
||||
|
||||
|
||||
@@ -550,17 +550,6 @@ void HELPER(gvec_ands)(void *d, void *a, uint64_t b, uint32_t desc)
|
||||
clear_high(d, oprsz, desc);
|
||||
}
|
||||
|
||||
void HELPER(gvec_andcs)(void *d, void *a, uint64_t b, uint32_t desc)
|
||||
{
|
||||
intptr_t oprsz = simd_oprsz(desc);
|
||||
intptr_t i;
|
||||
|
||||
for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
|
||||
*(uint64_t *)(d + i) = *(uint64_t *)(a + i) & ~b;
|
||||
}
|
||||
clear_high(d, oprsz, desc);
|
||||
}
|
||||
|
||||
void HELPER(gvec_xors)(void *d, void *a, uint64_t b, uint32_t desc)
|
||||
{
|
||||
intptr_t oprsz = simd_oprsz(desc);
|
||||
@@ -1042,32 +1031,6 @@ DO_CMP2(64)
|
||||
#undef DO_CMP1
|
||||
#undef DO_CMP2
|
||||
|
||||
#define DO_CMP1(NAME, TYPE, OP) \
|
||||
void HELPER(NAME)(void *d, void *a, uint64_t b64, uint32_t desc) \
|
||||
{ \
|
||||
intptr_t oprsz = simd_oprsz(desc); \
|
||||
TYPE inv = simd_data(desc), b = b64; \
|
||||
for (intptr_t i = 0; i < oprsz; i += sizeof(TYPE)) { \
|
||||
*(TYPE *)(d + i) = -((*(TYPE *)(a + i) OP b) ^ inv); \
|
||||
} \
|
||||
clear_high(d, oprsz, desc); \
|
||||
}
|
||||
|
||||
#define DO_CMP2(SZ) \
|
||||
DO_CMP1(gvec_eqs##SZ, uint##SZ##_t, ==) \
|
||||
DO_CMP1(gvec_lts##SZ, int##SZ##_t, <) \
|
||||
DO_CMP1(gvec_les##SZ, int##SZ##_t, <=) \
|
||||
DO_CMP1(gvec_ltus##SZ, uint##SZ##_t, <) \
|
||||
DO_CMP1(gvec_leus##SZ, uint##SZ##_t, <=)
|
||||
|
||||
DO_CMP2(8)
|
||||
DO_CMP2(16)
|
||||
DO_CMP2(32)
|
||||
DO_CMP2(64)
|
||||
|
||||
#undef DO_CMP1
|
||||
#undef DO_CMP2
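For one concrete case, DO_CMP1(gvec_eqs32, uint32_t, ==) from the block above generates a helper equivalent to the following. Shown expanded only for readability; this is not an addition to the file.

    void HELPER(gvec_eqs32)(void *d, void *a, uint64_t b64, uint32_t desc)
    {
        intptr_t oprsz = simd_oprsz(desc);
        uint32_t inv = simd_data(desc), b = b64;   /* inv flips the result, e.g. eq becomes ne */

        for (intptr_t i = 0; i < oprsz; i += sizeof(uint32_t)) {
            *(uint32_t *)(d + i) = -((*(uint32_t *)(a + i) == b) ^ inv);
        }
        clear_high(d, oprsz, desc);
    }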
|
||||
|
||||
void HELPER(gvec_ssadd8)(void *d, void *a, void *b, uint32_t desc)
|
||||
{
|
||||
intptr_t oprsz = simd_oprsz(desc);
|
||||
|
@@ -24,17 +24,13 @@
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/host-utils.h"
|
||||
#include "cpu.h"
|
||||
#include "exec/helper-proto-common.h"
|
||||
#include "exec/helper-proto.h"
|
||||
#include "exec/cpu_ldst.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "disas/disas.h"
|
||||
#include "exec/log.h"
|
||||
#include "tcg/tcg.h"
|
||||
|
||||
#define HELPER_H "accel/tcg/tcg-runtime.h"
|
||||
#include "exec/helper-info.c.inc"
|
||||
#undef HELPER_H
|
||||
|
||||
/* 32-bit helpers */
|
||||
|
||||
int32_t HELPER(div_i32)(int32_t arg1, int32_t arg2)
|
||||
|
@@ -39,63 +39,62 @@ DEF_HELPER_FLAGS_1(exit_atomic, TCG_CALL_NO_WG, noreturn, env)
|
||||
DEF_HELPER_FLAGS_3(memset, TCG_CALL_NO_RWG, ptr, ptr, int, ptr)
|
||||
#endif /* IN_HELPER_PROTO */
|
||||
|
||||
DEF_HELPER_FLAGS_3(ld_i128, TCG_CALL_NO_WG, i128, env, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(st_i128, TCG_CALL_NO_WG, void, env, i64, i128, i32)
|
||||
|
||||
DEF_HELPER_FLAGS_5(atomic_cmpxchgb, TCG_CALL_NO_WG,
|
||||
i32, env, i64, i32, i32, i32)
|
||||
i32, env, tl, i32, i32, i32)
|
||||
DEF_HELPER_FLAGS_5(atomic_cmpxchgw_be, TCG_CALL_NO_WG,
|
||||
i32, env, i64, i32, i32, i32)
|
||||
i32, env, tl, i32, i32, i32)
|
||||
DEF_HELPER_FLAGS_5(atomic_cmpxchgw_le, TCG_CALL_NO_WG,
|
||||
i32, env, i64, i32, i32, i32)
|
||||
i32, env, tl, i32, i32, i32)
|
||||
DEF_HELPER_FLAGS_5(atomic_cmpxchgl_be, TCG_CALL_NO_WG,
|
||||
i32, env, i64, i32, i32, i32)
|
||||
i32, env, tl, i32, i32, i32)
|
||||
DEF_HELPER_FLAGS_5(atomic_cmpxchgl_le, TCG_CALL_NO_WG,
|
||||
i32, env, i64, i32, i32, i32)
|
||||
i32, env, tl, i32, i32, i32)
|
||||
#ifdef CONFIG_ATOMIC64
|
||||
DEF_HELPER_FLAGS_5(atomic_cmpxchgq_be, TCG_CALL_NO_WG,
|
||||
i64, env, i64, i64, i64, i32)
|
||||
i64, env, tl, i64, i64, i32)
|
||||
DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG,
|
||||
i64, env, i64, i64, i64, i32)
|
||||
i64, env, tl, i64, i64, i32)
|
||||
#endif
|
||||
#if HAVE_CMPXCHG128
|
||||
#ifdef CONFIG_CMPXCHG128
|
||||
DEF_HELPER_FLAGS_5(atomic_cmpxchgo_be, TCG_CALL_NO_WG,
|
||||
i128, env, i64, i128, i128, i32)
|
||||
i128, env, tl, i128, i128, i32)
|
||||
DEF_HELPER_FLAGS_5(atomic_cmpxchgo_le, TCG_CALL_NO_WG,
|
||||
i128, env, i64, i128, i128, i32)
|
||||
i128, env, tl, i128, i128, i32)
|
||||
#endif
|
||||
|
||||
DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo, TCG_CALL_NO_WG,
|
||||
i128, env, i64, i128, i128, i32)
|
||||
DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo_be, TCG_CALL_NO_WG,
|
||||
i128, env, tl, i128, i128, i32)
|
||||
DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo_le, TCG_CALL_NO_WG,
|
||||
i128, env, tl, i128, i128, i32)
|
||||
|
||||
#ifdef CONFIG_ATOMIC64
|
||||
#define GEN_ATOMIC_HELPERS(NAME) \
|
||||
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \
|
||||
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
|
||||
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
|
||||
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \
|
||||
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
|
||||
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
|
||||
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \
|
||||
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
|
||||
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
|
||||
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \
|
||||
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
|
||||
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
|
||||
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \
|
||||
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
|
||||
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
|
||||
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_le), \
|
||||
TCG_CALL_NO_WG, i64, env, i64, i64, i32) \
|
||||
TCG_CALL_NO_WG, i64, env, tl, i64, i32) \
|
||||
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_be), \
|
||||
TCG_CALL_NO_WG, i64, env, i64, i64, i32)
|
||||
TCG_CALL_NO_WG, i64, env, tl, i64, i32)
|
||||
#else
|
||||
#define GEN_ATOMIC_HELPERS(NAME) \
|
||||
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \
|
||||
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
|
||||
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
|
||||
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \
|
||||
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
|
||||
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
|
||||
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \
|
||||
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
|
||||
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
|
||||
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \
|
||||
TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
|
||||
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
|
||||
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \
|
||||
TCG_CALL_NO_WG, i32, env, i64, i32, i32)
|
||||
TCG_CALL_NO_WG, i32, env, tl, i32, i32)
|
||||
#endif /* CONFIG_ATOMIC64 */
|
||||
|
||||
GEN_ATOMIC_HELPERS(fetch_add)
|
||||
@@ -218,7 +217,6 @@ DEF_HELPER_FLAGS_4(gvec_nor, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_eqv, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
|
||||
|
||||
DEF_HELPER_FLAGS_4(gvec_ands, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_andcs, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_xors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_ors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
|
||||
@@ -297,29 +295,4 @@ DEF_HELPER_FLAGS_4(gvec_leu16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_leu32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_leu64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
|
||||
|
||||
DEF_HELPER_FLAGS_4(gvec_eqs8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_eqs16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_eqs32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_eqs64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
|
||||
DEF_HELPER_FLAGS_4(gvec_lts8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_lts16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_lts32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_lts64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
|
||||
DEF_HELPER_FLAGS_4(gvec_les8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_les16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_les32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_les64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
|
||||
DEF_HELPER_FLAGS_4(gvec_ltus8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_ltus16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_ltus32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_ltus64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
|
||||
DEF_HELPER_FLAGS_4(gvec_leus8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_leus16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_leus32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
DEF_HELPER_FLAGS_4(gvec_leus64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
|
||||
|
||||
DEF_HELPER_FLAGS_5(gvec_bitsel, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
|
||||
|
@@ -12,15 +12,3 @@ memory_notdirty_set_dirty(uint64_t vaddr) "0x%" PRIx64

# translate-all.c
translate_block(void *tb, uintptr_t pc, const void *tb_code) "tb:%p, pc:0x%"PRIxPTR", tb_code:%p"

# ldst_atomicity
load_atom2_fallback(uint32_t memop, uintptr_t ra) "mop:0x%"PRIx32", ra:0x%"PRIxPTR""
load_atom4_fallback(uint32_t memop, uintptr_t ra) "mop:0x%"PRIx32", ra:0x%"PRIxPTR""
load_atom8_or_exit_fallback(uintptr_t ra) "ra:0x%"PRIxPTR""
load_atom8_fallback(uint32_t memop, uintptr_t ra) "mop:0x%"PRIx32", ra:0x%"PRIxPTR""
load_atom16_fallback(uint32_t memop, uintptr_t ra) "mop:0x%"PRIx32", ra:0x%"PRIxPTR""
load_atom16_or_exit_fallback(uintptr_t ra) "ra:0x%"PRIxPTR""
store_atom2_fallback(uint32_t memop, uintptr_t ra) "mop:0x%"PRIx32", ra:0x%"PRIxPTR""
store_atom4_fallback(uint32_t memop, uintptr_t ra) "mop:0x%"PRIx32", ra:0x%"PRIxPTR""
store_atom8_fallback(uint32_t memop, uintptr_t ra) "mop:0x%"PRIx32", ra:0x%"PRIxPTR""
store_atom16_fallback(uint32_t memop, uintptr_t ra) "mop:0x%"PRIx32", ra:0x%"PRIxPTR""

@@ -19,6 +19,7 @@
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
|
||||
#define NO_CPU_IO_DEFS
|
||||
#include "trace.h"
|
||||
#include "disas/disas.h"
|
||||
#include "exec/exec-all.h"
|
||||
@@ -61,18 +62,19 @@
|
||||
#include "tb-jmp-cache.h"
|
||||
#include "tb-hash.h"
|
||||
#include "tb-context.h"
|
||||
#include "internal-common.h"
|
||||
#include "internal-target.h"
|
||||
#include "tcg/perf.h"
|
||||
#include "tcg/insn-start-words.h"
|
||||
#include "internal.h"
|
||||
#include "perf.h"
|
||||
|
||||
/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
|
||||
QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
|
||||
sizeof_field(TranslationBlock, trace_vcpu_dstate)
|
||||
* BITS_PER_BYTE);
|
||||
|
||||
TBContext tb_ctx;
|
||||
|
||||
/*
|
||||
* Encode VAL as a signed leb128 sequence at P.
|
||||
* Return P incremented past the encoded value.
|
||||
*/
|
||||
static uint8_t *encode_sleb128(uint8_t *p, int64_t val)
|
||||
/* Encode VAL as a signed leb128 sequence at P.
|
||||
Return P incremented past the encoded value. */
|
||||
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
|
||||
{
|
||||
int more, byte;
|
||||
|
||||
@@ -90,23 +92,21 @@ static uint8_t *encode_sleb128(uint8_t *p, int64_t val)
|
||||
return p;
|
||||
}
|
||||
|
||||
/*
|
||||
* Decode a signed leb128 sequence at *PP; increment *PP past the
|
||||
* decoded value. Return the decoded value.
|
||||
*/
|
||||
static int64_t decode_sleb128(const uint8_t **pp)
|
||||
/* Decode a signed leb128 sequence at *PP; increment *PP past the
|
||||
decoded value. Return the decoded value. */
|
||||
static target_long decode_sleb128(const uint8_t **pp)
|
||||
{
|
||||
const uint8_t *p = *pp;
|
||||
int64_t val = 0;
|
||||
target_long val = 0;
|
||||
int byte, shift = 0;
|
||||
|
||||
do {
|
||||
byte = *p++;
|
||||
val |= (int64_t)(byte & 0x7f) << shift;
|
||||
val |= (target_ulong)(byte & 0x7f) << shift;
|
||||
shift += 7;
|
||||
} while (byte & 0x80);
|
||||
if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
|
||||
val |= -(int64_t)1 << shift;
|
||||
val |= -(target_ulong)1 << shift;
|
||||
}
|
||||
|
||||
*pp = p;
|
||||
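A tiny round-trip sketch of the two helpers above: small deltas encode to a single byte, larger values spill into continuation bytes. Illustrative only; it assumes it lives next to the static encode_sleb128()/decode_sleb128() definitions in this file.

    static void example_sleb128_roundtrip(void)
    {
        uint8_t buf[16], *p = buf;
        const uint8_t *q = buf;

        p = encode_sleb128(p, -2);    /* 0x7e: sign bit 0x40 set, no continuation */
        p = encode_sleb128(p, 300);   /* 0xac 0x02: continuation bit on the first byte */

        assert(decode_sleb128(&q) == -2);
        assert(decode_sleb128(&q) == 300);
        assert(q == p);
    }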
@@ -128,26 +128,22 @@ static int64_t decode_sleb128(const uint8_t **pp)
|
||||
static int encode_search(TranslationBlock *tb, uint8_t *block)
|
||||
{
|
||||
uint8_t *highwater = tcg_ctx->code_gen_highwater;
|
||||
uint64_t *insn_data = tcg_ctx->gen_insn_data;
|
||||
uint16_t *insn_end_off = tcg_ctx->gen_insn_end_off;
|
||||
uint8_t *p = block;
|
||||
int i, j, n;
|
||||
|
||||
for (i = 0, n = tb->icount; i < n; ++i) {
|
||||
uint64_t prev, curr;
|
||||
target_ulong prev;
|
||||
|
||||
for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
|
||||
if (i == 0) {
|
||||
prev = (!(tb_cflags(tb) & CF_PCREL) && j == 0 ? tb->pc : 0);
|
||||
} else {
|
||||
prev = insn_data[(i - 1) * TARGET_INSN_START_WORDS + j];
|
||||
prev = tcg_ctx->gen_insn_data[i - 1][j];
|
||||
}
|
||||
curr = insn_data[i * TARGET_INSN_START_WORDS + j];
|
||||
p = encode_sleb128(p, curr - prev);
|
||||
p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
|
||||
}
|
||||
prev = (i == 0 ? 0 : insn_end_off[i - 1]);
|
||||
curr = insn_end_off[i];
|
||||
p = encode_sleb128(p, curr - prev);
|
||||
prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
|
||||
p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);
|
||||
|
||||
/* Test for (pending) buffer overflow. The assumption is that any
|
||||
one row beginning below the high water mark cannot overrun
|
||||
@@ -203,6 +199,10 @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
|
||||
uintptr_t host_pc)
|
||||
{
|
||||
uint64_t data[TARGET_INSN_START_WORDS];
|
||||
#ifdef CONFIG_PROFILER
|
||||
TCGProfile *prof = &tcg_ctx->prof;
|
||||
int64_t ti = profile_getclock();
|
||||
#endif
|
||||
int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data);
|
||||
|
||||
if (insns_left < 0) {
|
||||
@@ -215,10 +215,16 @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
|
||||
* Reset the cycle counter to the start of the block and
|
||||
* shift if to the number of actually executed instructions.
|
||||
*/
|
||||
cpu->neg.icount_decr.u16.low += insns_left;
|
||||
cpu_neg(cpu)->icount_decr.u16.low += insns_left;
|
||||
}
|
||||
|
||||
cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data);
|
||||
|
||||
#ifdef CONFIG_PROFILER
|
||||
qatomic_set(&prof->restore_time,
|
||||
prof->restore_time + profile_getclock() - ti);
|
||||
qatomic_set(&prof->restore_count, prof->restore_count + 1);
|
||||
#endif
|
||||
}
|
||||
|
||||
bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc)
|
||||
@@ -256,6 +262,7 @@ bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data)
|
||||
|
||||
void page_init(void)
|
||||
{
|
||||
page_size_init();
|
||||
page_table_config_init();
|
||||
}
|
||||
|
||||
@@ -264,7 +271,7 @@ void page_init(void)
|
||||
* Return the size of the generated code, or negative on error.
|
||||
*/
|
||||
static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
|
||||
vaddr pc, void *host_pc,
|
||||
target_ulong pc, void *host_pc,
|
||||
int *max_insns, int64_t *ti)
|
||||
{
|
||||
int ret = sigsetjmp(tcg_ctx->jmp_trans, 0);
|
||||
@@ -280,19 +287,29 @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
|
||||
tcg_ctx->cpu = NULL;
|
||||
*max_insns = tb->icount;
|
||||
|
||||
#ifdef CONFIG_PROFILER
|
||||
qatomic_set(&tcg_ctx->prof.tb_count, tcg_ctx->prof.tb_count + 1);
|
||||
qatomic_set(&tcg_ctx->prof.interm_time,
|
||||
tcg_ctx->prof.interm_time + profile_getclock() - *ti);
|
||||
*ti = profile_getclock();
|
||||
#endif
|
||||
|
||||
return tcg_gen_code(tcg_ctx, tb, pc);
|
||||
}
|
||||
|
||||
/* Called with mmap_lock held for user mode emulation. */
|
||||
TranslationBlock *tb_gen_code(CPUState *cpu,
|
||||
vaddr pc, uint64_t cs_base,
|
||||
target_ulong pc, target_ulong cs_base,
|
||||
uint32_t flags, int cflags)
|
||||
{
|
||||
CPUArchState *env = cpu_env(cpu);
|
||||
CPUArchState *env = cpu->env_ptr;
|
||||
TranslationBlock *tb, *existing_tb;
|
||||
tb_page_addr_t phys_pc, phys_p2;
|
||||
tb_page_addr_t phys_pc;
|
||||
tcg_insn_unit *gen_code_buf;
|
||||
int gen_code_size, search_size, max_insns;
|
||||
#ifdef CONFIG_PROFILER
|
||||
TCGProfile *prof = &tcg_ctx->prof;
|
||||
#endif
|
||||
int64_t ti;
|
||||
void *host_pc;
|
||||
|
||||
@@ -303,7 +320,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
|
||||
|
||||
if (phys_pc == -1) {
|
||||
/* Generate a one-shot TB with 1 insn in it */
|
||||
cflags = (cflags & ~CF_COUNT_MASK) | 1;
|
||||
cflags = (cflags & ~CF_COUNT_MASK) | CF_LAST_IO | 1;
|
||||
}
|
||||
|
||||
max_insns = cflags & CF_COUNT_MASK;
|
||||
@@ -313,7 +330,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
|
||||
QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);
|
||||
|
||||
buffer_overflow:
|
||||
assert_no_pages_locked();
|
||||
tb = tcg_tb_alloc(tcg_ctx);
|
||||
if (unlikely(!tb)) {
|
||||
/* flush must be done */
|
||||
@@ -332,27 +348,18 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
|
||||
tb->cs_base = cs_base;
|
||||
tb->flags = flags;
|
||||
tb->cflags = cflags;
|
||||
tb->trace_vcpu_dstate = *cpu->trace_dstate;
|
||||
tb_set_page_addr0(tb, phys_pc);
|
||||
tb_set_page_addr1(tb, -1);
|
||||
if (phys_pc != -1) {
|
||||
tb_lock_page0(phys_pc);
|
||||
}
|
||||
|
||||
tcg_ctx->gen_tb = tb;
|
||||
tcg_ctx->addr_type = TARGET_LONG_BITS == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64;
|
||||
#ifdef CONFIG_SOFTMMU
|
||||
tcg_ctx->page_bits = TARGET_PAGE_BITS;
|
||||
tcg_ctx->page_mask = TARGET_PAGE_MASK;
|
||||
tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS;
|
||||
#endif
|
||||
tcg_ctx->insn_start_words = TARGET_INSN_START_WORDS;
|
||||
#ifdef TCG_GUEST_DEFAULT_MO
|
||||
tcg_ctx->guest_mo = TCG_GUEST_DEFAULT_MO;
|
||||
#else
|
||||
tcg_ctx->guest_mo = TCG_MO_ALL;
|
||||
tb_overflow:
|
||||
|
||||
#ifdef CONFIG_PROFILER
|
||||
/* includes aborted translations because of exceptions */
|
||||
qatomic_set(&prof->tb_count1, prof->tb_count1 + 1);
|
||||
ti = profile_getclock();
|
||||
#endif
|
||||
|
||||
restart_translate:
|
||||
trace_translate_block(tb, pc, tb->tc.ptr);
|
||||
|
||||
gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti);
|
||||
@@ -371,8 +378,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
|
||||
qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
|
||||
"Restarting code generation for "
|
||||
"code_gen_buffer overflow\n");
|
||||
tb_unlock_pages(tb);
|
||||
tcg_ctx->gen_tb = NULL;
|
||||
goto buffer_overflow;
|
||||
|
||||
case -2:
|
||||
@@ -391,39 +396,14 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
|
||||
"Restarting code generation with "
|
||||
"smaller translation block (max %d insns)\n",
|
||||
max_insns);
|
||||
|
||||
/*
|
||||
* The half-sized TB may not cross pages.
|
||||
* TODO: Fix all targets that cross pages except with
|
||||
* the first insn, at which point this can't be reached.
|
||||
*/
|
||||
phys_p2 = tb_page_addr1(tb);
|
||||
if (unlikely(phys_p2 != -1)) {
|
||||
tb_unlock_page1(phys_pc, phys_p2);
|
||||
tb_set_page_addr1(tb, -1);
|
||||
}
|
||||
goto restart_translate;
|
||||
|
||||
case -3:
|
||||
/*
|
||||
* We had a page lock ordering problem. In order to avoid
|
||||
* deadlock we had to drop the lock on page0, which means
|
||||
* that everything we translated so far is compromised.
|
||||
* Restart with locks held on both pages.
|
||||
*/
|
||||
qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
|
||||
"Restarting code generation with re-locked pages");
|
||||
goto restart_translate;
|
||||
goto tb_overflow;
|
||||
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
}
|
||||
tcg_ctx->gen_tb = NULL;
|
||||
|
||||
search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
|
||||
if (unlikely(search_size < 0)) {
|
||||
tb_unlock_pages(tb);
|
||||
goto buffer_overflow;
|
||||
}
|
||||
tb->tc.size = gen_code_size;
|
||||
@@ -434,6 +414,14 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
|
||||
*/
|
||||
perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf));
|
||||
|
||||
#ifdef CONFIG_PROFILER
|
||||
qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
|
||||
qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
|
||||
qatomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
|
||||
qatomic_set(&prof->search_out_len, prof->search_out_len + search_size);
|
||||
#endif
|
||||
|
||||
#ifdef DEBUG_DISAS
|
||||
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
|
||||
qemu_log_in_addr_range(pc)) {
|
||||
FILE *logfile = qemu_log_trylock();
|
||||
@@ -456,8 +444,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
|
||||
/* Dump header and the first instruction */
|
||||
fprintf(logfile, "OUT: [size=%d]\n", gen_code_size);
|
||||
fprintf(logfile,
|
||||
" -- guest addr 0x%016" PRIx64 " + tb prologue\n",
|
||||
tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
|
||||
" -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n",
|
||||
tcg_ctx->gen_insn_data[insn][0]);
|
||||
chunk_start = tcg_ctx->gen_insn_end_off[insn];
|
||||
disas(logfile, tb->tc.ptr, chunk_start);
|
||||
|
||||
@@ -469,8 +457,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
|
||||
while (insn < tb->icount) {
|
||||
size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
|
||||
if (chunk_end > chunk_start) {
|
||||
fprintf(logfile, " -- guest addr 0x%016" PRIx64 "\n",
|
||||
tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
|
||||
fprintf(logfile, " -- guest addr 0x" TARGET_FMT_lx "\n",
|
||||
tcg_ctx->gen_insn_data[insn][0]);
|
||||
disas(logfile, tb->tc.ptr + chunk_start,
|
||||
chunk_end - chunk_start);
|
||||
chunk_start = chunk_end;
|
||||
@@ -506,6 +494,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
|
||||
qemu_log_unlock(logfile);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
|
||||
ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
|
||||
@@ -533,7 +522,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
|
||||
* before attempting to link to other TBs or add to the lookup table.
|
||||
*/
|
||||
if (tb_page_addr0(tb) == -1) {
|
||||
assert_no_pages_locked();
|
||||
return tb;
|
||||
}
|
||||
|
||||
@@ -548,9 +536,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
|
||||
* No explicit memory barrier is required -- tb_link_page() makes the
|
||||
* TB visible in a consistent state.
|
||||
*/
|
||||
existing_tb = tb_link_page(tb);
|
||||
assert_no_pages_locked();
|
||||
|
||||
existing_tb = tb_link_page(tb, tb_page_addr0(tb), tb_page_addr1(tb));
|
||||
/* if the TB already exists, discard what we just translated */
|
||||
if (unlikely(existing_tb != tb)) {
|
||||
uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
|
||||
@@ -578,9 +564,8 @@ void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
|
||||
} else {
|
||||
/* The exception probably happened in a helper. The CPU state should
|
||||
have been saved before calling it. Fetch the PC from there. */
|
||||
CPUArchState *env = cpu_env(cpu);
|
||||
vaddr pc;
|
||||
uint64_t cs_base;
|
||||
CPUArchState *env = cpu->env_ptr;
|
||||
target_ulong pc, cs_base;
|
||||
tb_page_addr_t addr;
|
||||
uint32_t flags;
|
||||
|
||||
@@ -621,7 +606,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
|
||||
cc = CPU_GET_CLASS(cpu);
|
||||
if (cc->tcg_ops->io_recompile_replay_branch &&
|
||||
cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
|
||||
cpu->neg.icount_decr.u16.low++;
|
||||
cpu_neg(cpu)->icount_decr.u16.low++;
|
||||
n = 2;
|
||||
}
|
||||
|
||||
@@ -631,19 +616,155 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
|
||||
* operations only (which execute after completion) so we don't
|
||||
* double instrument the instruction.
|
||||
*/
|
||||
cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | n;
|
||||
cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;
|
||||
|
||||
if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
|
||||
vaddr pc = cpu->cc->get_pc(cpu);
|
||||
target_ulong pc = log_pc(cpu, tb);
|
||||
if (qemu_log_in_addr_range(pc)) {
|
||||
qemu_log("cpu_io_recompile: rewound execution of TB to %016"
|
||||
VADDR_PRIx "\n", pc);
|
||||
qemu_log("cpu_io_recompile: rewound execution of TB to "
|
||||
TARGET_FMT_lx "\n", pc);
|
||||
}
|
||||
}
|
||||
|
||||
cpu_loop_exit_noexc(cpu);
|
||||
}
|
||||
|
||||
static void print_qht_statistics(struct qht_stats hst, GString *buf)
|
||||
{
|
||||
uint32_t hgram_opts;
|
||||
size_t hgram_bins;
|
||||
char *hgram;
|
||||
|
||||
if (!hst.head_buckets) {
|
||||
return;
|
||||
}
|
||||
g_string_append_printf(buf, "TB hash buckets %zu/%zu "
|
||||
"(%0.2f%% head buckets used)\n",
|
||||
hst.used_head_buckets, hst.head_buckets,
|
||||
(double)hst.used_head_buckets /
|
||||
hst.head_buckets * 100);
|
||||
|
||||
hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
|
||||
hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
|
||||
if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
|
||||
hgram_opts |= QDIST_PR_NODECIMAL;
|
||||
}
|
||||
hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
|
||||
g_string_append_printf(buf, "TB hash occupancy %0.2f%% avg chain occ. "
|
||||
"Histogram: %s\n",
|
||||
qdist_avg(&hst.occupancy) * 100, hgram);
|
||||
g_free(hgram);
|
||||
|
||||
hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
|
||||
hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
|
||||
if (hgram_bins > 10) {
|
||||
hgram_bins = 10;
|
||||
} else {
|
||||
hgram_bins = 0;
|
||||
hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
|
||||
}
|
||||
hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
|
||||
g_string_append_printf(buf, "TB hash avg chain %0.3f buckets. "
|
||||
"Histogram: %s\n",
|
||||
qdist_avg(&hst.chain), hgram);
|
||||
g_free(hgram);
|
||||
}
|
||||
|
||||
struct tb_tree_stats {
|
||||
size_t nb_tbs;
|
||||
size_t host_size;
|
||||
size_t target_size;
|
||||
size_t max_target_size;
|
||||
size_t direct_jmp_count;
|
||||
size_t direct_jmp2_count;
|
||||
size_t cross_page;
|
||||
};
|
||||
|
||||
static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
|
||||
{
|
||||
const TranslationBlock *tb = value;
|
||||
struct tb_tree_stats *tst = data;
|
||||
|
||||
tst->nb_tbs++;
|
||||
tst->host_size += tb->tc.size;
|
||||
tst->target_size += tb->size;
|
||||
if (tb->size > tst->max_target_size) {
|
||||
tst->max_target_size = tb->size;
|
||||
}
|
||||
if (tb_page_addr1(tb) != -1) {
|
||||
tst->cross_page++;
|
||||
}
|
||||
if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {
|
||||
tst->direct_jmp_count++;
|
||||
if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
|
||||
tst->direct_jmp2_count++;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void dump_exec_info(GString *buf)
|
||||
{
|
||||
struct tb_tree_stats tst = {};
|
||||
struct qht_stats hst;
|
||||
size_t nb_tbs, flush_full, flush_part, flush_elide;
|
||||
|
||||
tcg_tb_foreach(tb_tree_stats_iter, &tst);
|
||||
nb_tbs = tst.nb_tbs;
|
||||
/* XXX: avoid using doubles ? */
|
||||
g_string_append_printf(buf, "Translation buffer state:\n");
|
||||
/*
|
||||
* Report total code size including the padding and TB structs;
|
||||
* otherwise users might think "-accel tcg,tb-size" is not honoured.
|
||||
* For avg host size we use the precise numbers from tb_tree_stats though.
|
||||
*/
|
||||
g_string_append_printf(buf, "gen code size %zu/%zu\n",
|
||||
tcg_code_size(), tcg_code_capacity());
|
||||
g_string_append_printf(buf, "TB count %zu\n", nb_tbs);
|
||||
g_string_append_printf(buf, "TB avg target size %zu max=%zu bytes\n",
|
||||
nb_tbs ? tst.target_size / nb_tbs : 0,
|
||||
tst.max_target_size);
|
||||
g_string_append_printf(buf, "TB avg host size %zu bytes "
|
||||
"(expansion ratio: %0.1f)\n",
|
||||
nb_tbs ? tst.host_size / nb_tbs : 0,
|
||||
tst.target_size ?
|
||||
(double)tst.host_size / tst.target_size : 0);
|
||||
g_string_append_printf(buf, "cross page TB count %zu (%zu%%)\n",
|
||||
tst.cross_page,
|
||||
nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
|
||||
g_string_append_printf(buf, "direct jump count %zu (%zu%%) "
|
||||
"(2 jumps=%zu %zu%%)\n",
|
||||
tst.direct_jmp_count,
|
||||
nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
|
||||
tst.direct_jmp2_count,
|
||||
nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);
|
||||
|
||||
qht_statistics_init(&tb_ctx.htable, &hst);
|
||||
print_qht_statistics(hst, buf);
|
||||
qht_statistics_destroy(&hst);
|
||||
|
||||
g_string_append_printf(buf, "\nStatistics:\n");
|
||||
g_string_append_printf(buf, "TB flush count %u\n",
|
||||
qatomic_read(&tb_ctx.tb_flush_count));
|
||||
g_string_append_printf(buf, "TB invalidate count %u\n",
|
||||
qatomic_read(&tb_ctx.tb_phys_invalidate_count));
|
||||
|
||||
tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
|
||||
g_string_append_printf(buf, "TLB full flushes %zu\n", flush_full);
|
||||
g_string_append_printf(buf, "TLB partial flushes %zu\n", flush_part);
|
||||
g_string_append_printf(buf, "TLB elided flushes %zu\n", flush_elide);
|
||||
tcg_dump_info(buf);
|
||||
}
|
||||
|
||||
#else /* CONFIG_USER_ONLY */
|
||||
|
||||
void cpu_interrupt(CPUState *cpu, int mask)
|
||||
{
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
cpu->interrupt_request |= mask;
|
||||
qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_USER_ONLY */
|
||||
|
||||
/*
|
||||
@@ -663,3 +784,11 @@ void tcg_flush_jmp_cache(CPUState *cpu)
|
||||
qatomic_set(&jc->array[i].tb, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
/* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
|
||||
void tcg_flush_softmmu_tlb(CPUState *cs)
|
||||
{
|
||||
#ifdef CONFIG_SOFTMMU
|
||||
tlb_flush(cs);
|
||||
#endif
|
||||
}
|
||||
|
@@ -8,101 +8,17 @@
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/log.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "tcg/tcg.h"
|
||||
#include "tcg/tcg-op.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "exec/gen-icount.h"
|
||||
#include "exec/log.h"
|
||||
#include "exec/translator.h"
|
||||
#include "exec/cpu_ldst.h"
|
||||
#include "exec/plugin-gen.h"
|
||||
#include "exec/cpu_ldst.h"
|
||||
#include "tcg/tcg-op-common.h"
|
||||
#include "internal-target.h"
|
||||
#include "disas/disas.h"
|
||||
#include "exec/replay-core.h"
|
||||
|
||||
static void set_can_do_io(DisasContextBase *db, bool val)
|
||||
{
|
||||
QEMU_BUILD_BUG_ON(sizeof_field(CPUState, neg.can_do_io) != 1);
|
||||
tcg_gen_st8_i32(tcg_constant_i32(val), tcg_env,
|
||||
offsetof(ArchCPU, parent_obj.neg.can_do_io) -
|
||||
offsetof(ArchCPU, env));
|
||||
}
|
||||
|
||||
bool translator_io_start(DisasContextBase *db)
|
||||
{
|
||||
/*
|
||||
* Ensure that this instruction will be the last in the TB.
|
||||
* The target may override this to something more forceful.
|
||||
*/
|
||||
if (db->is_jmp == DISAS_NEXT) {
|
||||
db->is_jmp = DISAS_TOO_MANY;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
static TCGOp *gen_tb_start(DisasContextBase *db, uint32_t cflags)
|
||||
{
|
||||
TCGv_i32 count = NULL;
|
||||
TCGOp *icount_start_insn = NULL;
|
||||
|
||||
if ((cflags & CF_USE_ICOUNT) || !(cflags & CF_NOIRQ)) {
|
||||
count = tcg_temp_new_i32();
|
||||
tcg_gen_ld_i32(count, tcg_env,
|
||||
offsetof(ArchCPU, parent_obj.neg.icount_decr.u32)
|
||||
- offsetof(ArchCPU, env));
|
||||
}
|
||||
|
||||
if (cflags & CF_USE_ICOUNT) {
|
||||
/*
|
||||
* We emit a sub with a dummy immediate argument. Keep the insn index
|
||||
* of the sub so that we later (when we know the actual insn count)
|
||||
* can update the argument with the actual insn count.
|
||||
*/
|
||||
tcg_gen_sub_i32(count, count, tcg_constant_i32(0));
|
||||
icount_start_insn = tcg_last_op();
|
||||
}
|
||||
|
||||
/*
|
||||
* Emit the check against icount_decr.u32 to see if we should exit
|
||||
* unless we suppress the check with CF_NOIRQ. If we are using
|
||||
* icount and have suppressed interruption the higher level code
|
||||
* should have ensured we don't run more instructions than the
|
||||
* budget.
|
||||
*/
|
||||
if (cflags & CF_NOIRQ) {
|
||||
tcg_ctx->exitreq_label = NULL;
|
||||
} else {
|
||||
tcg_ctx->exitreq_label = gen_new_label();
|
||||
tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, tcg_ctx->exitreq_label);
|
||||
}
|
||||
|
||||
if (cflags & CF_USE_ICOUNT) {
|
||||
tcg_gen_st16_i32(count, tcg_env,
|
||||
offsetof(ArchCPU, parent_obj.neg.icount_decr.u16.low)
|
||||
- offsetof(ArchCPU, env));
|
||||
}
|
||||
|
||||
return icount_start_insn;
|
||||
}
|
||||
|
||||
static void gen_tb_end(const TranslationBlock *tb, uint32_t cflags,
|
||||
TCGOp *icount_start_insn, int num_insns)
|
||||
{
|
||||
if (cflags & CF_USE_ICOUNT) {
|
||||
/*
|
||||
* Update the num_insn immediate parameter now that we know
|
||||
* the actual insn count.
|
||||
*/
|
||||
tcg_set_insn_param(icount_start_insn, 2,
|
||||
tcgv_i32_arg(tcg_constant_i32(num_insns)));
|
||||
}
|
||||
|
||||
if (tcg_ctx->exitreq_label) {
|
||||
gen_set_label(tcg_ctx->exitreq_label);
|
||||
tcg_gen_exit_tb(tb, TB_EXIT_REQUESTED);
|
||||
}
|
||||
}
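/*
 * [Illustration]  The pair above uses a record-then-patch pattern:
 * gen_tb_start() emits the icount subtraction with a dummy immediate and
 * remembers the op, and gen_tb_end() rewrites that argument through
 * tcg_set_insn_param() once the real instruction count is known.  The same
 * idea over a made-up pseudo-op array (all names below are invented for
 * illustration only, not TCG API):
 */
#include <assert.h>
#include <stddef.h>

struct FakeOp { int opcode; int arg; };
enum { OP_SUB_IMM = 1 };

static struct FakeOp ops[64];
static size_t n_ops;

/* Emit the op with a placeholder argument and remember its position. */
static size_t emit_sub_placeholder(void)
{
    assert(n_ops < 64);
    ops[n_ops] = (struct FakeOp){ .opcode = OP_SUB_IMM, .arg = 0 };
    return n_ops++;
}

/* Patch the recorded op in place once the insn count is known. */
static void patch_sub(size_t idx, int num_insns)
{
    assert(ops[idx].opcode == OP_SUB_IMM);
    ops[idx].arg = num_insns;
}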
bool translator_use_goto_tb(DisasContextBase *db, vaddr dest)
|
||||
bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
|
||||
{
|
||||
/* Suppress goto_tb if requested. */
|
||||
if (tb_cflags(db->tb) & CF_NO_GOTO_TB) {
|
||||
@@ -114,12 +30,10 @@ bool translator_use_goto_tb(DisasContextBase *db, vaddr dest)
|
||||
}
|
||||
|
||||
void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
|
||||
vaddr pc, void *host_pc, const TranslatorOps *ops,
|
||||
DisasContextBase *db)
|
||||
target_ulong pc, void *host_pc,
|
||||
const TranslatorOps *ops, DisasContextBase *db)
|
||||
{
|
||||
uint32_t cflags = tb_cflags(tb);
|
||||
TCGOp *icount_start_insn;
|
||||
TCGOp *first_insn_start = NULL;
|
||||
bool plugin_enabled;
|
||||
|
||||
/* Initialize DisasContext */
|
||||
@@ -129,44 +43,46 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
|
||||
db->is_jmp = DISAS_NEXT;
|
||||
db->num_insns = 0;
|
||||
db->max_insns = *max_insns;
|
||||
db->insn_start = NULL;
|
||||
db->fake_insn = false;
|
||||
db->singlestep_enabled = cflags & CF_SINGLE_STEP;
|
||||
db->host_addr[0] = host_pc;
|
||||
db->host_addr[1] = NULL;
|
||||
db->record_start = 0;
|
||||
db->record_len = 0;
|
||||
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
page_protect(pc);
|
||||
#endif
|
||||
|
||||
ops->init_disas_context(db, cpu);
|
||||
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
|
||||
|
||||
/* Start translating. */
|
||||
icount_start_insn = gen_tb_start(db, cflags);
|
||||
gen_tb_start(db->tb);
|
||||
ops->tb_start(db, cpu);
|
||||
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
|
||||
|
||||
plugin_enabled = plugin_gen_tb_start(cpu, db);
|
||||
db->plugin_enabled = plugin_enabled;
|
||||
plugin_enabled = plugin_gen_tb_start(cpu, db, cflags & CF_MEMI_ONLY);
|
||||
|
||||
while (true) {
|
||||
*max_insns = ++db->num_insns;
|
||||
ops->insn_start(db, cpu);
|
||||
db->insn_start = tcg_last_op();
|
||||
if (first_insn_start == NULL) {
|
||||
first_insn_start = db->insn_start;
|
||||
}
|
||||
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
|
||||
|
||||
if (plugin_enabled) {
|
||||
plugin_gen_insn_start(cpu, db);
|
||||
}
|
||||
|
||||
/*
|
||||
* Disassemble one instruction. The translate_insn hook should
|
||||
* update db->pc_next and db->is_jmp to indicate what should be
|
||||
* done next -- either exiting this loop or locate the start of
|
||||
* the next instruction.
|
||||
*/
|
||||
ops->translate_insn(db, cpu);
|
||||
/* Disassemble one instruction. The translate_insn hook should
|
||||
update db->pc_next and db->is_jmp to indicate what should be
|
||||
done next -- either exiting this loop or locate the start of
|
||||
the next instruction. */
|
||||
if (db->num_insns == db->max_insns && (cflags & CF_LAST_IO)) {
|
||||
/* Accept I/O on the last instruction. */
|
||||
gen_io_start();
|
||||
ops->translate_insn(db, cpu);
|
||||
} else {
|
||||
/* we should only see CF_MEMI_ONLY for io_recompile */
|
||||
tcg_debug_assert(!(cflags & CF_MEMI_ONLY));
|
||||
ops->translate_insn(db, cpu);
|
||||
}
|
||||
|
||||
/*
|
||||
* We can't instrument after instructions that change control
|
||||
@@ -196,279 +112,136 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
|
||||
|
||||
/* Emit code to exit the TB, as indicated by db->is_jmp. */
|
||||
ops->tb_stop(db, cpu);
|
||||
gen_tb_end(tb, cflags, icount_start_insn, db->num_insns);
|
||||
gen_tb_end(db->tb, db->num_insns);
|
||||
|
||||
/*
|
||||
* Manage can_do_io for the translation block: set to false before
|
||||
* the first insn and set to true before the last insn.
|
||||
*/
|
||||
if (db->num_insns == 1) {
|
||||
tcg_debug_assert(first_insn_start == db->insn_start);
|
||||
} else {
|
||||
tcg_debug_assert(first_insn_start != db->insn_start);
|
||||
tcg_ctx->emit_before_op = first_insn_start;
|
||||
set_can_do_io(db, false);
|
||||
if (plugin_enabled) {
|
||||
plugin_gen_tb_end(cpu);
|
||||
}
|
||||
tcg_ctx->emit_before_op = db->insn_start;
|
||||
set_can_do_io(db, true);
|
||||
tcg_ctx->emit_before_op = NULL;
|
||||
|
||||
/* May be used by disas_log or plugin callbacks. */
|
||||
/* The disas_log hook may use these values rather than recompute. */
|
||||
tb->size = db->pc_next - db->pc_first;
|
||||
tb->icount = db->num_insns;
|
||||
|
||||
if (plugin_enabled) {
|
||||
plugin_gen_tb_end(cpu, db->num_insns);
|
||||
}
|
||||
|
||||
#ifdef DEBUG_DISAS
|
||||
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
|
||||
&& qemu_log_in_addr_range(db->pc_first)) {
|
||||
FILE *logfile = qemu_log_trylock();
|
||||
if (logfile) {
|
||||
fprintf(logfile, "----------------\n");
|
||||
|
||||
if (!ops->disas_log ||
|
||||
!ops->disas_log(db, cpu, logfile)) {
|
||||
fprintf(logfile, "IN: %s\n", lookup_symbol(db->pc_first));
|
||||
target_disas(logfile, cpu, db);
|
||||
}
|
||||
ops->disas_log(db, cpu, logfile);
|
||||
fprintf(logfile, "\n");
|
||||
qemu_log_unlock(logfile);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
static bool translator_ld(CPUArchState *env, DisasContextBase *db,
|
||||
void *dest, vaddr pc, size_t len)
|
||||
static void *translator_access(CPUArchState *env, DisasContextBase *db,
|
||||
target_ulong pc, size_t len)
|
||||
{
|
||||
TranslationBlock *tb = db->tb;
|
||||
vaddr last = pc + len - 1;
|
||||
void *host;
|
||||
vaddr base;
|
||||
target_ulong base, end;
|
||||
TranslationBlock *tb;
|
||||
|
||||
tb = db->tb;
|
||||
|
||||
/* Use slow path if first page is MMIO. */
|
||||
if (unlikely(tb_page_addr0(tb) == -1)) {
|
||||
/* We capped translation with first page MMIO in tb_gen_code. */
|
||||
tcg_debug_assert(db->max_insns == 1);
|
||||
return false;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
host = db->host_addr[0];
|
||||
base = db->pc_first;
|
||||
|
||||
if (likely(((base ^ last) & TARGET_PAGE_MASK) == 0)) {
|
||||
/* Entire read is from the first page. */
|
||||
memcpy(dest, host + (pc - base), len);
|
||||
return true;
|
||||
}
|
||||
|
||||
if (unlikely(((base ^ pc) & TARGET_PAGE_MASK) == 0)) {
|
||||
/* Read begins on the first page and extends to the second. */
|
||||
size_t len0 = -(pc | TARGET_PAGE_MASK);
|
||||
memcpy(dest, host + (pc - base), len0);
|
||||
pc += len0;
|
||||
dest += len0;
|
||||
len -= len0;
|
||||
}
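/*
 * [Worked example]  With 4KiB pages and a 64-bit address, TARGET_PAGE_MASK
 * is ~0xfff, so for pc = 0x40000ffe:
 *     pc | TARGET_PAGE_MASK  = 0xfffffffffffffffe
 *   -(pc | TARGET_PAGE_MASK) = 2
 * i.e. len0 is exactly the number of bytes left before the page boundary,
 * which is the size of the first chunk of a page-crossing read.
 */
#include <assert.h>
#include <stdint.h>

static void check_len0_example(void)
{
    const uint64_t page_mask = ~(uint64_t)0xfff;    /* assumes 4KiB pages */
    uint64_t pc = 0x40000ffeULL;

    assert((uint64_t)-(pc | page_mask) == 2);
}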
/*
|
||||
* The read must conclude on the second page and not extend to a third.
|
||||
*
|
||||
* TODO: We could allow the two pages to be virtually discontiguous,
|
||||
* since we already allow the two pages to be physically discontiguous.
|
||||
* The only reasonable use case would be executing an insn at the end
|
||||
* of the address space wrapping around to the beginning. For that,
|
||||
* we would need to know the current width of the address space.
|
||||
* In the meantime, assert.
|
||||
*/
|
||||
base = (base & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
|
||||
assert(((base ^ pc) & TARGET_PAGE_MASK) == 0);
|
||||
assert(((base ^ last) & TARGET_PAGE_MASK) == 0);
|
||||
host = db->host_addr[1];
|
||||
|
||||
if (host == NULL) {
|
||||
tb_page_addr_t page0, old_page1, new_page1;
|
||||
|
||||
new_page1 = get_page_addr_code_hostp(env, base, &db->host_addr[1]);
|
||||
|
||||
/*
|
||||
* If the second page is MMIO, treat as if the first page
|
||||
* was MMIO as well, so that we do not cache the TB.
|
||||
*/
|
||||
if (unlikely(new_page1 == -1)) {
|
||||
tb_unlock_pages(tb);
|
||||
tb_set_page_addr0(tb, -1);
|
||||
/* Require that this be the final insn. */
|
||||
db->max_insns = db->num_insns;
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* If this is not the first time around, and page1 matches,
|
||||
* then we already have the page locked. Alternately, we're
|
||||
* not doing anything to prevent the PTE from changing, so
|
||||
* we might wind up with a different page, requiring us to
|
||||
* re-do the locking.
|
||||
*/
|
||||
old_page1 = tb_page_addr1(tb);
|
||||
if (likely(new_page1 != old_page1)) {
|
||||
page0 = tb_page_addr0(tb);
|
||||
if (unlikely(old_page1 != -1)) {
|
||||
tb_unlock_page1(page0, old_page1);
|
||||
}
|
||||
tb_set_page_addr1(tb, new_page1);
|
||||
tb_lock_page1(page0, new_page1);
|
||||
}
|
||||
end = pc + len - 1;
|
||||
if (likely(is_same_page(db, end))) {
|
||||
host = db->host_addr[0];
|
||||
base = db->pc_first;
|
||||
} else {
|
||||
host = db->host_addr[1];
|
||||
}
|
||||
base = TARGET_PAGE_ALIGN(db->pc_first);
|
||||
if (host == NULL) {
|
||||
tb_page_addr_t phys_page =
|
||||
get_page_addr_code_hostp(env, base, &db->host_addr[1]);
|
||||
|
||||
memcpy(dest, host + (pc - base), len);
|
||||
return true;
|
||||
}
|
||||
|
||||
static void record_save(DisasContextBase *db, vaddr pc,
|
||||
const void *from, int size)
|
||||
{
|
||||
int offset;
|
||||
|
||||
/* Do not record probes before the start of TB. */
|
||||
if (pc < db->pc_first) {
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* In translator_access, we verified that pc is within 2 pages
|
||||
* of pc_first, thus this will never overflow.
|
||||
*/
|
||||
offset = pc - db->pc_first;
|
||||
|
||||
/*
|
||||
* Either the first or second page may be I/O. If it is the second,
|
||||
* then the first byte we need to record will be at a non-zero offset.
|
||||
* In either case, we should not need to record but a single insn.
|
||||
*/
|
||||
if (db->record_len == 0) {
|
||||
db->record_start = offset;
|
||||
db->record_len = size;
|
||||
} else {
|
||||
assert(offset == db->record_start + db->record_len);
|
||||
assert(db->record_len + size <= sizeof(db->record));
|
||||
db->record_len += size;
|
||||
}
|
||||
|
||||
memcpy(db->record + (offset - db->record_start), from, size);
|
||||
}
|
||||
|
||||
size_t translator_st_len(const DisasContextBase *db)
|
||||
{
|
||||
return db->fake_insn ? db->record_len : db->tb->size;
|
||||
}
|
||||
|
||||
bool translator_st(const DisasContextBase *db, void *dest,
|
||||
vaddr addr, size_t len)
|
||||
{
|
||||
size_t offset, offset_end;
|
||||
|
||||
if (addr < db->pc_first) {
|
||||
return false;
|
||||
}
|
||||
offset = addr - db->pc_first;
|
||||
offset_end = offset + len;
|
||||
if (offset_end > translator_st_len(db)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!db->fake_insn) {
|
||||
size_t offset_page1 = -(db->pc_first | TARGET_PAGE_MASK);
|
||||
|
||||
/* Get all the bytes from the first page. */
|
||||
if (db->host_addr[0]) {
|
||||
if (offset_end <= offset_page1) {
|
||||
memcpy(dest, db->host_addr[0] + offset, len);
|
||||
return true;
|
||||
}
|
||||
if (offset < offset_page1) {
|
||||
size_t len0 = offset_page1 - offset;
|
||||
memcpy(dest, db->host_addr[0] + offset, len0);
|
||||
offset += len0;
|
||||
dest += len0;
|
||||
/*
|
||||
* If the second page is MMIO, treat as if the first page
|
||||
* was MMIO as well, so that we do not cache the TB.
|
||||
*/
|
||||
if (unlikely(phys_page == -1)) {
|
||||
tb_set_page_addr0(tb, -1);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
tb_set_page_addr1(tb, phys_page);
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
page_protect(end);
|
||||
#endif
|
||||
host = db->host_addr[1];
|
||||
}
|
||||
|
||||
/* Get any bytes from the second page. */
|
||||
if (db->host_addr[1] && offset >= offset_page1) {
|
||||
memcpy(dest, db->host_addr[1] + (offset - offset_page1),
|
||||
offset_end - offset);
|
||||
return true;
|
||||
/* Use slow path when crossing pages. */
|
||||
if (is_same_page(db, pc)) {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/* Else get recorded bytes. */
|
||||
if (db->record_len != 0 &&
|
||||
offset >= db->record_start &&
|
||||
offset_end <= db->record_start + db->record_len) {
|
||||
memcpy(dest, db->record + (offset - db->record_start),
|
||||
offset_end - offset);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
tcg_debug_assert(pc >= base);
|
||||
return host + (pc - base);
|
||||
}
|
||||
|
||||
uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, vaddr pc)
|
||||
uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
|
||||
{
|
||||
uint8_t raw;
|
||||
uint8_t ret;
|
||||
void *p = translator_access(env, db, pc, sizeof(ret));
|
||||
|
||||
if (!translator_ld(env, db, &raw, pc, sizeof(raw))) {
|
||||
raw = cpu_ldub_code(env, pc);
|
||||
record_save(db, pc, &raw, sizeof(raw));
|
||||
if (p) {
|
||||
plugin_insn_append(pc, p, sizeof(ret));
|
||||
return ldub_p(p);
|
||||
}
|
||||
return raw;
|
||||
ret = cpu_ldub_code(env, pc);
|
||||
plugin_insn_append(pc, &ret, sizeof(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, vaddr pc)
|
||||
uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
|
||||
{
|
||||
uint16_t raw, tgt;
|
||||
uint16_t ret, plug;
|
||||
void *p = translator_access(env, db, pc, sizeof(ret));
|
||||
|
||||
if (translator_ld(env, db, &raw, pc, sizeof(raw))) {
|
||||
tgt = tswap16(raw);
|
||||
} else {
|
||||
tgt = cpu_lduw_code(env, pc);
|
||||
raw = tswap16(tgt);
|
||||
record_save(db, pc, &raw, sizeof(raw));
|
||||
if (p) {
|
||||
plugin_insn_append(pc, p, sizeof(ret));
|
||||
return lduw_p(p);
|
||||
}
|
||||
return tgt;
|
||||
ret = cpu_lduw_code(env, pc);
|
||||
plug = tswap16(ret);
|
||||
plugin_insn_append(pc, &plug, sizeof(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, vaddr pc)
|
||||
uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
|
||||
{
|
||||
uint32_t raw, tgt;
|
||||
uint32_t ret, plug;
|
||||
void *p = translator_access(env, db, pc, sizeof(ret));
|
||||
|
||||
if (translator_ld(env, db, &raw, pc, sizeof(raw))) {
|
||||
tgt = tswap32(raw);
|
||||
} else {
|
||||
tgt = cpu_ldl_code(env, pc);
|
||||
raw = tswap32(tgt);
|
||||
record_save(db, pc, &raw, sizeof(raw));
|
||||
if (p) {
|
||||
plugin_insn_append(pc, p, sizeof(ret));
|
||||
return ldl_p(p);
|
||||
}
|
||||
return tgt;
|
||||
ret = cpu_ldl_code(env, pc);
|
||||
plug = tswap32(ret);
|
||||
plugin_insn_append(pc, &plug, sizeof(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, vaddr pc)
|
||||
uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
|
||||
{
|
||||
uint64_t raw, tgt;
|
||||
uint64_t ret, plug;
|
||||
void *p = translator_access(env, db, pc, sizeof(ret));
|
||||
|
||||
if (translator_ld(env, db, &raw, pc, sizeof(raw))) {
|
||||
tgt = tswap64(raw);
|
||||
} else {
|
||||
tgt = cpu_ldq_code(env, pc);
|
||||
raw = tswap64(tgt);
|
||||
record_save(db, pc, &raw, sizeof(raw));
|
||||
if (p) {
|
||||
plugin_insn_append(pc, p, sizeof(ret));
|
||||
return ldq_p(p);
|
||||
}
|
||||
return tgt;
|
||||
}
|
||||
|
||||
void translator_fake_ld(DisasContextBase *db, const void *data, size_t len)
|
||||
{
|
||||
db->fake_insn = true;
|
||||
record_save(db, db->pc_first, data, len);
|
||||
ret = cpu_ldq_code(env, pc);
|
||||
plug = tswap64(ret);
|
||||
plugin_insn_append(pc, &plug, sizeof(ret));
|
||||
return ret;
|
||||
}
|
||||
|
@@ -2,6 +2,8 @@
|
||||
#include "hw/core/cpu.h"
|
||||
#include "exec/replay-core.h"
|
||||
|
||||
bool enable_cpu_pm = false;
|
||||
|
||||
void cpu_resume(CPUState *cpu)
|
||||
{
|
||||
}
|
||||
@@ -14,10 +16,6 @@ void qemu_init_vcpu(CPUState *cpu)
|
||||
{
|
||||
}
|
||||
|
||||
void cpu_exec_reset_hold(CPUState *cpu)
|
||||
{
|
||||
}
|
||||
|
||||
/* User mode emulation does not support record/replay yet. */
|
||||
|
||||
bool replay_exception(void)
|
||||
|
@@ -24,27 +24,17 @@
|
||||
#include "qemu/bitops.h"
|
||||
#include "qemu/rcu.h"
|
||||
#include "exec/cpu_ldst.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "exec/translate-all.h"
|
||||
#include "exec/page-protection.h"
|
||||
#include "exec/helper-proto.h"
|
||||
#include "qemu/atomic128.h"
|
||||
#include "trace.h"
|
||||
#include "trace/trace-root.h"
|
||||
#include "tcg/tcg-ldst.h"
|
||||
#include "internal-common.h"
|
||||
#include "internal-target.h"
|
||||
#include "internal.h"
|
||||
|
||||
__thread uintptr_t helper_retaddr;
|
||||
|
||||
//#define DEBUG_SIGNAL
|
||||
|
||||
void cpu_interrupt(CPUState *cpu, int mask)
|
||||
{
|
||||
g_assert(bql_locked());
|
||||
cpu->interrupt_request |= mask;
|
||||
qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
|
||||
}
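/*
 * [Illustration]  Writing -1 into icount_decr.u16.high makes the 32-bit
 * counter read by generated code negative, so the TCG_COND_LT check emitted
 * by gen_tb_start() fires and the TB exits to service the interrupt.  A
 * simplified stand-in for the union (little-endian host only; the real
 * definition also handles big-endian hosts):
 */
#include <assert.h>
#include <stdint.h>

typedef union {
    uint32_t u32;
    struct { uint16_t low, high; } u16;
} FakeIcountDecr;

static void check_interrupt_trick(void)
{
    FakeIcountDecr d = { .u32 = 5 };     /* some icount budget remaining */

    d.u16.high = (uint16_t)-1;           /* what cpu_interrupt() just did */
    assert((int32_t)d.u32 < 0);          /* generated code sees "exit now" */
}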
/*
|
||||
* Adjust the pc to pass to cpu_restore_state; return the memop type.
|
||||
*/
|
||||
@@ -154,7 +144,7 @@ typedef struct PageFlagsNode {
|
||||
|
||||
static IntervalTreeRoot pageflags_root;
|
||||
|
||||
static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
|
||||
static PageFlagsNode *pageflags_find(target_ulong start, target_long last)
|
||||
{
|
||||
IntervalTreeNode *n;
|
||||
|
||||
@@ -163,7 +153,7 @@ static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
|
||||
}
|
||||
|
||||
static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
|
||||
target_ulong last)
|
||||
target_long last)
|
||||
{
|
||||
IntervalTreeNode *n;
|
||||
|
||||
@@ -485,6 +475,11 @@ static bool pageflags_set_clear(target_ulong start, target_ulong last,
|
||||
return inval_tb;
|
||||
}
|
||||
|
||||
/*
|
||||
* Modify the flags of a page and invalidate the code if necessary.
|
||||
* The flag PAGE_WRITE_ORG is positioned automatically depending
|
||||
* on PAGE_WRITE. The mmap_lock should already be held.
|
||||
*/
|
||||
void page_set_flags(target_ulong start, target_ulong last, int flags)
|
||||
{
|
||||
bool reset = false;
|
||||
@@ -525,19 +520,19 @@ void page_set_flags(target_ulong start, target_ulong last, int flags)
|
||||
}
|
||||
}
|
||||
|
||||
bool page_check_range(target_ulong start, target_ulong len, int flags)
|
||||
int page_check_range(target_ulong start, target_ulong len, int flags)
|
||||
{
|
||||
target_ulong last;
|
||||
int locked; /* tri-state: =0: unlocked, +1: global, -1: local */
|
||||
bool ret;
|
||||
int ret;
|
||||
|
||||
if (len == 0) {
|
||||
return true; /* trivial length */
|
||||
return 0; /* trivial length */
|
||||
}
|
||||
|
||||
last = start + len - 1;
|
||||
if (last < start) {
|
||||
return false; /* wrap around */
|
||||
return -1; /* wrap around */
|
||||
}
|
||||
|
||||
locked = have_mmap_lock();
|
||||
@@ -556,33 +551,33 @@ bool page_check_range(target_ulong start, target_ulong len, int flags)
|
||||
p = pageflags_find(start, last);
|
||||
}
|
||||
if (!p) {
|
||||
ret = false; /* entire region invalid */
|
||||
ret = -1; /* entire region invalid */
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (start < p->itree.start) {
|
||||
ret = false; /* initial bytes invalid */
|
||||
ret = -1; /* initial bytes invalid */
|
||||
break;
|
||||
}
|
||||
|
||||
missing = flags & ~p->flags;
|
||||
if (missing & ~PAGE_WRITE) {
|
||||
ret = false; /* page doesn't match */
|
||||
if (missing & PAGE_READ) {
|
||||
ret = -1; /* page not readable */
|
||||
break;
|
||||
}
|
||||
if (missing & PAGE_WRITE) {
|
||||
if (!(p->flags & PAGE_WRITE_ORG)) {
|
||||
ret = false; /* page not writable */
|
||||
ret = -1; /* page not writable */
|
||||
break;
|
||||
}
|
||||
/* Asking about writable, but has been protected: undo. */
|
||||
if (!page_unprotect(start, 0)) {
|
||||
ret = false;
|
||||
ret = -1;
|
||||
break;
|
||||
}
|
||||
/* TODO: page_unprotect should take a range, not a single page. */
|
||||
if (last - start < TARGET_PAGE_SIZE) {
|
||||
ret = true; /* ok */
|
||||
ret = 0; /* ok */
|
||||
break;
|
||||
}
|
||||
start += TARGET_PAGE_SIZE;
|
||||
@@ -590,7 +585,7 @@ bool page_check_range(target_ulong start, target_ulong len, int flags)
|
||||
}
|
||||
|
||||
if (last <= p->itree.last) {
|
||||
ret = true; /* ok */
|
||||
ret = 0; /* ok */
|
||||
break;
|
||||
}
|
||||
start = p->itree.last + 1;
|
||||
@@ -603,69 +598,20 @@ bool page_check_range(target_ulong start, target_ulong len, int flags)
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool page_check_range_empty(target_ulong start, target_ulong last)
|
||||
{
|
||||
assert(last >= start);
|
||||
assert_memory_lock();
|
||||
return pageflags_find(start, last) == NULL;
|
||||
}
|
||||
|
||||
target_ulong page_find_range_empty(target_ulong min, target_ulong max,
|
||||
target_ulong len, target_ulong align)
|
||||
{
|
||||
target_ulong len_m1, align_m1;
|
||||
|
||||
assert(min <= max);
|
||||
assert(max <= GUEST_ADDR_MAX);
|
||||
assert(len != 0);
|
||||
assert(is_power_of_2(align));
|
||||
assert_memory_lock();
|
||||
|
||||
len_m1 = len - 1;
|
||||
align_m1 = align - 1;
|
||||
|
||||
/* Iteratively narrow the search region. */
|
||||
while (1) {
|
||||
PageFlagsNode *p;
|
||||
|
||||
/* Align min and double-check there's enough space remaining. */
|
||||
min = (min + align_m1) & ~align_m1;
|
||||
if (min > max) {
|
||||
return -1;
|
||||
}
|
||||
if (len_m1 > max - min) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
p = pageflags_find(min, min + len_m1);
|
||||
if (p == NULL) {
|
||||
/* Found! */
|
||||
return min;
|
||||
}
|
||||
if (max <= p->itree.last) {
|
||||
/* Existing allocation fills the remainder of the search region. */
|
||||
return -1;
|
||||
}
|
||||
/* Skip across existing allocation. */
|
||||
min = p->itree.last + 1;
|
||||
}
|
||||
}
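/*
 * [Sketch]  The same first-fit search as page_find_range_empty(), written
 * against a sorted, non-overlapping array of [start, last] ranges instead
 * of the interval tree (the types and helper below are invented for the
 * sketch):
 */
#include <stddef.h>
#include <stdint.h>

struct Range { uint64_t start, last; };

static uint64_t find_gap(const struct Range *r, size_t n,
                         uint64_t min, uint64_t max,
                         uint64_t len, uint64_t align)
{
    uint64_t len_m1 = len - 1, align_m1 = align - 1;
    size_t i = 0;

    for (;;) {
        min = (min + align_m1) & ~align_m1;         /* align the candidate */
        if (min > max || len_m1 > max - min) {
            return (uint64_t)-1;                    /* no room left */
        }
        /* First existing range that ends at or after the candidate. */
        while (i < n && r[i].last < min) {
            i++;
        }
        if (i == n || r[i].start > min + len_m1) {
            return min;                             /* found a hole */
        }
        if (max <= r[i].last) {
            return (uint64_t)-1;                    /* allocations fill the rest */
        }
        min = r[i].last + 1;                        /* skip past it and retry */
    }
}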
void page_protect(tb_page_addr_t address)
|
||||
{
|
||||
PageFlagsNode *p;
|
||||
target_ulong start, last;
|
||||
int host_page_size = qemu_real_host_page_size();
|
||||
int prot;
|
||||
|
||||
assert_memory_lock();
|
||||
|
||||
if (host_page_size <= TARGET_PAGE_SIZE) {
|
||||
if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
|
||||
start = address & TARGET_PAGE_MASK;
|
||||
last = start + TARGET_PAGE_SIZE - 1;
|
||||
} else {
|
||||
start = address & -host_page_size;
|
||||
last = start + host_page_size - 1;
|
||||
start = address & qemu_host_page_mask;
|
||||
last = start + qemu_host_page_size - 1;
|
||||
}
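/*
 * [Worked example, assuming 4KiB target pages on a 16KiB-page host]
 * For address = 0x12345:
 *     start = 0x12345 & -0x4000    = 0x10000
 *     last  = 0x10000 + 0x4000 - 1 = 0x13fff
 * so a single host mprotect() necessarily covers four target pages, which
 * is why page_protect() ORs together the flags of every pageflags region
 * in that range before choosing the host protection.
 */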
p = pageflags_find(start, last);
|
||||
@@ -676,7 +622,7 @@ void page_protect(tb_page_addr_t address)
|
||||
|
||||
if (unlikely(p->itree.last < last)) {
|
||||
/* More than one protection region covers the one host page. */
|
||||
assert(TARGET_PAGE_SIZE < host_page_size);
|
||||
assert(TARGET_PAGE_SIZE < qemu_host_page_size);
|
||||
while ((p = pageflags_next(p, start, last)) != NULL) {
|
||||
prot |= p->flags;
|
||||
}
|
||||
@@ -684,7 +630,7 @@ void page_protect(tb_page_addr_t address)
|
||||
|
||||
if (prot & PAGE_WRITE) {
|
||||
pageflags_set_clear(start, last, 0, PAGE_WRITE);
|
||||
mprotect(g2h_untagged(start), last - start + 1,
|
||||
mprotect(g2h_untagged(start), qemu_host_page_size,
|
||||
prot & (PAGE_READ | PAGE_EXEC) ? PROT_READ : PROT_NONE);
|
||||
}
|
||||
}
|
||||
@@ -730,19 +676,18 @@ int page_unprotect(target_ulong address, uintptr_t pc)
|
||||
}
|
||||
#endif
|
||||
} else {
|
||||
int host_page_size = qemu_real_host_page_size();
|
||||
target_ulong start, len, i;
|
||||
int prot;
|
||||
|
||||
if (host_page_size <= TARGET_PAGE_SIZE) {
|
||||
if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
|
||||
start = address & TARGET_PAGE_MASK;
|
||||
len = TARGET_PAGE_SIZE;
|
||||
prot = p->flags | PAGE_WRITE;
|
||||
pageflags_set_clear(start, start + len - 1, PAGE_WRITE, 0);
|
||||
current_tb_invalidated = tb_invalidate_phys_page_unwind(start, pc);
|
||||
} else {
|
||||
start = address & -host_page_size;
|
||||
len = host_page_size;
|
||||
start = address & qemu_host_page_mask;
|
||||
len = qemu_host_page_size;
|
||||
prot = 0;
|
||||
|
||||
for (i = 0; i < len; i += TARGET_PAGE_SIZE) {
|
||||
@@ -768,7 +713,7 @@ int page_unprotect(target_ulong address, uintptr_t pc)
|
||||
if (prot & PAGE_EXEC) {
|
||||
prot = (prot & ~PAGE_EXEC) | PAGE_READ;
|
||||
}
|
||||
mprotect((void *)g2h_untagged(start), len, prot & PAGE_RWX);
|
||||
mprotect((void *)g2h_untagged(start), len, prot & PAGE_BITS);
|
||||
}
|
||||
mmap_unlock();
|
||||
|
||||
@@ -776,7 +721,7 @@ int page_unprotect(target_ulong address, uintptr_t pc)
|
||||
return current_tb_invalidated ? 2 : 1;
|
||||
}
|
||||
|
||||
static int probe_access_internal(CPUArchState *env, vaddr addr,
|
||||
static int probe_access_internal(CPUArchState *env, target_ulong addr,
|
||||
int fault_size, MMUAccessType access_type,
|
||||
bool nonfault, uintptr_t ra)
|
||||
{
|
||||
@@ -800,10 +745,6 @@ static int probe_access_internal(CPUArchState *env, vaddr addr,
|
||||
if (guest_addr_valid_untagged(addr)) {
|
||||
int page_flags = page_get_flags(addr);
|
||||
if (page_flags & acc_flag) {
|
||||
if (access_type != MMU_INST_FETCH
|
||||
&& cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
|
||||
return TLB_MMIO;
|
||||
}
|
||||
return 0; /* success */
|
||||
}
|
||||
maperr = !(page_flags & PAGE_VALID);
|
||||
@@ -818,7 +759,7 @@ static int probe_access_internal(CPUArchState *env, vaddr addr,
|
||||
cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
|
||||
}
|
||||
|
||||
int probe_access_flags(CPUArchState *env, vaddr addr, int size,
|
||||
int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
|
||||
MMUAccessType access_type, int mmu_idx,
|
||||
bool nonfault, void **phost, uintptr_t ra)
|
||||
{
|
||||
@@ -826,23 +767,23 @@ int probe_access_flags(CPUArchState *env, vaddr addr, int size,
|
||||
|
||||
g_assert(-(addr | TARGET_PAGE_MASK) >= size);
|
||||
flags = probe_access_internal(env, addr, size, access_type, nonfault, ra);
|
||||
*phost = (flags & TLB_INVALID_MASK) ? NULL : g2h(env_cpu(env), addr);
|
||||
*phost = flags ? NULL : g2h(env_cpu(env), addr);
|
||||
return flags;
|
||||
}
|
||||
|
||||
void *probe_access(CPUArchState *env, vaddr addr, int size,
|
||||
void *probe_access(CPUArchState *env, target_ulong addr, int size,
|
||||
MMUAccessType access_type, int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
int flags;
|
||||
|
||||
g_assert(-(addr | TARGET_PAGE_MASK) >= size);
|
||||
flags = probe_access_internal(env, addr, size, access_type, false, ra);
|
||||
g_assert((flags & ~TLB_MMIO) == 0);
|
||||
g_assert(flags == 0);
|
||||
|
||||
return size ? g2h(env_cpu(env), addr) : NULL;
|
||||
}
|
||||
|
||||
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
|
||||
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
|
||||
void **hostp)
|
||||
{
|
||||
int flags;
|
||||
@@ -868,7 +809,7 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
|
||||
typedef struct TargetPageDataNode {
|
||||
struct rcu_head rcu;
|
||||
IntervalTreeNode itree;
|
||||
char data[] __attribute__((aligned));
|
||||
char data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] __attribute__((aligned));
|
||||
} TargetPageDataNode;
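/*
 * [Sketch]  The "char data[]" form above is a C flexible array member: one
 * allocation holds the header plus TPD_PAGES * TARGET_PAGE_DATA_SIZE bytes,
 * indexed with plain pointer arithmetic.  A generic equivalent (names and
 * sizes below are made up):
 */
#include <stdlib.h>

struct Blob {
    int tag;
    char data[];                 /* n_pages * page_size bytes follow */
};

static struct Blob *blob_new(size_t n_pages, size_t page_size)
{
    return calloc(1, sizeof(struct Blob) + n_pages * page_size);
}

static char *blob_page(struct Blob *b, size_t idx, size_t page_size)
{
    return b->data + idx * page_size;   /* same shape as t->data + p_ofs * ... */
}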
static IntervalTreeRoot targetdata_root;
|
||||
@@ -906,8 +847,7 @@ void page_reset_target_data(target_ulong start, target_ulong last)
|
||||
n_last = MIN(last, n->last);
|
||||
p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;
|
||||
|
||||
memset(t->data + p_ofs * TARGET_PAGE_DATA_SIZE, 0,
|
||||
p_len * TARGET_PAGE_DATA_SIZE);
|
||||
memset(t->data[p_ofs], 0, p_len * TARGET_PAGE_DATA_SIZE);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -915,7 +855,7 @@ void *page_get_target_data(target_ulong address)
|
||||
{
|
||||
IntervalTreeNode *n;
|
||||
TargetPageDataNode *t;
|
||||
target_ulong page, region, p_ofs;
|
||||
target_ulong page, region;
|
||||
|
||||
page = address & TARGET_PAGE_MASK;
|
||||
region = address & TBD_MASK;
|
||||
@@ -931,8 +871,7 @@ void *page_get_target_data(target_ulong address)
|
||||
mmap_lock();
|
||||
n = interval_tree_iter_first(&targetdata_root, page, page);
|
||||
if (!n) {
|
||||
t = g_malloc0(sizeof(TargetPageDataNode)
|
||||
+ TPD_PAGES * TARGET_PAGE_DATA_SIZE);
|
||||
t = g_new0(TargetPageDataNode, 1);
|
||||
n = &t->itree;
|
||||
n->start = region;
|
||||
n->last = region | ~TBD_MASK;
|
||||
@@ -942,192 +881,302 @@ void *page_get_target_data(target_ulong address)
|
||||
}
|
||||
|
||||
t = container_of(n, TargetPageDataNode, itree);
|
||||
p_ofs = (page - region) >> TARGET_PAGE_BITS;
|
||||
return t->data + p_ofs * TARGET_PAGE_DATA_SIZE;
|
||||
return t->data[(page - region) >> TARGET_PAGE_BITS];
|
||||
}
|
||||
#else
|
||||
void page_reset_target_data(target_ulong start, target_ulong last) { }
|
||||
#endif /* TARGET_PAGE_DATA_SIZE */
|
||||
|
||||
/* The system-mode versions of these helpers are in cputlb.c. */
|
||||
/* The softmmu versions of these helpers are in cputlb.c. */
|
||||
|
||||
static void *cpu_mmu_lookup(CPUState *cpu, vaddr addr,
|
||||
MemOp mop, uintptr_t ra, MMUAccessType type)
|
||||
/*
|
||||
* Verify that we have passed the correct MemOp to the correct function.
|
||||
*
|
||||
* We could present one function to target code, and dispatch based on
|
||||
* the MemOp, but so far we have worked hard to avoid an indirect function
|
||||
* call along the memory path.
|
||||
*/
|
||||
static void validate_memop(MemOpIdx oi, MemOp expected)
|
||||
{
|
||||
int a_bits = memop_alignment_bits(mop);
|
||||
#ifdef CONFIG_DEBUG_TCG
|
||||
MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
|
||||
assert(have == expected);
|
||||
#endif
|
||||
}
|
||||
|
||||
void helper_unaligned_ld(CPUArchState *env, target_ulong addr)
|
||||
{
|
||||
cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC());
|
||||
}
|
||||
|
||||
void helper_unaligned_st(CPUArchState *env, target_ulong addr)
|
||||
{
|
||||
cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC());
|
||||
}
|
||||
|
||||
static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
|
||||
MemOpIdx oi, uintptr_t ra, MMUAccessType type)
|
||||
{
|
||||
MemOp mop = get_memop(oi);
|
||||
int a_bits = get_alignment_bits(mop);
|
||||
void *ret;
|
||||
|
||||
/* Enforce guest required alignment. */
|
||||
if (unlikely(addr & ((1 << a_bits) - 1))) {
|
||||
cpu_loop_exit_sigbus(cpu, addr, type, ra);
|
||||
cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
|
||||
}
|
||||
|
||||
ret = g2h(cpu, addr);
|
||||
ret = g2h(env_cpu(env), addr);
|
||||
set_helper_retaddr(ra);
|
||||
return ret;
|
||||
}
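/*
 * [Worked example]  For a MemOp that requires 4-byte alignment, a_bits is 2,
 * so the mask is (1 << 2) - 1 == 0x3; an address such as 0x1002 has low bits
 * set and takes the cpu_loop_exit_sigbus() path above.
 */
#include <assert.h>
#include <stdint.h>

static void check_alignment_example(void)
{
    int a_bits = 2;
    uint64_t addr = 0x1002;

    assert((addr & ((1u << a_bits) - 1)) != 0);   /* would raise SIGBUS */
}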
#include "ldst_atomicity.c.inc"
|
||||
|
||||
static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
|
||||
uintptr_t ra, MMUAccessType access_type)
|
||||
uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
void *haddr;
|
||||
uint8_t ret;
|
||||
|
||||
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
|
||||
haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, access_type);
|
||||
validate_memop(oi, MO_UB);
|
||||
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
|
||||
ret = ldub_p(haddr);
|
||||
clear_helper_retaddr();
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
|
||||
uintptr_t ra, MMUAccessType access_type)
|
||||
uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
void *haddr;
|
||||
uint16_t ret;
|
||||
MemOp mop = get_memop(oi);
|
||||
|
||||
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
|
||||
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
|
||||
ret = load_atom_2(cpu, ra, haddr, mop);
|
||||
validate_memop(oi, MO_BEUW);
|
||||
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
|
||||
ret = lduw_be_p(haddr);
|
||||
clear_helper_retaddr();
|
||||
|
||||
if (mop & MO_BSWAP) {
|
||||
ret = bswap16(ret);
|
||||
}
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
|
||||
return ret;
|
||||
}
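/*
 * [Sketch]  The load pattern above does one host-order access and swaps only
 * when the requested byte order differs from the host's (that difference is
 * what the MO_BSWAP bit encodes).  Reduced to plain C, using the GCC/Clang
 * byte-order macros:
 */
#include <stdint.h>
#include <string.h>

static uint16_t load_u16(const void *haddr, int want_big_endian)
{
    uint16_t v;
    int host_big_endian;

    memcpy(&v, haddr, sizeof(v));                 /* host-order load */
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    host_big_endian = 1;
#else
    host_big_endian = 0;
#endif
    if (want_big_endian != host_big_endian) {
        v = (uint16_t)((v << 8) | (v >> 8));      /* bswap16 */
    }
    return v;
}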
static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
|
||||
uintptr_t ra, MMUAccessType access_type)
|
||||
uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
void *haddr;
|
||||
uint32_t ret;
|
||||
MemOp mop = get_memop(oi);
|
||||
|
||||
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
|
||||
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
|
||||
ret = load_atom_4(cpu, ra, haddr, mop);
|
||||
validate_memop(oi, MO_BEUL);
|
||||
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
|
||||
ret = ldl_be_p(haddr);
|
||||
clear_helper_retaddr();
|
||||
|
||||
if (mop & MO_BSWAP) {
|
||||
ret = bswap32(ret);
|
||||
}
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
|
||||
uintptr_t ra, MMUAccessType access_type)
|
||||
uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
void *haddr;
|
||||
uint64_t ret;
|
||||
MemOp mop = get_memop(oi);
|
||||
|
||||
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
|
||||
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
|
||||
ret = load_atom_8(cpu, ra, haddr, mop);
|
||||
validate_memop(oi, MO_BEUQ);
|
||||
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
|
||||
ret = ldq_be_p(haddr);
|
||||
clear_helper_retaddr();
|
||||
|
||||
if (mop & MO_BSWAP) {
|
||||
ret = bswap64(ret);
|
||||
}
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static Int128 do_ld16_mmu(CPUState *cpu, abi_ptr addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
void *haddr;
|
||||
uint16_t ret;
|
||||
|
||||
validate_memop(oi, MO_LEUW);
|
||||
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
|
||||
ret = lduw_le_p(haddr);
|
||||
clear_helper_retaddr();
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
|
||||
return ret;
|
||||
}
|
||||
|
||||
uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
void *haddr;
|
||||
uint32_t ret;
|
||||
|
||||
validate_memop(oi, MO_LEUL);
|
||||
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
|
||||
ret = ldl_le_p(haddr);
|
||||
clear_helper_retaddr();
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
|
||||
return ret;
|
||||
}
|
||||
|
||||
uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
void *haddr;
|
||||
uint64_t ret;
|
||||
|
||||
validate_memop(oi, MO_LEUQ);
|
||||
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
|
||||
ret = ldq_le_p(haddr);
|
||||
clear_helper_retaddr();
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
|
||||
return ret;
|
||||
}
|
||||
|
||||
Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
void *haddr;
|
||||
Int128 ret;
|
||||
MemOp mop = get_memop(oi);
|
||||
|
||||
tcg_debug_assert((mop & MO_SIZE) == MO_128);
|
||||
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
|
||||
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_LOAD);
|
||||
ret = load_atom_16(cpu, ra, haddr, mop);
|
||||
validate_memop(oi, MO_128 | MO_BE);
|
||||
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
|
||||
memcpy(&ret, haddr, 16);
|
||||
clear_helper_retaddr();
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
|
||||
|
||||
if (mop & MO_BSWAP) {
|
||||
if (!HOST_BIG_ENDIAN) {
|
||||
ret = bswap128(ret);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
|
||||
Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
void *haddr;
|
||||
Int128 ret;
|
||||
|
||||
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
|
||||
haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, MMU_DATA_STORE);
|
||||
validate_memop(oi, MO_128 | MO_LE);
|
||||
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
|
||||
memcpy(&ret, haddr, 16);
|
||||
clear_helper_retaddr();
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
|
||||
|
||||
if (HOST_BIG_ENDIAN) {
|
||||
ret = bswap128(ret);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
void *haddr;
|
||||
|
||||
validate_memop(oi, MO_UB);
|
||||
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
|
||||
stb_p(haddr, val);
|
||||
clear_helper_retaddr();
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
|
||||
}
|
||||
|
||||
static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
void *haddr;
|
||||
MemOp mop = get_memop(oi);
|
||||
|
||||
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
|
||||
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
|
||||
|
||||
if (mop & MO_BSWAP) {
|
||||
val = bswap16(val);
|
||||
}
|
||||
store_atom_2(cpu, ra, haddr, mop, val);
|
||||
validate_memop(oi, MO_BEUW);
|
||||
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
|
||||
stw_be_p(haddr, val);
|
||||
clear_helper_retaddr();
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
|
||||
}
|
||||
|
||||
static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
|
||||
                    MemOpIdx oi, uintptr_t ra)
void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    MemOp mop = get_memop(oi);

    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);

    if (mop & MO_BSWAP) {
        val = bswap32(val);
    }
    store_atom_4(cpu, ra, haddr, mop, val);
    validate_memop(oi, MO_BEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
                       MemOpIdx oi, uintptr_t ra)
void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    MemOp mop = get_memop(oi);

    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);

    if (mop & MO_BSWAP) {
        val = bswap64(val);
    }
    store_atom_8(cpu, ra, haddr, mop, val);
    validate_memop(oi, MO_BEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
                        MemOpIdx oi, uintptr_t ra)
void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    MemOpIdx mop = get_memop(oi);

    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
    validate_memop(oi, MO_LEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

    if (mop & MO_BSWAP) {
void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr,
                     Int128 val, MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_128 | MO_BE);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    if (!HOST_BIG_ENDIAN) {
        val = bswap128(val);
    }
    store_atom_16(cpu, ra, haddr, mop, val);
    memcpy(haddr, &val, 16);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr,
                     Int128 val, MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_128 | MO_LE);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    if (HOST_BIG_ENDIAN) {
        val = bswap128(val);
    }
    memcpy(haddr, &val, 16);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

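Note: the store helpers above all follow the same shape: look up the host address for the guest store, byte-swap the value when the memory op's endianness (MO_BSWAP, or a HOST_BIG_ENDIAN mismatch for the 16-byte case) disagrees with the host, then perform the write and notify plugins. The following standalone sketch is not QEMU code; it is plain C using a GCC/Clang builtin, with invented names, and only illustrates the swap-on-mismatch step:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mimic "swap when the op's endianness differs from the host". */
static uint32_t store_value_host_order(uint32_t val, int op_is_big_endian)
{
    union { uint32_t u; uint8_t b[4]; } probe = { .u = 1 };
    int host_is_big_endian = (probe.b[0] == 0);

    if (op_is_big_endian != host_is_big_endian) {
        val = __builtin_bswap32(val);   /* same role as bswap32() above */
    }
    return val;
}

int main(void)
{
    printf("0x%08x\n", store_value_host_order(0x11223344u, 1));
    return 0;
}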
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
@@ -1170,86 +1219,33 @@ uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
    return ret;
}

uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
                         MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    return ret;
}

uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
    ret = lduw_p(haddr);
    clear_helper_retaddr();
    if (get_memop(oi) & MO_BSWAP) {
        ret = bswap16(ret);
    }
    return ret;
}

uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
    ret = ldl_p(haddr);
    clear_helper_retaddr();
    if (get_memop(oi) & MO_BSWAP) {
        ret = bswap32(ret);
    }
    return ret;
}

uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_p(haddr);
    clear_helper_retaddr();
    if (get_memop(oi) & MO_BSWAP) {
        ret = bswap64(ret);
    }
    return ret;
}

#include "ldst_common.c.inc"

/*
 * Do not allow unaligned operations to proceed. Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
                               int size, uintptr_t retaddr)
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, int size, int prot,
                               uintptr_t retaddr)
{
    MemOp mop = get_memop(oi);
    int a_bits = memop_alignment_bits(mop);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        cpu_loop_exit_sigbus(cpu, addr, MMU_DATA_STORE, retaddr);
        MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
        cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr);
    }

    /* Enforce qemu required alignment. */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(cpu, retaddr);
        cpu_loop_exit_atomic(env_cpu(env), retaddr);
    }

    ret = g2h(cpu, addr);
    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(retaddr);
    return ret;
}

@@ -1279,7 +1275,7 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
#include "atomic_template.h"
#endif

#if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

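Note: atomic_mmu_lookup() above rejects two kinds of misalignment: the guest-visible one derived from the MemOp's alignment bits, and the one QEMU itself needs for the atomic (the address must be aligned to the access size). A minimal standalone illustration of those two mask checks, in plain C with invented values:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the two alignment masks used above. */
static int check_alignment(uint64_t addr, int a_bits, int size)
{
    if (addr & (((uint64_t)1 << a_bits) - 1)) {
        return -1;   /* guest-required alignment violated -> SIGBUS path */
    }
    if (addr & (uint64_t)(size - 1)) {
        return -2;   /* qemu-required alignment violated -> exit-atomic path */
    }
    return 0;
}

int main(void)
{
    printf("%d %d %d\n",
           check_alignment(0x1000, 2, 4),   /* ok */
           check_alignment(0x1002, 2, 4),   /* guest alignment fails */
           check_alignment(0x1004, 0, 8));  /* size alignment fails */
    return 0;
}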
@@ -1,18 +0,0 @@
/*
 * SPDX-FileContributor: Philippe Mathieu-Daudé <philmd@linaro.org>
 * SPDX-FileCopyrightText: 2023 Linaro Ltd.
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#ifndef ACCEL_TCG_VCPU_STATE_H
#define ACCEL_TCG_VCPU_STATE_H

#include "hw/core/cpu.h"

#ifdef CONFIG_USER_ONLY
static inline TaskState *get_task_state(const CPUState *cs)
{
    return cs->opaque;
}
#endif

#endif
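Note: the header above only wraps one accessor: in user-mode builds the per-vCPU TaskState hangs off CPUState::opaque. A hedged fragment of how a caller would use it, assuming a CPUState *cs already in scope inside QEMU user-mode code (not a standalone program):

#ifdef CONFIG_USER_ONLY
    /* Illustrative fragment: fetch the user-mode task state for this vCPU. */
    TaskState *ts = get_task_state(cs);   /* equivalent to (TaskState *)cs->opaque */
    (void)ts;
#endif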
@@ -15,7 +15,6 @@
#include "hw/xen/xen_native.h"
#include "hw/xen/xen-legacy-backend.h"
#include "hw/xen/xen_pt.h"
#include "hw/xen/xen_igd.h"
#include "chardev/char.h"
#include "qemu/accel.h"
#include "sysemu/cpus.h"

@@ -904,7 +904,7 @@ static void alsa_init_per_direction(AudiodevAlsaPerDirectionOptions *apdo)
    }
}

static void *alsa_audio_init(Audiodev *dev, Error **errp)
static void *alsa_audio_init(Audiodev *dev)
{
    AudiodevAlsaOptions *aopts;
    assert(dev->driver == AUDIODEV_DRIVER_ALSA);
@@ -960,6 +960,7 @@ static struct audio_driver alsa_audio_driver = {
    .init = alsa_audio_init,
    .fini = alsa_audio_fini,
    .pcm_ops = &alsa_pcm_ops,
    .can_be_default = 1,
    .max_voices_out = INT_MAX,
    .max_voices_in = INT_MAX,
    .voice_size_out = sizeof (ALSAVoiceOut),
@@ -26,7 +26,6 @@
#include "audio/audio.h"
#include "monitor/hmp.h"
#include "monitor/monitor.h"
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"

static QLIST_HEAD (capture_list_head, CaptureState) capture_head;
@@ -66,11 +65,10 @@ void hmp_wavcapture(Monitor *mon, const QDict *qdict)
    int nchannels = qdict_get_try_int(qdict, "nchannels", 2);
    const char *audiodev = qdict_get_str(qdict, "audiodev");
    CaptureState *s;
    Error *local_err = NULL;
    AudioState *as = audio_state_by_name(audiodev, &local_err);
    AudioState *as = audio_state_by_name(audiodev);

    if (!as) {
        error_report_err(local_err);
        monitor_printf(mon, "Audiodev '%s' not found\n", audiodev);
        return;
    }

246 audio/audio.c
@@ -32,9 +32,7 @@
#include "qapi/qobject-input-visitor.h"
#include "qapi/qapi-visit-audio.h"
#include "qapi/qapi-commands-audio.h"
#include "qapi/qmp/qdict.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/help_option.h"
@@ -63,22 +61,19 @@ const char *audio_prio_list[] = {
    "spice",
    CONFIG_AUDIO_DRIVERS
    "none",
    "wav",
    NULL
};

static QLIST_HEAD(, audio_driver) audio_drivers;
static AudiodevListHead audiodevs =
    QSIMPLEQ_HEAD_INITIALIZER(audiodevs);
static AudiodevListHead default_audiodevs =
    QSIMPLEQ_HEAD_INITIALIZER(default_audiodevs);

static AudiodevListHead audiodevs = QSIMPLEQ_HEAD_INITIALIZER(audiodevs);

void audio_driver_register(audio_driver *drv)
{
    QLIST_INSERT_HEAD(&audio_drivers, drv, next);
}

static audio_driver *audio_driver_lookup(const char *name)
audio_driver *audio_driver_lookup(const char *name)
{
    struct audio_driver *d;
    Error *local_err = NULL;
@@ -104,7 +99,6 @@ static audio_driver *audio_driver_lookup(const char *name)

static QTAILQ_HEAD(AudioStateHead, AudioState) audio_states =
    QTAILQ_HEAD_INITIALIZER(audio_states);
static AudioState *default_audio_state;

const struct mixeng_volume nominal_volume = {
    .mute = 0,
@@ -117,6 +111,8 @@ const struct mixeng_volume nominal_volume = {
#endif
};

static bool legacy_config = true;

int audio_bug (const char *funcname, int cond)
{
    if (cond) {
@@ -1557,11 +1553,9 @@ size_t audio_generic_read(HWVoiceIn *hw, void *buf, size_t size)
}

static int audio_driver_init(AudioState *s, struct audio_driver *drv,
                             Audiodev *dev, Error **errp)
                             bool msg, Audiodev *dev)
{
    Error *local_err = NULL;

    s->drv_opaque = drv->init(dev, &local_err);
    s->drv_opaque = drv->init(dev);

    if (s->drv_opaque) {
        if (!drv->pcm_ops->get_buffer_in) {
@@ -1573,15 +1567,13 @@ static int audio_driver_init(AudioState *s, struct audio_driver *drv,
            drv->pcm_ops->put_buffer_out = audio_generic_put_buffer_out;
        }

        audio_init_nb_voices_out(s, drv, 1);
        audio_init_nb_voices_in(s, drv, 0);
        audio_init_nb_voices_out(s, drv);
        audio_init_nb_voices_in(s, drv);
        s->drv = drv;
        return 0;
    } else {
        if (local_err) {
            error_propagate(errp, local_err);
        } else {
            error_setg(errp, "Could not init `%s' audio driver", drv->name);
        if (msg) {
            dolog("Could not init `%s' audio driver\n", drv->name);
        }
        return -1;
    }
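Note: one side of this hunk makes audio_driver_init() report failures through an Error ** parameter instead of dolog(). A hedged caller-side fragment, assuming the errp-based signature shown above and QEMU's usual Error API; the surrounding variables are placeholders for illustration:

    /* Illustrative fragment, not taken from the tree. */
    Error *local_err = NULL;

    if (audio_driver_init(s, driver, dev, &local_err) < 0) {
        /* Forward the reason to our own caller instead of logging it here. */
        error_propagate(errp, local_err);
        return;
    }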
@@ -1661,7 +1653,6 @@ static void free_audio_state(AudioState *s)

void audio_cleanup(void)
{
    default_audio_state = NULL;
    while (!QTAILQ_EMPTY(&audio_states)) {
        AudioState *s = QTAILQ_FIRST(&audio_states);
        QTAILQ_REMOVE(&audio_states, s, list);
@@ -1683,30 +1674,24 @@ static const VMStateDescription vmstate_audio = {
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmstate_audio_needed,
    .fields = (const VMStateField[]) {
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    }
};

void audio_create_default_audiodevs(void)
static void audio_validate_opts(Audiodev *dev, Error **errp);

static AudiodevListEntry *audiodev_find(
    AudiodevListHead *head, const char *drvname)
{
    for (int i = 0; audio_prio_list[i]; i++) {
        if (audio_driver_lookup(audio_prio_list[i])) {
            QDict *dict = qdict_new();
            Audiodev *dev = NULL;
            Visitor *v;

            qdict_put_str(dict, "driver", audio_prio_list[i]);
            qdict_put_str(dict, "id", "#default");

            v = qobject_input_visitor_new_keyval(QOBJECT(dict));
            qobject_unref(dict);
            visit_type_Audiodev(v, NULL, &dev, &error_fatal);
            visit_free(v);

            audio_define_default(dev, &error_abort);
    AudiodevListEntry *e;
    QSIMPLEQ_FOREACH(e, head, next) {
        if (strcmp(AudiodevDriver_str(e->dev->driver), drvname) == 0) {
            return e;
        }
    }

    return NULL;
}

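Note: the hunk above interleaves two different functions, so the "default audiodev" side is hard to read in place. Reassembled for illustration (pieced together from the lines shown above, not an authoritative copy of the tree), it builds one "#default" Audiodev per available driver in priority order via the keyval visitor:

/* Reassembled for illustration from the hunk above. */
void audio_create_default_audiodevs(void)
{
    for (int i = 0; audio_prio_list[i]; i++) {
        if (audio_driver_lookup(audio_prio_list[i])) {
            QDict *dict = qdict_new();
            Audiodev *dev = NULL;
            Visitor *v;

            qdict_put_str(dict, "driver", audio_prio_list[i]);
            qdict_put_str(dict, "id", "#default");

            v = qobject_input_visitor_new_keyval(QOBJECT(dict));
            qobject_unref(dict);
            visit_type_Audiodev(v, NULL, &dev, &error_fatal);
            visit_free(v);

            audio_define_default(dev, &error_abort);
        }
    }
}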
/*
@@ -1715,16 +1700,62 @@ void audio_create_default_audiodevs(void)
 * if dev == NULL => legacy implicit initialization, return the already created
 * state or create a new one
 */
static AudioState *audio_init(Audiodev *dev, Error **errp)
static AudioState *audio_init(Audiodev *dev, const char *name)
{
    static bool atexit_registered;
    size_t i;
    int done = 0;
    const char *drvname;
    VMChangeStateEntry *vmse;
    const char *drvname = NULL;
    VMChangeStateEntry *e;
    AudioState *s;
    struct audio_driver *driver;
    /* silence gcc warning about uninitialized variable */
    AudiodevListHead head = QSIMPLEQ_HEAD_INITIALIZER(head);

    if (using_spice) {
        /*
         * When using spice allow the spice audio driver being picked
         * as default.
         *
         * Temporary hack. Using audio devices without explicit
         * audiodev= property is already deprecated. Same goes for
         * the -soundhw switch. Once this support gets finally
         * removed we can also drop the concept of a default audio
         * backend and this can go away.
         */
        driver = audio_driver_lookup("spice");
        if (driver) {
            driver->can_be_default = 1;
        }
    }

    if (dev) {
        /* -audiodev option */
        legacy_config = false;
        drvname = AudiodevDriver_str(dev->driver);
    } else if (!QTAILQ_EMPTY(&audio_states)) {
        if (!legacy_config) {
            dolog("Device %s: audiodev default parameter is deprecated, please "
                  "specify audiodev=%s\n", name,
                  QTAILQ_FIRST(&audio_states)->dev->id);
        }
        return QTAILQ_FIRST(&audio_states);
    } else {
        /* legacy implicit initialization */
        head = audio_handle_legacy_opts();
        /*
         * In case of legacy initialization, all Audiodevs in the list will have
         * the same configuration (except the driver), so it doesn't matter which
         * one we chose. We need an Audiodev to set up AudioState before we can
         * init a driver. Also note that dev at this point is still in the
         * list.
         */
        dev = QSIMPLEQ_FIRST(&head)->dev;
        audio_validate_opts(dev, &error_abort);
    }

    s = g_new0(AudioState, 1);
    s->dev = dev;

    QLIST_INIT (&s->hw_head_out);
    QLIST_INIT (&s->hw_head_in);
@@ -1736,39 +1767,56 @@ static AudioState *audio_init(Audiodev *dev, Error **errp)

    s->ts = timer_new_ns(QEMU_CLOCK_VIRTUAL, audio_timer, s);

    if (dev) {
        /* -audiodev option */
        s->dev = dev;
        drvname = AudiodevDriver_str(dev->driver);
        s->nb_hw_voices_out = audio_get_pdo_out(dev)->voices;
        s->nb_hw_voices_in = audio_get_pdo_in(dev)->voices;

        if (s->nb_hw_voices_out < 1) {
            dolog ("Bogus number of playback voices %d, setting to 1\n",
                   s->nb_hw_voices_out);
            s->nb_hw_voices_out = 1;
        }

        if (s->nb_hw_voices_in < 0) {
            dolog ("Bogus number of capture voices %d, setting to 0\n",
                   s->nb_hw_voices_in);
            s->nb_hw_voices_in = 0;
        }

    if (drvname) {
        driver = audio_driver_lookup(drvname);
        if (driver) {
            done = !audio_driver_init(s, driver, dev, errp);
            done = !audio_driver_init(s, driver, true, dev);
        } else {
            error_setg(errp, "Unknown audio driver `%s'", drvname);
            dolog ("Unknown audio driver `%s'\n", drvname);
        }
        if (!done) {
            goto out;
            free_audio_state(s);
            return NULL;
        }
    } else {
        assert(!default_audio_state);
        for (;;) {
            AudiodevListEntry *e = QSIMPLEQ_FIRST(&default_audiodevs);
            if (!e) {
                error_setg(errp, "no default audio driver available");
                goto out;
        for (i = 0; audio_prio_list[i]; i++) {
            AudiodevListEntry *e = audiodev_find(&head, audio_prio_list[i]);
            driver = audio_driver_lookup(audio_prio_list[i]);

            if (e && driver) {
                s->dev = dev = e->dev;
                audio_validate_opts(dev, &error_abort);
                done = !audio_driver_init(s, driver, false, dev);
                if (done) {
                    e->dev = NULL;
                    break;
                }
            }
            s->dev = dev = e->dev;
            QSIMPLEQ_REMOVE_HEAD(&default_audiodevs, next);
            g_free(e);
            drvname = AudiodevDriver_str(dev->driver);
            driver = audio_driver_lookup(drvname);
            if (!audio_driver_init(s, driver, dev, NULL)) {
                break;
            }
            qapi_free_Audiodev(dev);
            s->dev = NULL;
        }
    }
    audio_free_audiodev_list(&head);

    if (!done) {
        driver = audio_driver_lookup("none");
        done = !audio_driver_init(s, driver, false, dev);
        assert(done);
        dolog("warning: Using timer based audio emulation\n");
    }

    if (dev->timer_period <= 0) {
        s->period_ticks = 1;
@@ -1776,51 +1824,37 @@ static AudioState *audio_init(Audiodev *dev, Error **errp)
        s->period_ticks = dev->timer_period * (int64_t)SCALE_US;
    }

    vmse = qemu_add_vm_change_state_handler (audio_vm_change_state_handler, s);
    if (!vmse) {
    e = qemu_add_vm_change_state_handler (audio_vm_change_state_handler, s);
    if (!e) {
        dolog ("warning: Could not register change state handler\n"
               "(Audio can continue looping even after stopping the VM)\n");
    }

    QTAILQ_INSERT_TAIL(&audio_states, s, list);
    QLIST_INIT (&s->card_head);
    vmstate_register_any(NULL, &vmstate_audio, s);
    vmstate_register (NULL, 0, &vmstate_audio, s);
    return s;

out:
    free_audio_state(s);
    return NULL;
}

AudioState *audio_get_default_audio_state(Error **errp)
void audio_free_audiodev_list(AudiodevListHead *head)
{
    if (!default_audio_state) {
        default_audio_state = audio_init(NULL, errp);
        if (!default_audio_state) {
            if (!QSIMPLEQ_EMPTY(&audiodevs)) {
                error_append_hint(errp, "Perhaps you wanted to use -audio or set audiodev=%s?\n",
                                  QSIMPLEQ_FIRST(&audiodevs)->dev->id);
            }
        }
    AudiodevListEntry *e;
    while ((e = QSIMPLEQ_FIRST(head))) {
        QSIMPLEQ_REMOVE_HEAD(head, next);
        qapi_free_Audiodev(e->dev);
        g_free(e);
    }

    return default_audio_state;
}

bool AUD_register_card (const char *name, QEMUSoundCard *card, Error **errp)
void AUD_register_card (const char *name, QEMUSoundCard *card)
{
    if (!card->state) {
        card->state = audio_get_default_audio_state(errp);
        if (!card->state) {
            return false;
        }
        card->state = audio_init(NULL, name);
    }

    card->name = g_strdup (name);
    memset (&card->entries, 0, sizeof (card->entries));
    QLIST_INSERT_HEAD(&card->state->card_head, card, entries);

    return true;
}

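Note: with the bool-returning AUD_register_card(name, card, errp) variant shown above (and declared in the audio.h hunk further down), registration can fail cleanly instead of falling back silently. A hedged sketch of a sound-device realize function using it; the MYSOUND device names are invented for illustration:

/* Illustrative fragment, assuming the bool AUD_register_card(name, card, errp)
 * signature from this series; the "MYSOUND" naming is made up. */
static void mysound_realize(DeviceState *dev, Error **errp)
{
    MYSoundState *s = MYSOUND(dev);   /* hypothetical device state/cast */

    if (!AUD_register_card("mysound", &s->card, errp)) {
        return;   /* errp already set by the audio subsystem */
    }
    /* ... open voices with AUD_open_out()/AUD_open_in() as usual ... */
}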
void AUD_remove_card (QEMUSoundCard *card)
@@ -1842,8 +1876,10 @@ CaptureVoiceOut *AUD_add_capture(
    struct capture_callback *cb;

    if (!s) {
        error_report("Capturing without setting an audiodev is not supported");
        abort();
        if (!legacy_config) {
            dolog("Capturing without setting an audiodev is deprecated\n");
        }
        s = audio_init(NULL, NULL);
    }

    if (!audio_get_pdo_out(s->dev)->mixing_engine) {
@@ -1864,8 +1900,10 @@ CaptureVoiceOut *AUD_add_capture(
    cap = audio_pcm_capture_find_specific(s, as);
    if (cap) {
        QLIST_INSERT_HEAD (&cap->cb_head, cb, entries);
        return cap;
    } else {
        HWVoiceOut *hw;
        CaptureVoiceOut *cap;

        cap = g_malloc0(sizeof(*cap));

@@ -1899,9 +1937,8 @@ CaptureVoiceOut *AUD_add_capture(
        QLIST_FOREACH(hw, &s->hw_head_out, entries) {
            audio_attach_capture (hw);
        }
        return cap;
    }

    return cap;
}

void AUD_del_capture (CaptureVoiceOut *cap, void *cb_opaque)
@@ -2024,9 +2061,6 @@ void audio_create_pdos(Audiodev *dev)
#ifdef CONFIG_AUDIO_PA
    CASE(PA, pa, Pa);
#endif
#ifdef CONFIG_AUDIO_PIPEWIRE
    CASE(PIPEWIRE, pipewire, Pipewire);
#endif
#ifdef CONFIG_AUDIO_SDL
    CASE(SDL, sdl, Sdl);
#endif
@@ -2147,24 +2181,17 @@ void audio_define(Audiodev *dev)
    QSIMPLEQ_INSERT_TAIL(&audiodevs, e, next);
}

void audio_define_default(Audiodev *dev, Error **errp)
{
    AudiodevListEntry *e;

    audio_validate_opts(dev, errp);

    e = g_new0(AudiodevListEntry, 1);
    e->dev = dev;
    QSIMPLEQ_INSERT_TAIL(&default_audiodevs, e, next);
}

void audio_init_audiodevs(void)
bool audio_init_audiodevs(void)
{
    AudiodevListEntry *e;

    QSIMPLEQ_FOREACH(e, &audiodevs, next) {
        audio_init(e->dev, &error_fatal);
        if (!audio_init(e->dev, NULL)) {
            return false;
        }
    }

    return true;
}

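Note: the two sides of this hunk differ in how startup failures surface: one variant aborts inside audio_init() via &error_fatal, the other returns a bool for the caller to check. A hedged sketch of what a caller of the bool-returning variant would look like; the real call site is not part of this diff:

    /* Illustrative fragment only; the actual call site is elsewhere. */
    if (!audio_init_audiodevs()) {
        /* at least one -audiodev backend failed to initialize */
        exit(1);
    }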
audsettings audiodev_to_audsettings(AudiodevPerDirectionOptions *pdo)
@@ -2226,7 +2253,7 @@ int audio_buffer_bytes(AudiodevPerDirectionOptions *pdo,
        audioformat_bytes_per_sample(as->fmt);
}

AudioState *audio_state_by_name(const char *name, Error **errp)
AudioState *audio_state_by_name(const char *name)
{
    AudioState *s;
    QTAILQ_FOREACH(s, &audio_states, list) {
@@ -2235,7 +2262,6 @@ AudioState *audio_state_by_name(const char *name, Error **errp)
            return s;
        }
    }
    error_setg(errp, "audiodev '%s' not found", name);
    return NULL;
}

@@ -94,7 +94,7 @@ typedef struct QEMUAudioTimeStamp {
void AUD_vlog (const char *cap, const char *fmt, va_list ap) G_GNUC_PRINTF(2, 0);
void AUD_log (const char *cap, const char *fmt, ...) G_GNUC_PRINTF(2, 3);

bool AUD_register_card (const char *name, QEMUSoundCard *card, Error **errp);
void AUD_register_card (const char *name, QEMUSoundCard *card);
void AUD_remove_card (QEMUSoundCard *card);
CaptureVoiceOut *AUD_add_capture(
    AudioState *s,
@@ -169,14 +169,12 @@ void audio_sample_from_uint64(void *samples, int pos,
                              uint64_t left, uint64_t right);

void audio_define(Audiodev *audio);
void audio_define_default(Audiodev *dev, Error **errp);
void audio_parse_option(const char *opt);
void audio_create_default_audiodevs(void);
void audio_init_audiodevs(void);
bool audio_init_audiodevs(void);
void audio_help(void);
void audio_legacy_help(void);

AudioState *audio_state_by_name(const char *name, Error **errp);
AudioState *audio_get_default_audio_state(Error **errp);
AudioState *audio_state_by_name(const char *name);
const char *audio_get_id(QEMUSoundCard *card);

#define DEFINE_AUDIO_PROPERTIES(_s, _f) \
Some files were not shown because too many files have changed in this diff.