Compare commits: qdev-array...fixed-ram- (16 commits)
| SHA1 |
|---|
| a0efb212fb |
| 9e086f2e2a |
| cc57d6c75f |
| c204cfeeb7 |
| d5cd94bbaf |
| 4c2f145632 |
| fb56e2387b |
| 5686566113 |
| 4c16cca0e1 |
| 895c40ce0a |
| cad3bc2be3 |
| ea1efd3d17 |
| 3ce4e2e8d5 |
| c1eef47d79 |
| 80f68f67a9 |
| 4ed18df9df |

.cirrus.yml (new file, 111 lines)
							| @@ -0,0 +1,111 @@ | ||||
| env: | ||||
|   CIRRUS_CLONE_DEPTH: 1 | ||||
|  | ||||
| windows_msys2_task: | ||||
|   timeout_in: 90m | ||||
|   windows_container: | ||||
|     image: cirrusci/windowsservercore:2019 | ||||
|     os_version: 2019 | ||||
|     cpu: 8 | ||||
|     memory: 8G | ||||
|   env: | ||||
|     CIRRUS_SHELL: powershell | ||||
|     MSYS: winsymlinks:native | ||||
|     MSYSTEM: MINGW64 | ||||
|     MSYS2_URL: https://github.com/msys2/msys2-installer/releases/download/2022-06-03/msys2-base-x86_64-20220603.sfx.exe | ||||
|     MSYS2_FINGERPRINT: 0 | ||||
|     MSYS2_PACKAGES: " | ||||
|       diffutils git grep make pkg-config sed | ||||
|       mingw-w64-x86_64-python | ||||
|       mingw-w64-x86_64-python-sphinx | ||||
|       mingw-w64-x86_64-toolchain | ||||
|       mingw-w64-x86_64-SDL2 | ||||
|       mingw-w64-x86_64-SDL2_image | ||||
|       mingw-w64-x86_64-gtk3 | ||||
|       mingw-w64-x86_64-glib2 | ||||
|       mingw-w64-x86_64-ninja | ||||
|       mingw-w64-x86_64-jemalloc | ||||
|       mingw-w64-x86_64-lzo2 | ||||
|       mingw-w64-x86_64-zstd | ||||
|       mingw-w64-x86_64-libjpeg-turbo | ||||
|       mingw-w64-x86_64-pixman | ||||
|       mingw-w64-x86_64-libgcrypt | ||||
|       mingw-w64-x86_64-libpng | ||||
|       mingw-w64-x86_64-libssh | ||||
|       mingw-w64-x86_64-snappy | ||||
|       mingw-w64-x86_64-libusb | ||||
|       mingw-w64-x86_64-usbredir | ||||
|       mingw-w64-x86_64-libtasn1 | ||||
|       mingw-w64-x86_64-nettle | ||||
|       mingw-w64-x86_64-cyrus-sasl | ||||
|       mingw-w64-x86_64-curl | ||||
|       mingw-w64-x86_64-gnutls | ||||
|       mingw-w64-x86_64-libnfs | ||||
|     " | ||||
|     CHERE_INVOKING: 1 | ||||
|   msys2_cache: | ||||
|     folder: C:\tools\archive | ||||
|     reupload_on_changes: false | ||||
|     # These env variables are used to generate fingerprint to trigger the cache procedure | ||||
|     # If wanna to force re-populate msys2, increase MSYS2_FINGERPRINT | ||||
|     fingerprint_script: | ||||
|       - | | ||||
|         echo $env:CIRRUS_TASK_NAME | ||||
|         echo $env:MSYS2_URL | ||||
|         echo $env:MSYS2_FINGERPRINT | ||||
|         echo $env:MSYS2_PACKAGES | ||||
|     populate_script: | ||||
|       - | | ||||
|         md -Force C:\tools\archive\pkg | ||||
|         $start_time = Get-Date | ||||
|         bitsadmin /transfer msys_download /dynamic /download /priority FOREGROUND $env:MSYS2_URL C:\tools\archive\base.exe | ||||
|         Write-Output "Download time taken: $((Get-Date).Subtract($start_time))" | ||||
|         cd C:\tools | ||||
|         C:\tools\archive\base.exe -y | ||||
|         del -Force C:\tools\archive\base.exe | ||||
|         Write-Output "Base install time taken: $((Get-Date).Subtract($start_time))" | ||||
|         $start_time = Get-Date | ||||
|  | ||||
|         ((Get-Content -path C:\tools\msys64\etc\\post-install\\07-pacman-key.post -Raw) -replace '--refresh-keys', '--version') | Set-Content -Path C:\tools\msys64\etc\\post-install\\07-pacman-key.post | ||||
|         C:\tools\msys64\usr\bin\bash.exe -lc "sed -i 's/^CheckSpace/#CheckSpace/g' /etc/pacman.conf" | ||||
|         C:\tools\msys64\usr\bin\bash.exe -lc "export" | ||||
|         C:\tools\msys64\usr\bin\pacman.exe --noconfirm -Sy | ||||
|         echo Y | C:\tools\msys64\usr\bin\pacman.exe --noconfirm -Suu --overwrite=* | ||||
|         taskkill /F /FI "MODULES eq msys-2.0.dll" | ||||
|         tasklist | ||||
|         C:\tools\msys64\usr\bin\bash.exe -lc "mv -f /etc/pacman.conf.pacnew /etc/pacman.conf || true" | ||||
|         C:\tools\msys64\usr\bin\bash.exe -lc "pacman --noconfirm -Syuu --overwrite=*" | ||||
|         Write-Output "Core install time taken: $((Get-Date).Subtract($start_time))" | ||||
|         $start_time = Get-Date | ||||
|  | ||||
|         C:\tools\msys64\usr\bin\bash.exe -lc "pacman --noconfirm -S --needed $env:MSYS2_PACKAGES" | ||||
|         Write-Output "Package install time taken: $((Get-Date).Subtract($start_time))" | ||||
|         $start_time = Get-Date | ||||
|  | ||||
|         del -Force -ErrorAction SilentlyContinue C:\tools\msys64\etc\mtab | ||||
|         del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\fd | ||||
|         del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\stderr | ||||
|         del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\stdin | ||||
|         del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\stdout | ||||
|         del -Force -Recurse -ErrorAction SilentlyContinue C:\tools\msys64\var\cache\pacman\pkg | ||||
|         tar cf C:\tools\archive\msys64.tar -C C:\tools\ msys64 | ||||
|  | ||||
|         Write-Output "Package archive time taken: $((Get-Date).Subtract($start_time))" | ||||
|         del -Force -Recurse -ErrorAction SilentlyContinue c:\tools\msys64  | ||||
|   install_script: | ||||
|     - | | ||||
|       $start_time = Get-Date | ||||
|       cd C:\tools | ||||
|       ls C:\tools\archive\msys64.tar | ||||
|       tar xf C:\tools\archive\msys64.tar | ||||
|       Write-Output "Extract msys2 time taken: $((Get-Date).Subtract($start_time))" | ||||
|   script: | ||||
|     - mkdir build | ||||
|     - cd build | ||||
|     - C:\tools\msys64\usr\bin\bash.exe -lc "../configure --python=python3 | ||||
|         --target-list-exclude=i386-softmmu,ppc64-softmmu,aarch64-softmmu,mips64-softmmu,mipsel-softmmu,sh4-softmmu" | ||||
|     - C:\tools\msys64\usr\bin\bash.exe -lc "make -j8" | ||||
|     - exit $LastExitCode | ||||
|   test_script: | ||||
|     - C:\tools\msys64\usr\bin\bash.exe -lc "cd build && make V=1 check" | ||||
|     - exit $LastExitCode | ||||
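The windows_msys2_task above avoids re-installing MSYS2 on every run: the populated tree is tarred into the cached `C:\tools\archive` folder, and `populate_script` only executes when the output of `fingerprint_script` (task name, installer URL, `MSYS2_FINGERPRINT`, package list) changes, which is why bumping `MSYS2_FINGERPRINT` forces a re-population. A minimal sketch of that Cirrus CI cache pattern, with a hypothetical task name, image, and payload:

```yaml
# Minimal sketch of the Cirrus CI cache pattern used above (hypothetical task).
# The cache is reused as long as fingerprint_script prints the same output;
# populate_script only runs on a cache miss.
example_cache_task:
  container:
    image: alpine:latest
  env:
    TOOL_VERSION: "1"          # bump to force re-population, like MSYS2_FINGERPRINT
  deps_cache:
    folder: /cache/deps
    reupload_on_changes: false
    fingerprint_script:
      - echo $CIRRUS_TASK_NAME
      - echo $TOOL_VERSION
    populate_script:
      - mkdir -p /cache/deps
      - echo "expensive download goes here" > /cache/deps/marker
  script:
    - cat /cache/deps/marker
```

Keeping the expensive work in `populate_script` rather than in the task's own scripts means warm runs only pay for restoring the cached folder.
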
| @@ -1,21 +0,0 @@ | ||||
| # | ||||
| # List of code-formatting clean ups the git blame can ignore | ||||
| # | ||||
| #   git blame --ignore-revs-file .git-blame-ignore-revs | ||||
| # | ||||
| # or | ||||
| # | ||||
| #   git config blame.ignoreRevsFile .git-blame-ignore-revs | ||||
| # | ||||
|  | ||||
| # gdbstub: clean-up indents | ||||
| ad9e4585b3c7425759d3eea697afbca71d2c2082 | ||||
|  | ||||
| # e1000e: fix code style | ||||
| 0eadd56bf53ab196a16d492d7dd31c62e1c24c32 | ||||
|  | ||||
| # target/riscv: coding style fixes | ||||
| 8c7feddddd9218b407792120bcfda0347ed16205 | ||||
|  | ||||
| # replace TABs with spaces | ||||
| 48805df9c22a0700fba4b3b548fafaa21726ca68 | ||||
| @@ -1,24 +1,10 @@ | ||||
|  | ||||
| variables: | ||||
|   # On stable branches this is changed by later rules. Should also | ||||
|   # be overridden per pipeline if running pipelines concurrently | ||||
|   # for different branches in contributor forks. | ||||
|   QEMU_CI_CONTAINER_TAG: latest | ||||
|  | ||||
|   # For purposes of CI rules, upstream is the gitlab.com/qemu-project | ||||
|   # namespace. When testing CI, it might be usefult to override this | ||||
|   # to point to a fork repo | ||||
|   QEMU_CI_UPSTREAM: qemu-project | ||||
|  | ||||
| # The order of rules defined here is critically important. | ||||
| # They are evaluated in order and first match wins. | ||||
| # | ||||
| # Thus we group them into a number of stages, ordered from | ||||
| # most restrictive to least restrictive | ||||
| # | ||||
| # For pipelines running for stable "staging-X.Y" branches | ||||
| # we must override QEMU_CI_CONTAINER_TAG | ||||
| # | ||||
| .base_job_template: | ||||
|   variables: | ||||
|     # Each script line from will be in a collapsible section in the job output | ||||
| @@ -33,68 +19,48 @@ variables: | ||||
|     # want jobs to run | ||||
|     ############################################################# | ||||
|  | ||||
|     # Never run jobs upstream on stable branch, staging branch jobs already ran | ||||
|     - if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /^stable-/' | ||||
|       when: never | ||||
|  | ||||
|     # Never run jobs upstream on tags, staging branch jobs already ran | ||||
|     - if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_TAG' | ||||
|       when: never | ||||
|  | ||||
|     # Cirrus jobs can't run unless the creds / target repo are set | ||||
|     - if: '$QEMU_JOB_CIRRUS && ($CIRRUS_GITHUB_REPO == null || $CIRRUS_API_TOKEN == null)' | ||||
|       when: never | ||||
|  | ||||
|     # Publishing jobs should only run on the default branch in upstream | ||||
|     - if: '$QEMU_JOB_PUBLISH == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH' | ||||
|     - if: '$QEMU_JOB_PUBLISH == "1" && $CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH' | ||||
|       when: never | ||||
|  | ||||
|     # Non-publishing jobs should only run on staging branches in upstream | ||||
|     - if: '$QEMU_JOB_PUBLISH != "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH !~ /staging/' | ||||
|     - if: '$QEMU_JOB_PUBLISH != "1" && $CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH !~ /staging/' | ||||
|       when: never | ||||
|  | ||||
|     # Jobs only intended for forks should always be skipped on upstream | ||||
|     - if: '$QEMU_JOB_ONLY_FORKS == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM' | ||||
|     - if: '$QEMU_JOB_ONLY_FORKS == "1" && $CI_PROJECT_NAMESPACE == "qemu-project"' | ||||
|       when: never | ||||
|  | ||||
|     # Forks don't get pipelines unless QEMU_CI=1 or QEMU_CI=2 is set | ||||
|     - if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM' | ||||
|     - if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"' | ||||
|       when: never | ||||
|  | ||||
|     # Avocado jobs don't run in forks unless $QEMU_CI_AVOCADO_TESTING is set | ||||
|     - if: '$QEMU_JOB_AVOCADO && $QEMU_CI_AVOCADO_TESTING != "1" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM' | ||||
|     - if: '$QEMU_JOB_AVOCADO && $QEMU_CI_AVOCADO_TESTING != "1" && $CI_PROJECT_NAMESPACE != "qemu-project"' | ||||
|       when: never | ||||
|  | ||||
|  | ||||
|     ############################################################# | ||||
|     # Stage 2: fine tune execution of jobs in specific scenarios | ||||
|     # where the catch all logic is inappropriate | ||||
|     # where the catch all logic is inapprorpaite | ||||
|     ############################################################# | ||||
|  | ||||
|     # Optional jobs should not be run unless manually triggered | ||||
|     - if: '$QEMU_JOB_OPTIONAL && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/' | ||||
|       when: manual | ||||
|       allow_failure: true | ||||
|       variables: | ||||
|         QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG | ||||
|  | ||||
|     - if: '$QEMU_JOB_OPTIONAL' | ||||
|       when: manual | ||||
|       allow_failure: true | ||||
|  | ||||
|     # Skipped jobs should not be run unless manually triggered | ||||
|     - if: '$QEMU_JOB_SKIPPED && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/' | ||||
|       when: manual | ||||
|       allow_failure: true | ||||
|       variables: | ||||
|         QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG | ||||
|  | ||||
|     - if: '$QEMU_JOB_SKIPPED' | ||||
|       when: manual | ||||
|       allow_failure: true | ||||
|  | ||||
|     # Avocado jobs can be manually start in forks if $QEMU_CI_AVOCADO_TESTING is unset | ||||
|     - if: '$QEMU_JOB_AVOCADO && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM' | ||||
|     - if: '$QEMU_JOB_AVOCADO && $CI_PROJECT_NAMESPACE != "qemu-project"' | ||||
|       when: manual | ||||
|       allow_failure: true | ||||
|  | ||||
| @@ -106,23 +72,8 @@ variables: | ||||
|  | ||||
|     # Forks pipeline jobs don't start automatically unless | ||||
|     # QEMU_CI=2 is set | ||||
|     - if: '$QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM' | ||||
|     - if: '$QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"' | ||||
|       when: manual | ||||
|  | ||||
|     # Upstream pipeline jobs start automatically unless told not to | ||||
|     # by setting QEMU_CI=1 | ||||
|     - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/' | ||||
|       when: manual | ||||
|       variables: | ||||
|         QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG | ||||
|  | ||||
|     - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM' | ||||
|       when: manual | ||||
|  | ||||
|     # Jobs can run if any jobs they depend on were successful | ||||
|     - if: '$QEMU_JOB_SKIPPED && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/' | ||||
|       when: on_success | ||||
|       variables: | ||||
|         QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG | ||||
|  | ||||
|     # Jobs can run if any jobs they depend on were successfull | ||||
|     - when: on_success | ||||
|   | ||||
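As the `.base_job_template` comments stress, GitLab evaluates `rules:` top to bottom and the first matching entry wins, so the hard `when: never` exclusions must come before the fine-tuning entries, with a bare `when: on_success` as the final catch-all. A minimal sketch of that ordering, using a hypothetical job and variable:

```yaml
# Hypothetical job showing first-match-wins rule ordering (not part of the diff).
example-job:
  stage: test
  script:
    - echo "running"
  rules:
    # Stage 1: hard exclusions; a match here prevents the job from ever running
    - if: '$CI_COMMIT_BRANCH =~ /^stable-/'
      when: never
    # Stage 2: special cases that should only start when triggered by hand
    - if: '$EXAMPLE_OPTIONAL == "1"'      # hypothetical variable
      when: manual
      allow_failure: true
    # Stage 3: catch-all; anything not matched above runs normally
    - when: on_success
```
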
| @@ -1,42 +1,34 @@ | ||||
| .native_build_job_template: | ||||
|   extends: .base_job_template | ||||
|   stage: build | ||||
|   image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG | ||||
|   cache: | ||||
|     paths: | ||||
|       - ccache | ||||
|     key: "$CI_JOB_NAME" | ||||
|     when: always | ||||
|   image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest | ||||
|   before_script: | ||||
|     - JOBS=$(expr $(nproc) + 1) | ||||
|   script: | ||||
|     - export CCACHE_BASEDIR="$(pwd)" | ||||
|     - export CCACHE_DIR="$CCACHE_BASEDIR/ccache" | ||||
|     - export CCACHE_MAXSIZE="500M" | ||||
|     - export PATH="$CCACHE_WRAPPERSDIR:$PATH" | ||||
|     - if test -n "$LD_JOBS"; | ||||
|       then | ||||
|         scripts/git-submodule.sh update meson ; | ||||
|       fi | ||||
|     - mkdir build | ||||
|     - cd build | ||||
|     - ccache --zero-stats | ||||
|     - ../configure --enable-werror --disable-docs --enable-fdt=system | ||||
|           ${TARGETS:+--target-list="$TARGETS"} | ||||
|           ${LD_JOBS:+--meson=git} ${TARGETS:+--target-list="$TARGETS"} | ||||
|           $CONFIGURE_ARGS || | ||||
|       { cat config.log meson-logs/meson-log.txt && exit 1; } | ||||
|     - if test -n "$LD_JOBS"; | ||||
|       then | ||||
|         pyvenv/bin/meson configure . -Dbackend_max_links="$LD_JOBS" ; | ||||
|         ../meson/meson.py configure . -Dbackend_max_links="$LD_JOBS" ; | ||||
|       fi || exit 1; | ||||
|     - make -j"$JOBS" | ||||
|     - if test -n "$MAKE_CHECK_ARGS"; | ||||
|       then | ||||
|         make -j"$JOBS" $MAKE_CHECK_ARGS ; | ||||
|       fi | ||||
|     - ccache --show-stats | ||||
|  | ||||
| # We jump some hoops in common_test_job_template to avoid | ||||
| # rebuilding all the object files we skip in the artifacts | ||||
| .native_build_artifact_template: | ||||
|   artifacts: | ||||
|     when: on_success | ||||
|     expire_in: 2 days | ||||
|     paths: | ||||
|       - build | ||||
| @@ -52,10 +44,10 @@ | ||||
| .common_test_job_template: | ||||
|   extends: .base_job_template | ||||
|   stage: test | ||||
|   image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG | ||||
|   image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest | ||||
|   script: | ||||
|     - scripts/git-submodule.sh update roms/SLOF | ||||
|     - meson subprojects download $(cd build/subprojects && echo *) | ||||
|     - scripts/git-submodule.sh update | ||||
|         $(sed -n '/GIT_SUBMODULES=/ s/.*=// p' build/config-host.mak) | ||||
|     - cd build | ||||
|     - find . -type f -exec touch {} + | ||||
|     # Avoid recompiling by hiding ninja with NINJA=":" | ||||
| @@ -65,7 +57,6 @@ | ||||
|   extends: .common_test_job_template | ||||
|   artifacts: | ||||
|     name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG" | ||||
|     when: always | ||||
|     expire_in: 7 days | ||||
|     paths: | ||||
|       - build/meson-logs/testlog.txt | ||||
| @@ -81,7 +72,7 @@ | ||||
|     policy: pull-push | ||||
|   artifacts: | ||||
|     name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG" | ||||
|     when: always | ||||
|     when: on_failure | ||||
|     expire_in: 7 days | ||||
|     paths: | ||||
|       - build/tests/results/latest/results.xml | ||||
|   | ||||
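One side of this diff wires ccache into the GitLab cache for `.native_build_job_template`: the compiler cache lives in a `ccache/` directory inside the project tree, is keyed per job name, and is bracketed by `ccache --zero-stats` / `--show-stats` so the hit rate shows up in the job log. A condensed sketch of that arrangement, with a hypothetical image and a stand-in build step:

```yaml
# Hypothetical build job condensing the ccache pattern from .native_build_job_template.
example-build:
  stage: build
  image: registry.example.org/builder:latest   # hypothetical image
  cache:
    key: "$CI_JOB_NAME"        # one cache per job so different configs don't thrash
    paths:
      - ccache
    when: always               # keep the cache even when the build fails
  script:
    - export CCACHE_BASEDIR="$(pwd)"
    - export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
    - export CCACHE_MAXSIZE="500M"
    - ccache --zero-stats
    - make -j"$(nproc)"        # stand-in for the real configure/make steps
    - ccache --show-stats
```
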
| @@ -9,7 +9,8 @@ build-system-alpine: | ||||
|     - job: amd64-alpine-container | ||||
|   variables: | ||||
|     IMAGE: alpine | ||||
|     TARGETS: avr-softmmu loongarch64-softmmu mips64-softmmu mipsel-softmmu | ||||
|     TARGETS: aarch64-softmmu alpha-softmmu cris-softmmu hppa-softmmu | ||||
|       microblazeel-softmmu mips64el-softmmu | ||||
|     MAKE_CHECK_ARGS: check-build | ||||
|     CONFIGURE_ARGS: --enable-docs --enable-trace-backends=log,simple,syslog | ||||
|  | ||||
| @@ -30,7 +31,6 @@ avocado-system-alpine: | ||||
|   variables: | ||||
|     IMAGE: alpine | ||||
|     MAKE_CHECK_ARGS: check-avocado | ||||
|     AVOCADO_TAGS: arch:avr arch:loongarch64 arch:mips64 arch:mipsel | ||||
|  | ||||
| build-system-ubuntu: | ||||
|   extends: | ||||
| @@ -41,7 +41,8 @@ build-system-ubuntu: | ||||
|   variables: | ||||
|     IMAGE: ubuntu2204 | ||||
|     CONFIGURE_ARGS: --enable-docs | ||||
|     TARGETS: alpha-softmmu microblazeel-softmmu mips64el-softmmu | ||||
|     TARGETS: alpha-softmmu cris-softmmu hppa-softmmu | ||||
|       microblazeel-softmmu mips64el-softmmu | ||||
|     MAKE_CHECK_ARGS: check-build | ||||
|  | ||||
| check-system-ubuntu: | ||||
| @@ -61,7 +62,6 @@ avocado-system-ubuntu: | ||||
|   variables: | ||||
|     IMAGE: ubuntu2204 | ||||
|     MAKE_CHECK_ARGS: check-avocado | ||||
|     AVOCADO_TAGS: arch:alpha arch:microblaze arch:mips64el | ||||
|  | ||||
| build-system-debian: | ||||
|   extends: | ||||
| @@ -72,8 +72,8 @@ build-system-debian: | ||||
|   variables: | ||||
|     IMAGE: debian-amd64 | ||||
|     CONFIGURE_ARGS: --with-coroutine=sigaltstack | ||||
|     TARGETS: arm-softmmu i386-softmmu riscv64-softmmu sh4eb-softmmu | ||||
|       sparc-softmmu xtensa-softmmu | ||||
|     TARGETS: arm-softmmu avr-softmmu i386-softmmu mipsel-softmmu | ||||
|       riscv64-softmmu sh4eb-softmmu sparc-softmmu xtensaeb-softmmu | ||||
|     MAKE_CHECK_ARGS: check-build | ||||
|  | ||||
| check-system-debian: | ||||
| @@ -93,7 +93,6 @@ avocado-system-debian: | ||||
|   variables: | ||||
|     IMAGE: debian-amd64 | ||||
|     MAKE_CHECK_ARGS: check-avocado | ||||
|     AVOCADO_TAGS: arch:arm arch:i386 arch:riscv64 arch:sh4 arch:sparc arch:xtensa | ||||
|  | ||||
| crash-test-debian: | ||||
|   extends: .native_test_job_template | ||||
| @@ -104,8 +103,8 @@ crash-test-debian: | ||||
|     IMAGE: debian-amd64 | ||||
|   script: | ||||
|     - cd build | ||||
|     - make NINJA=":" check-venv | ||||
|     - pyvenv/bin/python3 scripts/device-crash-test -q --tcg-only ./qemu-system-i386 | ||||
|     - make check-venv | ||||
|     - tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-i386 | ||||
|  | ||||
| build-system-fedora: | ||||
|   extends: | ||||
| @@ -116,7 +115,7 @@ build-system-fedora: | ||||
|   variables: | ||||
|     IMAGE: fedora | ||||
|     CONFIGURE_ARGS: --disable-gcrypt --enable-nettle --enable-docs | ||||
|     TARGETS: microblaze-softmmu mips-softmmu | ||||
|     TARGETS: tricore-softmmu microblaze-softmmu mips-softmmu | ||||
|       xtensa-softmmu m68k-softmmu riscv32-softmmu ppc-softmmu sparc64-softmmu | ||||
|     MAKE_CHECK_ARGS: check-build | ||||
|  | ||||
| @@ -137,8 +136,6 @@ avocado-system-fedora: | ||||
|   variables: | ||||
|     IMAGE: fedora | ||||
|     MAKE_CHECK_ARGS: check-avocado | ||||
|     AVOCADO_TAGS: arch:microblaze arch:mips arch:xtensa arch:m68k | ||||
|       arch:riscv32 arch:ppc arch:sparc64 | ||||
|  | ||||
| crash-test-fedora: | ||||
|   extends: .native_test_job_template | ||||
| @@ -149,9 +146,9 @@ crash-test-fedora: | ||||
|     IMAGE: fedora | ||||
|   script: | ||||
|     - cd build | ||||
|     - make NINJA=":" check-venv | ||||
|     - pyvenv/bin/python3 scripts/device-crash-test -q ./qemu-system-ppc | ||||
|     - pyvenv/bin/python3 scripts/device-crash-test -q ./qemu-system-riscv32 | ||||
|     - make check-venv | ||||
|     - tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-ppc | ||||
|     - tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-riscv32 | ||||
|  | ||||
| build-system-centos: | ||||
|   extends: | ||||
| @@ -184,8 +181,6 @@ avocado-system-centos: | ||||
|   variables: | ||||
|     IMAGE: centos8 | ||||
|     MAKE_CHECK_ARGS: check-avocado | ||||
|     AVOCADO_TAGS: arch:ppc64 arch:or1k arch:390x arch:x86_64 arch:rx | ||||
|       arch:sh4 arch:nios2 | ||||
|  | ||||
| build-system-opensuse: | ||||
|   extends: | ||||
| @@ -215,7 +210,6 @@ avocado-system-opensuse: | ||||
|   variables: | ||||
|     IMAGE: opensuse-leap | ||||
|     MAKE_CHECK_ARGS: check-avocado | ||||
|     AVOCADO_TAGS: arch:s390x arch:x86_64 arch:aarch64 | ||||
|  | ||||
|  | ||||
| # This jobs explicitly disable TCG (--disable-tcg), KVM is detected by | ||||
| @@ -256,7 +250,6 @@ build-user: | ||||
|   variables: | ||||
|     IMAGE: debian-all-test-cross | ||||
|     CONFIGURE_ARGS: --disable-tools --disable-system | ||||
|       --target-list-exclude=alpha-linux-user,sh4-linux-user | ||||
|     MAKE_CHECK_ARGS: check-tcg | ||||
|  | ||||
| build-user-static: | ||||
| @@ -266,18 +259,6 @@ build-user-static: | ||||
|   variables: | ||||
|     IMAGE: debian-all-test-cross | ||||
|     CONFIGURE_ARGS: --disable-tools --disable-system --static | ||||
|       --target-list-exclude=alpha-linux-user,sh4-linux-user | ||||
|     MAKE_CHECK_ARGS: check-tcg | ||||
|  | ||||
| # targets stuck on older compilers | ||||
| build-legacy: | ||||
|   extends: .native_build_job_template | ||||
|   needs: | ||||
|     job: amd64-debian-legacy-cross-container | ||||
|   variables: | ||||
|     IMAGE: debian-legacy-test-cross | ||||
|     TARGETS: alpha-linux-user alpha-softmmu sh4-linux-user | ||||
|     CONFIGURE_ARGS: --disable-tools | ||||
|     MAKE_CHECK_ARGS: check-tcg | ||||
|  | ||||
| build-user-hexagon: | ||||
| @@ -290,9 +271,7 @@ build-user-hexagon: | ||||
|     CONFIGURE_ARGS: --disable-tools --disable-docs --enable-debug-tcg | ||||
|     MAKE_CHECK_ARGS: check-tcg | ||||
|  | ||||
| # Build the softmmu targets we have check-tcg tests and compilers in | ||||
| # our omnibus all-test-cross container. Those targets that haven't got | ||||
| # Debian cross compiler support need to use special containers. | ||||
| # Only build the softmmu targets we have check-tcg tests for | ||||
| build-some-softmmu: | ||||
|   extends: .native_build_job_template | ||||
|   needs: | ||||
| @@ -300,18 +279,7 @@ build-some-softmmu: | ||||
|   variables: | ||||
|     IMAGE: debian-all-test-cross | ||||
|     CONFIGURE_ARGS: --disable-tools --enable-debug | ||||
|     TARGETS: arm-softmmu aarch64-softmmu i386-softmmu riscv64-softmmu | ||||
|       s390x-softmmu x86_64-softmmu | ||||
|     MAKE_CHECK_ARGS: check-tcg | ||||
|  | ||||
| build-loongarch64: | ||||
|   extends: .native_build_job_template | ||||
|   needs: | ||||
|     job: loongarch-debian-cross-container | ||||
|   variables: | ||||
|     IMAGE: debian-loongarch-cross | ||||
|     CONFIGURE_ARGS: --disable-tools --enable-debug | ||||
|     TARGETS: loongarch64-linux-user loongarch64-softmmu | ||||
|     TARGETS: xtensa-softmmu arm-softmmu aarch64-softmmu alpha-softmmu | ||||
|     MAKE_CHECK_ARGS: check-tcg | ||||
|  | ||||
| # We build tricore in a very minimal tricore only container | ||||
| @@ -344,7 +312,7 @@ clang-user: | ||||
|   variables: | ||||
|     IMAGE: debian-all-test-cross | ||||
|     CONFIGURE_ARGS: --cc=clang --cxx=clang++ --disable-system | ||||
|       --target-list-exclude=alpha-linux-user,microblazeel-linux-user,aarch64_be-linux-user,i386-linux-user,m68k-linux-user,mipsn32el-linux-user,xtensaeb-linux-user | ||||
|       --target-list-exclude=microblazeel-linux-user,aarch64_be-linux-user,i386-linux-user,m68k-linux-user,mipsn32el-linux-user,xtensaeb-linux-user | ||||
|       --extra-cflags=-fsanitize=undefined --extra-cflags=-fno-sanitize-recover=undefined | ||||
|     MAKE_CHECK_ARGS: check-unit check-tcg | ||||
|  | ||||
| @@ -487,7 +455,7 @@ gcov: | ||||
|     IMAGE: ubuntu2204 | ||||
|     CONFIGURE_ARGS: --enable-gcov | ||||
|     TARGETS: aarch64-softmmu ppc64-softmmu s390x-softmmu x86_64-softmmu | ||||
|     MAKE_CHECK_ARGS: check-unit check-softfloat | ||||
|     MAKE_CHECK_ARGS: check | ||||
|   after_script: | ||||
|     - cd build | ||||
|     - gcovr --xml-pretty --exclude-unreachable-branches --print-summary | ||||
| @@ -495,12 +463,8 @@ gcov: | ||||
|   coverage: /^\s*lines:\s*\d+.\d+\%/ | ||||
|   artifacts: | ||||
|     name: ${CI_JOB_NAME}-${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHA} | ||||
|     when: always | ||||
|     expire_in: 2 days | ||||
|     paths: | ||||
|       - build/meson-logs/testlog.txt | ||||
|     reports: | ||||
|       junit: build/meson-logs/testlog.junit.xml | ||||
|       coverage_report: | ||||
|         coverage_format: cobertura | ||||
|         path: build/coverage.xml | ||||
| @@ -531,7 +495,7 @@ build-tci: | ||||
|   variables: | ||||
|     IMAGE: debian-all-test-cross | ||||
|   script: | ||||
|     - TARGETS="aarch64 arm hppa m68k microblaze ppc64 s390x x86_64" | ||||
|     - TARGETS="aarch64 alpha arm hppa m68k microblaze ppc64 s390x x86_64" | ||||
|     - mkdir build | ||||
|     - cd build | ||||
|     - ../configure --enable-tcg-interpreter --disable-docs --disable-gtk --disable-vnc | ||||
| @@ -564,12 +528,12 @@ build-without-defaults: | ||||
|       --disable-strip | ||||
|     TARGETS: avr-softmmu mips64-softmmu s390x-softmmu sh4-softmmu | ||||
|       sparc64-softmmu hexagon-linux-user i386-linux-user s390x-linux-user | ||||
|     MAKE_CHECK_ARGS: check | ||||
|     MAKE_CHECK_ARGS: check-unit check-qtest-avr check-qtest-mips64 | ||||
|  | ||||
| build-libvhost-user: | ||||
|   extends: .base_job_template | ||||
|   stage: build | ||||
|   image: $CI_REGISTRY_IMAGE/qemu/fedora:$QEMU_CI_CONTAINER_TAG | ||||
|   image: $CI_REGISTRY_IMAGE/qemu/fedora:latest | ||||
|   needs: | ||||
|     job: amd64-fedora-container | ||||
|   script: | ||||
| @@ -609,7 +573,7 @@ build-tools-and-docs-debian: | ||||
| # of what topic branch they're currently using | ||||
| pages: | ||||
|   extends: .base_job_template | ||||
|   image: $CI_REGISTRY_IMAGE/qemu/debian-amd64:$QEMU_CI_CONTAINER_TAG | ||||
|   image: $CI_REGISTRY_IMAGE/qemu/debian-amd64:latest | ||||
|   stage: test | ||||
|   needs: | ||||
|     - job: build-tools-and-docs-debian | ||||
| @@ -624,7 +588,6 @@ pages: | ||||
|     - make -C build install DESTDIR=$(pwd)/temp-install | ||||
|     - mv temp-install/usr/local/share/doc/qemu/* public/ | ||||
|   artifacts: | ||||
|     when: on_success | ||||
|     paths: | ||||
|       - public | ||||
|   variables: | ||||
|   | ||||
| @@ -15,10 +15,8 @@ | ||||
|   stage: build | ||||
|   image: registry.gitlab.com/libvirt/libvirt-ci/cirrus-run:master | ||||
|   needs: [] | ||||
|   # 20 mins larger than "timeout_in" in cirrus/build.yml | ||||
|   # as there's often a 5-10 minute delay before Cirrus CI | ||||
|   # actually starts the task | ||||
|   timeout: 80m | ||||
|   allow_failure: true | ||||
|   script: | ||||
|     - source .gitlab-ci.d/cirrus/$NAME.vars | ||||
|     - sed -e "s|[@]CI_REPOSITORY_URL@|$CI_REPOSITORY_URL|g" | ||||
| @@ -46,13 +44,26 @@ | ||||
|   variables: | ||||
|     QEMU_JOB_CIRRUS: 1 | ||||
|  | ||||
| x64-freebsd-12-build: | ||||
|   extends: .cirrus_build_job | ||||
|   variables: | ||||
|     NAME: freebsd-12 | ||||
|     CIRRUS_VM_INSTANCE_TYPE: freebsd_instance | ||||
|     CIRRUS_VM_IMAGE_SELECTOR: image_family | ||||
|     CIRRUS_VM_IMAGE_NAME: freebsd-12-4 | ||||
|     CIRRUS_VM_CPUS: 8 | ||||
|     CIRRUS_VM_RAM: 8G | ||||
|     UPDATE_COMMAND: pkg update; pkg upgrade -y | ||||
|     INSTALL_COMMAND: pkg install -y | ||||
|     TEST_TARGETS: check | ||||
|  | ||||
| x64-freebsd-13-build: | ||||
|   extends: .cirrus_build_job | ||||
|   variables: | ||||
|     NAME: freebsd-13 | ||||
|     CIRRUS_VM_INSTANCE_TYPE: freebsd_instance | ||||
|     CIRRUS_VM_IMAGE_SELECTOR: image_family | ||||
|     CIRRUS_VM_IMAGE_NAME: freebsd-13-2 | ||||
|     CIRRUS_VM_IMAGE_NAME: freebsd-13-1 | ||||
|     CIRRUS_VM_CPUS: 8 | ||||
|     CIRRUS_VM_RAM: 8G | ||||
|     UPDATE_COMMAND: pkg update; pkg upgrade -y | ||||
|   | ||||
| @@ -16,8 +16,6 @@ env: | ||||
|   TEST_TARGETS: "@TEST_TARGETS@" | ||||
|  | ||||
| build_task: | ||||
|   # A little shorter than GitLab timeout in ../cirrus.yml | ||||
|   timeout_in: 60m | ||||
|   install_script: | ||||
|     - @UPDATE_COMMAND@ | ||||
|     - @INSTALL_COMMAND@ @PKGS@ | ||||
|   | ||||
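The `.cirrus_build_job` template on the GitLab side works by text substitution: it sources the per-OS `$NAME.vars` file and replaces each `@TOKEN@` placeholder in `.gitlab-ci.d/cirrus/build.yml` with `sed`; the rendered file is presumably then submitted through the cirrus-run tool the job image provides (the rest of the `sed` expression is truncated in the hunk above). A hedged sketch of the rendering step only, with a hypothetical output filename and just two of the substitutions:

```yaml
# Hypothetical job condensing the GitLab-side rendering of the Cirrus template.
render-cirrus-config:
  stage: build
  image: registry.gitlab.com/libvirt/libvirt-ci/cirrus-run:master
  variables:
    NAME: freebsd-13
    INSTALL_COMMAND: pkg install -y
  script:
    - source .gitlab-ci.d/cirrus/$NAME.vars
    # The real job chains many more -e expressions (PKGS, TEST_TARGETS, ...)
    - sed -e "s|[@]CI_REPOSITORY_URL@|$CI_REPOSITORY_URL|g"
          -e "s|[@]INSTALL_COMMAND@|$INSTALL_COMMAND|g"
          .gitlab-ci.d/cirrus/build.yml >rendered.yml
    - cat rendered.yml   # the real job hands the rendered config to Cirrus instead
```
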
							
								
								
									
.gitlab-ci.d/cirrus/freebsd-12.vars (new file, 16 lines)
							| @@ -0,0 +1,16 @@ | ||||
| # THIS FILE WAS AUTO-GENERATED | ||||
| # | ||||
| #  $ lcitool variables freebsd-12 qemu | ||||
| # | ||||
| # https://gitlab.com/libvirt/libvirt-ci | ||||
|  | ||||
| CCACHE='/usr/local/bin/ccache' | ||||
| CPAN_PKGS='' | ||||
| CROSS_PKGS='' | ||||
| MAKE='/usr/local/bin/gmake' | ||||
| NINJA='/usr/local/bin/ninja' | ||||
| PACKAGING_COMMAND='pkg' | ||||
| PIP3='/usr/local/bin/pip-3.8' | ||||
| PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio socat spice-protocol tesseract usbredir virglrenderer vte3 zstd' | ||||
| PYPI_PKGS='' | ||||
| PYTHON='/usr/local/bin/python3' | ||||
| @@ -11,6 +11,6 @@ MAKE='/usr/local/bin/gmake' | ||||
| NINJA='/usr/local/bin/ninja' | ||||
| PACKAGING_COMMAND='pkg' | ||||
| PIP3='/usr/local/bin/pip-3.8' | ||||
| PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson mtools ncurses nettle ninja opencv pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-tomli py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio socat spice-protocol tesseract usbredir virglrenderer vte3 xorriso zstd' | ||||
| PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio socat spice-protocol tesseract usbredir virglrenderer vte3 zstd' | ||||
| PYPI_PKGS='' | ||||
| PYTHON='/usr/local/bin/python3' | ||||
|   | ||||
| @@ -15,7 +15,7 @@ env: | ||||
|     folder: $HOME/.cache/qemu-vm | ||||
|   install_script: | ||||
|     - dnf update -y | ||||
|     - dnf install -y git make openssh-clients qemu-img qemu-system-x86 wget meson | ||||
|     - dnf install -y git make openssh-clients qemu-img qemu-system-x86 wget | ||||
|   clone_script: | ||||
|     - git clone --depth 100 "$CI_REPOSITORY_URL" . | ||||
|     - git fetch origin "$CI_COMMIT_REF_NAME" | ||||
|   | ||||
| @@ -11,6 +11,6 @@ MAKE='/opt/homebrew/bin/gmake' | ||||
| NINJA='/opt/homebrew/bin/ninja' | ||||
| PACKAGING_COMMAND='brew' | ||||
| PIP3='/opt/homebrew/bin/pip3' | ||||
| PKGS='bash bc bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo json-c libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson mtools ncurses nettle ninja pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy socat sparse spice-protocol swtpm tesseract usbredir vde vte3 xorriso zlib zstd' | ||||
| PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme tomli' | ||||
| PKGS='bash bc bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo json-c libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson ncurses nettle ninja pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy socat sparse spice-protocol tesseract usbredir vde vte3 zlib zstd' | ||||
| PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme' | ||||
| PYTHON='/opt/homebrew/bin/python3' | ||||
|   | ||||
| @@ -1,3 +1,9 @@ | ||||
| alpha-debian-cross-container: | ||||
|   extends: .container_job_template | ||||
|   stage: containers | ||||
|   variables: | ||||
|     NAME: debian-alpha-cross | ||||
|  | ||||
| amd64-debian-cross-container: | ||||
|   extends: .container_job_template | ||||
|   stage: containers | ||||
| @@ -10,12 +16,6 @@ amd64-debian-user-cross-container: | ||||
|   variables: | ||||
|     NAME: debian-all-test-cross | ||||
|  | ||||
| amd64-debian-legacy-cross-container: | ||||
|   extends: .container_job_template | ||||
|   stage: containers | ||||
|   variables: | ||||
|     NAME: debian-legacy-test-cross | ||||
|  | ||||
| arm64-debian-cross-container: | ||||
|   extends: .container_job_template | ||||
|   stage: containers | ||||
| @@ -40,11 +40,23 @@ hexagon-cross-container: | ||||
|   variables: | ||||
|     NAME: debian-hexagon-cross | ||||
|  | ||||
| loongarch-debian-cross-container: | ||||
| hppa-debian-cross-container: | ||||
|   extends: .container_job_template | ||||
|   stage: containers | ||||
|   variables: | ||||
|     NAME: debian-loongarch-cross | ||||
|     NAME: debian-hppa-cross | ||||
|  | ||||
| m68k-debian-cross-container: | ||||
|   extends: .container_job_template | ||||
|   stage: containers | ||||
|   variables: | ||||
|     NAME: debian-m68k-cross | ||||
|  | ||||
| mips64-debian-cross-container: | ||||
|   extends: .container_job_template | ||||
|   stage: containers | ||||
|   variables: | ||||
|     NAME: debian-mips64-cross | ||||
|  | ||||
| mips64el-debian-cross-container: | ||||
|   extends: .container_job_template | ||||
| @@ -52,12 +64,24 @@ mips64el-debian-cross-container: | ||||
|   variables: | ||||
|     NAME: debian-mips64el-cross | ||||
|  | ||||
| mips-debian-cross-container: | ||||
|   extends: .container_job_template | ||||
|   stage: containers | ||||
|   variables: | ||||
|     NAME: debian-mips-cross | ||||
|  | ||||
| mipsel-debian-cross-container: | ||||
|   extends: .container_job_template | ||||
|   stage: containers | ||||
|   variables: | ||||
|     NAME: debian-mipsel-cross | ||||
|  | ||||
| powerpc-test-cross-container: | ||||
|   extends: .container_job_template | ||||
|   stage: containers | ||||
|   variables: | ||||
|     NAME: debian-powerpc-test-cross | ||||
|  | ||||
| ppc64el-debian-cross-container: | ||||
|   extends: .container_job_template | ||||
|   stage: containers | ||||
| @@ -71,7 +95,13 @@ riscv64-debian-cross-container: | ||||
|   allow_failure: true | ||||
|   variables: | ||||
|     NAME: debian-riscv64-cross | ||||
|     QEMU_JOB_OPTIONAL: 1 | ||||
|  | ||||
| # we can however build TCG tests using a non-sid base | ||||
| riscv64-debian-test-cross-container: | ||||
|   extends: .container_job_template | ||||
|   stage: containers | ||||
|   variables: | ||||
|     NAME: debian-riscv64-test-cross | ||||
|  | ||||
| s390x-debian-cross-container: | ||||
|   extends: .container_job_template | ||||
| @@ -79,6 +109,18 @@ s390x-debian-cross-container: | ||||
|   variables: | ||||
|     NAME: debian-s390x-cross | ||||
|  | ||||
| sh4-debian-cross-container: | ||||
|   extends: .container_job_template | ||||
|   stage: containers | ||||
|   variables: | ||||
|     NAME: debian-sh4-cross | ||||
|  | ||||
| sparc64-debian-cross-container: | ||||
|   extends: .container_job_template | ||||
|   stage: containers | ||||
|   variables: | ||||
|     NAME: debian-sparc64-cross | ||||
|  | ||||
| tricore-debian-cross-container: | ||||
|   extends: .container_job_template | ||||
|   stage: containers | ||||
|   | ||||
| @@ -1,15 +1,15 @@ | ||||
| .container_job_template: | ||||
|   extends: .base_job_template | ||||
|   image: docker:latest | ||||
|   image: docker:stable | ||||
|   stage: containers | ||||
|   services: | ||||
|     - docker:dind | ||||
|   before_script: | ||||
|     - export TAG="$CI_REGISTRY_IMAGE/qemu/$NAME:$QEMU_CI_CONTAINER_TAG" | ||||
|     # Always ':latest' because we always use upstream as a common cache source | ||||
|     - export TAG="$CI_REGISTRY_IMAGE/qemu/$NAME:latest" | ||||
|     - export COMMON_TAG="$CI_REGISTRY/qemu-project/qemu/qemu/$NAME:latest" | ||||
|     - apk add python3 | ||||
|     - docker info | ||||
|     - docker login $CI_REGISTRY -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" | ||||
|     - until docker info; do sleep 1; done | ||||
|   script: | ||||
|     - echo "TAG:$TAG" | ||||
|     - echo "COMMON_TAG:$COMMON_TAG" | ||||
|   | ||||
| @@ -1,21 +1,11 @@ | ||||
| .cross_system_build_job: | ||||
|   extends: .base_job_template | ||||
|   stage: build | ||||
|   image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG | ||||
|   cache: | ||||
|     paths: | ||||
|       - ccache | ||||
|     key: "$CI_JOB_NAME" | ||||
|     when: always | ||||
|   image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest | ||||
|   timeout: 80m | ||||
|   script: | ||||
|     - export CCACHE_BASEDIR="$(pwd)" | ||||
|     - export CCACHE_DIR="$CCACHE_BASEDIR/ccache" | ||||
|     - export CCACHE_MAXSIZE="500M" | ||||
|     - export PATH="$CCACHE_WRAPPERSDIR:$PATH" | ||||
|     - mkdir build | ||||
|     - cd build | ||||
|     - ccache --zero-stats | ||||
|     - ../configure --enable-werror --disable-docs --enable-fdt=system | ||||
|         --disable-user $QEMU_CONFIGURE_OPTS $EXTRA_CONFIGURE_OPTS | ||||
|         --target-list-exclude="arm-softmmu cris-softmmu | ||||
| @@ -28,7 +18,6 @@ | ||||
|       version="$(git describe --match v[0-9]* 2>/dev/null || git rev-parse --short HEAD)"; | ||||
|       mv -v qemu-setup*.exe qemu-setup-${version}.exe; | ||||
|       fi | ||||
|     - ccache --show-stats | ||||
|  | ||||
| # Job to cross-build specific accelerators. | ||||
| # | ||||
| @@ -38,17 +27,9 @@ | ||||
| .cross_accel_build_job: | ||||
|   extends: .base_job_template | ||||
|   stage: build | ||||
|   image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG | ||||
|   image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest | ||||
|   timeout: 30m | ||||
|   cache: | ||||
|     paths: | ||||
|       - ccache/ | ||||
|     key: "$CI_JOB_NAME" | ||||
|   script: | ||||
|     - export CCACHE_BASEDIR="$(pwd)" | ||||
|     - export CCACHE_DIR="$CCACHE_BASEDIR/ccache" | ||||
|     - export CCACHE_MAXSIZE="500M" | ||||
|     - export PATH="$CCACHE_WRAPPERSDIR:$PATH" | ||||
|     - mkdir build | ||||
|     - cd build | ||||
|     - ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS | ||||
| @@ -58,15 +39,8 @@ | ||||
| .cross_user_build_job: | ||||
|   extends: .base_job_template | ||||
|   stage: build | ||||
|   image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG | ||||
|   cache: | ||||
|     paths: | ||||
|       - ccache/ | ||||
|     key: "$CI_JOB_NAME" | ||||
|   image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest | ||||
|   script: | ||||
|     - export CCACHE_BASEDIR="$(pwd)" | ||||
|     - export CCACHE_DIR="$CCACHE_BASEDIR/ccache" | ||||
|     - export CCACHE_MAXSIZE="500M" | ||||
|     - mkdir build | ||||
|     - cd build | ||||
|     - ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS | ||||
| @@ -81,7 +55,6 @@ | ||||
| .cross_test_artifacts: | ||||
|   artifacts: | ||||
|     name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG" | ||||
|     when: always | ||||
|     expire_in: 7 days | ||||
|     paths: | ||||
|       - build/meson-logs/testlog.txt | ||||
|   | ||||
| @@ -1,6 +1,13 @@ | ||||
| include: | ||||
|   - local: '/.gitlab-ci.d/crossbuild-template.yml' | ||||
|  | ||||
| cross-armel-system: | ||||
|   extends: .cross_system_build_job | ||||
|   needs: | ||||
|     job: armel-debian-cross-container | ||||
|   variables: | ||||
|     IMAGE: debian-armel-cross | ||||
|  | ||||
| cross-armel-user: | ||||
|   extends: .cross_user_build_job | ||||
|   needs: | ||||
| @@ -8,6 +15,13 @@ cross-armel-user: | ||||
|   variables: | ||||
|     IMAGE: debian-armel-cross | ||||
|  | ||||
| cross-armhf-system: | ||||
|   extends: .cross_system_build_job | ||||
|   needs: | ||||
|     job: armhf-debian-cross-container | ||||
|   variables: | ||||
|     IMAGE: debian-armhf-cross | ||||
|  | ||||
| cross-armhf-user: | ||||
|   extends: .cross_user_build_job | ||||
|   needs: | ||||
| @@ -29,13 +43,15 @@ cross-arm64-user: | ||||
|   variables: | ||||
|     IMAGE: debian-arm64-cross | ||||
|  | ||||
| cross-arm64-kvm-only: | ||||
|   extends: .cross_accel_build_job | ||||
| cross-i386-system: | ||||
|   extends: | ||||
|     - .cross_system_build_job | ||||
|     - .cross_test_artifacts | ||||
|   needs: | ||||
|     job: arm64-debian-cross-container | ||||
|     job: i386-fedora-cross-container | ||||
|   variables: | ||||
|     IMAGE: debian-arm64-cross | ||||
|     EXTRA_CONFIGURE_OPTS: --disable-tcg --without-default-features | ||||
|     IMAGE: fedora-i386-cross | ||||
|     MAKE_CHECK_ARGS: check-qtest | ||||
|  | ||||
| cross-i386-user: | ||||
|   extends: | ||||
| @@ -57,7 +73,7 @@ cross-i386-tci: | ||||
|   variables: | ||||
|     IMAGE: fedora-i386-cross | ||||
|     ACCEL: tcg-interpreter | ||||
|     EXTRA_CONFIGURE_OPTS: --target-list=i386-softmmu,i386-linux-user,aarch64-softmmu,aarch64-linux-user,ppc-softmmu,ppc-linux-user --disable-plugins | ||||
|     EXTRA_CONFIGURE_OPTS: --target-list=i386-softmmu,i386-linux-user,aarch64-softmmu,aarch64-linux-user,ppc-softmmu,ppc-linux-user | ||||
|     MAKE_CHECK_ARGS: check check-tcg | ||||
|  | ||||
| cross-mipsel-system: | ||||
| @@ -165,11 +181,10 @@ cross-win32-system: | ||||
|     job: win32-fedora-cross-container | ||||
|   variables: | ||||
|     IMAGE: fedora-win32-cross | ||||
|     EXTRA_CONFIGURE_OPTS: --enable-fdt=internal --disable-plugins | ||||
|     EXTRA_CONFIGURE_OPTS: --enable-fdt=internal | ||||
|     CROSS_SKIP_TARGETS: alpha-softmmu avr-softmmu hppa-softmmu m68k-softmmu | ||||
|                         microblazeel-softmmu mips64el-softmmu nios2-softmmu | ||||
|   artifacts: | ||||
|     when: on_success | ||||
|     paths: | ||||
|       - build/qemu-setup*.exe | ||||
|  | ||||
| @@ -179,13 +194,12 @@ cross-win64-system: | ||||
|     job: win64-fedora-cross-container | ||||
|   variables: | ||||
|     IMAGE: fedora-win64-cross | ||||
|     EXTRA_CONFIGURE_OPTS: --enable-fdt=internal --disable-plugins | ||||
|     EXTRA_CONFIGURE_OPTS: --enable-fdt=internal | ||||
|     CROSS_SKIP_TARGETS: alpha-softmmu avr-softmmu hppa-softmmu | ||||
|                         m68k-softmmu microblazeel-softmmu nios2-softmmu | ||||
|                         or1k-softmmu rx-softmmu sh4eb-softmmu sparc64-softmmu | ||||
|                         tricore-softmmu xtensaeb-softmmu | ||||
|   artifacts: | ||||
|     when: on_success | ||||
|     paths: | ||||
|       - build/qemu-setup*.exe | ||||
|  | ||||
|   | ||||
| @@ -20,10 +20,8 @@ variables: | ||||
|   artifacts: | ||||
|     name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG" | ||||
|     expire_in: 7 days | ||||
|     when: always | ||||
|     paths: | ||||
|       - build/build.ninja | ||||
|       - build/meson-logs | ||||
|       - build/meson-logs/testlog.txt | ||||
|     reports: | ||||
|       junit: build/meson-logs/testlog.junit.xml | ||||
|  | ||||
|   | ||||
| @@ -1,9 +1,4 @@ | ||||
| # All centos-stream-8 jobs should run successfully in an environment | ||||
| # setup by the scripts/ci/setup/stream/8/build-environment.yml task | ||||
| # "Installation of extra packages to build QEMU" | ||||
|  | ||||
| centos-stream-8-x86_64: | ||||
|  extends: .custom_runner_template | ||||
|  allow_failure: true | ||||
|  needs: [] | ||||
|  stage: build | ||||
| @@ -13,6 +8,15 @@ centos-stream-8-x86_64: | ||||
|  rules: | ||||
|  - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' | ||||
|  - if: "$CENTOS_STREAM_8_x86_64_RUNNER_AVAILABLE" | ||||
|  artifacts: | ||||
|    name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG" | ||||
|    when: on_failure | ||||
|    expire_in: 7 days | ||||
|    paths: | ||||
|      - build/tests/results/latest/results.xml | ||||
|      - build/tests/results/latest/test-results | ||||
|    reports: | ||||
|      junit: build/tests/results/latest/results.xml | ||||
|  before_script: | ||||
|  - JOBS=$(expr $(nproc) + 1) | ||||
|  script: | ||||
| @@ -21,4 +25,6 @@ centos-stream-8-x86_64: | ||||
|  - ../scripts/ci/org.centos/stream/8/x86_64/configure | ||||
|    || { cat config.log meson-logs/meson-log.txt; exit 1; } | ||||
|  - make -j"$JOBS" | ||||
|  - make NINJA=":" check check-avocado | ||||
|  - make NINJA=":" check | ||||
|    || { cat meson-logs/testlog.txt; exit 1; } ; | ||||
|  - ../scripts/ci/org.centos/stream/8/x86_64/test-avocado | ||||
|   | ||||
| @@ -1,6 +1,6 @@ | ||||
| # All ubuntu-22.04 jobs should run successfully in an environment | ||||
| # setup by the scripts/ci/setup/qemu/build-environment.yml task | ||||
| # "Install basic packages to build QEMU on Ubuntu 22.04" | ||||
| # "Install basic packages to build QEMU on Ubuntu 20.04" | ||||
|  | ||||
| ubuntu-22.04-aarch32-all: | ||||
|  extends: .custom_runner_template | ||||
|   | ||||
| @@ -1,6 +1,6 @@ | ||||
| # All ubuntu-22.04 jobs should run successfully in an environment | ||||
| # All ubuntu-20.04 jobs should run successfully in an environment | ||||
| # setup by the scripts/ci/setup/qemu/build-environment.yml task | ||||
| # "Install basic packages to build QEMU on Ubuntu 22.04" | ||||
| # "Install basic packages to build QEMU on Ubuntu 20.04" | ||||
|  | ||||
| ubuntu-22.04-aarch64-all-linux-static: | ||||
|  extends: .custom_runner_template | ||||
| @@ -45,28 +45,6 @@ ubuntu-22.04-aarch64-all: | ||||
|  - make --output-sync -j`nproc --ignore=40` | ||||
|  - make --output-sync -j`nproc --ignore=40` check | ||||
|  | ||||
| ubuntu-22.04-aarch64-without-defaults: | ||||
|  extends: .custom_runner_template | ||||
|  needs: [] | ||||
|  stage: build | ||||
|  tags: | ||||
|  - ubuntu_22.04 | ||||
|  - aarch64 | ||||
|  rules: | ||||
|  - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' | ||||
|    when: manual | ||||
|    allow_failure: true | ||||
|  - if: "$AARCH64_RUNNER_AVAILABLE" | ||||
|    when: manual | ||||
|    allow_failure: true | ||||
|  script: | ||||
|  - mkdir build | ||||
|  - cd build | ||||
|  - ../configure --disable-user --without-default-devices --without-default-features | ||||
|    || { cat config.log meson-logs/meson-log.txt; exit 1; } | ||||
|  - make --output-sync -j`nproc --ignore=40` | ||||
|  - make --output-sync -j`nproc --ignore=40` check | ||||
|  | ||||
| ubuntu-22.04-aarch64-alldbg: | ||||
|  extends: .custom_runner_template | ||||
|  needs: [] | ||||
| @@ -145,7 +123,7 @@ ubuntu-22.04-aarch64-notcg: | ||||
|  script: | ||||
|  - mkdir build | ||||
|  - cd build | ||||
|  - ../configure --disable-tcg --with-devices-aarch64=minimal | ||||
|  - ../configure --disable-tcg | ||||
|    || { cat config.log meson-logs/meson-log.txt; exit 1; } | ||||
|  - make --output-sync -j`nproc --ignore=40` | ||||
|  - make --output-sync -j`nproc --ignore=40` check | ||||
|   | ||||
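These custom-runner jobs do not run in shared containers; they pick a dedicated machine through GitLab runner tags (`ubuntu_22.04`, `aarch64`, ...) and are gated either on the upstream staging branches or on an availability variable such as `$AARCH64_RUNNER_AVAILABLE` in forks. A minimal sketch of that job shape, with a hypothetical name and a bare `../configure`:

```yaml
# Hypothetical custom-runner job mirroring the ubuntu-22.04-aarch64 jobs above.
example-custom-runner:
  extends: .custom_runner_template
  needs: []
  stage: build
  tags:
    - ubuntu_22.04
    - aarch64
  rules:
    - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
    - if: "$AARCH64_RUNNER_AVAILABLE"
  script:
    - mkdir build
    - cd build
    - ../configure || { cat config.log meson-logs/meson-log.txt; exit 1; }
    - make --output-sync -j`nproc --ignore=40`
    - make --output-sync -j`nproc --ignore=40` check
```
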
							
								
								
									
.gitlab-ci.d/edk2.yml (new file, 85 lines)
							| @@ -0,0 +1,85 @@ | ||||
| # All jobs needing docker-edk2 must use the same rules it uses. | ||||
| .edk2_job_rules: | ||||
|   rules: | ||||
|     # Forks don't get pipelines unless QEMU_CI=1 or QEMU_CI=2 is set | ||||
|     - if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"' | ||||
|       when: never | ||||
|  | ||||
|     # In forks, if QEMU_CI=1 is set, then create manual job | ||||
|     # if any of the files affecting the build are touched | ||||
|     - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project"' | ||||
|       changes: | ||||
|         - .gitlab-ci.d/edk2.yml | ||||
|         - .gitlab-ci.d/edk2/Dockerfile | ||||
|         - roms/edk2/* | ||||
|       when: manual | ||||
|  | ||||
|     # In forks, if QEMU_CI=1 is set, then create manual job | ||||
|     # if the branch/tag starts with 'edk2' | ||||
|     - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_REF_NAME =~ /^edk2/' | ||||
|       when: manual | ||||
|  | ||||
|     # In forks, if QEMU_CI=1 is set, then create manual job | ||||
|     # if last commit msg contains 'EDK2' (case insensitive) | ||||
|     - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_MESSAGE =~ /edk2/i' | ||||
|       when: manual | ||||
|  | ||||
|     # Run if any files affecting the build output are touched | ||||
|     - changes: | ||||
|         - .gitlab-ci.d/edk2.yml | ||||
|         - .gitlab-ci.d/edk2/Dockerfile | ||||
|         - roms/edk2/* | ||||
|       when: on_success | ||||
|  | ||||
|     # Run if the branch/tag starts with 'edk2' | ||||
|     - if: '$CI_COMMIT_REF_NAME =~ /^edk2/' | ||||
|       when: on_success | ||||
|  | ||||
|     # Run if last commit msg contains 'EDK2' (case insensitive) | ||||
|     - if: '$CI_COMMIT_MESSAGE =~ /edk2/i' | ||||
|       when: on_success | ||||
|  | ||||
| docker-edk2: | ||||
|   extends: .edk2_job_rules | ||||
|   stage: containers | ||||
|   image: docker:19.03.1 | ||||
|   services: | ||||
|     - docker:19.03.1-dind | ||||
|   variables: | ||||
|     GIT_DEPTH: 3 | ||||
|     IMAGE_TAG: $CI_REGISTRY_IMAGE:edk2-cross-build | ||||
|     # We don't use TLS | ||||
|     DOCKER_HOST: tcp://docker:2375 | ||||
|     DOCKER_TLS_CERTDIR: "" | ||||
|   before_script: | ||||
|     - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY | ||||
|   script: | ||||
|     - docker pull $IMAGE_TAG || true | ||||
|     - docker build --cache-from $IMAGE_TAG --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA | ||||
|                                            --tag $IMAGE_TAG .gitlab-ci.d/edk2 | ||||
|     - docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA | ||||
|     - docker push $IMAGE_TAG | ||||
|  | ||||
| build-edk2: | ||||
|   extends: .edk2_job_rules | ||||
|   stage: build | ||||
|   needs: ['docker-edk2'] | ||||
|   artifacts: | ||||
|     paths: # 'artifacts.zip' will contains the following files: | ||||
|       - pc-bios/edk2*bz2 | ||||
|       - pc-bios/edk2-licenses.txt | ||||
|       - edk2-stdout.log | ||||
|       - edk2-stderr.log | ||||
|   image: $CI_REGISTRY_IMAGE:edk2-cross-build | ||||
|   variables: | ||||
|     GIT_DEPTH: 3 | ||||
|   script: # Clone the required submodules and build EDK2 | ||||
|     - git submodule update --init roms/edk2 | ||||
|     - git -C roms/edk2 submodule update --init -- | ||||
|        ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3 | ||||
|        BaseTools/Source/C/BrotliCompress/brotli | ||||
|        CryptoPkg/Library/OpensslLib/openssl | ||||
|        MdeModulePkg/Library/BrotliCustomDecompressLib/brotli | ||||
|     - export JOBS=$(($(getconf _NPROCESSORS_ONLN) + 1)) | ||||
|     - echo "=== Using ${JOBS} simultaneous jobs ===" | ||||
|     - make -j${JOBS} -C roms efi 2>&1 1>edk2-stdout.log | tee -a edk2-stderr.log >&2 | ||||
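The `docker-edk2` job (like `docker-opensbi` below) warms its layer cache from the registry: it pulls the previously pushed image with `|| true` so a missing image is not fatal, builds with `--cache-from` so unchanged Dockerfile layers are reused, and then pushes the refreshed tag. A stripped-down sketch of that pattern with a hypothetical tag:

```yaml
# Hypothetical container job showing the pull-then-build-with---cache-from pattern.
docker-example:
  stage: containers
  image: docker:19.03.1
  services:
    - docker:19.03.1-dind
  variables:
    IMAGE_TAG: $CI_REGISTRY_IMAGE:example-cross-build   # hypothetical tag
    DOCKER_HOST: tcp://docker:2375    # dind without TLS, as in the diff above
    DOCKER_TLS_CERTDIR: ""
  before_script:
    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
  script:
    - docker pull $IMAGE_TAG || true                     # tolerate a cold cache
    - docker build --cache-from $IMAGE_TAG --tag $IMAGE_TAG .gitlab-ci.d/edk2
    - docker push $IMAGE_TAG
```
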
							
								
								
									
.gitlab-ci.d/edk2/Dockerfile (new file, 27 lines)
							| @@ -0,0 +1,27 @@ | ||||
| # | ||||
| # Docker image to cross-compile EDK2 firmware binaries | ||||
| # | ||||
| FROM ubuntu:18.04 | ||||
|  | ||||
| MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org> | ||||
|  | ||||
| # Install packages required to build EDK2 | ||||
| RUN apt update \ | ||||
|     && \ | ||||
|     \ | ||||
|     DEBIAN_FRONTEND=noninteractive \ | ||||
|     apt install --assume-yes --no-install-recommends \ | ||||
|         build-essential \ | ||||
|         ca-certificates \ | ||||
|         dos2unix \ | ||||
|         gcc-aarch64-linux-gnu \ | ||||
|         gcc-arm-linux-gnueabi \ | ||||
|         git \ | ||||
|         iasl \ | ||||
|         make \ | ||||
|         nasm \ | ||||
|         python3 \ | ||||
|         uuid-dev \ | ||||
|     && \ | ||||
|     \ | ||||
|     rm -rf /var/lib/apt/lists/* | ||||
| @@ -42,15 +42,17 @@ | ||||
| docker-opensbi: | ||||
|   extends: .opensbi_job_rules | ||||
|   stage: containers | ||||
|   image: docker:latest | ||||
|   image: docker:19.03.1 | ||||
|   services: | ||||
|     - docker:dind | ||||
|     - docker:19.03.1-dind | ||||
|   variables: | ||||
|     GIT_DEPTH: 3 | ||||
|     IMAGE_TAG: $CI_REGISTRY_IMAGE:opensbi-cross-build | ||||
|     # We don't use TLS | ||||
|     DOCKER_HOST: tcp://docker:2375 | ||||
|     DOCKER_TLS_CERTDIR: "" | ||||
|   before_script: | ||||
|     - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY | ||||
|     - until docker info; do sleep 1; done | ||||
|   script: | ||||
|     - docker pull $IMAGE_TAG || true | ||||
|     - docker build --cache-from $IMAGE_TAG --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA | ||||
| @@ -63,7 +65,6 @@ build-opensbi: | ||||
|   stage: build | ||||
|   needs: ['docker-opensbi'] | ||||
|   artifacts: | ||||
|     when: on_success | ||||
|     paths: # 'artifacts.zip' will contains the following files: | ||||
|       - pc-bios/opensbi-riscv32-generic-fw_dynamic.bin | ||||
|       - pc-bios/opensbi-riscv64-generic-fw_dynamic.bin | ||||
|   | ||||
| @@ -15,7 +15,6 @@ RUN apt update \ | ||||
|         ca-certificates \ | ||||
|         git \ | ||||
|         make \ | ||||
| 	python3 \ | ||||
|         wget \ | ||||
|     && \ | ||||
|     \ | ||||
|   | ||||
| @@ -1,16 +1,10 @@ | ||||
| # This file contains the set of jobs run by the QEMU project: | ||||
| # https://gitlab.com/qemu-project/qemu/-/pipelines | ||||
|  | ||||
| variables: | ||||
|   RUNNER_TAG: "" | ||||
|  | ||||
| default: | ||||
|   tags: | ||||
|     - $RUNNER_TAG | ||||
|  | ||||
| include: | ||||
|   - local: '/.gitlab-ci.d/base.yml' | ||||
|   - local: '/.gitlab-ci.d/stages.yml' | ||||
|   - local: '/.gitlab-ci.d/edk2.yml' | ||||
|   - local: '/.gitlab-ci.d/opensbi.yml' | ||||
|   - local: '/.gitlab-ci.d/containers.yml' | ||||
|   - local: '/.gitlab-ci.d/crossbuilds.yml' | ||||
|   | ||||
| @@ -26,7 +26,7 @@ check-dco: | ||||
| check-python-minreqs: | ||||
|   extends: .base_job_template | ||||
|   stage: test | ||||
|   image: $CI_REGISTRY_IMAGE/qemu/python:$QEMU_CI_CONTAINER_TAG | ||||
|   image: $CI_REGISTRY_IMAGE/qemu/python:latest | ||||
|   script: | ||||
|     - make -C python check-minreqs | ||||
|   variables: | ||||
| @@ -37,7 +37,7 @@ check-python-minreqs: | ||||
| check-python-tox: | ||||
|   extends: .base_job_template | ||||
|   stage: test | ||||
|   image: $CI_REGISTRY_IMAGE/qemu/python:$QEMU_CI_CONTAINER_TAG | ||||
|   image: $CI_REGISTRY_IMAGE/qemu/python:latest | ||||
|   script: | ||||
|     - make -C python check-tox | ||||
|   variables: | ||||
|   | ||||
| @@ -5,60 +5,21 @@ | ||||
|   - windows | ||||
|   - windows-1809 | ||||
|   cache: | ||||
|     key: "$CI_JOB_NAME" | ||||
|     key: "${CI_JOB_NAME}-cache" | ||||
|     paths: | ||||
|       - msys64/var/cache | ||||
|       - ccache | ||||
|     when: always | ||||
|       - ${CI_PROJECT_DIR}/msys64/var/cache | ||||
|   needs: [] | ||||
|   stage: build | ||||
|   timeout: 100m | ||||
|   variables: | ||||
|     # This feature doesn't (currently) work with PowerShell, it stops | ||||
|     # the echo'ing of commands being run and doesn't show any timing | ||||
|     FF_SCRIPT_SECTIONS: 0 | ||||
|   artifacts: | ||||
|     name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG" | ||||
|     expire_in: 7 days | ||||
|     paths: | ||||
|       - build/meson-logs/testlog.txt | ||||
|     reports: | ||||
|       junit: "build/meson-logs/testlog.junit.xml" | ||||
|   timeout: 80m | ||||
|   before_script: | ||||
|   - Write-Output "Acquiring msys2.exe installer at $(Get-Date -Format u)" | ||||
|   - If ( !(Test-Path -Path msys64\var\cache ) ) { | ||||
|       mkdir msys64\var\cache | ||||
|     } | ||||
|   - Invoke-WebRequest | ||||
|     "https://repo.msys2.org/distrib/msys2-x86_64-latest.sfx.exe.sig" | ||||
|     -outfile "msys2.exe.sig" | ||||
|   - if ( Test-Path -Path msys64\var\cache\msys2.exe.sig ) { | ||||
|       Write-Output "Cached installer sig" ; | ||||
|       if ( ((Get-FileHash msys2.exe.sig).Hash -ne (Get-FileHash msys64\var\cache\msys2.exe.sig).Hash) ) { | ||||
|         Write-Output "Mis-matched installer sig, new installer download required" ; | ||||
|         Remove-Item -Path msys64\var\cache\msys2.exe.sig ; | ||||
|         if ( Test-Path -Path msys64\var\cache\msys2.exe ) { | ||||
|           Remove-Item -Path msys64\var\cache\msys2.exe | ||||
|         } | ||||
|       } else { | ||||
|         Write-Output "Matched installer sig, cached installer still valid" | ||||
|       } | ||||
|     } else { | ||||
|       Write-Output "No cached installer sig, new installer download required" ; | ||||
|       if ( Test-Path -Path msys64\var\cache\msys2.exe ) { | ||||
|         Remove-Item -Path msys64\var\cache\msys2.exe | ||||
|       } | ||||
|     } | ||||
|   - if ( !(Test-Path -Path msys64\var\cache\msys2.exe ) ) { | ||||
|       Write-Output "Fetching latest installer" ; | ||||
|   - If ( !(Test-Path -Path msys64\var\cache\msys2.exe ) ) { | ||||
|       Invoke-WebRequest | ||||
|       "https://repo.msys2.org/distrib/msys2-x86_64-latest.sfx.exe" | ||||
|       -outfile "msys64\var\cache\msys2.exe" ; | ||||
|       Copy-Item -Path msys2.exe.sig -Destination msys64\var\cache\msys2.exe.sig | ||||
|     } else { | ||||
|       Write-Output "Using cached installer" | ||||
|       "https://github.com/msys2/msys2-installer/releases/download/2022-06-03/msys2-base-x86_64-20220603.sfx.exe" | ||||
|       -outfile "msys64\var\cache\msys2.exe" | ||||
|     } | ||||
|   - Write-Output "Invoking msys2.exe installer at $(Get-Date -Format u)" | ||||
|   - msys64\var\cache\msys2.exe -y | ||||
|   - ((Get-Content -path .\msys64\etc\\post-install\\07-pacman-key.post -Raw) | ||||
|       -replace '--refresh-keys', '--version') | | ||||
| @@ -67,75 +28,95 @@ | ||||
|   - .\msys64\usr\bin\bash -lc 'pacman --noconfirm -Syuu'  # Core update | ||||
|   - .\msys64\usr\bin\bash -lc 'pacman --noconfirm -Syuu'  # Normal update | ||||
|   - taskkill /F /FI "MODULES eq msys-2.0.dll" | ||||
|   script: | ||||
|   - Write-Output "Installing mingw packages at $(Get-Date -Format u)" | ||||
|   - .\msys64\usr\bin\bash -lc "pacman -Sy --noconfirm --needed | ||||
|       bison diffutils flex | ||||
|       git grep make sed | ||||
|       $MINGW_TARGET-binutils | ||||
|       $MINGW_TARGET-capstone | ||||
|       $MINGW_TARGET-ccache | ||||
|       $MINGW_TARGET-curl | ||||
|       $MINGW_TARGET-cyrus-sasl | ||||
|       $MINGW_TARGET-dtc | ||||
|       $MINGW_TARGET-gcc | ||||
|       $MINGW_TARGET-glib2 | ||||
|       $MINGW_TARGET-gnutls | ||||
|       $MINGW_TARGET-gtk3 | ||||
|       $MINGW_TARGET-libgcrypt | ||||
|       $MINGW_TARGET-libjpeg-turbo | ||||
|       $MINGW_TARGET-libnfs | ||||
|       $MINGW_TARGET-libpng | ||||
|       $MINGW_TARGET-libssh | ||||
|       $MINGW_TARGET-libtasn1 | ||||
|       $MINGW_TARGET-libusb | ||||
|       $MINGW_TARGET-lzo2 | ||||
|       $MINGW_TARGET-nettle | ||||
|       $MINGW_TARGET-ninja | ||||
|       $MINGW_TARGET-pixman | ||||
|       $MINGW_TARGET-pkgconf | ||||
|       $MINGW_TARGET-python | ||||
|       $MINGW_TARGET-SDL2 | ||||
|       $MINGW_TARGET-SDL2_image | ||||
|       $MINGW_TARGET-snappy | ||||
|       $MINGW_TARGET-spice | ||||
|       $MINGW_TARGET-usbredir | ||||
|       $MINGW_TARGET-zstd " | ||||
|   - Write-Output "Running build at $(Get-Date -Format u)" | ||||
|   - $env:CHERE_INVOKING = 'yes'  # Preserve the current working directory | ||||
|   - $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink | ||||
|   - $env:CCACHE_BASEDIR = "$env:CI_PROJECT_DIR" | ||||
|   - $env:CCACHE_DIR = "$env:CCACHE_BASEDIR/ccache" | ||||
|   - $env:CCACHE_MAXSIZE = "500M" | ||||
|   - $env:CCACHE_DEPEND = 1 # cache misses are too expensive with preprocessor mode | ||||
|   - $env:CC = "ccache gcc" | ||||
|   - mkdir build | ||||
|   - cd build | ||||
|   - ..\msys64\usr\bin\bash -lc "ccache --zero-stats" | ||||
|   - ..\msys64\usr\bin\bash -lc "../configure --enable-fdt=system $CONFIGURE_ARGS" | ||||
|   - ..\msys64\usr\bin\bash -lc "make" | ||||
|   - ..\msys64\usr\bin\bash -lc "make check MTESTARGS='$TEST_ARGS' || { cat meson-logs/testlog.txt; exit 1; } ;" | ||||
|   - ..\msys64\usr\bin\bash -lc "ccache --show-stats" | ||||
|   - Write-Output "Finished build at $(Get-Date -Format u)" | ||||
|  | ||||
| msys2-64bit: | ||||
|   extends: .shared_msys2_builder | ||||
|   variables: | ||||
|     MINGW_TARGET: mingw-w64-x86_64 | ||||
|     MSYSTEM: MINGW64 | ||||
|     # do not remove "--without-default-devices"! | ||||
|     # commit 9f8e6cad65a6 ("gitlab-ci: Speed up the msys2-64bit job by using --without-default-devices" | ||||
|     # changed to compile QEMU with the --without-default-devices switch | ||||
|     # for the msys2 64-bit job, due to the build could not complete within | ||||
|     # the project timeout. | ||||
|     CONFIGURE_ARGS:  --target-list=x86_64-softmmu --without-default-devices -Ddebug=false -Doptimization=0 | ||||
|     # qTests don't run successfully with "--without-default-devices", | ||||
|     # so let's exclude the qtests from CI for now. | ||||
|     TEST_ARGS: --no-suite qtest | ||||
|   script: | ||||
|   - .\msys64\usr\bin\bash -lc "pacman -Sy --noconfirm --needed | ||||
|       bison diffutils flex | ||||
|       git grep make sed | ||||
|       mingw-w64-x86_64-capstone | ||||
|       mingw-w64-x86_64-curl | ||||
|       mingw-w64-x86_64-cyrus-sasl | ||||
|       mingw-w64-x86_64-dtc | ||||
|       mingw-w64-x86_64-gcc | ||||
|       mingw-w64-x86_64-glib2 | ||||
|       mingw-w64-x86_64-gnutls | ||||
|       mingw-w64-x86_64-gtk3 | ||||
|       mingw-w64-x86_64-libgcrypt | ||||
|       mingw-w64-x86_64-libjpeg-turbo | ||||
|       mingw-w64-x86_64-libnfs | ||||
|       mingw-w64-x86_64-libpng | ||||
|       mingw-w64-x86_64-libssh | ||||
|       mingw-w64-x86_64-libtasn1 | ||||
|       mingw-w64-x86_64-libusb | ||||
|       mingw-w64-x86_64-lzo2 | ||||
|       mingw-w64-x86_64-nettle | ||||
|       mingw-w64-x86_64-ninja | ||||
|       mingw-w64-x86_64-pixman | ||||
|       mingw-w64-x86_64-pkgconf | ||||
|       mingw-w64-x86_64-python | ||||
|       mingw-w64-x86_64-SDL2 | ||||
|       mingw-w64-x86_64-SDL2_image | ||||
|       mingw-w64-x86_64-snappy | ||||
|       mingw-w64-x86_64-usbredir | ||||
|       mingw-w64-x86_64-zstd " | ||||
|   - $env:CHERE_INVOKING = 'yes'  # Preserve the current working directory | ||||
|   - $env:MSYSTEM = 'MINGW64'     # Start a 64-bit MinGW environment | ||||
|   - $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink | ||||
|   - mkdir output | ||||
|   - cd output | ||||
|   # Note: do not remove "--without-default-devices"! | ||||
|   # commit 9f8e6cad65a6 ("gitlab-ci: Speed up the msys2-64bit job by using --without-default-devices" | ||||
|   # changed to compile QEMU with the --without-default-devices switch | ||||
|   # for the msys2 64-bit job, due to the build could not complete within | ||||
|   # the project timeout. | ||||
|   - ..\msys64\usr\bin\bash -lc '../configure --target-list=x86_64-softmmu | ||||
|       --without-default-devices --enable-fdt=system' | ||||
|   - ..\msys64\usr\bin\bash -lc 'make' | ||||
|   # qTests don't run successfully with "--without-default-devices", | ||||
|   # so let's exclude the qtests from CI for now. | ||||
|   - ..\msys64\usr\bin\bash -lc 'make check MTESTARGS=\"--no-suite qtest\" || { cat meson-logs/testlog.txt; exit 1; } ;' | ||||
|  | ||||
| msys2-32bit: | ||||
|   extends: .shared_msys2_builder | ||||
|   variables: | ||||
|     MINGW_TARGET: mingw-w64-i686 | ||||
|     MSYSTEM: MINGW32 | ||||
|     CONFIGURE_ARGS:  --target-list=ppc64-softmmu -Ddebug=false -Doptimization=0 | ||||
|     TEST_ARGS: --no-suite qtest | ||||
|   script: | ||||
|   - .\msys64\usr\bin\bash -lc "pacman -Sy --noconfirm --needed | ||||
|       bison diffutils flex | ||||
|       git grep make sed | ||||
|       mingw-w64-i686-capstone | ||||
|       mingw-w64-i686-curl | ||||
|       mingw-w64-i686-cyrus-sasl | ||||
|       mingw-w64-i686-dtc | ||||
|       mingw-w64-i686-gcc | ||||
|       mingw-w64-i686-glib2 | ||||
|       mingw-w64-i686-gnutls | ||||
|       mingw-w64-i686-gtk3 | ||||
|       mingw-w64-i686-libgcrypt | ||||
|       mingw-w64-i686-libjpeg-turbo | ||||
|       mingw-w64-i686-libnfs | ||||
|       mingw-w64-i686-libpng | ||||
|       mingw-w64-i686-libssh | ||||
|       mingw-w64-i686-libtasn1 | ||||
|       mingw-w64-i686-libusb | ||||
|       mingw-w64-i686-lzo2 | ||||
|       mingw-w64-i686-nettle | ||||
|       mingw-w64-i686-ninja | ||||
|       mingw-w64-i686-pixman | ||||
|       mingw-w64-i686-pkgconf | ||||
|       mingw-w64-i686-python | ||||
|       mingw-w64-i686-SDL2 | ||||
|       mingw-w64-i686-SDL2_image | ||||
|       mingw-w64-i686-snappy | ||||
|       mingw-w64-i686-usbredir | ||||
|       mingw-w64-i686-zstd " | ||||
|   - $env:CHERE_INVOKING = 'yes'  # Preserve the current working directory | ||||
|   - $env:MSYSTEM = 'MINGW32'     # Start a 32-bit MinGW environment | ||||
|   - $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink | ||||
|   - mkdir output | ||||
|   - cd output | ||||
|   - ..\msys64\usr\bin\bash -lc '../configure --target-list=ppc64-softmmu | ||||
|                                 --enable-fdt=system' | ||||
|   - ..\msys64\usr\bin\bash -lc 'make' | ||||
|   - ..\msys64\usr\bin\bash -lc 'make check MTESTARGS=\"--no-suite qtest\" || | ||||
|                                 { cat meson-logs/testlog.txt; exit 1; }' | ||||
|   | ||||
							
								
								
									
18  .gitmodules  (vendored)
							| @@ -13,6 +13,9 @@ | ||||
| [submodule "roms/qemu-palcode"] | ||||
| 	path = roms/qemu-palcode | ||||
| 	url = https://gitlab.com/qemu-project/qemu-palcode.git | ||||
| [submodule "dtc"] | ||||
| 	path = dtc | ||||
| 	url = https://gitlab.com/qemu-project/dtc.git | ||||
| [submodule "roms/u-boot"] | ||||
| 	path = roms/u-boot | ||||
| 	url = https://gitlab.com/qemu-project/u-boot.git | ||||
| @@ -22,12 +25,21 @@ | ||||
| [submodule "roms/QemuMacDrivers"] | ||||
| 	path = roms/QemuMacDrivers | ||||
| 	url = https://gitlab.com/qemu-project/QemuMacDrivers.git | ||||
| [submodule "ui/keycodemapdb"] | ||||
| 	path = ui/keycodemapdb | ||||
| 	url = https://gitlab.com/qemu-project/keycodemapdb.git | ||||
| [submodule "roms/seabios-hppa"] | ||||
| 	path = roms/seabios-hppa | ||||
| 	url = https://gitlab.com/qemu-project/seabios-hppa.git | ||||
| [submodule "roms/u-boot-sam460ex"] | ||||
| 	path = roms/u-boot-sam460ex | ||||
| 	url = https://gitlab.com/qemu-project/u-boot-sam460ex.git | ||||
| [submodule "tests/fp/berkeley-testfloat-3"] | ||||
| 	path = tests/fp/berkeley-testfloat-3 | ||||
| 	url = https://gitlab.com/qemu-project/berkeley-testfloat-3.git | ||||
| [submodule "tests/fp/berkeley-softfloat-3"] | ||||
| 	path = tests/fp/berkeley-softfloat-3 | ||||
| 	url = https://gitlab.com/qemu-project/berkeley-softfloat-3.git | ||||
| [submodule "roms/edk2"] | ||||
| 	path = roms/edk2 | ||||
| 	url = https://gitlab.com/qemu-project/edk2.git | ||||
| @@ -37,9 +49,15 @@ | ||||
| [submodule "roms/qboot"] | ||||
| 	path = roms/qboot | ||||
| 	url = https://gitlab.com/qemu-project/qboot.git | ||||
| [submodule "meson"] | ||||
| 	path = meson | ||||
| 	url = https://gitlab.com/qemu-project/meson.git | ||||
| [submodule "roms/vbootrom"] | ||||
| 	path = roms/vbootrom | ||||
| 	url = https://gitlab.com/qemu-project/vbootrom.git | ||||
| [submodule "tests/lcitool/libvirt-ci"] | ||||
| 	path = tests/lcitool/libvirt-ci | ||||
| 	url = https://gitlab.com/libvirt/libvirt-ci.git | ||||
| [submodule "subprojects/libvfio-user"] | ||||
| 	path = subprojects/libvfio-user | ||||
| 	url = https://gitlab.com/qemu-project/libvfio-user.git | ||||
|   | ||||
							
								
								
									
25  .mailmap
							| @@ -30,38 +30,22 @@ malc <av1474@comtv.ru> malc <malc@c046a42c-6fe2-441c-8c8c-71466251a162> | ||||
| # Corrupted Author fields | ||||
| Aaron Larson <alarson@ddci.com> alarson@ddci.com | ||||
| Andreas Färber <andreas.faerber@web.de> Andreas Färber <andreas.faerber> | ||||
| fanwenjie <fanwj@mail.ustc.edu.cn> fanwj@mail.ustc.edu.cn <fanwj@mail.ustc.edu.cn> | ||||
| Jason Wang <jasowang@redhat.com> Jason Wang <jasowang> | ||||
| Marek Dolata <mkdolata@us.ibm.com> mkdolata@us.ibm.com <mkdolata@us.ibm.com> | ||||
| Michael Ellerman <mpe@ellerman.id.au> michael@ozlabs.org <michael@ozlabs.org> | ||||
| Nick Hudson <hnick@vmware.com> hnick@vmware.com <hnick@vmware.com> | ||||
| Timothée Cocault <timothee.cocault@gmail.com> timothee.cocault@gmail.com <timothee.cocault@gmail.com> | ||||
|  | ||||
| # There is also a: | ||||
| #    (no author) <(no author)@c046a42c-6fe2-441c-8c8c-71466251a162> | ||||
| # for the cvs2svn initialization commit e63c3dc74bf. | ||||
|  | ||||
| # Next, translate a few commits where mailman rewrote the From: line due | ||||
| # to strict SPF and DMARC.  Usually, our build process should be flagging | ||||
| # commits like these before maintainer merges; if you find the need to add | ||||
| # a line here, please also report a bug against the part of the build | ||||
| # process that let the mis-attribution slip through in the first place. | ||||
| # | ||||
| # If the mailing list munges your emails, use: | ||||
| #   git config sendemail.from '"Your Name" <your.email@example.com>' | ||||
| # the use of "" in that line will differ from the typically unquoted | ||||
| # 'git config user.name', which in turn is sufficient for 'git send-email' | ||||
| # to add an extra From: line in the body of your email that takes | ||||
| # precedence over any munged From: in the mail's headers. | ||||
| # See https://lists.openembedded.org/g/openembedded-core/message/166515 | ||||
| # and https://lists.gnu.org/archive/html/qemu-devel/2023-09/msg06784.html | ||||
| # to strict SPF, although we prefer to avoid adding more entries like that. | ||||
| Ed Swierk <eswierk@skyportsystems.com> Ed Swierk via Qemu-devel <qemu-devel@nongnu.org> | ||||
| Ian McKellar <ianloic@google.com> Ian McKellar via Qemu-devel <qemu-devel@nongnu.org> | ||||
| Julia Suvorova <jusual@mail.ru> Julia Suvorova via Qemu-devel <qemu-devel@nongnu.org> | ||||
| Justin Terry (VM) <juterry@microsoft.com> Justin Terry (VM) via Qemu-devel <qemu-devel@nongnu.org> | ||||
| Stefan Weil <sw@weilnetz.de> Stefan Weil via <qemu-devel@nongnu.org> | ||||
| Andrey Drobyshev <andrey.drobyshev@virtuozzo.com> Andrey Drobyshev via <qemu-block@nongnu.org> | ||||
| BALATON Zoltan <balaton@eik.bme.hu> BALATON Zoltan via <qemu-ppc@nongnu.org> | ||||
|  | ||||
| # Next, replace old addresses by a more recent one. | ||||
| Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> <aleksandar.markovic@mips.com> | ||||
| @@ -70,10 +54,8 @@ Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> <amarkovic@wavecomp.com> | ||||
| Aleksandar Rikalo <aleksandar.rikalo@syrmia.com> <arikalo@wavecomp.com> | ||||
| Aleksandar Rikalo <aleksandar.rikalo@syrmia.com> <aleksandar.rikalo@rt-rk.com> | ||||
| Alexander Graf <agraf@csgraf.de> <agraf@suse.de> | ||||
| Ani Sinha <anisinha@redhat.com> <ani@anisinha.ca> | ||||
| Anthony Liguori <anthony@codemonkey.ws> Anthony Liguori <aliguori@us.ibm.com> | ||||
| Christian Borntraeger <borntraeger@linux.ibm.com> <borntraeger@de.ibm.com> | ||||
| Damien Hedde <damien.hedde@dahe.fr> <damien.hedde@greensocs.com> | ||||
| Filip Bozuta <filip.bozuta@syrmia.com> <filip.bozuta@rt-rk.com.com> | ||||
| Frederic Konrad <konrad.frederic@yahoo.fr> <fred.konrad@greensocs.com> | ||||
| Frederic Konrad <konrad.frederic@yahoo.fr> <konrad@adacore.com> | ||||
| @@ -83,9 +65,6 @@ Huacai Chen <chenhuacai@kernel.org> <chenhuacai@loongson.cn> | ||||
| James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com> | ||||
| Leif Lindholm <quic_llindhol@quicinc.com> <leif.lindholm@linaro.org> | ||||
| Leif Lindholm <quic_llindhol@quicinc.com> <leif@nuviainc.com> | ||||
| Luc Michel <luc@lmichel.fr> <luc.michel@git.antfield.fr> | ||||
| Luc Michel <luc@lmichel.fr> <luc.michel@greensocs.com> | ||||
| Luc Michel <luc@lmichel.fr> <lmichel@kalray.eu> | ||||
| Radoslaw Biernacki <rad@semihalf.com> <radoslaw.biernacki@linaro.org> | ||||
| Paul Brook <paul@nowt.org> <paul@codesourcery.com> | ||||
| Paul Burton <paulburton@kernel.org> <paul.burton@mips.com> | ||||
| @@ -95,9 +74,7 @@ Paul Burton <paulburton@kernel.org> <pburton@wavecomp.com> | ||||
| Philippe Mathieu-Daudé <philmd@linaro.org> <f4bug@amsat.org> | ||||
| Philippe Mathieu-Daudé <philmd@linaro.org> <philmd@redhat.com> | ||||
| Philippe Mathieu-Daudé <philmd@linaro.org> <philmd@fungible.com> | ||||
| Roman Bolshakov <rbolshakov@ddn.com> <r.bolshakov@yadro.com> | ||||
| Stefan Brankovic <stefan.brankovic@syrmia.com> <stefan.brankovic@rt-rk.com.com> | ||||
| Taylor Simpson <ltaylorsimpson@gmail.com> <tsimpson@quicinc.com> | ||||
| Yongbok Kim <yongbok.kim@mips.com> <yongbok.kim@imgtec.com> | ||||
|  | ||||
| # Also list preferred name forms where people have changed their | ||||
|   | ||||
| @@ -34,7 +34,7 @@ env: | ||||
|     - BASE_CONFIG="--disable-docs --disable-tools" | ||||
|     - TEST_BUILD_CMD="" | ||||
|     - TEST_CMD="make check V=1" | ||||
|     # This is broadly a list of "mainline" system targets which have support across the major distros | ||||
|     # This is broadly a list of "mainline" softmmu targets which have support across the major distros | ||||
|     - MAIN_SOFTMMU_TARGETS="aarch64-softmmu,mips64-softmmu,ppc64-softmmu,riscv64-softmmu,s390x-softmmu,x86_64-softmmu" | ||||
|     - CCACHE_SLOPPINESS="include_file_ctime,include_file_mtime" | ||||
|     - CCACHE_MAXSIZE=1G | ||||
| @@ -197,7 +197,7 @@ jobs: | ||||
|               $(exit $BUILD_RC); | ||||
|           fi | ||||
|  | ||||
|     - name: "[s390x] GCC (other-system)" | ||||
|     - name: "[s390x] GCC (other-softmmu)" | ||||
|       arch: s390x | ||||
|       dist: focal | ||||
|       addons: | ||||
| @@ -237,15 +237,13 @@ jobs: | ||||
|           - libglib2.0-dev | ||||
|           - libgnutls28-dev | ||||
|           - ninja-build | ||||
|           - flex | ||||
|           - bison | ||||
|       env: | ||||
|         - CONFIG="--disable-containers --disable-system" | ||||
|  | ||||
|     - name: "[s390x] Clang (disable-tcg)" | ||||
|       arch: s390x | ||||
|       dist: focal | ||||
|       compiler: clang-10 | ||||
|       compiler: clang | ||||
|       addons: | ||||
|         apt_packages: | ||||
|           - libaio-dev | ||||
| @@ -271,7 +269,6 @@ jobs: | ||||
|           - libvdeplug-dev | ||||
|           - libvte-2.91-dev | ||||
|           - ninja-build | ||||
|           - clang-10 | ||||
|       env: | ||||
|         - TEST_CMD="make check-unit" | ||||
|         - CONFIG="--disable-containers --disable-tcg --enable-kvm --disable-tools | ||||
|   | ||||
| @@ -11,9 +11,6 @@ config OPENGL | ||||
| config X11 | ||||
|     bool | ||||
|  | ||||
| config PIXMAN | ||||
|     bool | ||||
|  | ||||
| config SPICE | ||||
|     bool | ||||
|  | ||||
| @@ -49,6 +46,3 @@ config FUZZ | ||||
| config VFIO_USER_SERVER_ALLOWED | ||||
|     bool | ||||
|     imply VFIO_USER_SERVER | ||||
|  | ||||
| config HV_BALLOON_POSSIBLE | ||||
|     bool | ||||
|   | ||||
							
								
								
									
478  MAINTAINERS  (file diff suppressed because it is too large)
80  Makefile
							| @@ -26,9 +26,9 @@ quiet-command-run = $(if $(V),,$(if $2,printf "  %-7s %s\n" $2 $3 && ))$1 | ||||
| quiet-@ = $(if $(V),,@) | ||||
| quiet-command = $(quiet-@)$(call quiet-command-run,$1,$2,$3) | ||||
|  | ||||
| UNCHECKED_GOALS := TAGS gtags cscope ctags dist \ | ||||
| UNCHECKED_GOALS := %clean TAGS cscope ctags dist \ | ||||
|     help check-help print-% \ | ||||
|     docker docker-% lcitool-refresh vm-help vm-test vm-build-% | ||||
|     docker docker-% vm-help vm-test vm-build-% | ||||
|  | ||||
| all: | ||||
| .PHONY: all clean distclean recurse-all dist msi FORCE | ||||
| @@ -45,6 +45,18 @@ include config-host.mak | ||||
| include Makefile.prereqs | ||||
| Makefile.prereqs: config-host.mak | ||||
|  | ||||
| git-submodule-update: | ||||
| .git-submodule-status: git-submodule-update config-host.mak | ||||
| Makefile: .git-submodule-status | ||||
|  | ||||
| .PHONY: git-submodule-update | ||||
| git-submodule-update: | ||||
| ifneq ($(GIT_SUBMODULES_ACTION),ignore) | ||||
| 	$(call quiet-command, \ | ||||
| 		(GIT="$(GIT)" "$(SRC_PATH)/scripts/git-submodule.sh" $(GIT_SUBMODULES_ACTION) $(GIT_SUBMODULES)), \ | ||||
| 		"GIT","$(GIT_SUBMODULES)") | ||||
| endif | ||||
|  | ||||
| # 0. ensure the build tree is okay | ||||
|  | ||||
| # Check that we're not trying to do an out-of-tree build from | ||||
| @@ -83,17 +95,16 @@ config-host.mak: $(SRC_PATH)/configure $(SRC_PATH)/scripts/meson-buildoptions.sh | ||||
| 	@if test -f meson-private/coredata.dat; then \ | ||||
| 	  ./config.status --skip-meson; \ | ||||
| 	else \ | ||||
| 	  ./config.status; \ | ||||
| 	  ./config.status && touch build.ninja.stamp; \ | ||||
| 	fi | ||||
|  | ||||
| # 2. meson.stamp exists if meson has run at least once (so ninja reconfigure | ||||
| # works), but otherwise never needs to be updated | ||||
|  | ||||
| meson-private/coredata.dat: meson.stamp | ||||
| meson.stamp: config-host.mak | ||||
| 	@touch meson.stamp | ||||
|  | ||||
| # 3. ensure meson-generated build files are up-to-date | ||||
| # 3. ensure generated build files are up-to-date | ||||
|  | ||||
| ifneq ($(NINJA),) | ||||
| Makefile.ninja: build.ninja | ||||
| @@ -104,23 +115,15 @@ Makefile.ninja: build.ninja | ||||
| 	  $(NINJA) -t query build.ninja | sed -n '1,/^  input:/d; /^  outputs:/q; s/$$/ \\/p'; \ | ||||
| 	} > $@.tmp && mv $@.tmp $@ | ||||
| -include Makefile.ninja | ||||
| endif | ||||
|  | ||||
| ifneq ($(MESON),) | ||||
| # The path to meson always points to pyvenv/bin/meson, but the absolute | ||||
| # paths could change.  In that case, force a regeneration of build.ninja. | ||||
| # Note that this invocation of $(NINJA), just like when Make rebuilds | ||||
| # Makefiles, does not include -n. | ||||
| # A separate rule is needed for Makefile dependencies to avoid -n | ||||
| build.ninja: build.ninja.stamp | ||||
| $(build-files): | ||||
| build.ninja.stamp: meson.stamp $(build-files) | ||||
| 	@if test "$$(cat build.ninja.stamp)" = "$(MESON)" && test -n "$(NINJA)"; then \ | ||||
| 	  $(NINJA) build.ninja; \ | ||||
| 	else \ | ||||
| 	  echo "$(MESON) setup --reconfigure $(SRC_PATH)"; \ | ||||
| 	  $(MESON) setup --reconfigure $(SRC_PATH); \ | ||||
| 	fi && echo "$(MESON)" > $@ | ||||
| 	$(NINJA) $(if $V,-v,) build.ninja && touch $@ | ||||
| endif | ||||
|  | ||||
| ifneq ($(MESON),) | ||||
| Makefile.mtest: build.ninja scripts/mtest2make.py | ||||
| 	$(MESON) introspect --targets --tests --benchmarks | $(PYTHON) scripts/mtest2make.py > $@ | ||||
| -include Makefile.mtest | ||||
| @@ -164,9 +167,19 @@ ifneq ($(filter $(ninja-targets), $(ninja-cmd-goals)),) | ||||
| endif | ||||
| endif | ||||
|  | ||||
| ifeq ($(CONFIG_PLUGIN),y) | ||||
| .PHONY: plugins | ||||
| plugins: | ||||
| 	$(call quiet-command,\ | ||||
| 		$(MAKE) $(SUBDIR_MAKEFLAGS) -C contrib/plugins V="$(V)", \ | ||||
| 		"BUILD", "example plugins") | ||||
| endif # $(CONFIG_PLUGIN) | ||||
|  | ||||
| else # config-host.mak does not exist | ||||
| config-host.mak: | ||||
| ifneq ($(filter-out $(UNCHECKED_GOALS),$(MAKECMDGOALS)),$(if $(MAKECMDGOALS),,fail)) | ||||
| $(error Please call configure before running make) | ||||
| 	@echo "Please call configure before running make!" | ||||
| 	@exit 1 | ||||
| endif | ||||
| endif # config-host.mak does not exist | ||||
|  | ||||
| @@ -176,20 +189,15 @@ include $(SRC_PATH)/tests/Makefile.include | ||||
|  | ||||
| all: recurse-all | ||||
|  | ||||
| SUBDIR_RULES=$(foreach t, all clean distclean, $(addsuffix /$(t), $(SUBDIRS))) | ||||
| .PHONY: $(SUBDIR_RULES) | ||||
| $(SUBDIR_RULES): | ||||
| ROMS_RULES=$(foreach t, all clean distclean, $(addsuffix /$(t), $(ROMS))) | ||||
| .PHONY: $(ROMS_RULES) | ||||
| $(ROMS_RULES): | ||||
| 	$(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) -C $(dir $@) V="$(V)" TARGET_DIR="$(dir $@)" $(notdir $@),) | ||||
|  | ||||
| ifneq ($(filter contrib/plugins, $(SUBDIRS)),) | ||||
| .PHONY: plugins | ||||
| plugins: contrib/plugins/all | ||||
| endif | ||||
|  | ||||
| .PHONY: recurse-all recurse-clean | ||||
| recurse-all: $(addsuffix /all, $(SUBDIRS)) | ||||
| recurse-clean: $(addsuffix /clean, $(SUBDIRS)) | ||||
| recurse-distclean: $(addsuffix /distclean, $(SUBDIRS)) | ||||
| recurse-all: $(addsuffix /all, $(ROMS)) | ||||
| recurse-clean: $(addsuffix /clean, $(ROMS)) | ||||
| recurse-distclean: $(addsuffix /distclean, $(ROMS)) | ||||
|  | ||||
| ###################################################################### | ||||
|  | ||||
| @@ -283,13 +291,6 @@ include $(SRC_PATH)/tests/vm/Makefile.include | ||||
| print-help-run = printf "  %-30s - %s\\n" "$1" "$2" | ||||
| print-help = @$(call print-help-run,$1,$2) | ||||
|  | ||||
| .PHONY: update-linux-vdso | ||||
| update-linux-vdso: | ||||
| 	@for m in $(SRC_PATH)/linux-user/*/Makefile.vdso; do \ | ||||
| 	  $(MAKE) $(SUBDIR_MAKEFLAGS) -C $$(dirname $$m) -f Makefile.vdso \ | ||||
| 		SRC_PATH=$(SRC_PATH) BUILD_DIR=$(BUILD_DIR); \ | ||||
| 	done | ||||
|  | ||||
| .PHONY: help | ||||
| help: | ||||
| 	@echo  'Generic targets:' | ||||
| @@ -300,7 +301,7 @@ help: | ||||
| 	$(call print-help,cscope,Generate cscope index) | ||||
| 	$(call print-help,sparse,Run sparse on the QEMU source) | ||||
| 	@echo  '' | ||||
| ifneq ($(filter contrib/plugins, $(SUBDIRS)),) | ||||
| ifeq ($(CONFIG_PLUGIN),y) | ||||
| 	@echo  'Plugin targets:' | ||||
| 	$(call print-help,plugins,Build the example TCG plugins) | ||||
| 	@echo  '' | ||||
| @@ -310,9 +311,6 @@ endif | ||||
| 	$(call print-help,distclean,Remove all generated files) | ||||
| 	$(call print-help,dist,Build a distributable tarball) | ||||
| 	@echo  '' | ||||
| 	@echo  'Linux-user targets:' | ||||
| 	$(call print-help,update-linux-vdso,Build linux-user vdso images) | ||||
| 	@echo  '' | ||||
| 	@echo  'Test targets:' | ||||
| 	$(call print-help,check,Run all tests (check-help for details)) | ||||
| 	$(call print-help,bench,Run all benchmarks) | ||||
| @@ -323,7 +321,7 @@ endif | ||||
| 	@echo  'Documentation targets:' | ||||
| 	$(call print-help,html man,Build documentation in specified format) | ||||
| 	@echo  '' | ||||
| ifneq ($(filter msi, $(ninja-targets)),) | ||||
| ifdef CONFIG_WIN32 | ||||
| 	@echo  'Windows targets:' | ||||
| 	$(call print-help,installer,Build NSIS-based installer for QEMU) | ||||
| 	$(call print-help,msi,Build MSI-based installer for qemu-ga) | ||||
|   | ||||
| @@ -4,6 +4,9 @@ config WHPX | ||||
| config NVMM | ||||
|     bool | ||||
|  | ||||
| config HAX | ||||
|     bool | ||||
|  | ||||
| config HVF | ||||
|     bool | ||||
|  | ||||
|   | ||||
| @@ -30,7 +30,7 @@ | ||||
| #include "hw/core/accel-cpu.h" | ||||
| 
 | ||||
| #ifndef CONFIG_USER_ONLY | ||||
| #include "accel-system.h" | ||||
| #include "accel-softmmu.h" | ||||
| #endif /* !CONFIG_USER_ONLY */ | ||||
| 
 | ||||
| static const TypeInfo accel_type = { | ||||
| @@ -119,37 +119,16 @@ void accel_cpu_instance_init(CPUState *cpu) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| bool accel_cpu_common_realize(CPUState *cpu, Error **errp) | ||||
| bool accel_cpu_realizefn(CPUState *cpu, Error **errp) | ||||
| { | ||||
|     CPUClass *cc = CPU_GET_CLASS(cpu); | ||||
|     AccelState *accel = current_accel(); | ||||
|     AccelClass *acc = ACCEL_GET_CLASS(accel); | ||||
| 
 | ||||
|     /* target specific realization */ | ||||
|     if (cc->accel_cpu && cc->accel_cpu->cpu_target_realize | ||||
|         && !cc->accel_cpu->cpu_target_realize(cpu, errp)) { | ||||
|         return false; | ||||
|     if (cc->accel_cpu && cc->accel_cpu->cpu_realizefn) { | ||||
|         return cc->accel_cpu->cpu_realizefn(cpu, errp); | ||||
|     } | ||||
| 
 | ||||
|     /* generic realization */ | ||||
|     if (acc->cpu_common_realize && !acc->cpu_common_realize(cpu, errp)) { | ||||
|         return false; | ||||
|     } | ||||
| 
 | ||||
|     return true; | ||||
| } | ||||
| 
 | ||||
| void accel_cpu_common_unrealize(CPUState *cpu) | ||||
| { | ||||
|     AccelState *accel = current_accel(); | ||||
|     AccelClass *acc = ACCEL_GET_CLASS(accel); | ||||
| 
 | ||||
|     /* generic unrealization */ | ||||
|     if (acc->cpu_common_unrealize) { | ||||
|         acc->cpu_common_unrealize(cpu); | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| int accel_supported_gdbstub_sstep_flags(void) | ||||
| { | ||||
|     AccelState *accel = current_accel(); | ||||
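
As an aside on the accel.c hunk above: one side of the hunk splits CPU realization into a target-specific hook (cc->accel_cpu->cpu_target_realize) followed by a generic accelerator hook (acc->cpu_common_realize), with a matching accel_cpu_common_unrealize path, while the other side keeps a single cpu_realizefn hook. The standalone sketch below reduces that two-step contract to plain C; every Demo* name is hypothetical and only mimics the shape of QEMU's AccelCPUClass/AccelClass hooks, it is not QEMU code.

/*
 * Minimal sketch of the two-step realize contract, assuming hypothetical
 * Demo* types in place of QEMU's AccelCPUClass/AccelClass.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct DemoCPU DemoCPU;

typedef struct {
    bool (*cpu_target_realize)(DemoCPU *cpu);   /* target-specific step */
} DemoAccelCPUClass;

typedef struct {
    bool (*cpu_common_realize)(DemoCPU *cpu);   /* generic accelerator step */
    void (*cpu_common_unrealize)(DemoCPU *cpu);
} DemoAccelClass;

struct DemoCPU {
    const DemoAccelCPUClass *accel_cpu;
    const DemoAccelClass *accel;
    bool realized;
};

/* Target hook first, then the generic accelerator hook; either may be absent. */
static bool demo_cpu_realize(DemoCPU *cpu)
{
    if (cpu->accel_cpu && cpu->accel_cpu->cpu_target_realize &&
        !cpu->accel_cpu->cpu_target_realize(cpu)) {
        return false;
    }
    if (cpu->accel && cpu->accel->cpu_common_realize &&
        !cpu->accel->cpu_common_realize(cpu)) {
        return false;
    }
    cpu->realized = true;
    return true;
}

static void demo_cpu_unrealize(DemoCPU *cpu)
{
    if (cpu->accel && cpu->accel->cpu_common_unrealize) {
        cpu->accel->cpu_common_unrealize(cpu);
    }
    cpu->realized = false;
}

static bool demo_accel_common_realize(DemoCPU *cpu)
{
    (void)cpu;
    puts("generic accelerator realize step");
    return true;
}

int main(void)
{
    DemoAccelClass acc = { .cpu_common_realize = demo_accel_common_realize };
    DemoCPU cpu = { .accel_cpu = NULL, .accel = &acc };

    if (!demo_cpu_realize(&cpu)) {
        return 1;
    }
    demo_cpu_unrealize(&cpu);
    return 0;
}
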
| @@ -27,8 +27,8 @@ | ||||
| #include "qemu/accel.h" | ||||
| #include "hw/boards.h" | ||||
| #include "sysemu/cpus.h" | ||||
| #include "qemu/error-report.h" | ||||
| #include "accel-system.h" | ||||
| 
 | ||||
| #include "accel-softmmu.h" | ||||
| 
 | ||||
| int accel_init_machine(AccelState *accel, MachineState *ms) | ||||
| { | ||||
| @@ -99,8 +99,8 @@ static const TypeInfo accel_ops_type_info = { | ||||
|     .class_size = sizeof(AccelOpsClass), | ||||
| }; | ||||
| 
 | ||||
| static void accel_system_register_types(void) | ||||
| static void accel_softmmu_register_types(void) | ||||
| { | ||||
|     type_register_static(&accel_ops_type_info); | ||||
| } | ||||
| type_init(accel_system_register_types); | ||||
| type_init(accel_softmmu_register_types); | ||||
| @@ -7,9 +7,9 @@ | ||||
|  * See the COPYING file in the top-level directory. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef ACCEL_SYSTEM_H | ||||
| #define ACCEL_SYSTEM_H | ||||
| #ifndef ACCEL_SOFTMMU_H | ||||
| #define ACCEL_SOFTMMU_H | ||||
| 
 | ||||
| void accel_init_ops_interfaces(AccelClass *ac); | ||||
| 
 | ||||
| #endif /* ACCEL_SYSTEM_H */ | ||||
| #endif /* ACCEL_SOFTMMU_H */ | ||||
| @@ -27,7 +27,7 @@ static void *dummy_cpu_thread_fn(void *arg) | ||||
|     qemu_mutex_lock_iothread(); | ||||
|     qemu_thread_get_self(cpu->thread); | ||||
|     cpu->thread_id = qemu_get_thread_id(); | ||||
|     cpu->neg.can_do_io = true; | ||||
|     cpu->can_do_io = 1; | ||||
|     current_cpu = cpu; | ||||
|  | ||||
| #ifndef _WIN32 | ||||
|   | ||||
| @@ -52,7 +52,6 @@ | ||||
| #include "qemu/main-loop.h" | ||||
| #include "exec/address-spaces.h" | ||||
| #include "exec/exec-all.h" | ||||
| #include "exec/gdbstub.h" | ||||
| #include "sysemu/cpus.h" | ||||
| #include "sysemu/hvf.h" | ||||
| #include "sysemu/hvf_int.h" | ||||
| @@ -304,7 +303,7 @@ static void hvf_region_del(MemoryListener *listener, | ||||
|  | ||||
| static MemoryListener hvf_memory_listener = { | ||||
|     .name = "hvf", | ||||
|     .priority = MEMORY_LISTENER_PRIORITY_ACCEL, | ||||
|     .priority = 10, | ||||
|     .region_add = hvf_region_add, | ||||
|     .region_del = hvf_region_del, | ||||
|     .log_start = hvf_log_start, | ||||
| @@ -335,26 +334,18 @@ static int hvf_accel_init(MachineState *ms) | ||||
|         s->slots[x].slot_id = x; | ||||
|     } | ||||
|  | ||||
|     QTAILQ_INIT(&s->hvf_sw_breakpoints); | ||||
|  | ||||
|     hvf_state = s; | ||||
|     memory_listener_register(&hvf_memory_listener, &address_space_memory); | ||||
|  | ||||
|     return hvf_arch_init(); | ||||
| } | ||||
|  | ||||
| static inline int hvf_gdbstub_sstep_flags(void) | ||||
| { | ||||
|     return SSTEP_ENABLE | SSTEP_NOIRQ; | ||||
| } | ||||
|  | ||||
| static void hvf_accel_class_init(ObjectClass *oc, void *data) | ||||
| { | ||||
|     AccelClass *ac = ACCEL_CLASS(oc); | ||||
|     ac->name = "HVF"; | ||||
|     ac->init_machine = hvf_accel_init; | ||||
|     ac->allowed = &hvf_allowed; | ||||
|     ac->gdbstub_supported_sstep_flags = hvf_gdbstub_sstep_flags; | ||||
| } | ||||
|  | ||||
| static const TypeInfo hvf_accel_type = { | ||||
| @@ -372,19 +363,19 @@ type_init(hvf_type_init); | ||||
|  | ||||
| static void hvf_vcpu_destroy(CPUState *cpu) | ||||
| { | ||||
|     hv_return_t ret = hv_vcpu_destroy(cpu->accel->fd); | ||||
|     hv_return_t ret = hv_vcpu_destroy(cpu->hvf->fd); | ||||
|     assert_hvf_ok(ret); | ||||
|  | ||||
|     hvf_arch_vcpu_destroy(cpu); | ||||
|     g_free(cpu->accel); | ||||
|     cpu->accel = NULL; | ||||
|     g_free(cpu->hvf); | ||||
|     cpu->hvf = NULL; | ||||
| } | ||||
|  | ||||
| static int hvf_init_vcpu(CPUState *cpu) | ||||
| { | ||||
|     int r; | ||||
|  | ||||
|     cpu->accel = g_new0(AccelCPUState, 1); | ||||
|     cpu->hvf = g_malloc0(sizeof(*cpu->hvf)); | ||||
|  | ||||
|     /* init cpu signals */ | ||||
|     struct sigaction sigact; | ||||
| @@ -393,20 +384,17 @@ static int hvf_init_vcpu(CPUState *cpu) | ||||
|     sigact.sa_handler = dummy_signal; | ||||
|     sigaction(SIG_IPI, &sigact, NULL); | ||||
|  | ||||
|     pthread_sigmask(SIG_BLOCK, NULL, &cpu->accel->unblock_ipi_mask); | ||||
|     sigdelset(&cpu->accel->unblock_ipi_mask, SIG_IPI); | ||||
|     pthread_sigmask(SIG_BLOCK, NULL, &cpu->hvf->unblock_ipi_mask); | ||||
|     sigdelset(&cpu->hvf->unblock_ipi_mask, SIG_IPI); | ||||
|  | ||||
| #ifdef __aarch64__ | ||||
|     r = hv_vcpu_create(&cpu->accel->fd, | ||||
|                        (hv_vcpu_exit_t **)&cpu->accel->exit, NULL); | ||||
|     r = hv_vcpu_create(&cpu->hvf->fd, (hv_vcpu_exit_t **)&cpu->hvf->exit, NULL); | ||||
| #else | ||||
|     r = hv_vcpu_create((hv_vcpuid_t *)&cpu->accel->fd, HV_VCPU_DEFAULT); | ||||
|     r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf->fd, HV_VCPU_DEFAULT); | ||||
| #endif | ||||
|     cpu->vcpu_dirty = 1; | ||||
|     assert_hvf_ok(r); | ||||
|  | ||||
|     cpu->accel->guest_debug_enabled = false; | ||||
|  | ||||
|     return hvf_arch_init_vcpu(cpu); | ||||
| } | ||||
|  | ||||
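
A short aside on the hvf_init_vcpu() hunk above: both sides install a do-nothing handler for SIG_IPI and then derive an "unblock IPI" mask by reading the thread's current blocked set and clearing the IPI signal from the copy. The standalone sketch below shows only that pattern; SIGUSR1 stands in for QEMU's SIG_IPI, and nothing here is QEMU code.

/*
 * Standalone sketch of the SIG_IPI mask setup pattern; SIGUSR1 stands in for
 * QEMU's SIG_IPI.
 */
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>

static void dummy_signal(int sig)
{
    (void)sig;  /* the handler only needs to exist so the signal is catchable */
}

int main(void)
{
    struct sigaction sigact;
    sigset_t unblock_ipi_mask;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIGUSR1, &sigact, NULL);

    /* Read the thread's current blocked set without changing it ... */
    pthread_sigmask(SIG_BLOCK, NULL, &unblock_ipi_mask);
    /* ... then carve the IPI signal out of the copy, so a later
     * sigmask-based wait can be interrupted by it. */
    sigdelset(&unblock_ipi_mask, SIGUSR1);

    printf("SIGUSR1 deliverable in derived mask: %s\n",
           sigismember(&unblock_ipi_mask, SIGUSR1) ? "no" : "yes");
    return 0;
}
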
| @@ -428,7 +416,7 @@ static void *hvf_cpu_thread_fn(void *arg) | ||||
|     qemu_thread_get_self(cpu->thread); | ||||
|  | ||||
|     cpu->thread_id = qemu_get_thread_id(); | ||||
|     cpu->neg.can_do_io = true; | ||||
|     cpu->can_do_io = 1; | ||||
|     current_cpu = cpu; | ||||
|  | ||||
|     hvf_init_vcpu(cpu); | ||||
| @@ -474,108 +462,6 @@ static void hvf_start_vcpu_thread(CPUState *cpu) | ||||
|                        cpu, QEMU_THREAD_JOINABLE); | ||||
| } | ||||
|  | ||||
| static int hvf_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len) | ||||
| { | ||||
|     struct hvf_sw_breakpoint *bp; | ||||
|     int err; | ||||
|  | ||||
|     if (type == GDB_BREAKPOINT_SW) { | ||||
|         bp = hvf_find_sw_breakpoint(cpu, addr); | ||||
|         if (bp) { | ||||
|             bp->use_count++; | ||||
|             return 0; | ||||
|         } | ||||
|  | ||||
|         bp = g_new(struct hvf_sw_breakpoint, 1); | ||||
|         bp->pc = addr; | ||||
|         bp->use_count = 1; | ||||
|         err = hvf_arch_insert_sw_breakpoint(cpu, bp); | ||||
|         if (err) { | ||||
|             g_free(bp); | ||||
|             return err; | ||||
|         } | ||||
|  | ||||
|         QTAILQ_INSERT_HEAD(&hvf_state->hvf_sw_breakpoints, bp, entry); | ||||
|     } else { | ||||
|         err = hvf_arch_insert_hw_breakpoint(addr, len, type); | ||||
|         if (err) { | ||||
|             return err; | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     CPU_FOREACH(cpu) { | ||||
|         err = hvf_update_guest_debug(cpu); | ||||
|         if (err) { | ||||
|             return err; | ||||
|         } | ||||
|     } | ||||
|     return 0; | ||||
| } | ||||
|  | ||||
| static int hvf_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len) | ||||
| { | ||||
|     struct hvf_sw_breakpoint *bp; | ||||
|     int err; | ||||
|  | ||||
|     if (type == GDB_BREAKPOINT_SW) { | ||||
|         bp = hvf_find_sw_breakpoint(cpu, addr); | ||||
|         if (!bp) { | ||||
|             return -ENOENT; | ||||
|         } | ||||
|  | ||||
|         if (bp->use_count > 1) { | ||||
|             bp->use_count--; | ||||
|             return 0; | ||||
|         } | ||||
|  | ||||
|         err = hvf_arch_remove_sw_breakpoint(cpu, bp); | ||||
|         if (err) { | ||||
|             return err; | ||||
|         } | ||||
|  | ||||
|         QTAILQ_REMOVE(&hvf_state->hvf_sw_breakpoints, bp, entry); | ||||
|         g_free(bp); | ||||
|     } else { | ||||
|         err = hvf_arch_remove_hw_breakpoint(addr, len, type); | ||||
|         if (err) { | ||||
|             return err; | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     CPU_FOREACH(cpu) { | ||||
|         err = hvf_update_guest_debug(cpu); | ||||
|         if (err) { | ||||
|             return err; | ||||
|         } | ||||
|     } | ||||
|     return 0; | ||||
| } | ||||
|  | ||||
| static void hvf_remove_all_breakpoints(CPUState *cpu) | ||||
| { | ||||
|     struct hvf_sw_breakpoint *bp, *next; | ||||
|     CPUState *tmpcpu; | ||||
|  | ||||
|     QTAILQ_FOREACH_SAFE(bp, &hvf_state->hvf_sw_breakpoints, entry, next) { | ||||
|         if (hvf_arch_remove_sw_breakpoint(cpu, bp) != 0) { | ||||
|             /* Try harder to find a CPU that currently sees the breakpoint. */ | ||||
|             CPU_FOREACH(tmpcpu) | ||||
|             { | ||||
|                 if (hvf_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) { | ||||
|                     break; | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
|         QTAILQ_REMOVE(&hvf_state->hvf_sw_breakpoints, bp, entry); | ||||
|         g_free(bp); | ||||
|     } | ||||
|     hvf_arch_remove_all_hw_breakpoints(); | ||||
|  | ||||
|     CPU_FOREACH(cpu) { | ||||
|         hvf_update_guest_debug(cpu); | ||||
|     } | ||||
| } | ||||
|  | ||||
| static void hvf_accel_ops_class_init(ObjectClass *oc, void *data) | ||||
| { | ||||
|     AccelOpsClass *ops = ACCEL_OPS_CLASS(oc); | ||||
| @@ -587,12 +473,6 @@ static void hvf_accel_ops_class_init(ObjectClass *oc, void *data) | ||||
|     ops->synchronize_post_init = hvf_cpu_synchronize_post_init; | ||||
|     ops->synchronize_state = hvf_cpu_synchronize_state; | ||||
|     ops->synchronize_pre_loadvm = hvf_cpu_synchronize_pre_loadvm; | ||||
|  | ||||
|     ops->insert_breakpoint = hvf_insert_breakpoint; | ||||
|     ops->remove_breakpoint = hvf_remove_breakpoint; | ||||
|     ops->remove_all_breakpoints = hvf_remove_all_breakpoints; | ||||
|     ops->update_guest_debug = hvf_update_guest_debug; | ||||
|     ops->supports_guest_debug = hvf_arch_supports_guest_debug; | ||||
| }; | ||||
| static const TypeInfo hvf_accel_ops_type = { | ||||
|     .name = ACCEL_OPS_NAME("hvf"), | ||||
|   | ||||
| @@ -38,38 +38,9 @@ void assert_hvf_ok(hv_return_t ret) | ||||
|     case HV_UNSUPPORTED: | ||||
|         error_report("Error: HV_UNSUPPORTED"); | ||||
|         break; | ||||
| #if defined(MAC_OS_VERSION_11_0) && \ | ||||
|     MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_VERSION_11_0 | ||||
|     case HV_DENIED: | ||||
|         error_report("Error: HV_DENIED"); | ||||
|         break; | ||||
| #endif | ||||
|     default: | ||||
|         error_report("Unknown Error"); | ||||
|     } | ||||
|  | ||||
|     abort(); | ||||
| } | ||||
|  | ||||
| struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu, vaddr pc) | ||||
| { | ||||
|     struct hvf_sw_breakpoint *bp; | ||||
|  | ||||
|     QTAILQ_FOREACH(bp, &hvf_state->hvf_sw_breakpoints, entry) { | ||||
|         if (bp->pc == pc) { | ||||
|             return bp; | ||||
|         } | ||||
|     } | ||||
|     return NULL; | ||||
| } | ||||
|  | ||||
| int hvf_sw_breakpoints_active(CPUState *cpu) | ||||
| { | ||||
|     return !QTAILQ_EMPTY(&hvf_state->hvf_sw_breakpoints); | ||||
| } | ||||
|  | ||||
| int hvf_update_guest_debug(CPUState *cpu) | ||||
| { | ||||
|     hvf_arch_update_guest_debug(cpu); | ||||
|     return 0; | ||||
| } | ||||
|   | ||||
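
Before moving on to the KVM files, a note on the software-breakpoint plumbing in the hvf hunks above: the accelerator keeps one list entry per guest PC with a use count, so repeated gdbstub inserts at the same address reuse the already-patched site instead of patching it twice, and only the last remove actually tears it down. The following is a minimal standalone sketch of that bookkeeping only, with hypothetical names and a plain linked list instead of QTAILQ; it is not the QEMU implementation.

/*
 * Reference-counted software-breakpoint list, reduced to a standalone sketch.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct SwBreakpoint {
    uint64_t pc;
    int use_count;
    struct SwBreakpoint *next;
} SwBreakpoint;

static SwBreakpoint *bp_list;

static SwBreakpoint *find_sw_breakpoint(uint64_t pc)
{
    for (SwBreakpoint *bp = bp_list; bp; bp = bp->next) {
        if (bp->pc == pc) {
            return bp;
        }
    }
    return NULL;
}

static void insert_sw_breakpoint(uint64_t pc)
{
    SwBreakpoint *bp = find_sw_breakpoint(pc);
    if (bp) {
        bp->use_count++;        /* already patched, just bump the refcount */
        return;
    }
    bp = calloc(1, sizeof(*bp));
    bp->pc = pc;
    bp->use_count = 1;
    bp->next = bp_list;
    bp_list = bp;               /* here the real code would patch guest memory */
}

static void remove_sw_breakpoint(uint64_t pc)
{
    SwBreakpoint **p = &bp_list;
    while (*p && (*p)->pc != pc) {
        p = &(*p)->next;
    }
    if (*p && --(*p)->use_count == 0) {
        SwBreakpoint *bp = *p;
        *p = bp->next;          /* last user gone: unpatch and free */
        free(bp);
    }
}

int main(void)
{
    insert_sw_breakpoint(0x1000);
    insert_sw_breakpoint(0x1000);
    remove_sw_breakpoint(0x1000);
    printf("still present: %d\n", find_sw_breakpoint(0x1000) != NULL);
    remove_sw_breakpoint(0x1000);
    printf("still present: %d\n", find_sw_breakpoint(0x1000) != NULL);
    return 0;
}
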
| @@ -36,7 +36,7 @@ static void *kvm_vcpu_thread_fn(void *arg) | ||||
|     qemu_mutex_lock_iothread(); | ||||
|     qemu_thread_get_self(cpu->thread); | ||||
|     cpu->thread_id = qemu_get_thread_id(); | ||||
|     cpu->neg.can_do_io = true; | ||||
|     cpu->can_do_io = 1; | ||||
|     current_cpu = cpu; | ||||
|  | ||||
|     r = kvm_init_vcpu(cpu, &error_fatal); | ||||
| @@ -86,13 +86,6 @@ static bool kvm_cpus_are_resettable(void) | ||||
|     return !kvm_enabled() || kvm_cpu_check_are_resettable(); | ||||
| } | ||||
|  | ||||
| #ifdef KVM_CAP_SET_GUEST_DEBUG | ||||
| static int kvm_update_guest_debug_ops(CPUState *cpu) | ||||
| { | ||||
|     return kvm_update_guest_debug(cpu, 0); | ||||
| } | ||||
| #endif | ||||
|  | ||||
| static void kvm_accel_ops_class_init(ObjectClass *oc, void *data) | ||||
| { | ||||
|     AccelOpsClass *ops = ACCEL_OPS_CLASS(oc); | ||||
| @@ -106,7 +99,6 @@ static void kvm_accel_ops_class_init(ObjectClass *oc, void *data) | ||||
|     ops->synchronize_pre_loadvm = kvm_cpu_synchronize_pre_loadvm; | ||||
|  | ||||
| #ifdef KVM_CAP_SET_GUEST_DEBUG | ||||
|     ops->update_guest_debug = kvm_update_guest_debug_ops; | ||||
|     ops->supports_guest_debug = kvm_supports_guest_debug; | ||||
|     ops->insert_breakpoint = kvm_insert_breakpoint; | ||||
|     ops->remove_breakpoint = kvm_remove_breakpoint; | ||||
|   | ||||
| @@ -90,6 +90,8 @@ bool kvm_kernel_irqchip; | ||||
| bool kvm_split_irqchip; | ||||
| bool kvm_async_interrupts_allowed; | ||||
| bool kvm_halt_in_kernel_allowed; | ||||
| bool kvm_eventfds_allowed; | ||||
| bool kvm_irqfds_allowed; | ||||
| bool kvm_resamplefds_allowed; | ||||
| bool kvm_msi_via_irqfd_allowed; | ||||
| bool kvm_gsi_routing_allowed; | ||||
| @@ -97,6 +99,8 @@ bool kvm_gsi_direct_mapping; | ||||
| bool kvm_allowed; | ||||
| bool kvm_readonly_mem_allowed; | ||||
| bool kvm_vm_attributes_allowed; | ||||
| bool kvm_direct_msi_allowed; | ||||
| bool kvm_ioeventfd_any_length_allowed; | ||||
| bool kvm_msi_use_devid; | ||||
| bool kvm_has_guest_debug; | ||||
| static int kvm_sstep_flags; | ||||
| @@ -107,9 +111,6 @@ static const KVMCapabilityInfo kvm_required_capabilites[] = { | ||||
|     KVM_CAP_INFO(USER_MEMORY), | ||||
|     KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS), | ||||
|     KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS), | ||||
|     KVM_CAP_INFO(INTERNAL_ERROR_DATA), | ||||
|     KVM_CAP_INFO(IOEVENTFD), | ||||
|     KVM_CAP_INFO(IOEVENTFD_ANY_LENGTH), | ||||
|     KVM_CAP_LAST_INFO | ||||
| }; | ||||
|  | ||||
| @@ -173,31 +174,13 @@ void kvm_resample_fd_notify(int gsi) | ||||
|     } | ||||
| } | ||||
|  | ||||
| unsigned int kvm_get_max_memslots(void) | ||||
| int kvm_get_max_memslots(void) | ||||
| { | ||||
|     KVMState *s = KVM_STATE(current_accel()); | ||||
|  | ||||
|     return s->nr_slots; | ||||
| } | ||||
|  | ||||
| unsigned int kvm_get_free_memslots(void) | ||||
| { | ||||
|     unsigned int used_slots = 0; | ||||
|     KVMState *s = kvm_state; | ||||
|     int i; | ||||
|  | ||||
|     kvm_slots_lock(); | ||||
|     for (i = 0; i < s->nr_as; i++) { | ||||
|         if (!s->as[i].ml) { | ||||
|             continue; | ||||
|         } | ||||
|         used_slots = MAX(used_slots, s->as[i].ml->nr_used_slots); | ||||
|     } | ||||
|     kvm_slots_unlock(); | ||||
|  | ||||
|     return s->nr_slots - used_slots; | ||||
| } | ||||
|  | ||||
| /* Called with KVMMemoryListener.slots_lock held */ | ||||
| static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml) | ||||
| { | ||||
| @@ -213,6 +196,19 @@ static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml) | ||||
|     return NULL; | ||||
| } | ||||
|  | ||||
| bool kvm_has_free_slot(MachineState *ms) | ||||
| { | ||||
|     KVMState *s = KVM_STATE(ms->accelerator); | ||||
|     bool result; | ||||
|     KVMMemoryListener *kml = &s->memory_listener; | ||||
|  | ||||
|     kvm_slots_lock(); | ||||
|     result = !!kvm_get_free_slot(kml); | ||||
|     kvm_slots_unlock(); | ||||
|  | ||||
|     return result; | ||||
| } | ||||
|  | ||||
| /* Called with KVMMemoryListener.slots_lock held */ | ||||
| static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml) | ||||
| { | ||||
| @@ -454,8 +450,6 @@ int kvm_init_vcpu(CPUState *cpu, Error **errp) | ||||
|                          "kvm_init_vcpu: kvm_arch_init_vcpu failed (%lu)", | ||||
|                          kvm_arch_vcpu_id(cpu)); | ||||
|     } | ||||
|     cpu->kvm_vcpu_stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL); | ||||
|  | ||||
| err: | ||||
|     return ret; | ||||
| } | ||||
| @@ -691,15 +685,6 @@ static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu) | ||||
|     uint32_t ring_size = s->kvm_dirty_ring_size; | ||||
|     uint32_t count = 0, fetch = cpu->kvm_fetch_index; | ||||
|  | ||||
|     /* | ||||
|      * It's possible that we race with vcpu creation code where the vcpu is | ||||
|      * put onto the vcpus list but not yet initialized the dirty ring | ||||
|      * structures.  If so, skip it. | ||||
|      */ | ||||
|     if (!cpu->created) { | ||||
|         return 0; | ||||
|     } | ||||
|  | ||||
|     assert(dirty_gfns && ring_size); | ||||
|     trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index); | ||||
|  | ||||
| @@ -1105,6 +1090,12 @@ static void kvm_coalesce_pio_del(MemoryListener *listener, | ||||
|      } | ||||
| } | ||||
|  | ||||
| static MemoryListener kvm_coalesced_pio_listener = { | ||||
|     .name = "kvm-coalesced-pio", | ||||
|     .coalesced_io_add = kvm_coalesce_pio_add, | ||||
|     .coalesced_io_del = kvm_coalesce_pio_del, | ||||
| }; | ||||
|  | ||||
| int kvm_check_extension(KVMState *s, unsigned int extension) | ||||
| { | ||||
|     int ret; | ||||
| @@ -1246,6 +1237,43 @@ static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val, | ||||
| } | ||||
|  | ||||
|  | ||||
| static int kvm_check_many_ioeventfds(void) | ||||
| { | ||||
|     /* Userspace can use ioeventfd for io notification.  This requires a host | ||||
|      * that supports eventfd(2) and an I/O thread; since eventfd does not | ||||
|      * support SIGIO it cannot interrupt the vcpu. | ||||
|      * | ||||
|      * Older kernels have a 6 device limit on the KVM io bus.  Find out so we | ||||
|      * can avoid creating too many ioeventfds. | ||||
|      */ | ||||
| #if defined(CONFIG_EVENTFD) | ||||
|     int ioeventfds[7]; | ||||
|     int i, ret = 0; | ||||
|     for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) { | ||||
|         ioeventfds[i] = eventfd(0, EFD_CLOEXEC); | ||||
|         if (ioeventfds[i] < 0) { | ||||
|             break; | ||||
|         } | ||||
|         ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true); | ||||
|         if (ret < 0) { | ||||
|             close(ioeventfds[i]); | ||||
|             break; | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     /* Decide whether many devices are supported or not */ | ||||
|     ret = i == ARRAY_SIZE(ioeventfds); | ||||
|  | ||||
|     while (i-- > 0) { | ||||
|         kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true); | ||||
|         close(ioeventfds[i]); | ||||
|     } | ||||
|     return ret; | ||||
| #else | ||||
|     return 0; | ||||
| #endif | ||||
| } | ||||
|  | ||||
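
The kvm_check_many_ioeventfds() helper above probes an old kernel limit (six devices on the KVM io bus) by opening seven eventfds, registering each one, and then rolling everything back regardless of the outcome. The standalone sketch below keeps only that probe-and-roll-back skeleton; try_register() is a stub standing in for the kvm_set_ioeventfd_pio()/KVM_IOEVENTFD path and always succeeds, so only the bookkeeping is illustrated.

/*
 * Probe-and-roll-back skeleton: open a small batch of eventfds, try to
 * register each, then undo everything.  try_register() is a stand-in stub.
 */
#include <stdbool.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

#define PROBE_COUNT 7   /* one more than the old 6-device KVM io bus limit */

static bool try_register(int fd, int slot)
{
    (void)fd;
    (void)slot;
    return true;        /* real code: kvm_set_ioeventfd_pio(fd, 0, slot, ...) */
}

int main(void)
{
    int fds[PROBE_COUNT];
    int i;

    for (i = 0; i < PROBE_COUNT; i++) {
        fds[i] = eventfd(0, EFD_CLOEXEC);
        if (fds[i] < 0) {
            break;
        }
        if (!try_register(fds[i], i)) {
            close(fds[i]);
            break;
        }
    }

    bool many_supported = (i == PROBE_COUNT);

    /* Roll back whatever the probe managed to set up. */
    while (i-- > 0) {
        close(fds[i]);
    }

    printf("many ioeventfds supported: %s\n", many_supported ? "yes" : "no");
    return 0;
}
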
| static const KVMCapabilityInfo * | ||||
| kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list) | ||||
| { | ||||
| @@ -1324,10 +1352,6 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml, | ||||
|                  */ | ||||
|                 if (kvm_state->kvm_dirty_ring_size) { | ||||
|                     kvm_dirty_ring_reap_locked(kvm_state, NULL); | ||||
|                     if (kvm_state->kvm_dirty_ring_with_bitmap) { | ||||
|                         kvm_slot_sync_dirty_pages(mem); | ||||
|                         kvm_slot_get_dirty_log(kvm_state, mem); | ||||
|                     } | ||||
|                 } else { | ||||
|                     kvm_slot_get_dirty_log(kvm_state, mem); | ||||
|                 } | ||||
| @@ -1347,7 +1371,6 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml, | ||||
|             } | ||||
|             start_addr += slot_size; | ||||
|             size -= slot_size; | ||||
|             kml->nr_used_slots--; | ||||
|         } while (size); | ||||
|         return; | ||||
|     } | ||||
| @@ -1373,7 +1396,6 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml, | ||||
|         ram_start_offset += slot_size; | ||||
|         ram += slot_size; | ||||
|         size -= slot_size; | ||||
|         kml->nr_used_slots++; | ||||
|     } while (size); | ||||
| } | ||||
|  | ||||
| @@ -1416,74 +1438,13 @@ static void *kvm_dirty_ring_reaper_thread(void *data) | ||||
|     return NULL; | ||||
| } | ||||
|  | ||||
| static void kvm_dirty_ring_reaper_init(KVMState *s) | ||||
| static int kvm_dirty_ring_reaper_init(KVMState *s) | ||||
| { | ||||
|     struct KVMDirtyRingReaper *r = &s->reaper; | ||||
|  | ||||
|     qemu_thread_create(&r->reaper_thr, "kvm-reaper", | ||||
|                        kvm_dirty_ring_reaper_thread, | ||||
|                        s, QEMU_THREAD_JOINABLE); | ||||
| } | ||||
|  | ||||
| static int kvm_dirty_ring_init(KVMState *s) | ||||
| { | ||||
|     uint32_t ring_size = s->kvm_dirty_ring_size; | ||||
|     uint64_t ring_bytes = ring_size * sizeof(struct kvm_dirty_gfn); | ||||
|     unsigned int capability = KVM_CAP_DIRTY_LOG_RING; | ||||
|     int ret; | ||||
|  | ||||
|     s->kvm_dirty_ring_size = 0; | ||||
|     s->kvm_dirty_ring_bytes = 0; | ||||
|  | ||||
|     /* Bail if the dirty ring size isn't specified */ | ||||
|     if (!ring_size) { | ||||
|         return 0; | ||||
|     } | ||||
|  | ||||
|     /* | ||||
|      * Read the max supported pages. Fall back to dirty logging mode | ||||
|      * if the dirty ring isn't supported. | ||||
|      */ | ||||
|     ret = kvm_vm_check_extension(s, capability); | ||||
|     if (ret <= 0) { | ||||
|         capability = KVM_CAP_DIRTY_LOG_RING_ACQ_REL; | ||||
|         ret = kvm_vm_check_extension(s, capability); | ||||
|     } | ||||
|  | ||||
|     if (ret <= 0) { | ||||
|         warn_report("KVM dirty ring not available, using bitmap method"); | ||||
|         return 0; | ||||
|     } | ||||
|  | ||||
|     if (ring_bytes > ret) { | ||||
|         error_report("KVM dirty ring size %" PRIu32 " too big " | ||||
|                      "(maximum is %ld).  Please use a smaller value.", | ||||
|                      ring_size, (long)ret / sizeof(struct kvm_dirty_gfn)); | ||||
|         return -EINVAL; | ||||
|     } | ||||
|  | ||||
|     ret = kvm_vm_enable_cap(s, capability, 0, ring_bytes); | ||||
|     if (ret) { | ||||
|         error_report("Enabling of KVM dirty ring failed: %s. " | ||||
|                      "Suggested minimum value is 1024.", strerror(-ret)); | ||||
|         return -EIO; | ||||
|     } | ||||
|  | ||||
|     /* Enable the backup bitmap if it is supported */ | ||||
|     ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP); | ||||
|     if (ret > 0) { | ||||
|         ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP, 0); | ||||
|         if (ret) { | ||||
|             error_report("Enabling of KVM dirty ring's backup bitmap failed: " | ||||
|                          "%s. ", strerror(-ret)); | ||||
|             return -EIO; | ||||
|         } | ||||
|  | ||||
|         s->kvm_dirty_ring_with_bitmap = true; | ||||
|     } | ||||
|  | ||||
|     s->kvm_dirty_ring_size = ring_size; | ||||
|     s->kvm_dirty_ring_bytes = ring_bytes; | ||||
|  | ||||
|     return 0; | ||||
| } | ||||
| @@ -1593,7 +1554,7 @@ static void kvm_log_sync(MemoryListener *listener, | ||||
|     kvm_slots_unlock(); | ||||
| } | ||||
|  | ||||
| static void kvm_log_sync_global(MemoryListener *l, bool last_stage) | ||||
| static void kvm_log_sync_global(MemoryListener *l) | ||||
| { | ||||
|     KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener); | ||||
|     KVMState *s = kvm_state; | ||||
| @@ -1612,12 +1573,6 @@ static void kvm_log_sync_global(MemoryListener *l, bool last_stage) | ||||
|         mem = &kml->slots[i]; | ||||
|         if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) { | ||||
|             kvm_slot_sync_dirty_pages(mem); | ||||
|  | ||||
|             if (s->kvm_dirty_ring_with_bitmap && last_stage && | ||||
|                 kvm_slot_get_dirty_log(s, mem)) { | ||||
|                 kvm_slot_sync_dirty_pages(mem); | ||||
|             } | ||||
|  | ||||
|             /* | ||||
|              * This is not needed by KVM_GET_DIRTY_LOG because the | ||||
|              * ioctl will unconditionally overwrite the whole region. | ||||
| @@ -1738,7 +1693,7 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml, | ||||
|     kml->listener.commit = kvm_region_commit; | ||||
|     kml->listener.log_start = kvm_log_start; | ||||
|     kml->listener.log_stop = kvm_log_stop; | ||||
|     kml->listener.priority = MEMORY_LISTENER_PRIORITY_ACCEL; | ||||
|     kml->listener.priority = 10; | ||||
|     kml->listener.name = name; | ||||
|  | ||||
|     if (s->kvm_dirty_ring_size) { | ||||
| @@ -1761,11 +1716,9 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml, | ||||
|  | ||||
| static MemoryListener kvm_io_listener = { | ||||
|     .name = "kvm-io", | ||||
|     .coalesced_io_add = kvm_coalesce_pio_add, | ||||
|     .coalesced_io_del = kvm_coalesce_pio_del, | ||||
|     .eventfd_add = kvm_io_ioeventfd_add, | ||||
|     .eventfd_del = kvm_io_ioeventfd_del, | ||||
|     .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND, | ||||
|     .priority = 10, | ||||
| }; | ||||
|  | ||||
| int kvm_set_irq(KVMState *s, int irq, int level) | ||||
| @@ -1804,7 +1757,7 @@ static void clear_gsi(KVMState *s, unsigned int gsi) | ||||
|  | ||||
| void kvm_init_irq_routing(KVMState *s) | ||||
| { | ||||
|     int gsi_count; | ||||
|     int gsi_count, i; | ||||
|  | ||||
|     gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1; | ||||
|     if (gsi_count > 0) { | ||||
| @@ -1816,6 +1769,12 @@ void kvm_init_irq_routing(KVMState *s) | ||||
|     s->irq_routes = g_malloc0(sizeof(*s->irq_routes)); | ||||
|     s->nr_allocated_irq_routes = 0; | ||||
|  | ||||
|     if (!kvm_direct_msi_allowed) { | ||||
|         for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) { | ||||
|             QTAILQ_INIT(&s->msi_hashtab[i]); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     kvm_arch_init_irq_routing(s); | ||||
| } | ||||
|  | ||||
| @@ -1935,10 +1894,41 @@ void kvm_irqchip_change_notify(void) | ||||
|     notifier_list_notify(&kvm_irqchip_change_notifiers, NULL); | ||||
| } | ||||
|  | ||||
| static unsigned int kvm_hash_msi(uint32_t data) | ||||
| { | ||||
|     /* This is optimized for IA32 MSI layout. However, no other arch shall | ||||
|      * repeat the mistake of not providing a direct MSI injection API. */ | ||||
|     return data & 0xff; | ||||
| } | ||||
|  | ||||
| static void kvm_flush_dynamic_msi_routes(KVMState *s) | ||||
| { | ||||
|     KVMMSIRoute *route, *next; | ||||
|     unsigned int hash; | ||||
|  | ||||
|     for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) { | ||||
|         QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) { | ||||
|             kvm_irqchip_release_virq(s, route->kroute.gsi); | ||||
|             QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry); | ||||
|             g_free(route); | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
| static int kvm_irqchip_get_virq(KVMState *s) | ||||
| { | ||||
|     int next_virq; | ||||
|  | ||||
|     /* | ||||
|      * PIC and IOAPIC share the first 16 GSI numbers, thus the available | ||||
|      * GSI numbers outnumber the IRQ route entries. Allocating a GSI | ||||
|      * number can succeed even though a new route entry cannot be added. | ||||
|      * When this happens, flush dynamic MSI entries to free IRQ route entries. | ||||
|      */ | ||||
|     if (!kvm_direct_msi_allowed && s->irq_routes->nr == s->gsi_count) { | ||||
|         kvm_flush_dynamic_msi_routes(s); | ||||
|     } | ||||
|  | ||||
|     /* Return the lowest unused GSI in the bitmap */ | ||||
|     next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count); | ||||
|     if (next_virq >= s->gsi_count) { | ||||
| @@ -1948,17 +1938,63 @@ static int kvm_irqchip_get_virq(KVMState *s) | ||||
|     } | ||||
| } | ||||
|  | ||||
| static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg) | ||||
| { | ||||
|     unsigned int hash = kvm_hash_msi(msg.data); | ||||
|     KVMMSIRoute *route; | ||||
|  | ||||
|     QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) { | ||||
|         if (route->kroute.u.msi.address_lo == (uint32_t)msg.address && | ||||
|             route->kroute.u.msi.address_hi == (msg.address >> 32) && | ||||
|             route->kroute.u.msi.data == le32_to_cpu(msg.data)) { | ||||
|             return route; | ||||
|         } | ||||
|     } | ||||
|     return NULL; | ||||
| } | ||||
|  | ||||
| int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg) | ||||
| { | ||||
|     struct kvm_msi msi; | ||||
|     KVMMSIRoute *route; | ||||
|  | ||||
|     msi.address_lo = (uint32_t)msg.address; | ||||
|     msi.address_hi = msg.address >> 32; | ||||
|     msi.data = le32_to_cpu(msg.data); | ||||
|     msi.flags = 0; | ||||
|     memset(msi.pad, 0, sizeof(msi.pad)); | ||||
|     if (kvm_direct_msi_allowed) { | ||||
|         msi.address_lo = (uint32_t)msg.address; | ||||
|         msi.address_hi = msg.address >> 32; | ||||
|         msi.data = le32_to_cpu(msg.data); | ||||
|         msi.flags = 0; | ||||
|         memset(msi.pad, 0, sizeof(msi.pad)); | ||||
|  | ||||
|     return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi); | ||||
|         return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi); | ||||
|     } | ||||
|  | ||||
|     route = kvm_lookup_msi_route(s, msg); | ||||
|     if (!route) { | ||||
|         int virq; | ||||
|  | ||||
|         virq = kvm_irqchip_get_virq(s); | ||||
|         if (virq < 0) { | ||||
|             return virq; | ||||
|         } | ||||
|  | ||||
|         route = g_new0(KVMMSIRoute, 1); | ||||
|         route->kroute.gsi = virq; | ||||
|         route->kroute.type = KVM_IRQ_ROUTING_MSI; | ||||
|         route->kroute.flags = 0; | ||||
|         route->kroute.u.msi.address_lo = (uint32_t)msg.address; | ||||
|         route->kroute.u.msi.address_hi = msg.address >> 32; | ||||
|         route->kroute.u.msi.data = le32_to_cpu(msg.data); | ||||
|  | ||||
|         kvm_add_routing_entry(s, &route->kroute); | ||||
|         kvm_irqchip_commit_routes(s); | ||||
|  | ||||
|         QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route, | ||||
|                            entry); | ||||
|     } | ||||
|  | ||||
|     assert(route->kroute.type == KVM_IRQ_ROUTING_MSI); | ||||
|  | ||||
|     return kvm_set_irq(s, route->kroute.gsi, 1); | ||||
| } | ||||
|  | ||||
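The fallback path above, taken when KVM_CAP_SIGNAL_MSI is not available, caches one kernel routing entry per distinct MSI message: the message is hashed on the low 8 bits of its data word, looked up in a bucket list, and a fresh GSI plus routing entry is allocated on a miss (flushing the whole cache first if the GSIs are exhausted). Below is a minimal standalone sketch of that caching scheme; the names (msi_cache_lookup, msi_cache_insert, MSI_HASH_SIZE) and the plain linked lists are illustrative assumptions, not QEMU's data structures.

#include <stdint.h>
#include <stdlib.h>

#define MSI_HASH_SIZE 256

struct msi_route {
    uint64_t address;              /* 64-bit MSI address */
    uint32_t data;                 /* MSI data word */
    int gsi;                       /* GSI allocated for this message */
    struct msi_route *next;
};

static struct msi_route *msi_cache[MSI_HASH_SIZE];

static unsigned msi_hash(uint32_t data)
{
    return data & 0xff;            /* same trivial hash as kvm_hash_msi() */
}

static struct msi_route *msi_cache_lookup(uint64_t address, uint32_t data)
{
    struct msi_route *r;

    for (r = msi_cache[msi_hash(data)]; r; r = r->next) {
        if (r->address == address && r->data == data) {
            return r;
        }
    }
    return NULL;
}

/* alloc_gsi stands in for kvm_irqchip_get_virq(); it returns <0 when no
 * GSI is free, at which point the caller may flush the cache and retry. */
static struct msi_route *msi_cache_insert(uint64_t address, uint32_t data,
                                          int (*alloc_gsi)(void))
{
    int gsi = alloc_gsi();
    struct msi_route *r;

    if (gsi < 0) {
        return NULL;
    }
    r = calloc(1, sizeof(*r));
    if (!r) {
        return NULL;
    }
    r->address = address;
    r->data = data;
    r->gsi = gsi;
    r->next = msi_cache[msi_hash(data)];
    msi_cache[msi_hash(data)] = r;
    return r;
}

Once a route is cached, delivering the interrupt is only a matter of raising the stored GSI, which is what the kvm_set_irq() call at the end of kvm_irqchip_send_msi() does.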
| int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev) | ||||
| @@ -2085,6 +2121,10 @@ static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event, | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     if (!kvm_irqfds_enabled()) { | ||||
|         return -ENOSYS; | ||||
|     } | ||||
|  | ||||
|     return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd); | ||||
| } | ||||
|  | ||||
| @@ -2245,11 +2285,6 @@ static void kvm_irqchip_create(KVMState *s) | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     if (kvm_check_extension(s, KVM_CAP_IRQFD) <= 0) { | ||||
|         fprintf(stderr, "kvm: irqfd not implemented\n"); | ||||
|         exit(1); | ||||
|     } | ||||
|  | ||||
|     /* First probe and see if there's a arch-specific hook to create the | ||||
|      * in-kernel irqchip for us */ | ||||
|     ret = kvm_arch_irqchip_create(s); | ||||
| @@ -2338,7 +2373,7 @@ static int kvm_init(MachineState *ms) | ||||
|     KVMState *s; | ||||
|     const KVMCapabilityInfo *missing_cap; | ||||
|     int ret; | ||||
|     int type; | ||||
|     int type = 0; | ||||
|     uint64_t dirty_log_manual_caps; | ||||
|  | ||||
|     qemu_mutex_init(&kml_slots_lock); | ||||
| @@ -2403,13 +2438,6 @@ static int kvm_init(MachineState *ms) | ||||
|         type = mc->kvm_type(ms, kvm_type); | ||||
|     } else if (mc->kvm_type) { | ||||
|         type = mc->kvm_type(ms, NULL); | ||||
|     } else { | ||||
|         type = kvm_arch_get_default_type(ms); | ||||
|     } | ||||
|  | ||||
|     if (type < 0) { | ||||
|         ret = -EINVAL; | ||||
|         goto err; | ||||
|     } | ||||
|  | ||||
|     do { | ||||
| @@ -2484,9 +2512,35 @@ static int kvm_init(MachineState *ms) | ||||
|      * Enable KVM dirty ring if supported, otherwise fall back to | ||||
|      * dirty logging mode | ||||
|      */ | ||||
|     ret = kvm_dirty_ring_init(s); | ||||
|     if (ret < 0) { | ||||
|         goto err; | ||||
|     if (s->kvm_dirty_ring_size > 0) { | ||||
|         uint64_t ring_bytes; | ||||
|  | ||||
|         ring_bytes = s->kvm_dirty_ring_size * sizeof(struct kvm_dirty_gfn); | ||||
|  | ||||
|         /* Read the max supported pages */ | ||||
|         ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING); | ||||
|         if (ret > 0) { | ||||
|             if (ring_bytes > ret) { | ||||
|                 error_report("KVM dirty ring size %" PRIu32 " too big " | ||||
|                              "(maximum is %ld).  Please use a smaller value.", | ||||
|                              s->kvm_dirty_ring_size, | ||||
|                              (long)ret / sizeof(struct kvm_dirty_gfn)); | ||||
|                 ret = -EINVAL; | ||||
|                 goto err; | ||||
|             } | ||||
|  | ||||
|             ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING, 0, ring_bytes); | ||||
|             if (ret) { | ||||
|                 error_report("Enabling of KVM dirty ring failed: %s. " | ||||
|                              "Suggested minimum value is 1024.", strerror(-ret)); | ||||
|                 goto err; | ||||
|             } | ||||
|  | ||||
|             s->kvm_dirty_ring_bytes = ring_bytes; | ||||
|          } else { | ||||
|              warn_report("KVM dirty ring not available, using bitmap method"); | ||||
|              s->kvm_dirty_ring_size = 0; | ||||
|         } | ||||
|     } | ||||
|  | ||||
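The restored block above sizes the ring in bytes (one struct kvm_dirty_gfn per trackable dirty page) and, when the request exceeds what KVM advertises, converts the byte-sized maximum back into an entry count for the error message. A small worked example of that arithmetic follows; the 16-byte entry size is an assumption for illustration only (real code should use sizeof(struct kvm_dirty_gfn) from the kernel headers).

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint64_t gfn_entry_size = 16;        /* assumed entry size */
    uint32_t ring_size  = 65536;               /* requested entries */
    uint64_t ring_bytes = (uint64_t)ring_size * gfn_entry_size;   /* 1048576 */
    uint64_t cap_max_bytes = 65536;            /* pretend capability-check result */
    uint64_t max_entries = cap_max_bytes / gfn_entry_size;        /* 4096 */

    if (ring_bytes > cap_max_bytes) {
        printf("dirty ring size %u too big (maximum is %llu entries)\n",
               ring_size, (unsigned long long)max_entries);
    } else {
        printf("dirty ring: %u entries, %llu bytes\n",
               ring_size, (unsigned long long)ring_bytes);
    }
    return 0;
}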
|     /* | ||||
| @@ -2524,8 +2578,22 @@ static int kvm_init(MachineState *ms) | ||||
| #ifdef KVM_CAP_VCPU_EVENTS | ||||
|     s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS); | ||||
| #endif | ||||
|  | ||||
|     s->robust_singlestep = | ||||
|         kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP); | ||||
|  | ||||
| #ifdef KVM_CAP_DEBUGREGS | ||||
|     s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS); | ||||
| #endif | ||||
|  | ||||
|     s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE); | ||||
|  | ||||
| #ifdef KVM_CAP_IRQ_ROUTING | ||||
|     kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0); | ||||
| #endif | ||||
|  | ||||
|     s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3); | ||||
|  | ||||
|     s->irq_set_ioctl = KVM_IRQ_LINE; | ||||
|     if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) { | ||||
|         s->irq_set_ioctl = KVM_IRQ_LINE_STATUS; | ||||
| @@ -2534,12 +2602,21 @@ static int kvm_init(MachineState *ms) | ||||
|     kvm_readonly_mem_allowed = | ||||
|         (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0); | ||||
|  | ||||
|     kvm_eventfds_allowed = | ||||
|         (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0); | ||||
|  | ||||
|     kvm_irqfds_allowed = | ||||
|         (kvm_check_extension(s, KVM_CAP_IRQFD) > 0); | ||||
|  | ||||
|     kvm_resamplefds_allowed = | ||||
|         (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0); | ||||
|  | ||||
|     kvm_vm_attributes_allowed = | ||||
|         (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0); | ||||
|  | ||||
|     kvm_ioeventfd_any_length_allowed = | ||||
|         (kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0); | ||||
|  | ||||
| #ifdef KVM_CAP_SET_GUEST_DEBUG | ||||
|     kvm_has_guest_debug = | ||||
|         (kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG) > 0); | ||||
| @@ -2576,16 +2653,24 @@ static int kvm_init(MachineState *ms) | ||||
|         kvm_irqchip_create(s); | ||||
|     } | ||||
|  | ||||
|     s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add; | ||||
|     s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del; | ||||
|     if (kvm_eventfds_allowed) { | ||||
|         s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add; | ||||
|         s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del; | ||||
|     } | ||||
|     s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region; | ||||
|     s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region; | ||||
|  | ||||
|     kvm_memory_listener_register(s, &s->memory_listener, | ||||
|                                  &address_space_memory, 0, "kvm-memory"); | ||||
|     memory_listener_register(&kvm_io_listener, | ||||
|     if (kvm_eventfds_allowed) { | ||||
|         memory_listener_register(&kvm_io_listener, | ||||
|                                  &address_space_io); | ||||
|     } | ||||
|     memory_listener_register(&kvm_coalesced_pio_listener, | ||||
|                              &address_space_io); | ||||
|  | ||||
|     s->many_ioeventfds = kvm_check_many_ioeventfds(); | ||||
|  | ||||
|     s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU); | ||||
|     if (!s->sync_mmu) { | ||||
|         ret = ram_block_discard_disable(true); | ||||
| @@ -2593,7 +2678,10 @@ static int kvm_init(MachineState *ms) | ||||
|     } | ||||
|  | ||||
|     if (s->kvm_dirty_ring_size) { | ||||
|         kvm_dirty_ring_reaper_init(s); | ||||
|         ret = kvm_dirty_ring_reaper_init(s); | ||||
|         if (ret) { | ||||
|             goto err; | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     if (kvm_check_extension(kvm_state, KVM_CAP_BINARY_STATS_FD)) { | ||||
| @@ -2611,7 +2699,6 @@ err: | ||||
|     if (s->fd != -1) { | ||||
|         close(s->fd); | ||||
|     } | ||||
|     g_free(s->as); | ||||
|     g_free(s->memory_listener.slots); | ||||
|  | ||||
|     return ret; | ||||
| @@ -2638,14 +2725,16 @@ static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direc | ||||
|  | ||||
| static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run) | ||||
| { | ||||
|     int i; | ||||
|  | ||||
|     fprintf(stderr, "KVM internal error. Suberror: %d\n", | ||||
|             run->internal.suberror); | ||||
|  | ||||
|     for (i = 0; i < run->internal.ndata; ++i) { | ||||
|         fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n", | ||||
|                 i, (uint64_t)run->internal.data[i]); | ||||
|     if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) { | ||||
|         int i; | ||||
|  | ||||
|         for (i = 0; i < run->internal.ndata; ++i) { | ||||
|             fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n", | ||||
|                     i, (uint64_t)run->internal.data[i]); | ||||
|         } | ||||
|     } | ||||
|     if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) { | ||||
|         fprintf(stderr, "emulation failure\n"); | ||||
| @@ -2664,7 +2753,7 @@ void kvm_flush_coalesced_mmio_buffer(void) | ||||
| { | ||||
|     KVMState *s = kvm_state; | ||||
|  | ||||
|     if (!s || s->coalesced_flush_in_progress) { | ||||
|     if (s->coalesced_flush_in_progress) { | ||||
|         return; | ||||
|     } | ||||
|  | ||||
| @@ -2700,13 +2789,7 @@ bool kvm_cpu_check_are_resettable(void) | ||||
| static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg) | ||||
| { | ||||
|     if (!cpu->vcpu_dirty) { | ||||
|         int ret = kvm_arch_get_registers(cpu); | ||||
|         if (ret) { | ||||
|             error_report("Failed to get registers: %s", strerror(-ret)); | ||||
|             cpu_dump_state(cpu, stderr, CPU_DUMP_CODE); | ||||
|             vm_stop(RUN_STATE_INTERNAL_ERROR); | ||||
|         } | ||||
|  | ||||
|         kvm_arch_get_registers(cpu); | ||||
|         cpu->vcpu_dirty = true; | ||||
|     } | ||||
| } | ||||
| @@ -2720,13 +2803,7 @@ void kvm_cpu_synchronize_state(CPUState *cpu) | ||||
|  | ||||
| static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg) | ||||
| { | ||||
|     int ret = kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE); | ||||
|     if (ret) { | ||||
|         error_report("Failed to put registers after reset: %s", strerror(-ret)); | ||||
|         cpu_dump_state(cpu, stderr, CPU_DUMP_CODE); | ||||
|         vm_stop(RUN_STATE_INTERNAL_ERROR); | ||||
|     } | ||||
|  | ||||
|     kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE); | ||||
|     cpu->vcpu_dirty = false; | ||||
| } | ||||
|  | ||||
| @@ -2737,12 +2814,7 @@ void kvm_cpu_synchronize_post_reset(CPUState *cpu) | ||||
|  | ||||
| static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg) | ||||
| { | ||||
|     int ret = kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE); | ||||
|     if (ret) { | ||||
|         error_report("Failed to put registers after init: %s", strerror(-ret)); | ||||
|         exit(1); | ||||
|     } | ||||
|  | ||||
|     kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE); | ||||
|     cpu->vcpu_dirty = false; | ||||
| } | ||||
|  | ||||
| @@ -2835,14 +2907,7 @@ int kvm_cpu_exec(CPUState *cpu) | ||||
|         MemTxAttrs attrs; | ||||
|  | ||||
|         if (cpu->vcpu_dirty) { | ||||
|             ret = kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE); | ||||
|             if (ret) { | ||||
|                 error_report("Failed to put registers after init: %s", | ||||
|                              strerror(-ret)); | ||||
|                 ret = -1; | ||||
|                 break; | ||||
|             } | ||||
|  | ||||
|             kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE); | ||||
|             cpu->vcpu_dirty = false; | ||||
|         } | ||||
|  | ||||
| @@ -3139,11 +3204,29 @@ int kvm_has_vcpu_events(void) | ||||
|     return kvm_state->vcpu_events; | ||||
| } | ||||
|  | ||||
| int kvm_has_robust_singlestep(void) | ||||
| { | ||||
|     return kvm_state->robust_singlestep; | ||||
| } | ||||
|  | ||||
| int kvm_has_debugregs(void) | ||||
| { | ||||
|     return kvm_state->debugregs; | ||||
| } | ||||
|  | ||||
| int kvm_max_nested_state_length(void) | ||||
| { | ||||
|     return kvm_state->max_nested_state_len; | ||||
| } | ||||
|  | ||||
| int kvm_has_many_ioeventfds(void) | ||||
| { | ||||
|     if (!kvm_enabled()) { | ||||
|         return 0; | ||||
|     } | ||||
|     return kvm_state->many_ioeventfds; | ||||
| } | ||||
|  | ||||
| int kvm_has_gsi_routing(void) | ||||
| { | ||||
| #ifdef KVM_CAP_IRQ_ROUTING | ||||
| @@ -3153,13 +3236,19 @@ int kvm_has_gsi_routing(void) | ||||
| #endif | ||||
| } | ||||
|  | ||||
| int kvm_has_intx_set_mask(void) | ||||
| { | ||||
|     return kvm_state->intx_set_mask; | ||||
| } | ||||
|  | ||||
| bool kvm_arm_supports_user_irq(void) | ||||
| { | ||||
|     return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ); | ||||
| } | ||||
|  | ||||
| #ifdef KVM_CAP_SET_GUEST_DEBUG | ||||
| struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu, vaddr pc) | ||||
| struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu, | ||||
|                                                  target_ulong pc) | ||||
| { | ||||
|     struct kvm_sw_breakpoint *bp; | ||||
|  | ||||
| @@ -3612,8 +3701,6 @@ static void kvm_accel_instance_init(Object *obj) | ||||
|     s->kernel_irqchip_split = ON_OFF_AUTO_AUTO; | ||||
|     /* KVM dirty ring is by default off */ | ||||
|     s->kvm_dirty_ring_size = 0; | ||||
|     s->kvm_dirty_ring_with_bitmap = false; | ||||
|     s->kvm_eager_split_size = 0; | ||||
|     s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN; | ||||
|     s->notify_window = 0; | ||||
|     s->xen_version = 0; | ||||
| @@ -3863,7 +3950,7 @@ static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd | ||||
|  | ||||
|     /* Read stats header */ | ||||
|     kvm_stats_header = &descriptors->kvm_stats_header; | ||||
|     ret = pread(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header), 0); | ||||
|     ret = read(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header)); | ||||
|     if (ret != sizeof(*kvm_stats_header)) { | ||||
|         error_setg(errp, "KVM stats: failed to read stats header: " | ||||
|                    "expected %zu actual %zu", | ||||
| @@ -3894,8 +3981,7 @@ static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd | ||||
| } | ||||
|  | ||||
| static void query_stats(StatsResultList **result, StatsTarget target, | ||||
|                         strList *names, int stats_fd, CPUState *cpu, | ||||
|                         Error **errp) | ||||
|                         strList *names, int stats_fd, Error **errp) | ||||
| { | ||||
|     struct kvm_stats_desc *kvm_stats_desc; | ||||
|     struct kvm_stats_header *kvm_stats_header; | ||||
| @@ -3953,7 +4039,7 @@ static void query_stats(StatsResultList **result, StatsTarget target, | ||||
|         break; | ||||
|     case STATS_TARGET_VCPU: | ||||
|         add_stats_entry(result, STATS_PROVIDER_KVM, | ||||
|                         cpu->parent_obj.canonical_path, | ||||
|                         current_cpu->parent_obj.canonical_path, | ||||
|                         stats_list); | ||||
|         break; | ||||
|     default: | ||||
| @@ -3990,9 +4076,10 @@ static void query_stats_schema(StatsSchemaList **result, StatsTarget target, | ||||
|     add_stats_schema(result, STATS_PROVIDER_KVM, target, stats_list); | ||||
| } | ||||
|  | ||||
| static void query_stats_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args) | ||||
| static void query_stats_vcpu(CPUState *cpu, run_on_cpu_data data) | ||||
| { | ||||
|     int stats_fd = cpu->kvm_vcpu_stats_fd; | ||||
|     StatsArgs *kvm_stats_args = (StatsArgs *) data.host_ptr; | ||||
|     int stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL); | ||||
|     Error *local_err = NULL; | ||||
|  | ||||
|     if (stats_fd == -1) { | ||||
| @@ -4001,13 +4088,14 @@ static void query_stats_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args) | ||||
|         return; | ||||
|     } | ||||
|     query_stats(kvm_stats_args->result.stats, STATS_TARGET_VCPU, | ||||
|                 kvm_stats_args->names, stats_fd, cpu, | ||||
|                 kvm_stats_args->errp); | ||||
|                 kvm_stats_args->names, stats_fd, kvm_stats_args->errp); | ||||
|     close(stats_fd); | ||||
| } | ||||
|  | ||||
| static void query_stats_schema_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args) | ||||
| static void query_stats_schema_vcpu(CPUState *cpu, run_on_cpu_data data) | ||||
| { | ||||
|     int stats_fd = cpu->kvm_vcpu_stats_fd; | ||||
|     StatsArgs *kvm_stats_args = (StatsArgs *) data.host_ptr; | ||||
|     int stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL); | ||||
|     Error *local_err = NULL; | ||||
|  | ||||
|     if (stats_fd == -1) { | ||||
| @@ -4017,6 +4105,7 @@ static void query_stats_schema_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args) | ||||
|     } | ||||
|     query_stats_schema(kvm_stats_args->result.schema, STATS_TARGET_VCPU, stats_fd, | ||||
|                        kvm_stats_args->errp); | ||||
|     close(stats_fd); | ||||
| } | ||||
|  | ||||
| static void query_stats_cb(StatsResultList **result, StatsTarget target, | ||||
| @@ -4034,7 +4123,7 @@ static void query_stats_cb(StatsResultList **result, StatsTarget target, | ||||
|             error_setg_errno(errp, errno, "KVM stats: ioctl failed"); | ||||
|             return; | ||||
|         } | ||||
|         query_stats(result, target, names, stats_fd, NULL, errp); | ||||
|         query_stats(result, target, names, stats_fd, errp); | ||||
|         close(stats_fd); | ||||
|         break; | ||||
|     } | ||||
| @@ -4048,7 +4137,7 @@ static void query_stats_cb(StatsResultList **result, StatsTarget target, | ||||
|             if (!apply_str_list_filter(cpu->parent_obj.canonical_path, targets)) { | ||||
|                 continue; | ||||
|             } | ||||
|             query_stats_vcpu(cpu, &stats_args); | ||||
|             run_on_cpu(cpu, query_stats_vcpu, RUN_ON_CPU_HOST_PTR(&stats_args)); | ||||
|         } | ||||
|         break; | ||||
|     } | ||||
| @@ -4074,6 +4163,6 @@ void query_stats_schemas_cb(StatsSchemaList **result, Error **errp) | ||||
|     if (first_cpu) { | ||||
|         stats_args.result.schema = result; | ||||
|         stats_args.errp = errp; | ||||
|         query_stats_schema_vcpu(first_cpu, &stats_args); | ||||
|         run_on_cpu(first_cpu, query_stats_schema_vcpu, RUN_ON_CPU_HOST_PTR(&stats_args)); | ||||
|     } | ||||
| } | ||||
|   | ||||
| @@ -1,5 +1,5 @@ | ||||
| specific_ss.add(files('accel-target.c')) | ||||
| system_ss.add(files('accel-system.c', 'accel-blocker.c')) | ||||
| specific_ss.add(files('accel-common.c', 'accel-blocker.c')) | ||||
| softmmu_ss.add(files('accel-softmmu.c')) | ||||
| user_ss.add(files('accel-user.c')) | ||||
|  | ||||
| subdir('tcg') | ||||
| @@ -12,4 +12,4 @@ if have_system | ||||
| endif | ||||
|  | ||||
| # qtest | ||||
| system_ss.add(files('dummy-cpus.c')) | ||||
| softmmu_ss.add(files('dummy-cpus.c')) | ||||
|   | ||||
| @@ -1 +1 @@ | ||||
| qtest_module_ss.add(when: ['CONFIG_SYSTEM_ONLY'], if_true: files('qtest.c')) | ||||
| qtest_module_ss.add(when: ['CONFIG_SOFTMMU'], if_true: files('qtest.c')) | ||||
|   | ||||
24 accel/stubs/hax-stub.c Normal file
| @@ -0,0 +1,24 @@ | ||||
| /* | ||||
|  * QEMU HAXM support | ||||
|  * | ||||
|  * Copyright (c) 2015, Intel Corporation | ||||
|  * | ||||
|  * Copyright 2016 Google, Inc. | ||||
|  * | ||||
|  * This software is licensed under the terms of the GNU General Public | ||||
|  * License version 2, as published by the Free Software Foundation, and | ||||
|  * may be copied, distributed, and modified under those terms. | ||||
|  * | ||||
|  * See the COPYING file in the top-level directory. | ||||
|  * | ||||
|  */ | ||||
|  | ||||
| #include "qemu/osdep.h" | ||||
| #include "sysemu/hax.h" | ||||
|  | ||||
| bool hax_allowed; | ||||
|  | ||||
| int hax_sync_vcpus(void) | ||||
| { | ||||
|     return 0; | ||||
| } | ||||
| @@ -17,12 +17,15 @@ | ||||
| KVMState *kvm_state; | ||||
| bool kvm_kernel_irqchip; | ||||
| bool kvm_async_interrupts_allowed; | ||||
| bool kvm_eventfds_allowed; | ||||
| bool kvm_irqfds_allowed; | ||||
| bool kvm_resamplefds_allowed; | ||||
| bool kvm_msi_via_irqfd_allowed; | ||||
| bool kvm_gsi_routing_allowed; | ||||
| bool kvm_gsi_direct_mapping; | ||||
| bool kvm_allowed; | ||||
| bool kvm_readonly_mem_allowed; | ||||
| bool kvm_ioeventfd_any_length_allowed; | ||||
| bool kvm_msi_use_devid; | ||||
|  | ||||
| void kvm_flush_coalesced_mmio_buffer(void) | ||||
| @@ -38,6 +41,11 @@ bool kvm_has_sync_mmu(void) | ||||
|     return false; | ||||
| } | ||||
|  | ||||
| int kvm_has_many_ioeventfds(void) | ||||
| { | ||||
|     return 0; | ||||
| } | ||||
|  | ||||
| int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr) | ||||
| { | ||||
|     return 1; | ||||
| @@ -83,6 +91,11 @@ void kvm_irqchip_change_notify(void) | ||||
| { | ||||
| } | ||||
|  | ||||
| int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter) | ||||
| { | ||||
|     return -ENOSYS; | ||||
| } | ||||
|  | ||||
| int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n, | ||||
|                                        EventNotifier *rn, int virq) | ||||
| { | ||||
| @@ -95,14 +108,9 @@ int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n, | ||||
|     return -ENOSYS; | ||||
| } | ||||
|  | ||||
| unsigned int kvm_get_max_memslots(void) | ||||
| bool kvm_has_free_slot(MachineState *ms) | ||||
| { | ||||
|     return 0; | ||||
| } | ||||
|  | ||||
| unsigned int kvm_get_free_memslots(void) | ||||
| { | ||||
|     return 0; | ||||
|     return false; | ||||
| } | ||||
|  | ||||
| void kvm_init_cpu_signals(CPUState *cpu) | ||||
|   | ||||
| @@ -1,6 +1,7 @@ | ||||
| system_stubs_ss = ss.source_set() | ||||
| system_stubs_ss.add(when: 'CONFIG_XEN', if_false: files('xen-stub.c')) | ||||
| system_stubs_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c')) | ||||
| system_stubs_ss.add(when: 'CONFIG_TCG', if_false: files('tcg-stub.c')) | ||||
| sysemu_stubs_ss = ss.source_set() | ||||
| sysemu_stubs_ss.add(when: 'CONFIG_HAX', if_false: files('hax-stub.c')) | ||||
| sysemu_stubs_ss.add(when: 'CONFIG_XEN', if_false: files('xen-stub.c')) | ||||
| sysemu_stubs_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c')) | ||||
| sysemu_stubs_ss.add(when: 'CONFIG_TCG', if_false: files('tcg-stub.c')) | ||||
|  | ||||
| specific_ss.add_all(when: ['CONFIG_SYSTEM_ONLY'], if_true: system_stubs_ss) | ||||
| specific_ss.add_all(when: ['CONFIG_SOFTMMU'], if_true: sysemu_stubs_ss) | ||||
|   | ||||
| @@ -11,25 +11,28 @@ | ||||
|  */ | ||||
|  | ||||
| #include "qemu/osdep.h" | ||||
| #include "exec/tb-flush.h" | ||||
| #include "exec/exec-all.h" | ||||
|  | ||||
| void tb_flush(CPUState *cpu) | ||||
| { | ||||
| } | ||||
|  | ||||
| void tlb_set_dirty(CPUState *cpu, vaddr vaddr) | ||||
| void tlb_set_dirty(CPUState *cpu, target_ulong vaddr) | ||||
| { | ||||
| } | ||||
|  | ||||
| int probe_access_flags(CPUArchState *env, vaddr addr, int size, | ||||
| void tcg_flush_jmp_cache(CPUState *cpu) | ||||
| { | ||||
| } | ||||
|  | ||||
| int probe_access_flags(CPUArchState *env, target_ulong addr, int size, | ||||
|                        MMUAccessType access_type, int mmu_idx, | ||||
|                        bool nonfault, void **phost, uintptr_t retaddr) | ||||
| { | ||||
|      g_assert_not_reached(); | ||||
| } | ||||
|  | ||||
| void *probe_access(CPUArchState *env, vaddr addr, int size, | ||||
| void *probe_access(CPUArchState *env, target_ulong addr, int size, | ||||
|                    MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) | ||||
| { | ||||
|      /* Handled by hardware accelerator. */ | ||||
|   | ||||
| @@ -13,12 +13,26 @@ | ||||
|  * See the COPYING file in the top-level directory. | ||||
|  */ | ||||
|  | ||||
| static void atomic_trace_rmw_post(CPUArchState *env, uint64_t addr, | ||||
| static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr, | ||||
|                                   MemOpIdx oi) | ||||
| { | ||||
|     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_RW); | ||||
| } | ||||
|  | ||||
| #if HAVE_ATOMIC128 | ||||
| static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr, | ||||
|                                  MemOpIdx oi) | ||||
| { | ||||
|     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); | ||||
| } | ||||
|  | ||||
| static void atomic_trace_st_post(CPUArchState *env, target_ulong addr, | ||||
|                                  MemOpIdx oi) | ||||
| { | ||||
|     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); | ||||
| } | ||||
| #endif | ||||
|  | ||||
| /* | ||||
|  * Atomic helpers callable from TCG. | ||||
|  * These have a common interface and all defer to cpu_atomic_* | ||||
| @@ -26,7 +40,7 @@ static void atomic_trace_rmw_post(CPUArchState *env, uint64_t addr, | ||||
|  */ | ||||
|  | ||||
| #define CMPXCHG_HELPER(OP, TYPE) \ | ||||
|     TYPE HELPER(atomic_##OP)(CPUArchState *env, uint64_t addr,      \ | ||||
|     TYPE HELPER(atomic_##OP)(CPUArchState *env, target_ulong addr,  \ | ||||
|                              TYPE oldv, TYPE newv, uint32_t oi)     \ | ||||
|     { return cpu_atomic_##OP##_mmu(env, addr, oldv, newv, oi, GETPC()); } | ||||
|  | ||||
| @@ -41,23 +55,43 @@ CMPXCHG_HELPER(cmpxchgq_be, uint64_t) | ||||
| CMPXCHG_HELPER(cmpxchgq_le, uint64_t) | ||||
| #endif | ||||
|  | ||||
| #if HAVE_CMPXCHG128 | ||||
| #ifdef CONFIG_CMPXCHG128 | ||||
| CMPXCHG_HELPER(cmpxchgo_be, Int128) | ||||
| CMPXCHG_HELPER(cmpxchgo_le, Int128) | ||||
| #endif | ||||
|  | ||||
| #undef CMPXCHG_HELPER | ||||
|  | ||||
| Int128 HELPER(nonatomic_cmpxchgo)(CPUArchState *env, uint64_t addr, | ||||
|                                   Int128 cmpv, Int128 newv, uint32_t oi) | ||||
| Int128 HELPER(nonatomic_cmpxchgo_be)(CPUArchState *env, target_ulong addr, | ||||
|                                      Int128 cmpv, Int128 newv, uint32_t oi) | ||||
| { | ||||
| #if TCG_TARGET_REG_BITS == 32 | ||||
|     uintptr_t ra = GETPC(); | ||||
|     Int128 oldv; | ||||
|  | ||||
|     oldv = cpu_ld16_mmu(env, addr, oi, ra); | ||||
|     oldv = cpu_ld16_be_mmu(env, addr, oi, ra); | ||||
|     if (int128_eq(oldv, cmpv)) { | ||||
|         cpu_st16_mmu(env, addr, newv, oi, ra); | ||||
|         cpu_st16_be_mmu(env, addr, newv, oi, ra); | ||||
|     } else { | ||||
|         /* Even with comparison failure, still need a write cycle. */ | ||||
|         probe_write(env, addr, 16, get_mmuidx(oi), ra); | ||||
|     } | ||||
|     return oldv; | ||||
| #else | ||||
|     g_assert_not_reached(); | ||||
| #endif | ||||
| } | ||||
|  | ||||
| Int128 HELPER(nonatomic_cmpxchgo_le)(CPUArchState *env, target_ulong addr, | ||||
|                                      Int128 cmpv, Int128 newv, uint32_t oi) | ||||
| { | ||||
| #if TCG_TARGET_REG_BITS == 32 | ||||
|     uintptr_t ra = GETPC(); | ||||
|     Int128 oldv; | ||||
|  | ||||
|     oldv = cpu_ld16_le_mmu(env, addr, oi, ra); | ||||
|     if (int128_eq(oldv, cmpv)) { | ||||
|         cpu_st16_le_mmu(env, addr, newv, oi, ra); | ||||
|     } else { | ||||
|         /* Even with comparison failure, still need a write cycle. */ | ||||
|         probe_write(env, addr, 16, get_mmuidx(oi), ra); | ||||
| @@ -69,7 +103,7 @@ Int128 HELPER(nonatomic_cmpxchgo)(CPUArchState *env, uint64_t addr, | ||||
| } | ||||
|  | ||||
| #define ATOMIC_HELPER(OP, TYPE) \ | ||||
|     TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, uint64_t addr,  \ | ||||
|     TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, target_ulong addr,  \ | ||||
|                                   TYPE val, uint32_t oi)                 \ | ||||
|     { return glue(glue(cpu_atomic_,OP),_mmu)(env, addr, val, oi, GETPC()); } | ||||
|  | ||||
|   | ||||
| @@ -69,12 +69,12 @@ | ||||
| # define END  _le | ||||
| #endif | ||||
|  | ||||
| ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr, | ||||
| ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, | ||||
|                               ABI_TYPE cmpv, ABI_TYPE newv, | ||||
|                               MemOpIdx oi, uintptr_t retaddr) | ||||
| { | ||||
|     DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, | ||||
|                                          DATA_SIZE, retaddr); | ||||
|     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, | ||||
|                                          PAGE_READ | PAGE_WRITE, retaddr); | ||||
|     DATA_TYPE ret; | ||||
|  | ||||
| #if DATA_SIZE == 16 | ||||
| @@ -87,12 +87,38 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr, | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| #if DATA_SIZE < 16 | ||||
| ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val, | ||||
| #if DATA_SIZE >= 16 | ||||
| #if HAVE_ATOMIC128 | ||||
| ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr, | ||||
|                          MemOpIdx oi, uintptr_t retaddr) | ||||
| { | ||||
|     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, | ||||
|                                          PAGE_READ, retaddr); | ||||
|     DATA_TYPE val; | ||||
|  | ||||
|     val = atomic16_read(haddr); | ||||
|     ATOMIC_MMU_CLEANUP; | ||||
|     atomic_trace_ld_post(env, addr, oi); | ||||
|     return val; | ||||
| } | ||||
|  | ||||
| void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val, | ||||
|                      MemOpIdx oi, uintptr_t retaddr) | ||||
| { | ||||
|     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, | ||||
|                                          PAGE_WRITE, retaddr); | ||||
|  | ||||
|     atomic16_set(haddr, val); | ||||
|     ATOMIC_MMU_CLEANUP; | ||||
|     atomic_trace_st_post(env, addr, oi); | ||||
| } | ||||
| #endif | ||||
| #else | ||||
| ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val, | ||||
|                            MemOpIdx oi, uintptr_t retaddr) | ||||
| { | ||||
|     DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, | ||||
|                                          DATA_SIZE, retaddr); | ||||
|     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, | ||||
|                                          PAGE_READ | PAGE_WRITE, retaddr); | ||||
|     DATA_TYPE ret; | ||||
|  | ||||
|     ret = qatomic_xchg__nocheck(haddr, val); | ||||
| @@ -102,11 +128,12 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val, | ||||
| } | ||||
|  | ||||
| #define GEN_ATOMIC_HELPER(X)                                        \ | ||||
| ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr,            \ | ||||
| ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \ | ||||
|                         ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \ | ||||
| {                                                                   \ | ||||
|     DATA_TYPE *haddr, ret;                                          \ | ||||
|     haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr);   \ | ||||
|     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,  \ | ||||
|                                          PAGE_READ | PAGE_WRITE, retaddr); \ | ||||
|     DATA_TYPE ret;                                                  \ | ||||
|     ret = qatomic_##X(haddr, val);                                  \ | ||||
|     ATOMIC_MMU_CLEANUP;                                             \ | ||||
|     atomic_trace_rmw_post(env, addr, oi);                           \ | ||||
| @@ -133,11 +160,12 @@ GEN_ATOMIC_HELPER(xor_fetch) | ||||
|  * of CF_PARALLEL's value, we'll trace just a read and a write. | ||||
|  */ | ||||
| #define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET)                \ | ||||
| ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr,            \ | ||||
| ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \ | ||||
|                         ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \ | ||||
| {                                                                   \ | ||||
|     XDATA_TYPE *haddr, cmp, old, new, val = xval;                   \ | ||||
|     haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr);   \ | ||||
|     XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \ | ||||
|                                           PAGE_READ | PAGE_WRITE, retaddr); \ | ||||
|     XDATA_TYPE cmp, old, new, val = xval;                           \ | ||||
|     smp_mb();                                                       \ | ||||
|     cmp = qatomic_read__nocheck(haddr);                             \ | ||||
|     do {                                                            \ | ||||
| @@ -160,7 +188,7 @@ GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new) | ||||
| GEN_ATOMIC_HELPER_FN(umax_fetch, MAX,  DATA_TYPE, new) | ||||
|  | ||||
| #undef GEN_ATOMIC_HELPER_FN | ||||
| #endif /* DATA SIZE < 16 */ | ||||
| #endif /* DATA SIZE >= 16 */ | ||||
|  | ||||
| #undef END | ||||
|  | ||||
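GEN_ATOMIC_HELPER_FN above generates the read-modify-write helpers that have no single matching host primitive (the fetch-and-min/max family and the byte-swapped variants in the big-endian copy of this file) from a compare-and-swap retry loop: load the old value, compute the new one, and retry if another vCPU changed the location in between. A self-contained C11 sketch of that loop, independent of the surrounding macros:

#include <stdatomic.h>
#include <stdint.h>

/* Atomically store max(*addr, val) and return the previous value
 * (the fetch_* variants return the old value, *_fetch the new one). */
static uint32_t fetch_umax_u32(_Atomic uint32_t *addr, uint32_t val)
{
    uint32_t old = atomic_load_explicit(addr, memory_order_relaxed);
    uint32_t desired;

    do {
        desired = old > val ? old : val;
        /* On failure, 'old' is refreshed with the current contents. */
    } while (!atomic_compare_exchange_weak_explicit(addr, &old, desired,
                                                    memory_order_seq_cst,
                                                    memory_order_relaxed));
    return old;
}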
| @@ -174,12 +202,12 @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX,  DATA_TYPE, new) | ||||
| # define END  _be | ||||
| #endif | ||||
|  | ||||
| ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr, | ||||
| ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, | ||||
|                               ABI_TYPE cmpv, ABI_TYPE newv, | ||||
|                               MemOpIdx oi, uintptr_t retaddr) | ||||
| { | ||||
|     DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, | ||||
|                                          DATA_SIZE, retaddr); | ||||
|     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, | ||||
|                                          PAGE_READ | PAGE_WRITE, retaddr); | ||||
|     DATA_TYPE ret; | ||||
|  | ||||
| #if DATA_SIZE == 16 | ||||
| @@ -192,12 +220,39 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr, | ||||
|     return BSWAP(ret); | ||||
| } | ||||
|  | ||||
| #if DATA_SIZE < 16 | ||||
| ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val, | ||||
| #if DATA_SIZE >= 16 | ||||
| #if HAVE_ATOMIC128 | ||||
| ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr, | ||||
|                          MemOpIdx oi, uintptr_t retaddr) | ||||
| { | ||||
|     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, | ||||
|                                          PAGE_READ, retaddr); | ||||
|     DATA_TYPE val; | ||||
|  | ||||
|     val = atomic16_read(haddr); | ||||
|     ATOMIC_MMU_CLEANUP; | ||||
|     atomic_trace_ld_post(env, addr, oi); | ||||
|     return BSWAP(val); | ||||
| } | ||||
|  | ||||
| void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val, | ||||
|                      MemOpIdx oi, uintptr_t retaddr) | ||||
| { | ||||
|     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, | ||||
|                                          PAGE_WRITE, retaddr); | ||||
|  | ||||
|     val = BSWAP(val); | ||||
|     atomic16_set(haddr, val); | ||||
|     ATOMIC_MMU_CLEANUP; | ||||
|     atomic_trace_st_post(env, addr, oi); | ||||
| } | ||||
| #endif | ||||
| #else | ||||
| ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val, | ||||
|                            MemOpIdx oi, uintptr_t retaddr) | ||||
| { | ||||
|     DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, | ||||
|                                          DATA_SIZE, retaddr); | ||||
|     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, | ||||
|                                          PAGE_READ | PAGE_WRITE, retaddr); | ||||
|     ABI_TYPE ret; | ||||
|  | ||||
|     ret = qatomic_xchg__nocheck(haddr, BSWAP(val)); | ||||
| @@ -207,11 +262,12 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val, | ||||
| } | ||||
|  | ||||
| #define GEN_ATOMIC_HELPER(X)                                        \ | ||||
| ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr,            \ | ||||
| ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \ | ||||
|                         ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \ | ||||
| {                                                                   \ | ||||
|     DATA_TYPE *haddr, ret;                                          \ | ||||
|     haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr);   \ | ||||
|     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,  \ | ||||
|                                          PAGE_READ | PAGE_WRITE, retaddr); \ | ||||
|     DATA_TYPE ret;                                                  \ | ||||
|     ret = qatomic_##X(haddr, BSWAP(val));                           \ | ||||
|     ATOMIC_MMU_CLEANUP;                                             \ | ||||
|     atomic_trace_rmw_post(env, addr, oi);                           \ | ||||
| @@ -235,11 +291,12 @@ GEN_ATOMIC_HELPER(xor_fetch) | ||||
|  * of CF_PARALLEL's value, we'll trace just a read and a write. | ||||
|  */ | ||||
| #define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET)                \ | ||||
| ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr,            \ | ||||
| ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \ | ||||
|                         ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \ | ||||
| {                                                                   \ | ||||
|     XDATA_TYPE *haddr, ldo, ldn, old, new, val = xval;              \ | ||||
|     haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr);   \ | ||||
|     XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \ | ||||
|                                           PAGE_READ | PAGE_WRITE, retaddr); \ | ||||
|     XDATA_TYPE ldo, ldn, old, new, val = xval;                      \ | ||||
|     smp_mb();                                                       \ | ||||
|     ldn = qatomic_read__nocheck(haddr);                             \ | ||||
|     do {                                                            \ | ||||
| @@ -269,7 +326,7 @@ GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new) | ||||
| #undef ADD | ||||
|  | ||||
| #undef GEN_ATOMIC_HELPER_FN | ||||
| #endif /* DATA_SIZE < 16 */ | ||||
| #endif /* DATA_SIZE >= 16 */ | ||||
|  | ||||
| #undef END | ||||
| #endif /* DATA_SIZE > 1 */ | ||||
|   | ||||
| @@ -20,8 +20,7 @@ | ||||
| #include "qemu/osdep.h" | ||||
| #include "sysemu/cpus.h" | ||||
| #include "sysemu/tcg.h" | ||||
| #include "qemu/plugin.h" | ||||
| #include "internal-common.h" | ||||
| #include "exec/exec-all.h" | ||||
|  | ||||
| bool tcg_allowed; | ||||
|  | ||||
| @@ -32,12 +31,40 @@ void cpu_loop_exit_noexc(CPUState *cpu) | ||||
|     cpu_loop_exit(cpu); | ||||
| } | ||||
|  | ||||
| #if defined(CONFIG_SOFTMMU) | ||||
| void cpu_reloading_memory_map(void) | ||||
| { | ||||
|     if (qemu_in_vcpu_thread() && current_cpu->running) { | ||||
|         /* The guest can in theory prolong the RCU critical section as long | ||||
|          * as it feels like. The major problem with this is that because it | ||||
|          * can do multiple reconfigurations of the memory map within the | ||||
|          * critical section, we could potentially accumulate an unbounded | ||||
|          * collection of memory data structures awaiting reclamation. | ||||
|          * | ||||
|          * Because the only thing we're currently protecting with RCU is the | ||||
|          * memory data structures, it's sufficient to break the critical section | ||||
|          * in this callback, which we know will get called every time the | ||||
|          * memory map is rearranged. | ||||
|          * | ||||
|          * (If we add anything else in the system that uses RCU to protect | ||||
|          * its data structures, we will need to implement some other mechanism | ||||
|          * to force TCG CPUs to exit the critical section, at which point this | ||||
|          * part of this callback might become unnecessary.) | ||||
|          * | ||||
|          * This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which | ||||
|          * only protects cpu->as->dispatch. Since we know our caller is about | ||||
|          * to reload it, it's safe to split the critical section. | ||||
|          */ | ||||
|         rcu_read_unlock(); | ||||
|         rcu_read_lock(); | ||||
|     } | ||||
| } | ||||
| #endif | ||||
|  | ||||
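The long comment above reduces to one rule: a vCPU thread that never leaves its rcu_read_lock()/rcu_read_unlock() pair can pin an unbounded amount of retired memory-map data, so the callback closes and immediately reopens the read-side critical section at a point where it holds no RCU-protected pointers. A hedged sketch of that pattern, written against liburcu's default flavour purely for illustration (QEMU ships its own RCU implementation with the same two primitives):

#include <urcu.h>          /* liburcu; an assumption for this sketch */
#include <stdbool.h>

static bool memory_map_changed(void)   /* stand-in trigger */
{
    return true;
}

static void reader_step(void)          /* stand-in for per-iteration work */
{
}

static void long_running_reader(void)
{
    rcu_register_thread();
    rcu_read_lock();
    for (int i = 0; i < 1000; i++) {
        reader_step();
        if (memory_map_changed()) {
            /* Break the critical section only while holding no pointers
             * into RCU-protected data, exactly like the unlock/lock pair
             * in cpu_reloading_memory_map() above. */
            rcu_read_unlock();
            rcu_read_lock();
        }
    }
    rcu_read_unlock();
    rcu_unregister_thread();
}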
| void cpu_loop_exit(CPUState *cpu) | ||||
| { | ||||
|     /* Undo the setting in cpu_tb_exec.  */ | ||||
|     cpu->neg.can_do_io = true; | ||||
|     /* Undo any setting in generated code.  */ | ||||
|     qemu_plugin_disable_mem_helpers(cpu); | ||||
|     cpu->can_do_io = 1; | ||||
|     siglongjmp(cpu->jmp_env, 1); | ||||
| } | ||||
|  | ||||
| @@ -51,8 +78,6 @@ void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc) | ||||
|  | ||||
| void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc) | ||||
| { | ||||
|     /* Prevent looping if already executing in a serial context. */ | ||||
|     g_assert(!cpu_in_serial_context(cpu)); | ||||
|     cpu->exception_index = EXCP_ATOMIC; | ||||
|     cpu_loop_exit_restore(cpu, pc); | ||||
| } | ||||
|   | ||||
| @@ -38,12 +38,11 @@ | ||||
| #include "sysemu/cpu-timers.h" | ||||
| #include "exec/replay-core.h" | ||||
| #include "sysemu/tcg.h" | ||||
| #include "exec/helper-proto-common.h" | ||||
| #include "exec/helper-proto.h" | ||||
| #include "tb-jmp-cache.h" | ||||
| #include "tb-hash.h" | ||||
| #include "tb-context.h" | ||||
| #include "internal-common.h" | ||||
| #include "internal-target.h" | ||||
| #include "internal.h" | ||||
|  | ||||
| /* -icount align implementation. */ | ||||
|  | ||||
| @@ -74,7 +73,7 @@ static void align_clocks(SyncClocks *sc, CPUState *cpu) | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     cpu_icount = cpu->icount_extra + cpu->neg.icount_decr.u16.low; | ||||
|     cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low; | ||||
|     sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount); | ||||
|     sc->last_cpu_icount = cpu_icount; | ||||
|  | ||||
| @@ -125,7 +124,7 @@ static void init_delay_params(SyncClocks *sc, CPUState *cpu) | ||||
|     sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT); | ||||
|     sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock; | ||||
|     sc->last_cpu_icount | ||||
|         = cpu->icount_extra + cpu->neg.icount_decr.u16.low; | ||||
|         = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low; | ||||
|     if (sc->diff_clk < max_delay) { | ||||
|         max_delay = sc->diff_clk; | ||||
|     } | ||||
| @@ -160,7 +159,7 @@ uint32_t curr_cflags(CPUState *cpu) | ||||
|      */ | ||||
|     if (unlikely(cpu->singlestep_enabled)) { | ||||
|         cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1; | ||||
|     } else if (qatomic_read(&one_insn_per_tb)) { | ||||
|     } else if (singlestep) { | ||||
|         cflags |= CF_NO_GOTO_TB | 1; | ||||
|     } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { | ||||
|         cflags |= CF_NO_GOTO_TB; | ||||
| @@ -170,12 +169,13 @@ uint32_t curr_cflags(CPUState *cpu) | ||||
| } | ||||
|  | ||||
| struct tb_desc { | ||||
|     vaddr pc; | ||||
|     uint64_t cs_base; | ||||
|     target_ulong pc; | ||||
|     target_ulong cs_base; | ||||
|     CPUArchState *env; | ||||
|     tb_page_addr_t page_addr0; | ||||
|     uint32_t flags; | ||||
|     uint32_t cflags; | ||||
|     uint32_t trace_vcpu_dstate; | ||||
| }; | ||||
|  | ||||
| static bool tb_lookup_cmp(const void *p, const void *d) | ||||
| @@ -187,6 +187,7 @@ static bool tb_lookup_cmp(const void *p, const void *d) | ||||
|         tb_page_addr0(tb) == desc->page_addr0 && | ||||
|         tb->cs_base == desc->cs_base && | ||||
|         tb->flags == desc->flags && | ||||
|         tb->trace_vcpu_dstate == desc->trace_vcpu_dstate && | ||||
|         tb_cflags(tb) == desc->cflags) { | ||||
|         /* check next page if needed */ | ||||
|         tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb); | ||||
| @@ -194,7 +195,7 @@ static bool tb_lookup_cmp(const void *p, const void *d) | ||||
|             return true; | ||||
|         } else { | ||||
|             tb_page_addr_t phys_page1; | ||||
|             vaddr virt_page1; | ||||
|             target_ulong virt_page1; | ||||
|  | ||||
|             /* | ||||
|              * We know that the first page matched, and an otherwise valid TB | ||||
| @@ -215,18 +216,19 @@ static bool tb_lookup_cmp(const void *p, const void *d) | ||||
|     return false; | ||||
| } | ||||
|  | ||||
| static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc, | ||||
|                                           uint64_t cs_base, uint32_t flags, | ||||
| static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc, | ||||
|                                           target_ulong cs_base, uint32_t flags, | ||||
|                                           uint32_t cflags) | ||||
| { | ||||
|     tb_page_addr_t phys_pc; | ||||
|     struct tb_desc desc; | ||||
|     uint32_t h; | ||||
|  | ||||
|     desc.env = cpu_env(cpu); | ||||
|     desc.env = cpu->env_ptr; | ||||
|     desc.cs_base = cs_base; | ||||
|     desc.flags = flags; | ||||
|     desc.cflags = cflags; | ||||
|     desc.trace_vcpu_dstate = *cpu->trace_dstate; | ||||
|     desc.pc = pc; | ||||
|     phys_pc = get_page_addr_code(desc.env, pc); | ||||
|     if (phys_pc == -1) { | ||||
| @@ -234,14 +236,14 @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc, | ||||
|     } | ||||
|     desc.page_addr0 = phys_pc; | ||||
|     h = tb_hash_func(phys_pc, (cflags & CF_PCREL ? 0 : pc), | ||||
|                      flags, cs_base, cflags); | ||||
|                      flags, cflags, *cpu->trace_dstate); | ||||
|     return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp); | ||||
| } | ||||
|  | ||||
| /* Might cause an exception, so have a longjmp destination ready */ | ||||
| static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc, | ||||
|                                           uint64_t cs_base, uint32_t flags, | ||||
|                                           uint32_t cflags) | ||||
| static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc, | ||||
|                                           target_ulong cs_base, | ||||
|                                           uint32_t flags, uint32_t cflags) | ||||
| { | ||||
|     TranslationBlock *tb; | ||||
|     CPUJumpCache *jc; | ||||
| @@ -255,12 +257,13 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc, | ||||
|  | ||||
|     if (cflags & CF_PCREL) { | ||||
|         /* Use acquire to ensure current load of pc from jc. */ | ||||
|         tb = qatomic_load_acquire(&jc->array[hash].tb); | ||||
|         tb =  qatomic_load_acquire(&jc->array[hash].tb); | ||||
|  | ||||
|         if (likely(tb && | ||||
|                    jc->array[hash].pc == pc && | ||||
|                    tb->cs_base == cs_base && | ||||
|                    tb->flags == flags && | ||||
|                    tb->trace_vcpu_dstate == *cpu->trace_dstate && | ||||
|                    tb_cflags(tb) == cflags)) { | ||||
|             return tb; | ||||
|         } | ||||
| @@ -269,7 +272,7 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc, | ||||
|             return NULL; | ||||
|         } | ||||
|         jc->array[hash].pc = pc; | ||||
|         /* Ensure pc is written first. */ | ||||
|         /* Use store_release on tb to ensure pc is written first. */ | ||||
|         qatomic_store_release(&jc->array[hash].tb, tb); | ||||
|     } else { | ||||
|         /* Use rcu_read to ensure current load of pc from *tb. */ | ||||
| @@ -279,6 +282,7 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc, | ||||
|                    tb->pc == pc && | ||||
|                    tb->cs_base == cs_base && | ||||
|                    tb->flags == flags && | ||||
|                    tb->trace_vcpu_dstate == *cpu->trace_dstate && | ||||
|                    tb_cflags(tb) == cflags)) { | ||||
|             return tb; | ||||
|         } | ||||
| @@ -293,16 +297,17 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc, | ||||
|     return tb; | ||||
| } | ||||
|  | ||||
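The CF_PCREL branch above publishes a {pc, tb} pair without a lock: the writer stores pc first and only then store-releases tb, so a reader that load-acquires a non-NULL tb is guaranteed to observe the pc written before that publication. A self-contained C11 sketch of the same protocol; the slot layout and names are illustrative, not the actual CPUJumpCache:

#include <stdatomic.h>
#include <stdint.h>

struct tb;                               /* opaque translated-block handle */

struct jc_slot {
    uint64_t pc;                         /* plain field, ordered by the release */
    _Atomic(struct tb *) tb;
};

/* Writer: fill in pc before publishing tb with release semantics. */
static void jc_publish(struct jc_slot *slot, uint64_t pc, struct tb *tb)
{
    slot->pc = pc;
    atomic_store_explicit(&slot->tb, tb, memory_order_release);
}

/* Reader: acquire-load tb; if it is non-NULL, the pc read below is at
 * least as new as the one stored before that tb was published, so a
 * matching pc means the cached tb really was translated for this pc. */
static struct tb *jc_lookup(struct jc_slot *slot, uint64_t pc)
{
    struct tb *tb = atomic_load_explicit(&slot->tb, memory_order_acquire);

    return (tb && slot->pc == pc) ? tb : NULL;
}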
| static void log_cpu_exec(vaddr pc, CPUState *cpu, | ||||
| static void log_cpu_exec(target_ulong pc, CPUState *cpu, | ||||
|                          const TranslationBlock *tb) | ||||
| { | ||||
|     if (qemu_log_in_addr_range(pc)) { | ||||
|         qemu_log_mask(CPU_LOG_EXEC, | ||||
|                       "Trace %d: %p [%08" PRIx64 | ||||
|                       "/%016" VADDR_PRIx "/%08x/%08x] %s\n", | ||||
|                       "Trace %d: %p [" TARGET_FMT_lx | ||||
|                       "/" TARGET_FMT_lx "/%08x/%08x] %s\n", | ||||
|                       cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc, | ||||
|                       tb->flags, tb->cflags, lookup_symbol(pc)); | ||||
|  | ||||
| #if defined(DEBUG_DISAS) | ||||
|         if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) { | ||||
|             FILE *logfile = qemu_log_trylock(); | ||||
|             if (logfile) { | ||||
| @@ -314,17 +319,15 @@ static void log_cpu_exec(vaddr pc, CPUState *cpu, | ||||
| #if defined(TARGET_I386) | ||||
|                 flags |= CPU_DUMP_CCOP; | ||||
| #endif | ||||
|                 if (qemu_loglevel_mask(CPU_LOG_TB_VPU)) { | ||||
|                     flags |= CPU_DUMP_VPU; | ||||
|                 } | ||||
|                 cpu_dump_state(cpu, logfile, flags); | ||||
|                 qemu_log_unlock(logfile); | ||||
|             } | ||||
|         } | ||||
| #endif /* DEBUG_DISAS */ | ||||
|     } | ||||
| } | ||||
|  | ||||
| static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc, | ||||
| static bool check_for_breakpoints_slow(CPUState *cpu, target_ulong pc, | ||||
|                                        uint32_t *cflags) | ||||
| { | ||||
|     CPUBreakpoint *bp; | ||||
| @@ -390,7 +393,7 @@ static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc, | ||||
|     return false; | ||||
| } | ||||
|  | ||||
| static inline bool check_for_breakpoints(CPUState *cpu, vaddr pc, | ||||
| static inline bool check_for_breakpoints(CPUState *cpu, target_ulong pc, | ||||
|                                          uint32_t *cflags) | ||||
| { | ||||
|     return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) && | ||||
| @@ -409,8 +412,7 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env) | ||||
| { | ||||
|     CPUState *cpu = env_cpu(env); | ||||
|     TranslationBlock *tb; | ||||
|     vaddr pc; | ||||
|     uint64_t cs_base; | ||||
|     target_ulong cs_base, pc; | ||||
|     uint32_t flags, cflags; | ||||
|  | ||||
|     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); | ||||
| @@ -445,7 +447,7 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env) | ||||
| static inline TranslationBlock * QEMU_DISABLE_CFI | ||||
| cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit) | ||||
| { | ||||
|     CPUArchState *env = cpu_env(cpu); | ||||
|     CPUArchState *env = cpu->env_ptr; | ||||
|     uintptr_t ret; | ||||
|     TranslationBlock *last_tb; | ||||
|     const void *tb_ptr = itb->tc.ptr; | ||||
| @@ -456,8 +458,7 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit) | ||||
|  | ||||
|     qemu_thread_jit_execute(); | ||||
|     ret = tcg_qemu_tb_exec(env, tb_ptr); | ||||
|     cpu->neg.can_do_io = true; | ||||
|     qemu_plugin_disable_mem_helpers(cpu); | ||||
|     cpu->can_do_io = 1; | ||||
|     /* | ||||
|      * TODO: Delay swapping back to the read-write region of the TB | ||||
|      * until we actually need to modify the TB.  The read-only copy, | ||||
| @@ -486,10 +487,10 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit) | ||||
|             cc->set_pc(cpu, last_tb->pc); | ||||
|         } | ||||
|         if (qemu_loglevel_mask(CPU_LOG_EXEC)) { | ||||
|             vaddr pc = log_pc(cpu, last_tb); | ||||
|             target_ulong pc = log_pc(cpu, last_tb); | ||||
|             if (qemu_log_in_addr_range(pc)) { | ||||
|                 qemu_log("Stopped execution of TB chain before %p [%016" | ||||
|                          VADDR_PRIx "] %s\n", | ||||
|                 qemu_log("Stopped execution of TB chain before %p [" | ||||
|                          TARGET_FMT_lx "] %s\n", | ||||
|                          last_tb->tc.ptr, pc, lookup_symbol(pc)); | ||||
|             } | ||||
|         } | ||||
| @@ -525,51 +526,14 @@ static void cpu_exec_exit(CPUState *cpu) | ||||
|     if (cc->tcg_ops->cpu_exec_exit) { | ||||
|         cc->tcg_ops->cpu_exec_exit(cpu); | ||||
|     } | ||||
| } | ||||
|  | ||||
| static void cpu_exec_longjmp_cleanup(CPUState *cpu) | ||||
| { | ||||
|     /* Non-buggy compilers preserve this; assert the correct value. */ | ||||
|     g_assert(cpu == current_cpu); | ||||
|  | ||||
| #ifdef CONFIG_USER_ONLY | ||||
|     clear_helper_retaddr(); | ||||
|     if (have_mmap_lock()) { | ||||
|         mmap_unlock(); | ||||
|     } | ||||
| #else | ||||
|     /* | ||||
|      * For softmmu, a tlb_fill fault during translation will land here, | ||||
|      * and we need to release any page locks held.  In system mode we | ||||
|      * have one tcg_ctx per thread, so we know it was this cpu doing | ||||
|      * the translation. | ||||
|      * | ||||
|      * Alternative 1: Install a cleanup to be called via an exception | ||||
|      * handling safe longjmp.  It seems plausible that all our hosts | ||||
|      * support such a thing.  We'd have to properly register unwind info | ||||
|      * for the JIT for EH, rather that just for GDB. | ||||
|      * | ||||
|      * Alternative 2: Set and restore cpu->jmp_env in tb_gen_code to | ||||
|      * capture the cpu_loop_exit longjmp, perform the cleanup, and | ||||
|      * jump again to arrive here. | ||||
|      */ | ||||
|     if (tcg_ctx->gen_tb) { | ||||
|         tb_unlock_pages(tcg_ctx->gen_tb); | ||||
|         tcg_ctx->gen_tb = NULL; | ||||
|     } | ||||
| #endif | ||||
|     if (qemu_mutex_iothread_locked()) { | ||||
|         qemu_mutex_unlock_iothread(); | ||||
|     } | ||||
|     assert_no_pages_locked(); | ||||
|     QEMU_PLUGIN_ASSERT(cpu->plugin_mem_cbs == NULL); | ||||
| } | ||||
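The cpu_exec_longjmp_cleanup() helper above centralises the locks that must be dropped when a fault longjmps back out of translation. As a rough, self-contained illustration of that sigsetjmp/siglongjmp discipline (hypothetical lock and function names, not QEMU's own APIs):

#include <pthread.h>
#include <setjmp.h>
#include <stdbool.h>
#include <stdio.h>

static sigjmp_buf jmp_env;
static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static bool big_lock_held;

/* Stand-in for a fault deep inside translation: unwind to the setjmp point. */
static void fault_path(void)
{
    siglongjmp(jmp_env, 1);
}

static void run_once(void)
{
    if (sigsetjmp(jmp_env, 0) != 0) {
        /* Longjmp landed here: drop whatever is still held, then recover. */
        if (big_lock_held) {
            pthread_mutex_unlock(&big_lock);
            big_lock_held = false;
        }
        printf("recovered from fault\n");
        return;
    }

    pthread_mutex_lock(&big_lock);
    big_lock_held = true;
    fault_path();               /* does not return normally */
}

int main(void)
{
    run_once();
    return 0;
}

The essential point is that any lock taken after the sigsetjmp() point has to be released on the recovery path before execution is retried.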
|  | ||||
| void cpu_exec_step_atomic(CPUState *cpu) | ||||
| { | ||||
|     CPUArchState *env = cpu_env(cpu); | ||||
|     CPUArchState *env = cpu->env_ptr; | ||||
|     TranslationBlock *tb; | ||||
|     vaddr pc; | ||||
|     uint64_t cs_base; | ||||
|     target_ulong cs_base, pc; | ||||
|     uint32_t flags, cflags; | ||||
|     int tb_exit; | ||||
|  | ||||
| @@ -606,7 +570,17 @@ void cpu_exec_step_atomic(CPUState *cpu) | ||||
|         cpu_tb_exec(cpu, tb, &tb_exit); | ||||
|         cpu_exec_exit(cpu); | ||||
|     } else { | ||||
|         cpu_exec_longjmp_cleanup(cpu); | ||||
| #ifndef CONFIG_SOFTMMU | ||||
|         clear_helper_retaddr(); | ||||
|         if (have_mmap_lock()) { | ||||
|             mmap_unlock(); | ||||
|         } | ||||
| #endif | ||||
|         if (qemu_mutex_iothread_locked()) { | ||||
|             qemu_mutex_unlock_iothread(); | ||||
|         } | ||||
|         assert_no_pages_locked(); | ||||
|         qemu_plugin_disable_mem_helpers(cpu); | ||||
|     } | ||||
|  | ||||
|     /* | ||||
| @@ -718,10 +692,10 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret) | ||||
|     if (cpu->exception_index < 0) { | ||||
| #ifndef CONFIG_USER_ONLY | ||||
|         if (replay_has_exception() | ||||
|             && cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0) { | ||||
|             && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) { | ||||
|             /* Execute just one insn to trigger exception pending in the log */ | ||||
|             cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT) | ||||
|                 | CF_LAST_IO | CF_NOIRQ | 1; | ||||
|                 | CF_NOIRQ | 1; | ||||
|         } | ||||
| #endif | ||||
|         return false; | ||||
| @@ -808,7 +782,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, | ||||
|      * Ensure zeroing happens before reading cpu->exit_request or | ||||
|      * cpu->interrupt_request (see also smp_wmb in cpu_exit()) | ||||
|      */ | ||||
|     qatomic_set_mb(&cpu->neg.icount_decr.u16.high, 0); | ||||
|     qatomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0); | ||||
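The set-with-full-barrier above (qatomic_set_mb / qatomic_mb_set in the two versions) is one half of a Dekker-style handshake: the flag is cleared with a full barrier before interrupt_request and exit_request are re-read, so a concurrent cpu_exit() that sets both cannot be missed. A small C11 sketch of that pattern, using generic flags rather than the QEMU fields:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool exit_request;   /* "please stop" request                 */
static atomic_int  icount_flag;    /* mirror flag the executor polls/clears */

/* Requester: publish the request, then raise the flag (smp_wmb analogue). */
static void request_exit(void)
{
    atomic_store_explicit(&exit_request, true, memory_order_relaxed);
    atomic_thread_fence(memory_order_release);
    atomic_store_explicit(&icount_flag, 1, memory_order_relaxed);
}

/* Executor: clear the flag with a full barrier, then re-check the request. */
static bool ack_and_check(void)
{
    atomic_store_explicit(&icount_flag, 0, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);   /* stands in for the full barrier */
    return atomic_load_explicit(&exit_request, memory_order_relaxed);
}

int main(void)
{
    request_exit();
    return ack_and_check() ? 0 : 1;
}

Here the seq_cst fence stands in for the full barrier of the QEMU primitive and the release fence for the smp_wmb() mentioned in the comment; the exact QEMU helpers differ.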
|  | ||||
|     if (unlikely(qatomic_read(&cpu->interrupt_request))) { | ||||
|         int interrupt_request; | ||||
| @@ -899,7 +873,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, | ||||
|     if (unlikely(qatomic_read(&cpu->exit_request)) | ||||
|         || (icount_enabled() | ||||
|             && (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT) | ||||
|             && cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0)) { | ||||
|             && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) { | ||||
|         qatomic_set(&cpu->exit_request, 0); | ||||
|         if (cpu->exception_index == -1) { | ||||
|             cpu->exception_index = EXCP_INTERRUPT; | ||||
| @@ -911,8 +885,8 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, | ||||
| } | ||||
|  | ||||
| static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb, | ||||
|                                     vaddr pc, TranslationBlock **last_tb, | ||||
|                                     int *tb_exit) | ||||
|                                     target_ulong pc, | ||||
|                                     TranslationBlock **last_tb, int *tb_exit) | ||||
| { | ||||
|     int32_t insns_left; | ||||
|  | ||||
| @@ -924,7 +898,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb, | ||||
|     } | ||||
|  | ||||
|     *last_tb = NULL; | ||||
|     insns_left = qatomic_read(&cpu->neg.icount_decr.u32); | ||||
|     insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32); | ||||
|     if (insns_left < 0) { | ||||
|         /* Something asked us to stop executing chained TBs; just | ||||
|          * continue round the main loop. Whatever requested the exit | ||||
| @@ -943,7 +917,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb, | ||||
|     icount_update(cpu); | ||||
|     /* Refill decrementer and continue execution.  */ | ||||
|     insns_left = MIN(0xffff, cpu->icount_budget); | ||||
|     cpu->neg.icount_decr.u16.low = insns_left; | ||||
|     cpu_neg(cpu)->icount_decr.u16.low = insns_left; | ||||
|     cpu->icount_extra = cpu->icount_budget - insns_left; | ||||
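The refill above splits the remaining instruction budget: at most 0xffff instructions go into the 16-bit decrementer that generated code actually counts down, and the rest is parked in icount_extra for the next refill. A standalone illustration of that split (plain C, field names are just for the example):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    uint64_t budget = 100000;           /* instructions still allowed to run */

    /* Only the low 16 bits fit in the in-TB decrementer. */
    uint64_t decr_low = MIN(0xffff, budget);
    uint64_t extra    = budget - decr_low;

    printf("decrementer=%" PRIu64 " extra=%" PRIu64 "\n", decr_low, extra);
    return 0;
}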
|  | ||||
|     /* | ||||
| @@ -973,11 +947,10 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc) | ||||
|  | ||||
|         while (!cpu_handle_interrupt(cpu, &last_tb)) { | ||||
|             TranslationBlock *tb; | ||||
|             vaddr pc; | ||||
|             uint64_t cs_base; | ||||
|             target_ulong cs_base, pc; | ||||
|             uint32_t flags, cflags; | ||||
|  | ||||
|             cpu_get_tb_cpu_state(cpu_env(cpu), &pc, &cs_base, &flags); | ||||
|             cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags); | ||||
|  | ||||
|             /* | ||||
|              * When requested, use an exact setting for cflags for the next | ||||
| @@ -999,27 +972,18 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc) | ||||
|  | ||||
|             tb = tb_lookup(cpu, pc, cs_base, flags, cflags); | ||||
|             if (tb == NULL) { | ||||
|                 CPUJumpCache *jc; | ||||
|                 uint32_t h; | ||||
|  | ||||
|                 mmap_lock(); | ||||
|                 tb = tb_gen_code(cpu, pc, cs_base, flags, cflags); | ||||
|                 mmap_unlock(); | ||||
|  | ||||
|                 /* | ||||
|                  * We add the TB in the virtual pc hash table | ||||
|                  * for the fast lookup | ||||
|                  */ | ||||
|                 h = tb_jmp_cache_hash_func(pc); | ||||
|                 jc = cpu->tb_jmp_cache; | ||||
|                 if (cflags & CF_PCREL) { | ||||
|                     jc->array[h].pc = pc; | ||||
|                     /* Ensure pc is written first. */ | ||||
|                     qatomic_store_release(&jc->array[h].tb, tb); | ||||
|                 } else { | ||||
|                     /* Use the pc value already stored in tb->pc. */ | ||||
|                     qatomic_set(&jc->array[h].tb, tb); | ||||
|                 } | ||||
|                 /* Use the pc value already stored in tb->pc. */ | ||||
|                 qatomic_set(&cpu->tb_jmp_cache->array[h].tb, tb); | ||||
|             } | ||||
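The CF_PCREL branch shown in this hunk publishes a (pc, tb) pair into the jump cache: the pc field is written first and the tb pointer is store-released, so a reader that load-acquires tb is guaranteed to observe the matching pc. A minimal C11 sketch of that publish/lookup pattern, with standalone types rather than the QEMU jump-cache structures:

#include <stdatomic.h>
#include <stdint.h>
#include <stddef.h>

struct entry {
    uint64_t pc;                /* plain data, written before publication */
    _Atomic(void *) tb;         /* publication point for the whole entry  */
};

static void publish(struct entry *e, uint64_t pc, void *tb)
{
    e->pc = pc;                                      /* 1: data first   */
    atomic_store_explicit(&e->tb, tb,
                          memory_order_release);     /* 2: then publish */
}

static void *lookup(struct entry *e, uint64_t pc)
{
    void *tb = atomic_load_explicit(&e->tb, memory_order_acquire);

    /* The acquire load orders the pc read after the tb read. */
    if (tb != NULL && e->pc == pc) {
        return tb;
    }
    return NULL;
}

int main(void)
{
    static struct entry e;
    static int fake_tb;

    publish(&e, 0x400123, &fake_tb);
    return lookup(&e, 0x400123) == &fake_tb ? 0 : 1;
}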
|  | ||||
| #ifndef CONFIG_USER_ONLY | ||||
| @@ -1040,6 +1004,7 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc) | ||||
|  | ||||
|             cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit); | ||||
|  | ||||
|             QEMU_PLUGIN_ASSERT(cpu->plugin_mem_cbs == NULL); | ||||
|             /* Try to align the host and virtual clocks | ||||
|                if the guest is in advance */ | ||||
|             align_clocks(sc, cpu); | ||||
| @@ -1052,7 +1017,21 @@ static int cpu_exec_setjmp(CPUState *cpu, SyncClocks *sc) | ||||
| { | ||||
|     /* Prepare setjmp context for exception handling. */ | ||||
|     if (unlikely(sigsetjmp(cpu->jmp_env, 0) != 0)) { | ||||
|         cpu_exec_longjmp_cleanup(cpu); | ||||
|         /* Non-buggy compilers preserve this; assert the correct value. */ | ||||
|         g_assert(cpu == current_cpu); | ||||
|  | ||||
| #ifndef CONFIG_SOFTMMU | ||||
|         clear_helper_retaddr(); | ||||
|         if (have_mmap_lock()) { | ||||
|             mmap_unlock(); | ||||
|         } | ||||
| #endif | ||||
|         if (qemu_mutex_iothread_locked()) { | ||||
|             qemu_mutex_unlock_iothread(); | ||||
|         } | ||||
|         qemu_plugin_disable_mem_helpers(cpu); | ||||
|  | ||||
|         assert_no_pages_locked(); | ||||
|     } | ||||
|  | ||||
|     return cpu_exec_loop(cpu, sc); | ||||
| @@ -1089,7 +1068,7 @@ int cpu_exec(CPUState *cpu) | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| bool tcg_exec_realizefn(CPUState *cpu, Error **errp) | ||||
| void tcg_exec_realizefn(CPUState *cpu, Error **errp) | ||||
| { | ||||
|     static bool tcg_target_initialized; | ||||
|     CPUClass *cc = CPU_GET_CLASS(cpu); | ||||
| @@ -1105,8 +1084,6 @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp) | ||||
|     tcg_iommu_init_notifier_list(cpu); | ||||
| #endif /* !CONFIG_USER_ONLY */ | ||||
|     /* qemu_plugin_vcpu_init_hook delayed until cpu_index assigned. */ | ||||
|  | ||||
|     return true; | ||||
| } | ||||
|  | ||||
| /* undo the initializations in reverse order */ | ||||
|   | ||||
							
								
								
									
accel/tcg/cputlb.c: 2675 changed lines (file diff suppressed because it is too large)
											
										
									
								
							| @@ -1,26 +0,0 @@ | ||||
| /* | ||||
|  * Internal execution defines for qemu (target agnostic) | ||||
|  * | ||||
|  *  Copyright (c) 2003 Fabrice Bellard | ||||
|  * | ||||
|  * SPDX-License-Identifier: LGPL-2.1-or-later | ||||
|  */ | ||||
|  | ||||
| #ifndef ACCEL_TCG_INTERNAL_COMMON_H | ||||
| #define ACCEL_TCG_INTERNAL_COMMON_H | ||||
|  | ||||
| #include "exec/translation-block.h" | ||||
|  | ||||
| extern int64_t max_delay; | ||||
| extern int64_t max_advance; | ||||
|  | ||||
| /* | ||||
|  * Return true if CS is not running in parallel with other cpus, either | ||||
|  * because there are no other cpus or we are within an exclusive context. | ||||
|  */ | ||||
| static inline bool cpu_in_serial_context(CPUState *cs) | ||||
| { | ||||
|     return !(cs->tcg_cflags & CF_PARALLEL) || cpu_in_exclusive_context(cs); | ||||
| } | ||||
|  | ||||
| #endif | ||||
| @@ -1,132 +0,0 @@ | ||||
| /* | ||||
|  * Internal execution defines for qemu (target specific) | ||||
|  * | ||||
|  *  Copyright (c) 2003 Fabrice Bellard | ||||
|  * | ||||
|  * SPDX-License-Identifier: LGPL-2.1-or-later | ||||
|  */ | ||||
|  | ||||
| #ifndef ACCEL_TCG_INTERNAL_TARGET_H | ||||
| #define ACCEL_TCG_INTERNAL_TARGET_H | ||||
|  | ||||
| #include "exec/exec-all.h" | ||||
| #include "exec/translate-all.h" | ||||
|  | ||||
| /* | ||||
|  * Access to the various translations structures need to be serialised | ||||
|  * via locks for consistency.  In user-mode emulation access to the | ||||
|  * memory related structures are protected with mmap_lock. | ||||
|  * In !user-mode we use per-page locks. | ||||
|  */ | ||||
| #ifdef CONFIG_USER_ONLY | ||||
| #define assert_memory_lock() tcg_debug_assert(have_mmap_lock()) | ||||
| #else | ||||
| #define assert_memory_lock() | ||||
| #endif | ||||
|  | ||||
| #if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG) | ||||
| void assert_no_pages_locked(void); | ||||
| #else | ||||
| static inline void assert_no_pages_locked(void) { } | ||||
| #endif | ||||
|  | ||||
| #ifdef CONFIG_USER_ONLY | ||||
| static inline void page_table_config_init(void) { } | ||||
| #else | ||||
| void page_table_config_init(void); | ||||
| #endif | ||||
|  | ||||
| #ifdef CONFIG_USER_ONLY | ||||
| /* | ||||
|  * For user-only, page_protect sets the page read-only. | ||||
|  * Since most execution is already on read-only pages, and we'd need to | ||||
|  * account for other TBs on the same page, defer undoing any page protection | ||||
|  * until we receive the write fault. | ||||
|  */ | ||||
| static inline void tb_lock_page0(tb_page_addr_t p0) | ||||
| { | ||||
|     page_protect(p0); | ||||
| } | ||||
|  | ||||
| static inline void tb_lock_page1(tb_page_addr_t p0, tb_page_addr_t p1) | ||||
| { | ||||
|     page_protect(p1); | ||||
| } | ||||
|  | ||||
| static inline void tb_unlock_page1(tb_page_addr_t p0, tb_page_addr_t p1) { } | ||||
| static inline void tb_unlock_pages(TranslationBlock *tb) { } | ||||
| #else | ||||
| void tb_lock_page0(tb_page_addr_t); | ||||
| void tb_lock_page1(tb_page_addr_t, tb_page_addr_t); | ||||
| void tb_unlock_page1(tb_page_addr_t, tb_page_addr_t); | ||||
| void tb_unlock_pages(TranslationBlock *); | ||||
| #endif | ||||
|  | ||||
| #ifdef CONFIG_SOFTMMU | ||||
| void tb_invalidate_phys_range_fast(ram_addr_t ram_addr, | ||||
|                                    unsigned size, | ||||
|                                    uintptr_t retaddr); | ||||
| G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr); | ||||
| #endif /* CONFIG_SOFTMMU */ | ||||
|  | ||||
| TranslationBlock *tb_gen_code(CPUState *cpu, vaddr pc, | ||||
|                               uint64_t cs_base, uint32_t flags, | ||||
|                               int cflags); | ||||
| void page_init(void); | ||||
| void tb_htable_init(void); | ||||
| void tb_reset_jump(TranslationBlock *tb, int n); | ||||
| TranslationBlock *tb_link_page(TranslationBlock *tb); | ||||
| bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc); | ||||
| void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, | ||||
|                                uintptr_t host_pc); | ||||
|  | ||||
| bool tcg_exec_realizefn(CPUState *cpu, Error **errp); | ||||
| void tcg_exec_unrealizefn(CPUState *cpu); | ||||
|  | ||||
| /* Return the current PC from CPU, which may be cached in TB. */ | ||||
| static inline vaddr log_pc(CPUState *cpu, const TranslationBlock *tb) | ||||
| { | ||||
|     if (tb_cflags(tb) & CF_PCREL) { | ||||
|         return cpu->cc->get_pc(cpu); | ||||
|     } else { | ||||
|         return tb->pc; | ||||
|     } | ||||
| } | ||||
|  | ||||
| extern bool one_insn_per_tb; | ||||
|  | ||||
| /** | ||||
|  * tcg_req_mo: | ||||
|  * @type: TCGBar | ||||
|  * | ||||
|  * Filter @type to the barrier that is required for the guest | ||||
|  * memory ordering vs the host memory ordering.  A non-zero | ||||
|  * result indicates that some barrier is required. | ||||
|  * | ||||
|  * If TCG_GUEST_DEFAULT_MO is not defined, assume that the | ||||
|  * guest requires strict ordering. | ||||
|  * | ||||
|  * This is a macro so that it's constant even without optimization. | ||||
|  */ | ||||
| #ifdef TCG_GUEST_DEFAULT_MO | ||||
| # define tcg_req_mo(type) \ | ||||
|     ((type) & TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) | ||||
| #else | ||||
| # define tcg_req_mo(type) ((type) & ~TCG_TARGET_DEFAULT_MO) | ||||
| #endif | ||||
|  | ||||
| /** | ||||
|  * cpu_req_mo: | ||||
|  * @type: TCGBar | ||||
|  * | ||||
|  * If tcg_req_mo indicates a barrier for @type is required | ||||
|  * for the guest memory model, issue a host memory barrier. | ||||
|  */ | ||||
| #define cpu_req_mo(type)          \ | ||||
|     do {                          \ | ||||
|         if (tcg_req_mo(type)) {   \ | ||||
|             smp_mb();             \ | ||||
|         }                         \ | ||||
|     } while (0) | ||||
|  | ||||
| #endif /* ACCEL_TCG_INTERNAL_H */ | ||||
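The tcg_req_mo()/cpu_req_mo() pair above reduces to bit masking: keep only the barrier bits that the guest memory model requires and the host does not already guarantee, and emit smp_mb() when anything survives. A tiny self-contained illustration, with made-up barrier bits standing in for TCGBar values:

#include <stdio.h>

/* Hypothetical barrier bits standing in for TCGBar values. */
#define BAR_LD_LD  (1 << 0)
#define BAR_LD_ST  (1 << 1)
#define BAR_ST_LD  (1 << 2)
#define BAR_ST_ST  (1 << 3)

/* Example models: a strongly ordered guest on an x86-like host where
 * only store-to-load reordering can happen. */
#define GUEST_DEFAULT_MO  (BAR_LD_LD | BAR_LD_ST | BAR_ST_LD | BAR_ST_ST)
#define TARGET_DEFAULT_MO (BAR_LD_LD | BAR_LD_ST | BAR_ST_ST)

#define req_mo(type) ((type) & GUEST_DEFAULT_MO & ~TARGET_DEFAULT_MO)

int main(void)
{
    int type = BAR_LD_LD | BAR_ST_LD;   /* ordering demanded by a memory op */

    if (req_mo(type)) {
        /* Only BAR_ST_LD survives the mask, so a real barrier is emitted. */
        printf("emit host barrier (mask=%#x)\n", req_mo(type));
    } else {
        printf("host ordering is already strong enough\n");
    }
    return 0;
}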
							
								
								
									
accel/tcg/internal.h: new file, 70 lines
									
								
							| @@ -0,0 +1,70 @@ | ||||
| /* | ||||
|  * Internal execution defines for qemu | ||||
|  * | ||||
|  *  Copyright (c) 2003 Fabrice Bellard | ||||
|  * | ||||
|  * SPDX-License-Identifier: LGPL-2.1-or-later | ||||
|  */ | ||||
|  | ||||
| #ifndef ACCEL_TCG_INTERNAL_H | ||||
| #define ACCEL_TCG_INTERNAL_H | ||||
|  | ||||
| #include "exec/exec-all.h" | ||||
|  | ||||
| /* | ||||
|  * Access to the various translations structures need to be serialised | ||||
|  * via locks for consistency.  In user-mode emulation access to the | ||||
|  * memory related structures are protected with mmap_lock. | ||||
|  * In !user-mode we use per-page locks. | ||||
|  */ | ||||
| #ifdef CONFIG_SOFTMMU | ||||
| #define assert_memory_lock() | ||||
| #else | ||||
| #define assert_memory_lock() tcg_debug_assert(have_mmap_lock()) | ||||
| #endif | ||||
|  | ||||
| #if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG) | ||||
| void assert_no_pages_locked(void); | ||||
| #else | ||||
| static inline void assert_no_pages_locked(void) { } | ||||
| #endif | ||||
|  | ||||
| #ifdef CONFIG_USER_ONLY | ||||
| static inline void page_table_config_init(void) { } | ||||
| #else | ||||
| void page_table_config_init(void); | ||||
| #endif | ||||
|  | ||||
| #ifdef CONFIG_SOFTMMU | ||||
| void tb_invalidate_phys_range_fast(ram_addr_t ram_addr, | ||||
|                                    unsigned size, | ||||
|                                    uintptr_t retaddr); | ||||
| G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr); | ||||
| #endif /* CONFIG_SOFTMMU */ | ||||
|  | ||||
| TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc, | ||||
|                               target_ulong cs_base, uint32_t flags, | ||||
|                               int cflags); | ||||
| void page_init(void); | ||||
| void tb_htable_init(void); | ||||
| void tb_reset_jump(TranslationBlock *tb, int n); | ||||
| TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, | ||||
|                                tb_page_addr_t phys_page2); | ||||
| bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc); | ||||
| void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, | ||||
|                                uintptr_t host_pc); | ||||
|  | ||||
| /* Return the current PC from CPU, which may be cached in TB. */ | ||||
| static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb) | ||||
| { | ||||
|     if (tb_cflags(tb) & CF_PCREL) { | ||||
|         return cpu->cc->get_pc(cpu); | ||||
|     } else { | ||||
|         return tb->pc; | ||||
|     } | ||||
| } | ||||
|  | ||||
| extern int64_t max_delay; | ||||
| extern int64_t max_advance; | ||||
|  | ||||
| #endif /* ACCEL_TCG_INTERNAL_H */ | ||||
										
											
(file diff suppressed because it is too large)
											
										
									
								
							| @@ -8,231 +8,6 @@ | ||||
|  * This work is licensed under the terms of the GNU GPL, version 2 or later. | ||||
|  * See the COPYING file in the top-level directory. | ||||
|  */ | ||||
| /* | ||||
|  * Load helpers for tcg-ldst.h | ||||
|  */ | ||||
| 
 | ||||
| tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr, | ||||
|                                  MemOpIdx oi, uintptr_t retaddr) | ||||
| { | ||||
|     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8); | ||||
|     return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD); | ||||
| } | ||||
| 
 | ||||
| tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr, | ||||
|                                  MemOpIdx oi, uintptr_t retaddr) | ||||
| { | ||||
|     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16); | ||||
|     return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD); | ||||
| } | ||||
| 
 | ||||
| tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr, | ||||
|                                  MemOpIdx oi, uintptr_t retaddr) | ||||
| { | ||||
|     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32); | ||||
|     return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD); | ||||
| } | ||||
| 
 | ||||
| uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr, | ||||
|                         MemOpIdx oi, uintptr_t retaddr) | ||||
| { | ||||
|     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64); | ||||
|     return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD); | ||||
| } | ||||
| 
 | ||||
| /* | ||||
|  * Provide signed versions of the load routines as well.  We can of course | ||||
|  * avoid this for 64-bit data, or for 32-bit data on 32-bit host. | ||||
|  */ | ||||
| 
 | ||||
| tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr, | ||||
|                                  MemOpIdx oi, uintptr_t retaddr) | ||||
| { | ||||
|     return (int8_t)helper_ldub_mmu(env, addr, oi, retaddr); | ||||
| } | ||||
| 
 | ||||
| tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr, | ||||
|                                  MemOpIdx oi, uintptr_t retaddr) | ||||
| { | ||||
|     return (int16_t)helper_lduw_mmu(env, addr, oi, retaddr); | ||||
| } | ||||
| 
 | ||||
| tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr, | ||||
|                                  MemOpIdx oi, uintptr_t retaddr) | ||||
| { | ||||
|     return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr); | ||||
| } | ||||
| 
 | ||||
| Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr, | ||||
|                        MemOpIdx oi, uintptr_t retaddr) | ||||
| { | ||||
|     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128); | ||||
|     return do_ld16_mmu(env_cpu(env), addr, oi, retaddr); | ||||
| } | ||||
| 
 | ||||
| Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, uint32_t oi) | ||||
| { | ||||
|     return helper_ld16_mmu(env, addr, oi, GETPC()); | ||||
| } | ||||
| 
 | ||||
| /* | ||||
|  * Store helpers for tcg-ldst.h | ||||
|  */ | ||||
| 
 | ||||
| void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val, | ||||
|                     MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8); | ||||
|     do_st1_mmu(env_cpu(env), addr, val, oi, ra); | ||||
| } | ||||
| 
 | ||||
| void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val, | ||||
|                     MemOpIdx oi, uintptr_t retaddr) | ||||
| { | ||||
|     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16); | ||||
|     do_st2_mmu(env_cpu(env), addr, val, oi, retaddr); | ||||
| } | ||||
| 
 | ||||
| void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val, | ||||
|                     MemOpIdx oi, uintptr_t retaddr) | ||||
| { | ||||
|     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32); | ||||
|     do_st4_mmu(env_cpu(env), addr, val, oi, retaddr); | ||||
| } | ||||
| 
 | ||||
| void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val, | ||||
|                     MemOpIdx oi, uintptr_t retaddr) | ||||
| { | ||||
|     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64); | ||||
|     do_st8_mmu(env_cpu(env), addr, val, oi, retaddr); | ||||
| } | ||||
| 
 | ||||
| void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val, | ||||
|                      MemOpIdx oi, uintptr_t retaddr) | ||||
| { | ||||
|     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128); | ||||
|     do_st16_mmu(env_cpu(env), addr, val, oi, retaddr); | ||||
| } | ||||
| 
 | ||||
| void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi) | ||||
| { | ||||
|     helper_st16_mmu(env, addr, val, oi, GETPC()); | ||||
| } | ||||
| 
 | ||||
| /* | ||||
|  * Load helpers for cpu_ldst.h | ||||
|  */ | ||||
| 
 | ||||
| static void plugin_load_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi) | ||||
| { | ||||
|     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); | ||||
| } | ||||
| 
 | ||||
| uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     uint8_t ret; | ||||
| 
 | ||||
|     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB); | ||||
|     ret = do_ld1_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD); | ||||
|     plugin_load_cb(env, addr, oi); | ||||
|     return ret; | ||||
| } | ||||
| 
 | ||||
| uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr, | ||||
|                      MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     uint16_t ret; | ||||
| 
 | ||||
|     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16); | ||||
|     ret = do_ld2_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD); | ||||
|     plugin_load_cb(env, addr, oi); | ||||
|     return ret; | ||||
| } | ||||
| 
 | ||||
| uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr, | ||||
|                      MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     uint32_t ret; | ||||
| 
 | ||||
|     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32); | ||||
|     ret = do_ld4_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD); | ||||
|     plugin_load_cb(env, addr, oi); | ||||
|     return ret; | ||||
| } | ||||
| 
 | ||||
| uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr, | ||||
|                      MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     uint64_t ret; | ||||
| 
 | ||||
|     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64); | ||||
|     ret = do_ld8_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD); | ||||
|     plugin_load_cb(env, addr, oi); | ||||
|     return ret; | ||||
| } | ||||
| 
 | ||||
| Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr, | ||||
|                     MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     Int128 ret; | ||||
| 
 | ||||
|     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128); | ||||
|     ret = do_ld16_mmu(env_cpu(env), addr, oi, ra); | ||||
|     plugin_load_cb(env, addr, oi); | ||||
|     return ret; | ||||
| } | ||||
| 
 | ||||
| /* | ||||
|  * Store helpers for cpu_ldst.h | ||||
|  */ | ||||
| 
 | ||||
| static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi) | ||||
| { | ||||
|     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); | ||||
| } | ||||
| 
 | ||||
| void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val, | ||||
|                  MemOpIdx oi, uintptr_t retaddr) | ||||
| { | ||||
|     helper_stb_mmu(env, addr, val, oi, retaddr); | ||||
|     plugin_store_cb(env, addr, oi); | ||||
| } | ||||
| 
 | ||||
| void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val, | ||||
|                  MemOpIdx oi, uintptr_t retaddr) | ||||
| { | ||||
|     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16); | ||||
|     do_st2_mmu(env_cpu(env), addr, val, oi, retaddr); | ||||
|     plugin_store_cb(env, addr, oi); | ||||
| } | ||||
| 
 | ||||
| void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val, | ||||
|                     MemOpIdx oi, uintptr_t retaddr) | ||||
| { | ||||
|     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32); | ||||
|     do_st4_mmu(env_cpu(env), addr, val, oi, retaddr); | ||||
|     plugin_store_cb(env, addr, oi); | ||||
| } | ||||
| 
 | ||||
| void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val, | ||||
|                  MemOpIdx oi, uintptr_t retaddr) | ||||
| { | ||||
|     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64); | ||||
|     do_st8_mmu(env_cpu(env), addr, val, oi, retaddr); | ||||
|     plugin_store_cb(env, addr, oi); | ||||
| } | ||||
| 
 | ||||
| void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val, | ||||
|                   MemOpIdx oi, uintptr_t retaddr) | ||||
| { | ||||
|     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128); | ||||
|     do_st16_mmu(env_cpu(env), addr, val, oi, retaddr); | ||||
|     plugin_store_cb(env, addr, oi); | ||||
| } | ||||
| 
 | ||||
| /* | ||||
|  * Wrappers of the above | ||||
|  */ | ||||
| 
 | ||||
| uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, | ||||
|                             int mmu_idx, uintptr_t ra) | ||||
| @@ -251,7 +26,7 @@ uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, | ||||
|                                int mmu_idx, uintptr_t ra) | ||||
| { | ||||
|     MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx); | ||||
|     return cpu_ldw_mmu(env, addr, oi, ra); | ||||
|     return cpu_ldw_be_mmu(env, addr, oi, ra); | ||||
| } | ||||
| 
 | ||||
| int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, | ||||
| @@ -264,21 +39,21 @@ uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, | ||||
|                               int mmu_idx, uintptr_t ra) | ||||
| { | ||||
|     MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx); | ||||
|     return cpu_ldl_mmu(env, addr, oi, ra); | ||||
|     return cpu_ldl_be_mmu(env, addr, oi, ra); | ||||
| } | ||||
| 
 | ||||
| uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, | ||||
|                               int mmu_idx, uintptr_t ra) | ||||
| { | ||||
|     MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx); | ||||
|     return cpu_ldq_mmu(env, addr, oi, ra); | ||||
|     return cpu_ldq_be_mmu(env, addr, oi, ra); | ||||
| } | ||||
| 
 | ||||
| uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, | ||||
|                                int mmu_idx, uintptr_t ra) | ||||
| { | ||||
|     MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx); | ||||
|     return cpu_ldw_mmu(env, addr, oi, ra); | ||||
|     return cpu_ldw_le_mmu(env, addr, oi, ra); | ||||
| } | ||||
| 
 | ||||
| int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, | ||||
| @@ -291,14 +66,14 @@ uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, | ||||
|                               int mmu_idx, uintptr_t ra) | ||||
| { | ||||
|     MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx); | ||||
|     return cpu_ldl_mmu(env, addr, oi, ra); | ||||
|     return cpu_ldl_le_mmu(env, addr, oi, ra); | ||||
| } | ||||
| 
 | ||||
| uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, | ||||
|                               int mmu_idx, uintptr_t ra) | ||||
| { | ||||
|     MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx); | ||||
|     return cpu_ldq_mmu(env, addr, oi, ra); | ||||
|     return cpu_ldq_le_mmu(env, addr, oi, ra); | ||||
| } | ||||
| 
 | ||||
| void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, | ||||
| @@ -312,42 +87,42 @@ void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, | ||||
|                           int mmu_idx, uintptr_t ra) | ||||
| { | ||||
|     MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx); | ||||
|     cpu_stw_mmu(env, addr, val, oi, ra); | ||||
|     cpu_stw_be_mmu(env, addr, val, oi, ra); | ||||
| } | ||||
| 
 | ||||
| void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, | ||||
|                           int mmu_idx, uintptr_t ra) | ||||
| { | ||||
|     MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx); | ||||
|     cpu_stl_mmu(env, addr, val, oi, ra); | ||||
|     cpu_stl_be_mmu(env, addr, val, oi, ra); | ||||
| } | ||||
| 
 | ||||
| void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, | ||||
|                           int mmu_idx, uintptr_t ra) | ||||
| { | ||||
|     MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx); | ||||
|     cpu_stq_mmu(env, addr, val, oi, ra); | ||||
|     cpu_stq_be_mmu(env, addr, val, oi, ra); | ||||
| } | ||||
| 
 | ||||
| void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, | ||||
|                           int mmu_idx, uintptr_t ra) | ||||
| { | ||||
|     MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx); | ||||
|     cpu_stw_mmu(env, addr, val, oi, ra); | ||||
|     cpu_stw_le_mmu(env, addr, val, oi, ra); | ||||
| } | ||||
| 
 | ||||
| void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, | ||||
|                           int mmu_idx, uintptr_t ra) | ||||
| { | ||||
|     MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx); | ||||
|     cpu_stl_mmu(env, addr, val, oi, ra); | ||||
|     cpu_stl_le_mmu(env, addr, val, oi, ra); | ||||
| } | ||||
| 
 | ||||
| void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, | ||||
|                           int mmu_idx, uintptr_t ra) | ||||
| { | ||||
|     MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx); | ||||
|     cpu_stq_mmu(env, addr, val, oi, ra); | ||||
|     cpu_stq_le_mmu(env, addr, val, oi, ra); | ||||
| } | ||||
| 
 | ||||
| /*--------------------------*/ | ||||
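Every wrapper in this block does the same two steps: pack the MemOp flags and the MMU index into one MemOpIdx with make_memop_idx(), then forward to the size-specific cpu_ldX/cpu_stX helper. A hedged sketch of that packing idea follows; the bit layout is illustrative and not necessarily QEMU's exact encoding:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t memop_idx_t;

/* Illustrative layout: MMU index in the low 4 bits, MemOp bits above them. */
static memop_idx_t make_idx(uint32_t memop, unsigned mmu_idx)
{
    return (memop << 4) | (mmu_idx & 0xf);
}

static uint32_t idx_get_memop(memop_idx_t oi)  { return oi >> 4; }
static unsigned idx_get_mmuidx(memop_idx_t oi) { return oi & 0xf; }

int main(void)
{
    /* 0x12 stands in for a MemOp value such as MO_BEUL | MO_UNALN. */
    memop_idx_t oi = make_idx(0x12, 3);

    printf("memop=%#x mmu_idx=%u\n", idx_get_memop(oi), idx_get_mmuidx(oi));
    return 0;
}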
|   | ||||
| @@ -1,9 +1,7 @@ | ||||
| tcg_ss = ss.source_set() | ||||
| common_ss.add(when: 'CONFIG_TCG', if_true: files( | ||||
|   'cpu-exec-common.c', | ||||
| )) | ||||
| tcg_ss.add(files( | ||||
|   'tcg-all.c', | ||||
|   'cpu-exec-common.c', | ||||
|   'cpu-exec.c', | ||||
|   'tb-maint.c', | ||||
|   'tcg-runtime-gvec.c', | ||||
| @@ -12,24 +10,18 @@ tcg_ss.add(files( | ||||
|   'translator.c', | ||||
| )) | ||||
| tcg_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-exec.c')) | ||||
| tcg_ss.add(when: 'CONFIG_SYSTEM_ONLY', if_false: files('user-exec-stub.c')) | ||||
| if get_option('plugins') | ||||
|   tcg_ss.add(files('plugin-gen.c')) | ||||
| endif | ||||
| tcg_ss.add(when: 'CONFIG_SOFTMMU', if_false: files('user-exec-stub.c')) | ||||
| tcg_ss.add(when: 'CONFIG_PLUGIN', if_true: [files('plugin-gen.c')]) | ||||
| tcg_ss.add(when: libdw, if_true: files('debuginfo.c')) | ||||
| tcg_ss.add(when: 'CONFIG_LINUX', if_true: files('perf.c')) | ||||
| specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss) | ||||
|  | ||||
| specific_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files( | ||||
| specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files( | ||||
|   'cputlb.c', | ||||
| )) | ||||
|  | ||||
| system_ss.add(when: ['CONFIG_TCG'], if_true: files( | ||||
|   'icount-common.c', | ||||
|   'monitor.c', | ||||
| )) | ||||
|  | ||||
| tcg_module_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files( | ||||
| tcg_module_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files( | ||||
|   'tcg-accel-ops.c', | ||||
|   'tcg-accel-ops-mttcg.c', | ||||
|   'tcg-accel-ops-icount.c', | ||||
|   | ||||
| @@ -7,8 +7,6 @@ | ||||
|  */ | ||||
|  | ||||
| #include "qemu/osdep.h" | ||||
| #include "qemu/accel.h" | ||||
| #include "qemu/qht.h" | ||||
| #include "qapi/error.h" | ||||
| #include "qapi/type-helpers.h" | ||||
| #include "qapi/qapi-commands-machine.h" | ||||
| @@ -16,9 +14,7 @@ | ||||
| #include "sysemu/cpus.h" | ||||
| #include "sysemu/cpu-timers.h" | ||||
| #include "sysemu/tcg.h" | ||||
| #include "tcg/tcg.h" | ||||
| #include "internal-common.h" | ||||
| #include "tb-context.h" | ||||
| #include "internal.h" | ||||
|  | ||||
|  | ||||
| static void dump_drift_info(GString *buf) | ||||
| @@ -40,165 +36,6 @@ static void dump_drift_info(GString *buf) | ||||
|     } | ||||
| } | ||||
|  | ||||
| static void dump_accel_info(GString *buf) | ||||
| { | ||||
|     AccelState *accel = current_accel(); | ||||
|     bool one_insn_per_tb = object_property_get_bool(OBJECT(accel), | ||||
|                                                     "one-insn-per-tb", | ||||
|                                                     &error_fatal); | ||||
|  | ||||
|     g_string_append_printf(buf, "Accelerator settings:\n"); | ||||
|     g_string_append_printf(buf, "one-insn-per-tb: %s\n\n", | ||||
|                            one_insn_per_tb ? "on" : "off"); | ||||
| } | ||||
|  | ||||
| static void print_qht_statistics(struct qht_stats hst, GString *buf) | ||||
| { | ||||
|     uint32_t hgram_opts; | ||||
|     size_t hgram_bins; | ||||
|     char *hgram; | ||||
|  | ||||
|     if (!hst.head_buckets) { | ||||
|         return; | ||||
|     } | ||||
|     g_string_append_printf(buf, "TB hash buckets     %zu/%zu " | ||||
|                            "(%0.2f%% head buckets used)\n", | ||||
|                            hst.used_head_buckets, hst.head_buckets, | ||||
|                            (double)hst.used_head_buckets / | ||||
|                            hst.head_buckets * 100); | ||||
|  | ||||
|     hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS; | ||||
|     hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT; | ||||
|     if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) { | ||||
|         hgram_opts |= QDIST_PR_NODECIMAL; | ||||
|     } | ||||
|     hgram = qdist_pr(&hst.occupancy, 10, hgram_opts); | ||||
|     g_string_append_printf(buf, "TB hash occupancy   %0.2f%% avg chain occ. " | ||||
|                            "Histogram: %s\n", | ||||
|                            qdist_avg(&hst.occupancy) * 100, hgram); | ||||
|     g_free(hgram); | ||||
|  | ||||
|     hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS; | ||||
|     hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain); | ||||
|     if (hgram_bins > 10) { | ||||
|         hgram_bins = 10; | ||||
|     } else { | ||||
|         hgram_bins = 0; | ||||
|         hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE; | ||||
|     } | ||||
|     hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts); | ||||
|     g_string_append_printf(buf, "TB hash avg chain   %0.3f buckets. " | ||||
|                            "Histogram: %s\n", | ||||
|                            qdist_avg(&hst.chain), hgram); | ||||
|     g_free(hgram); | ||||
| } | ||||
|  | ||||
| struct tb_tree_stats { | ||||
|     size_t nb_tbs; | ||||
|     size_t host_size; | ||||
|     size_t target_size; | ||||
|     size_t max_target_size; | ||||
|     size_t direct_jmp_count; | ||||
|     size_t direct_jmp2_count; | ||||
|     size_t cross_page; | ||||
| }; | ||||
|  | ||||
| static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data) | ||||
| { | ||||
|     const TranslationBlock *tb = value; | ||||
|     struct tb_tree_stats *tst = data; | ||||
|  | ||||
|     tst->nb_tbs++; | ||||
|     tst->host_size += tb->tc.size; | ||||
|     tst->target_size += tb->size; | ||||
|     if (tb->size > tst->max_target_size) { | ||||
|         tst->max_target_size = tb->size; | ||||
|     } | ||||
|     if (tb->page_addr[1] != -1) { | ||||
|         tst->cross_page++; | ||||
|     } | ||||
|     if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) { | ||||
|         tst->direct_jmp_count++; | ||||
|         if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) { | ||||
|             tst->direct_jmp2_count++; | ||||
|         } | ||||
|     } | ||||
|     return false; | ||||
| } | ||||
|  | ||||
| static void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide) | ||||
| { | ||||
|     CPUState *cpu; | ||||
|     size_t full = 0, part = 0, elide = 0; | ||||
|  | ||||
|     CPU_FOREACH(cpu) { | ||||
|         full += qatomic_read(&cpu->neg.tlb.c.full_flush_count); | ||||
|         part += qatomic_read(&cpu->neg.tlb.c.part_flush_count); | ||||
|         elide += qatomic_read(&cpu->neg.tlb.c.elide_flush_count); | ||||
|     } | ||||
|     *pfull = full; | ||||
|     *ppart = part; | ||||
|     *pelide = elide; | ||||
| } | ||||
|  | ||||
| static void tcg_dump_info(GString *buf) | ||||
| { | ||||
|     g_string_append_printf(buf, "[TCG profiler not compiled]\n"); | ||||
| } | ||||
|  | ||||
| static void dump_exec_info(GString *buf) | ||||
| { | ||||
|     struct tb_tree_stats tst = {}; | ||||
|     struct qht_stats hst; | ||||
|     size_t nb_tbs, flush_full, flush_part, flush_elide; | ||||
|  | ||||
|     tcg_tb_foreach(tb_tree_stats_iter, &tst); | ||||
|     nb_tbs = tst.nb_tbs; | ||||
|     /* XXX: avoid using doubles ? */ | ||||
|     g_string_append_printf(buf, "Translation buffer state:\n"); | ||||
|     /* | ||||
|      * Report total code size including the padding and TB structs; | ||||
|      * otherwise users might think "-accel tcg,tb-size" is not honoured. | ||||
|      * For avg host size we use the precise numbers from tb_tree_stats though. | ||||
|      */ | ||||
|     g_string_append_printf(buf, "gen code size       %zu/%zu\n", | ||||
|                            tcg_code_size(), tcg_code_capacity()); | ||||
|     g_string_append_printf(buf, "TB count            %zu\n", nb_tbs); | ||||
|     g_string_append_printf(buf, "TB avg target size  %zu max=%zu bytes\n", | ||||
|                            nb_tbs ? tst.target_size / nb_tbs : 0, | ||||
|                            tst.max_target_size); | ||||
|     g_string_append_printf(buf, "TB avg host size    %zu bytes " | ||||
|                            "(expansion ratio: %0.1f)\n", | ||||
|                            nb_tbs ? tst.host_size / nb_tbs : 0, | ||||
|                            tst.target_size ? | ||||
|                            (double)tst.host_size / tst.target_size : 0); | ||||
|     g_string_append_printf(buf, "cross page TB count %zu (%zu%%)\n", | ||||
|                            tst.cross_page, | ||||
|                            nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0); | ||||
|     g_string_append_printf(buf, "direct jump count   %zu (%zu%%) " | ||||
|                            "(2 jumps=%zu %zu%%)\n", | ||||
|                            tst.direct_jmp_count, | ||||
|                            nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0, | ||||
|                            tst.direct_jmp2_count, | ||||
|                            nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0); | ||||
|  | ||||
|     qht_statistics_init(&tb_ctx.htable, &hst); | ||||
|     print_qht_statistics(hst, buf); | ||||
|     qht_statistics_destroy(&hst); | ||||
|  | ||||
|     g_string_append_printf(buf, "\nStatistics:\n"); | ||||
|     g_string_append_printf(buf, "TB flush count      %u\n", | ||||
|                            qatomic_read(&tb_ctx.tb_flush_count)); | ||||
|     g_string_append_printf(buf, "TB invalidate count %u\n", | ||||
|                            qatomic_read(&tb_ctx.tb_phys_invalidate_count)); | ||||
|  | ||||
|     tlb_flush_counts(&flush_full, &flush_part, &flush_elide); | ||||
|     g_string_append_printf(buf, "TLB full flushes    %zu\n", flush_full); | ||||
|     g_string_append_printf(buf, "TLB partial flushes %zu\n", flush_part); | ||||
|     g_string_append_printf(buf, "TLB elided flushes  %zu\n", flush_elide); | ||||
|     tcg_dump_info(buf); | ||||
| } | ||||
|  | ||||
| HumanReadableText *qmp_x_query_jit(Error **errp) | ||||
| { | ||||
|     g_autoptr(GString) buf = g_string_new(""); | ||||
| @@ -208,18 +45,12 @@ HumanReadableText *qmp_x_query_jit(Error **errp) | ||||
|         return NULL; | ||||
|     } | ||||
|  | ||||
|     dump_accel_info(buf); | ||||
|     dump_exec_info(buf); | ||||
|     dump_drift_info(buf); | ||||
|  | ||||
|     return human_readable_text_from_str(buf); | ||||
| } | ||||
|  | ||||
| static void tcg_dump_op_count(GString *buf) | ||||
| { | ||||
|     g_string_append_printf(buf, "[TCG profiler not compiled]\n"); | ||||
| } | ||||
|  | ||||
| HumanReadableText *qmp_x_query_opcount(Error **errp) | ||||
| { | ||||
|     g_autoptr(GString) buf = g_string_new(""); | ||||
| @@ -235,6 +66,37 @@ HumanReadableText *qmp_x_query_opcount(Error **errp) | ||||
|     return human_readable_text_from_str(buf); | ||||
| } | ||||
|  | ||||
| #ifdef CONFIG_PROFILER | ||||
|  | ||||
| int64_t dev_time; | ||||
|  | ||||
| HumanReadableText *qmp_x_query_profile(Error **errp) | ||||
| { | ||||
|     g_autoptr(GString) buf = g_string_new(""); | ||||
|     static int64_t last_cpu_exec_time; | ||||
|     int64_t cpu_exec_time; | ||||
|     int64_t delta; | ||||
|  | ||||
|     cpu_exec_time = tcg_cpu_exec_time(); | ||||
|     delta = cpu_exec_time - last_cpu_exec_time; | ||||
|  | ||||
|     g_string_append_printf(buf, "async time  %" PRId64 " (%0.3f)\n", | ||||
|                            dev_time, dev_time / (double)NANOSECONDS_PER_SECOND); | ||||
|     g_string_append_printf(buf, "qemu time   %" PRId64 " (%0.3f)\n", | ||||
|                            delta, delta / (double)NANOSECONDS_PER_SECOND); | ||||
|     last_cpu_exec_time = cpu_exec_time; | ||||
|     dev_time = 0; | ||||
|  | ||||
|     return human_readable_text_from_str(buf); | ||||
| } | ||||
| #else | ||||
| HumanReadableText *qmp_x_query_profile(Error **errp) | ||||
| { | ||||
|     error_setg(errp, "Internal profiler not compiled"); | ||||
|     return NULL; | ||||
| } | ||||
| #endif | ||||
|  | ||||
| static void hmp_tcg_register(void) | ||||
| { | ||||
|     monitor_register_hmp_info_hrt("jit", qmp_x_query_jit); | ||||
|   | ||||
| @@ -111,8 +111,6 @@ static void write_perfmap_entry(const void *start, size_t insn, | ||||
| } | ||||
|  | ||||
| static FILE *jitdump; | ||||
| static size_t perf_marker_size; | ||||
| static void *perf_marker = MAP_FAILED; | ||||
|  | ||||
| #define JITHEADER_MAGIC 0x4A695444 | ||||
| #define JITHEADER_VERSION 1 | ||||
| @@ -192,6 +190,7 @@ void perf_enable_jitdump(void) | ||||
| { | ||||
|     struct jitheader header; | ||||
|     char jitdump_file[32]; | ||||
|     void *perf_marker; | ||||
|  | ||||
|     if (!use_rt_clock) { | ||||
|         warn_report("CLOCK_MONOTONIC is not available, proceeding without jitdump"); | ||||
| @@ -211,8 +210,7 @@ void perf_enable_jitdump(void) | ||||
|      * PERF_RECORD_MMAP or PERF_RECORD_MMAP2 event is of the form jit-%d.dump | ||||
|      * and will process it as a jitdump file. | ||||
|      */ | ||||
|     perf_marker_size = qemu_real_host_page_size(); | ||||
|     perf_marker = mmap(NULL, perf_marker_size, PROT_READ | PROT_EXEC, | ||||
|     perf_marker = mmap(NULL, qemu_real_host_page_size(), PROT_READ | PROT_EXEC, | ||||
|                        MAP_PRIVATE, fileno(jitdump), 0); | ||||
|     if (perf_marker == MAP_FAILED) { | ||||
|         warn_report("Could not map %s: %s, proceeding without jitdump", | ||||
| @@ -313,8 +311,7 @@ void perf_report_code(uint64_t guest_pc, TranslationBlock *tb, | ||||
|                       const void *start) | ||||
| { | ||||
|     struct debuginfo_query *q; | ||||
|     size_t insn, start_words; | ||||
|     uint64_t *gen_insn_data; | ||||
|     size_t insn; | ||||
|  | ||||
|     if (!perfmap && !jitdump) { | ||||
|         return; | ||||
| @@ -328,12 +325,9 @@ void perf_report_code(uint64_t guest_pc, TranslationBlock *tb, | ||||
|     debuginfo_lock(); | ||||
|  | ||||
|     /* Query debuginfo for each guest instruction. */ | ||||
|     gen_insn_data = tcg_ctx->gen_insn_data; | ||||
|     start_words = tcg_ctx->insn_start_words; | ||||
|  | ||||
|     for (insn = 0; insn < tb->icount; insn++) { | ||||
|         /* FIXME: This replicates the restore_state_to_opc() logic. */ | ||||
|         q[insn].address = gen_insn_data[insn * start_words + 0]; | ||||
|         q[insn].address = tcg_ctx->gen_insn_data[insn][0]; | ||||
|         if (tb_cflags(tb) & CF_PCREL) { | ||||
|             q[insn].address |= (guest_pc & TARGET_PAGE_MASK); | ||||
|         } else { | ||||
| @@ -374,11 +368,6 @@ void perf_exit(void) | ||||
|         perfmap = NULL; | ||||
|     } | ||||
|  | ||||
|     if (perf_marker != MAP_FAILED) { | ||||
|         munmap(perf_marker, perf_marker_size); | ||||
|         perf_marker = MAP_FAILED; | ||||
|     } | ||||
|  | ||||
|     if (jitdump) { | ||||
|         fclose(jitdump); | ||||
|         jitdump = NULL; | ||||
|   | ||||
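The perf.c hunks above keep an executable mapping of the jitdump file alive because, per the comment in the diff, perf treats a PROT_EXEC mmap of a file named jit-<pid>.dump as the marker that jitdump data exists for the process. A minimal sketch of creating such a marker mapping (error handling trimmed, file layout illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    char name[64];
    long pagesize = sysconf(_SC_PAGESIZE);

    snprintf(name, sizeof(name), "./jit-%d.dump", getpid());

    int fd = open(name, O_CREAT | O_TRUNC | O_RDWR, 0644);
    if (fd < 0 || ftruncate(fd, pagesize) != 0) {
        return 1;
    }

    /* The executable mapping of the dump file is the marker perf looks for. */
    void *marker = mmap(NULL, pagesize, PROT_READ | PROT_EXEC,
                        MAP_PRIVATE, fd, 0);
    if (marker == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    /* ... jitdump records would be appended to fd here ... */

    munmap(marker, pagesize);
    close(fd);
    return 0;
}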
| @@ -43,18 +43,11 @@ | ||||
|  * CPU's index into a TCG temp, since the first callback did it already. | ||||
|  */ | ||||
| #include "qemu/osdep.h" | ||||
| #include "cpu.h" | ||||
| #include "tcg/tcg.h" | ||||
| #include "tcg/tcg-temp-internal.h" | ||||
| #include "tcg/tcg-op.h" | ||||
| #include "exec/exec-all.h" | ||||
| #include "exec/plugin-gen.h" | ||||
| #include "exec/translator.h" | ||||
| #include "exec/helper-proto-common.h" | ||||
|  | ||||
| #define HELPER_H  "accel/tcg/plugin-helpers.h" | ||||
| #include "exec/helper-info.c.inc" | ||||
| #undef  HELPER_H | ||||
|  | ||||
| #ifdef CONFIG_SOFTMMU | ||||
| # define CONFIG_SOFTMMU_GATE 1 | ||||
| @@ -98,13 +91,34 @@ void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index, | ||||
|                                 void *userdata) | ||||
| { } | ||||
|  | ||||
| static void do_gen_mem_cb(TCGv vaddr, uint32_t info) | ||||
| { | ||||
|     TCGv_i32 cpu_index = tcg_temp_ebb_new_i32(); | ||||
|     TCGv_i32 meminfo = tcg_temp_ebb_new_i32(); | ||||
|     TCGv_i64 vaddr64 = tcg_temp_ebb_new_i64(); | ||||
|     TCGv_ptr udata = tcg_temp_ebb_new_ptr(); | ||||
|  | ||||
|     tcg_gen_movi_i32(meminfo, info); | ||||
|     tcg_gen_movi_ptr(udata, 0); | ||||
|     tcg_gen_ld_i32(cpu_index, cpu_env, | ||||
|                    -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index)); | ||||
|     tcg_gen_extu_tl_i64(vaddr64, vaddr); | ||||
|  | ||||
|     gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, vaddr64, udata); | ||||
|  | ||||
|     tcg_temp_free_ptr(udata); | ||||
|     tcg_temp_free_i64(vaddr64); | ||||
|     tcg_temp_free_i32(meminfo); | ||||
|     tcg_temp_free_i32(cpu_index); | ||||
| } | ||||
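The load offset used above, -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index), works because the architecture-specific env is embedded in a CPU structure whose first member is the common CPUState, so one pointer adjustment reaches any CPUState field from the env pointer. A standalone sketch of that container_of-style arithmetic, with hypothetical structures that only mirror the layout:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical layout mirroring CPUState / ArchCPU / CPUArchState. */
struct cpu_state { int cpu_index; /* ... */ };
struct arch_env  { long regs[4];  /* ... */ };
struct arch_cpu {
    struct cpu_state parent;   /* must be the first member */
    struct arch_env  env;
};

int main(void)
{
    struct arch_cpu cpu = { .parent.cpu_index = 7 };
    char *env_ptr = (char *)&cpu.env;

    /* Walk back from env to the enclosing arch_cpu, then into cpu_state. */
    int *idx = (int *)(env_ptr
                       - offsetof(struct arch_cpu, env)
                       + offsetof(struct cpu_state, cpu_index));

    printf("cpu_index = %d\n", *idx);   /* prints 7 */
    return 0;
}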
|  | ||||
| static void gen_empty_udata_cb(void) | ||||
| { | ||||
|     TCGv_i32 cpu_index = tcg_temp_ebb_new_i32(); | ||||
|     TCGv_ptr udata = tcg_temp_ebb_new_ptr(); | ||||
|  | ||||
|     tcg_gen_movi_ptr(udata, 0); | ||||
|     tcg_gen_ld_i32(cpu_index, tcg_env, | ||||
|     tcg_gen_ld_i32(cpu_index, cpu_env, | ||||
|                    -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index)); | ||||
|     gen_helper_plugin_vcpu_udata_cb(cpu_index, udata); | ||||
|  | ||||
| @@ -130,22 +144,9 @@ static void gen_empty_inline_cb(void) | ||||
|     tcg_temp_free_i64(val); | ||||
| } | ||||
|  | ||||
| static void gen_empty_mem_cb(TCGv_i64 addr, uint32_t info) | ||||
| static void gen_empty_mem_cb(TCGv addr, uint32_t info) | ||||
| { | ||||
|     TCGv_i32 cpu_index = tcg_temp_ebb_new_i32(); | ||||
|     TCGv_i32 meminfo = tcg_temp_ebb_new_i32(); | ||||
|     TCGv_ptr udata = tcg_temp_ebb_new_ptr(); | ||||
|  | ||||
|     tcg_gen_movi_i32(meminfo, info); | ||||
|     tcg_gen_movi_ptr(udata, 0); | ||||
|     tcg_gen_ld_i32(cpu_index, tcg_env, | ||||
|                    -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index)); | ||||
|  | ||||
|     gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, addr, udata); | ||||
|  | ||||
|     tcg_temp_free_ptr(udata); | ||||
|     tcg_temp_free_i32(meminfo); | ||||
|     tcg_temp_free_i32(cpu_index); | ||||
|     do_gen_mem_cb(addr, info); | ||||
| } | ||||
|  | ||||
| /* | ||||
| @@ -157,7 +158,7 @@ static void gen_empty_mem_helper(void) | ||||
|     TCGv_ptr ptr = tcg_temp_ebb_new_ptr(); | ||||
|  | ||||
|     tcg_gen_movi_ptr(ptr, 0); | ||||
|     tcg_gen_st_ptr(ptr, tcg_env, offsetof(CPUState, plugin_mem_cbs) - | ||||
|     tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) - | ||||
|                                  offsetof(ArchCPU, env)); | ||||
|     tcg_temp_free_ptr(ptr); | ||||
| } | ||||
| @@ -200,17 +201,35 @@ static void plugin_gen_empty_callback(enum plugin_gen_from from) | ||||
|     } | ||||
| } | ||||
|  | ||||
| void plugin_gen_empty_mem_callback(TCGv_i64 addr, uint32_t info) | ||||
| union mem_gen_fn { | ||||
|     void (*mem_fn)(TCGv, uint32_t); | ||||
|     void (*inline_fn)(void); | ||||
| }; | ||||
|  | ||||
| static void gen_mem_wrapped(enum plugin_gen_cb type, | ||||
|                             const union mem_gen_fn *f, TCGv addr, | ||||
|                             uint32_t info, bool is_mem) | ||||
| { | ||||
|     enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info); | ||||
|  | ||||
|     gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_MEM, rw); | ||||
|     gen_empty_mem_cb(addr, info); | ||||
|     gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, rw); | ||||
|     if (is_mem) { | ||||
|         f->mem_fn(addr, info); | ||||
|     } else { | ||||
|         f->inline_fn(); | ||||
|     } | ||||
|     tcg_gen_plugin_cb_end(); | ||||
| } | ||||
|  | ||||
|     gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_INLINE, rw); | ||||
|     gen_empty_inline_cb(); | ||||
|     tcg_gen_plugin_cb_end(); | ||||
| void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info) | ||||
| { | ||||
|     union mem_gen_fn fn; | ||||
|  | ||||
|     fn.mem_fn = gen_empty_mem_cb; | ||||
|     gen_mem_wrapped(PLUGIN_GEN_CB_MEM, &fn, addr, info, true); | ||||
|  | ||||
|     fn.inline_fn = gen_empty_inline_cb; | ||||
|     gen_mem_wrapped(PLUGIN_GEN_CB_INLINE, &fn, 0, info, false); | ||||
| } | ||||
|  | ||||
| static TCGOp *find_op(TCGOp *op, TCGOpcode opc) | ||||
| @@ -260,6 +279,33 @@ static TCGOp *copy_op(TCGOp **begin_op, TCGOp *op, TCGOpcode opc) | ||||
|     return op; | ||||
| } | ||||
|  | ||||
| static TCGOp *copy_extu_i32_i64(TCGOp **begin_op, TCGOp *op) | ||||
| { | ||||
|     if (TCG_TARGET_REG_BITS == 32) { | ||||
|         /* mov_i32 */ | ||||
|         op = copy_op(begin_op, op, INDEX_op_mov_i32); | ||||
|         /* mov_i32 w/ $0 */ | ||||
|         op = copy_op(begin_op, op, INDEX_op_mov_i32); | ||||
|     } else { | ||||
|         /* extu_i32_i64 */ | ||||
|         op = copy_op(begin_op, op, INDEX_op_extu_i32_i64); | ||||
|     } | ||||
|     return op; | ||||
| } | ||||
|  | ||||
| static TCGOp *copy_mov_i64(TCGOp **begin_op, TCGOp *op) | ||||
| { | ||||
|     if (TCG_TARGET_REG_BITS == 32) { | ||||
|         /* 2x mov_i32 */ | ||||
|         op = copy_op(begin_op, op, INDEX_op_mov_i32); | ||||
|         op = copy_op(begin_op, op, INDEX_op_mov_i32); | ||||
|     } else { | ||||
|         /* mov_i64 */ | ||||
|         op = copy_op(begin_op, op, INDEX_op_mov_i64); | ||||
|     } | ||||
|     return op; | ||||
| } | ||||
|  | ||||
| static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr) | ||||
| { | ||||
|     if (UINTPTR_MAX == UINT32_MAX) { | ||||
| @@ -274,6 +320,18 @@ static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr) | ||||
|     return op; | ||||
| } | ||||
|  | ||||
| static TCGOp *copy_extu_tl_i64(TCGOp **begin_op, TCGOp *op) | ||||
| { | ||||
|     if (TARGET_LONG_BITS == 32) { | ||||
|         /* extu_i32_i64 */ | ||||
|         op = copy_extu_i32_i64(begin_op, op); | ||||
|     } else { | ||||
|         /* mov_i64 */ | ||||
|         op = copy_mov_i64(begin_op, op); | ||||
|     } | ||||
|     return op; | ||||
| } | ||||
|  | ||||
| static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op) | ||||
| { | ||||
|     if (TCG_TARGET_REG_BITS == 32) { | ||||
| @@ -327,7 +385,8 @@ static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op) | ||||
|     return op; | ||||
| } | ||||
|  | ||||
| static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *func, int *cb_idx) | ||||
| static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *empty_func, | ||||
|                         void *func, int *cb_idx) | ||||
| { | ||||
|     TCGOp *old_op; | ||||
|     int func_idx; | ||||
| @@ -371,7 +430,8 @@ static TCGOp *append_udata_cb(const struct qemu_plugin_dyn_cb *cb, | ||||
|     } | ||||
|  | ||||
|     /* call */ | ||||
|     op = copy_call(&begin_op, op, cb->f.vcpu_udata, cb_idx); | ||||
|     op = copy_call(&begin_op, op, HELPER(plugin_vcpu_udata_cb), | ||||
|                    cb->f.vcpu_udata, cb_idx); | ||||
|  | ||||
|     return op; | ||||
| } | ||||
| @@ -416,9 +476,13 @@ static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb, | ||||
|         tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32); | ||||
|     } | ||||
|  | ||||
|     /* extu_tl_i64 */ | ||||
|     op = copy_extu_tl_i64(&begin_op, op); | ||||
|  | ||||
|     if (type == PLUGIN_GEN_CB_MEM) { | ||||
|         /* call */ | ||||
|         op = copy_call(&begin_op, op, cb->f.vcpu_udata, cb_idx); | ||||
|         op = copy_call(&begin_op, op, HELPER(plugin_vcpu_mem_cb), | ||||
|                        cb->f.vcpu_udata, cb_idx); | ||||
|     } | ||||
|  | ||||
|     return op; | ||||
| @@ -578,7 +642,7 @@ void plugin_gen_disable_mem_helpers(void) | ||||
|     if (!tcg_ctx->plugin_tb->mem_helper) { | ||||
|         return; | ||||
|     } | ||||
|     tcg_gen_st_ptr(tcg_constant_ptr(NULL), tcg_env, | ||||
|     tcg_gen_st_ptr(tcg_constant_ptr(NULL), cpu_env, | ||||
|                    offsetof(CPUState, plugin_mem_cbs) - offsetof(ArchCPU, env)); | ||||
| } | ||||
|  | ||||
| @@ -846,7 +910,7 @@ void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db) | ||||
|     } else { | ||||
|         if (ptb->vaddr2 == -1) { | ||||
|             ptb->vaddr2 = TARGET_PAGE_ALIGN(db->pc_first); | ||||
|             get_page_addr_code_hostp(cpu_env(cpu), ptb->vaddr2, &ptb->haddr2); | ||||
|             get_page_addr_code_hostp(cpu->env_ptr, ptb->vaddr2, &ptb->haddr2); | ||||
|         } | ||||
|         pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2; | ||||
|     } | ||||
| @@ -863,14 +927,10 @@ void plugin_gen_insn_end(void) | ||||
|  * do any clean-up here and make sure things are reset in | ||||
|  * plugin_gen_tb_start. | ||||
|  */ | ||||
| void plugin_gen_tb_end(CPUState *cpu, size_t num_insns) | ||||
| void plugin_gen_tb_end(CPUState *cpu) | ||||
| { | ||||
|     struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb; | ||||
|  | ||||
|     /* translator may have removed instructions, update final count */ | ||||
|     g_assert(num_insns <= ptb->n); | ||||
|     ptb->n = num_insns; | ||||
|  | ||||
|     /* collect instrumentation requests */ | ||||
|     qemu_plugin_tb_trans_cb(cpu, ptb); | ||||
|  | ||||
|   | ||||
| @@ -35,16 +35,16 @@ | ||||
| #define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1) | ||||
| #define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE) | ||||
|  | ||||
| static inline unsigned int tb_jmp_cache_hash_page(vaddr pc) | ||||
| static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc) | ||||
| { | ||||
|     vaddr tmp; | ||||
|     target_ulong tmp; | ||||
|     tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)); | ||||
|     return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK; | ||||
| } | ||||
|  | ||||
| static inline unsigned int tb_jmp_cache_hash_func(vaddr pc) | ||||
| static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc) | ||||
| { | ||||
|     vaddr tmp; | ||||
|     target_ulong tmp; | ||||
|     tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)); | ||||
|     return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK) | ||||
|            | (tmp & TB_JMP_ADDR_MASK)); | ||||
| @@ -53,7 +53,7 @@ static inline unsigned int tb_jmp_cache_hash_func(vaddr pc) | ||||
| #else | ||||
|  | ||||
| /* In user-mode we can get better hashing because we do not have a TLB */ | ||||
| static inline unsigned int tb_jmp_cache_hash_func(vaddr pc) | ||||
| static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc) | ||||
| { | ||||
|     return (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1); | ||||
| } | ||||
| @@ -61,10 +61,10 @@ static inline unsigned int tb_jmp_cache_hash_func(vaddr pc) | ||||
| #endif /* CONFIG_SOFTMMU */ | ||||
|  | ||||
| static inline | ||||
| uint32_t tb_hash_func(tb_page_addr_t phys_pc, vaddr pc, | ||||
|                       uint32_t flags, uint64_t flags2, uint32_t cf_mask) | ||||
| uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc, uint32_t flags, | ||||
|                       uint32_t cf_mask, uint32_t trace_vcpu_dstate) | ||||
| { | ||||
|     return qemu_xxhash8(phys_pc, pc, flags2, flags, cf_mask); | ||||
|     return qemu_xxhash7(phys_pc, pc, flags, cf_mask, trace_vcpu_dstate); | ||||
| } | ||||
|  | ||||
| #endif | ||||
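Note on the hash change above: the removed lines hash (phys_pc, pc, flags2, flags, cf_mask) with qemu_xxhash8(), the added lines hash (phys_pc, pc, flags, cf_mask, trace_vcpu_dstate) with qemu_xxhash7(). Whichever tuple is used, every caller must feed the same fields in the same order, which is why the do_tb_phys_invalidate() and tb_link_page() hunks later in this diff change their tb_hash_func() calls in lockstep. A minimal standalone sketch of that invariant (toy code only; toy_mix/toy_tb_hash are made up and are not QEMU's xxhash):

/*
 * Toy stand-in for the real hash (NOT qemu_xxhash7): the point is only that
 * insertion and invalidation must hash the same field tuple.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t toy_mix(uint64_t h, uint64_t v)
{
    /* Any reasonable mixer works; only consistency between call sites matters. */
    return (h ^ (v + 0x9e3779b97f4a7c15ULL)) * 0xff51afd7ed558ccdULL;
}

static uint32_t toy_tb_hash(uint64_t phys_pc, uint64_t pc, uint32_t flags,
                            uint32_t cf_mask, uint32_t trace_vcpu_dstate)
{
    uint64_t h = toy_mix(phys_pc, pc);
    h = toy_mix(h, flags);
    h = toy_mix(h, cf_mask);
    h = toy_mix(h, trace_vcpu_dstate);
    return (uint32_t)(h ^ (h >> 32));
}

int main(void)
{
    /* Hash computed when the block is inserted into the hash table ... */
    uint32_t h_insert = toy_tb_hash(0x1000, 0x400080, 0x13, 0, 0);
    /* ... must equal the hash computed when it is later invalidated, so
     * both call sites pass the same five values in the same order. */
    uint32_t h_remove = toy_tb_hash(0x1000, 0x400080, 0x13, 0, 0);
    printf("insert=%08" PRIx32 " remove=%08" PRIx32 "\n", h_insert, h_remove);
    return h_insert == h_remove ? 0 : 1;
}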
|   | ||||
| @@ -21,7 +21,7 @@ struct CPUJumpCache { | ||||
|     struct rcu_head rcu; | ||||
|     struct { | ||||
|         TranslationBlock *tb; | ||||
|         vaddr pc; | ||||
|         target_ulong pc; | ||||
|     } array[TB_JMP_CACHE_SIZE]; | ||||
| }; | ||||
|  | ||||
|   | ||||
| @@ -1,5 +1,5 @@ | ||||
| /* | ||||
|  * Translation Block Maintenance | ||||
|  * Translation Block Maintaince | ||||
|  * | ||||
|  *  Copyright (c) 2003 Fabrice Bellard | ||||
|  * | ||||
| @@ -19,18 +19,15 @@ | ||||
|  | ||||
| #include "qemu/osdep.h" | ||||
| #include "qemu/interval-tree.h" | ||||
| #include "qemu/qtree.h" | ||||
| #include "exec/cputlb.h" | ||||
| #include "exec/log.h" | ||||
| #include "exec/exec-all.h" | ||||
| #include "exec/tb-flush.h" | ||||
| #include "exec/translate-all.h" | ||||
| #include "sysemu/tcg.h" | ||||
| #include "tcg/tcg.h" | ||||
| #include "tb-hash.h" | ||||
| #include "tb-context.h" | ||||
| #include "internal-common.h" | ||||
| #include "internal-target.h" | ||||
| #include "internal.h" | ||||
|  | ||||
|  | ||||
| /* List iterators for lists of tagged pointers in TranslationBlock. */ | ||||
| @@ -51,6 +48,7 @@ static bool tb_cmp(const void *ap, const void *bp) | ||||
|             a->cs_base == b->cs_base && | ||||
|             a->flags == b->flags && | ||||
|             (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) && | ||||
|             a->trace_vcpu_dstate == b->trace_vcpu_dstate && | ||||
|             tb_page_addr0(a) == tb_page_addr0(b) && | ||||
|             tb_page_addr1(a) == tb_page_addr1(b)); | ||||
| } | ||||
| @@ -71,7 +69,17 @@ typedef struct PageDesc PageDesc; | ||||
|  */ | ||||
| #define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock()) | ||||
|  | ||||
| static inline void tb_lock_pages(const TranslationBlock *tb) { } | ||||
| static inline void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1, | ||||
|                                   PageDesc **ret_p2, tb_page_addr_t phys2, | ||||
|                                   bool alloc) | ||||
| { | ||||
|     *ret_p1 = NULL; | ||||
|     *ret_p2 = NULL; | ||||
| } | ||||
|  | ||||
| static inline void page_unlock(PageDesc *pd) { } | ||||
| static inline void page_lock_tb(const TranslationBlock *tb) { } | ||||
| static inline void page_unlock_tb(const TranslationBlock *tb) { } | ||||
|  | ||||
| /* | ||||
|  * For user-only, since we are protecting all of memory with a single lock, | ||||
| @@ -87,9 +95,9 @@ static void tb_remove_all(void) | ||||
| } | ||||
|  | ||||
| /* Call with mmap_lock held. */ | ||||
| static void tb_record(TranslationBlock *tb) | ||||
| static void tb_record(TranslationBlock *tb, PageDesc *p1, PageDesc *p2) | ||||
| { | ||||
|     vaddr addr; | ||||
|     target_ulong addr; | ||||
|     int flags; | ||||
|  | ||||
|     assert_memory_lock(); | ||||
| @@ -117,29 +125,29 @@ static void tb_remove(TranslationBlock *tb) | ||||
| } | ||||
|  | ||||
| /* TODO: For now, still shared with translate-all.c for system mode. */ | ||||
| #define PAGE_FOR_EACH_TB(start, last, pagedesc, T, N)   \ | ||||
|     for (T = foreach_tb_first(start, last),             \ | ||||
|          N = foreach_tb_next(T, start, last);           \ | ||||
| #define PAGE_FOR_EACH_TB(start, end, pagedesc, T, N)    \ | ||||
|     for (T = foreach_tb_first(start, end),              \ | ||||
|          N = foreach_tb_next(T, start, end);            \ | ||||
|          T != NULL;                                     \ | ||||
|          T = N, N = foreach_tb_next(N, start, last)) | ||||
|          T = N, N = foreach_tb_next(N, start, end)) | ||||
|  | ||||
| typedef TranslationBlock *PageForEachNext; | ||||
|  | ||||
| static PageForEachNext foreach_tb_first(tb_page_addr_t start, | ||||
|                                         tb_page_addr_t last) | ||||
|                                         tb_page_addr_t end) | ||||
| { | ||||
|     IntervalTreeNode *n = interval_tree_iter_first(&tb_root, start, last); | ||||
|     IntervalTreeNode *n = interval_tree_iter_first(&tb_root, start, end - 1); | ||||
|     return n ? container_of(n, TranslationBlock, itree) : NULL; | ||||
| } | ||||
|  | ||||
| static PageForEachNext foreach_tb_next(PageForEachNext tb, | ||||
|                                        tb_page_addr_t start, | ||||
|                                        tb_page_addr_t last) | ||||
|                                        tb_page_addr_t end) | ||||
| { | ||||
|     IntervalTreeNode *n; | ||||
|  | ||||
|     if (tb) { | ||||
|         n = interval_tree_iter_next(&tb->itree, start, last); | ||||
|         n = interval_tree_iter_next(&tb->itree, start, end - 1); | ||||
|         if (n) { | ||||
|             return container_of(n, TranslationBlock, itree); | ||||
|         } | ||||
| @@ -208,12 +216,13 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc) | ||||
| { | ||||
|     PageDesc *pd; | ||||
|     void **lp; | ||||
|     int i; | ||||
|  | ||||
|     /* Level 1.  Always allocated.  */ | ||||
|     lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1)); | ||||
|  | ||||
|     /* Level 2..N-1.  */ | ||||
|     for (int i = v_l2_levels; i > 0; i--) { | ||||
|     for (i = v_l2_levels; i > 0; i--) { | ||||
|         void **p = qatomic_rcu_read(lp); | ||||
|  | ||||
|         if (p == NULL) { | ||||
| @@ -304,12 +313,12 @@ struct page_entry { | ||||
|  * See also: page_collection_lock(). | ||||
|  */ | ||||
| struct page_collection { | ||||
|     QTree *tree; | ||||
|     GTree *tree; | ||||
|     struct page_entry *max; | ||||
| }; | ||||
|  | ||||
| typedef int PageForEachNext; | ||||
| #define PAGE_FOR_EACH_TB(start, last, pagedesc, tb, n) \ | ||||
| #define PAGE_FOR_EACH_TB(start, end, pagedesc, tb, n) \ | ||||
|     TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next) | ||||
|  | ||||
| #ifdef CONFIG_DEBUG_TCG | ||||
| @@ -381,108 +390,12 @@ static void page_lock(PageDesc *pd) | ||||
|     qemu_spin_lock(&pd->lock); | ||||
| } | ||||
|  | ||||
| /* Like qemu_spin_trylock, returns false on success */ | ||||
| static bool page_trylock(PageDesc *pd) | ||||
| { | ||||
|     bool busy = qemu_spin_trylock(&pd->lock); | ||||
|     if (!busy) { | ||||
|         page_lock__debug(pd); | ||||
|     } | ||||
|     return busy; | ||||
| } | ||||
|  | ||||
| static void page_unlock(PageDesc *pd) | ||||
| { | ||||
|     qemu_spin_unlock(&pd->lock); | ||||
|     page_unlock__debug(pd); | ||||
| } | ||||
|  | ||||
| void tb_lock_page0(tb_page_addr_t paddr) | ||||
| { | ||||
|     page_lock(page_find_alloc(paddr >> TARGET_PAGE_BITS, true)); | ||||
| } | ||||
|  | ||||
| void tb_lock_page1(tb_page_addr_t paddr0, tb_page_addr_t paddr1) | ||||
| { | ||||
|     tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS; | ||||
|     tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS; | ||||
|     PageDesc *pd0, *pd1; | ||||
|  | ||||
|     if (pindex0 == pindex1) { | ||||
|         /* Identical pages, and the first page is already locked. */ | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     pd1 = page_find_alloc(pindex1, true); | ||||
|     if (pindex0 < pindex1) { | ||||
|         /* Correct locking order, we may block. */ | ||||
|         page_lock(pd1); | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     /* Incorrect locking order, we cannot block lest we deadlock. */ | ||||
|     if (!page_trylock(pd1)) { | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     /* | ||||
|      * Drop the lock on page0 and get both page locks in the right order. | ||||
|      * Restart translation via longjmp. | ||||
|      */ | ||||
|     pd0 = page_find_alloc(pindex0, false); | ||||
|     page_unlock(pd0); | ||||
|     page_lock(pd1); | ||||
|     page_lock(pd0); | ||||
|     siglongjmp(tcg_ctx->jmp_trans, -3); | ||||
| } | ||||
|  | ||||
| void tb_unlock_page1(tb_page_addr_t paddr0, tb_page_addr_t paddr1) | ||||
| { | ||||
|     tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS; | ||||
|     tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS; | ||||
|  | ||||
|     if (pindex0 != pindex1) { | ||||
|         page_unlock(page_find_alloc(pindex1, false)); | ||||
|     } | ||||
| } | ||||
|  | ||||
| static void tb_lock_pages(TranslationBlock *tb) | ||||
| { | ||||
|     tb_page_addr_t paddr0 = tb_page_addr0(tb); | ||||
|     tb_page_addr_t paddr1 = tb_page_addr1(tb); | ||||
|     tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS; | ||||
|     tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS; | ||||
|  | ||||
|     if (unlikely(paddr0 == -1)) { | ||||
|         return; | ||||
|     } | ||||
|     if (unlikely(paddr1 != -1) && pindex0 != pindex1) { | ||||
|         if (pindex0 < pindex1) { | ||||
|             page_lock(page_find_alloc(pindex0, true)); | ||||
|             page_lock(page_find_alloc(pindex1, true)); | ||||
|             return; | ||||
|         } | ||||
|         page_lock(page_find_alloc(pindex1, true)); | ||||
|     } | ||||
|     page_lock(page_find_alloc(pindex0, true)); | ||||
| } | ||||
|  | ||||
| void tb_unlock_pages(TranslationBlock *tb) | ||||
| { | ||||
|     tb_page_addr_t paddr0 = tb_page_addr0(tb); | ||||
|     tb_page_addr_t paddr1 = tb_page_addr1(tb); | ||||
|     tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS; | ||||
|     tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS; | ||||
|  | ||||
|     if (unlikely(paddr0 == -1)) { | ||||
|         return; | ||||
|     } | ||||
|     if (unlikely(paddr1 != -1) && pindex0 != pindex1) { | ||||
|         page_unlock(page_find_alloc(pindex1, false)); | ||||
|     } | ||||
|     page_unlock(page_find_alloc(pindex0, false)); | ||||
| } | ||||
|  | ||||
| static inline struct page_entry * | ||||
| page_entry_new(PageDesc *pd, tb_page_addr_t index) | ||||
| { | ||||
| @@ -506,10 +419,13 @@ static void page_entry_destroy(gpointer p) | ||||
| /* returns false on success */ | ||||
| static bool page_entry_trylock(struct page_entry *pe) | ||||
| { | ||||
|     bool busy = page_trylock(pe->pd); | ||||
|     bool busy; | ||||
|  | ||||
|     busy = qemu_spin_trylock(&pe->pd->lock); | ||||
|     if (!busy) { | ||||
|         g_assert(!pe->locked); | ||||
|         pe->locked = true; | ||||
|         page_lock__debug(pe->pd); | ||||
|     } | ||||
|     return busy; | ||||
| } | ||||
| @@ -550,7 +466,7 @@ static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr) | ||||
|     struct page_entry *pe; | ||||
|     PageDesc *pd; | ||||
|  | ||||
|     pe = q_tree_lookup(set->tree, &index); | ||||
|     pe = g_tree_lookup(set->tree, &index); | ||||
|     if (pe) { | ||||
|         return false; | ||||
|     } | ||||
| @@ -561,7 +477,7 @@ static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr) | ||||
|     } | ||||
|  | ||||
|     pe = page_entry_new(pd, index); | ||||
|     q_tree_insert(set->tree, &pe->index, pe); | ||||
|     g_tree_insert(set->tree, &pe->index, pe); | ||||
|  | ||||
|     /* | ||||
|      * If this is either (1) the first insertion or (2) a page whose index | ||||
| @@ -593,30 +509,30 @@ static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata) | ||||
| } | ||||
|  | ||||
| /* | ||||
|  * Lock a range of pages ([@start,@last]) as well as the pages of all | ||||
|  * Lock a range of pages ([@start,@end[) as well as the pages of all | ||||
|  * intersecting TBs. | ||||
|  * Locking order: acquire locks in ascending order of page index. | ||||
|  */ | ||||
| static struct page_collection *page_collection_lock(tb_page_addr_t start, | ||||
|                                                     tb_page_addr_t last) | ||||
|                                                     tb_page_addr_t end) | ||||
| { | ||||
|     struct page_collection *set = g_malloc(sizeof(*set)); | ||||
|     tb_page_addr_t index; | ||||
|     PageDesc *pd; | ||||
|  | ||||
|     start >>= TARGET_PAGE_BITS; | ||||
|     last >>= TARGET_PAGE_BITS; | ||||
|     g_assert(start <= last); | ||||
|     end   >>= TARGET_PAGE_BITS; | ||||
|     g_assert(start <= end); | ||||
|  | ||||
|     set->tree = q_tree_new_full(tb_page_addr_cmp, NULL, NULL, | ||||
|     set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL, | ||||
|                                 page_entry_destroy); | ||||
|     set->max = NULL; | ||||
|     assert_no_pages_locked(); | ||||
|  | ||||
|  retry: | ||||
|     q_tree_foreach(set->tree, page_entry_lock, NULL); | ||||
|     g_tree_foreach(set->tree, page_entry_lock, NULL); | ||||
|  | ||||
|     for (index = start; index <= last; index++) { | ||||
|     for (index = start; index <= end; index++) { | ||||
|         TranslationBlock *tb; | ||||
|         PageForEachNext n; | ||||
|  | ||||
| @@ -625,7 +541,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start, | ||||
|             continue; | ||||
|         } | ||||
|         if (page_trylock_add(set, index << TARGET_PAGE_BITS)) { | ||||
|             q_tree_foreach(set->tree, page_entry_unlock, NULL); | ||||
|             g_tree_foreach(set->tree, page_entry_unlock, NULL); | ||||
|             goto retry; | ||||
|         } | ||||
|         assert_page_locked(pd); | ||||
| @@ -634,7 +550,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start, | ||||
|                 (tb_page_addr1(tb) != -1 && | ||||
|                  page_trylock_add(set, tb_page_addr1(tb)))) { | ||||
|                 /* drop all locks, and reacquire in order */ | ||||
|                 q_tree_foreach(set->tree, page_entry_unlock, NULL); | ||||
|                 g_tree_foreach(set->tree, page_entry_unlock, NULL); | ||||
|                 goto retry; | ||||
|             } | ||||
|         } | ||||
| @@ -645,7 +561,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start, | ||||
| static void page_collection_unlock(struct page_collection *set) | ||||
| { | ||||
|     /* entries are unlocked and freed via page_entry_destroy */ | ||||
|     q_tree_destroy(set->tree); | ||||
|     g_tree_destroy(set->tree); | ||||
|     g_free(set); | ||||
| } | ||||
|  | ||||
| @@ -687,7 +603,8 @@ static void tb_remove_all(void) | ||||
|  * Add the tb in the target page and protect it if necessary. | ||||
|  * Called with @p->lock held. | ||||
|  */ | ||||
| static void tb_page_add(PageDesc *p, TranslationBlock *tb, unsigned int n) | ||||
| static inline void tb_page_add(PageDesc *p, TranslationBlock *tb, | ||||
|                                unsigned int n) | ||||
| { | ||||
|     bool page_already_protected; | ||||
|  | ||||
| @@ -707,21 +624,15 @@ static void tb_page_add(PageDesc *p, TranslationBlock *tb, unsigned int n) | ||||
|     } | ||||
| } | ||||
|  | ||||
| static void tb_record(TranslationBlock *tb) | ||||
| static void tb_record(TranslationBlock *tb, PageDesc *p1, PageDesc *p2) | ||||
| { | ||||
|     tb_page_addr_t paddr0 = tb_page_addr0(tb); | ||||
|     tb_page_addr_t paddr1 = tb_page_addr1(tb); | ||||
|     tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS; | ||||
|     tb_page_addr_t pindex1 = paddr0 >> TARGET_PAGE_BITS; | ||||
|  | ||||
|     assert(paddr0 != -1); | ||||
|     if (unlikely(paddr1 != -1) && pindex0 != pindex1) { | ||||
|         tb_page_add(page_find_alloc(pindex1, false), tb, 1); | ||||
|     tb_page_add(p1, tb, 0); | ||||
|     if (unlikely(p2)) { | ||||
|         tb_page_add(p2, tb, 1); | ||||
|     } | ||||
|     tb_page_add(page_find_alloc(pindex0, false), tb, 0); | ||||
| } | ||||
|  | ||||
| static void tb_page_remove(PageDesc *pd, TranslationBlock *tb) | ||||
| static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb) | ||||
| { | ||||
|     TranslationBlock *tb1; | ||||
|     uintptr_t *pprev; | ||||
| @@ -741,16 +652,74 @@ static void tb_page_remove(PageDesc *pd, TranslationBlock *tb) | ||||
|  | ||||
| static void tb_remove(TranslationBlock *tb) | ||||
| { | ||||
|     tb_page_addr_t paddr0 = tb_page_addr0(tb); | ||||
|     tb_page_addr_t paddr1 = tb_page_addr1(tb); | ||||
|     tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS; | ||||
|     tb_page_addr_t pindex1 = paddr0 >> TARGET_PAGE_BITS; | ||||
|     PageDesc *pd; | ||||
|  | ||||
|     assert(paddr0 != -1); | ||||
|     if (unlikely(paddr1 != -1) && pindex0 != pindex1) { | ||||
|         tb_page_remove(page_find_alloc(pindex1, false), tb); | ||||
|     pd = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS); | ||||
|     tb_page_remove(pd, tb); | ||||
|     if (unlikely(tb->page_addr[1] != -1)) { | ||||
|         pd = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS); | ||||
|         tb_page_remove(pd, tb); | ||||
|     } | ||||
| } | ||||
|  | ||||
| static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1, | ||||
|                            PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc) | ||||
| { | ||||
|     PageDesc *p1, *p2; | ||||
|     tb_page_addr_t page1; | ||||
|     tb_page_addr_t page2; | ||||
|  | ||||
|     assert_memory_lock(); | ||||
|     g_assert(phys1 != -1); | ||||
|  | ||||
|     page1 = phys1 >> TARGET_PAGE_BITS; | ||||
|     page2 = phys2 >> TARGET_PAGE_BITS; | ||||
|  | ||||
|     p1 = page_find_alloc(page1, alloc); | ||||
|     if (ret_p1) { | ||||
|         *ret_p1 = p1; | ||||
|     } | ||||
|     if (likely(phys2 == -1)) { | ||||
|         page_lock(p1); | ||||
|         return; | ||||
|     } else if (page1 == page2) { | ||||
|         page_lock(p1); | ||||
|         if (ret_p2) { | ||||
|             *ret_p2 = p1; | ||||
|         } | ||||
|         return; | ||||
|     } | ||||
|     p2 = page_find_alloc(page2, alloc); | ||||
|     if (ret_p2) { | ||||
|         *ret_p2 = p2; | ||||
|     } | ||||
|     if (page1 < page2) { | ||||
|         page_lock(p1); | ||||
|         page_lock(p2); | ||||
|     } else { | ||||
|         page_lock(p2); | ||||
|         page_lock(p1); | ||||
|     } | ||||
| } | ||||
|  | ||||
| /* lock the page(s) of a TB in the correct acquisition order */ | ||||
| static void page_lock_tb(const TranslationBlock *tb) | ||||
| { | ||||
|     page_lock_pair(NULL, tb_page_addr0(tb), NULL, tb_page_addr1(tb), false); | ||||
| } | ||||
|  | ||||
| static void page_unlock_tb(const TranslationBlock *tb) | ||||
| { | ||||
|     PageDesc *p1 = page_find(tb_page_addr0(tb) >> TARGET_PAGE_BITS); | ||||
|  | ||||
|     page_unlock(p1); | ||||
|     if (unlikely(tb_page_addr1(tb) != -1)) { | ||||
|         PageDesc *p2 = page_find(tb_page_addr1(tb) >> TARGET_PAGE_BITS); | ||||
|  | ||||
|         if (p2 != p1) { | ||||
|             page_unlock(p2); | ||||
|         } | ||||
|     } | ||||
|     tb_page_remove(page_find_alloc(pindex0, false), tb); | ||||
| } | ||||
| #endif /* CONFIG_USER_ONLY */ | ||||
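The page_lock_pair(), page_lock_tb() and page_unlock_tb() helpers re-added above always take the two page locks in ascending page-index order and lock a shared page only once, so two threads locking overlapping page pairs cannot deadlock (the page_collection_lock() comment earlier in this file states the same rule). A standalone pthread sketch of that ordering discipline (toy_ names are made up; this is not QEMU code):

/* Ascending-index lock order, reduced to two pthread mutexes. */
#include <pthread.h>
#include <stdio.h>

struct toy_page {
    unsigned long index;          /* stand-in for the page index */
    pthread_mutex_t lock;
};

/* Lock two pages without deadlocking: lower index first, shared page once. */
static void toy_lock_pair(struct toy_page *a, struct toy_page *b)
{
    if (a == b) {
        pthread_mutex_lock(&a->lock);
        return;
    }
    if (a->index > b->index) {
        struct toy_page *tmp = a; a = b; b = tmp;
    }
    pthread_mutex_lock(&a->lock);
    pthread_mutex_lock(&b->lock);
}

static void toy_unlock_pair(struct toy_page *a, struct toy_page *b)
{
    pthread_mutex_unlock(&a->lock);
    if (a != b) {
        pthread_mutex_unlock(&b->lock);
    }
}

int main(void)
{
    struct toy_page p1 = { 1, PTHREAD_MUTEX_INITIALIZER };
    struct toy_page p2 = { 2, PTHREAD_MUTEX_INITIALIZER };

    /* Whichever order the caller names the pages, p1.lock is taken first. */
    toy_lock_pair(&p2, &p1);
    puts("locked in ascending index order");
    toy_unlock_pair(&p2, &p1);
    return 0;
}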
|  | ||||
| @@ -775,7 +744,7 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count) | ||||
|  | ||||
|     tcg_region_reset_all(); | ||||
|     /* XXX: flush processor icache at this point if cache flush is expensive */ | ||||
|     qatomic_inc(&tb_ctx.tb_flush_count); | ||||
|     qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1); | ||||
|  | ||||
| done: | ||||
|     mmap_unlock(); | ||||
| @@ -787,9 +756,9 @@ done: | ||||
| void tb_flush(CPUState *cpu) | ||||
| { | ||||
|     if (tcg_enabled()) { | ||||
|         unsigned tb_flush_count = qatomic_read(&tb_ctx.tb_flush_count); | ||||
|         unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count); | ||||
|  | ||||
|         if (cpu_in_serial_context(cpu)) { | ||||
|         if (cpu_in_exclusive_context(cpu)) { | ||||
|             do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count)); | ||||
|         } else { | ||||
|             async_safe_run_on_cpu(cpu, do_tb_flush, | ||||
| @@ -917,7 +886,7 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list) | ||||
|     /* remove the TB from the hash list */ | ||||
|     phys_pc = tb_page_addr0(tb); | ||||
|     h = tb_hash_func(phys_pc, (orig_cflags & CF_PCREL ? 0 : tb->pc), | ||||
|                      tb->flags, tb->cs_base, orig_cflags); | ||||
|                      tb->flags, orig_cflags, tb->trace_vcpu_dstate); | ||||
|     if (!qht_remove(&tb_ctx.htable, tb, h)) { | ||||
|         return; | ||||
|     } | ||||
| @@ -955,16 +924,18 @@ static void tb_phys_invalidate__locked(TranslationBlock *tb) | ||||
| void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) | ||||
| { | ||||
|     if (page_addr == -1 && tb_page_addr0(tb) != -1) { | ||||
|         tb_lock_pages(tb); | ||||
|         page_lock_tb(tb); | ||||
|         do_tb_phys_invalidate(tb, true); | ||||
|         tb_unlock_pages(tb); | ||||
|         page_unlock_tb(tb); | ||||
|     } else { | ||||
|         do_tb_phys_invalidate(tb, false); | ||||
|     } | ||||
| } | ||||
|  | ||||
| /* | ||||
|  * Add a new TB and link it to the physical page tables. | ||||
|  * Add a new TB and link it to the physical page tables. phys_page2 is | ||||
|  * (-1) to indicate that only one page contains the TB. | ||||
|  * | ||||
|  * Called with mmap_lock held for user-mode emulation. | ||||
|  * | ||||
|  * Returns a pointer @tb, or a pointer to an existing TB that matches @tb. | ||||
| @@ -972,29 +943,43 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) | ||||
|  * for the same block of guest code that @tb corresponds to. In that case, | ||||
|  * the caller should discard the original @tb, and use instead the returned TB. | ||||
|  */ | ||||
| TranslationBlock *tb_link_page(TranslationBlock *tb) | ||||
| TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, | ||||
|                                tb_page_addr_t phys_page2) | ||||
| { | ||||
|     PageDesc *p; | ||||
|     PageDesc *p2 = NULL; | ||||
|     void *existing_tb = NULL; | ||||
|     uint32_t h; | ||||
|  | ||||
|     assert_memory_lock(); | ||||
|     tcg_debug_assert(!(tb->cflags & CF_INVALID)); | ||||
|  | ||||
|     tb_record(tb); | ||||
|     /* | ||||
|      * Add the TB to the page list, acquiring first the pages's locks. | ||||
|      * We keep the locks held until after inserting the TB in the hash table, | ||||
|      * so that if the insertion fails we know for sure that the TBs are still | ||||
|      * in the page descriptors. | ||||
|      * Note that inserting into the hash table first isn't an option, since | ||||
|      * we can only insert TBs that are fully initialized. | ||||
|      */ | ||||
|     page_lock_pair(&p, phys_pc, &p2, phys_page2, true); | ||||
|     tb_record(tb, p, p2); | ||||
|  | ||||
|     /* add in the hash table */ | ||||
|     h = tb_hash_func(tb_page_addr0(tb), (tb->cflags & CF_PCREL ? 0 : tb->pc), | ||||
|                      tb->flags, tb->cs_base, tb->cflags); | ||||
|     h = tb_hash_func(phys_pc, (tb->cflags & CF_PCREL ? 0 : tb->pc), | ||||
|                      tb->flags, tb->cflags, tb->trace_vcpu_dstate); | ||||
|     qht_insert(&tb_ctx.htable, tb, h, &existing_tb); | ||||
|  | ||||
|     /* remove TB from the page(s) if we couldn't insert it */ | ||||
|     if (unlikely(existing_tb)) { | ||||
|         tb_remove(tb); | ||||
|         tb_unlock_pages(tb); | ||||
|         return existing_tb; | ||||
|         tb = existing_tb; | ||||
|     } | ||||
|  | ||||
|     tb_unlock_pages(tb); | ||||
|     if (p2 && p2 != p) { | ||||
|         page_unlock(p2); | ||||
|     } | ||||
|     page_unlock(p); | ||||
|     return tb; | ||||
| } | ||||
|  | ||||
| @@ -1004,14 +989,14 @@ TranslationBlock *tb_link_page(TranslationBlock *tb) | ||||
|  * Called with mmap_lock held for user-mode emulation. | ||||
|  * NOTE: this function must not be called while a TB is running. | ||||
|  */ | ||||
| void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last) | ||||
| void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end) | ||||
| { | ||||
|     TranslationBlock *tb; | ||||
|     PageForEachNext n; | ||||
|  | ||||
|     assert_memory_lock(); | ||||
|  | ||||
|     PAGE_FOR_EACH_TB(start, last, unused, tb, n) { | ||||
|     PAGE_FOR_EACH_TB(start, end, unused, tb, n) { | ||||
|         tb_phys_invalidate__locked(tb); | ||||
|     } | ||||
| } | ||||
| @@ -1023,11 +1008,11 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last) | ||||
|  */ | ||||
| void tb_invalidate_phys_page(tb_page_addr_t addr) | ||||
| { | ||||
|     tb_page_addr_t start, last; | ||||
|     tb_page_addr_t start, end; | ||||
|  | ||||
|     start = addr & TARGET_PAGE_MASK; | ||||
|     last = addr | ~TARGET_PAGE_MASK; | ||||
|     tb_invalidate_phys_range(start, last); | ||||
|     end = start + TARGET_PAGE_SIZE; | ||||
|     tb_invalidate_phys_range(start, end); | ||||
| } | ||||
|  | ||||
| /* | ||||
| @@ -1043,7 +1028,6 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc) | ||||
|     bool current_tb_modified; | ||||
|     TranslationBlock *tb; | ||||
|     PageForEachNext n; | ||||
|     tb_page_addr_t last; | ||||
|  | ||||
|     /* | ||||
|      * Without precise smc semantics, or when outside of a TB, | ||||
| @@ -1060,11 +1044,10 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc) | ||||
|     assert_memory_lock(); | ||||
|     current_tb = tcg_tb_lookup(pc); | ||||
|  | ||||
|     last = addr | ~TARGET_PAGE_MASK; | ||||
|     addr &= TARGET_PAGE_MASK; | ||||
|     current_tb_modified = false; | ||||
|  | ||||
|     PAGE_FOR_EACH_TB(addr, last, unused, tb, n) { | ||||
|     PAGE_FOR_EACH_TB(addr, addr + TARGET_PAGE_SIZE, unused, tb, n) { | ||||
|         if (current_tb == tb && | ||||
|             (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) { | ||||
|             /* | ||||
| @@ -1083,8 +1066,7 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc) | ||||
|     if (current_tb_modified) { | ||||
|         /* Force execution of one insn next time.  */ | ||||
|         CPUState *cpu = current_cpu; | ||||
|         cpu->cflags_next_tb = | ||||
|             1 | CF_LAST_IO | CF_NOIRQ | curr_cflags(current_cpu); | ||||
|         cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu); | ||||
|         return true; | ||||
|     } | ||||
|     return false; | ||||
| @@ -1097,36 +1079,34 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc) | ||||
| static void | ||||
| tb_invalidate_phys_page_range__locked(struct page_collection *pages, | ||||
|                                       PageDesc *p, tb_page_addr_t start, | ||||
|                                       tb_page_addr_t last, | ||||
|                                       tb_page_addr_t end, | ||||
|                                       uintptr_t retaddr) | ||||
| { | ||||
|     TranslationBlock *tb; | ||||
|     tb_page_addr_t tb_start, tb_end; | ||||
|     PageForEachNext n; | ||||
| #ifdef TARGET_HAS_PRECISE_SMC | ||||
|     bool current_tb_modified = false; | ||||
|     TranslationBlock *current_tb = retaddr ? tcg_tb_lookup(retaddr) : NULL; | ||||
| #endif /* TARGET_HAS_PRECISE_SMC */ | ||||
|  | ||||
|     /* Range may not cross a page. */ | ||||
|     tcg_debug_assert(((start ^ last) & TARGET_PAGE_MASK) == 0); | ||||
|  | ||||
|     /* | ||||
|      * We remove all the TBs in the range [start, last]. | ||||
|      * We remove all the TBs in the range [start, end[. | ||||
|      * XXX: see if in some cases it could be faster to invalidate all the code | ||||
|      */ | ||||
|     PAGE_FOR_EACH_TB(start, last, p, tb, n) { | ||||
|         tb_page_addr_t tb_start, tb_last; | ||||
|  | ||||
|     PAGE_FOR_EACH_TB(start, end, p, tb, n) { | ||||
|         /* NOTE: this is subtle as a TB may span two physical pages */ | ||||
|         tb_start = tb_page_addr0(tb); | ||||
|         tb_last = tb_start + tb->size - 1; | ||||
|         if (n == 0) { | ||||
|             tb_last = MIN(tb_last, tb_start | ~TARGET_PAGE_MASK); | ||||
|             /* NOTE: tb_end may be after the end of the page, but | ||||
|                it is not a problem */ | ||||
|             tb_start = tb_page_addr0(tb); | ||||
|             tb_end = tb_start + tb->size; | ||||
|         } else { | ||||
|             tb_start = tb_page_addr1(tb); | ||||
|             tb_last = tb_start + (tb_last & ~TARGET_PAGE_MASK); | ||||
|             tb_end = tb_start + ((tb_page_addr0(tb) + tb->size) | ||||
|                                  & ~TARGET_PAGE_MASK); | ||||
|         } | ||||
|         if (!(tb_last < start || tb_start > last)) { | ||||
|         if (!(tb_end <= start || tb_start >= end)) { | ||||
| #ifdef TARGET_HAS_PRECISE_SMC | ||||
|             if (current_tb == tb && | ||||
|                 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) { | ||||
| @@ -1154,8 +1134,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages, | ||||
|     if (current_tb_modified) { | ||||
|         page_collection_unlock(pages); | ||||
|         /* Force execution of one insn next time.  */ | ||||
|         current_cpu->cflags_next_tb = | ||||
|             1 | CF_LAST_IO | CF_NOIRQ | curr_cflags(current_cpu); | ||||
|         current_cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu); | ||||
|         mmap_unlock(); | ||||
|         cpu_loop_exit_noexc(current_cpu); | ||||
|     } | ||||
| @@ -1169,7 +1148,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages, | ||||
| void tb_invalidate_phys_page(tb_page_addr_t addr) | ||||
| { | ||||
|     struct page_collection *pages; | ||||
|     tb_page_addr_t start, last; | ||||
|     tb_page_addr_t start, end; | ||||
|     PageDesc *p; | ||||
|  | ||||
|     p = page_find(addr >> TARGET_PAGE_BITS); | ||||
| @@ -1178,40 +1157,36 @@ void tb_invalidate_phys_page(tb_page_addr_t addr) | ||||
|     } | ||||
|  | ||||
|     start = addr & TARGET_PAGE_MASK; | ||||
|     last = addr | ~TARGET_PAGE_MASK; | ||||
|     pages = page_collection_lock(start, last); | ||||
|     tb_invalidate_phys_page_range__locked(pages, p, start, last, 0); | ||||
|     end = start + TARGET_PAGE_SIZE; | ||||
|     pages = page_collection_lock(start, end); | ||||
|     tb_invalidate_phys_page_range__locked(pages, p, start, end, 0); | ||||
|     page_collection_unlock(pages); | ||||
| } | ||||
|  | ||||
| /* | ||||
|  * Invalidate all TBs which intersect with the target physical address range | ||||
|  * [start;last]. NOTE: start and end may refer to *different* physical pages. | ||||
|  * [start;end[. NOTE: start and end may refer to *different* physical pages. | ||||
|  * 'is_cpu_write_access' should be true if called from a real cpu write | ||||
|  * access: the virtual CPU will exit the current TB if code is modified inside | ||||
|  * this TB. | ||||
|  */ | ||||
| void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last) | ||||
| void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end) | ||||
| { | ||||
|     struct page_collection *pages; | ||||
|     tb_page_addr_t index, index_last; | ||||
|     tb_page_addr_t next; | ||||
|  | ||||
|     pages = page_collection_lock(start, last); | ||||
|  | ||||
|     index_last = last >> TARGET_PAGE_BITS; | ||||
|     for (index = start >> TARGET_PAGE_BITS; index <= index_last; index++) { | ||||
|         PageDesc *pd = page_find(index); | ||||
|         tb_page_addr_t page_start, page_last; | ||||
|     pages = page_collection_lock(start, end); | ||||
|     for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; | ||||
|          start < end; | ||||
|          start = next, next += TARGET_PAGE_SIZE) { | ||||
|         PageDesc *pd = page_find(start >> TARGET_PAGE_BITS); | ||||
|         tb_page_addr_t bound = MIN(next, end); | ||||
|  | ||||
|         if (pd == NULL) { | ||||
|             continue; | ||||
|         } | ||||
|         assert_page_locked(pd); | ||||
|         page_start = index << TARGET_PAGE_BITS; | ||||
|         page_last = page_start | ~TARGET_PAGE_MASK; | ||||
|         page_last = MIN(page_last, last); | ||||
|         tb_invalidate_phys_page_range__locked(pages, pd, | ||||
|                                               page_start, page_last, 0); | ||||
|         tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0); | ||||
|     } | ||||
|     page_collection_unlock(pages); | ||||
| } | ||||
| @@ -1231,7 +1206,7 @@ static void tb_invalidate_phys_page_fast__locked(struct page_collection *pages, | ||||
|     } | ||||
|  | ||||
|     assert_page_locked(p); | ||||
|     tb_invalidate_phys_page_range__locked(pages, p, start, start + len - 1, ra); | ||||
|     tb_invalidate_phys_page_range__locked(pages, p, start, start + len, ra); | ||||
| } | ||||
|  | ||||
| /* | ||||
| @@ -1245,7 +1220,7 @@ void tb_invalidate_phys_range_fast(ram_addr_t ram_addr, | ||||
| { | ||||
|     struct page_collection *pages; | ||||
|  | ||||
|     pages = page_collection_lock(ram_addr, ram_addr + size - 1); | ||||
|     pages = page_collection_lock(ram_addr, ram_addr + size); | ||||
|     tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr); | ||||
|     page_collection_unlock(pages); | ||||
| } | ||||
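Many hunks in this file flip between inclusive [start, last] ranges (the removed lines: addr | ~TARGET_PAGE_MASK, interval_tree_iter_first(..., last)) and half-open [start, end) ranges (the added lines: start + TARGET_PAGE_SIZE, interval_tree_iter_first(..., end - 1)); the conversion is a single +/- 1, as in the tb_invalidate_phys_range_fast() hunk just above. A tiny standalone sketch of the two conventions (TOY_ names are made up; not QEMU code):

/* Inclusive [start, last] versus half-open [start, end) page ranges. */
#include <assert.h>
#include <stdint.h>

#define TOY_PAGE_SIZE 4096u
#define TOY_PAGE_MASK (~(uint64_t)(TOY_PAGE_SIZE - 1))

int main(void)
{
    uint64_t addr = 0x12345;

    /* Inclusive convention: last byte still inside the page. */
    uint64_t start = addr & TOY_PAGE_MASK;
    uint64_t last  = addr | ~TOY_PAGE_MASK;

    /* Half-open convention: first byte past the page. */
    uint64_t end   = start + TOY_PAGE_SIZE;

    /* Both describe the same page precisely when end == last + 1;
     * a (base, size) caller therefore passes base + size to the half-open
     * API and base + size - 1 to the inclusive one. */
    assert(end == last + 1);
    return 0;
}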
|   | ||||
| @@ -89,20 +89,7 @@ void icount_handle_deadline(void) | ||||
|     } | ||||
| } | ||||
|  | ||||
| /* Distribute the budget evenly across all CPUs */ | ||||
| int64_t icount_percpu_budget(int cpu_count) | ||||
| { | ||||
|     int64_t limit = icount_get_limit(); | ||||
|     int64_t timeslice = limit / cpu_count; | ||||
|  | ||||
|     if (timeslice == 0) { | ||||
|         timeslice = limit; | ||||
|     } | ||||
|  | ||||
|     return timeslice; | ||||
| } | ||||
|  | ||||
| void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget) | ||||
| void icount_prepare_for_run(CPUState *cpu) | ||||
| { | ||||
|     int insns_left; | ||||
|  | ||||
| @@ -111,16 +98,16 @@ void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget) | ||||
|      * each vCPU execution. However u16.high can be raised | ||||
|      * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt | ||||
|      */ | ||||
|     g_assert(cpu->neg.icount_decr.u16.low == 0); | ||||
|     g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0); | ||||
|     g_assert(cpu->icount_extra == 0); | ||||
|  | ||||
|     replay_mutex_lock(); | ||||
|  | ||||
|     cpu->icount_budget = MIN(icount_get_limit(), cpu_budget); | ||||
|     cpu->icount_budget = icount_get_limit(); | ||||
|     insns_left = MIN(0xffff, cpu->icount_budget); | ||||
|     cpu->neg.icount_decr.u16.low = insns_left; | ||||
|     cpu_neg(cpu)->icount_decr.u16.low = insns_left; | ||||
|     cpu->icount_extra = cpu->icount_budget - insns_left; | ||||
|  | ||||
|     replay_mutex_lock(); | ||||
|  | ||||
|     if (cpu->icount_budget == 0) { | ||||
|         /* | ||||
|          * We're called without the iothread lock, so must take it while | ||||
| @@ -138,7 +125,7 @@ void icount_process_data(CPUState *cpu) | ||||
|     icount_update(cpu); | ||||
|  | ||||
|     /* Reset the counters */ | ||||
|     cpu->neg.icount_decr.u16.low = 0; | ||||
|     cpu_neg(cpu)->icount_decr.u16.low = 0; | ||||
|     cpu->icount_extra = 0; | ||||
|     cpu->icount_budget = 0; | ||||
|  | ||||
| @@ -153,7 +140,7 @@ void icount_handle_interrupt(CPUState *cpu, int mask) | ||||
|  | ||||
|     tcg_handle_interrupt(cpu, mask); | ||||
|     if (qemu_cpu_is_self(cpu) && | ||||
|         !cpu->neg.can_do_io | ||||
|         !cpu->can_do_io | ||||
|         && (mask & ~old_mask) != 0) { | ||||
|         cpu_abort(cpu, "Raised interrupt while not in I/O function"); | ||||
|     } | ||||
|   | ||||
| @@ -11,8 +11,7 @@ | ||||
| #define TCG_ACCEL_OPS_ICOUNT_H | ||||
|  | ||||
| void icount_handle_deadline(void); | ||||
| void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget); | ||||
| int64_t icount_percpu_budget(int cpu_count); | ||||
| void icount_prepare_for_run(CPUState *cpu); | ||||
| void icount_process_data(CPUState *cpu); | ||||
|  | ||||
| void icount_handle_interrupt(CPUState *cpu, int mask); | ||||
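The icount hunks above drop icount_percpu_budget(), which split the global instruction budget evenly across vCPUs, and the MIN(icount_get_limit(), cpu_budget) clamp in icount_prepare_for_run(); the round-robin hunks later in this diff remove the matching cpu_budget plumbing. The arithmetic involved, as a standalone sketch under those assumptions (toy_ names are made up; not QEMU code):

/* Splitting a global icount limit into per-vCPU timeslices. */
#include <stdint.h>
#include <stdio.h>

static int64_t toy_percpu_budget(int64_t limit, int cpu_count)
{
    int64_t timeslice = limit / cpu_count;

    /* A tiny limit can divide down to zero; fall back to the full budget
     * rather than handing a vCPU nothing to execute. */
    if (timeslice == 0) {
        timeslice = limit;
    }
    return timeslice;
}

int main(void)
{
    int64_t limit = 100000;
    int cpus = 4;
    int64_t slice = toy_percpu_budget(limit, cpus);

    /* Each vCPU then runs with MIN(limit, slice) instructions before the
     * round-robin loop moves on to the next one. */
    int64_t budget = slice < limit ? slice : limit;
    printf("per-vCPU budget: %lld of %lld\n",
           (long long)budget, (long long)limit);
    return 0;
}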
|   | ||||
| @@ -32,7 +32,7 @@ | ||||
| #include "qemu/guest-random.h" | ||||
| #include "exec/exec-all.h" | ||||
| #include "hw/boards.h" | ||||
| #include "tcg/startup.h" | ||||
|  | ||||
| #include "tcg-accel-ops.h" | ||||
| #include "tcg-accel-ops-mttcg.h" | ||||
|  | ||||
| @@ -80,7 +80,7 @@ static void *mttcg_cpu_thread_fn(void *arg) | ||||
|     qemu_thread_get_self(cpu->thread); | ||||
|  | ||||
|     cpu->thread_id = qemu_get_thread_id(); | ||||
|     cpu->neg.can_do_io = true; | ||||
|     cpu->can_do_io = 1; | ||||
|     current_cpu = cpu; | ||||
|     cpu_thread_signal_created(cpu); | ||||
|     qemu_guest_random_seed_thread_part2(cpu->random_seed); | ||||
| @@ -100,9 +100,14 @@ static void *mttcg_cpu_thread_fn(void *arg) | ||||
|                 break; | ||||
|             case EXCP_HALTED: | ||||
|                 /* | ||||
|                  * Usually cpu->halted is set, but may have already been | ||||
|                  * reset by another thread by the time we arrive here. | ||||
|                  * during start-up the vCPU is reset and the thread is | ||||
|                  * kicked several times. If we don't ensure we go back | ||||
|                  * to sleep in the halted state we won't cleanly | ||||
|                  * start-up when the vCPU is enabled. | ||||
|                  * | ||||
|                  * cpu->halted should ensure we sleep in wait_io_event | ||||
|                  */ | ||||
|                 g_assert(cpu->halted); | ||||
|                 break; | ||||
|             case EXCP_ATOMIC: | ||||
|                 qemu_mutex_unlock_iothread(); | ||||
| @@ -114,7 +119,7 @@ static void *mttcg_cpu_thread_fn(void *arg) | ||||
|             } | ||||
|         } | ||||
|  | ||||
|         qatomic_set_mb(&cpu->exit_request, 0); | ||||
|         qatomic_mb_set(&cpu->exit_request, 0); | ||||
|         qemu_wait_io_event(cpu); | ||||
|     } while (!cpu->unplug || cpu_can_run(cpu)); | ||||
|  | ||||
| @@ -147,4 +152,8 @@ void mttcg_start_vcpu_thread(CPUState *cpu) | ||||
|  | ||||
|     qemu_thread_create(cpu->thread, thread_name, mttcg_cpu_thread_fn, | ||||
|                        cpu, QEMU_THREAD_JOINABLE); | ||||
|  | ||||
| #ifdef _WIN32 | ||||
|     cpu->hThread = qemu_thread_get_handle(cpu->thread); | ||||
| #endif | ||||
| } | ||||
|   | ||||
| @@ -24,7 +24,6 @@ | ||||
|  */ | ||||
|  | ||||
| #include "qemu/osdep.h" | ||||
| #include "qemu/lockable.h" | ||||
| #include "sysemu/tcg.h" | ||||
| #include "sysemu/replay.h" | ||||
| #include "sysemu/cpu-timers.h" | ||||
| @@ -32,7 +31,7 @@ | ||||
| #include "qemu/notify.h" | ||||
| #include "qemu/guest-random.h" | ||||
| #include "exec/exec-all.h" | ||||
| #include "tcg/startup.h" | ||||
|  | ||||
| #include "tcg-accel-ops.h" | ||||
| #include "tcg-accel-ops-rr.h" | ||||
| #include "tcg-accel-ops-icount.h" | ||||
| @@ -72,13 +71,11 @@ static void rr_kick_next_cpu(void) | ||||
| { | ||||
|     CPUState *cpu; | ||||
|     do { | ||||
|         cpu = qatomic_read(&rr_current_cpu); | ||||
|         cpu = qatomic_mb_read(&rr_current_cpu); | ||||
|         if (cpu) { | ||||
|             cpu_exit(cpu); | ||||
|         } | ||||
|         /* Finish kicking this cpu before reading again.  */ | ||||
|         smp_mb(); | ||||
|     } while (cpu != qatomic_read(&rr_current_cpu)); | ||||
|     } while (cpu != qatomic_mb_read(&rr_current_cpu)); | ||||
| } | ||||
|  | ||||
| static void rr_kick_thread(void *opaque) | ||||
| @@ -142,33 +139,6 @@ static void rr_force_rcu(Notifier *notify, void *data) | ||||
|     rr_kick_next_cpu(); | ||||
| } | ||||
|  | ||||
| /* | ||||
|  * Calculate the number of CPUs that we will process in a single iteration of | ||||
|  * the main CPU thread loop so that we can fairly distribute the instruction | ||||
|  * count across CPUs. | ||||
|  * | ||||
|  * The CPU count is cached based on the CPU list generation ID to avoid | ||||
|  * iterating the list every time. | ||||
|  */ | ||||
| static int rr_cpu_count(void) | ||||
| { | ||||
|     static unsigned int last_gen_id = ~0; | ||||
|     static int cpu_count; | ||||
|     CPUState *cpu; | ||||
|  | ||||
|     QEMU_LOCK_GUARD(&qemu_cpu_list_lock); | ||||
|  | ||||
|     if (cpu_list_generation_id_get() != last_gen_id) { | ||||
|         cpu_count = 0; | ||||
|         CPU_FOREACH(cpu) { | ||||
|             ++cpu_count; | ||||
|         } | ||||
|         last_gen_id = cpu_list_generation_id_get(); | ||||
|     } | ||||
|  | ||||
|     return cpu_count; | ||||
| } | ||||
|  | ||||
| /* | ||||
|  * In the single-threaded case each vCPU is simulated in turn. If | ||||
|  * there is more than a single vCPU we create a simple timer to kick | ||||
| @@ -192,7 +162,7 @@ static void *rr_cpu_thread_fn(void *arg) | ||||
|     qemu_thread_get_self(cpu->thread); | ||||
|  | ||||
|     cpu->thread_id = qemu_get_thread_id(); | ||||
|     cpu->neg.can_do_io = true; | ||||
|     cpu->can_do_io = 1; | ||||
|     cpu_thread_signal_created(cpu); | ||||
|     qemu_guest_random_seed_thread_part2(cpu->random_seed); | ||||
|  | ||||
| @@ -215,16 +185,11 @@ static void *rr_cpu_thread_fn(void *arg) | ||||
|     cpu->exit_request = 1; | ||||
|  | ||||
|     while (1) { | ||||
|         /* Only used for icount_enabled() */ | ||||
|         int64_t cpu_budget = 0; | ||||
|  | ||||
|         qemu_mutex_unlock_iothread(); | ||||
|         replay_mutex_lock(); | ||||
|         qemu_mutex_lock_iothread(); | ||||
|  | ||||
|         if (icount_enabled()) { | ||||
|             int cpu_count = rr_cpu_count(); | ||||
|  | ||||
|             /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */ | ||||
|             icount_account_warp_timer(); | ||||
|             /* | ||||
| @@ -232,8 +197,6 @@ static void *rr_cpu_thread_fn(void *arg) | ||||
|              * waking up the I/O thread and waiting for completion. | ||||
|              */ | ||||
|             icount_handle_deadline(); | ||||
|  | ||||
|             cpu_budget = icount_percpu_budget(cpu_count); | ||||
|         } | ||||
|  | ||||
|         replay_mutex_unlock(); | ||||
| @@ -243,9 +206,8 @@ static void *rr_cpu_thread_fn(void *arg) | ||||
|         } | ||||
|  | ||||
|         while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) { | ||||
|             /* Store rr_current_cpu before evaluating cpu_can_run().  */ | ||||
|             qatomic_set_mb(&rr_current_cpu, cpu); | ||||
|  | ||||
|             qatomic_mb_set(&rr_current_cpu, cpu); | ||||
|             current_cpu = cpu; | ||||
|  | ||||
|             qemu_clock_enable(QEMU_CLOCK_VIRTUAL, | ||||
| @@ -256,7 +218,7 @@ static void *rr_cpu_thread_fn(void *arg) | ||||
|  | ||||
|                 qemu_mutex_unlock_iothread(); | ||||
|                 if (icount_enabled()) { | ||||
|                     icount_prepare_for_run(cpu, cpu_budget); | ||||
|                     icount_prepare_for_run(cpu); | ||||
|                 } | ||||
|                 r = tcg_cpus_exec(cpu); | ||||
|                 if (icount_enabled()) { | ||||
| @@ -283,11 +245,11 @@ static void *rr_cpu_thread_fn(void *arg) | ||||
|             cpu = CPU_NEXT(cpu); | ||||
|         } /* while (cpu && !cpu->exit_request).. */ | ||||
|  | ||||
|         /* Does not need a memory barrier because a spurious wakeup is okay.  */ | ||||
|         /* Does not need qatomic_mb_set because a spurious wakeup is okay.  */ | ||||
|         qatomic_set(&rr_current_cpu, NULL); | ||||
|  | ||||
|         if (cpu && cpu->exit_request) { | ||||
|             qatomic_set_mb(&cpu->exit_request, 0); | ||||
|             qatomic_mb_set(&cpu->exit_request, 0); | ||||
|         } | ||||
|  | ||||
|         if (icount_enabled() && all_cpu_threads_idle()) { | ||||
| @@ -329,12 +291,15 @@ void rr_start_vcpu_thread(CPUState *cpu) | ||||
|  | ||||
|         single_tcg_halt_cond = cpu->halt_cond; | ||||
|         single_tcg_cpu_thread = cpu->thread; | ||||
| #ifdef _WIN32 | ||||
|         cpu->hThread = qemu_thread_get_handle(cpu->thread); | ||||
| #endif | ||||
|     } else { | ||||
|         /* we share the thread */ | ||||
|         cpu->thread = single_tcg_cpu_thread; | ||||
|         cpu->halt_cond = single_tcg_halt_cond; | ||||
|         cpu->thread_id = first_cpu->thread_id; | ||||
|         cpu->neg.can_do_io = 1; | ||||
|         cpu->can_do_io = 1; | ||||
|         cpu->created = true; | ||||
|     } | ||||
| } | ||||
|   | ||||
| @@ -34,7 +34,6 @@ | ||||
| #include "qemu/timer.h" | ||||
| #include "exec/exec-all.h" | ||||
| #include "exec/hwaddr.h" | ||||
| #include "exec/tb-flush.h" | ||||
| #include "exec/gdbstub.h" | ||||
|  | ||||
| #include "tcg-accel-ops.h" | ||||
| @@ -60,7 +59,7 @@ void tcg_cpu_init_cflags(CPUState *cpu, bool parallel) | ||||
|  | ||||
|     cflags |= parallel ? CF_PARALLEL : 0; | ||||
|     cflags |= icount_enabled() ? CF_USE_ICOUNT : 0; | ||||
|     cpu->tcg_cflags |= cflags; | ||||
|     cpu->tcg_cflags = cflags; | ||||
| } | ||||
|  | ||||
| void tcg_cpus_destroy(CPUState *cpu) | ||||
| @@ -71,20 +70,23 @@ void tcg_cpus_destroy(CPUState *cpu) | ||||
| int tcg_cpus_exec(CPUState *cpu) | ||||
| { | ||||
|     int ret; | ||||
| #ifdef CONFIG_PROFILER | ||||
|     int64_t ti; | ||||
| #endif | ||||
|     assert(tcg_enabled()); | ||||
| #ifdef CONFIG_PROFILER | ||||
|     ti = profile_getclock(); | ||||
| #endif | ||||
|     cpu_exec_start(cpu); | ||||
|     ret = cpu_exec(cpu); | ||||
|     cpu_exec_end(cpu); | ||||
| #ifdef CONFIG_PROFILER | ||||
|     qatomic_set(&tcg_ctx->prof.cpu_exec_time, | ||||
|                 tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti); | ||||
| #endif | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| static void tcg_cpu_reset_hold(CPUState *cpu) | ||||
| { | ||||
|     tcg_flush_jmp_cache(cpu); | ||||
|  | ||||
|     tlb_flush(cpu); | ||||
| } | ||||
|  | ||||
| /* mask must never be zero, except for A20 change call */ | ||||
| void tcg_handle_interrupt(CPUState *cpu, int mask) | ||||
| { | ||||
| @@ -99,7 +101,7 @@ void tcg_handle_interrupt(CPUState *cpu, int mask) | ||||
|     if (!qemu_cpu_is_self(cpu)) { | ||||
|         qemu_cpu_kick(cpu); | ||||
|     } else { | ||||
|         qatomic_set(&cpu->neg.icount_decr.u16.high, -1); | ||||
|         qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1); | ||||
|     } | ||||
| } | ||||
|  | ||||
| @@ -213,7 +215,6 @@ static void tcg_accel_ops_init(AccelOpsClass *ops) | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     ops->cpu_reset_hold = tcg_cpu_reset_hold; | ||||
|     ops->supports_guest_debug = tcg_supports_guest_debug; | ||||
|     ops->insert_breakpoint = tcg_insert_breakpoint; | ||||
|     ops->remove_breakpoint = tcg_remove_breakpoint; | ||||
|   | ||||
| @@ -27,24 +27,21 @@ | ||||
| #include "sysemu/tcg.h" | ||||
| #include "exec/replay-core.h" | ||||
| #include "sysemu/cpu-timers.h" | ||||
| #include "tcg/startup.h" | ||||
| #include "tcg/oversized-guest.h" | ||||
| #include "tcg/tcg.h" | ||||
| #include "qapi/error.h" | ||||
| #include "qemu/error-report.h" | ||||
| #include "qemu/accel.h" | ||||
| #include "qemu/atomic.h" | ||||
| #include "qapi/qapi-builtin-visit.h" | ||||
| #include "qemu/units.h" | ||||
| #if !defined(CONFIG_USER_ONLY) | ||||
| #include "hw/boards.h" | ||||
| #endif | ||||
| #include "internal-target.h" | ||||
| #include "internal.h" | ||||
|  | ||||
| struct TCGState { | ||||
|     AccelState parent_obj; | ||||
|  | ||||
|     bool mttcg_enabled; | ||||
|     bool one_insn_per_tb; | ||||
|     int splitwx_enabled; | ||||
|     unsigned long tb_size; | ||||
| }; | ||||
| @@ -64,23 +61,37 @@ DECLARE_INSTANCE_CHECKER(TCGState, TCG_STATE, | ||||
|  * they can set the appropriate CONFIG flags in ${target}-softmmu.mak | ||||
|  * | ||||
|  * Once a guest architecture has been converted to the new primitives | ||||
|  * there is one remaining limitation to check: | ||||
|  *   - The guest can't be oversized (e.g. 64 bit guest on 32 bit host) | ||||
|  * there are two remaining limitations to check. | ||||
|  * | ||||
|  * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host) | ||||
|  * - The host must have a stronger memory order than the guest | ||||
|  * | ||||
|  * It may be possible in future to support strong guests on weak hosts | ||||
|  * but that will require tagging all load/stores in a guest with their | ||||
|  * implicit memory order requirements which would likely slow things | ||||
|  * down a lot. | ||||
|  */ | ||||
|  | ||||
| static bool check_tcg_memory_orders_compatible(void) | ||||
| { | ||||
| #if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO) | ||||
|     return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0; | ||||
| #else | ||||
|     return false; | ||||
| #endif | ||||
| } | ||||
|  | ||||
| static bool default_mttcg_enabled(void) | ||||
| { | ||||
|     if (icount_enabled() || TCG_OVERSIZED_GUEST) { | ||||
|         return false; | ||||
|     } | ||||
|     } else { | ||||
| #ifdef TARGET_SUPPORTS_MTTCG | ||||
| # ifndef TCG_GUEST_DEFAULT_MO | ||||
| #  error "TARGET_SUPPORTS_MTTCG without TCG_GUEST_DEFAULT_MO" | ||||
| # endif | ||||
|     return true; | ||||
|         return check_tcg_memory_orders_compatible(); | ||||
| #else | ||||
|     return false; | ||||
|         return false; | ||||
| #endif | ||||
|     } | ||||
| } | ||||
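check_tcg_memory_orders_compatible(), re-added above, reduces "the host must have a stronger memory order than the guest" to a bit test: every ordering bit the guest relies on must also be set for the host, i.e. (guest & ~host) == 0. A standalone sketch of that subset check (the TOY_MO_* values are made-up stand-ins for the real ordering bits; not QEMU code):

/* Guest/host memory-ordering compatibility as a bitmask subset test. */
#include <stdbool.h>
#include <stdio.h>

#define TOY_MO_LD_LD (1 << 0)
#define TOY_MO_LD_ST (1 << 1)
#define TOY_MO_ST_LD (1 << 2)
#define TOY_MO_ST_ST (1 << 3)

/* True when every ordering the guest requires is provided by the host. */
static bool orders_compatible(unsigned guest_mo, unsigned host_mo)
{
    return (guest_mo & ~host_mo) == 0;
}

int main(void)
{
    unsigned strong_host = TOY_MO_LD_LD | TOY_MO_LD_ST | TOY_MO_ST_LD | TOY_MO_ST_ST;
    unsigned weak_host   = TOY_MO_LD_LD | TOY_MO_ST_ST;
    unsigned guest       = TOY_MO_LD_LD | TOY_MO_LD_ST | TOY_MO_ST_ST;

    printf("strong host ok: %d\n", orders_compatible(guest, strong_host)); /* 1 */
    printf("weak host ok:   %d\n", orders_compatible(guest, weak_host));   /* 0 */
    return 0;
}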
|  | ||||
| static void tcg_accel_instance_init(Object *obj) | ||||
| @@ -98,7 +109,6 @@ static void tcg_accel_instance_init(Object *obj) | ||||
| } | ||||
|  | ||||
| bool mttcg_enabled; | ||||
| bool one_insn_per_tb; | ||||
|  | ||||
| static int tcg_init_machine(MachineState *ms) | ||||
| { | ||||
| @@ -121,7 +131,7 @@ static int tcg_init_machine(MachineState *ms) | ||||
|      * There's no guest base to take into account, so go ahead and | ||||
|      * initialize the prologue now. | ||||
|      */ | ||||
|     tcg_prologue_init(); | ||||
|     tcg_prologue_init(tcg_ctx); | ||||
| #endif | ||||
|  | ||||
|     return 0; | ||||
| @@ -148,6 +158,11 @@ static void tcg_set_thread(Object *obj, const char *value, Error **errp) | ||||
|             warn_report("Guest not yet converted to MTTCG - " | ||||
|                         "you may get unexpected results"); | ||||
| #endif | ||||
|             if (!check_tcg_memory_orders_compatible()) { | ||||
|                 warn_report("Guest expects a stronger memory ordering " | ||||
|                             "than the host provides"); | ||||
|                 error_printf("This may cause strange/hard to debug errors\n"); | ||||
|             } | ||||
|             s->mttcg_enabled = true; | ||||
|         } | ||||
|     } else if (strcmp(value, "single") == 0) { | ||||
| @@ -193,20 +208,6 @@ static void tcg_set_splitwx(Object *obj, bool value, Error **errp) | ||||
|     s->splitwx_enabled = value; | ||||
| } | ||||
|  | ||||
| static bool tcg_get_one_insn_per_tb(Object *obj, Error **errp) | ||||
| { | ||||
|     TCGState *s = TCG_STATE(obj); | ||||
|     return s->one_insn_per_tb; | ||||
| } | ||||
|  | ||||
| static void tcg_set_one_insn_per_tb(Object *obj, bool value, Error **errp) | ||||
| { | ||||
|     TCGState *s = TCG_STATE(obj); | ||||
|     s->one_insn_per_tb = value; | ||||
|     /* Set the global also: this changes the behaviour */ | ||||
|     qatomic_set(&one_insn_per_tb, value); | ||||
| } | ||||
|  | ||||
| static int tcg_gdbstub_supported_sstep_flags(void) | ||||
| { | ||||
|     /* | ||||
| @@ -227,8 +228,6 @@ static void tcg_accel_class_init(ObjectClass *oc, void *data) | ||||
|     AccelClass *ac = ACCEL_CLASS(oc); | ||||
|     ac->name = "tcg"; | ||||
|     ac->init_machine = tcg_init_machine; | ||||
|     ac->cpu_common_realize = tcg_exec_realizefn; | ||||
|     ac->cpu_common_unrealize = tcg_exec_unrealizefn; | ||||
|     ac->allowed = &tcg_allowed; | ||||
|     ac->gdbstub_supported_sstep_flags = tcg_gdbstub_supported_sstep_flags; | ||||
|  | ||||
| @@ -246,12 +245,6 @@ static void tcg_accel_class_init(ObjectClass *oc, void *data) | ||||
|         tcg_get_splitwx, tcg_set_splitwx); | ||||
|     object_class_property_set_description(oc, "split-wx", | ||||
|         "Map jit pages into separate RW and RX regions"); | ||||
|  | ||||
|     object_class_property_add_bool(oc, "one-insn-per-tb", | ||||
|                                    tcg_get_one_insn_per_tb, | ||||
|                                    tcg_set_one_insn_per_tb); | ||||
|     object_class_property_set_description(oc, "one-insn-per-tb", | ||||
|         "Only put one guest insn in each translation block"); | ||||
| } | ||||
|  | ||||
| static const TypeInfo tcg_accel_type = { | ||||
|   | ||||
| @@ -20,7 +20,7 @@ | ||||
| #include "qemu/osdep.h" | ||||
| #include "qemu/host-utils.h" | ||||
| #include "cpu.h" | ||||
| #include "exec/helper-proto-common.h" | ||||
| #include "exec/helper-proto.h" | ||||
| #include "tcg/tcg-gvec-desc.h" | ||||
|  | ||||
|  | ||||
| @@ -550,17 +550,6 @@ void HELPER(gvec_ands)(void *d, void *a, uint64_t b, uint32_t desc) | ||||
|     clear_high(d, oprsz, desc); | ||||
| } | ||||
|  | ||||
| void HELPER(gvec_andcs)(void *d, void *a, uint64_t b, uint32_t desc) | ||||
| { | ||||
|     intptr_t oprsz = simd_oprsz(desc); | ||||
|     intptr_t i; | ||||
|  | ||||
|     for (i = 0; i < oprsz; i += sizeof(uint64_t)) { | ||||
|         *(uint64_t *)(d + i) = *(uint64_t *)(a + i) & ~b; | ||||
|     } | ||||
|     clear_high(d, oprsz, desc); | ||||
| } | ||||
|  | ||||
| void HELPER(gvec_xors)(void *d, void *a, uint64_t b, uint32_t desc) | ||||
| { | ||||
|     intptr_t oprsz = simd_oprsz(desc); | ||||
| @@ -1042,32 +1031,6 @@ DO_CMP2(64) | ||||
| #undef DO_CMP1 | ||||
| #undef DO_CMP2 | ||||
|  | ||||
| #define DO_CMP1(NAME, TYPE, OP)                                            \ | ||||
| void HELPER(NAME)(void *d, void *a, uint64_t b64, uint32_t desc)           \ | ||||
| {                                                                          \ | ||||
|     intptr_t oprsz = simd_oprsz(desc);                                     \ | ||||
|     TYPE inv = simd_data(desc), b = b64;                                   \ | ||||
|     for (intptr_t i = 0; i < oprsz; i += sizeof(TYPE)) {                   \ | ||||
|         *(TYPE *)(d + i) = -((*(TYPE *)(a + i) OP b) ^ inv);               \ | ||||
|     }                                                                      \ | ||||
|     clear_high(d, oprsz, desc);                                            \ | ||||
| } | ||||
|  | ||||
| #define DO_CMP2(SZ) \ | ||||
|     DO_CMP1(gvec_eqs##SZ, uint##SZ##_t, ==)    \ | ||||
|     DO_CMP1(gvec_lts##SZ, int##SZ##_t, <)      \ | ||||
|     DO_CMP1(gvec_les##SZ, int##SZ##_t, <=)     \ | ||||
|     DO_CMP1(gvec_ltus##SZ, uint##SZ##_t, <)    \ | ||||
|     DO_CMP1(gvec_leus##SZ, uint##SZ##_t, <=) | ||||
|  | ||||
| DO_CMP2(8) | ||||
| DO_CMP2(16) | ||||
| DO_CMP2(32) | ||||
| DO_CMP2(64) | ||||
|  | ||||
| #undef DO_CMP1 | ||||
| #undef DO_CMP2 | ||||
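The DO_CMP1 helpers in this hunk return per-element masks rather than booleans: a true comparison produces an all-ones element, and the `inv` operand flips the sense. A standalone, hedged illustration of that -((a OP b) ^ inv) idiom (the function name here is mine, not QEMU's):

    /* Hedged illustration of the mask convention used by DO_CMP1 above,
     * shown for 32-bit elements and the == comparison. */
    #include <assert.h>
    #include <stdint.h>

    static uint32_t cmp_eq_mask(uint32_t a, uint32_t b, uint32_t inv)
    {
        return -((a == b) ^ inv);
    }

    int main(void)
    {
        assert(cmp_eq_mask(5, 5, 0) == 0xffffffffu); /* equal     -> all ones */
        assert(cmp_eq_mask(5, 6, 0) == 0);           /* not equal -> zero     */
        assert(cmp_eq_mask(5, 5, 1) == 0);           /* inverted sense        */
        return 0;
    }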
|  | ||||
| void HELPER(gvec_ssadd8)(void *d, void *a, void *b, uint32_t desc) | ||||
| { | ||||
|     intptr_t oprsz = simd_oprsz(desc); | ||||
|   | ||||
| @@ -24,17 +24,13 @@ | ||||
| #include "qemu/osdep.h" | ||||
| #include "qemu/host-utils.h" | ||||
| #include "cpu.h" | ||||
| #include "exec/helper-proto-common.h" | ||||
| #include "exec/helper-proto.h" | ||||
| #include "exec/cpu_ldst.h" | ||||
| #include "exec/exec-all.h" | ||||
| #include "disas/disas.h" | ||||
| #include "exec/log.h" | ||||
| #include "tcg/tcg.h" | ||||
|  | ||||
| #define HELPER_H  "accel/tcg/tcg-runtime.h" | ||||
| #include "exec/helper-info.c.inc" | ||||
| #undef  HELPER_H | ||||
|  | ||||
| /* 32-bit helpers */ | ||||
|  | ||||
| int32_t HELPER(div_i32)(int32_t arg1, int32_t arg2) | ||||
|   | ||||
| @@ -39,63 +39,62 @@ DEF_HELPER_FLAGS_1(exit_atomic, TCG_CALL_NO_WG, noreturn, env) | ||||
| DEF_HELPER_FLAGS_3(memset, TCG_CALL_NO_RWG, ptr, ptr, int, ptr) | ||||
| #endif /* IN_HELPER_PROTO */ | ||||
|  | ||||
| DEF_HELPER_FLAGS_3(ld_i128, TCG_CALL_NO_WG, i128, env, i64, i32) | ||||
| DEF_HELPER_FLAGS_4(st_i128, TCG_CALL_NO_WG, void, env, i64, i128, i32) | ||||
|  | ||||
| DEF_HELPER_FLAGS_5(atomic_cmpxchgb, TCG_CALL_NO_WG, | ||||
|                    i32, env, i64, i32, i32, i32) | ||||
|                    i32, env, tl, i32, i32, i32) | ||||
| DEF_HELPER_FLAGS_5(atomic_cmpxchgw_be, TCG_CALL_NO_WG, | ||||
|                    i32, env, i64, i32, i32, i32) | ||||
|                    i32, env, tl, i32, i32, i32) | ||||
| DEF_HELPER_FLAGS_5(atomic_cmpxchgw_le, TCG_CALL_NO_WG, | ||||
|                    i32, env, i64, i32, i32, i32) | ||||
|                    i32, env, tl, i32, i32, i32) | ||||
| DEF_HELPER_FLAGS_5(atomic_cmpxchgl_be, TCG_CALL_NO_WG, | ||||
|                    i32, env, i64, i32, i32, i32) | ||||
|                    i32, env, tl, i32, i32, i32) | ||||
| DEF_HELPER_FLAGS_5(atomic_cmpxchgl_le, TCG_CALL_NO_WG, | ||||
|                    i32, env, i64, i32, i32, i32) | ||||
|                    i32, env, tl, i32, i32, i32) | ||||
| #ifdef CONFIG_ATOMIC64 | ||||
| DEF_HELPER_FLAGS_5(atomic_cmpxchgq_be, TCG_CALL_NO_WG, | ||||
|                    i64, env, i64, i64, i64, i32) | ||||
|                    i64, env, tl, i64, i64, i32) | ||||
| DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG, | ||||
|                    i64, env, i64, i64, i64, i32) | ||||
|                    i64, env, tl, i64, i64, i32) | ||||
| #endif | ||||
| #if HAVE_CMPXCHG128 | ||||
| #ifdef CONFIG_CMPXCHG128 | ||||
| DEF_HELPER_FLAGS_5(atomic_cmpxchgo_be, TCG_CALL_NO_WG, | ||||
|                    i128, env, i64, i128, i128, i32) | ||||
|                    i128, env, tl, i128, i128, i32) | ||||
| DEF_HELPER_FLAGS_5(atomic_cmpxchgo_le, TCG_CALL_NO_WG, | ||||
|                    i128, env, i64, i128, i128, i32) | ||||
|                    i128, env, tl, i128, i128, i32) | ||||
| #endif | ||||
|  | ||||
| DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo, TCG_CALL_NO_WG, | ||||
|                    i128, env, i64, i128, i128, i32) | ||||
| DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo_be, TCG_CALL_NO_WG, | ||||
|                    i128, env, tl, i128, i128, i32) | ||||
| DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo_le, TCG_CALL_NO_WG, | ||||
|                    i128, env, tl, i128, i128, i32) | ||||
|  | ||||
| #ifdef CONFIG_ATOMIC64 | ||||
| #define GEN_ATOMIC_HELPERS(NAME)                                  \ | ||||
|     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b),              \ | ||||
|                        TCG_CALL_NO_WG, i32, env, i64, i32, i32)   \ | ||||
|                        TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \ | ||||
|     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le),           \ | ||||
|                        TCG_CALL_NO_WG, i32, env, i64, i32, i32)   \ | ||||
|                        TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \ | ||||
|     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be),           \ | ||||
|                        TCG_CALL_NO_WG, i32, env, i64, i32, i32)   \ | ||||
|                        TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \ | ||||
|     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le),           \ | ||||
|                        TCG_CALL_NO_WG, i32, env, i64, i32, i32)   \ | ||||
|                        TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \ | ||||
|     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be),           \ | ||||
|                        TCG_CALL_NO_WG, i32, env, i64, i32, i32)   \ | ||||
|                        TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \ | ||||
|     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_le),           \ | ||||
|                        TCG_CALL_NO_WG, i64, env, i64, i64, i32)   \ | ||||
|                        TCG_CALL_NO_WG, i64, env, tl, i64, i32)    \ | ||||
|     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_be),           \ | ||||
|                        TCG_CALL_NO_WG, i64, env, i64, i64, i32) | ||||
|                        TCG_CALL_NO_WG, i64, env, tl, i64, i32) | ||||
| #else | ||||
| #define GEN_ATOMIC_HELPERS(NAME)                                  \ | ||||
|     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b),              \ | ||||
|                        TCG_CALL_NO_WG, i32, env, i64, i32, i32)   \ | ||||
|                        TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \ | ||||
|     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le),           \ | ||||
|                        TCG_CALL_NO_WG, i32, env, i64, i32, i32)   \ | ||||
|                        TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \ | ||||
|     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be),           \ | ||||
|                        TCG_CALL_NO_WG, i32, env, i64, i32, i32)   \ | ||||
|                        TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \ | ||||
|     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le),           \ | ||||
|                        TCG_CALL_NO_WG, i32, env, i64, i32, i32)   \ | ||||
|                        TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \ | ||||
|     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be),           \ | ||||
|                        TCG_CALL_NO_WG, i32, env, i64, i32, i32) | ||||
|                        TCG_CALL_NO_WG, i32, env, tl, i32, i32) | ||||
| #endif /* CONFIG_ATOMIC64 */ | ||||
|  | ||||
| GEN_ATOMIC_HELPERS(fetch_add) | ||||
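GEN_ATOMIC_HELPERS builds one DEF_HELPER prototype per operation, size and endianness by token pasting with QEMU's glue()/xglue() macros. A minimal, hedged demonstration of the pasting itself (DECLARE_B_HELPER and its body are invented stand-ins, not QEMU code):

    /* Hedged demo of two-level token pasting as used by GEN_ATOMIC_HELPERS. */
    #include <stdio.h>

    #define xglue(x, y) x ## y
    #define glue(x, y)  xglue(x, y)

    /* Illustrative stand-in for DEF_HELPER_FLAGS_4(...) */
    #define DECLARE_B_HELPER(NAME) \
        static int glue(glue(atomic_, NAME), b)(void) { return 0; }

    DECLARE_B_HELPER(fetch_add)   /* expands to atomic_fetch_addb() */

    int main(void)
    {
        printf("%d\n", atomic_fetch_addb());
        return 0;
    }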
| @@ -218,7 +217,6 @@ DEF_HELPER_FLAGS_4(gvec_nor, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||||
| DEF_HELPER_FLAGS_4(gvec_eqv, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||||
|  | ||||
| DEF_HELPER_FLAGS_4(gvec_ands, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) | ||||
| DEF_HELPER_FLAGS_4(gvec_andcs, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) | ||||
| DEF_HELPER_FLAGS_4(gvec_xors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) | ||||
| DEF_HELPER_FLAGS_4(gvec_ors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) | ||||
|  | ||||
| @@ -297,29 +295,4 @@ DEF_HELPER_FLAGS_4(gvec_leu16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||||
| DEF_HELPER_FLAGS_4(gvec_leu32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||||
| DEF_HELPER_FLAGS_4(gvec_leu64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) | ||||
|  | ||||
| DEF_HELPER_FLAGS_4(gvec_eqs8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) | ||||
| DEF_HELPER_FLAGS_4(gvec_eqs16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) | ||||
| DEF_HELPER_FLAGS_4(gvec_eqs32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) | ||||
| DEF_HELPER_FLAGS_4(gvec_eqs64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) | ||||
|  | ||||
| DEF_HELPER_FLAGS_4(gvec_lts8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) | ||||
| DEF_HELPER_FLAGS_4(gvec_lts16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) | ||||
| DEF_HELPER_FLAGS_4(gvec_lts32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) | ||||
| DEF_HELPER_FLAGS_4(gvec_lts64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) | ||||
|  | ||||
| DEF_HELPER_FLAGS_4(gvec_les8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) | ||||
| DEF_HELPER_FLAGS_4(gvec_les16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) | ||||
| DEF_HELPER_FLAGS_4(gvec_les32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) | ||||
| DEF_HELPER_FLAGS_4(gvec_les64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) | ||||
|  | ||||
| DEF_HELPER_FLAGS_4(gvec_ltus8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) | ||||
| DEF_HELPER_FLAGS_4(gvec_ltus16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) | ||||
| DEF_HELPER_FLAGS_4(gvec_ltus32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) | ||||
| DEF_HELPER_FLAGS_4(gvec_ltus64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) | ||||
|  | ||||
| DEF_HELPER_FLAGS_4(gvec_leus8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) | ||||
| DEF_HELPER_FLAGS_4(gvec_leus16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) | ||||
| DEF_HELPER_FLAGS_4(gvec_leus32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) | ||||
| DEF_HELPER_FLAGS_4(gvec_leus64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) | ||||
|  | ||||
| DEF_HELPER_FLAGS_5(gvec_bitsel, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) | ||||
|   | ||||
| @@ -19,6 +19,7 @@ | ||||
|  | ||||
| #include "qemu/osdep.h" | ||||
|  | ||||
| #define NO_CPU_IO_DEFS | ||||
| #include "trace.h" | ||||
| #include "disas/disas.h" | ||||
| #include "exec/exec-all.h" | ||||
| @@ -46,7 +47,6 @@ | ||||
| #include "exec/cputlb.h" | ||||
| #include "exec/translate-all.h" | ||||
| #include "exec/translator.h" | ||||
| #include "exec/tb-flush.h" | ||||
| #include "qemu/bitmap.h" | ||||
| #include "qemu/qemu-print.h" | ||||
| #include "qemu/main-loop.h" | ||||
| @@ -61,18 +61,19 @@ | ||||
| #include "tb-jmp-cache.h" | ||||
| #include "tb-hash.h" | ||||
| #include "tb-context.h" | ||||
| #include "internal-common.h" | ||||
| #include "internal-target.h" | ||||
| #include "internal.h" | ||||
| #include "perf.h" | ||||
| #include "tcg/insn-start-words.h" | ||||
|  | ||||
| /* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */ | ||||
| QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS > | ||||
|                   sizeof_field(TranslationBlock, trace_vcpu_dstate) | ||||
|                   * BITS_PER_BYTE); | ||||
|  | ||||
| TBContext tb_ctx; | ||||
|  | ||||
| /* | ||||
|  * Encode VAL as a signed leb128 sequence at P. | ||||
|  * Return P incremented past the encoded value. | ||||
|  */ | ||||
| static uint8_t *encode_sleb128(uint8_t *p, int64_t val) | ||||
| /* Encode VAL as a signed leb128 sequence at P. | ||||
|    Return P incremented past the encoded value.  */ | ||||
| static uint8_t *encode_sleb128(uint8_t *p, target_long val) | ||||
| { | ||||
|     int more, byte; | ||||
|  | ||||
| @@ -90,23 +91,21 @@ static uint8_t *encode_sleb128(uint8_t *p, int64_t val) | ||||
|     return p; | ||||
| } | ||||
|  | ||||
| /* | ||||
|  * Decode a signed leb128 sequence at *PP; increment *PP past the | ||||
|  * decoded value.  Return the decoded value. | ||||
|  */ | ||||
| static int64_t decode_sleb128(const uint8_t **pp) | ||||
| /* Decode a signed leb128 sequence at *PP; increment *PP past the | ||||
|    decoded value.  Return the decoded value.  */ | ||||
| static target_long decode_sleb128(const uint8_t **pp) | ||||
| { | ||||
|     const uint8_t *p = *pp; | ||||
|     int64_t val = 0; | ||||
|     target_long val = 0; | ||||
|     int byte, shift = 0; | ||||
|  | ||||
|     do { | ||||
|         byte = *p++; | ||||
|         val |= (int64_t)(byte & 0x7f) << shift; | ||||
|         val |= (target_ulong)(byte & 0x7f) << shift; | ||||
|         shift += 7; | ||||
|     } while (byte & 0x80); | ||||
|     if (shift < TARGET_LONG_BITS && (byte & 0x40)) { | ||||
|         val |= -(int64_t)1 << shift; | ||||
|         val |= -(target_ulong)1 << shift; | ||||
|     } | ||||
|  | ||||
|     *pp = p; | ||||
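The unwind data that encode_search() emits below is a stream of signed LEB128 deltas, using exactly the scheme of the two functions above. A standalone, hedged round-trip fixed to int64_t (the sleb128_put/sleb128_get names are mine):

    /* Hedged, standalone signed-LEB128 round-trip mirroring the scheme of
     * encode_sleb128()/decode_sleb128(): 7 payload bits per byte, bit 0x80
     * as continuation, bit 0x40 of the final byte as the sign bit.
     * (Assumes arithmetic right shift of negative values, as QEMU does.) */
    #include <assert.h>
    #include <stdint.h>

    static uint8_t *sleb128_put(uint8_t *p, int64_t val)
    {
        int more;
        do {
            int byte = val & 0x7f;
            val >>= 7;
            more = !((val == 0 && !(byte & 0x40)) ||
                     (val == -1 && (byte & 0x40)));
            *p++ = byte | (more ? 0x80 : 0);
        } while (more);
        return p;
    }

    static int64_t sleb128_get(const uint8_t **pp)
    {
        const uint8_t *p = *pp;
        int64_t val = 0;
        int byte, shift = 0;

        do {
            byte = *p++;
            val |= (int64_t)(byte & 0x7f) << shift;
            shift += 7;
        } while (byte & 0x80);
        if (shift < 64 && (byte & 0x40)) {
            val |= -(int64_t)1 << shift;   /* sign-extend the final byte */
        }
        *pp = p;
        return val;
    }

    int main(void)
    {
        uint8_t buf[10];
        const uint8_t *r = buf;

        sleb128_put(buf, -5);     /* encodes as the single byte 0x7b */
        assert(sleb128_get(&r) == -5);
        r = buf;
        sleb128_put(buf, 300);    /* encodes as 0xac 0x02 */
        assert(sleb128_get(&r) == 300);
        return 0;
    }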
| @@ -128,26 +127,22 @@ static int64_t decode_sleb128(const uint8_t **pp) | ||||
| static int encode_search(TranslationBlock *tb, uint8_t *block) | ||||
| { | ||||
|     uint8_t *highwater = tcg_ctx->code_gen_highwater; | ||||
|     uint64_t *insn_data = tcg_ctx->gen_insn_data; | ||||
|     uint16_t *insn_end_off = tcg_ctx->gen_insn_end_off; | ||||
|     uint8_t *p = block; | ||||
|     int i, j, n; | ||||
|  | ||||
|     for (i = 0, n = tb->icount; i < n; ++i) { | ||||
|         uint64_t prev, curr; | ||||
|         target_ulong prev; | ||||
|  | ||||
|         for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { | ||||
|             if (i == 0) { | ||||
|                 prev = (!(tb_cflags(tb) & CF_PCREL) && j == 0 ? tb->pc : 0); | ||||
|             } else { | ||||
|                 prev = insn_data[(i - 1) * TARGET_INSN_START_WORDS + j]; | ||||
|                 prev = tcg_ctx->gen_insn_data[i - 1][j]; | ||||
|             } | ||||
|             curr = insn_data[i * TARGET_INSN_START_WORDS + j]; | ||||
|             p = encode_sleb128(p, curr - prev); | ||||
|             p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev); | ||||
|         } | ||||
|         prev = (i == 0 ? 0 : insn_end_off[i - 1]); | ||||
|         curr = insn_end_off[i]; | ||||
|         p = encode_sleb128(p, curr - prev); | ||||
|         prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]); | ||||
|         p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev); | ||||
|  | ||||
|         /* Test for (pending) buffer overflow.  The assumption is that any | ||||
|            one row beginning below the high water mark cannot overrun | ||||
| @@ -203,6 +198,10 @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, | ||||
|                                uintptr_t host_pc) | ||||
| { | ||||
|     uint64_t data[TARGET_INSN_START_WORDS]; | ||||
| #ifdef CONFIG_PROFILER | ||||
|     TCGProfile *prof = &tcg_ctx->prof; | ||||
|     int64_t ti = profile_getclock(); | ||||
| #endif | ||||
|     int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data); | ||||
|  | ||||
|     if (insns_left < 0) { | ||||
| @@ -215,10 +214,16 @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, | ||||
|          * Reset the cycle counter to the start of the block and | ||||
|          * shift it to the number of actually executed instructions. | ||||
|          */ | ||||
|         cpu->neg.icount_decr.u16.low += insns_left; | ||||
|         cpu_neg(cpu)->icount_decr.u16.low += insns_left; | ||||
|     } | ||||
|  | ||||
|     cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data); | ||||
|  | ||||
| #ifdef CONFIG_PROFILER | ||||
|     qatomic_set(&prof->restore_time, | ||||
|                 prof->restore_time + profile_getclock() - ti); | ||||
|     qatomic_set(&prof->restore_count, prof->restore_count + 1); | ||||
| #endif | ||||
| } | ||||
|  | ||||
| bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc) | ||||
| @@ -265,7 +270,7 @@ void page_init(void) | ||||
|  * Return the size of the generated code, or negative on error. | ||||
|  */ | ||||
| static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb, | ||||
|                            vaddr pc, void *host_pc, | ||||
|                            target_ulong pc, void *host_pc, | ||||
|                            int *max_insns, int64_t *ti) | ||||
| { | ||||
|     int ret = sigsetjmp(tcg_ctx->jmp_trans, 0); | ||||
| @@ -281,19 +286,29 @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb, | ||||
|     tcg_ctx->cpu = NULL; | ||||
|     *max_insns = tb->icount; | ||||
|  | ||||
| #ifdef CONFIG_PROFILER | ||||
|     qatomic_set(&tcg_ctx->prof.tb_count, tcg_ctx->prof.tb_count + 1); | ||||
|     qatomic_set(&tcg_ctx->prof.interm_time, | ||||
|                 tcg_ctx->prof.interm_time + profile_getclock() - *ti); | ||||
|     *ti = profile_getclock(); | ||||
| #endif | ||||
|  | ||||
|     return tcg_gen_code(tcg_ctx, tb, pc); | ||||
| } | ||||
|  | ||||
| /* Called with mmap_lock held for user mode emulation.  */ | ||||
| TranslationBlock *tb_gen_code(CPUState *cpu, | ||||
|                               vaddr pc, uint64_t cs_base, | ||||
|                               target_ulong pc, target_ulong cs_base, | ||||
|                               uint32_t flags, int cflags) | ||||
| { | ||||
|     CPUArchState *env = cpu_env(cpu); | ||||
|     CPUArchState *env = cpu->env_ptr; | ||||
|     TranslationBlock *tb, *existing_tb; | ||||
|     tb_page_addr_t phys_pc, phys_p2; | ||||
|     tb_page_addr_t phys_pc; | ||||
|     tcg_insn_unit *gen_code_buf; | ||||
|     int gen_code_size, search_size, max_insns; | ||||
| #ifdef CONFIG_PROFILER | ||||
|     TCGProfile *prof = &tcg_ctx->prof; | ||||
| #endif | ||||
|     int64_t ti; | ||||
|     void *host_pc; | ||||
|  | ||||
| @@ -314,7 +329,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu, | ||||
|     QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS); | ||||
|  | ||||
|  buffer_overflow: | ||||
|     assert_no_pages_locked(); | ||||
|     tb = tcg_tb_alloc(tcg_ctx); | ||||
|     if (unlikely(!tb)) { | ||||
|         /* flush must be done */ | ||||
| @@ -333,27 +347,18 @@ TranslationBlock *tb_gen_code(CPUState *cpu, | ||||
|     tb->cs_base = cs_base; | ||||
|     tb->flags = flags; | ||||
|     tb->cflags = cflags; | ||||
|     tb->trace_vcpu_dstate = *cpu->trace_dstate; | ||||
|     tb_set_page_addr0(tb, phys_pc); | ||||
|     tb_set_page_addr1(tb, -1); | ||||
|     if (phys_pc != -1) { | ||||
|         tb_lock_page0(phys_pc); | ||||
|     } | ||||
|  | ||||
|     tcg_ctx->gen_tb = tb; | ||||
|     tcg_ctx->addr_type = TARGET_LONG_BITS == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64; | ||||
| #ifdef CONFIG_SOFTMMU | ||||
|     tcg_ctx->page_bits = TARGET_PAGE_BITS; | ||||
|     tcg_ctx->page_mask = TARGET_PAGE_MASK; | ||||
|     tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS; | ||||
| #endif | ||||
|     tcg_ctx->insn_start_words = TARGET_INSN_START_WORDS; | ||||
| #ifdef TCG_GUEST_DEFAULT_MO | ||||
|     tcg_ctx->guest_mo = TCG_GUEST_DEFAULT_MO; | ||||
| #else | ||||
|     tcg_ctx->guest_mo = TCG_MO_ALL; | ||||
|  tb_overflow: | ||||
|  | ||||
| #ifdef CONFIG_PROFILER | ||||
|     /* includes aborted translations because of exceptions */ | ||||
|     qatomic_set(&prof->tb_count1, prof->tb_count1 + 1); | ||||
|     ti = profile_getclock(); | ||||
| #endif | ||||
|  | ||||
|  restart_translate: | ||||
|     trace_translate_block(tb, pc, tb->tc.ptr); | ||||
|  | ||||
|     gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti); | ||||
| @@ -372,8 +377,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu, | ||||
|             qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT, | ||||
|                           "Restarting code generation for " | ||||
|                           "code_gen_buffer overflow\n"); | ||||
|             tb_unlock_pages(tb); | ||||
|             tcg_ctx->gen_tb = NULL; | ||||
|             goto buffer_overflow; | ||||
|  | ||||
|         case -2: | ||||
| @@ -392,39 +395,14 @@ TranslationBlock *tb_gen_code(CPUState *cpu, | ||||
|                           "Restarting code generation with " | ||||
|                           "smaller translation block (max %d insns)\n", | ||||
|                           max_insns); | ||||
|  | ||||
|             /* | ||||
|              * The half-sized TB may not cross pages. | ||||
|              * TODO: Fix all targets that cross pages except with | ||||
|              * the first insn, at which point this can't be reached. | ||||
|              */ | ||||
|             phys_p2 = tb_page_addr1(tb); | ||||
|             if (unlikely(phys_p2 != -1)) { | ||||
|                 tb_unlock_page1(phys_pc, phys_p2); | ||||
|                 tb_set_page_addr1(tb, -1); | ||||
|             } | ||||
|             goto restart_translate; | ||||
|  | ||||
|         case -3: | ||||
|             /* | ||||
|              * We had a page lock ordering problem.  In order to avoid | ||||
|              * deadlock we had to drop the lock on page0, which means | ||||
|              * that everything we translated so far is compromised. | ||||
|              * Restart with locks held on both pages. | ||||
|              */ | ||||
|             qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT, | ||||
|                           "Restarting code generation with re-locked pages"); | ||||
|             goto restart_translate; | ||||
|             goto tb_overflow; | ||||
|  | ||||
|         default: | ||||
|             g_assert_not_reached(); | ||||
|         } | ||||
|     } | ||||
|     tcg_ctx->gen_tb = NULL; | ||||
|  | ||||
|     search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size); | ||||
|     if (unlikely(search_size < 0)) { | ||||
|         tb_unlock_pages(tb); | ||||
|         goto buffer_overflow; | ||||
|     } | ||||
|     tb->tc.size = gen_code_size; | ||||
| @@ -435,6 +413,14 @@ TranslationBlock *tb_gen_code(CPUState *cpu, | ||||
|      */ | ||||
|     perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf)); | ||||
|  | ||||
| #ifdef CONFIG_PROFILER | ||||
|     qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti); | ||||
|     qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size); | ||||
|     qatomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size); | ||||
|     qatomic_set(&prof->search_out_len, prof->search_out_len + search_size); | ||||
| #endif | ||||
|  | ||||
| #ifdef DEBUG_DISAS | ||||
|     if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) && | ||||
|         qemu_log_in_addr_range(pc)) { | ||||
|         FILE *logfile = qemu_log_trylock(); | ||||
| @@ -457,8 +443,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu, | ||||
|             /* Dump header and the first instruction */ | ||||
|             fprintf(logfile, "OUT: [size=%d]\n", gen_code_size); | ||||
|             fprintf(logfile, | ||||
|                     "  -- guest addr 0x%016" PRIx64 " + tb prologue\n", | ||||
|                     tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]); | ||||
|                     "  -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n", | ||||
|                     tcg_ctx->gen_insn_data[insn][0]); | ||||
|             chunk_start = tcg_ctx->gen_insn_end_off[insn]; | ||||
|             disas(logfile, tb->tc.ptr, chunk_start); | ||||
|  | ||||
| @@ -470,8 +456,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu, | ||||
|             while (insn < tb->icount) { | ||||
|                 size_t chunk_end = tcg_ctx->gen_insn_end_off[insn]; | ||||
|                 if (chunk_end > chunk_start) { | ||||
|                     fprintf(logfile, "  -- guest addr 0x%016" PRIx64 "\n", | ||||
|                             tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]); | ||||
|                     fprintf(logfile, "  -- guest addr 0x" TARGET_FMT_lx "\n", | ||||
|                             tcg_ctx->gen_insn_data[insn][0]); | ||||
|                     disas(logfile, tb->tc.ptr + chunk_start, | ||||
|                           chunk_end - chunk_start); | ||||
|                     chunk_start = chunk_end; | ||||
| @@ -507,6 +493,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu, | ||||
|             qemu_log_unlock(logfile); | ||||
|         } | ||||
|     } | ||||
| #endif | ||||
|  | ||||
|     qatomic_set(&tcg_ctx->code_gen_ptr, (void *) | ||||
|         ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size, | ||||
| @@ -534,7 +521,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu, | ||||
|      * before attempting to link to other TBs or add to the lookup table. | ||||
|      */ | ||||
|     if (tb_page_addr0(tb) == -1) { | ||||
|         assert_no_pages_locked(); | ||||
|         return tb; | ||||
|     } | ||||
|  | ||||
| @@ -549,9 +535,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu, | ||||
|      * No explicit memory barrier is required -- tb_link_page() makes the | ||||
|      * TB visible in a consistent state. | ||||
|      */ | ||||
|     existing_tb = tb_link_page(tb); | ||||
|     assert_no_pages_locked(); | ||||
|  | ||||
|     existing_tb = tb_link_page(tb, tb_page_addr0(tb), tb_page_addr1(tb)); | ||||
|     /* if the TB already exists, discard what we just translated */ | ||||
|     if (unlikely(existing_tb != tb)) { | ||||
|         uintptr_t orig_aligned = (uintptr_t)gen_code_buf; | ||||
| @@ -579,16 +563,15 @@ void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr) | ||||
|     } else { | ||||
|         /* The exception probably happened in a helper.  The CPU state should | ||||
|            have been saved before calling it. Fetch the PC from there.  */ | ||||
|         CPUArchState *env = cpu_env(cpu); | ||||
|         vaddr pc; | ||||
|         uint64_t cs_base; | ||||
|         CPUArchState *env = cpu->env_ptr; | ||||
|         target_ulong pc, cs_base; | ||||
|         tb_page_addr_t addr; | ||||
|         uint32_t flags; | ||||
|  | ||||
|         cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); | ||||
|         addr = get_page_addr_code(env, pc); | ||||
|         if (addr != -1) { | ||||
|             tb_invalidate_phys_range(addr, addr); | ||||
|             tb_invalidate_phys_range(addr, addr + 1); | ||||
|         } | ||||
|     } | ||||
| } | ||||
| @@ -622,7 +605,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) | ||||
|     cc = CPU_GET_CLASS(cpu); | ||||
|     if (cc->tcg_ops->io_recompile_replay_branch && | ||||
|         cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) { | ||||
|         cpu->neg.icount_decr.u16.low++; | ||||
|         cpu_neg(cpu)->icount_decr.u16.low++; | ||||
|         n = 2; | ||||
|     } | ||||
|  | ||||
| @@ -635,23 +618,150 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) | ||||
|     cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n; | ||||
|  | ||||
|     if (qemu_loglevel_mask(CPU_LOG_EXEC)) { | ||||
|         vaddr pc = log_pc(cpu, tb); | ||||
|         target_ulong pc = log_pc(cpu, tb); | ||||
|         if (qemu_log_in_addr_range(pc)) { | ||||
|             qemu_log("cpu_io_recompile: rewound execution of TB to %016" | ||||
|                      VADDR_PRIx "\n", pc); | ||||
|             qemu_log("cpu_io_recompile: rewound execution of TB to " | ||||
|                      TARGET_FMT_lx "\n", pc); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     cpu_loop_exit_noexc(cpu); | ||||
| } | ||||
|  | ||||
| static void print_qht_statistics(struct qht_stats hst, GString *buf) | ||||
| { | ||||
|     uint32_t hgram_opts; | ||||
|     size_t hgram_bins; | ||||
|     char *hgram; | ||||
|  | ||||
|     if (!hst.head_buckets) { | ||||
|         return; | ||||
|     } | ||||
|     g_string_append_printf(buf, "TB hash buckets     %zu/%zu " | ||||
|                            "(%0.2f%% head buckets used)\n", | ||||
|                            hst.used_head_buckets, hst.head_buckets, | ||||
|                            (double)hst.used_head_buckets / | ||||
|                            hst.head_buckets * 100); | ||||
|  | ||||
|     hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS; | ||||
|     hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT; | ||||
|     if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) { | ||||
|         hgram_opts |= QDIST_PR_NODECIMAL; | ||||
|     } | ||||
|     hgram = qdist_pr(&hst.occupancy, 10, hgram_opts); | ||||
|     g_string_append_printf(buf, "TB hash occupancy   %0.2f%% avg chain occ. " | ||||
|                            "Histogram: %s\n", | ||||
|                            qdist_avg(&hst.occupancy) * 100, hgram); | ||||
|     g_free(hgram); | ||||
|  | ||||
|     hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS; | ||||
|     hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain); | ||||
|     if (hgram_bins > 10) { | ||||
|         hgram_bins = 10; | ||||
|     } else { | ||||
|         hgram_bins = 0; | ||||
|         hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE; | ||||
|     } | ||||
|     hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts); | ||||
|     g_string_append_printf(buf, "TB hash avg chain   %0.3f buckets. " | ||||
|                            "Histogram: %s\n", | ||||
|                            qdist_avg(&hst.chain), hgram); | ||||
|     g_free(hgram); | ||||
| } | ||||
|  | ||||
| struct tb_tree_stats { | ||||
|     size_t nb_tbs; | ||||
|     size_t host_size; | ||||
|     size_t target_size; | ||||
|     size_t max_target_size; | ||||
|     size_t direct_jmp_count; | ||||
|     size_t direct_jmp2_count; | ||||
|     size_t cross_page; | ||||
| }; | ||||
|  | ||||
| static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data) | ||||
| { | ||||
|     const TranslationBlock *tb = value; | ||||
|     struct tb_tree_stats *tst = data; | ||||
|  | ||||
|     tst->nb_tbs++; | ||||
|     tst->host_size += tb->tc.size; | ||||
|     tst->target_size += tb->size; | ||||
|     if (tb->size > tst->max_target_size) { | ||||
|         tst->max_target_size = tb->size; | ||||
|     } | ||||
|     if (tb_page_addr1(tb) != -1) { | ||||
|         tst->cross_page++; | ||||
|     } | ||||
|     if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) { | ||||
|         tst->direct_jmp_count++; | ||||
|         if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) { | ||||
|             tst->direct_jmp2_count++; | ||||
|         } | ||||
|     } | ||||
|     return false; | ||||
| } | ||||
|  | ||||
| void dump_exec_info(GString *buf) | ||||
| { | ||||
|     struct tb_tree_stats tst = {}; | ||||
|     struct qht_stats hst; | ||||
|     size_t nb_tbs, flush_full, flush_part, flush_elide; | ||||
|  | ||||
|     tcg_tb_foreach(tb_tree_stats_iter, &tst); | ||||
|     nb_tbs = tst.nb_tbs; | ||||
|     /* XXX: avoid using doubles ? */ | ||||
|     g_string_append_printf(buf, "Translation buffer state:\n"); | ||||
|     /* | ||||
|      * Report total code size including the padding and TB structs; | ||||
|      * otherwise users might think "-accel tcg,tb-size" is not honoured. | ||||
|      * For avg host size we use the precise numbers from tb_tree_stats though. | ||||
|      */ | ||||
|     g_string_append_printf(buf, "gen code size       %zu/%zu\n", | ||||
|                            tcg_code_size(), tcg_code_capacity()); | ||||
|     g_string_append_printf(buf, "TB count            %zu\n", nb_tbs); | ||||
|     g_string_append_printf(buf, "TB avg target size  %zu max=%zu bytes\n", | ||||
|                            nb_tbs ? tst.target_size / nb_tbs : 0, | ||||
|                            tst.max_target_size); | ||||
|     g_string_append_printf(buf, "TB avg host size    %zu bytes " | ||||
|                            "(expansion ratio: %0.1f)\n", | ||||
|                            nb_tbs ? tst.host_size / nb_tbs : 0, | ||||
|                            tst.target_size ? | ||||
|                            (double)tst.host_size / tst.target_size : 0); | ||||
|     g_string_append_printf(buf, "cross page TB count %zu (%zu%%)\n", | ||||
|                            tst.cross_page, | ||||
|                            nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0); | ||||
|     g_string_append_printf(buf, "direct jump count   %zu (%zu%%) " | ||||
|                            "(2 jumps=%zu %zu%%)\n", | ||||
|                            tst.direct_jmp_count, | ||||
|                            nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0, | ||||
|                            tst.direct_jmp2_count, | ||||
|                            nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0); | ||||
|  | ||||
|     qht_statistics_init(&tb_ctx.htable, &hst); | ||||
|     print_qht_statistics(hst, buf); | ||||
|     qht_statistics_destroy(&hst); | ||||
|  | ||||
|     g_string_append_printf(buf, "\nStatistics:\n"); | ||||
|     g_string_append_printf(buf, "TB flush count      %u\n", | ||||
|                            qatomic_read(&tb_ctx.tb_flush_count)); | ||||
|     g_string_append_printf(buf, "TB invalidate count %u\n", | ||||
|                            qatomic_read(&tb_ctx.tb_phys_invalidate_count)); | ||||
|  | ||||
|     tlb_flush_counts(&flush_full, &flush_part, &flush_elide); | ||||
|     g_string_append_printf(buf, "TLB full flushes    %zu\n", flush_full); | ||||
|     g_string_append_printf(buf, "TLB partial flushes %zu\n", flush_part); | ||||
|     g_string_append_printf(buf, "TLB elided flushes  %zu\n", flush_elide); | ||||
|     tcg_dump_info(buf); | ||||
| } | ||||
|  | ||||
| #else /* CONFIG_USER_ONLY */ | ||||
|  | ||||
| void cpu_interrupt(CPUState *cpu, int mask) | ||||
| { | ||||
|     g_assert(qemu_mutex_iothread_locked()); | ||||
|     cpu->interrupt_request |= mask; | ||||
|     qatomic_set(&cpu->neg.icount_decr.u16.high, -1); | ||||
|     qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1); | ||||
| } | ||||
|  | ||||
| #endif /* CONFIG_USER_ONLY */ | ||||
| @@ -673,3 +783,11 @@ void tcg_flush_jmp_cache(CPUState *cpu) | ||||
|         qatomic_set(&jc->array[i].tb, NULL); | ||||
|     } | ||||
| } | ||||
|  | ||||
| /* This is a wrapper for common code that cannot use CONFIG_SOFTMMU */ | ||||
| void tcg_flush_softmmu_tlb(CPUState *cs) | ||||
| { | ||||
| #ifdef CONFIG_SOFTMMU | ||||
|     tlb_flush(cs); | ||||
| #endif | ||||
| } | ||||
|   | ||||
| @@ -8,111 +8,17 @@ | ||||
|  */ | ||||
|  | ||||
| #include "qemu/osdep.h" | ||||
| #include "qemu/log.h" | ||||
| #include "qemu/error-report.h" | ||||
| #include "tcg/tcg.h" | ||||
| #include "tcg/tcg-op.h" | ||||
| #include "exec/exec-all.h" | ||||
| #include "exec/gen-icount.h" | ||||
| #include "exec/log.h" | ||||
| #include "exec/translator.h" | ||||
| #include "exec/plugin-gen.h" | ||||
| #include "tcg/tcg-op-common.h" | ||||
| #include "internal-target.h" | ||||
| #include "exec/replay-core.h" | ||||
|  | ||||
| static void set_can_do_io(DisasContextBase *db, bool val) | ||||
| { | ||||
|     if (db->saved_can_do_io != val) { | ||||
|         db->saved_can_do_io = val; | ||||
|  | ||||
|         QEMU_BUILD_BUG_ON(sizeof_field(CPUState, neg.can_do_io) != 1); | ||||
|         tcg_gen_st8_i32(tcg_constant_i32(val), tcg_env, | ||||
|                         offsetof(ArchCPU, parent_obj.neg.can_do_io) - | ||||
|                         offsetof(ArchCPU, env)); | ||||
|     } | ||||
| } | ||||
|  | ||||
| bool translator_io_start(DisasContextBase *db) | ||||
| { | ||||
|     set_can_do_io(db, true); | ||||
|  | ||||
|     /* | ||||
|      * Ensure that this instruction will be the last in the TB. | ||||
|      * The target may override this to something more forceful. | ||||
|      */ | ||||
|     if (db->is_jmp == DISAS_NEXT) { | ||||
|         db->is_jmp = DISAS_TOO_MANY; | ||||
|     } | ||||
|     return true; | ||||
| } | ||||
|  | ||||
| static TCGOp *gen_tb_start(DisasContextBase *db, uint32_t cflags) | ||||
| { | ||||
|     TCGv_i32 count = NULL; | ||||
|     TCGOp *icount_start_insn = NULL; | ||||
|  | ||||
|     if ((cflags & CF_USE_ICOUNT) || !(cflags & CF_NOIRQ)) { | ||||
|         count = tcg_temp_new_i32(); | ||||
|         tcg_gen_ld_i32(count, tcg_env, | ||||
|                        offsetof(ArchCPU, parent_obj.neg.icount_decr.u32) | ||||
|                        - offsetof(ArchCPU, env)); | ||||
|     } | ||||
|  | ||||
|     if (cflags & CF_USE_ICOUNT) { | ||||
|         /* | ||||
|          * We emit a sub with a dummy immediate argument. Keep the insn index | ||||
|          * of the sub so that we later (when we know the actual insn count) | ||||
|          * can update the argument with the actual insn count. | ||||
|          */ | ||||
|         tcg_gen_sub_i32(count, count, tcg_constant_i32(0)); | ||||
|         icount_start_insn = tcg_last_op(); | ||||
|     } | ||||
|  | ||||
|     /* | ||||
|      * Emit the check against icount_decr.u32 to see if we should exit | ||||
|      * unless we suppress the check with CF_NOIRQ. If we are using | ||||
|      * icount and have suppressed interruption the higher level code | ||||
|      * should have ensured we don't run more instructions than the | ||||
|      * budget. | ||||
|      */ | ||||
|     if (cflags & CF_NOIRQ) { | ||||
|         tcg_ctx->exitreq_label = NULL; | ||||
|     } else { | ||||
|         tcg_ctx->exitreq_label = gen_new_label(); | ||||
|         tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, tcg_ctx->exitreq_label); | ||||
|     } | ||||
|  | ||||
|     if (cflags & CF_USE_ICOUNT) { | ||||
|         tcg_gen_st16_i32(count, tcg_env, | ||||
|                          offsetof(ArchCPU, parent_obj.neg.icount_decr.u16.low) | ||||
|                          - offsetof(ArchCPU, env)); | ||||
|     } | ||||
|  | ||||
|     /* | ||||
|      * cpu->neg.can_do_io is set automatically here at the beginning of | ||||
|      * each translation block.  The cost is minimal, plus it would be | ||||
|      * very easy to forget doing it in the translator. | ||||
|      */ | ||||
|     set_can_do_io(db, db->max_insns == 1 && (cflags & CF_LAST_IO)); | ||||
|  | ||||
|     return icount_start_insn; | ||||
| } | ||||
|  | ||||
| static void gen_tb_end(const TranslationBlock *tb, uint32_t cflags, | ||||
|                        TCGOp *icount_start_insn, int num_insns) | ||||
| { | ||||
|     if (cflags & CF_USE_ICOUNT) { | ||||
|         /* | ||||
|          * Update the num_insn immediate parameter now that we know | ||||
|          * the actual insn count. | ||||
|          */ | ||||
|         tcg_set_insn_param(icount_start_insn, 2, | ||||
|                            tcgv_i32_arg(tcg_constant_i32(num_insns))); | ||||
|     } | ||||
|  | ||||
|     if (tcg_ctx->exitreq_label) { | ||||
|         gen_set_label(tcg_ctx->exitreq_label); | ||||
|         tcg_gen_exit_tb(tb, TB_EXIT_REQUESTED); | ||||
|     } | ||||
| } | ||||
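The prologue emitted by gen_tb_start() in this hunk loads icount_decr.u32 and leaves the TB when the value is negative, while cpu_interrupt() (earlier in this diff) only stores -1 into the high 16 bits. A hedged toy model of that union trick (the IcountDecr layout below is simplified for illustration, not QEMU's real struct):

    /* Hedged toy model: the TB prologue tests the whole 32-bit word, so
     * writing -1 into the high half forces an exit without touching the
     * low-half instruction budget. */
    #include <assert.h>
    #include <stdint.h>

    typedef union {
        int32_t u32;
        struct {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
            int16_t high, low;
    #else
            int16_t low, high;
    #endif
        } u16;
    } IcountDecr;

    int main(void)
    {
        IcountDecr d = { .u32 = 0 };

        d.u16.low = 100;      /* remaining instruction budget */
        assert(d.u32 >= 0);   /* prologue check passes, TB keeps running */
        d.u16.high = -1;      /* interrupt pending */
        assert(d.u32 < 0);    /* next prologue check exits the TB */
        return 0;
    }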
|  | ||||
| bool translator_use_goto_tb(DisasContextBase *db, vaddr dest) | ||||
| bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest) | ||||
| { | ||||
|     /* Suppress goto_tb if requested. */ | ||||
|     if (tb_cflags(db->tb) & CF_NO_GOTO_TB) { | ||||
| @@ -124,11 +30,10 @@ bool translator_use_goto_tb(DisasContextBase *db, vaddr dest) | ||||
| } | ||||
|  | ||||
| void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns, | ||||
|                      vaddr pc, void *host_pc, const TranslatorOps *ops, | ||||
|                      DisasContextBase *db) | ||||
|                      target_ulong pc, void *host_pc, | ||||
|                      const TranslatorOps *ops, DisasContextBase *db) | ||||
| { | ||||
|     uint32_t cflags = tb_cflags(tb); | ||||
|     TCGOp *icount_start_insn; | ||||
|     bool plugin_enabled; | ||||
|  | ||||
|     /* Initialize DisasContext */ | ||||
| @@ -139,26 +44,22 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns, | ||||
|     db->num_insns = 0; | ||||
|     db->max_insns = *max_insns; | ||||
|     db->singlestep_enabled = cflags & CF_SINGLE_STEP; | ||||
|     db->saved_can_do_io = -1; | ||||
|     db->host_addr[0] = host_pc; | ||||
|     db->host_addr[1] = NULL; | ||||
|  | ||||
| #ifdef CONFIG_USER_ONLY | ||||
|     page_protect(pc); | ||||
| #endif | ||||
|  | ||||
|     ops->init_disas_context(db, cpu); | ||||
|     tcg_debug_assert(db->is_jmp == DISAS_NEXT);  /* no early exit */ | ||||
|  | ||||
|     /* Start translating.  */ | ||||
|     icount_start_insn = gen_tb_start(db, cflags); | ||||
|     gen_tb_start(db->tb); | ||||
|     ops->tb_start(db, cpu); | ||||
|     tcg_debug_assert(db->is_jmp == DISAS_NEXT);  /* no early exit */ | ||||
|  | ||||
|     if (cflags & CF_MEMI_ONLY) { | ||||
|         /* We should only see CF_MEMI_ONLY for io_recompile. */ | ||||
|         assert(cflags & CF_LAST_IO); | ||||
|         plugin_enabled = plugin_gen_tb_start(cpu, db, true); | ||||
|     } else { | ||||
|         plugin_enabled = plugin_gen_tb_start(cpu, db, false); | ||||
|     } | ||||
|     db->plugin_enabled = plugin_enabled; | ||||
|     plugin_enabled = plugin_gen_tb_start(cpu, db, cflags & CF_MEMI_ONLY); | ||||
|  | ||||
|     while (true) { | ||||
|         *max_insns = ++db->num_insns; | ||||
| @@ -175,9 +76,13 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns, | ||||
|            the next instruction.  */ | ||||
|         if (db->num_insns == db->max_insns && (cflags & CF_LAST_IO)) { | ||||
|             /* Accept I/O on the last instruction.  */ | ||||
|             set_can_do_io(db, true); | ||||
|             gen_io_start(); | ||||
|             ops->translate_insn(db, cpu); | ||||
|         } else { | ||||
|             /* we should only see CF_MEMI_ONLY for io_recompile */ | ||||
|             tcg_debug_assert(!(cflags & CF_MEMI_ONLY)); | ||||
|             ops->translate_insn(db, cpu); | ||||
|         } | ||||
|         ops->translate_insn(db, cpu); | ||||
|  | ||||
|         /* | ||||
|          * We can't instrument after instructions that change control | ||||
| @@ -207,16 +112,17 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns, | ||||
|  | ||||
|     /* Emit code to exit the TB, as indicated by db->is_jmp.  */ | ||||
|     ops->tb_stop(db, cpu); | ||||
|     gen_tb_end(tb, cflags, icount_start_insn, db->num_insns); | ||||
|     gen_tb_end(db->tb, db->num_insns); | ||||
|  | ||||
|     if (plugin_enabled) { | ||||
|         plugin_gen_tb_end(cpu, db->num_insns); | ||||
|         plugin_gen_tb_end(cpu); | ||||
|     } | ||||
|  | ||||
|     /* The disas_log hook may use these values rather than recompute.  */ | ||||
|     tb->size = db->pc_next - db->pc_first; | ||||
|     tb->icount = db->num_insns; | ||||
|  | ||||
| #ifdef DEBUG_DISAS | ||||
|     if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) | ||||
|         && qemu_log_in_addr_range(db->pc_first)) { | ||||
|         FILE *logfile = qemu_log_trylock(); | ||||
| @@ -227,13 +133,14 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns, | ||||
|             qemu_log_unlock(logfile); | ||||
|         } | ||||
|     } | ||||
| #endif | ||||
| } | ||||
|  | ||||
| static void *translator_access(CPUArchState *env, DisasContextBase *db, | ||||
|                                vaddr pc, size_t len) | ||||
|                                target_ulong pc, size_t len) | ||||
| { | ||||
|     void *host; | ||||
|     vaddr base, end; | ||||
|     target_ulong base, end; | ||||
|     TranslationBlock *tb; | ||||
|  | ||||
|     tb = db->tb; | ||||
| @@ -251,36 +158,22 @@ static void *translator_access(CPUArchState *env, DisasContextBase *db, | ||||
|         host = db->host_addr[1]; | ||||
|         base = TARGET_PAGE_ALIGN(db->pc_first); | ||||
|         if (host == NULL) { | ||||
|             tb_page_addr_t page0, old_page1, new_page1; | ||||
|  | ||||
|             new_page1 = get_page_addr_code_hostp(env, base, &db->host_addr[1]); | ||||
|             tb_page_addr_t phys_page = | ||||
|                 get_page_addr_code_hostp(env, base, &db->host_addr[1]); | ||||
|  | ||||
|             /* | ||||
|              * If the second page is MMIO, treat as if the first page | ||||
|              * was MMIO as well, so that we do not cache the TB. | ||||
|              */ | ||||
|             if (unlikely(new_page1 == -1)) { | ||||
|                 tb_unlock_pages(tb); | ||||
|             if (unlikely(phys_page == -1)) { | ||||
|                 tb_set_page_addr0(tb, -1); | ||||
|                 return NULL; | ||||
|             } | ||||
|  | ||||
|             /* | ||||
|              * If this is not the first time around, and page1 matches, | ||||
|              * then we already have the page locked.  Alternately, we're | ||||
|              * not doing anything to prevent the PTE from changing, so | ||||
|              * we might wind up with a different page, requiring us to | ||||
|              * re-do the locking. | ||||
|              */ | ||||
|             old_page1 = tb_page_addr1(tb); | ||||
|             if (likely(new_page1 != old_page1)) { | ||||
|                 page0 = tb_page_addr0(tb); | ||||
|                 if (unlikely(old_page1 != -1)) { | ||||
|                     tb_unlock_page1(page0, old_page1); | ||||
|                 } | ||||
|                 tb_set_page_addr1(tb, new_page1); | ||||
|                 tb_lock_page1(page0, new_page1); | ||||
|             } | ||||
|             tb_set_page_addr1(tb, phys_page); | ||||
| #ifdef CONFIG_USER_ONLY | ||||
|             page_protect(end); | ||||
| #endif | ||||
|             host = db->host_addr[1]; | ||||
|         } | ||||
|  | ||||
| @@ -294,27 +187,6 @@ static void *translator_access(CPUArchState *env, DisasContextBase *db, | ||||
|     return host + (pc - base); | ||||
| } | ||||
|  | ||||
| static void plugin_insn_append(abi_ptr pc, const void *from, size_t size) | ||||
| { | ||||
| #ifdef CONFIG_PLUGIN | ||||
|     struct qemu_plugin_insn *insn = tcg_ctx->plugin_insn; | ||||
|     abi_ptr off; | ||||
|  | ||||
|     if (insn == NULL) { | ||||
|         return; | ||||
|     } | ||||
|     off = pc - insn->vaddr; | ||||
|     if (off < insn->data->len) { | ||||
|         g_byte_array_set_size(insn->data, off); | ||||
|     } else if (off > insn->data->len) { | ||||
|         /* we have an unexpected gap */ | ||||
|         g_assert_not_reached(); | ||||
|     } | ||||
|  | ||||
|     insn->data = g_byte_array_append(insn->data, from, size); | ||||
| #endif | ||||
| } | ||||
|  | ||||
| uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, abi_ptr pc) | ||||
| { | ||||
|     uint8_t ret; | ||||
| @@ -373,8 +245,3 @@ uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, abi_ptr pc) | ||||
|     plugin_insn_append(pc, &plug, sizeof(ret)); | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| void translator_fake_ldb(uint8_t insn8, abi_ptr pc) | ||||
| { | ||||
|     plugin_insn_append(pc, &insn8, sizeof(insn8)); | ||||
| } | ||||
|   | ||||
| @@ -2,6 +2,8 @@ | ||||
| #include "hw/core/cpu.h" | ||||
| #include "exec/replay-core.h" | ||||
|  | ||||
| bool enable_cpu_pm = false; | ||||
|  | ||||
| void cpu_resume(CPUState *cpu) | ||||
| { | ||||
| } | ||||
| @@ -14,10 +16,6 @@ void qemu_init_vcpu(CPUState *cpu) | ||||
| { | ||||
| } | ||||
|  | ||||
| void cpu_exec_reset_hold(CPUState *cpu) | ||||
| { | ||||
| } | ||||
|  | ||||
| /* User mode emulation does not support record/replay yet.  */ | ||||
|  | ||||
| bool replay_exception(void) | ||||
|   | ||||
| @@ -29,8 +29,7 @@ | ||||
| #include "qemu/atomic128.h" | ||||
| #include "trace/trace-root.h" | ||||
| #include "tcg/tcg-ldst.h" | ||||
| #include "internal-common.h" | ||||
| #include "internal-target.h" | ||||
| #include "internal.h" | ||||
|  | ||||
| __thread uintptr_t helper_retaddr; | ||||
|  | ||||
| @@ -145,7 +144,7 @@ typedef struct PageFlagsNode { | ||||
|  | ||||
| static IntervalTreeRoot pageflags_root; | ||||
|  | ||||
| static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last) | ||||
| static PageFlagsNode *pageflags_find(target_ulong start, target_long last) | ||||
| { | ||||
|     IntervalTreeNode *n; | ||||
|  | ||||
| @@ -154,7 +153,7 @@ static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last) | ||||
| } | ||||
|  | ||||
| static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start, | ||||
|                                      target_ulong last) | ||||
|                                      target_long last) | ||||
| { | ||||
|     IntervalTreeNode *n; | ||||
|  | ||||
| @@ -481,22 +480,24 @@ static bool pageflags_set_clear(target_ulong start, target_ulong last, | ||||
|  * The flag PAGE_WRITE_ORG is positioned automatically depending | ||||
|  * on PAGE_WRITE.  The mmap_lock should already be held. | ||||
|  */ | ||||
| void page_set_flags(target_ulong start, target_ulong last, int flags) | ||||
| void page_set_flags(target_ulong start, target_ulong end, int flags) | ||||
| { | ||||
|     target_ulong last; | ||||
|     bool reset = false; | ||||
|     bool inval_tb = false; | ||||
|  | ||||
|     /* This function should never be called with addresses outside the | ||||
|        guest address space.  If this assert fires, it probably indicates | ||||
|        a missing call to h2g_valid.  */ | ||||
|     assert(start <= last); | ||||
|     assert(last <= GUEST_ADDR_MAX); | ||||
|     assert(start < end); | ||||
|     assert(end - 1 <= GUEST_ADDR_MAX); | ||||
|     /* Only set PAGE_ANON with new mappings. */ | ||||
|     assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET)); | ||||
|     assert_memory_lock(); | ||||
|  | ||||
|     start &= TARGET_PAGE_MASK; | ||||
|     last |= ~TARGET_PAGE_MASK; | ||||
|     start = start & TARGET_PAGE_MASK; | ||||
|     end = TARGET_PAGE_ALIGN(end); | ||||
|     last = end - 1; | ||||
|  | ||||
|     if (!(flags & PAGE_VALID)) { | ||||
|         flags = 0; | ||||
| @@ -509,7 +510,7 @@ void page_set_flags(target_ulong start, target_ulong last, int flags) | ||||
|     } | ||||
|  | ||||
|     if (!flags || reset) { | ||||
|         page_reset_target_data(start, last); | ||||
|         page_reset_target_data(start, end); | ||||
|         inval_tb |= pageflags_unset(start, last); | ||||
|     } | ||||
|     if (flags) { | ||||
| @@ -517,23 +518,23 @@ void page_set_flags(target_ulong start, target_ulong last, int flags) | ||||
|                                         ~(reset ? 0 : PAGE_STICKY)); | ||||
|     } | ||||
|     if (inval_tb) { | ||||
|         tb_invalidate_phys_range(start, last); | ||||
|         tb_invalidate_phys_range(start, end); | ||||
|     } | ||||
| } | ||||
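Both versions of page_set_flags() in this hunk round the requested range out to whole pages, one phrased with an inclusive last byte and one with an exclusive end. A hedged sketch of the inclusive rounding, assuming a 4 KiB page size for the example:

    /* Hedged sketch of inclusive page rounding (TARGET_PAGE_SIZE assumed 4 KiB). */
    #include <assert.h>
    #include <stdint.h>

    #define PAGE_BITS 12
    #define PAGE_MASK (~(uint64_t)((UINT64_C(1) << PAGE_BITS) - 1))

    int main(void)
    {
        uint64_t start = 0x1234, last = 0x5678;

        start &= PAGE_MASK;    /* round down to the page start:     0x1000 */
        last  |= ~PAGE_MASK;   /* round up to the page's last byte: 0x5fff */
        assert(start == 0x1000 && last == 0x5fff);
        return 0;
    }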
|  | ||||
| bool page_check_range(target_ulong start, target_ulong len, int flags) | ||||
| int page_check_range(target_ulong start, target_ulong len, int flags) | ||||
| { | ||||
|     target_ulong last; | ||||
|     int locked;  /* tri-state: =0: unlocked, +1: global, -1: local */ | ||||
|     bool ret; | ||||
|     int ret; | ||||
|  | ||||
|     if (len == 0) { | ||||
|         return true;  /* trivial length */ | ||||
|         return 0;  /* trivial length */ | ||||
|     } | ||||
|  | ||||
|     last = start + len - 1; | ||||
|     if (last < start) { | ||||
|         return false; /* wrap around */ | ||||
|         return -1; /* wrap around */ | ||||
|     } | ||||
|  | ||||
|     locked = have_mmap_lock(); | ||||
| @@ -552,33 +553,33 @@ bool page_check_range(target_ulong start, target_ulong len, int flags) | ||||
|                 p = pageflags_find(start, last); | ||||
|             } | ||||
|             if (!p) { | ||||
|                 ret = false; /* entire region invalid */ | ||||
|                 ret = -1; /* entire region invalid */ | ||||
|                 break; | ||||
|             } | ||||
|         } | ||||
|         if (start < p->itree.start) { | ||||
|             ret = false; /* initial bytes invalid */ | ||||
|             ret = -1; /* initial bytes invalid */ | ||||
|             break; | ||||
|         } | ||||
|  | ||||
|         missing = flags & ~p->flags; | ||||
|         if (missing & ~PAGE_WRITE) { | ||||
|             ret = false; /* page doesn't match */ | ||||
|         if (missing & PAGE_READ) { | ||||
|             ret = -1; /* page not readable */ | ||||
|             break; | ||||
|         } | ||||
|         if (missing & PAGE_WRITE) { | ||||
|             if (!(p->flags & PAGE_WRITE_ORG)) { | ||||
|                 ret = false; /* page not writable */ | ||||
|                 ret = -1; /* page not writable */ | ||||
|                 break; | ||||
|             } | ||||
|             /* Asking about writable, but has been protected: undo. */ | ||||
|             if (!page_unprotect(start, 0)) { | ||||
|                 ret = false; | ||||
|                 ret = -1; | ||||
|                 break; | ||||
|             } | ||||
|             /* TODO: page_unprotect should take a range, not a single page. */ | ||||
|             if (last - start < TARGET_PAGE_SIZE) { | ||||
|                 ret = true; /* ok */ | ||||
|                 ret = 0; /* ok */ | ||||
|                 break; | ||||
|             } | ||||
|             start += TARGET_PAGE_SIZE; | ||||
| @@ -586,7 +587,7 @@ bool page_check_range(target_ulong start, target_ulong len, int flags) | ||||
|         } | ||||
|  | ||||
|         if (last <= p->itree.last) { | ||||
|             ret = true; /* ok */ | ||||
|             ret = 0; /* ok */ | ||||
|             break; | ||||
|         } | ||||
|         start = p->itree.last + 1; | ||||
| @@ -599,54 +600,6 @@ bool page_check_range(target_ulong start, target_ulong len, int flags) | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| bool page_check_range_empty(target_ulong start, target_ulong last) | ||||
| { | ||||
|     assert(last >= start); | ||||
|     assert_memory_lock(); | ||||
|     return pageflags_find(start, last) == NULL; | ||||
| } | ||||
|  | ||||
| target_ulong page_find_range_empty(target_ulong min, target_ulong max, | ||||
|                                    target_ulong len, target_ulong align) | ||||
| { | ||||
|     target_ulong len_m1, align_m1; | ||||
|  | ||||
|     assert(min <= max); | ||||
|     assert(max <= GUEST_ADDR_MAX); | ||||
|     assert(len != 0); | ||||
|     assert(is_power_of_2(align)); | ||||
|     assert_memory_lock(); | ||||
|  | ||||
|     len_m1 = len - 1; | ||||
|     align_m1 = align - 1; | ||||
|  | ||||
|     /* Iteratively narrow the search region. */ | ||||
|     while (1) { | ||||
|         PageFlagsNode *p; | ||||
|  | ||||
|         /* Align min and double-check there's enough space remaining. */ | ||||
|         min = (min + align_m1) & ~align_m1; | ||||
|         if (min > max) { | ||||
|             return -1; | ||||
|         } | ||||
|         if (len_m1 > max - min) { | ||||
|             return -1; | ||||
|         } | ||||
|  | ||||
|         p = pageflags_find(min, min + len_m1); | ||||
|         if (p == NULL) { | ||||
|             /* Found! */ | ||||
|             return min; | ||||
|         } | ||||
|         if (max <= p->itree.last) { | ||||
|             /* Existing allocation fills the remainder of the search region. */ | ||||
|             return -1; | ||||
|         } | ||||
|         /* Skip across existing allocation. */ | ||||
|         min = p->itree.last + 1; | ||||
|     } | ||||
| } | ||||
|  | ||||
| void page_protect(tb_page_addr_t address) | ||||
| { | ||||
|     PageFlagsNode *p; | ||||
| @@ -770,7 +723,7 @@ int page_unprotect(target_ulong address, uintptr_t pc) | ||||
|     return current_tb_invalidated ? 2 : 1; | ||||
| } | ||||
|  | ||||
| static int probe_access_internal(CPUArchState *env, vaddr addr, | ||||
| static int probe_access_internal(CPUArchState *env, target_ulong addr, | ||||
|                                  int fault_size, MMUAccessType access_type, | ||||
|                                  bool nonfault, uintptr_t ra) | ||||
| { | ||||
| @@ -794,10 +747,6 @@ static int probe_access_internal(CPUArchState *env, vaddr addr, | ||||
|     if (guest_addr_valid_untagged(addr)) { | ||||
|         int page_flags = page_get_flags(addr); | ||||
|         if (page_flags & acc_flag) { | ||||
|             if ((acc_flag == PAGE_READ || acc_flag == PAGE_WRITE) | ||||
|                 && cpu_plugin_mem_cbs_enabled(env_cpu(env))) { | ||||
|                 return TLB_MMIO; | ||||
|             } | ||||
|             return 0; /* success */ | ||||
|         } | ||||
|         maperr = !(page_flags & PAGE_VALID); | ||||
| @@ -812,7 +761,7 @@ static int probe_access_internal(CPUArchState *env, vaddr addr, | ||||
|     cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra); | ||||
| } | ||||
|  | ||||
| int probe_access_flags(CPUArchState *env, vaddr addr, int size, | ||||
| int probe_access_flags(CPUArchState *env, target_ulong addr, int size, | ||||
|                        MMUAccessType access_type, int mmu_idx, | ||||
|                        bool nonfault, void **phost, uintptr_t ra) | ||||
| { | ||||
| @@ -820,23 +769,23 @@ int probe_access_flags(CPUArchState *env, vaddr addr, int size, | ||||
|  | ||||
|     g_assert(-(addr | TARGET_PAGE_MASK) >= size); | ||||
|     flags = probe_access_internal(env, addr, size, access_type, nonfault, ra); | ||||
|     *phost = (flags & TLB_INVALID_MASK) ? NULL : g2h(env_cpu(env), addr); | ||||
|     *phost = flags ? NULL : g2h(env_cpu(env), addr); | ||||
|     return flags; | ||||
| } | ||||
|  | ||||
| void *probe_access(CPUArchState *env, vaddr addr, int size, | ||||
| void *probe_access(CPUArchState *env, target_ulong addr, int size, | ||||
|                    MMUAccessType access_type, int mmu_idx, uintptr_t ra) | ||||
| { | ||||
|     int flags; | ||||
|  | ||||
|     g_assert(-(addr | TARGET_PAGE_MASK) >= size); | ||||
|     flags = probe_access_internal(env, addr, size, access_type, false, ra); | ||||
|     g_assert((flags & ~TLB_MMIO) == 0); | ||||
|     g_assert(flags == 0); | ||||
|  | ||||
|     return size ? g2h(env_cpu(env), addr) : NULL; | ||||
| } | ||||
|  | ||||
| tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr, | ||||
| tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, | ||||
|                                         void **hostp) | ||||
| { | ||||
|     int flags; | ||||
| @@ -867,14 +816,15 @@ typedef struct TargetPageDataNode { | ||||
|  | ||||
| static IntervalTreeRoot targetdata_root; | ||||
|  | ||||
| void page_reset_target_data(target_ulong start, target_ulong last) | ||||
| void page_reset_target_data(target_ulong start, target_ulong end) | ||||
| { | ||||
|     IntervalTreeNode *n, *next; | ||||
|     target_ulong last; | ||||
|  | ||||
|     assert_memory_lock(); | ||||
|  | ||||
|     start &= TARGET_PAGE_MASK; | ||||
|     last |= ~TARGET_PAGE_MASK; | ||||
|     start = start & TARGET_PAGE_MASK; | ||||
|     last = TARGET_PAGE_ALIGN(end) - 1; | ||||
|  | ||||
|     for (n = interval_tree_iter_first(&targetdata_root, start, last), | ||||
|          next = n ? interval_tree_iter_next(n, start, last) : NULL; | ||||
| @@ -937,188 +887,299 @@ void *page_get_target_data(target_ulong address) | ||||
|     return t->data[(page - region) >> TARGET_PAGE_BITS]; | ||||
| } | ||||
| #else | ||||
| void page_reset_target_data(target_ulong start, target_ulong last) { } | ||||
| void page_reset_target_data(target_ulong start, target_ulong end) { } | ||||
| #endif /* TARGET_PAGE_DATA_SIZE */ | ||||
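A small worked illustration (not part of the patch) of the page rounding done by both sides of the hunk above, assuming 4KiB pages for the numbers:

/*
 * With 4KiB pages, TARGET_PAGE_MASK == ~0xfffull, so a request covering
 * guest bytes 0x1234..0x5677 is widened to whole pages on both sides:
 *     start: 0x1234 &  TARGET_PAGE_MASK           -> 0x1000
 *     last:  0x5677 | ~TARGET_PAGE_MASK           -> 0x5fff
 *     (equivalently TARGET_PAGE_ALIGN(0x5678) - 1 -> 0x5fff)
 * so every page that overlaps the request has its per-page data dropped.
 */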
|  | ||||
| /* The system-mode versions of these helpers are in cputlb.c.  */ | ||||
| /* The softmmu versions of these helpers are in cputlb.c.  */ | ||||
|  | ||||
| static void *cpu_mmu_lookup(CPUState *cpu, vaddr addr, | ||||
|                             MemOp mop, uintptr_t ra, MMUAccessType type) | ||||
| /* | ||||
|  * Verify that we have passed the correct MemOp to the correct function. | ||||
|  * | ||||
|  * We could present one function to target code, and dispatch based on | ||||
|  * the MemOp, but so far we have worked hard to avoid an indirect function | ||||
|  * call along the memory path. | ||||
|  */ | ||||
| static void validate_memop(MemOpIdx oi, MemOp expected) | ||||
| { | ||||
| #ifdef CONFIG_DEBUG_TCG | ||||
|     MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP); | ||||
|     assert(have == expected); | ||||
| #endif | ||||
| } | ||||
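A hedged caller-side sketch (not from this patch) of the contract validate_memop() enforces: the MemOpIdx handed to a size- and endian-specific entry point must carry matching MO_SIZE/MO_BSWAP bits. make_memop_idx() is the usual constructor; the helper name and the use of MMU_USER_IDX are assumptions for illustration.

/* Illustrative target helper building the oi that cpu_ldw_be_mmu() expects. */
static uint16_t example_load_be16(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_BEUW | MO_ALIGN, MMU_USER_IDX);

    /*
     * Under CONFIG_DEBUG_TCG, validate_memop(oi, MO_BEUW) above asserts that
     * the size and byte-swap bits match this 16-bit big-endian entry point;
     * MO_ALIGN lies outside the checked mask and does not affect the check.
     */
    return cpu_ldw_be_mmu(env, addr, oi, ra);
}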
|  | ||||
| void helper_unaligned_ld(CPUArchState *env, target_ulong addr) | ||||
| { | ||||
|     cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC()); | ||||
| } | ||||
|  | ||||
| void helper_unaligned_st(CPUArchState *env, target_ulong addr) | ||||
| { | ||||
|     cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC()); | ||||
| } | ||||
|  | ||||
| static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr, | ||||
|                             MemOpIdx oi, uintptr_t ra, MMUAccessType type) | ||||
| { | ||||
|     MemOp mop = get_memop(oi); | ||||
|     int a_bits = get_alignment_bits(mop); | ||||
|     void *ret; | ||||
|  | ||||
|     /* Enforce guest required alignment.  */ | ||||
|     if (unlikely(addr & ((1 << a_bits) - 1))) { | ||||
|         cpu_loop_exit_sigbus(cpu, addr, type, ra); | ||||
|         cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra); | ||||
|     } | ||||
|  | ||||
|     ret = g2h(cpu, addr); | ||||
|     ret = g2h(env_cpu(env), addr); | ||||
|     set_helper_retaddr(ra); | ||||
|     return ret; | ||||
| } | ||||
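A brief note, not from the patch, on the alignment mask used just above:

/*
 * Example: a 4-byte access tagged MO_ALIGN has get_alignment_bits() == 2,
 * so the mask is (1 << 2) - 1 == 0x3; any address with either low bit set
 * raises SIGBUS here before a host pointer is ever formed.
 */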
|  | ||||
| #include "ldst_atomicity.c.inc" | ||||
|  | ||||
| static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, | ||||
|                           uintptr_t ra, MMUAccessType access_type) | ||||
| uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, | ||||
|                     MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     void *haddr; | ||||
|     uint8_t ret; | ||||
|  | ||||
|     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); | ||||
|     haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, access_type); | ||||
|     validate_memop(oi, MO_UB); | ||||
|     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); | ||||
|     ret = ldub_p(haddr); | ||||
|     clear_helper_retaddr(); | ||||
|     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, | ||||
|                            uintptr_t ra, MMUAccessType access_type) | ||||
| uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr, | ||||
|                         MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     void *haddr; | ||||
|     uint16_t ret; | ||||
|     MemOp mop = get_memop(oi); | ||||
|  | ||||
|     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); | ||||
|     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type); | ||||
|     ret = load_atom_2(cpu, ra, haddr, mop); | ||||
|     validate_memop(oi, MO_BEUW); | ||||
|     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); | ||||
|     ret = lduw_be_p(haddr); | ||||
|     clear_helper_retaddr(); | ||||
|  | ||||
|     if (mop & MO_BSWAP) { | ||||
|         ret = bswap16(ret); | ||||
|     } | ||||
|     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, | ||||
|                            uintptr_t ra, MMUAccessType access_type) | ||||
| uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr, | ||||
|                         MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     void *haddr; | ||||
|     uint32_t ret; | ||||
|     MemOp mop = get_memop(oi); | ||||
|  | ||||
|     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); | ||||
|     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type); | ||||
|     ret = load_atom_4(cpu, ra, haddr, mop); | ||||
|     validate_memop(oi, MO_BEUL); | ||||
|     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); | ||||
|     ret = ldl_be_p(haddr); | ||||
|     clear_helper_retaddr(); | ||||
|  | ||||
|     if (mop & MO_BSWAP) { | ||||
|         ret = bswap32(ret); | ||||
|     } | ||||
|     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, | ||||
|                            uintptr_t ra, MMUAccessType access_type) | ||||
| uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr, | ||||
|                         MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     void *haddr; | ||||
|     uint64_t ret; | ||||
|     MemOp mop = get_memop(oi); | ||||
|  | ||||
|     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); | ||||
|     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type); | ||||
|     ret = load_atom_8(cpu, ra, haddr, mop); | ||||
|     validate_memop(oi, MO_BEUQ); | ||||
|     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); | ||||
|     ret = ldq_be_p(haddr); | ||||
|     clear_helper_retaddr(); | ||||
|  | ||||
|     if (mop & MO_BSWAP) { | ||||
|         ret = bswap64(ret); | ||||
|     } | ||||
|     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| static Int128 do_ld16_mmu(CPUState *cpu, abi_ptr addr, | ||||
|                           MemOpIdx oi, uintptr_t ra) | ||||
| uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr, | ||||
|                         MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     void *haddr; | ||||
|     uint16_t ret; | ||||
|  | ||||
|     validate_memop(oi, MO_LEUW); | ||||
|     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); | ||||
|     ret = lduw_le_p(haddr); | ||||
|     clear_helper_retaddr(); | ||||
|     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr, | ||||
|                         MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     void *haddr; | ||||
|     uint32_t ret; | ||||
|  | ||||
|     validate_memop(oi, MO_LEUL); | ||||
|     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); | ||||
|     ret = ldl_le_p(haddr); | ||||
|     clear_helper_retaddr(); | ||||
|     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr, | ||||
|                         MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     void *haddr; | ||||
|     uint64_t ret; | ||||
|  | ||||
|     validate_memop(oi, MO_LEUQ); | ||||
|     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); | ||||
|     ret = ldq_le_p(haddr); | ||||
|     clear_helper_retaddr(); | ||||
|     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); | ||||
|     return ret; | ||||
| } | ||||
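A hedged observation (not part of the patch) to keep in mind while reading the per-width helpers above:

/*
 * The _be_/_le_ pairs differ only in which host accessor they call; the
 * lookup, retaddr and plugin-callback handling are identical.  For 16-bit
 * loads the two accessors are related by a byte swap on any host:
 *     lduw_be_p(p) == bswap16(lduw_le_p(p))
 */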
|  | ||||
| Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr, | ||||
|                        MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     void *haddr; | ||||
|     Int128 ret; | ||||
|     MemOp mop = get_memop(oi); | ||||
|  | ||||
|     tcg_debug_assert((mop & MO_SIZE) == MO_128); | ||||
|     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); | ||||
|     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_LOAD); | ||||
|     ret = load_atom_16(cpu, ra, haddr, mop); | ||||
|     validate_memop(oi, MO_128 | MO_BE); | ||||
|     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); | ||||
|     memcpy(&ret, haddr, 16); | ||||
|     clear_helper_retaddr(); | ||||
|     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); | ||||
|  | ||||
|     if (mop & MO_BSWAP) { | ||||
|     if (!HOST_BIG_ENDIAN) { | ||||
|         ret = bswap128(ret); | ||||
|     } | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val, | ||||
| Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr, | ||||
|                        MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     void *haddr; | ||||
|     Int128 ret; | ||||
|  | ||||
|     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); | ||||
|     haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, MMU_DATA_STORE); | ||||
|     validate_memop(oi, MO_128 | MO_LE); | ||||
|     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); | ||||
|     memcpy(&ret, haddr, 16); | ||||
|     clear_helper_retaddr(); | ||||
|     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); | ||||
|  | ||||
|     if (HOST_BIG_ENDIAN) { | ||||
|         ret = bswap128(ret); | ||||
|     } | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val, | ||||
|                  MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     void *haddr; | ||||
|  | ||||
|     validate_memop(oi, MO_UB); | ||||
|     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); | ||||
|     stb_p(haddr, val); | ||||
|     clear_helper_retaddr(); | ||||
|     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); | ||||
| } | ||||
|  | ||||
| static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val, | ||||
|                        MemOpIdx oi, uintptr_t ra) | ||||
| void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val, | ||||
|                     MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     void *haddr; | ||||
|     MemOp mop = get_memop(oi); | ||||
|  | ||||
|     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); | ||||
|     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE); | ||||
|  | ||||
|     if (mop & MO_BSWAP) { | ||||
|         val = bswap16(val); | ||||
|     } | ||||
|     store_atom_2(cpu, ra, haddr, mop, val); | ||||
|     validate_memop(oi, MO_BEUW); | ||||
|     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); | ||||
|     stw_be_p(haddr, val); | ||||
|     clear_helper_retaddr(); | ||||
|     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); | ||||
| } | ||||
|  | ||||
| static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val, | ||||
|                        MemOpIdx oi, uintptr_t ra) | ||||
| void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val, | ||||
|                     MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     void *haddr; | ||||
|     MemOp mop = get_memop(oi); | ||||
|  | ||||
|     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); | ||||
|     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE); | ||||
|  | ||||
|     if (mop & MO_BSWAP) { | ||||
|         val = bswap32(val); | ||||
|     } | ||||
|     store_atom_4(cpu, ra, haddr, mop, val); | ||||
|     validate_memop(oi, MO_BEUL); | ||||
|     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); | ||||
|     stl_be_p(haddr, val); | ||||
|     clear_helper_retaddr(); | ||||
|     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); | ||||
| } | ||||
|  | ||||
| static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val, | ||||
|                        MemOpIdx oi, uintptr_t ra) | ||||
| void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val, | ||||
|                     MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     void *haddr; | ||||
|     MemOp mop = get_memop(oi); | ||||
|  | ||||
|     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); | ||||
|     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE); | ||||
|  | ||||
|     if (mop & MO_BSWAP) { | ||||
|         val = bswap64(val); | ||||
|     } | ||||
|     store_atom_8(cpu, ra, haddr, mop, val); | ||||
|     validate_memop(oi, MO_BEUQ); | ||||
|     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); | ||||
|     stq_be_p(haddr, val); | ||||
|     clear_helper_retaddr(); | ||||
|     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); | ||||
| } | ||||
|  | ||||
| static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val, | ||||
|                         MemOpIdx oi, uintptr_t ra) | ||||
| void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val, | ||||
|                     MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     void *haddr; | ||||
|     MemOpIdx mop = get_memop(oi); | ||||
|  | ||||
|     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); | ||||
|     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE); | ||||
|     validate_memop(oi, MO_LEUW); | ||||
|     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); | ||||
|     stw_le_p(haddr, val); | ||||
|     clear_helper_retaddr(); | ||||
|     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); | ||||
| } | ||||
|  | ||||
|     if (mop & MO_BSWAP) { | ||||
| void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val, | ||||
|                     MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     void *haddr; | ||||
|  | ||||
|     validate_memop(oi, MO_LEUL); | ||||
|     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); | ||||
|     stl_le_p(haddr, val); | ||||
|     clear_helper_retaddr(); | ||||
|     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); | ||||
| } | ||||
|  | ||||
| void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val, | ||||
|                     MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     void *haddr; | ||||
|  | ||||
|     validate_memop(oi, MO_LEUQ); | ||||
|     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); | ||||
|     stq_le_p(haddr, val); | ||||
|     clear_helper_retaddr(); | ||||
|     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); | ||||
| } | ||||
|  | ||||
| void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr, | ||||
|                      Int128 val, MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     void *haddr; | ||||
|  | ||||
|     validate_memop(oi, MO_128 | MO_BE); | ||||
|     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); | ||||
|     if (!HOST_BIG_ENDIAN) { | ||||
|         val = bswap128(val); | ||||
|     } | ||||
|     store_atom_16(cpu, ra, haddr, mop, val); | ||||
|     memcpy(haddr, &val, 16); | ||||
|     clear_helper_retaddr(); | ||||
|     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); | ||||
| } | ||||
|  | ||||
| void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr, | ||||
|                      Int128 val, MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     void *haddr; | ||||
|  | ||||
|     validate_memop(oi, MO_128 | MO_LE); | ||||
|     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); | ||||
|     if (HOST_BIG_ENDIAN) { | ||||
|         val = bswap128(val); | ||||
|     } | ||||
|     memcpy(haddr, &val, 16); | ||||
|     clear_helper_retaddr(); | ||||
|     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); | ||||
| } | ||||
|  | ||||
| uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr) | ||||
| @@ -1161,70 +1222,16 @@ uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr) | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr, | ||||
|                          MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     void *haddr; | ||||
|     uint8_t ret; | ||||
|  | ||||
|     haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH); | ||||
|     ret = ldub_p(haddr); | ||||
|     clear_helper_retaddr(); | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr, | ||||
|                           MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     void *haddr; | ||||
|     uint16_t ret; | ||||
|  | ||||
|     haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH); | ||||
|     ret = lduw_p(haddr); | ||||
|     clear_helper_retaddr(); | ||||
|     if (get_memop(oi) & MO_BSWAP) { | ||||
|         ret = bswap16(ret); | ||||
|     } | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr, | ||||
|                           MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     void *haddr; | ||||
|     uint32_t ret; | ||||
|  | ||||
|     haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH); | ||||
|     ret = ldl_p(haddr); | ||||
|     clear_helper_retaddr(); | ||||
|     if (get_memop(oi) & MO_BSWAP) { | ||||
|         ret = bswap32(ret); | ||||
|     } | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr, | ||||
|                           MemOpIdx oi, uintptr_t ra) | ||||
| { | ||||
|     void *haddr; | ||||
|     uint64_t ret; | ||||
|  | ||||
|     haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD); | ||||
|     ret = ldq_p(haddr); | ||||
|     clear_helper_retaddr(); | ||||
|     if (get_memop(oi) & MO_BSWAP) { | ||||
|         ret = bswap64(ret); | ||||
|     } | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| #include "ldst_common.c.inc" | ||||
|  | ||||
| /* | ||||
|  * Do not allow unaligned operations to proceed.  Return the host address. | ||||
|  * | ||||
|  * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE. | ||||
|  */ | ||||
| static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, | ||||
|                                int size, uintptr_t retaddr) | ||||
| static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, | ||||
|                                MemOpIdx oi, int size, int prot, | ||||
|                                uintptr_t retaddr) | ||||
| { | ||||
|     MemOp mop = get_memop(oi); | ||||
|     int a_bits = get_alignment_bits(mop); | ||||
| @@ -1232,15 +1239,16 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, | ||||
|  | ||||
|     /* Enforce guest required alignment.  */ | ||||
|     if (unlikely(addr & ((1 << a_bits) - 1))) { | ||||
|         cpu_loop_exit_sigbus(cpu, addr, MMU_DATA_STORE, retaddr); | ||||
|         MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE; | ||||
|         cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr); | ||||
|     } | ||||
|  | ||||
|     /* Enforce qemu required alignment.  */ | ||||
|     if (unlikely(addr & (size - 1))) { | ||||
|         cpu_loop_exit_atomic(cpu, retaddr); | ||||
|         cpu_loop_exit_atomic(env_cpu(env), retaddr); | ||||
|     } | ||||
|  | ||||
|     ret = g2h(cpu, addr); | ||||
|     ret = g2h(env_cpu(env), addr); | ||||
|     set_helper_retaddr(retaddr); | ||||
|     return ret; | ||||
| } | ||||
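An illustrative case (invented numbers) distinguishing the two alignment checks above:

/*
 * A 4-byte atomic at addr 0x1002 whose oi lacks MO_ALIGN passes the guest
 * check (a_bits == 0, mask 0) but fails the qemu check (0x1002 & 3 != 0),
 * so it exits via cpu_loop_exit_atomic() to the serialized slow path
 * rather than delivering SIGBUS to the guest.
 */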
| @@ -1270,7 +1278,7 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, | ||||
| #include "atomic_template.h" | ||||
| #endif | ||||
|  | ||||
| #if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128 | ||||
| #if HAVE_ATOMIC128 || HAVE_CMPXCHG128 | ||||
| #define DATA_SIZE 16 | ||||
| #include "atomic_template.h" | ||||
| #endif | ||||
|   | ||||
| @@ -12,7 +12,6 @@ | ||||
| #include "qemu/error-report.h" | ||||
| #include "qemu/module.h" | ||||
| #include "qapi/error.h" | ||||
| #include "hw/xen/xen_native.h" | ||||
| #include "hw/xen/xen-legacy-backend.h" | ||||
| #include "hw/xen/xen_pt.h" | ||||
| #include "chardev/char.h" | ||||
| @@ -30,12 +29,83 @@ xc_interface *xen_xc; | ||||
| xenforeignmemory_handle *xen_fmem; | ||||
| xendevicemodel_handle *xen_dmod; | ||||
|  | ||||
| static void xenstore_record_dm_state(const char *state) | ||||
| static int store_dev_info(int domid, Chardev *cs, const char *string) | ||||
| { | ||||
|     struct xs_handle *xs = NULL; | ||||
|     char *path = NULL; | ||||
|     char *newpath = NULL; | ||||
|     char *pts = NULL; | ||||
|     int ret = -1; | ||||
|  | ||||
|     /* Only continue if we're talking to a pty. */ | ||||
|     if (!CHARDEV_IS_PTY(cs)) { | ||||
|         return 0; | ||||
|     } | ||||
|     pts = cs->filename + 4; | ||||
|  | ||||
|     /* We now have everything we need to set the xenstore entry. */ | ||||
|     xs = xs_open(0); | ||||
|     if (xs == NULL) { | ||||
|         fprintf(stderr, "Could not contact XenStore\n"); | ||||
|         goto out; | ||||
|     } | ||||
|  | ||||
|     path = xs_get_domain_path(xs, domid); | ||||
|     if (path == NULL) { | ||||
|         fprintf(stderr, "xs_get_domain_path() error\n"); | ||||
|         goto out; | ||||
|     } | ||||
|     newpath = realloc(path, (strlen(path) + strlen(string) + | ||||
|                 strlen("/tty") + 1)); | ||||
|     if (newpath == NULL) { | ||||
|         fprintf(stderr, "realloc error\n"); | ||||
|         goto out; | ||||
|     } | ||||
|     path = newpath; | ||||
|  | ||||
|     strcat(path, string); | ||||
|     strcat(path, "/tty"); | ||||
|     if (!xs_write(xs, XBT_NULL, path, pts, strlen(pts))) { | ||||
|         fprintf(stderr, "xs_write for '%s' fail", string); | ||||
|         goto out; | ||||
|     } | ||||
|     ret = 0; | ||||
|  | ||||
| out: | ||||
|     free(path); | ||||
|     xs_close(xs); | ||||
|  | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| void xenstore_store_pv_console_info(int i, Chardev *chr) | ||||
| { | ||||
|     if (i == 0) { | ||||
|         store_dev_info(xen_domid, chr, "/console"); | ||||
|     } else { | ||||
|         char buf[32]; | ||||
|         snprintf(buf, sizeof(buf), "/device/console/%d", i); | ||||
|         store_dev_info(xen_domid, chr, buf); | ||||
|     } | ||||
| } | ||||
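A hedged illustration of the resulting XenStore layout; the domid and pty path are invented:

/*
 * For xen_domid == 7, a pty chardev on /dev/pts/3 and console index i == 2,
 * the write above lands at
 *     /local/domain/7/device/console/2/tty = "/dev/pts/3"
 * since xs_get_domain_path() yields "/local/domain/<domid>" and
 * store_dev_info() appends the suffix plus "/tty".
 */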
|  | ||||
|  | ||||
| static void xenstore_record_dm_state(struct xs_handle *xs, const char *state) | ||||
| { | ||||
|     char path[50]; | ||||
|  | ||||
|     if (xs == NULL) { | ||||
|         error_report("xenstore connection not initialized"); | ||||
|         exit(1); | ||||
|     } | ||||
|  | ||||
|     snprintf(path, sizeof (path), "device-model/%u/state", xen_domid); | ||||
|     if (!qemu_xen_xs_write(xenstore, XBT_NULL, path, state, strlen(state))) { | ||||
|     /* | ||||
|      * This call may fail when running restricted so don't make it fatal in | ||||
|      * that case. Toolstacks should instead use QMP to listen for state changes. | ||||
|      */ | ||||
|     if (!xs_write(xs, XBT_NULL, path, state, strlen(state)) && | ||||
|             !xen_domid_restrict) { | ||||
|         error_report("error recording dm state"); | ||||
|         exit(1); | ||||
|     } | ||||
| @@ -47,7 +117,7 @@ static void xen_change_state_handler(void *opaque, bool running, | ||||
| { | ||||
|     if (running) { | ||||
|         /* record state running */ | ||||
|         xenstore_record_dm_state("running"); | ||||
|         xenstore_record_dm_state(xenstore, "running"); | ||||
|     } | ||||
| } | ||||
|  | ||||
| @@ -96,15 +166,7 @@ static int xen_init(MachineState *ms) | ||||
|         xc_interface_close(xen_xc); | ||||
|         return -1; | ||||
|     } | ||||
|  | ||||
|     /* | ||||
|      * The XenStore write would fail when running restricted so don't attempt | ||||
|      * it in that case. Toolstacks should instead use QMP to listen for state | ||||
|      * changes. | ||||
|      */ | ||||
|     if (!xen_domid_restrict) { | ||||
|         qemu_add_vm_change_state_handler(xen_change_state_handler, NULL); | ||||
|     } | ||||
|     qemu_add_vm_change_state_handler(xen_change_state_handler, NULL); | ||||
|     /* | ||||
|      * opt out of system RAM being allocated by generic code | ||||
|      */ | ||||
|   | ||||
| @@ -222,7 +222,11 @@ static int alsa_poll_helper (snd_pcm_t *handle, struct pollhlp *hlp, int mask) | ||||
|         return -1; | ||||
|     } | ||||
|  | ||||
|     pfds = g_new0(struct pollfd, count); | ||||
|     pfds = audio_calloc ("alsa_poll_helper", count, sizeof (*pfds)); | ||||
|     if (!pfds) { | ||||
|         dolog ("Could not initialize poll mode\n"); | ||||
|         return -1; | ||||
|     } | ||||
|  | ||||
|     err = snd_pcm_poll_descriptors (handle, pfds, count); | ||||
|     if (err < 0) { | ||||
| @@ -904,7 +908,7 @@ static void alsa_init_per_direction(AudiodevAlsaPerDirectionOptions *apdo) | ||||
|     } | ||||
| } | ||||
|  | ||||
| static void *alsa_audio_init(Audiodev *dev, Error **errp) | ||||
| static void *alsa_audio_init(Audiodev *dev) | ||||
| { | ||||
|     AudiodevAlsaOptions *aopts; | ||||
|     assert(dev->driver == AUDIODEV_DRIVER_ALSA); | ||||
| @@ -913,23 +917,28 @@ static void *alsa_audio_init(Audiodev *dev, Error **errp) | ||||
|     alsa_init_per_direction(aopts->in); | ||||
|     alsa_init_per_direction(aopts->out); | ||||
|  | ||||
|     /* don't set has_* so alsa_open can identify it wasn't set by the user */ | ||||
|     /* | ||||
|      * need to define them, as otherwise alsa produces no sound | ||||
|      * doesn't set has_* so alsa_open can identify it wasn't set by the user | ||||
|      */ | ||||
|     if (!dev->u.alsa.out->has_period_length) { | ||||
|         /* 256 frames assuming 44100Hz */ | ||||
|         dev->u.alsa.out->period_length = 5805; | ||||
|         /* 1024 frames assuming 44100Hz */ | ||||
|         dev->u.alsa.out->period_length = 1024 * 1000000 / 44100; | ||||
|     } | ||||
|     if (!dev->u.alsa.out->has_buffer_length) { | ||||
|         /* 4096 frames assuming 44100Hz */ | ||||
|         dev->u.alsa.out->buffer_length = 92880; | ||||
|         dev->u.alsa.out->buffer_length = 4096ll * 1000000 / 44100; | ||||
|     } | ||||
|  | ||||
|     /* | ||||
|      * OptsVisitor sets unspecified optional fields to zero, but do not depend | ||||
|      * on it... | ||||
|      */ | ||||
|     if (!dev->u.alsa.in->has_period_length) { | ||||
|         /* 256 frames assuming 44100Hz */ | ||||
|         dev->u.alsa.in->period_length = 5805; | ||||
|         dev->u.alsa.in->period_length = 0; | ||||
|     } | ||||
|     if (!dev->u.alsa.in->has_buffer_length) { | ||||
|         /* 4096 frames assuming 44100Hz */ | ||||
|         dev->u.alsa.in->buffer_length = 92880; | ||||
|         dev->u.alsa.in->buffer_length = 0; | ||||
|     } | ||||
|  | ||||
|     return dev; | ||||
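For reference, the microsecond defaults in the hunk above are plain frame-count conversions at the assumed 44100 Hz rate:

/*
 *    256 frames:  256 * 1000000 / 44100 ~=  5805 us   (the old constant)
 *   1024 frames: 1024 * 1000000 / 44100 ~= 23220 us
 *   4096 frames: 4096 * 1000000 / 44100 ~= 92880 us   (the old 92880)
 * The integer expressions truncate (23219, 92879), one microsecond below
 * the rounded legacy values.
 */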
| @@ -960,6 +969,7 @@ static struct audio_driver alsa_audio_driver = { | ||||
|     .init           = alsa_audio_init, | ||||
|     .fini           = alsa_audio_fini, | ||||
|     .pcm_ops        = &alsa_pcm_ops, | ||||
|     .can_be_default = 1, | ||||
|     .max_voices_out = INT_MAX, | ||||
|     .max_voices_in  = INT_MAX, | ||||
|     .voice_size_out = sizeof (ALSAVoiceOut), | ||||
|   | ||||
| @@ -26,7 +26,6 @@ | ||||
| #include "audio/audio.h" | ||||
| #include "monitor/hmp.h" | ||||
| #include "monitor/monitor.h" | ||||
| #include "qapi/error.h" | ||||
| #include "qapi/qmp/qdict.h" | ||||
|  | ||||
| static QLIST_HEAD (capture_list_head, CaptureState) capture_head; | ||||
| @@ -66,11 +65,10 @@ void hmp_wavcapture(Monitor *mon, const QDict *qdict) | ||||
|     int nchannels = qdict_get_try_int(qdict, "nchannels", 2); | ||||
|     const char *audiodev = qdict_get_str(qdict, "audiodev"); | ||||
|     CaptureState *s; | ||||
|     Error *local_err = NULL; | ||||
|     AudioState *as = audio_state_by_name(audiodev, &local_err); | ||||
|     AudioState *as = audio_state_by_name(audiodev); | ||||
|  | ||||
|     if (!as) { | ||||
|         error_report_err(local_err); | ||||
|         monitor_printf(mon, "Audiodev '%s' not found\n", audiodev); | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|   | ||||
audio/audio.c: 631 lines changed (file diff suppressed because it is too large)
							| @@ -94,7 +94,7 @@ typedef struct QEMUAudioTimeStamp { | ||||
| void AUD_vlog (const char *cap, const char *fmt, va_list ap) G_GNUC_PRINTF(2, 0); | ||||
| void AUD_log (const char *cap, const char *fmt, ...) G_GNUC_PRINTF(2, 3); | ||||
|  | ||||
| bool AUD_register_card (const char *name, QEMUSoundCard *card, Error **errp); | ||||
| void AUD_register_card (const char *name, QEMUSoundCard *card); | ||||
| void AUD_remove_card (QEMUSoundCard *card); | ||||
| CaptureVoiceOut *AUD_add_capture( | ||||
|     AudioState *s, | ||||
| @@ -169,14 +169,12 @@ void audio_sample_from_uint64(void *samples, int pos, | ||||
|                             uint64_t left, uint64_t right); | ||||
|  | ||||
| void audio_define(Audiodev *audio); | ||||
| void audio_define_default(Audiodev *dev, Error **errp); | ||||
| void audio_parse_option(const char *opt); | ||||
| void audio_create_default_audiodevs(void); | ||||
| void audio_init_audiodevs(void); | ||||
| bool audio_init_audiodevs(void); | ||||
| void audio_help(void); | ||||
| void audio_legacy_help(void); | ||||
|  | ||||
| AudioState *audio_state_by_name(const char *name, Error **errp); | ||||
| AudioState *audio_get_default_audio_state(Error **errp); | ||||
| AudioState *audio_state_by_name(const char *name); | ||||
| const char *audio_get_id(QEMUSoundCard *card); | ||||
|  | ||||
| #define DEFINE_AUDIO_PROPERTIES(_s, _f)         \ | ||||
|   | ||||
| @@ -58,7 +58,7 @@ typedef struct SWVoiceCap SWVoiceCap; | ||||
|  | ||||
| typedef struct STSampleBuffer { | ||||
|     size_t pos, size; | ||||
|     st_sample *buffer; | ||||
|     st_sample samples[]; | ||||
| } STSampleBuffer; | ||||
|  | ||||
| typedef struct HWVoiceOut { | ||||
| @@ -71,7 +71,7 @@ typedef struct HWVoiceOut { | ||||
|     f_sample *clip; | ||||
|     uint64_t ts_helper; | ||||
|  | ||||
|     STSampleBuffer mix_buf; | ||||
|     STSampleBuffer *mix_buf; | ||||
|     void *buf_emul; | ||||
|     size_t pos_emul, pending_emul, size_emul; | ||||
|  | ||||
| @@ -93,7 +93,7 @@ typedef struct HWVoiceIn { | ||||
|     size_t total_samples_captured; | ||||
|     uint64_t ts_helper; | ||||
|  | ||||
|     STSampleBuffer conv_buf; | ||||
|     STSampleBuffer *conv_buf; | ||||
|     void *buf_emul; | ||||
|     size_t pos_emul, pending_emul, size_emul; | ||||
|  | ||||
| @@ -108,7 +108,8 @@ struct SWVoiceOut { | ||||
|     AudioState *s; | ||||
|     struct audio_pcm_info info; | ||||
|     t_sample *conv; | ||||
|     STSampleBuffer resample_buf; | ||||
|     int64_t ratio; | ||||
|     struct st_sample *buf; | ||||
|     void *rate; | ||||
|     size_t total_hw_samples_mixed; | ||||
|     int active; | ||||
| @@ -125,9 +126,10 @@ struct SWVoiceIn { | ||||
|     AudioState *s; | ||||
|     int active; | ||||
|     struct audio_pcm_info info; | ||||
|     int64_t ratio; | ||||
|     void *rate; | ||||
|     size_t total_hw_samples_acquired; | ||||
|     STSampleBuffer resample_buf; | ||||
|     struct st_sample *buf; | ||||
|     f_sample *clip; | ||||
|     HWVoiceIn *hw; | ||||
|     char *name; | ||||
| @@ -140,16 +142,17 @@ typedef struct audio_driver audio_driver; | ||||
| struct audio_driver { | ||||
|     const char *name; | ||||
|     const char *descr; | ||||
|     void *(*init) (Audiodev *, Error **); | ||||
|     void *(*init) (Audiodev *); | ||||
|     void (*fini) (void *); | ||||
| #ifdef CONFIG_GIO | ||||
|     void (*set_dbus_server) (AudioState *s, GDBusObjectManagerServer *manager, bool p2p); | ||||
|     void (*set_dbus_server) (AudioState *s, GDBusObjectManagerServer *manager); | ||||
| #endif | ||||
|     struct audio_pcm_ops *pcm_ops; | ||||
|     int can_be_default; | ||||
|     int max_voices_out; | ||||
|     int max_voices_in; | ||||
|     size_t voice_size_out; | ||||
|     size_t voice_size_in; | ||||
|     int voice_size_out; | ||||
|     int voice_size_in; | ||||
|     QLIST_ENTRY(audio_driver) next; | ||||
| }; | ||||
|  | ||||
| @@ -242,11 +245,13 @@ extern const struct mixeng_volume nominal_volume; | ||||
| extern const char *audio_prio_list[]; | ||||
|  | ||||
| void audio_driver_register(audio_driver *drv); | ||||
| audio_driver *audio_driver_lookup(const char *name); | ||||
|  | ||||
| void audio_pcm_init_info (struct audio_pcm_info *info, struct audsettings *as); | ||||
| void audio_pcm_info_clear_buf (struct audio_pcm_info *info, void *buf, int len); | ||||
|  | ||||
| int audio_bug (const char *funcname, int cond); | ||||
| void *audio_calloc (const char *funcname, int nmemb, size_t size); | ||||
|  | ||||
| void audio_run(AudioState *s, const char *msg); | ||||
|  | ||||
| @@ -289,12 +294,18 @@ static inline size_t audio_ring_posb(size_t pos, size_t dist, size_t len) | ||||
| #define ldebug(fmt, ...) (void)0 | ||||
| #endif | ||||
|  | ||||
| #define AUDIO_STRINGIFY_(n) #n | ||||
| #define AUDIO_STRINGIFY(n) AUDIO_STRINGIFY_(n) | ||||
|  | ||||
| typedef struct AudiodevListEntry { | ||||
|     Audiodev *dev; | ||||
|     QSIMPLEQ_ENTRY(AudiodevListEntry) next; | ||||
| } AudiodevListEntry; | ||||
|  | ||||
| typedef QSIMPLEQ_HEAD(, AudiodevListEntry) AudiodevListHead; | ||||
| AudiodevListHead audio_handle_legacy_opts(void); | ||||
|  | ||||
| void audio_free_audiodev_list(AudiodevListHead *head); | ||||
|  | ||||
| void audio_create_pdos(Audiodev *dev); | ||||
| AudiodevPerDirectionOptions *audio_get_pdo_in(Audiodev *dev); | ||||
|   | ||||
audio/audio_legacy.c: new file, 591 lines

							| @@ -0,0 +1,591 @@ | ||||
| /* | ||||
|  * QEMU Audio subsystem: legacy configuration handling | ||||
|  * | ||||
|  * Copyright (c) 2015-2019 Zoltán Kővágó <DirtY.iCE.hu@gmail.com> | ||||
|  * | ||||
|  * Permission is hereby granted, free of charge, to any person obtaining a copy | ||||
|  * of this software and associated documentation files (the "Software"), to deal | ||||
|  * in the Software without restriction, including without limitation the rights | ||||
|  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||||
|  * copies of the Software, and to permit persons to whom the Software is | ||||
|  * furnished to do so, subject to the following conditions: | ||||
|  * | ||||
|  * The above copyright notice and this permission notice shall be included in | ||||
|  * all copies or substantial portions of the Software. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
|  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
|  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||||
|  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||
|  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||||
|  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||||
|  * THE SOFTWARE. | ||||
|  */ | ||||
| #include "qemu/osdep.h" | ||||
| #include "audio.h" | ||||
| #include "audio_int.h" | ||||
| #include "qemu/cutils.h" | ||||
| #include "qemu/timer.h" | ||||
| #include "qapi/error.h" | ||||
| #include "qapi/qapi-visit-audio.h" | ||||
| #include "qapi/visitor-impl.h" | ||||
|  | ||||
| #define AUDIO_CAP "audio-legacy" | ||||
| #include "audio_int.h" | ||||
|  | ||||
| static uint32_t toui32(const char *str) | ||||
| { | ||||
|     unsigned long long ret; | ||||
|     if (parse_uint_full(str, &ret, 10) || ret > UINT32_MAX) { | ||||
|         dolog("Invalid integer value `%s'\n", str); | ||||
|         exit(1); | ||||
|     } | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| /* helper functions to convert env variables */ | ||||
| static void get_bool(const char *env, bool *dst, bool *has_dst) | ||||
| { | ||||
|     const char *val = getenv(env); | ||||
|     if (val) { | ||||
|         *dst = toui32(val) != 0; | ||||
|         *has_dst = true; | ||||
|     } | ||||
| } | ||||
|  | ||||
| static void get_int(const char *env, uint32_t *dst, bool *has_dst) | ||||
| { | ||||
|     const char *val = getenv(env); | ||||
|     if (val) { | ||||
|         *dst = toui32(val); | ||||
|         *has_dst = true; | ||||
|     } | ||||
| } | ||||
|  | ||||
| static void get_str(const char *env, char **dst) | ||||
| { | ||||
|     const char *val = getenv(env); | ||||
|     if (val) { | ||||
|         g_free(*dst); | ||||
|         *dst = g_strdup(val); | ||||
|     } | ||||
| } | ||||
|  | ||||
| static void get_fmt(const char *env, AudioFormat *dst, bool *has_dst) | ||||
| { | ||||
|     const char *val = getenv(env); | ||||
|     if (val) { | ||||
|         size_t i; | ||||
|         for (i = 0; i < AudioFormat_lookup.size; ++i) { | ||||
|             if (strcasecmp(val, AudioFormat_lookup.array[i]) == 0) { | ||||
|                 *dst = i; | ||||
|                 *has_dst = true; | ||||
|                 return; | ||||
|             } | ||||
|         } | ||||
|  | ||||
|         dolog("Invalid audio format `%s'\n", val); | ||||
|         exit(1); | ||||
|     } | ||||
| } | ||||
|  | ||||
|  | ||||
| #if defined(CONFIG_AUDIO_ALSA) || defined(CONFIG_AUDIO_DSOUND) | ||||
| static void get_millis_to_usecs(const char *env, uint32_t *dst, bool *has_dst) | ||||
| { | ||||
|     const char *val = getenv(env); | ||||
|     if (val) { | ||||
|         *dst = toui32(val) * 1000; | ||||
|         *has_dst = true; | ||||
|     } | ||||
| } | ||||
| #endif | ||||
|  | ||||
| #if defined(CONFIG_AUDIO_ALSA) || defined(CONFIG_AUDIO_COREAUDIO) || \ | ||||
|     defined(CONFIG_AUDIO_PA) || defined(CONFIG_AUDIO_SDL) || \ | ||||
|     defined(CONFIG_AUDIO_DSOUND) || defined(CONFIG_AUDIO_OSS) | ||||
| static uint32_t frames_to_usecs(uint32_t frames, | ||||
|                                 AudiodevPerDirectionOptions *pdo) | ||||
| { | ||||
|     uint32_t freq = pdo->has_frequency ? pdo->frequency : 44100; | ||||
|     return (frames * 1000000 + freq / 2) / freq; | ||||
| } | ||||
| #endif | ||||
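A worked example (default 44100 Hz) of the round-to-nearest division used above:

/*
 *   frames_to_usecs(256) = (256 * 1000000 + 22050) / 44100 = 5805
 * whereas plain truncating division would give 5804.
 */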
|  | ||||
| #ifdef CONFIG_AUDIO_COREAUDIO | ||||
| static void get_frames_to_usecs(const char *env, uint32_t *dst, bool *has_dst, | ||||
|                                 AudiodevPerDirectionOptions *pdo) | ||||
| { | ||||
|     const char *val = getenv(env); | ||||
|     if (val) { | ||||
|         *dst = frames_to_usecs(toui32(val), pdo); | ||||
|         *has_dst = true; | ||||
|     } | ||||
| } | ||||
| #endif | ||||
|  | ||||
| #if defined(CONFIG_AUDIO_PA) || defined(CONFIG_AUDIO_SDL) || \ | ||||
|     defined(CONFIG_AUDIO_DSOUND) || defined(CONFIG_AUDIO_OSS) | ||||
| static uint32_t samples_to_usecs(uint32_t samples, | ||||
|                                  AudiodevPerDirectionOptions *pdo) | ||||
| { | ||||
|     uint32_t channels = pdo->has_channels ? pdo->channels : 2; | ||||
|     return frames_to_usecs(samples / channels, pdo); | ||||
| } | ||||
| #endif | ||||
|  | ||||
| #if defined(CONFIG_AUDIO_PA) || defined(CONFIG_AUDIO_SDL) | ||||
| static void get_samples_to_usecs(const char *env, uint32_t *dst, bool *has_dst, | ||||
|                                  AudiodevPerDirectionOptions *pdo) | ||||
| { | ||||
|     const char *val = getenv(env); | ||||
|     if (val) { | ||||
|         *dst = samples_to_usecs(toui32(val), pdo); | ||||
|         *has_dst = true; | ||||
|     } | ||||
| } | ||||
| #endif | ||||
|  | ||||
| #if defined(CONFIG_AUDIO_DSOUND) || defined(CONFIG_AUDIO_OSS) | ||||
| static uint32_t bytes_to_usecs(uint32_t bytes, AudiodevPerDirectionOptions *pdo) | ||||
| { | ||||
|     AudioFormat fmt = pdo->has_format ? pdo->format : AUDIO_FORMAT_S16; | ||||
|     uint32_t bytes_per_sample = audioformat_bytes_per_sample(fmt); | ||||
|     return samples_to_usecs(bytes / bytes_per_sample, pdo); | ||||
| } | ||||
|  | ||||
| static void get_bytes_to_usecs(const char *env, uint32_t *dst, bool *has_dst, | ||||
|                                AudiodevPerDirectionOptions *pdo) | ||||
| { | ||||
|     const char *val = getenv(env); | ||||
|     if (val) { | ||||
|         *dst = bytes_to_usecs(toui32(val), pdo); | ||||
|         *has_dst = true; | ||||
|     } | ||||
| } | ||||
| #endif | ||||
|  | ||||
| /* backend specific functions */ | ||||
|  | ||||
| #ifdef CONFIG_AUDIO_ALSA | ||||
| /* ALSA */ | ||||
| static void handle_alsa_per_direction( | ||||
|     AudiodevAlsaPerDirectionOptions *apdo, const char *prefix) | ||||
| { | ||||
|     char buf[64]; | ||||
|     size_t len = strlen(prefix); | ||||
|     bool size_in_usecs = false; | ||||
|     bool dummy; | ||||
|  | ||||
|     memcpy(buf, prefix, len); | ||||
|     strcpy(buf + len, "TRY_POLL"); | ||||
|     get_bool(buf, &apdo->try_poll, &apdo->has_try_poll); | ||||
|  | ||||
|     strcpy(buf + len, "DEV"); | ||||
|     get_str(buf, &apdo->dev); | ||||
|  | ||||
|     strcpy(buf + len, "SIZE_IN_USEC"); | ||||
|     get_bool(buf, &size_in_usecs, &dummy); | ||||
|  | ||||
|     strcpy(buf + len, "PERIOD_SIZE"); | ||||
|     get_int(buf, &apdo->period_length, &apdo->has_period_length); | ||||
|     if (apdo->has_period_length && !size_in_usecs) { | ||||
|         apdo->period_length = frames_to_usecs( | ||||
|             apdo->period_length, | ||||
|             qapi_AudiodevAlsaPerDirectionOptions_base(apdo)); | ||||
|     } | ||||
|  | ||||
|     strcpy(buf + len, "BUFFER_SIZE"); | ||||
|     get_int(buf, &apdo->buffer_length, &apdo->has_buffer_length); | ||||
|     if (apdo->has_buffer_length && !size_in_usecs) { | ||||
|         apdo->buffer_length = frames_to_usecs( | ||||
|             apdo->buffer_length, | ||||
|             qapi_AudiodevAlsaPerDirectionOptions_base(apdo)); | ||||
|     } | ||||
| } | ||||
|  | ||||
| static void handle_alsa(Audiodev *dev) | ||||
| { | ||||
|     AudiodevAlsaOptions *aopt = &dev->u.alsa; | ||||
|     handle_alsa_per_direction(aopt->in, "QEMU_ALSA_ADC_"); | ||||
|     handle_alsa_per_direction(aopt->out, "QEMU_ALSA_DAC_"); | ||||
|  | ||||
|     get_millis_to_usecs("QEMU_ALSA_THRESHOLD", | ||||
|                         &aopt->threshold, &aopt->has_threshold); | ||||
| } | ||||
| #endif | ||||
|  | ||||
| #ifdef CONFIG_AUDIO_COREAUDIO | ||||
| /* coreaudio */ | ||||
| static void handle_coreaudio(Audiodev *dev) | ||||
| { | ||||
|     get_frames_to_usecs( | ||||
|         "QEMU_COREAUDIO_BUFFER_SIZE", | ||||
|         &dev->u.coreaudio.out->buffer_length, | ||||
|         &dev->u.coreaudio.out->has_buffer_length, | ||||
|         qapi_AudiodevCoreaudioPerDirectionOptions_base(dev->u.coreaudio.out)); | ||||
|     get_int("QEMU_COREAUDIO_BUFFER_COUNT", | ||||
|             &dev->u.coreaudio.out->buffer_count, | ||||
|             &dev->u.coreaudio.out->has_buffer_count); | ||||
| } | ||||
| #endif | ||||
|  | ||||
| #ifdef CONFIG_AUDIO_DSOUND | ||||
| /* dsound */ | ||||
| static void handle_dsound(Audiodev *dev) | ||||
| { | ||||
|     get_millis_to_usecs("QEMU_DSOUND_LATENCY_MILLIS", | ||||
|                         &dev->u.dsound.latency, &dev->u.dsound.has_latency); | ||||
|     get_bytes_to_usecs("QEMU_DSOUND_BUFSIZE_OUT", | ||||
|                        &dev->u.dsound.out->buffer_length, | ||||
|                        &dev->u.dsound.out->has_buffer_length, | ||||
|                        dev->u.dsound.out); | ||||
|     get_bytes_to_usecs("QEMU_DSOUND_BUFSIZE_IN", | ||||
|                        &dev->u.dsound.in->buffer_length, | ||||
|                        &dev->u.dsound.in->has_buffer_length, | ||||
|                        dev->u.dsound.in); | ||||
| } | ||||
| #endif | ||||
|  | ||||
| #ifdef CONFIG_AUDIO_OSS | ||||
| /* OSS */ | ||||
| static void handle_oss_per_direction( | ||||
|     AudiodevOssPerDirectionOptions *opdo, const char *try_poll_env, | ||||
|     const char *dev_env) | ||||
| { | ||||
|     get_bool(try_poll_env, &opdo->try_poll, &opdo->has_try_poll); | ||||
|     get_str(dev_env, &opdo->dev); | ||||
|  | ||||
|     get_bytes_to_usecs("QEMU_OSS_FRAGSIZE", | ||||
|                        &opdo->buffer_length, &opdo->has_buffer_length, | ||||
|                        qapi_AudiodevOssPerDirectionOptions_base(opdo)); | ||||
|     get_int("QEMU_OSS_NFRAGS", &opdo->buffer_count, | ||||
|             &opdo->has_buffer_count); | ||||
| } | ||||
|  | ||||
| static void handle_oss(Audiodev *dev) | ||||
| { | ||||
|     AudiodevOssOptions *oopt = &dev->u.oss; | ||||
|     handle_oss_per_direction(oopt->in, "QEMU_AUDIO_ADC_TRY_POLL", | ||||
|                              "QEMU_OSS_ADC_DEV"); | ||||
|     handle_oss_per_direction(oopt->out, "QEMU_AUDIO_DAC_TRY_POLL", | ||||
|                              "QEMU_OSS_DAC_DEV"); | ||||
|  | ||||
|     get_bool("QEMU_OSS_MMAP", &oopt->try_mmap, &oopt->has_try_mmap); | ||||
|     get_bool("QEMU_OSS_EXCLUSIVE", &oopt->exclusive, &oopt->has_exclusive); | ||||
|     get_int("QEMU_OSS_POLICY", &oopt->dsp_policy, &oopt->has_dsp_policy); | ||||
| } | ||||
| #endif | ||||
|  | ||||
| #ifdef CONFIG_AUDIO_PA | ||||
| /* pulseaudio */ | ||||
| static void handle_pa_per_direction( | ||||
|     AudiodevPaPerDirectionOptions *ppdo, const char *env) | ||||
| { | ||||
|     get_str(env, &ppdo->name); | ||||
| } | ||||
|  | ||||
| static void handle_pa(Audiodev *dev) | ||||
| { | ||||
|     handle_pa_per_direction(dev->u.pa.in, "QEMU_PA_SOURCE"); | ||||
|     handle_pa_per_direction(dev->u.pa.out, "QEMU_PA_SINK"); | ||||
|  | ||||
|     get_samples_to_usecs( | ||||
|         "QEMU_PA_SAMPLES", &dev->u.pa.in->buffer_length, | ||||
|         &dev->u.pa.in->has_buffer_length, | ||||
|         qapi_AudiodevPaPerDirectionOptions_base(dev->u.pa.in)); | ||||
|     get_samples_to_usecs( | ||||
|         "QEMU_PA_SAMPLES", &dev->u.pa.out->buffer_length, | ||||
|         &dev->u.pa.out->has_buffer_length, | ||||
|         qapi_AudiodevPaPerDirectionOptions_base(dev->u.pa.out)); | ||||
|  | ||||
|     get_str("QEMU_PA_SERVER", &dev->u.pa.server); | ||||
| } | ||||
| #endif | ||||
|  | ||||
| #ifdef CONFIG_AUDIO_SDL | ||||
| /* SDL */ | ||||
| static void handle_sdl(Audiodev *dev) | ||||
| { | ||||
|     /* SDL is output only */ | ||||
|     get_samples_to_usecs("QEMU_SDL_SAMPLES", &dev->u.sdl.out->buffer_length, | ||||
|         &dev->u.sdl.out->has_buffer_length, | ||||
|         qapi_AudiodevSdlPerDirectionOptions_base(dev->u.sdl.out)); | ||||
| } | ||||
| #endif | ||||
|  | ||||
| /* wav */ | ||||
| static void handle_wav(Audiodev *dev) | ||||
| { | ||||
|     get_int("QEMU_WAV_FREQUENCY", | ||||
|             &dev->u.wav.out->frequency, &dev->u.wav.out->has_frequency); | ||||
|     get_fmt("QEMU_WAV_FORMAT", &dev->u.wav.out->format, | ||||
|             &dev->u.wav.out->has_format); | ||||
|     get_int("QEMU_WAV_DAC_FIXED_CHANNELS", | ||||
|             &dev->u.wav.out->channels, &dev->u.wav.out->has_channels); | ||||
|     get_str("QEMU_WAV_PATH", &dev->u.wav.path); | ||||
| } | ||||
|  | ||||
| /* general */ | ||||
| static void handle_per_direction( | ||||
|     AudiodevPerDirectionOptions *pdo, const char *prefix) | ||||
| { | ||||
|     char buf[64]; | ||||
|     size_t len = strlen(prefix); | ||||
|  | ||||
|     memcpy(buf, prefix, len); | ||||
|     strcpy(buf + len, "FIXED_SETTINGS"); | ||||
|     get_bool(buf, &pdo->fixed_settings, &pdo->has_fixed_settings); | ||||
|  | ||||
|     strcpy(buf + len, "FIXED_FREQ"); | ||||
|     get_int(buf, &pdo->frequency, &pdo->has_frequency); | ||||
|  | ||||
|     strcpy(buf + len, "FIXED_FMT"); | ||||
|     get_fmt(buf, &pdo->format, &pdo->has_format); | ||||
|  | ||||
|     strcpy(buf + len, "FIXED_CHANNELS"); | ||||
|     get_int(buf, &pdo->channels, &pdo->has_channels); | ||||
|  | ||||
|     strcpy(buf + len, "VOICES"); | ||||
|     get_int(buf, &pdo->voices, &pdo->has_voices); | ||||
| } | ||||
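For orientation, these are the environment variable names the function builds for the DAC prefix used below:

/*
 *   QEMU_AUDIO_DAC_FIXED_SETTINGS, QEMU_AUDIO_DAC_FIXED_FREQ,
 *   QEMU_AUDIO_DAC_FIXED_FMT,      QEMU_AUDIO_DAC_FIXED_CHANNELS,
 *   QEMU_AUDIO_DAC_VOICES
 * (and the same five with the QEMU_AUDIO_ADC_ prefix for capture).
 */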
|  | ||||
| static AudiodevListEntry *legacy_opt(const char *drvname) | ||||
| { | ||||
|     AudiodevListEntry *e = g_new0(AudiodevListEntry, 1); | ||||
|     e->dev = g_new0(Audiodev, 1); | ||||
|     e->dev->id = g_strdup(drvname); | ||||
|     e->dev->driver = qapi_enum_parse( | ||||
|         &AudiodevDriver_lookup, drvname, -1, &error_abort); | ||||
|  | ||||
|     audio_create_pdos(e->dev); | ||||
|  | ||||
|     handle_per_direction(audio_get_pdo_in(e->dev), "QEMU_AUDIO_ADC_"); | ||||
|     handle_per_direction(audio_get_pdo_out(e->dev), "QEMU_AUDIO_DAC_"); | ||||
|  | ||||
|     /* Original description: Timer period in HZ (0 - use lowest possible) */ | ||||
|     get_int("QEMU_AUDIO_TIMER_PERIOD", | ||||
|             &e->dev->timer_period, &e->dev->has_timer_period); | ||||
|     if (e->dev->has_timer_period && e->dev->timer_period) { | ||||
|         e->dev->timer_period = NANOSECONDS_PER_SECOND / 1000 / | ||||
|                                e->dev->timer_period; | ||||
|     } | ||||
|  | ||||
|     switch (e->dev->driver) { | ||||
| #ifdef CONFIG_AUDIO_ALSA | ||||
|     case AUDIODEV_DRIVER_ALSA: | ||||
|         handle_alsa(e->dev); | ||||
|         break; | ||||
| #endif | ||||
|  | ||||
| #ifdef CONFIG_AUDIO_COREAUDIO | ||||
|     case AUDIODEV_DRIVER_COREAUDIO: | ||||
|         handle_coreaudio(e->dev); | ||||
|         break; | ||||
| #endif | ||||
|  | ||||
| #ifdef CONFIG_AUDIO_DSOUND | ||||
|     case AUDIODEV_DRIVER_DSOUND: | ||||
|         handle_dsound(e->dev); | ||||
|         break; | ||||
| #endif | ||||
|  | ||||
| #ifdef CONFIG_AUDIO_OSS | ||||
|     case AUDIODEV_DRIVER_OSS: | ||||
|         handle_oss(e->dev); | ||||
|         break; | ||||
| #endif | ||||
|  | ||||
| #ifdef CONFIG_AUDIO_PA | ||||
|     case AUDIODEV_DRIVER_PA: | ||||
|         handle_pa(e->dev); | ||||
|         break; | ||||
| #endif | ||||
|  | ||||
| #ifdef CONFIG_AUDIO_SDL | ||||
|     case AUDIODEV_DRIVER_SDL: | ||||
|         handle_sdl(e->dev); | ||||
|         break; | ||||
| #endif | ||||
|  | ||||
|     case AUDIODEV_DRIVER_WAV: | ||||
|         handle_wav(e->dev); | ||||
|         break; | ||||
|  | ||||
|     default: | ||||
|         break; | ||||
|     } | ||||
|  | ||||
|     return e; | ||||
| } | ||||
|  | ||||
| AudiodevListHead audio_handle_legacy_opts(void) | ||||
| { | ||||
|     const char *drvname = getenv("QEMU_AUDIO_DRV"); | ||||
|     AudiodevListHead head = QSIMPLEQ_HEAD_INITIALIZER(head); | ||||
|  | ||||
|     if (drvname) { | ||||
|         AudiodevListEntry *e; | ||||
|         audio_driver *driver = audio_driver_lookup(drvname); | ||||
|         if (!driver) { | ||||
|             dolog("Unknown audio driver `%s'\n", drvname); | ||||
|             exit(1); | ||||
|         } | ||||
|         e = legacy_opt(drvname); | ||||
|         QSIMPLEQ_INSERT_TAIL(&head, e, next); | ||||
|     } else { | ||||
|         for (int i = 0; audio_prio_list[i]; i++) { | ||||
|             audio_driver *driver = audio_driver_lookup(audio_prio_list[i]); | ||||
|             if (driver && driver->can_be_default) { | ||||
|                 AudiodevListEntry *e = legacy_opt(driver->name); | ||||
|                 QSIMPLEQ_INSERT_TAIL(&head, e, next); | ||||
|             } | ||||
|         } | ||||
|         if (QSIMPLEQ_EMPTY(&head)) { | ||||
|             dolog("Internal error: no default audio driver available\n"); | ||||
|             exit(1); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     return head; | ||||
| } | ||||
|  | ||||
| /* visitor to print -audiodev option */ | ||||
| typedef struct { | ||||
|     Visitor visitor; | ||||
|  | ||||
|     bool comma; | ||||
|     GList *path; | ||||
| } LegacyPrintVisitor; | ||||
|  | ||||
| static bool lv_start_struct(Visitor *v, const char *name, void **obj, | ||||
|                             size_t size, Error **errp) | ||||
| { | ||||
|     LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v; | ||||
|     lv->path = g_list_append(lv->path, g_strdup(name)); | ||||
|     return true; | ||||
| } | ||||
|  | ||||
| static void lv_end_struct(Visitor *v, void **obj) | ||||
| { | ||||
|     LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v; | ||||
|     lv->path = g_list_delete_link(lv->path, g_list_last(lv->path)); | ||||
| } | ||||
|  | ||||
| static void lv_print_key(Visitor *v, const char *name) | ||||
| { | ||||
|     GList *e; | ||||
|     LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v; | ||||
|     if (lv->comma) { | ||||
|         putchar(','); | ||||
|     } else { | ||||
|         lv->comma = true; | ||||
|     } | ||||
|  | ||||
|     for (e = lv->path; e; e = e->next) { | ||||
|         if (e->data) { | ||||
|             printf("%s.", (const char *) e->data); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     printf("%s=", name); | ||||
| } | ||||
|  | ||||
| static bool lv_type_int64(Visitor *v, const char *name, int64_t *obj, | ||||
|                           Error **errp) | ||||
| { | ||||
|     lv_print_key(v, name); | ||||
|     printf("%" PRIi64, *obj); | ||||
|     return true; | ||||
| } | ||||
|  | ||||
| static bool lv_type_uint64(Visitor *v, const char *name, uint64_t *obj, | ||||
|                            Error **errp) | ||||
| { | ||||
|     lv_print_key(v, name); | ||||
|     printf("%" PRIu64, *obj); | ||||
|     return true; | ||||
| } | ||||
|  | ||||
| static bool lv_type_bool(Visitor *v, const char *name, bool *obj, Error **errp) | ||||
| { | ||||
|     lv_print_key(v, name); | ||||
|     printf("%s", *obj ? "on" : "off"); | ||||
|     return true; | ||||
| } | ||||
|  | ||||
| static bool lv_type_str(Visitor *v, const char *name, char **obj, Error **errp) | ||||
| { | ||||
|     const char *str = *obj; | ||||
|     lv_print_key(v, name); | ||||
|  | ||||
|     while (*str) { | ||||
|         if (*str == ',') { | ||||
|             putchar(','); | ||||
|         } | ||||
|         putchar(*str++); | ||||
|     } | ||||
|     return true; | ||||
| } | ||||
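A hedged example of the comma doubling above (the device string and key shape are illustrative): it matches the escaping that QEMU's option parsers undo, so the printed line round-trips losslessly.

/*
 * A string value such as "hw:0,0" is printed as
 *     out.dev=hw:0,,0
 * and "-audiodev ...,out.dev=hw:0,,0" parses back to the original "hw:0,0".
 */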
|  | ||||
| static void lv_complete(Visitor *v, void *opaque) | ||||
| { | ||||
|     LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v; | ||||
|     assert(lv->path == NULL); | ||||
| } | ||||
|  | ||||
| static void lv_free(Visitor *v) | ||||
| { | ||||
|     LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v; | ||||
|  | ||||
|     g_list_free_full(lv->path, g_free); | ||||
|     g_free(lv); | ||||
| } | ||||
|  | ||||
| static Visitor *legacy_visitor_new(void) | ||||
| { | ||||
|     LegacyPrintVisitor *lv = g_new0(LegacyPrintVisitor, 1); | ||||
|  | ||||
|     lv->visitor.start_struct = lv_start_struct; | ||||
|     lv->visitor.end_struct = lv_end_struct; | ||||
|     /* lists not supported */ | ||||
|     lv->visitor.type_int64 = lv_type_int64; | ||||
|     lv->visitor.type_uint64 = lv_type_uint64; | ||||
|     lv->visitor.type_bool = lv_type_bool; | ||||
|     lv->visitor.type_str = lv_type_str; | ||||
|  | ||||
|     lv->visitor.type = VISITOR_OUTPUT; | ||||
|     lv->visitor.complete = lv_complete; | ||||
|     lv->visitor.free = lv_free; | ||||
|  | ||||
|     return &lv->visitor; | ||||
| } | ||||
|  | ||||
| void audio_legacy_help(void) | ||||
| { | ||||
|     AudiodevListHead head; | ||||
|     AudiodevListEntry *e; | ||||
|  | ||||
|     printf("Environment variable based configuration deprecated.\n"); | ||||
|     printf("Please use the new -audiodev option.\n"); | ||||
|  | ||||
|     head = audio_handle_legacy_opts(); | ||||
|     printf("\nEquivalent -audiodev to your current environment variables:\n"); | ||||
|     if (!getenv("QEMU_AUDIO_DRV")) { | ||||
|         printf("(Since you didn't specify QEMU_AUDIO_DRV, I'll list all " | ||||
|                "possibilities)\n"); | ||||
|     } | ||||
|  | ||||
|     QSIMPLEQ_FOREACH(e, &head, next) { | ||||
|         Visitor *v; | ||||
|         Audiodev *dev = e->dev; | ||||
|         printf("-audiodev "); | ||||
|  | ||||
|         v = legacy_visitor_new(); | ||||
|         visit_type_Audiodev(v, NULL, &dev, &error_abort); | ||||
|         visit_free(v); | ||||
|  | ||||
|         printf("\n"); | ||||
|     } | ||||
|     audio_free_audiodev_list(&head); | ||||
| } | ||||
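/*
 * Illustrative sketch, not part of the patch (the helper name below is made
 * up): this is how the legacy print visitor above is meant to be driven,
 * mirroring the per-entry loop in audio_legacy_help().
 */
static void example_print_audiodev(Audiodev *dev)
{
    Visitor *v = legacy_visitor_new();

    printf("-audiodev ");
    /*
     * lv_print_key() prefixes nested members with their dotted path, so a
     * PulseAudio entry prints roughly as
     *   -audiodev id=pa,driver=pa,out.frequency=44100,out.channels=2
     * and lv_type_str() doubles any ',' inside string values to keep the
     * line parseable by the -audiodev option parser.
     */
    visit_type_Audiodev(v, NULL, &dev, &error_abort);
    visit_free(v);
    printf("\n");
}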
| @@ -37,12 +37,11 @@ | ||||
| #endif | ||||
|  | ||||
| static void glue(audio_init_nb_voices_, TYPE)(AudioState *s, | ||||
|                                               struct audio_driver *drv, int min_voices) | ||||
|                                               struct audio_driver *drv) | ||||
| { | ||||
|     int max_voices = glue (drv->max_voices_, TYPE); | ||||
|     size_t voice_size = glue(drv->voice_size_, TYPE); | ||||
|     int voice_size = glue (drv->voice_size_, TYPE); | ||||
|  | ||||
|     glue (s->nb_hw_voices_, TYPE) = glue(audio_get_pdo_, TYPE)(s->dev)->voices; | ||||
|     if (glue (s->nb_hw_voices_, TYPE) > max_voices) { | ||||
|         if (!max_voices) { | ||||
| #ifdef DAC | ||||
| @@ -57,12 +56,6 @@ static void glue(audio_init_nb_voices_, TYPE)(AudioState *s, | ||||
|         glue (s->nb_hw_voices_, TYPE) = max_voices; | ||||
|     } | ||||
|  | ||||
|     if (glue (s->nb_hw_voices_, TYPE) < min_voices) { | ||||
|         dolog ("Bogus number of " NAME " voices %d, setting to %d\n", | ||||
|                glue (s->nb_hw_voices_, TYPE), | ||||
|                min_voices); | ||||
|     } | ||||
|  | ||||
|     if (audio_bug(__func__, !voice_size && max_voices)) { | ||||
|         dolog ("drv=`%s' voice_size=0 max_voices=%d\n", | ||||
|                drv->name, max_voices); | ||||
| @@ -70,17 +63,16 @@ static void glue(audio_init_nb_voices_, TYPE)(AudioState *s, | ||||
|     } | ||||
|  | ||||
|     if (audio_bug(__func__, voice_size && !max_voices)) { | ||||
|         dolog("drv=`%s' voice_size=%zu max_voices=0\n", | ||||
|               drv->name, voice_size); | ||||
|         dolog ("drv=`%s' voice_size=%d max_voices=0\n", | ||||
|                drv->name, voice_size); | ||||
|     } | ||||
| } | ||||
|  | ||||
| static void glue (audio_pcm_hw_free_resources_, TYPE) (HW *hw) | ||||
| { | ||||
|     g_free(hw->buf_emul); | ||||
|     g_free(HWBUF.buffer); | ||||
|     HWBUF.buffer = NULL; | ||||
|     HWBUF.size = 0; | ||||
|     g_free (HWBUF); | ||||
|     HWBUF = NULL; | ||||
| } | ||||
|  | ||||
| static void glue(audio_pcm_hw_alloc_resources_, TYPE)(HW *hw) | ||||
| @@ -91,67 +83,56 @@ static void glue(audio_pcm_hw_alloc_resources_, TYPE)(HW *hw) | ||||
|             dolog("Attempted to allocate empty buffer\n"); | ||||
|         } | ||||
|  | ||||
|         HWBUF.buffer = g_new0(st_sample, samples); | ||||
|         HWBUF.size = samples; | ||||
|         HWBUF.pos = 0; | ||||
|         HWBUF = g_malloc0(sizeof(STSampleBuffer) + sizeof(st_sample) * samples); | ||||
|         HWBUF->size = samples; | ||||
|     } else { | ||||
|         HWBUF.buffer = NULL; | ||||
|         HWBUF.size = 0; | ||||
|         HWBUF = NULL; | ||||
|     } | ||||
| } | ||||
|  | ||||
| static void glue (audio_pcm_sw_free_resources_, TYPE) (SW *sw) | ||||
| { | ||||
|     g_free(sw->resample_buf.buffer); | ||||
|     sw->resample_buf.buffer = NULL; | ||||
|     sw->resample_buf.size = 0; | ||||
|     g_free (sw->buf); | ||||
|  | ||||
|     if (sw->rate) { | ||||
|         st_rate_stop (sw->rate); | ||||
|     } | ||||
|  | ||||
|     sw->buf = NULL; | ||||
|     sw->rate = NULL; | ||||
| } | ||||
|  | ||||
| static int glue (audio_pcm_sw_alloc_resources_, TYPE) (SW *sw) | ||||
| { | ||||
|     HW *hw = sw->hw; | ||||
|     uint64_t samples; | ||||
|     int samples; | ||||
|  | ||||
|     if (!glue(audio_get_pdo_, TYPE)(sw->s->dev)->mixing_engine) { | ||||
|         return 0; | ||||
|     } | ||||
|  | ||||
|     samples = muldiv64(HWBUF.size, sw->info.freq, hw->info.freq); | ||||
|     if (samples == 0) { | ||||
|         uint64_t f_fe_min; | ||||
|         uint64_t f_be = (uint32_t)hw->info.freq; | ||||
| #ifdef DAC | ||||
|     samples = ((int64_t) sw->HWBUF->size << 32) / sw->ratio; | ||||
| #else | ||||
|     samples = (int64_t)sw->HWBUF->size * sw->ratio >> 32; | ||||
| #endif | ||||
|  | ||||
|         /* f_fe_min = ceil(1 [frames] * f_be [Hz] / size_be [frames]) */ | ||||
|         f_fe_min = (f_be + HWBUF.size - 1) / HWBUF.size; | ||||
|         qemu_log_mask(LOG_UNIMP, | ||||
|                       AUDIO_CAP ": The guest selected a " NAME " sample rate" | ||||
|                       " of %d Hz for %s. Only sample rates >= %" PRIu64 " Hz" | ||||
|                       " are supported.\n", | ||||
|                       sw->info.freq, sw->name, f_fe_min); | ||||
|     sw->buf = audio_calloc(__func__, samples, sizeof(struct st_sample)); | ||||
|     if (!sw->buf) { | ||||
|         dolog ("Could not allocate buffer for `%s' (%d samples)\n", | ||||
|                SW_NAME (sw), samples); | ||||
|         return -1; | ||||
|     } | ||||
|  | ||||
|     /* | ||||
|      * Allocate one additional audio frame that is needed for upsampling | ||||
|      * if the resample buffer size is small. For large buffer sizes take | ||||
|      * care of overflows and truncation. | ||||
|      */ | ||||
|     samples = samples < SIZE_MAX ? samples + 1 : SIZE_MAX; | ||||
|     sw->resample_buf.buffer = g_new0(st_sample, samples); | ||||
|     sw->resample_buf.size = samples; | ||||
|     sw->resample_buf.pos = 0; | ||||
|  | ||||
| #ifdef DAC | ||||
|     sw->rate = st_rate_start(sw->info.freq, hw->info.freq); | ||||
|     sw->rate = st_rate_start (sw->info.freq, sw->hw->info.freq); | ||||
| #else | ||||
|     sw->rate = st_rate_start(hw->info.freq, sw->info.freq); | ||||
|     sw->rate = st_rate_start (sw->hw->info.freq, sw->info.freq); | ||||
| #endif | ||||
|  | ||||
|     if (!sw->rate) { | ||||
|         g_free (sw->buf); | ||||
|         sw->buf = NULL; | ||||
|         return -1; | ||||
|     } | ||||
|     return 0; | ||||
| } | ||||
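/*
 * Worked sizing example for the resample_buf allocation above (figures are
 * illustrative, not from the patch): a 44100 Hz guest voice mixed into a
 * 48000 Hz backend whose mix buffer holds 2048 frames needs
 * muldiv64(2048, 44100, 48000) = 1881 frames, and the "+ 1" reserves one
 * extra frame for upsampling, so the resample buffer ends up with 1882
 * frames.
 */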
|  | ||||
| @@ -168,8 +149,11 @@ static int glue (audio_pcm_sw_init_, TYPE) ( | ||||
|     sw->hw = hw; | ||||
|     sw->active = 0; | ||||
| #ifdef DAC | ||||
|     sw->ratio = ((int64_t) sw->hw->info.freq << 32) / sw->info.freq; | ||||
|     sw->total_hw_samples_mixed = 0; | ||||
|     sw->empty = 1; | ||||
| #else | ||||
|     sw->ratio = ((int64_t) sw->info.freq << 32) / sw->hw->info.freq; | ||||
| #endif | ||||
|  | ||||
|     if (sw->info.is_float) { | ||||
| @@ -280,11 +264,13 @@ static HW *glue(audio_pcm_hw_add_new_, TYPE)(AudioState *s, | ||||
|         return NULL; | ||||
|     } | ||||
|  | ||||
|     /* | ||||
|      * Since glue(s->nb_hw_voices_, TYPE) is != 0, glue(drv->voice_size_, TYPE) | ||||
|      * is guaranteed to be != 0. See the audio_init_nb_voices_* functions. | ||||
|      */ | ||||
|     hw = g_malloc0(glue(drv->voice_size_, TYPE)); | ||||
|     hw = audio_calloc(__func__, 1, glue(drv->voice_size_, TYPE)); | ||||
|     if (!hw) { | ||||
|         dolog ("Can not allocate voice `%s' size %d\n", | ||||
|                drv->name, glue (drv->voice_size_, TYPE)); | ||||
|         return NULL; | ||||
|     } | ||||
|  | ||||
|     hw->s = s; | ||||
|     hw->pcm_ops = drv->pcm_ops; | ||||
|  | ||||
| @@ -369,10 +355,6 @@ AudiodevPerDirectionOptions *glue(audio_get_pdo_, TYPE)(Audiodev *dev) | ||||
|     case AUDIODEV_DRIVER_PA: | ||||
|         return qapi_AudiodevPaPerDirectionOptions_base(dev->u.pa.TYPE); | ||||
| #endif | ||||
| #ifdef CONFIG_AUDIO_PIPEWIRE | ||||
|     case AUDIODEV_DRIVER_PIPEWIRE: | ||||
|         return qapi_AudiodevPipewirePerDirectionOptions_base(dev->u.pipewire.TYPE); | ||||
| #endif | ||||
| #ifdef CONFIG_AUDIO_SDL | ||||
|     case AUDIODEV_DRIVER_SDL: | ||||
|         return qapi_AudiodevSdlPerDirectionOptions_base(dev->u.sdl.TYPE); | ||||
| @@ -436,28 +418,33 @@ static SW *glue(audio_pcm_create_voice_pair_, TYPE)( | ||||
|         hw_as = *as; | ||||
|     } | ||||
|  | ||||
|     sw = g_new0(SW, 1); | ||||
|     sw = audio_calloc(__func__, 1, sizeof(*sw)); | ||||
|     if (!sw) { | ||||
|         dolog ("Could not allocate soft voice `%s' (%zu bytes)\n", | ||||
|                sw_name ? sw_name : "unknown", sizeof (*sw)); | ||||
|         goto err1; | ||||
|     } | ||||
|     sw->s = s; | ||||
|  | ||||
|     hw = glue(audio_pcm_hw_add_, TYPE)(s, &hw_as); | ||||
|     if (!hw) { | ||||
|         dolog("Could not create a backend for voice `%s'\n", sw_name); | ||||
|         goto err1; | ||||
|         goto err2; | ||||
|     } | ||||
|  | ||||
|     glue (audio_pcm_hw_add_sw_, TYPE) (hw, sw); | ||||
|  | ||||
|     if (glue (audio_pcm_sw_init_, TYPE) (sw, hw, sw_name, as)) { | ||||
|         goto err2; | ||||
|         goto err3; | ||||
|     } | ||||
|  | ||||
|     return sw; | ||||
|  | ||||
| err2: | ||||
| err3: | ||||
|     glue (audio_pcm_hw_del_sw_, TYPE) (sw); | ||||
|     glue (audio_pcm_hw_gc_, TYPE) (&hw); | ||||
| err2: | ||||
|     g_free (sw); | ||||
| err1: | ||||
|     g_free(sw); | ||||
|     return NULL; | ||||
| } | ||||
|  | ||||
| @@ -528,8 +515,8 @@ SW *glue (AUD_open_, TYPE) ( | ||||
|         HW *hw = sw->hw; | ||||
|  | ||||
|         if (!hw) { | ||||
|             dolog("Internal logic error: voice `%s' has no backend\n", | ||||
|                   SW_NAME(sw)); | ||||
|             dolog ("Internal logic error voice `%s' has no hardware store\n", | ||||
|                    SW_NAME (sw)); | ||||
|             goto fail; | ||||
|         } | ||||
|  | ||||
| @@ -540,6 +527,7 @@ SW *glue (AUD_open_, TYPE) ( | ||||
|     } else { | ||||
|         sw = glue(audio_pcm_create_voice_pair_, TYPE)(s, name, as); | ||||
|         if (!sw) { | ||||
|             dolog ("Failed to create voice `%s'\n", name); | ||||
|             return NULL; | ||||
|         } | ||||
|     } | ||||
|   | ||||
| @@ -644,7 +644,7 @@ static void coreaudio_enable_out(HWVoiceOut *hw, bool enable) | ||||
|     update_device_playback_state(core); | ||||
| } | ||||
|  | ||||
| static void *coreaudio_audio_init(Audiodev *dev, Error **errp) | ||||
| static void *coreaudio_audio_init(Audiodev *dev) | ||||
| { | ||||
|     return dev; | ||||
| } | ||||
| @@ -673,6 +673,7 @@ static struct audio_driver coreaudio_audio_driver = { | ||||
|     .init           = coreaudio_audio_init, | ||||
|     .fini           = coreaudio_audio_fini, | ||||
|     .pcm_ops        = &coreaudio_pcm_ops, | ||||
|     .can_be_default = 1, | ||||
|     .max_voices_out = 1, | ||||
|     .max_voices_in  = 0, | ||||
|     .voice_size_out = sizeof (coreaudioVoiceOut), | ||||
|   | ||||
| @@ -29,11 +29,7 @@ | ||||
| #include "qemu/timer.h" | ||||
| #include "qemu/dbus.h" | ||||
|  | ||||
| #ifdef G_OS_UNIX | ||||
| #include <gio/gunixfdlist.h> | ||||
| #endif | ||||
|  | ||||
| #include "ui/dbus.h" | ||||
| #include "ui/dbus-display1.h" | ||||
|  | ||||
| #define AUDIO_CAP "dbus" | ||||
| @@ -47,7 +43,6 @@ | ||||
|  | ||||
| typedef struct DBusAudio { | ||||
|     GDBusObjectManagerServer *server; | ||||
|     bool p2p; | ||||
|     GDBusObjectSkeleton *audio; | ||||
|     QemuDBusDisplay1Audio *iface; | ||||
|     GHashTable *out_listeners; | ||||
| @@ -395,7 +390,7 @@ dbus_enable_in(HWVoiceIn *hw, bool enable) | ||||
| } | ||||
|  | ||||
| static void * | ||||
| dbus_audio_init(Audiodev *dev, Error **errp) | ||||
| dbus_audio_init(Audiodev *dev) | ||||
| { | ||||
|     DBusAudio *da = g_new0(DBusAudio, 1); | ||||
|  | ||||
| @@ -448,15 +443,12 @@ listener_in_vanished_cb(GDBusConnection *connection, | ||||
| static gboolean | ||||
| dbus_audio_register_listener(AudioState *s, | ||||
|                              GDBusMethodInvocation *invocation, | ||||
| #ifdef G_OS_UNIX | ||||
|                              GUnixFDList *fd_list, | ||||
| #endif | ||||
|                              GVariant *arg_listener, | ||||
|                              bool out) | ||||
| { | ||||
|     DBusAudio *da = s->drv_opaque; | ||||
|     const char *sender = | ||||
|         da->p2p ? "p2p" : g_dbus_method_invocation_get_sender(invocation); | ||||
|     const char *sender = g_dbus_method_invocation_get_sender(invocation); | ||||
|     g_autoptr(GDBusConnection) listener_conn = NULL; | ||||
|     g_autoptr(GError) err = NULL; | ||||
|     g_autoptr(GSocket) socket = NULL; | ||||
| @@ -477,11 +469,6 @@ dbus_audio_register_listener(AudioState *s, | ||||
|         return DBUS_METHOD_INVOCATION_HANDLED; | ||||
|     } | ||||
|  | ||||
| #ifdef G_OS_WIN32 | ||||
|     if (!dbus_win32_import_socket(invocation, arg_listener, &fd)) { | ||||
|         return DBUS_METHOD_INVOCATION_HANDLED; | ||||
|     } | ||||
| #else | ||||
|     fd = g_unix_fd_list_get(fd_list, g_variant_get_handle(arg_listener), &err); | ||||
|     if (err) { | ||||
|         g_dbus_method_invocation_return_error(invocation, | ||||
| @@ -491,7 +478,6 @@ dbus_audio_register_listener(AudioState *s, | ||||
|                                               err->message); | ||||
|         return DBUS_METHOD_INVOCATION_HANDLED; | ||||
|     } | ||||
| #endif | ||||
|  | ||||
|     socket = g_socket_new_from_fd(fd, &err); | ||||
|     if (err) { | ||||
| @@ -500,28 +486,15 @@ dbus_audio_register_listener(AudioState *s, | ||||
|                                               DBUS_DISPLAY_ERROR_FAILED, | ||||
|                                               "Couldn't make a socket: %s", | ||||
|                                               err->message); | ||||
| #ifdef G_OS_WIN32 | ||||
|         closesocket(fd); | ||||
| #else | ||||
|         close(fd); | ||||
| #endif | ||||
|         return DBUS_METHOD_INVOCATION_HANDLED; | ||||
|     } | ||||
|     socket_conn = g_socket_connection_factory_create_connection(socket); | ||||
|     if (out) { | ||||
|         qemu_dbus_display1_audio_complete_register_out_listener( | ||||
|             da->iface, invocation | ||||
| #ifdef G_OS_UNIX | ||||
|             , NULL | ||||
| #endif | ||||
|             ); | ||||
|             da->iface, invocation, NULL); | ||||
|     } else { | ||||
|         qemu_dbus_display1_audio_complete_register_in_listener( | ||||
|             da->iface, invocation | ||||
| #ifdef G_OS_UNIX | ||||
|             , NULL | ||||
| #endif | ||||
|             ); | ||||
|             da->iface, invocation, NULL); | ||||
|     } | ||||
|  | ||||
|     listener_conn = | ||||
| @@ -599,36 +572,26 @@ dbus_audio_register_listener(AudioState *s, | ||||
| static gboolean | ||||
| dbus_audio_register_out_listener(AudioState *s, | ||||
|                                  GDBusMethodInvocation *invocation, | ||||
| #ifdef G_OS_UNIX | ||||
|                                  GUnixFDList *fd_list, | ||||
| #endif | ||||
|                                  GVariant *arg_listener) | ||||
| { | ||||
|     return dbus_audio_register_listener(s, invocation, | ||||
| #ifdef G_OS_UNIX | ||||
|                                         fd_list, | ||||
| #endif | ||||
|                                         arg_listener, true); | ||||
|                                         fd_list, arg_listener, true); | ||||
|  | ||||
| } | ||||
|  | ||||
| static gboolean | ||||
| dbus_audio_register_in_listener(AudioState *s, | ||||
|                                 GDBusMethodInvocation *invocation, | ||||
| #ifdef G_OS_UNIX | ||||
|                                 GUnixFDList *fd_list, | ||||
| #endif | ||||
|                                 GVariant *arg_listener) | ||||
| { | ||||
|     return dbus_audio_register_listener(s, invocation, | ||||
| #ifdef G_OS_UNIX | ||||
|                                         fd_list, | ||||
| #endif | ||||
|                                         arg_listener, false); | ||||
|                                         fd_list, arg_listener, false); | ||||
| } | ||||
|  | ||||
| static void | ||||
| dbus_audio_set_server(AudioState *s, GDBusObjectManagerServer *server, bool p2p) | ||||
| dbus_audio_set_server(AudioState *s, GDBusObjectManagerServer *server) | ||||
| { | ||||
|     DBusAudio *da = s->drv_opaque; | ||||
|  | ||||
| @@ -636,7 +599,6 @@ dbus_audio_set_server(AudioState *s, GDBusObjectManagerServer *server, bool p2p) | ||||
|     g_assert(!da->server); | ||||
|  | ||||
|     da->server = g_object_ref(server); | ||||
|     da->p2p = p2p; | ||||
|  | ||||
|     da->audio = g_dbus_object_skeleton_new(DBUS_DISPLAY1_AUDIO_PATH); | ||||
|     da->iface = qemu_dbus_display1_audio_skeleton_new(); | ||||
| @@ -676,6 +638,7 @@ static struct audio_driver dbus_audio_driver = { | ||||
|     .fini            = dbus_audio_fini, | ||||
|     .set_dbus_server = dbus_audio_set_server, | ||||
|     .pcm_ops         = &dbus_pcm_ops, | ||||
|     .can_be_default  = 1, | ||||
|     .max_voices_out  = INT_MAX, | ||||
|     .max_voices_in   = INT_MAX, | ||||
|     .voice_size_out  = sizeof(DBusVoiceOut), | ||||
|   | ||||
| @@ -619,7 +619,7 @@ static void dsound_audio_fini (void *opaque) | ||||
|     g_free(s); | ||||
| } | ||||
|  | ||||
| static void *dsound_audio_init(Audiodev *dev, Error **errp) | ||||
| static void *dsound_audio_init(Audiodev *dev) | ||||
| { | ||||
|     int err; | ||||
|     HRESULT hr; | ||||
| @@ -721,6 +721,7 @@ static struct audio_driver dsound_audio_driver = { | ||||
|     .init           = dsound_audio_init, | ||||
|     .fini           = dsound_audio_fini, | ||||
|     .pcm_ops        = &dsound_pcm_ops, | ||||
|     .can_be_default = 1, | ||||
|     .max_voices_out = INT_MAX, | ||||
|     .max_voices_in  = 1, | ||||
|     .voice_size_out = sizeof (DSoundVoiceOut), | ||||
|   | ||||
| @@ -70,9 +70,6 @@ typedef struct QJackClient { | ||||
|     int             buffersize; | ||||
|     jack_port_t   **port; | ||||
|     QJackBuffer     fifo; | ||||
|  | ||||
|     /* Used as workspace by qjack_process() */ | ||||
|     float **process_buffers; | ||||
| } | ||||
| QJackClient; | ||||
|  | ||||
| @@ -270,21 +267,22 @@ static int qjack_process(jack_nframes_t nframes, void *arg) | ||||
|     } | ||||
|  | ||||
|     /* get the buffers for the ports */ | ||||
|     float *buffers[c->nchannels]; | ||||
|     for (int i = 0; i < c->nchannels; ++i) { | ||||
|         c->process_buffers[i] = jack_port_get_buffer(c->port[i], nframes); | ||||
|         buffers[i] = jack_port_get_buffer(c->port[i], nframes); | ||||
|     } | ||||
|  | ||||
|     if (c->out) { | ||||
|         if (likely(c->enabled)) { | ||||
|             qjack_buffer_read_l(&c->fifo, c->process_buffers, nframes); | ||||
|             qjack_buffer_read_l(&c->fifo, buffers, nframes); | ||||
|         } else { | ||||
|             for (int i = 0; i < c->nchannels; ++i) { | ||||
|                 memset(c->process_buffers[i], 0, nframes * sizeof(float)); | ||||
|                 memset(buffers[i], 0, nframes * sizeof(float)); | ||||
|             } | ||||
|         } | ||||
|     } else { | ||||
|         if (likely(c->enabled)) { | ||||
|             qjack_buffer_write_l(&c->fifo, c->process_buffers, nframes); | ||||
|             qjack_buffer_write_l(&c->fifo, buffers, nframes); | ||||
|         } | ||||
|     } | ||||
|  | ||||
| @@ -402,8 +400,7 @@ static void qjack_client_connect_ports(QJackClient *c) | ||||
| static int qjack_client_init(QJackClient *c) | ||||
| { | ||||
|     jack_status_t status; | ||||
|     int client_name_len = jack_client_name_size(); /* includes NUL */ | ||||
|     g_autofree char *client_name = g_new(char, client_name_len); | ||||
|     char client_name[jack_client_name_size()]; | ||||
|     jack_options_t options = JackNullOption; | ||||
|  | ||||
|     if (c->state == QJACK_STATE_RUNNING) { | ||||
| @@ -412,7 +409,7 @@ static int qjack_client_init(QJackClient *c) | ||||
|  | ||||
|     c->connect_ports = true; | ||||
|  | ||||
|     snprintf(client_name, client_name_len, "%s-%s", | ||||
|     snprintf(client_name, sizeof(client_name), "%s-%s", | ||||
|         c->out ? "out" : "in", | ||||
|         c->opt->client_name ? c->opt->client_name : audio_application_name()); | ||||
|  | ||||
| @@ -450,9 +447,6 @@ static int qjack_client_init(QJackClient *c) | ||||
|           jack_get_client_name(c->client)); | ||||
|     } | ||||
|  | ||||
|     /* Allocate working buffer for process callback */ | ||||
|     c->process_buffers = g_new(float *, c->nchannels); | ||||
|  | ||||
|     jack_set_process_callback(c->client, qjack_process , c); | ||||
|     jack_set_port_registration_callback(c->client, qjack_port_registration, c); | ||||
|     jack_set_xrun_callback(c->client, qjack_xrun, c); | ||||
| @@ -584,7 +578,6 @@ static void qjack_client_fini_locked(QJackClient *c) | ||||
|  | ||||
|         qjack_buffer_free(&c->fifo); | ||||
|         g_free(c->port); | ||||
|         g_free(c->process_buffers); | ||||
|  | ||||
|         c->state = QJACK_STATE_DISCONNECTED; | ||||
|         /* fallthrough */ | ||||
| @@ -645,7 +638,7 @@ static int qjack_thread_creator(jack_native_thread_t *thread, | ||||
| } | ||||
| #endif | ||||
|  | ||||
| static void *qjack_init(Audiodev *dev, Error **errp) | ||||
| static void *qjack_init(Audiodev *dev) | ||||
| { | ||||
|     assert(dev->driver == AUDIODEV_DRIVER_JACK); | ||||
|     return dev; | ||||
| @@ -676,6 +669,7 @@ static struct audio_driver jack_driver = { | ||||
|     .init           = qjack_init, | ||||
|     .fini           = qjack_fini, | ||||
|     .pcm_ops        = &jack_pcm_ops, | ||||
|     .can_be_default = 1, | ||||
|     .max_voices_out = INT_MAX, | ||||
|     .max_voices_in  = INT_MAX, | ||||
|     .voice_size_out = sizeof(QJackOut), | ||||
|   | ||||
| @@ -1,14 +1,15 @@ | ||||
| system_ss.add([spice_headers, files('audio.c')]) | ||||
| system_ss.add(files( | ||||
| softmmu_ss.add([spice_headers, files('audio.c')]) | ||||
| softmmu_ss.add(files( | ||||
|   'audio-hmp-cmds.c', | ||||
|   'audio_legacy.c', | ||||
|   'mixeng.c', | ||||
|   'noaudio.c', | ||||
|   'wavaudio.c', | ||||
|   'wavcapture.c', | ||||
| )) | ||||
|  | ||||
| system_ss.add(when: coreaudio, if_true: files('coreaudio.m')) | ||||
| system_ss.add(when: dsound, if_true: files('dsoundaudio.c', 'audio_win_int.c')) | ||||
| softmmu_ss.add(when: coreaudio, if_true: files('coreaudio.m')) | ||||
| softmmu_ss.add(when: dsound, if_true: files('dsoundaudio.c', 'audio_win_int.c')) | ||||
|  | ||||
| audio_modules = {} | ||||
| foreach m : [ | ||||
| @@ -18,7 +19,6 @@ foreach m : [ | ||||
|   ['sdl', sdl, files('sdlaudio.c')], | ||||
|   ['jack', jack, files('jackaudio.c')], | ||||
|   ['sndio', sndio, files('sndioaudio.c')], | ||||
|   ['pipewire', pipewire, files('pwaudio.c')], | ||||
|   ['spice', spice, files('spiceaudio.c')] | ||||
| ] | ||||
|   if m[1].found() | ||||
| @@ -30,7 +30,7 @@ endforeach | ||||
|  | ||||
| if dbus_display | ||||
|     module_ss = ss.source_set() | ||||
|     module_ss.add(when: [gio, pixman], if_true: files('dbusaudio.c')) | ||||
|     module_ss.add(when: gio, if_true: files('dbusaudio.c')) | ||||
|     audio_modules += {'dbus': module_ss} | ||||
| endif | ||||
|  | ||||
|   | ||||
| @@ -414,7 +414,12 @@ struct rate { | ||||
|  */ | ||||
| void *st_rate_start (int inrate, int outrate) | ||||
| { | ||||
|     struct rate *rate = g_new0(struct rate, 1); | ||||
|     struct rate *rate = audio_calloc(__func__, 1, sizeof(*rate)); | ||||
|  | ||||
|     if (!rate) { | ||||
|         dolog ("Could not allocate resampler (%zu bytes)\n", sizeof (*rate)); | ||||
|         return NULL; | ||||
|     } | ||||
|  | ||||
|     rate->opos = 0; | ||||
|  | ||||
| @@ -440,86 +445,6 @@ void st_rate_stop (void *opaque) | ||||
|     g_free (opaque); | ||||
| } | ||||
|  | ||||
| /** | ||||
|  * st_rate_frames_out() - returns the number of frames the resampling code | ||||
|  * generates from frames_in frames | ||||
|  * | ||||
|  * @opaque: pointer to struct rate | ||||
|  * @frames_in: number of frames | ||||
|  * | ||||
|  * When upsampling, there may be more than one correct result. In this case, | ||||
|  * the function returns the maximum number of output frames the resampling | ||||
|  * code can generate. | ||||
|  */ | ||||
| uint32_t st_rate_frames_out(void *opaque, uint32_t frames_in) | ||||
| { | ||||
|     struct rate *rate = opaque; | ||||
|     uint64_t opos_end, opos_delta; | ||||
|     uint32_t ipos_end; | ||||
|     uint32_t frames_out; | ||||
|  | ||||
|     if (rate->opos_inc == 1ULL << 32) { | ||||
|         return frames_in; | ||||
|     } | ||||
|  | ||||
|     /* no output frame without at least one input frame */ | ||||
|     if (!frames_in) { | ||||
|         return 0; | ||||
|     } | ||||
|  | ||||
|     /* last frame read was at rate->ipos - 1 */ | ||||
|     ipos_end = rate->ipos - 1 + frames_in; | ||||
|     opos_end = (uint64_t)ipos_end << 32; | ||||
|  | ||||
|     /* last frame written was at rate->opos - rate->opos_inc */ | ||||
|     if (opos_end + rate->opos_inc <= rate->opos) { | ||||
|         return 0; | ||||
|     } | ||||
|     opos_delta = opos_end - rate->opos + rate->opos_inc; | ||||
|     frames_out = opos_delta / rate->opos_inc; | ||||
|  | ||||
|     return opos_delta % rate->opos_inc ? frames_out : frames_out - 1; | ||||
| } | ||||
|  | ||||
| /** | ||||
|  * st_rate_frames_in() - returns the number of frames needed to | ||||
|  * get frames_out frames after resampling | ||||
|  * | ||||
|  * @opaque: pointer to struct rate | ||||
|  * @frames_out: number of frames | ||||
|  * | ||||
|  * When downsampling, there may be more than one correct result. In this | ||||
|  * case, the function returns the maximum number of input frames needed. | ||||
|  */ | ||||
| uint32_t st_rate_frames_in(void *opaque, uint32_t frames_out) | ||||
| { | ||||
|     struct rate *rate = opaque; | ||||
|     uint64_t opos_start, opos_end; | ||||
|     uint32_t ipos_start, ipos_end; | ||||
|  | ||||
|     if (rate->opos_inc == 1ULL << 32) { | ||||
|         return frames_out; | ||||
|     } | ||||
|  | ||||
|     if (frames_out) { | ||||
|         opos_start = rate->opos; | ||||
|         ipos_start = rate->ipos; | ||||
|     } else { | ||||
|         uint64_t offset; | ||||
|  | ||||
|         /* add offset = ceil(opos_inc) to opos and ipos to avoid an underflow */ | ||||
|         offset = (rate->opos_inc + (1ULL << 32) - 1) & ~((1ULL << 32) - 1); | ||||
|         opos_start = rate->opos + offset; | ||||
|         ipos_start = rate->ipos + (offset >> 32); | ||||
|     } | ||||
|     /* last frame written was at opos_start - rate->opos_inc */ | ||||
|     opos_end = opos_start - rate->opos_inc + rate->opos_inc * frames_out; | ||||
|     ipos_end = (opos_end >> 32) + 1; | ||||
|  | ||||
|     /* last frame read was at ipos_start - 1 */ | ||||
|     return ipos_end + 1 > ipos_start ? ipos_end + 1 - ipos_start : 0; | ||||
| } | ||||
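/*
 * Numeric illustration (rates are assumptions): with the 32.32 fixed-point
 * bookkeeping documented above, and assuming st_rate_start() sets
 * opos_inc = ((uint64_t)inrate << 32) / outrate (consistent with the
 * equal-rate shortcut opos_inc == 1ULL << 32), a 44100 -> 48000 Hz stream
 * gets an opos_inc of about 0.919 in fixed point.  st_rate_frames_out()
 * then reports roughly 1000 * 48000 / 44100, i.e. 1088 or 1089 output
 * frames for 1000 input frames depending on the current opos phase, and
 * st_rate_frames_in() answers the inverse question.
 */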
|  | ||||
| void mixeng_clear (struct st_sample *buf, int len) | ||||
| { | ||||
|     memset (buf, 0, len * sizeof (struct st_sample)); | ||||
|   | ||||
| @@ -38,7 +38,7 @@ typedef struct st_sample st_sample; | ||||
| typedef void (t_sample) (struct st_sample *dst, const void *src, int samples); | ||||
| typedef void (f_sample) (void *dst, const struct st_sample *src, int samples); | ||||
|  | ||||
| /* indices: [stereo][signed][swap endianness][8, 16 or 32-bits] */ | ||||
| /* indices: [stereo][signed][swap endiannes][8, 16 or 32-bits] */ | ||||
| extern t_sample *mixeng_conv[2][2][2][3]; | ||||
| extern f_sample *mixeng_clip[2][2][2][3]; | ||||
|  | ||||
| @@ -52,8 +52,6 @@ void st_rate_flow(void *opaque, st_sample *ibuf, st_sample *obuf, | ||||
| void st_rate_flow_mix(void *opaque, st_sample *ibuf, st_sample *obuf, | ||||
|                       size_t *isamp, size_t *osamp); | ||||
| void st_rate_stop (void *opaque); | ||||
| uint32_t st_rate_frames_out(void *opaque, uint32_t frames_in); | ||||
| uint32_t st_rate_frames_in(void *opaque, uint32_t frames_out); | ||||
| void mixeng_clear (struct st_sample *buf, int len); | ||||
| void mixeng_volume (struct st_sample *buf, int len, struct mixeng_volume *vol); | ||||
|  | ||||
|   | ||||
| @@ -104,7 +104,7 @@ static void no_enable_in(HWVoiceIn *hw, bool enable) | ||||
|     } | ||||
| } | ||||
|  | ||||
| static void *no_audio_init(Audiodev *dev, Error **errp) | ||||
| static void *no_audio_init(Audiodev *dev) | ||||
| { | ||||
|     return &no_audio_init; | ||||
| } | ||||
| @@ -135,6 +135,7 @@ static struct audio_driver no_audio_driver = { | ||||
|     .init           = no_audio_init, | ||||
|     .fini           = no_audio_fini, | ||||
|     .pcm_ops        = &no_pcm_ops, | ||||
|     .can_be_default = 1, | ||||
|     .max_voices_out = INT_MAX, | ||||
|     .max_voices_in  = INT_MAX, | ||||
|     .voice_size_out = sizeof (NoVoiceOut), | ||||
|   | ||||
| @@ -28,7 +28,6 @@ | ||||
| #include "qemu/main-loop.h" | ||||
| #include "qemu/module.h" | ||||
| #include "qemu/host-utils.h" | ||||
| #include "qapi/error.h" | ||||
| #include "audio.h" | ||||
| #include "trace.h" | ||||
|  | ||||
| @@ -549,6 +548,7 @@ static int oss_init_out(HWVoiceOut *hw, struct audsettings *as, | ||||
|                        hw->size_emul); | ||||
|             hw->buf_emul = NULL; | ||||
|         } else { | ||||
|             int err; | ||||
|             int trig = 0; | ||||
|             if (ioctl (fd, SNDCTL_DSP_SETTRIGGER, &trig) < 0) { | ||||
|                 oss_logerr (errno, "SNDCTL_DSP_SETTRIGGER 0 failed\n"); | ||||
| @@ -736,7 +736,7 @@ static void oss_init_per_direction(AudiodevOssPerDirectionOptions *opdo) | ||||
|     } | ||||
| } | ||||
|  | ||||
| static void *oss_audio_init(Audiodev *dev, Error **errp) | ||||
| static void *oss_audio_init(Audiodev *dev) | ||||
| { | ||||
|     AudiodevOssOptions *oopts; | ||||
|     assert(dev->driver == AUDIODEV_DRIVER_OSS); | ||||
| @@ -745,12 +745,8 @@ static void *oss_audio_init(Audiodev *dev, Error **errp) | ||||
|     oss_init_per_direction(oopts->in); | ||||
|     oss_init_per_direction(oopts->out); | ||||
|  | ||||
|     if (access(oopts->in->dev ?: "/dev/dsp", R_OK | W_OK) < 0) { | ||||
|         error_setg_errno(errp, errno, "%s not accessible", oopts->in->dev ?: "/dev/dsp"); | ||||
|         return NULL; | ||||
|     } | ||||
|     if (access(oopts->out->dev ?: "/dev/dsp", R_OK | W_OK) < 0) { | ||||
|         error_setg_errno(errp, errno, "%s not accessible", oopts->out->dev ?: "/dev/dsp"); | ||||
|     if (access(oopts->in->dev ?: "/dev/dsp", R_OK | W_OK) < 0 || | ||||
|         access(oopts->out->dev ?: "/dev/dsp", R_OK | W_OK) < 0) { | ||||
|         return NULL; | ||||
|     } | ||||
|     return dev; | ||||
| @@ -783,6 +779,7 @@ static struct audio_driver oss_audio_driver = { | ||||
|     .init           = oss_audio_init, | ||||
|     .fini           = oss_audio_fini, | ||||
|     .pcm_ops        = &oss_pcm_ops, | ||||
|     .can_be_default = 1, | ||||
|     .max_voices_out = INT_MAX, | ||||
|     .max_voices_in  = INT_MAX, | ||||
|     .voice_size_out = sizeof (OSSVoiceOut), | ||||
|   | ||||
| @@ -3,7 +3,7 @@ | ||||
| #include "qemu/osdep.h" | ||||
| #include "qemu/module.h" | ||||
| #include "audio.h" | ||||
| #include "qapi/error.h" | ||||
| #include "qapi/opts-visitor.h" | ||||
|  | ||||
| #include <pulse/pulseaudio.h> | ||||
|  | ||||
| @@ -818,7 +818,7 @@ fail: | ||||
|     return NULL; | ||||
| } | ||||
|  | ||||
| static void *qpa_audio_init(Audiodev *dev, Error **errp) | ||||
| static void *qpa_audio_init(Audiodev *dev) | ||||
| { | ||||
|     paaudio *g; | ||||
|     AudiodevPaOptions *popts = &dev->u.pa; | ||||
| @@ -834,12 +834,10 @@ static void *qpa_audio_init(Audiodev *dev, Error **errp) | ||||
|  | ||||
|         runtime = getenv("XDG_RUNTIME_DIR"); | ||||
|         if (!runtime) { | ||||
|             error_setg(errp, "XDG_RUNTIME_DIR not set"); | ||||
|             return NULL; | ||||
|         } | ||||
|         snprintf(pidfile, sizeof(pidfile), "%s/pulse/pid", runtime); | ||||
|         if (stat(pidfile, &st) != 0) { | ||||
|             error_setg_errno(errp, errno, "could not stat pidfile %s", pidfile); | ||||
|             return NULL; | ||||
|         } | ||||
|     } | ||||
| @@ -869,7 +867,6 @@ static void *qpa_audio_init(Audiodev *dev, Error **errp) | ||||
|     } | ||||
|     if (!g->conn) { | ||||
|         g_free(g); | ||||
|         error_setg(errp, "could not connect to PulseAudio server"); | ||||
|         return NULL; | ||||
|     } | ||||
|  | ||||
| @@ -931,6 +928,7 @@ static struct audio_driver pa_audio_driver = { | ||||
|     .init           = qpa_audio_init, | ||||
|     .fini           = qpa_audio_fini, | ||||
|     .pcm_ops        = &qpa_pcm_ops, | ||||
|     .can_be_default = 1, | ||||
|     .max_voices_out = INT_MAX, | ||||
|     .max_voices_in  = INT_MAX, | ||||
|     .voice_size_out = sizeof (PAVoiceOut), | ||||
|   | ||||
							
								
								
									
| 858  audio/pwaudio.c | ||||
							| @@ -1,858 +0,0 @@ | ||||
| /* | ||||
|  * QEMU PipeWire audio driver | ||||
|  * | ||||
|  * Copyright (c) 2023 Red Hat Inc. | ||||
|  * | ||||
|  * Author: Dorinda Bassey       <dbassey@redhat.com> | ||||
|  * | ||||
|  * SPDX-License-Identifier: GPL-2.0-or-later | ||||
|  */ | ||||
|  | ||||
| #include "qemu/osdep.h" | ||||
| #include "qemu/module.h" | ||||
| #include "audio.h" | ||||
| #include <errno.h> | ||||
| #include "qemu/error-report.h" | ||||
| #include "qapi/error.h" | ||||
| #include <spa/param/audio/format-utils.h> | ||||
| #include <spa/utils/ringbuffer.h> | ||||
| #include <spa/utils/result.h> | ||||
| #include <spa/param/props.h> | ||||
|  | ||||
| #include <pipewire/pipewire.h> | ||||
| #include "trace.h" | ||||
|  | ||||
| #define AUDIO_CAP "pipewire" | ||||
| #define RINGBUFFER_SIZE    (1u << 22) | ||||
| #define RINGBUFFER_MASK    (RINGBUFFER_SIZE - 1) | ||||
|  | ||||
| #include "audio_int.h" | ||||
|  | ||||
| typedef struct pwvolume { | ||||
|     uint32_t channels; | ||||
|     float values[SPA_AUDIO_MAX_CHANNELS]; | ||||
| } pwvolume; | ||||
|  | ||||
| typedef struct pwaudio { | ||||
|     Audiodev *dev; | ||||
|     struct pw_thread_loop *thread_loop; | ||||
|     struct pw_context *context; | ||||
|  | ||||
|     struct pw_core *core; | ||||
|     struct spa_hook core_listener; | ||||
|     int last_seq, pending_seq, error; | ||||
| } pwaudio; | ||||
|  | ||||
| typedef struct PWVoice { | ||||
|     pwaudio *g; | ||||
|     struct pw_stream *stream; | ||||
|     struct spa_hook stream_listener; | ||||
|     struct spa_audio_info_raw info; | ||||
|     uint32_t highwater_mark; | ||||
|     uint32_t frame_size, req; | ||||
|     struct spa_ringbuffer ring; | ||||
|     uint8_t buffer[RINGBUFFER_SIZE]; | ||||
|  | ||||
|     pwvolume volume; | ||||
|     bool muted; | ||||
| } PWVoice; | ||||
|  | ||||
| typedef struct PWVoiceOut { | ||||
|     HWVoiceOut hw; | ||||
|     PWVoice v; | ||||
| } PWVoiceOut; | ||||
|  | ||||
| typedef struct PWVoiceIn { | ||||
|     HWVoiceIn hw; | ||||
|     PWVoice v; | ||||
| } PWVoiceIn; | ||||
|  | ||||
| #define PW_VOICE_IN(v) ((PWVoiceIn *)v) | ||||
| #define PW_VOICE_OUT(v) ((PWVoiceOut *)v) | ||||
|  | ||||
| static void | ||||
| stream_destroy(void *data) | ||||
| { | ||||
|     PWVoice *v = (PWVoice *) data; | ||||
|     spa_hook_remove(&v->stream_listener); | ||||
|     v->stream = NULL; | ||||
| } | ||||
|  | ||||
| /* output data processing function to read data from the ring buffer */ | ||||
| static void | ||||
| playback_on_process(void *data) | ||||
| { | ||||
|     PWVoice *v = data; | ||||
|     void *p; | ||||
|     struct pw_buffer *b; | ||||
|     struct spa_buffer *buf; | ||||
|     uint32_t req, index, n_bytes; | ||||
|     int32_t avail; | ||||
|  | ||||
|     assert(v->stream); | ||||
|  | ||||
|     /* obtain a buffer to read from */ | ||||
|     b = pw_stream_dequeue_buffer(v->stream); | ||||
|     if (b == NULL) { | ||||
|         error_report("out of buffers: %s", strerror(errno)); | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     buf = b->buffer; | ||||
|     p = buf->datas[0].data; | ||||
|     if (p == NULL) { | ||||
|         return; | ||||
|     } | ||||
|     /* calculate the total number of bytes to read from the buffer */ | ||||
|     req = b->requested * v->frame_size; | ||||
|     if (req == 0) { | ||||
|         req = v->req; | ||||
|     } | ||||
|     n_bytes = SPA_MIN(req, buf->datas[0].maxsize); | ||||
|  | ||||
|     /* get the number of bytes available to read from the buffer */ | ||||
|     avail = spa_ringbuffer_get_read_index(&v->ring, &index); | ||||
|  | ||||
|     if (avail <= 0) { | ||||
|         PWVoiceOut *vo = container_of(data, PWVoiceOut, v); | ||||
|         audio_pcm_info_clear_buf(&vo->hw.info, p, n_bytes / v->frame_size); | ||||
|     } else { | ||||
|         if ((uint32_t) avail < n_bytes) { | ||||
|             /* | ||||
|              * PipeWire immediately calls this callback again if we provide | ||||
|              * less than n_bytes. Then audio_pcm_info_clear_buf() fills the | ||||
|              * rest of the buffer with silence. | ||||
|              */ | ||||
|             n_bytes = avail; | ||||
|         } | ||||
|  | ||||
|         spa_ringbuffer_read_data(&v->ring, | ||||
|                                     v->buffer, RINGBUFFER_SIZE, | ||||
|                                     index & RINGBUFFER_MASK, p, n_bytes); | ||||
|  | ||||
|         index += n_bytes; | ||||
|         spa_ringbuffer_read_update(&v->ring, index); | ||||
|  | ||||
|     } | ||||
|     buf->datas[0].chunk->offset = 0; | ||||
|     buf->datas[0].chunk->stride = v->frame_size; | ||||
|     buf->datas[0].chunk->size = n_bytes; | ||||
|  | ||||
|     /* queue the buffer for playback */ | ||||
|     pw_stream_queue_buffer(v->stream, b); | ||||
| } | ||||
|  | ||||
| /* input data processing function to write captured data into the buffer */ | ||||
| static void | ||||
| capture_on_process(void *data) | ||||
| { | ||||
|     PWVoice *v = (PWVoice *) data; | ||||
|     void *p; | ||||
|     struct pw_buffer *b; | ||||
|     struct spa_buffer *buf; | ||||
|     int32_t filled; | ||||
|     uint32_t index, offs, n_bytes; | ||||
|  | ||||
|     assert(v->stream); | ||||
|  | ||||
|     /* obtain a buffer */ | ||||
|     b = pw_stream_dequeue_buffer(v->stream); | ||||
|     if (b == NULL) { | ||||
|         error_report("out of buffers: %s", strerror(errno)); | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     /* Write data into buffer */ | ||||
|     buf = b->buffer; | ||||
|     p = buf->datas[0].data; | ||||
|     if (p == NULL) { | ||||
|         return; | ||||
|     } | ||||
|     offs = SPA_MIN(buf->datas[0].chunk->offset, buf->datas[0].maxsize); | ||||
|     n_bytes = SPA_MIN(buf->datas[0].chunk->size, buf->datas[0].maxsize - offs); | ||||
|  | ||||
|     filled = spa_ringbuffer_get_write_index(&v->ring, &index); | ||||
|  | ||||
|  | ||||
|     if (filled < 0) { | ||||
|         error_report("%p: underrun write:%u filled:%d", p, index, filled); | ||||
|     } else { | ||||
|         if ((uint32_t) filled + n_bytes > RINGBUFFER_SIZE) { | ||||
|             error_report("%p: overrun write:%u filled:%d + size:%u > max:%u", | ||||
|             p, index, filled, n_bytes, RINGBUFFER_SIZE); | ||||
|         } | ||||
|     } | ||||
|     spa_ringbuffer_write_data(&v->ring, | ||||
|                                 v->buffer, RINGBUFFER_SIZE, | ||||
|                                 index & RINGBUFFER_MASK, | ||||
|                                 SPA_PTROFF(p, offs, void), n_bytes); | ||||
|     index += n_bytes; | ||||
|     spa_ringbuffer_write_update(&v->ring, index); | ||||
|  | ||||
|     /* queue the buffer for playback */ | ||||
|     pw_stream_queue_buffer(v->stream, b); | ||||
| } | ||||
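/*
 * Index arithmetic shared by the two process callbacks above (sketch, values
 * illustrative): the SPA read/write indices grow monotonically and are only
 * masked when touching the backing array, e.g. with RINGBUFFER_SIZE = 1 << 22
 *
 *     filled = write_index - read_index;    // bytes currently queued
 *     offset = index & RINGBUFFER_MASK;     // position inside v->buffer[]
 *
 * so an index of 0x00500000 lands at offset 0x00100000 in the 4 MiB buffer,
 * and "filled + n_bytes > RINGBUFFER_SIZE" is the overrun case reported by
 * capture_on_process().
 */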
|  | ||||
| static void | ||||
| on_stream_state_changed(void *data, enum pw_stream_state old, | ||||
|                         enum pw_stream_state state, const char *error) | ||||
| { | ||||
|     PWVoice *v = (PWVoice *) data; | ||||
|  | ||||
|     trace_pw_state_changed(pw_stream_get_node_id(v->stream), | ||||
|                            pw_stream_state_as_string(state)); | ||||
| } | ||||
|  | ||||
| static const struct pw_stream_events capture_stream_events = { | ||||
|     PW_VERSION_STREAM_EVENTS, | ||||
|     .destroy = stream_destroy, | ||||
|     .state_changed = on_stream_state_changed, | ||||
|     .process = capture_on_process | ||||
| }; | ||||
|  | ||||
| static const struct pw_stream_events playback_stream_events = { | ||||
|     PW_VERSION_STREAM_EVENTS, | ||||
|     .destroy = stream_destroy, | ||||
|     .state_changed = on_stream_state_changed, | ||||
|     .process = playback_on_process | ||||
| }; | ||||
|  | ||||
| static size_t | ||||
| qpw_read(HWVoiceIn *hw, void *data, size_t len) | ||||
| { | ||||
|     PWVoiceIn *pw = (PWVoiceIn *) hw; | ||||
|     PWVoice *v = &pw->v; | ||||
|     pwaudio *c = v->g; | ||||
|     const char *error = NULL; | ||||
|     size_t l; | ||||
|     int32_t avail; | ||||
|     uint32_t index; | ||||
|  | ||||
|     pw_thread_loop_lock(c->thread_loop); | ||||
|     if (pw_stream_get_state(v->stream, &error) != PW_STREAM_STATE_STREAMING) { | ||||
|         /* wait for stream to become ready */ | ||||
|         l = 0; | ||||
|         goto done_unlock; | ||||
|     } | ||||
|     /* get the number of bytes available to read from the buffer */ | ||||
|     avail = spa_ringbuffer_get_read_index(&v->ring, &index); | ||||
|  | ||||
|     trace_pw_read(avail, index, len); | ||||
|  | ||||
|     if (avail < (int32_t) len) { | ||||
|         len = avail; | ||||
|     } | ||||
|  | ||||
|     spa_ringbuffer_read_data(&v->ring, | ||||
|                              v->buffer, RINGBUFFER_SIZE, | ||||
|                              index & RINGBUFFER_MASK, data, len); | ||||
|     index += len; | ||||
|     spa_ringbuffer_read_update(&v->ring, index); | ||||
|     l = len; | ||||
|  | ||||
| done_unlock: | ||||
|     pw_thread_loop_unlock(c->thread_loop); | ||||
|     return l; | ||||
| } | ||||
|  | ||||
| static size_t qpw_buffer_get_free(HWVoiceOut *hw) | ||||
| { | ||||
|     PWVoiceOut *pw = (PWVoiceOut *)hw; | ||||
|     PWVoice *v = &pw->v; | ||||
|     pwaudio *c = v->g; | ||||
|     const char *error = NULL; | ||||
|     int32_t filled, avail; | ||||
|     uint32_t index; | ||||
|  | ||||
|     pw_thread_loop_lock(c->thread_loop); | ||||
|     if (pw_stream_get_state(v->stream, &error) != PW_STREAM_STATE_STREAMING) { | ||||
|         /* wait for stream to become ready */ | ||||
|         avail = 0; | ||||
|         goto done_unlock; | ||||
|     } | ||||
|  | ||||
|     filled = spa_ringbuffer_get_write_index(&v->ring, &index); | ||||
|     avail = v->highwater_mark - filled; | ||||
|  | ||||
| done_unlock: | ||||
|     pw_thread_loop_unlock(c->thread_loop); | ||||
|     return avail; | ||||
| } | ||||
|  | ||||
| static size_t | ||||
| qpw_write(HWVoiceOut *hw, void *data, size_t len) | ||||
| { | ||||
|     PWVoiceOut *pw = (PWVoiceOut *) hw; | ||||
|     PWVoice *v = &pw->v; | ||||
|     pwaudio *c = v->g; | ||||
|     const char *error = NULL; | ||||
|     int32_t filled, avail; | ||||
|     uint32_t index; | ||||
|  | ||||
|     pw_thread_loop_lock(c->thread_loop); | ||||
|     if (pw_stream_get_state(v->stream, &error) != PW_STREAM_STATE_STREAMING) { | ||||
|         /* wait for stream to become ready */ | ||||
|         len = 0; | ||||
|         goto done_unlock; | ||||
|     } | ||||
|     filled = spa_ringbuffer_get_write_index(&v->ring, &index); | ||||
|     avail = v->highwater_mark - filled; | ||||
|  | ||||
|     trace_pw_write(filled, avail, index, len); | ||||
|  | ||||
|     if (len > avail) { | ||||
|         len = avail; | ||||
|     } | ||||
|  | ||||
|     if (filled < 0) { | ||||
|         error_report("%p: underrun write:%u filled:%d", pw, index, filled); | ||||
|     } else { | ||||
|         if ((uint32_t) filled + len > RINGBUFFER_SIZE) { | ||||
|             error_report("%p: overrun write:%u filled:%d + size:%zu > max:%u", | ||||
|             pw, index, filled, len, RINGBUFFER_SIZE); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     spa_ringbuffer_write_data(&v->ring, | ||||
|                                 v->buffer, RINGBUFFER_SIZE, | ||||
|                                 index & RINGBUFFER_MASK, data, len); | ||||
|     index += len; | ||||
|     spa_ringbuffer_write_update(&v->ring, index); | ||||
|  | ||||
| done_unlock: | ||||
|     pw_thread_loop_unlock(c->thread_loop); | ||||
|     return len; | ||||
| } | ||||
|  | ||||
| static int | ||||
| audfmt_to_pw(AudioFormat fmt, int endianness) | ||||
| { | ||||
|     int format; | ||||
|  | ||||
|     switch (fmt) { | ||||
|     case AUDIO_FORMAT_S8: | ||||
|         format = SPA_AUDIO_FORMAT_S8; | ||||
|         break; | ||||
|     case AUDIO_FORMAT_U8: | ||||
|         format = SPA_AUDIO_FORMAT_U8; | ||||
|         break; | ||||
|     case AUDIO_FORMAT_S16: | ||||
|         format = endianness ? SPA_AUDIO_FORMAT_S16_BE : SPA_AUDIO_FORMAT_S16_LE; | ||||
|         break; | ||||
|     case AUDIO_FORMAT_U16: | ||||
|         format = endianness ? SPA_AUDIO_FORMAT_U16_BE : SPA_AUDIO_FORMAT_U16_LE; | ||||
|         break; | ||||
|     case AUDIO_FORMAT_S32: | ||||
|         format = endianness ? SPA_AUDIO_FORMAT_S32_BE : SPA_AUDIO_FORMAT_S32_LE; | ||||
|         break; | ||||
|     case AUDIO_FORMAT_U32: | ||||
|         format = endianness ? SPA_AUDIO_FORMAT_U32_BE : SPA_AUDIO_FORMAT_U32_LE; | ||||
|         break; | ||||
|     case AUDIO_FORMAT_F32: | ||||
|         format = endianness ? SPA_AUDIO_FORMAT_F32_BE : SPA_AUDIO_FORMAT_F32_LE; | ||||
|         break; | ||||
|     default: | ||||
|         dolog("Internal logic error: Bad audio format %d\n", fmt); | ||||
|         format = SPA_AUDIO_FORMAT_U8; | ||||
|         break; | ||||
|     } | ||||
|     return format; | ||||
| } | ||||
|  | ||||
| static AudioFormat | ||||
| pw_to_audfmt(enum spa_audio_format fmt, int *endianness, | ||||
|              uint32_t *sample_size) | ||||
| { | ||||
|     switch (fmt) { | ||||
|     case SPA_AUDIO_FORMAT_S8: | ||||
|         *sample_size = 1; | ||||
|         return AUDIO_FORMAT_S8; | ||||
|     case SPA_AUDIO_FORMAT_U8: | ||||
|         *sample_size = 1; | ||||
|         return AUDIO_FORMAT_U8; | ||||
|     case SPA_AUDIO_FORMAT_S16_BE: | ||||
|         *sample_size = 2; | ||||
|         *endianness = 1; | ||||
|         return AUDIO_FORMAT_S16; | ||||
|     case SPA_AUDIO_FORMAT_S16_LE: | ||||
|         *sample_size = 2; | ||||
|         *endianness = 0; | ||||
|         return AUDIO_FORMAT_S16; | ||||
|     case SPA_AUDIO_FORMAT_U16_BE: | ||||
|         *sample_size = 2; | ||||
|         *endianness = 1; | ||||
|         return AUDIO_FORMAT_U16; | ||||
|     case SPA_AUDIO_FORMAT_U16_LE: | ||||
|         *sample_size = 2; | ||||
|         *endianness = 0; | ||||
|         return AUDIO_FORMAT_U16; | ||||
|     case SPA_AUDIO_FORMAT_S32_BE: | ||||
|         *sample_size = 4; | ||||
|         *endianness = 1; | ||||
|         return AUDIO_FORMAT_S32; | ||||
|     case SPA_AUDIO_FORMAT_S32_LE: | ||||
|         *sample_size = 4; | ||||
|         *endianness = 0; | ||||
|         return AUDIO_FORMAT_S32; | ||||
|     case SPA_AUDIO_FORMAT_U32_BE: | ||||
|         *sample_size = 4; | ||||
|         *endianness = 1; | ||||
|         return AUDIO_FORMAT_U32; | ||||
|     case SPA_AUDIO_FORMAT_U32_LE: | ||||
|         *sample_size = 4; | ||||
|         *endianness = 0; | ||||
|         return AUDIO_FORMAT_U32; | ||||
|     case SPA_AUDIO_FORMAT_F32_BE: | ||||
|         *sample_size = 4; | ||||
|         *endianness = 1; | ||||
|         return AUDIO_FORMAT_F32; | ||||
|     case SPA_AUDIO_FORMAT_F32_LE: | ||||
|         *sample_size = 4; | ||||
|         *endianness = 0; | ||||
|         return AUDIO_FORMAT_F32; | ||||
|     default: | ||||
|         *sample_size = 1; | ||||
|         dolog("Internal logic error: Bad spa_audio_format %d\n", fmt); | ||||
|         return AUDIO_FORMAT_U8; | ||||
|     } | ||||
| } | ||||
|  | ||||
| static int | ||||
| qpw_stream_new(pwaudio *c, PWVoice *v, const char *stream_name, | ||||
|                const char *name, enum spa_direction dir) | ||||
| { | ||||
|     int res; | ||||
|     uint32_t n_params; | ||||
|     const struct spa_pod *params[2]; | ||||
|     uint8_t buffer[1024]; | ||||
|     struct spa_pod_builder b; | ||||
|     uint64_t buf_samples; | ||||
|     struct pw_properties *props; | ||||
|  | ||||
|     props = pw_properties_new(NULL, NULL); | ||||
|     if (!props) { | ||||
|         error_report("Failed to create PW properties: %s", g_strerror(errno)); | ||||
|         return -1; | ||||
|     } | ||||
|  | ||||
|     /* 75% of the timer period for faster updates */ | ||||
|     buf_samples = (uint64_t)v->g->dev->timer_period * v->info.rate | ||||
|                     * 3 / 4 / 1000000; | ||||
|     pw_properties_setf(props, PW_KEY_NODE_LATENCY, "%" PRIu64 "/%u", | ||||
|                        buf_samples, v->info.rate); | ||||
|  | ||||
|     trace_pw_period(buf_samples, v->info.rate); | ||||
|     if (name) { | ||||
|         pw_properties_set(props, PW_KEY_TARGET_OBJECT, name); | ||||
|     } | ||||
|     v->stream = pw_stream_new(c->core, stream_name, props); | ||||
|     if (v->stream == NULL) { | ||||
|         error_report("Failed to create PW stream: %s", g_strerror(errno)); | ||||
|         return -1; | ||||
|     } | ||||
|  | ||||
|     if (dir == SPA_DIRECTION_INPUT) { | ||||
|         pw_stream_add_listener(v->stream, | ||||
|                             &v->stream_listener, &capture_stream_events, v); | ||||
|     } else { | ||||
|         pw_stream_add_listener(v->stream, | ||||
|                             &v->stream_listener, &playback_stream_events, v); | ||||
|     } | ||||
|  | ||||
|     n_params = 0; | ||||
|     spa_pod_builder_init(&b, buffer, sizeof(buffer)); | ||||
|     params[n_params++] = spa_format_audio_raw_build(&b, | ||||
|                             SPA_PARAM_EnumFormat, | ||||
|                             &v->info); | ||||
|  | ||||
|     /* connect the stream to a sink or source */ | ||||
|     res = pw_stream_connect(v->stream, | ||||
|                             dir == | ||||
|                             SPA_DIRECTION_INPUT ? PW_DIRECTION_INPUT : | ||||
|                             PW_DIRECTION_OUTPUT, PW_ID_ANY, | ||||
|                             PW_STREAM_FLAG_AUTOCONNECT | | ||||
|                             PW_STREAM_FLAG_INACTIVE | | ||||
|                             PW_STREAM_FLAG_MAP_BUFFERS | | ||||
|                             PW_STREAM_FLAG_RT_PROCESS, params, n_params); | ||||
|     if (res < 0) { | ||||
|         error_report("Failed to connect PW stream: %s", g_strerror(errno)); | ||||
|         pw_stream_destroy(v->stream); | ||||
|         return -1; | ||||
|     } | ||||
|  | ||||
|     return 0; | ||||
| } | ||||
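/*
 * Worked example for the node latency request above (assuming QEMU's default
 * 10000 us timer period and a 48000 Hz stream):
 *     buf_samples = 10000 * 48000 * 3 / 4 / 1000000 = 360
 * so PW_KEY_NODE_LATENCY is set to "360/48000", i.e. 7.5 ms.
 */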
|  | ||||
| static void | ||||
| qpw_set_position(uint32_t channels, uint32_t position[SPA_AUDIO_MAX_CHANNELS]) | ||||
| { | ||||
|     memcpy(position, (uint32_t[SPA_AUDIO_MAX_CHANNELS]) { SPA_AUDIO_CHANNEL_UNKNOWN, }, | ||||
|            sizeof(uint32_t) * SPA_AUDIO_MAX_CHANNELS); | ||||
|     /* | ||||
|      * TODO: This currently assumes that the only frontend supporting more | ||||
|      * than 2 channels is usb-audio.  We will need some means to set the | ||||
|      * channel order when a new frontend gains multi-channel support. | ||||
|      */ | ||||
|     switch (channels) { | ||||
|     case 8: | ||||
|         position[6] = SPA_AUDIO_CHANNEL_SL; | ||||
|         position[7] = SPA_AUDIO_CHANNEL_SR; | ||||
|         /* fallthrough */ | ||||
|     case 6: | ||||
|         position[2] = SPA_AUDIO_CHANNEL_FC; | ||||
|         position[3] = SPA_AUDIO_CHANNEL_LFE; | ||||
|         position[4] = SPA_AUDIO_CHANNEL_RL; | ||||
|         position[5] = SPA_AUDIO_CHANNEL_RR; | ||||
|         /* fallthrough */ | ||||
|     case 2: | ||||
|         position[0] = SPA_AUDIO_CHANNEL_FL; | ||||
|         position[1] = SPA_AUDIO_CHANNEL_FR; | ||||
|         break; | ||||
|     case 1: | ||||
|         position[0] = SPA_AUDIO_CHANNEL_MONO; | ||||
|         break; | ||||
|     default: | ||||
|         dolog("Internal error: unsupported channel count %d\n", channels); | ||||
|     } | ||||
| } | ||||
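/*
 * Example (illustrative): for a 6-channel stream the fallthrough above yields
 * the usual 5.1 layout FL FR FC LFE RL RR in position[0..5], with the
 * remaining slots left at SPA_AUDIO_CHANNEL_UNKNOWN; a stereo stream gets
 * just FL/FR in slots 0 and 1.
 */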
|  | ||||
| static int | ||||
| qpw_init_out(HWVoiceOut *hw, struct audsettings *as, void *drv_opaque) | ||||
| { | ||||
|     PWVoiceOut *pw = (PWVoiceOut *) hw; | ||||
|     PWVoice *v = &pw->v; | ||||
|     struct audsettings obt_as = *as; | ||||
|     pwaudio *c = v->g = drv_opaque; | ||||
|     AudiodevPipewireOptions *popts = &c->dev->u.pipewire; | ||||
|     AudiodevPipewirePerDirectionOptions *ppdo = popts->out; | ||||
|     int r; | ||||
|  | ||||
|     pw_thread_loop_lock(c->thread_loop); | ||||
|  | ||||
|     v->info.format = audfmt_to_pw(as->fmt, as->endianness); | ||||
|     v->info.channels = as->nchannels; | ||||
|     qpw_set_position(as->nchannels, v->info.position); | ||||
|     v->info.rate = as->freq; | ||||
|  | ||||
|     obt_as.fmt = | ||||
|         pw_to_audfmt(v->info.format, &obt_as.endianness, &v->frame_size); | ||||
|     v->frame_size *= as->nchannels; | ||||
|  | ||||
|     v->req = (uint64_t)c->dev->timer_period * v->info.rate | ||||
|         * 1 / 2 / 1000000 * v->frame_size; | ||||
|  | ||||
|     /* call the function that creates a new stream for playback */ | ||||
|     r = qpw_stream_new(c, v, ppdo->stream_name ? : c->dev->id, | ||||
|                        ppdo->name, SPA_DIRECTION_OUTPUT); | ||||
|     if (r < 0) { | ||||
|         pw_thread_loop_unlock(c->thread_loop); | ||||
|         return -1; | ||||
|     } | ||||
|  | ||||
|     /* report the audio format we support */ | ||||
|     audio_pcm_init_info(&hw->info, &obt_as); | ||||
|  | ||||
|     /* report the buffer size to qemu */ | ||||
|     hw->samples = audio_buffer_frames( | ||||
|         qapi_AudiodevPipewirePerDirectionOptions_base(ppdo), &obt_as, 46440); | ||||
|     v->highwater_mark = MIN(RINGBUFFER_SIZE, | ||||
|                             (ppdo->has_latency ? ppdo->latency : 46440) | ||||
|                             * (uint64_t)v->info.rate / 1000000 * v->frame_size); | ||||
|  | ||||
|     pw_thread_loop_unlock(c->thread_loop); | ||||
|     return 0; | ||||
| } | ||||
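/*
 * Sizing example (illustrative, using the 46440 us fallback latency above, a
 * 48000 Hz stream and 4-byte frames, i.e. stereo S16):
 *     highwater_mark = 46440 * 48000 / 1000000 * 4 = 8916 bytes
 * well below RINGBUFFER_SIZE, so qpw_buffer_get_free() reports at most
 * 8916 bytes (about 2229 frames) of headroom and qpw_write() never queues
 * more than that ahead of the playback callback.
 */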
|  | ||||
| static int | ||||
| qpw_init_in(HWVoiceIn *hw, struct audsettings *as, void *drv_opaque) | ||||
| { | ||||
|     PWVoiceIn *pw = (PWVoiceIn *) hw; | ||||
|     PWVoice *v = &pw->v; | ||||
|     struct audsettings obt_as = *as; | ||||
|     pwaudio *c = v->g = drv_opaque; | ||||
|     AudiodevPipewireOptions *popts = &c->dev->u.pipewire; | ||||
|     AudiodevPipewirePerDirectionOptions *ppdo = popts->in; | ||||
|     int r; | ||||
|  | ||||
|     pw_thread_loop_lock(c->thread_loop); | ||||
|  | ||||
|     v->info.format = audfmt_to_pw(as->fmt, as->endianness); | ||||
|     v->info.channels = as->nchannels; | ||||
|     qpw_set_position(as->nchannels, v->info.position); | ||||
|     v->info.rate = as->freq; | ||||
|  | ||||
|     obt_as.fmt = | ||||
|         pw_to_audfmt(v->info.format, &obt_as.endianness, &v->frame_size); | ||||
|     v->frame_size *= as->nchannels; | ||||
|  | ||||
|     /* call the function that creates a new stream for recording */ | ||||
|     r = qpw_stream_new(c, v, ppdo->stream_name ? : c->dev->id, | ||||
|                        ppdo->name, SPA_DIRECTION_INPUT); | ||||
|     if (r < 0) { | ||||
|         pw_thread_loop_unlock(c->thread_loop); | ||||
|         return -1; | ||||
|     } | ||||
|  | ||||
|     /* report the audio format we support */ | ||||
|     audio_pcm_init_info(&hw->info, &obt_as); | ||||
|  | ||||
|     /* report the buffer size to qemu */ | ||||
|     hw->samples = audio_buffer_frames( | ||||
|         qapi_AudiodevPipewirePerDirectionOptions_base(ppdo), &obt_as, 46440); | ||||
|  | ||||
|     pw_thread_loop_unlock(c->thread_loop); | ||||
|     return 0; | ||||
| } | ||||
|  | ||||
| static void | ||||
| qpw_voice_fini(PWVoice *v) | ||||
| { | ||||
|     pwaudio *c = v->g; | ||||
|  | ||||
|     if (!v->stream) { | ||||
|         return; | ||||
|     } | ||||
|     pw_thread_loop_lock(c->thread_loop); | ||||
|     pw_stream_destroy(v->stream); | ||||
|     v->stream = NULL; | ||||
|     pw_thread_loop_unlock(c->thread_loop); | ||||
| } | ||||
|  | ||||
| static void | ||||
| qpw_fini_out(HWVoiceOut *hw) | ||||
| { | ||||
|     qpw_voice_fini(&PW_VOICE_OUT(hw)->v); | ||||
| } | ||||
|  | ||||
| static void | ||||
| qpw_fini_in(HWVoiceIn *hw) | ||||
| { | ||||
|     qpw_voice_fini(&PW_VOICE_IN(hw)->v); | ||||
| } | ||||
|  | ||||
| static void | ||||
| qpw_voice_set_enabled(PWVoice *v, bool enable) | ||||
| { | ||||
|     pwaudio *c = v->g; | ||||
|     pw_thread_loop_lock(c->thread_loop); | ||||
|     pw_stream_set_active(v->stream, enable); | ||||
|     pw_thread_loop_unlock(c->thread_loop); | ||||
| } | ||||
|  | ||||
| static void | ||||
| qpw_enable_out(HWVoiceOut *hw, bool enable) | ||||
| { | ||||
|     qpw_voice_set_enabled(&PW_VOICE_OUT(hw)->v, enable); | ||||
| } | ||||
|  | ||||
| static void | ||||
| qpw_enable_in(HWVoiceIn *hw, bool enable) | ||||
| { | ||||
|     qpw_voice_set_enabled(&PW_VOICE_IN(hw)->v, enable); | ||||
| } | ||||
|  | ||||
| static void | ||||
| qpw_voice_set_volume(PWVoice *v, Volume *vol) | ||||
| { | ||||
|     pwaudio *c = v->g; | ||||
|     int i, ret; | ||||
|  | ||||
|     pw_thread_loop_lock(c->thread_loop); | ||||
|     v->volume.channels = vol->channels; | ||||
|  | ||||
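|     /* QEMU passes per-channel volumes as 0..255; PipeWire expects 0.0..1.0. */ | ||||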
|     for (i = 0; i < vol->channels; ++i) { | ||||
|         v->volume.values[i] = (float)vol->vol[i] / 255; | ||||
|     } | ||||
|  | ||||
|     ret = pw_stream_set_control(v->stream, | ||||
|         SPA_PROP_channelVolumes, v->volume.channels, v->volume.values, 0); | ||||
|     trace_pw_vol(ret == 0 ? "success" : "failed"); | ||||
|  | ||||
|     v->muted = vol->mute; | ||||
|     float val = v->muted ? 1.f : 0.f; | ||||
|     ret = pw_stream_set_control(v->stream, SPA_PROP_mute, 1, &val, 0); | ||||
|     pw_thread_loop_unlock(c->thread_loop); | ||||
| } | ||||
|  | ||||
| static void | ||||
| qpw_volume_out(HWVoiceOut *hw, Volume *vol) | ||||
| { | ||||
|     qpw_voice_set_volume(&PW_VOICE_OUT(hw)->v, vol); | ||||
| } | ||||
|  | ||||
| static void | ||||
| qpw_volume_in(HWVoiceIn *hw, Volume *vol) | ||||
| { | ||||
|     qpw_voice_set_volume(&PW_VOICE_IN(hw)->v, vol); | ||||
| } | ||||
|  | ||||
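| /* | ||||
|  * Round-trip synchronisation with the PipeWire server: pw_core_sync() | ||||
|  * queues a sync request and returns its sequence number, on_core_done() | ||||
|  * records the last completed sequence and wakes this loop, and we return | ||||
|  * once the pending request has been acknowledged or an error has been | ||||
|  * recorded in pw->error. | ||||
|  */ | ||||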
| static int wait_resync(pwaudio *pw) | ||||
| { | ||||
|     int res; | ||||
|     pw->pending_seq = pw_core_sync(pw->core, PW_ID_CORE, pw->pending_seq); | ||||
|  | ||||
|     while (true) { | ||||
|         pw_thread_loop_wait(pw->thread_loop); | ||||
|  | ||||
|         res = pw->error; | ||||
|         if (res < 0) { | ||||
|             pw->error = 0; | ||||
|             return res; | ||||
|         } | ||||
|         if (pw->pending_seq == pw->last_seq) { | ||||
|             break; | ||||
|         } | ||||
|     } | ||||
|     return 0; | ||||
| } | ||||
|  | ||||
| static void | ||||
| on_core_error(void *data, uint32_t id, int seq, int res, const char *message) | ||||
| { | ||||
|     pwaudio *pw = data; | ||||
|  | ||||
|     error_report("error id:%u seq:%d res:%d (%s): %s", | ||||
|                 id, seq, res, spa_strerror(res), message); | ||||
|  | ||||
|     /* stop and exit the thread loop */ | ||||
|     pw_thread_loop_signal(pw->thread_loop, FALSE); | ||||
| } | ||||
|  | ||||
| static void | ||||
| on_core_done(void *data, uint32_t id, int seq) | ||||
| { | ||||
|     pwaudio *pw = data; | ||||
|     assert(id == PW_ID_CORE); | ||||
|     pw->last_seq = seq; | ||||
|     if (pw->pending_seq == seq) { | ||||
|         /* stop and exit the thread loop */ | ||||
|         pw_thread_loop_signal(pw->thread_loop, FALSE); | ||||
|     } | ||||
| } | ||||
|  | ||||
| static const struct pw_core_events core_events = { | ||||
|     PW_VERSION_CORE_EVENTS, | ||||
|     .done = on_core_done, | ||||
|     .error = on_core_error, | ||||
| }; | ||||
|  | ||||
| static void * | ||||
| qpw_audio_init(Audiodev *dev, Error **errp) | ||||
| { | ||||
|     g_autofree pwaudio *pw = g_new0(pwaudio, 1); | ||||
|  | ||||
|     assert(dev->driver == AUDIODEV_DRIVER_PIPEWIRE); | ||||
|     trace_pw_audio_init(); | ||||
|  | ||||
|     pw_init(NULL, NULL); | ||||
|  | ||||
|     pw->dev = dev; | ||||
|     pw->thread_loop = pw_thread_loop_new("PipeWire thread loop", NULL); | ||||
|     if (pw->thread_loop == NULL) { | ||||
|         error_setg_errno(errp, errno, "Could not create PipeWire loop"); | ||||
|         goto fail; | ||||
|     } | ||||
|  | ||||
|     pw->context = | ||||
|         pw_context_new(pw_thread_loop_get_loop(pw->thread_loop), NULL, 0); | ||||
|     if (pw->context == NULL) { | ||||
|         error_setg_errno(errp, errno, "Could not create PipeWire context"); | ||||
|         goto fail; | ||||
|     } | ||||
|  | ||||
|     if (pw_thread_loop_start(pw->thread_loop) < 0) { | ||||
|         error_setg_errno(errp, errno, "Could not start PipeWire loop"); | ||||
|         goto fail; | ||||
|     } | ||||
|  | ||||
|     pw_thread_loop_lock(pw->thread_loop); | ||||
|  | ||||
|     pw->core = pw_context_connect(pw->context, NULL, 0); | ||||
|     if (pw->core == NULL) { | ||||
|         pw_thread_loop_unlock(pw->thread_loop); | ||||
|         goto fail_error; | ||||
|     } | ||||
|  | ||||
|     if (pw_core_add_listener(pw->core, &pw->core_listener, | ||||
|                              &core_events, pw) < 0) { | ||||
|         pw_thread_loop_unlock(pw->thread_loop); | ||||
|         goto fail_error; | ||||
|     } | ||||
|     if (wait_resync(pw) < 0) { | ||||
|         pw_thread_loop_unlock(pw->thread_loop); | ||||
|         goto fail_error; | ||||
|     } | ||||
|  | ||||
|     pw_thread_loop_unlock(pw->thread_loop); | ||||
|  | ||||
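|     /* Ownership passes to the caller; g_steal_pointer() keeps the | ||||
|      * g_autofree cleanup from freeing pw on the success path. */ | ||||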
|     return g_steal_pointer(&pw); | ||||
|  | ||||
| fail_error: | ||||
|     error_setg(errp, "Failed to initialize PipeWire context"); | ||||
| fail: | ||||
|     if (pw->thread_loop) { | ||||
|         pw_thread_loop_stop(pw->thread_loop); | ||||
|     } | ||||
|     g_clear_pointer(&pw->context, pw_context_destroy); | ||||
|     g_clear_pointer(&pw->thread_loop, pw_thread_loop_destroy); | ||||
|     return NULL; | ||||
| } | ||||
|  | ||||
| static void | ||||
| qpw_audio_fini(void *opaque) | ||||
| { | ||||
|     pwaudio *pw = opaque; | ||||
|  | ||||
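|     /* Stop the loop thread first so no callbacks run during teardown. */ | ||||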
|     if (pw->thread_loop) { | ||||
|         pw_thread_loop_stop(pw->thread_loop); | ||||
|     } | ||||
|  | ||||
|     if (pw->core) { | ||||
|         spa_hook_remove(&pw->core_listener); | ||||
|         spa_zero(pw->core_listener); | ||||
|         pw_core_disconnect(pw->core); | ||||
|     } | ||||
|  | ||||
|     if (pw->context) { | ||||
|         pw_context_destroy(pw->context); | ||||
|     } | ||||
|     pw_thread_loop_destroy(pw->thread_loop); | ||||
|  | ||||
|     g_free(pw); | ||||
| } | ||||
|  | ||||
| static struct audio_pcm_ops qpw_pcm_ops = { | ||||
|     .init_out = qpw_init_out, | ||||
|     .fini_out = qpw_fini_out, | ||||
|     .write = qpw_write, | ||||
|     .buffer_get_free = qpw_buffer_get_free, | ||||
|     .run_buffer_out = audio_generic_run_buffer_out, | ||||
|     .enable_out = qpw_enable_out, | ||||
|     .volume_out = qpw_volume_out, | ||||
|     .volume_in = qpw_volume_in, | ||||
|  | ||||
|     .init_in = qpw_init_in, | ||||
|     .fini_in = qpw_fini_in, | ||||
|     .read = qpw_read, | ||||
|     .run_buffer_in = audio_generic_run_buffer_in, | ||||
|     .enable_in = qpw_enable_in | ||||
| }; | ||||
|  | ||||
| static struct audio_driver pw_audio_driver = { | ||||
|     .name = "pipewire", | ||||
|     .descr = "http://www.pipewire.org/", | ||||
|     .init = qpw_audio_init, | ||||
|     .fini = qpw_audio_fini, | ||||
|     .pcm_ops = &qpw_pcm_ops, | ||||
|     .max_voices_out = INT_MAX, | ||||
|     .max_voices_in = INT_MAX, | ||||
|     .voice_size_out = sizeof(PWVoiceOut), | ||||
|     .voice_size_in = sizeof(PWVoiceIn), | ||||
| }; | ||||
|  | ||||
| static void | ||||
| register_audio_pw(void) | ||||
| { | ||||
|     audio_driver_register(&pw_audio_driver); | ||||
| } | ||||
|  | ||||
| type_init(register_audio_pw); | ||||
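|  | ||||
| /* | ||||
|  * Usage sketch (illustrative, not part of this patch): once registered, the | ||||
|  * backend is selected with -audiodev, e.g. | ||||
|  * | ||||
|  *   qemu-system-x86_64 -audiodev pipewire,id=pw0,out.latency=46440 \ | ||||
|  *                      -device intel-hda -device hda-duplex,audiodev=pw0 | ||||
|  * | ||||
|  * The id, the HDA device models and the latency value are example choices. | ||||
|  */ | ||||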
| @@ -40,6 +40,8 @@ void NAME (void *opaque, struct st_sample *ibuf, struct st_sample *obuf, | ||||
|     int64_t t; | ||||
| #endif | ||||
|  | ||||
|     ilast = rate->ilast; | ||||
|  | ||||
|     istart = ibuf; | ||||
|     iend = ibuf + *isamp; | ||||
|  | ||||
| @@ -57,17 +59,15 @@ void NAME (void *opaque, struct st_sample *ibuf, struct st_sample *obuf, | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     /* without input samples, there's nothing to do */ | ||||
|     if (ibuf >= iend) { | ||||
|         *osamp = 0; | ||||
|         return; | ||||
|     } | ||||
|     while (obuf < oend) { | ||||
|  | ||||
|     ilast = rate->ilast; | ||||
|  | ||||
|     while (true) { | ||||
|         /* Safety catch to make sure we have input samples.  */ | ||||
|         if (ibuf >= iend) { | ||||
|             break; | ||||
|         } | ||||
|  | ||||
|         /* read as many input samples so that ipos > opos */ | ||||
|  | ||||
|         while (rate->ipos <= (rate->opos >> 32)) { | ||||
|             ilast = *ibuf++; | ||||
|             rate->ipos++; | ||||
| @@ -78,11 +78,6 @@ void NAME (void *opaque, struct st_sample *ibuf, struct st_sample *obuf, | ||||
|             } | ||||
|         } | ||||
|  | ||||
|         /* make sure that the next output sample can be written */ | ||||
|         if (obuf >= oend) { | ||||
|             break; | ||||
|         } | ||||
|  | ||||
|         icur = *ibuf; | ||||
|  | ||||
|         /* wrap ipos and opos around long before they overflow */ | ||||
|   | ||||
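The rate_template.h hunks above restructure QEMU's fixed-point resampling loop: the output position advances in 32.32 fixed point (note the rate->opos >> 32 tests), input samples are consumed until their index passes the integer part of that position, and each output sample is interpolated from the last two inputs using the fractional part. The following self-contained sketch reproduces the same scheme for mono float samples; the increment formula, names and rates are illustrative assumptions, not QEMU code:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Linear-interpolation resampler driven by a 32.32 fixed-point position. */
    size_t resample(const float *in, size_t in_len,
                    float *out, size_t out_len,
                    uint32_t in_rate, uint32_t out_rate)
    {
        uint64_t opos = 0;                                 /* 32.32 position in the input */
        uint64_t opos_inc = ((uint64_t)in_rate << 32) / out_rate;
        size_t ipos = 0, o = 0;
        float last = 0.0f;

        while (o < out_len) {
            /* pull input until ipos has passed the integer part of opos */
            while (ipos <= (size_t)(opos >> 32)) {
                if (ipos >= in_len) {
                    return o;                              /* ran out of input */
                }
                last = in[ipos++];
            }
            float cur = (ipos < in_len) ? in[ipos] : last;
            float frac = (float)(uint32_t)opos / 4294967296.0f;
            out[o++] = last + (cur - last) * frac;         /* interpolate between neighbours */
            opos += opos_inc;
        }
        return o;
    }

    int main(void)
    {
        float in[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
        float out[16];
        size_t n = resample(in, 8, out, 16, 44100, 88200); /* 2x upsampling */
        for (size_t i = 0; i < n; i++) {
            printf("%g ", out[i]);
        }
        printf("\n");
        return 0;
    }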
| @@ -26,7 +26,6 @@ | ||||
| #include <SDL.h> | ||||
| #include <SDL_thread.h> | ||||
| #include "qemu/module.h" | ||||
| #include "qapi/error.h" | ||||
| #include "audio.h" | ||||
|  | ||||
| #ifndef _WIN32 | ||||
| @@ -450,10 +449,10 @@ static void sdl_enable_in(HWVoiceIn *hw, bool enable) | ||||
|     SDL_PauseAudioDevice(sdl->devid, !enable); | ||||
| } | ||||
|  | ||||
| static void *sdl_audio_init(Audiodev *dev, Error **errp) | ||||
| static void *sdl_audio_init(Audiodev *dev) | ||||
| { | ||||
|     if (SDL_InitSubSystem (SDL_INIT_AUDIO)) { | ||||
|         error_setg(errp, "SDL failed to initialize audio subsystem"); | ||||
|         sdl_logerr ("SDL failed to initialize audio subsystem\n"); | ||||
|         return NULL; | ||||
|     } | ||||
|  | ||||
| @@ -494,6 +493,7 @@ static struct audio_driver sdl_audio_driver = { | ||||
|     .init           = sdl_audio_init, | ||||
|     .fini           = sdl_audio_fini, | ||||
|     .pcm_ops        = &sdl_pcm_ops, | ||||
|     .can_be_default = 1, | ||||
|     .max_voices_out = INT_MAX, | ||||
|     .max_voices_in  = INT_MAX, | ||||
|     .voice_size_out = sizeof(SDLVoiceOut), | ||||
|   | ||||
| @@ -518,7 +518,7 @@ static void sndio_fini_in(HWVoiceIn *hw) | ||||
|     sndio_fini(self); | ||||
| } | ||||
|  | ||||
| static void *sndio_audio_init(Audiodev *dev, Error **errp) | ||||
| static void *sndio_audio_init(Audiodev *dev) | ||||
| { | ||||
|     assert(dev->driver == AUDIODEV_DRIVER_SNDIO); | ||||
|     return dev; | ||||
| @@ -550,6 +550,7 @@ static struct audio_driver sndio_audio_driver = { | ||||
|     .init           = sndio_audio_init, | ||||
|     .fini           = sndio_audio_fini, | ||||
|     .pcm_ops        = &sndio_pcm_ops, | ||||
|     .can_be_default = 1, | ||||
|     .max_voices_out = INT_MAX, | ||||
|     .max_voices_in  = INT_MAX, | ||||
|     .voice_size_out = sizeof(SndioVoice), | ||||
|   | ||||
Some files were not shown because too many files have changed in this diff.