Compare commits: qemu-2.3.0...v2.2.1
55 commits (SHA1):
2001e197cf  c70221df1f  07db6859ab  c4ca8af86d  16765a55c1
dab0efc33f  6c699aa9f9  a958b9be86  4ec1b9b159  3e04f97cbc
00fd8904f6  3d1cd5997d  a97f9a7ec7  987aba53db  7d389a2138
2a020d29df  6833856e86  a9eb2b6053  4d49de6b6f  3750d2588e
4ac8b01fa8  e60fb7af55  451b9e2d4c  0d093159b4  8d1fdb16cd
b0a231a9a9  09e2753be0  49725cdf04  fdb2ed44f1  e54bcad901
e1ce0c3cb7  cb3360dbdd  f738adeb5e  83dbd88b5c  718ab31016
27ad3df73e  6569578197  51d703ff2e  ebd2bd2227  9f8da0319d
63a3acd24a  9fc6075d28  6950b92765  9b3f3d6da9  6f45cda114
1e85e69fd6  ff15187eca  0a0a984352  b15bfd0558  10be14ee7d
6065d5484a  0fc9a06b56  1961d1c347  e81703b42c  7e213f8535
.gitignore (vendored): 10 changes
@@ -37,8 +37,14 @@
/qemu-tech.html
/qemu-doc.info
/qemu-tech.info
/qemu.1
/qemu.pod
/qemu-img.1
/qemu-img.pod
/qemu-img
/qemu-nbd
/qemu-nbd.8
/qemu-nbd.pod
/qemu-options.def
/qemu-options.texi
/qemu-img-cmds.texi
@@ -50,7 +56,8 @@
/qmp-commands.txt
/vscclient
/fsdev/virtfs-proxy-helper
*.[1-9]
/fsdev/virtfs-proxy-helper.1
/fsdev/virtfs-proxy-helper.pod
*.a
*.aux
*.cp
@@ -63,7 +70,6 @@
*.ky
*.log
*.pdf
*.pod
*.cps
*.fns
*.kys
@@ -98,6 +98,3 @@ matrix:
EXTRA_PKGS="liblttng-ust-dev liburcu-dev"
EXTRA_CONFIG="--enable-trace-backends=ust"
compiler: gcc
- env: TARGETS=i386-softmmu,x86_64-softmmu
EXTRA_CONFIG="--enable-modules"
compiler: gcc
LICENSE: 2 changes
@@ -11,7 +11,7 @@ option) any later version.

As of July 2013, contributions under version 2 of the GNU General Public
License (and no later version) are only accepted for the following files
or directories: bsd-user/, linux-user/, hw/vfio/, hw/xen/xen_pt*.
or directories: bsd-user/, linux-user/, hw/misc/vfio.c, hw/xen/xen_pt*.

3) The Tiny Code Generator (TCG) is released under the BSD license
(see license headers in files).
MAINTAINERS: 112 changes
@@ -50,12 +50,14 @@ Descriptions of section entries:

General Project Administration
------------------------------
M: Anthony Liguori <aliguori@amazon.com>
M: Peter Maydell <peter.maydell@linaro.org>

Responsible Disclosure, Reporting Security Issues
------------------------------
W: http://wiki.qemu.org/SecurityProcess
M: Michael S. Tsirkin <mst@redhat.com>
M: Anthony Liguori <aliguori@amazon.com>
L: secalert@redhat.com

Guest CPU cores (TCG):
@@ -96,12 +98,8 @@ LM32
M: Michael Walle <michael@walle.cc>
S: Maintained
F: target-lm32/
F: disas/lm32.c
F: hw/lm32/
F: hw/*/lm32_*
F: hw/*/milkymist-*
F: include/hw/char/lm32_juart.h
F: include/hw/lm32/
F: hw/char/lm32_*
F: tests/tcg/lm32/

M68K
@@ -157,7 +155,6 @@ F: hw/sh4/

SPARC
M: Blue Swirl <blauwirbel@gmail.com>
M: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
S: Maintained
F: target-sparc/
F: hw/sparc/
@@ -517,13 +514,11 @@ SPARC Machines
--------------
Sun4m
M: Blue Swirl <blauwirbel@gmail.com>
M: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
S: Maintained
F: hw/sparc/sun4m.c

Sun4u
M: Blue Swirl <blauwirbel@gmail.com>
M: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
S: Maintained
F: hw/sparc64/sun4u.c

@@ -539,7 +534,6 @@ S390 Virtio
M: Alexander Graf <agraf@suse.de>
S: Maintained
F: hw/s390x/s390-*.c
X: hw/s390x/*pci*.[hc]

S390 Virtio-ccw
M: Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -550,7 +544,6 @@ F: hw/s390x/s390-virtio-ccw.c
F: hw/s390x/css.[hc]
F: hw/s390x/sclp*.[hc]
F: hw/s390x/ipl*.[hc]
F: hw/s390x/*pci*.[hc]
F: include/hw/s390x/
F: pc-bios/s390-ccw/
T: git git://github.com/cohuck/qemu virtio-ccw-upstr
@@ -566,6 +559,7 @@ F: hw/unicore32/
X86 Machines
------------
PC
M: Anthony Liguori <aliguori@amazon.com>
M: Michael S. Tsirkin <mst@redhat.com>
S: Supported
F: include/hw/i386/
@@ -599,31 +593,12 @@ F: hw/net/opencores_eth.c

Devices
-------
EDU
M: Jiri Slaby <jslaby@suse.cz>
S: Maintained
F: hw/misc/edu.c

IDE
M: John Snow <jsnow@redhat.com>
L: qemu-block@nongnu.org
S: Supported
M: Kevin Wolf <kwolf@redhat.com>
M: Stefan Hajnoczi <stefanha@redhat.com>
S: Odd Fixes
F: include/hw/ide.h
F: hw/ide/
F: hw/block/block.c
F: hw/block/cdrom.c
F: hw/block/hd-geometry.c
F: tests/ide-test.c
F: tests/ahci-test.c
T: git git://github.com/jnsnow/qemu.git ide

Floppy
M: John Snow <jsnow@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: hw/block/fdc.c
F: include/hw/block/fdc.h
T: git git://github.com/jnsnow/qemu.git ide

OMAP
M: Peter Maydell <peter.maydell@linaro.org>
@@ -682,7 +657,7 @@ F: hw/usb/dev-serial.c
VFIO
M: Alex Williamson <alex.williamson@redhat.com>
S: Supported
F: hw/vfio/*
F: hw/misc/vfio.c

vhost
M: Michael S. Tsirkin <mst@redhat.com>
@@ -690,6 +665,7 @@ S: Supported
F: hw/*/*vhost*

virtio
M: Anthony Liguori <aliguori@amazon.com>
M: Michael S. Tsirkin <mst@redhat.com>
S: Supported
F: hw/*/virtio*
@@ -720,14 +696,6 @@ M: Amit Shah <amit.shah@redhat.com>
S: Supported
F: hw/char/virtio-serial-bus.c
F: hw/char/virtio-console.c
F: include/hw/virtio/virtio-serial.h

virtio-rng
M: Amit Shah <amit.shah@redhat.com>
S: Supported
F: hw/virtio/virtio-rng.c
F: include/hw/virtio/virtio-rng.h
F: backends/rng*.c

nvme
M: Keith Busch <keith.busch@intel.com>
@@ -775,7 +743,6 @@ F: aio-*.c
F: block*
F: block/
F: hw/block/
F: migration/block*
F: qemu-img*
F: qemu-io*
F: tests/image-fuzzer/
@@ -783,19 +750,8 @@ F: tests/qemu-iotests/
T: git git://repo.or.cz/qemu/kevin.git block
T: git git://github.com/stefanha/qemu.git block

Block Jobs
M: Jeff Cody <jcody@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: blockjob.c
F: include/block/blockjob.h
F: block/backup.c
F: block/commit.c
F: block/stream.h
F: block/mirror.c
T: git git://github.com/codyprime/qemu-kvm-jtc.git block

Character Devices
M: Anthony Liguori <aliguori@amazon.com>
M: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: qemu-char.c
@@ -807,11 +763,6 @@ M: Samuel Thibault <samuel.thibault@ens-lyon.org>
S: Maintained
F: backends/baum.c

Coverity model
M: Markus Armbruster <armbru@redhat.com>
S: Supported
F: scripts/coverity-model.c

CPU
M: Andreas Färber <afaerber@suse.de>
S: Supported
@@ -856,6 +807,7 @@ F: audio/spiceaudio.c
F: hw/display/qxl*

Graphics
M: Anthony Liguori <aliguori@amazon.com>
M: Gerd Hoffmann <kraxel@redhat.com>
S: Odd Fixes
F: ui/
@@ -867,6 +819,7 @@ S: Odd Fixes
F: ui/cocoa.m

Main loop
M: Anthony Liguori <aliguori@amazon.com>
M: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: cpus.c
@@ -883,8 +836,8 @@ F: hmp-commands.hx
T: git git://repo.or.cz/qemu/qmp-unstable.git queue/qmp

Network device layer
M: Anthony Liguori <aliguori@amazon.com>
M: Stefan Hajnoczi <stefanha@redhat.com>
M: Jason Wang <jasowang@redhat.com>
S: Maintained
F: net/
T: git git://github.com/stefanha/qemu.git net
@@ -934,6 +887,7 @@ F: qga/
T: git git://github.com/mdroth/qemu.git qga

QOM
M: Anthony Liguori <aliguori@amazon.com>
M: Andreas Färber <afaerber@suse.de>
S: Supported
T: git git://github.com/afaerber/qemu-cpu.git qom-next
@@ -974,14 +928,12 @@ F: scripts/checkpatch.pl

Migration
M: Juan Quintela <quintela@redhat.com>
M: Amit Shah <amit.shah@redhat.com>
S: Maintained
F: include/migration/
F: migration/
F: migration*
F: savevm.c
F: arch_init.c
F: scripts/vmstate-static-checker.py
F: tests/vmstate-static-checker-data/
F: vmstate.c

Seccomp
M: Eduardo Otubo <eduardo.otubo@profitbricks.com>
@@ -1099,28 +1051,20 @@ F: block/vmdk.c

RBD
M: Josh Durgin <josh.durgin@inktank.com>
M: Jeff Cody <jcody@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block/rbd.c
T: git git://github.com/codyprime/qemu-kvm-jtc.git block

Sheepdog
M: Hitoshi Mitake <mitake.hitoshi@lab.ntt.co.jp>
M: Liu Yuan <namei.unix@gmail.com>
M: Jeff Cody <jcody@redhat.com>
L: qemu-block@nongnu.org
L: sheepdog@lists.wpkg.org
S: Supported
F: block/sheepdog.c
T: git git://github.com/codyprime/qemu-kvm-jtc.git block

VHDX
M: Jeff Cody <jcody@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block/vhdx*
T: git git://github.com/codyprime/qemu-kvm-jtc.git block

VDI
M: Stefan Weil <sw@weilnetz.de>
@@ -1135,42 +1079,20 @@ S: Supported
F: block/iscsi.c

NFS
M: Jeff Cody <jcody@redhat.com>
M: Peter Lieven <pl@kamp.de>
L: qemu-block@nongnu.org
S: Maintained
F: block/nfs.c
T: git git://github.com/codyprime/qemu-kvm-jtc.git block

SSH
M: Richard W.M. Jones <rjones@redhat.com>
M: Jeff Cody <jcody@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block/ssh.c
T: git git://github.com/codyprime/qemu-kvm-jtc.git block

ARCHIPELAGO
M: Chrysostomos Nanakos <cnanakos@grnet.gr>
M: Chrysostomos Nanakos <chris@include.gr>
M: Jeff Cody <jcody@redhat.com>
L: qemu-block@nongnu.org
S: Maintained
F: block/archipelago.c
T: git git://github.com/codyprime/qemu-kvm-jtc.git block

CURL
M: Jeff Cody <jcody@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block/curl.c
T: git git://github.com/codyprime/qemu-kvm-jtc.git block

GLUSTER
M: Jeff Cody <jcody@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block/gluster.c
T: git git://github.com/codyprime/qemu-kvm-jtc.git block

Bootdevice
M: Gonglei <arei.gonglei@huawei.com>
Makefile: 30 changes
@@ -84,9 +84,6 @@ HELPERS-$(CONFIG_LINUX) = qemu-bridge-helper$(EXESUF)

ifdef BUILD_DOCS
DOCS=qemu-doc.html qemu-tech.html qemu.1 qemu-img.1 qemu-nbd.8 qmp-commands.txt
ifdef CONFIG_LINUX
DOCS+=kvm_stat.1
endif
ifdef CONFIG_VIRTFS
DOCS+=fsdev/virtfs-proxy-helper.1
endif
@@ -112,9 +109,8 @@ endif
-include $(SUBDIR_DEVICES_MAK_DEP)

%/config-devices.mak: default-configs/%.mak
$(call quiet-command, \
$(SHELL) $(SRC_PATH)/scripts/make_device_config.sh $< $*-config-devices.mak.d $@ > $@.tmp, " GEN $@.tmp")
$(call quiet-command, if test -f $@; then \
$(call quiet-command,$(SHELL) $(SRC_PATH)/scripts/make_device_config.sh $@ $<, " GEN $@")
@if test -f $@; then \
if cmp -s $@.old $@; then \
mv $@.tmp $@; \
cp -p $@ $@.old; \
@@ -130,7 +126,7 @@ endif
else \
mv $@.tmp $@; \
cp -p $@ $@.old; \
fi, " GEN $@");
fi

defconfig:
rm -f config-all-devices.mak $(SUBDIR_DEVICES_MAK)
@@ -201,9 +197,9 @@ ALL_SUBDIRS=$(TARGET_DIRS) $(patsubst %,pc-bios/%, $(ROMS))

recurse-all: $(SUBDIR_RULES) $(ROMSUBDIR_RULES)

$(BUILD_DIR)/version.o: $(SRC_PATH)/version.rc config-host.h | $(BUILD_DIR)/version.lo
$(BUILD_DIR)/version.o: $(SRC_PATH)/version.rc $(BUILD_DIR)/config-host.h | $(BUILD_DIR)/version.lo
$(call quiet-command,$(WINDRES) -I$(BUILD_DIR) -o $@ $<," RC version.o")
$(BUILD_DIR)/version.lo: $(SRC_PATH)/version.rc config-host.h
$(BUILD_DIR)/version.lo: $(SRC_PATH)/version.rc $(BUILD_DIR)/config-host.h
$(call quiet-command,$(WINDRES) -I$(BUILD_DIR) -o $@ $<," RC version.lo")

Makefile: $(version-obj-y) $(version-lobj-y)
@@ -317,8 +313,8 @@ qemu-%.tar.bz2:

distclean: clean
rm -f config-host.mak config-host.h* config-host.ld $(DOCS) qemu-options.texi qemu-img-cmds.texi qemu-monitor.texi
rm -f config-all-devices.mak config-all-disas.mak config.status
rm -f po/*.mo tests/qemu-iotests/common.env
rm -f config-all-devices.mak config-all-disas.mak
rm -f po/*.mo
rm -f roms/seabios/config.mak roms/vgabios/config.mak
rm -f qemu-doc.info qemu-doc.aux qemu-doc.cp qemu-doc.cps qemu-doc.dvi
rm -f qemu-doc.fn qemu-doc.fns qemu-doc.info qemu-doc.ky qemu-doc.kys
@@ -331,8 +327,8 @@ distclean: clean
rm -rf $$d || exit 1 ; \
done
rm -Rf .sdk
if test -f pixman/config.log; then $(MAKE) -C pixman distclean; fi
if test -f dtc/version_gen.h; then $(MAKE) $(DTC_MAKE_ARGS) clean; fi
if test -f pixman/config.log; then make -C pixman distclean; fi
if test -f dtc/version_gen.h; then make $(DTC_MAKE_ARGS) clean; fi

KEYMAPS=da en-gb et fr fr-ch is lt modifiers no pt-br sv \
ar de en-us fi fr-be hr it lv nl pl ru th \
@@ -494,12 +490,6 @@ qemu-nbd.8: qemu-nbd.texi
$(POD2MAN) --section=8 --center=" " --release=" " qemu-nbd.pod > $@, \
" GEN $@")

kvm_stat.1: scripts/kvm/kvm_stat.texi
$(call quiet-command, \
perl -Ww -- $(SRC_PATH)/scripts/texi2pod.pl $< kvm_stat.pod && \
$(POD2MAN) --section=1 --center=" " --release=" " kvm_stat.pod > $@, \
" GEN $@")

dvi: qemu-doc.dvi qemu-tech.dvi
html: qemu-doc.html qemu-tech.html
info: qemu-doc.info qemu-tech.info
@@ -532,7 +522,7 @@ installer: $(INSTALLER)
INSTDIR=/tmp/qemu-nsis

$(INSTALLER): $(SRC_PATH)/qemu.nsi
$(MAKE) install prefix=${INSTDIR}
make install prefix=${INSTDIR}
ifdef SIGNCODE
(cd ${INSTDIR}; \
for i in *.exe; do \
|
||||
|
@@ -48,10 +48,15 @@ common-obj-$(CONFIG_POSIX) += os-posix.o

common-obj-$(CONFIG_LINUX) += fsdev/

common-obj-y += migration/
common-obj-y += migration.o migration-tcp.o
common-obj-y += vmstate.o
common-obj-y += qemu-file.o qemu-file-unix.o qemu-file-stdio.o
common-obj-$(CONFIG_RDMA) += migration-rdma.o
common-obj-y += qemu-char.o #aio.o
common-obj-y += page_cache.o
common-obj-y += qjson.o
common-obj-y += block-migration.o
common-obj-y += page_cache.o xbzrle.o

common-obj-$(CONFIG_POSIX) += migration-exec.o migration-unix.o migration-fd.o

common-obj-$(CONFIG_SPICE) += spice-qemu-char.o
|
||||
|
||||
|
@@ -83,7 +83,7 @@ all: $(PROGS) stap
#########################################################
# cpu emulator library
obj-y = exec.o translate-all.o cpu-exec.o
obj-y += tcg/tcg.o tcg/tcg-op.o tcg/optimize.o
obj-y += tcg/tcg.o tcg/optimize.o
obj-$(CONFIG_TCG_INTERPRETER) += tci.o
obj-$(CONFIG_TCG_INTERPRETER) += disas/tci.o
obj-y += fpu/softfloat.o
@@ -175,11 +175,9 @@ all-obj-y += $(common-obj-y)
all-obj-y += $(target-obj-y)
all-obj-$(CONFIG_SOFTMMU) += $(block-obj-y)

$(QEMU_PROG_BUILD): config-devices.mak

# build either PROG or PROGW
$(QEMU_PROG_BUILD): $(all-obj-y) ../libqemuutil.a ../libqemustub.a
$(call LINK, $(filter-out %.mak, $^))
$(call LINK,$^)

gdbstub-xml.c: $(TARGET_XML_FILES) $(SRC_PATH)/scripts/feature_to_c.sh
$(call quiet-command,rm -f $@ && $(SHELL) $(SRC_PATH)/scripts/feature_to_c.sh $@ $(TARGET_XML_FILES)," GEN $(TARGET_DIR)$@")
|
||||
|
@@ -73,7 +73,7 @@ void aio_set_fd_handler(AioContext *ctx,
} else {
if (node == NULL) {
/* Alloc and insert if it's not already there */
node = g_new0(AioHandler, 1);
node = g_malloc0(sizeof(AioHandler));
node->pfd.fd = fd;
QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);

@@ -67,7 +67,7 @@ void aio_set_fd_handler(AioContext *ctx,

if (node == NULL) {
/* Alloc and insert if it's not already there */
node = g_new0(AioHandler, 1);
node = g_malloc0(sizeof(AioHandler));
node->pfd.fd = fd;
QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
}
@@ -129,7 +129,7 @@ void aio_set_event_notifier(AioContext *ctx,
} else {
if (node == NULL) {
/* Alloc and insert if it's not already there */
node = g_new0(AioHandler, 1);
node = g_malloc0(sizeof(AioHandler));
node->e = e;
node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
node->pfd.events = G_IO_IN;
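For reference, a minimal standalone sketch (not taken from the QEMU tree) of the difference between the two GLib allocation calls these aio hunks switch between; the Handler struct below is an illustrative stand-in for AioHandler:

#include <glib.h>

/* Illustrative stand-in for the real AioHandler. */
typedef struct {
    int fd;
    void *opaque;
} Handler;

int main(void)
{
    /* g_malloc0() takes a byte count and returns a zeroed, untyped buffer. */
    Handler *a = g_malloc0(sizeof(Handler));

    /* g_new0(Type, n) allocates n zeroed elements of Type and returns Type *,
     * so the element size is always derived from the type itself. */
    Handler *b = g_new0(Handler, 1);

    g_free(a);
    g_free(b);
    return 0;
}

Both calls zero the memory; the g_new0() form simply removes the chance of passing a mismatched sizeof().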
|
||||
|
arch_init.c: 289 changes
@@ -52,7 +52,6 @@
|
||||
#include "exec/ram_addr.h"
|
||||
#include "hw/acpi/acpi.h"
|
||||
#include "qemu/host-utils.h"
|
||||
#include "qemu/rcu_queue.h"
|
||||
|
||||
#ifdef DEBUG_ARCH_INIT
|
||||
#define DPRINTF(fmt, ...) \
|
||||
@@ -305,6 +304,23 @@ uint64_t xbzrle_mig_pages_overflow(void)
|
||||
return acct_info.xbzrle_overflows;
|
||||
}
|
||||
|
||||
static size_t save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
|
||||
int cont, int flag)
|
||||
{
|
||||
size_t size;
|
||||
|
||||
qemu_put_be64(f, offset | cont | flag);
|
||||
size = 8;
|
||||
|
||||
if (!cont) {
|
||||
qemu_put_byte(f, strlen(block->idstr));
|
||||
qemu_put_buffer(f, (uint8_t *)block->idstr,
|
||||
strlen(block->idstr));
|
||||
size += 1 + strlen(block->idstr);
|
||||
}
|
||||
return size;
|
||||
}
|
||||
|
||||
/* This is the last block that we have visited serching for dirty pages
|
||||
*/
|
||||
static RAMBlock *last_seen_block;
|
||||
@@ -316,34 +332,6 @@ static uint64_t migration_dirty_pages;
|
||||
static uint32_t last_version;
|
||||
static bool ram_bulk_stage;
|
||||
|
||||
/**
|
||||
* save_page_header: Write page header to wire
|
||||
*
|
||||
* If this is the 1st block, it also writes the block identification
|
||||
*
|
||||
* Returns: Number of bytes written
|
||||
*
|
||||
* @f: QEMUFile where to send the data
|
||||
* @block: block that contains the page we want to send
|
||||
* @offset: offset inside the block for the page
|
||||
* in the lower bits, it contains flags
|
||||
*/
|
||||
static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
|
||||
{
|
||||
size_t size;
|
||||
|
||||
qemu_put_be64(f, offset);
|
||||
size = 8;
|
||||
|
||||
if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
|
||||
qemu_put_byte(f, strlen(block->idstr));
|
||||
qemu_put_buffer(f, (uint8_t *)block->idstr,
|
||||
strlen(block->idstr));
|
||||
size += 1 + strlen(block->idstr);
|
||||
}
|
||||
return size;
|
||||
}
|
||||
|
||||
/* Update the xbzrle cache to reflect a page that's been sent as all 0.
|
||||
* The important thing is that a stale (not-yet-0'd) page be replaced
|
||||
* by the new data.
|
||||
@@ -358,40 +346,22 @@ static void xbzrle_cache_zero_page(ram_addr_t current_addr)
|
||||
|
||||
/* We don't care if this fails to allocate a new cache page
|
||||
* as long as it updated an old one */
|
||||
cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE,
|
||||
bitmap_sync_count);
|
||||
cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE);
|
||||
}
|
||||
|
||||
#define ENCODING_FLAG_XBZRLE 0x1
|
||||
|
||||
/**
|
||||
* save_xbzrle_page: compress and send current page
|
||||
*
|
||||
* Returns: 1 means that we wrote the page
|
||||
* 0 means that page is identical to the one already sent
|
||||
* -1 means that xbzrle would be longer than normal
|
||||
*
|
||||
* @f: QEMUFile where to send the data
|
||||
* @current_data:
|
||||
* @current_addr:
|
||||
* @block: block that contains the page we want to send
|
||||
* @offset: offset inside the block for the page
|
||||
* @last_stage: if we are at the completion stage
|
||||
* @bytes_transferred: increase it with the number of transferred bytes
|
||||
*/
|
||||
static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
|
||||
ram_addr_t current_addr, RAMBlock *block,
|
||||
ram_addr_t offset, bool last_stage,
|
||||
uint64_t *bytes_transferred)
|
||||
ram_addr_t offset, int cont, bool last_stage)
|
||||
{
|
||||
int encoded_len = 0, bytes_xbzrle;
|
||||
int encoded_len = 0, bytes_sent = -1;
|
||||
uint8_t *prev_cached_page;
|
||||
|
||||
if (!cache_is_cached(XBZRLE.cache, current_addr, bitmap_sync_count)) {
|
||||
if (!cache_is_cached(XBZRLE.cache, current_addr)) {
|
||||
acct_info.xbzrle_cache_miss++;
|
||||
if (!last_stage) {
|
||||
if (cache_insert(XBZRLE.cache, current_addr, *current_data,
|
||||
bitmap_sync_count) == -1) {
|
||||
if (cache_insert(XBZRLE.cache, current_addr, *current_data) == -1) {
|
||||
return -1;
|
||||
} else {
|
||||
/* update *current_data when the page has been
|
||||
@@ -431,16 +401,15 @@ static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
|
||||
}
|
||||
|
||||
/* Send XBZRLE based compressed page */
|
||||
bytes_xbzrle = save_page_header(f, block, offset | RAM_SAVE_FLAG_XBZRLE);
|
||||
bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
|
||||
qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
|
||||
qemu_put_be16(f, encoded_len);
|
||||
qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
|
||||
bytes_xbzrle += encoded_len + 1 + 2;
|
||||
bytes_sent += encoded_len + 1 + 2;
|
||||
acct_info.xbzrle_pages++;
|
||||
acct_info.xbzrle_bytes += bytes_xbzrle;
|
||||
*bytes_transferred += bytes_xbzrle;
|
||||
acct_info.xbzrle_bytes += bytes_sent;
|
||||
|
||||
return 1;
|
||||
return bytes_sent;
|
||||
}
|
||||
|
||||
static inline
|
||||
@@ -516,6 +485,7 @@ static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
|
||||
}
|
||||
|
||||
|
||||
/* Needs iothread lock! */
|
||||
/* Fix me: there are too many global variables used in migration process. */
|
||||
static int64_t start_time;
|
||||
static int64_t bytes_xfer_prev;
|
||||
@@ -528,7 +498,6 @@ static void migration_bitmap_sync_init(void)
|
||||
num_dirty_pages_period = 0;
|
||||
}
|
||||
|
||||
/* Called with iothread lock held, to protect ram_list.dirty_memory[] */
|
||||
static void migration_bitmap_sync(void)
|
||||
{
|
||||
RAMBlock *block;
|
||||
@@ -552,12 +521,9 @@ static void migration_bitmap_sync(void)
|
||||
trace_migration_bitmap_sync_start();
|
||||
address_space_sync_dirty_bitmap(&address_space_memory);
|
||||
|
||||
rcu_read_lock();
|
||||
QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
|
||||
migration_bitmap_sync_range(block->mr->ram_addr, block->used_length);
|
||||
QTAILQ_FOREACH(block, &ram_list.blocks, next) {
|
||||
migration_bitmap_sync_range(block->mr->ram_addr, block->length);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
trace_migration_bitmap_sync_end(migration_dirty_pages
|
||||
- num_dirty_pages_init);
|
||||
num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
|
||||
@@ -603,68 +569,55 @@ static void migration_bitmap_sync(void)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* ram_save_page: Send the given page to the stream
|
||||
*
|
||||
* Returns: Number of pages written.
|
||||
*
|
||||
* @f: QEMUFile where to send the data
|
||||
* @block: block that contains the page we want to send
|
||||
* @offset: offset inside the block for the page
|
||||
* @last_stage: if we are at the completion stage
|
||||
* @bytes_transferred: increase it with the number of transferred bytes
|
||||
* Returns: Number of bytes written.
|
||||
*/
|
||||
static int ram_save_page(QEMUFile *f, RAMBlock* block, ram_addr_t offset,
|
||||
bool last_stage, uint64_t *bytes_transferred)
|
||||
bool last_stage)
|
||||
{
|
||||
int pages = -1;
|
||||
uint64_t bytes_xmit;
|
||||
int bytes_sent;
|
||||
int cont;
|
||||
ram_addr_t current_addr;
|
||||
MemoryRegion *mr = block->mr;
|
||||
uint8_t *p;
|
||||
int ret;
|
||||
bool send_async = true;
|
||||
|
||||
cont = (block == last_sent_block) ? RAM_SAVE_FLAG_CONTINUE : 0;
|
||||
|
||||
p = memory_region_get_ram_ptr(mr) + offset;
|
||||
|
||||
/* In doubt sent page as normal */
|
||||
bytes_xmit = 0;
|
||||
bytes_sent = -1;
|
||||
ret = ram_control_save_page(f, block->offset,
|
||||
offset, TARGET_PAGE_SIZE, &bytes_xmit);
|
||||
if (bytes_xmit) {
|
||||
*bytes_transferred += bytes_xmit;
|
||||
pages = 1;
|
||||
}
|
||||
offset, TARGET_PAGE_SIZE, &bytes_sent);
|
||||
|
||||
XBZRLE_cache_lock();
|
||||
|
||||
current_addr = block->offset + offset;
|
||||
|
||||
if (block == last_sent_block) {
|
||||
offset |= RAM_SAVE_FLAG_CONTINUE;
|
||||
}
|
||||
if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
|
||||
if (ret != RAM_SAVE_CONTROL_DELAYED) {
|
||||
if (bytes_xmit > 0) {
|
||||
if (bytes_sent > 0) {
|
||||
acct_info.norm_pages++;
|
||||
} else if (bytes_xmit == 0) {
|
||||
} else if (bytes_sent == 0) {
|
||||
acct_info.dup_pages++;
|
||||
}
|
||||
}
|
||||
} else if (is_zero_range(p, TARGET_PAGE_SIZE)) {
|
||||
acct_info.dup_pages++;
|
||||
*bytes_transferred += save_page_header(f, block,
|
||||
offset | RAM_SAVE_FLAG_COMPRESS);
|
||||
bytes_sent = save_block_hdr(f, block, offset, cont,
|
||||
RAM_SAVE_FLAG_COMPRESS);
|
||||
qemu_put_byte(f, 0);
|
||||
*bytes_transferred += 1;
|
||||
pages = 1;
|
||||
bytes_sent++;
|
||||
/* Must let xbzrle know, otherwise a previous (now 0'd) cached
|
||||
* page would be stale
|
||||
*/
|
||||
xbzrle_cache_zero_page(current_addr);
|
||||
} else if (!ram_bulk_stage && migrate_use_xbzrle()) {
|
||||
pages = save_xbzrle_page(f, &p, current_addr, block,
|
||||
offset, last_stage, bytes_transferred);
|
||||
bytes_sent = save_xbzrle_page(f, &p, current_addr, block,
|
||||
offset, cont, last_stage);
|
||||
if (!last_stage) {
|
||||
/* Can't send this cached data async, since the cache page
|
||||
* might get updated before it gets to the wire
|
||||
@@ -674,48 +627,39 @@ static int ram_save_page(QEMUFile *f, RAMBlock* block, ram_addr_t offset,
|
||||
}
|
||||
|
||||
/* XBZRLE overflow or normal page */
|
||||
if (pages == -1) {
|
||||
*bytes_transferred += save_page_header(f, block,
|
||||
offset | RAM_SAVE_FLAG_PAGE);
|
||||
if (bytes_sent == -1) {
|
||||
bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
|
||||
if (send_async) {
|
||||
qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
|
||||
} else {
|
||||
qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
|
||||
}
|
||||
*bytes_transferred += TARGET_PAGE_SIZE;
|
||||
pages = 1;
|
||||
bytes_sent += TARGET_PAGE_SIZE;
|
||||
acct_info.norm_pages++;
|
||||
}
|
||||
|
||||
XBZRLE_cache_unlock();
|
||||
|
||||
return pages;
|
||||
return bytes_sent;
|
||||
}
|
||||
|
||||
/**
|
||||
* ram_find_and_save_block: Finds a dirty page and sends it to f
|
||||
/*
|
||||
* ram_find_and_save_block: Finds a page to send and sends it to f
|
||||
*
|
||||
* Called within an RCU critical section.
|
||||
*
|
||||
* Returns: The number of pages written
|
||||
* Returns: The number of bytes written.
|
||||
* 0 means no dirty pages
|
||||
*
|
||||
* @f: QEMUFile where to send the data
|
||||
* @last_stage: if we are at the completion stage
|
||||
* @bytes_transferred: increase it with the number of transferred bytes
|
||||
*/
|
||||
|
||||
static int ram_find_and_save_block(QEMUFile *f, bool last_stage,
|
||||
uint64_t *bytes_transferred)
|
||||
static int ram_find_and_save_block(QEMUFile *f, bool last_stage)
|
||||
{
|
||||
RAMBlock *block = last_seen_block;
|
||||
ram_addr_t offset = last_offset;
|
||||
bool complete_round = false;
|
||||
int pages = 0;
|
||||
int bytes_sent = 0;
|
||||
MemoryRegion *mr;
|
||||
|
||||
if (!block)
|
||||
block = QLIST_FIRST_RCU(&ram_list.blocks);
|
||||
block = QTAILQ_FIRST(&ram_list.blocks);
|
||||
|
||||
while (true) {
|
||||
mr = block->mr;
|
||||
@@ -724,30 +668,28 @@ static int ram_find_and_save_block(QEMUFile *f, bool last_stage,
|
||||
offset >= last_offset) {
|
||||
break;
|
||||
}
|
||||
if (offset >= block->used_length) {
|
||||
if (offset >= block->length) {
|
||||
offset = 0;
|
||||
block = QLIST_NEXT_RCU(block, next);
|
||||
block = QTAILQ_NEXT(block, next);
|
||||
if (!block) {
|
||||
block = QLIST_FIRST_RCU(&ram_list.blocks);
|
||||
block = QTAILQ_FIRST(&ram_list.blocks);
|
||||
complete_round = true;
|
||||
ram_bulk_stage = false;
|
||||
}
|
||||
} else {
|
||||
pages = ram_save_page(f, block, offset, last_stage,
|
||||
bytes_transferred);
|
||||
bytes_sent = ram_save_page(f, block, offset, last_stage);
|
||||
|
||||
/* if page is unmodified, continue to the next */
|
||||
if (pages > 0) {
|
||||
if (bytes_sent > 0) {
|
||||
last_sent_block = block;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
last_seen_block = block;
|
||||
last_offset = offset;
|
||||
|
||||
return pages;
|
||||
return bytes_sent;
|
||||
}
|
||||
|
||||
static uint64_t bytes_transferred;
|
||||
@@ -784,10 +726,9 @@ uint64_t ram_bytes_total(void)
|
||||
RAMBlock *block;
|
||||
uint64_t total = 0;
|
||||
|
||||
rcu_read_lock();
|
||||
QLIST_FOREACH_RCU(block, &ram_list.blocks, next)
|
||||
total += block->used_length;
|
||||
rcu_read_unlock();
|
||||
QTAILQ_FOREACH(block, &ram_list.blocks, next)
|
||||
total += block->length;
|
||||
|
||||
return total;
|
||||
}
|
||||
|
||||
@@ -833,13 +774,6 @@ static void reset_ram_globals(void)
|
||||
|
||||
#define MAX_WAIT 50 /* ms, half buffered_file limit */
|
||||
|
||||
|
||||
/* Each of ram_save_setup, ram_save_iterate and ram_save_complete has
|
||||
* long-running RCU critical section. When rcu-reclaims in the code
|
||||
* start to become numerous it will be necessary to reduce the
|
||||
* granularity of these critical sections.
|
||||
*/
|
||||
|
||||
static int ram_save_setup(QEMUFile *f, void *opaque)
|
||||
{
|
||||
RAMBlock *block;
|
||||
@@ -880,10 +814,8 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
|
||||
acct_clear();
|
||||
}
|
||||
|
||||
/* iothread lock needed for ram_list.dirty_memory[] */
|
||||
qemu_mutex_lock_iothread();
|
||||
qemu_mutex_lock_ramlist();
|
||||
rcu_read_lock();
|
||||
bytes_transferred = 0;
|
||||
reset_ram_globals();
|
||||
|
||||
@@ -895,22 +827,27 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
|
||||
* Count the total number of pages used by ram blocks not including any
|
||||
* gaps due to alignment or unplugs.
|
||||
*/
|
||||
migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
|
||||
migration_dirty_pages = 0;
|
||||
QTAILQ_FOREACH(block, &ram_list.blocks, next) {
|
||||
uint64_t block_pages;
|
||||
|
||||
block_pages = block->length >> TARGET_PAGE_BITS;
|
||||
migration_dirty_pages += block_pages;
|
||||
}
|
||||
|
||||
memory_global_dirty_log_start();
|
||||
migration_bitmap_sync();
|
||||
qemu_mutex_unlock_ramlist();
|
||||
qemu_mutex_unlock_iothread();
|
||||
|
||||
qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
|
||||
|
||||
QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
|
||||
QTAILQ_FOREACH(block, &ram_list.blocks, next) {
|
||||
qemu_put_byte(f, strlen(block->idstr));
|
||||
qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
|
||||
qemu_put_be64(f, block->used_length);
|
||||
qemu_put_be64(f, block->length);
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
qemu_mutex_unlock_ramlist();
|
||||
|
||||
ram_control_before_iterate(f, RAM_CONTROL_SETUP);
|
||||
ram_control_after_iterate(f, RAM_CONTROL_SETUP);
|
||||
@@ -925,29 +862,27 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
|
||||
int ret;
|
||||
int i;
|
||||
int64_t t0;
|
||||
int pages_sent = 0;
|
||||
int total_sent = 0;
|
||||
|
||||
qemu_mutex_lock_ramlist();
|
||||
|
||||
rcu_read_lock();
|
||||
if (ram_list.version != last_version) {
|
||||
reset_ram_globals();
|
||||
}
|
||||
|
||||
/* Read version before ram_list.blocks */
|
||||
smp_rmb();
|
||||
|
||||
ram_control_before_iterate(f, RAM_CONTROL_ROUND);
|
||||
|
||||
t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
|
||||
i = 0;
|
||||
while ((ret = qemu_file_rate_limit(f)) == 0) {
|
||||
int pages;
|
||||
int bytes_sent;
|
||||
|
||||
pages = ram_find_and_save_block(f, false, &bytes_transferred);
|
||||
/* no more pages to sent */
|
||||
if (pages == 0) {
|
||||
bytes_sent = ram_find_and_save_block(f, false);
|
||||
/* no more blocks to sent */
|
||||
if (bytes_sent == 0) {
|
||||
break;
|
||||
}
|
||||
pages_sent += pages;
|
||||
total_sent += bytes_sent;
|
||||
acct_info.iterations++;
|
||||
check_guest_throttling();
|
||||
/* we want to check in the 1st loop, just in case it was the 1st time
|
||||
@@ -965,7 +900,8 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
|
||||
}
|
||||
i++;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
qemu_mutex_unlock_ramlist();
|
||||
|
||||
/*
|
||||
* Must occur before EOS (or any QEMUFile operation)
|
||||
@@ -973,6 +909,12 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
|
||||
*/
|
||||
ram_control_after_iterate(f, RAM_CONTROL_ROUND);
|
||||
|
||||
bytes_transferred += total_sent;
|
||||
|
||||
/*
|
||||
* Do not count these 8 bytes into total_sent, so that we can
|
||||
* return 0 if no page had been dirtied.
|
||||
*/
|
||||
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
|
||||
bytes_transferred += 8;
|
||||
|
||||
@@ -981,14 +923,12 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return pages_sent;
|
||||
return total_sent;
|
||||
}
|
||||
|
||||
/* Called with iothread lock */
|
||||
static int ram_save_complete(QEMUFile *f, void *opaque)
|
||||
{
|
||||
rcu_read_lock();
|
||||
|
||||
qemu_mutex_lock_ramlist();
|
||||
migration_bitmap_sync();
|
||||
|
||||
ram_control_before_iterate(f, RAM_CONTROL_FINISH);
|
||||
@@ -997,19 +937,20 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
|
||||
|
||||
/* flush all remaining blocks regardless of rate limiting */
|
||||
while (true) {
|
||||
int pages;
|
||||
int bytes_sent;
|
||||
|
||||
pages = ram_find_and_save_block(f, true, &bytes_transferred);
|
||||
bytes_sent = ram_find_and_save_block(f, true);
|
||||
/* no more blocks to sent */
|
||||
if (pages == 0) {
|
||||
if (bytes_sent == 0) {
|
||||
break;
|
||||
}
|
||||
bytes_transferred += bytes_sent;
|
||||
}
|
||||
|
||||
ram_control_after_iterate(f, RAM_CONTROL_FINISH);
|
||||
migration_end();
|
||||
|
||||
rcu_read_unlock();
|
||||
qemu_mutex_unlock_ramlist();
|
||||
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
|
||||
|
||||
return 0;
|
||||
@@ -1023,9 +964,7 @@ static uint64_t ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
|
||||
|
||||
if (remaining_size < max_size) {
|
||||
qemu_mutex_lock_iothread();
|
||||
rcu_read_lock();
|
||||
migration_bitmap_sync();
|
||||
rcu_read_unlock();
|
||||
qemu_mutex_unlock_iothread();
|
||||
remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
|
||||
}
|
||||
@@ -1067,9 +1006,6 @@ static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Must be called from within a rcu critical section.
|
||||
* Returns a pointer from within the RCU-protected ram_list.
|
||||
*/
|
||||
static inline void *host_from_stream_offset(QEMUFile *f,
|
||||
ram_addr_t offset,
|
||||
int flags)
|
||||
@@ -1079,7 +1015,7 @@ static inline void *host_from_stream_offset(QEMUFile *f,
|
||||
uint8_t len;
|
||||
|
||||
if (flags & RAM_SAVE_FLAG_CONTINUE) {
|
||||
if (!block || block->max_length <= offset) {
|
||||
if (!block || block->length <= offset) {
|
||||
error_report("Ack, bad migration stream!");
|
||||
return NULL;
|
||||
}
|
||||
@@ -1091,9 +1027,8 @@ static inline void *host_from_stream_offset(QEMUFile *f,
|
||||
qemu_get_buffer(f, (uint8_t *)id, len);
|
||||
id[len] = 0;
|
||||
|
||||
QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
|
||||
if (!strncmp(id, block->idstr, sizeof(id)) &&
|
||||
block->max_length > offset) {
|
||||
QTAILQ_FOREACH(block, &ram_list.blocks, next) {
|
||||
if (!strncmp(id, block->idstr, sizeof(id)) && block->length > offset) {
|
||||
return memory_region_get_ram_ptr(block->mr) + offset;
|
||||
}
|
||||
}
|
||||
@@ -1124,12 +1059,6 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
|
||||
ret = -EINVAL;
|
||||
}
|
||||
|
||||
/* This RCU critical section can be very long running.
|
||||
* When RCU reclaims in the code start to become numerous,
|
||||
* it will be necessary to reduce the granularity of this
|
||||
* critical section.
|
||||
*/
|
||||
rcu_read_lock();
|
||||
while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
|
||||
ram_addr_t addr, total_ram_bytes;
|
||||
void *host;
|
||||
@@ -1154,15 +1083,13 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
|
||||
id[len] = 0;
|
||||
length = qemu_get_be64(f);
|
||||
|
||||
QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
|
||||
QTAILQ_FOREACH(block, &ram_list.blocks, next) {
|
||||
if (!strncmp(id, block->idstr, sizeof(id))) {
|
||||
if (length != block->used_length) {
|
||||
Error *local_err = NULL;
|
||||
|
||||
ret = qemu_ram_resize(block->offset, length, &local_err);
|
||||
if (local_err) {
|
||||
error_report_err(local_err);
|
||||
}
|
||||
if (block->length != length) {
|
||||
error_report("Length mismatch: %s: 0x" RAM_ADDR_FMT
|
||||
" in != 0x" RAM_ADDR_FMT, id, length,
|
||||
block->length);
|
||||
ret = -EINVAL;
|
||||
}
|
||||
break;
|
||||
}
|
||||
@@ -1184,6 +1111,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
ch = qemu_get_byte(f);
|
||||
ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
|
||||
break;
|
||||
@@ -1194,6 +1122,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
|
||||
break;
|
||||
case RAM_SAVE_FLAG_XBZRLE:
|
||||
@@ -1203,6 +1132,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
if (load_xbzrle(f, addr, host) < 0) {
|
||||
error_report("Failed to decompress XBZRLE page at "
|
||||
RAM_ADDR_FMT, addr);
|
||||
@@ -1227,7 +1157,6 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
|
||||
}
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
DPRINTF("Completed load of VM with exit code %d seq iteration "
|
||||
"%" PRIu64 "\n", ret, seq_iter);
|
||||
return ret;
|
||||
|
async.c: 39 changes
@@ -44,12 +44,10 @@ struct QEMUBH {
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
QEMUBH *bh;
bh = g_new(QEMUBH, 1);
*bh = (QEMUBH){
.ctx = ctx,
.cb = cb,
.opaque = opaque,
};
bh = g_malloc0(sizeof(QEMUBH));
bh->ctx = ctx;
bh->cb = cb;
bh->opaque = opaque;
qemu_mutex_lock(&ctx->bh_lock);
bh->next = ctx->first_bh;
/* Make sure that the members are ready before putting bh into list */
@@ -72,13 +70,12 @@ int aio_bh_poll(AioContext *ctx)
/* Make sure that fetching bh happens before accessing its members */
smp_read_barrier_depends();
next = bh->next;
/* The atomic_xchg is paired with the one in qemu_bh_schedule. The
 * implicit memory barrier ensures that the callback sees all writes
 * done by the scheduling thread. It also ensures that the scheduling
 * thread sees the zero before bh->cb has run, and thus will call
 * aio_notify again if necessary.
 */
if (!bh->deleted && atomic_xchg(&bh->scheduled, 0)) {
if (!bh->deleted && bh->scheduled) {
bh->scheduled = 0;
/* Paired with write barrier in bh schedule to ensure reading for
 * idle & callbacks coming after bh's scheduling.
 */
smp_rmb();
if (!bh->idle)
ret = 1;
bh->idle = 0;
@@ -109,28 +106,33 @@ int aio_bh_poll(AioContext *ctx)

void qemu_bh_schedule_idle(QEMUBH *bh)
{
if (bh->scheduled)
return;
bh->idle = 1;
/* Make sure that idle & any writes needed by the callback are done
 * before the locations are read in the aio_bh_poll.
 */
atomic_mb_set(&bh->scheduled, 1);
smp_wmb();
bh->scheduled = 1;
}

void qemu_bh_schedule(QEMUBH *bh)
{
AioContext *ctx;

if (bh->scheduled)
return;
ctx = bh->ctx;
bh->idle = 0;
/* The memory barrier implicit in atomic_xchg makes sure that:
/* Make sure that:
 * 1. idle & any writes needed by the callback are done before the
 * locations are read in the aio_bh_poll.
 * 2. ctx is loaded before scheduled is set and the callback has a chance
 * to execute.
 */
if (atomic_xchg(&bh->scheduled, 1) == 0) {
aio_notify(ctx);
}
smp_mb();
bh->scheduled = 1;
aio_notify(ctx);
}

@@ -298,7 +300,6 @@ AioContext *aio_context_new(Error **errp)
error_setg_errno(errp, -ret, "Failed to initialize event notifier");
return NULL;
}
g_source_set_can_recurse(&ctx->source, true);
aio_set_event_notifier(ctx, &ctx->notifier,
(EventNotifierHandler *)
event_notifier_test_and_clear);
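As context for the qemu_bh_schedule() hunk above, here is a minimal C11 sketch of the schedule-once pattern built on an atomic exchange; it uses <stdatomic.h> rather than QEMU's own atomic helpers, and BH/notify() are simplified stand-ins:

#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
    atomic_int scheduled;   /* 0 = idle, 1 = scheduled */
} BH;

static void notify(void)
{
    /* stand-in for aio_notify(): wake the event loop */
}

/* Producer side: the exchange both sets the flag and reports its previous
 * value, so only the thread that flips 0 -> 1 issues the wakeup, and the
 * implied barrier orders earlier field writes before the flag becomes
 * visible. */
static void bh_schedule(BH *bh)
{
    if (atomic_exchange(&bh->scheduled, 1) == 0) {
        notify();
    }
}

/* Consumer side: exchanging the flag back to 0 tells the poller whether the
 * callback still needs to run. */
static bool bh_take(BH *bh)
{
    return atomic_exchange(&bh->scheduled, 0) == 1;
}

The same pairing is what the atomic_xchg()-based variant shown in the hunk above relies on.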
|
||||
|
@@ -335,26 +335,12 @@ host_memory_backend_memory_complete(UserCreatable *uc, Error **errp)
}
}

static bool
host_memory_backend_can_be_deleted(UserCreatable *uc, Error **errp)
{
MemoryRegion *mr;

mr = host_memory_backend_get_memory(MEMORY_BACKEND(uc), errp);
if (memory_region_is_mapped(mr)) {
return false;
} else {
return true;
}
}

static void
host_memory_backend_class_init(ObjectClass *oc, void *data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);

ucc->complete = host_memory_backend_memory_complete;
ucc->can_be_deleted = host_memory_backend_can_be_deleted;
}

static const TypeInfo host_memory_backend_info = {
|
||||
|
@@ -88,7 +88,11 @@ static char *rng_random_get_filename(Object *obj, Error **errp)
{
RndRandom *s = RNG_RANDOM(obj);

return g_strdup(s->filename);
if (s->filename) {
return g_strdup(s->filename);
}

return NULL;
}

static void rng_random_set_filename(Object *obj, const char *filename,
|
||||
|
@@ -36,7 +36,7 @@ void tpm_backend_destroy(TPMBackend *s)
{
TPMBackendClass *k = TPM_BACKEND_GET_CLASS(s);

k->ops->destroy(s);
return k->ops->destroy(s);
}

int tpm_backend_init(TPMBackend *s, TPMState *state,
|
||||
|
balloon.c: 59 changes
@@ -36,21 +36,6 @@ static QEMUBalloonEvent *balloon_event_fn;
static QEMUBalloonStatus *balloon_stat_fn;
static void *balloon_opaque;

static bool have_balloon(Error **errp)
{
if (kvm_enabled() && !kvm_has_sync_mmu()) {
error_set(errp, ERROR_CLASS_KVM_MISSING_CAP,
"Using KVM without synchronous MMU, balloon unavailable");
return false;
}
if (!balloon_event_fn) {
error_set(errp, ERROR_CLASS_DEVICE_NOT_ACTIVE,
"No balloon device has been activated");
return false;
}
return true;
}

int qemu_add_balloon_handler(QEMUBalloonEvent *event_func,
QEMUBalloonStatus *stat_func, void *opaque)
{
@@ -77,30 +62,58 @@ void qemu_remove_balloon_handler(void *opaque)
balloon_opaque = NULL;
}

static int qemu_balloon(ram_addr_t target)
{
if (!balloon_event_fn) {
return 0;
}
trace_balloon_event(balloon_opaque, target);
balloon_event_fn(balloon_opaque, target);
return 1;
}

static int qemu_balloon_status(BalloonInfo *info)
{
if (!balloon_stat_fn) {
return 0;
}
balloon_stat_fn(balloon_opaque, info);
return 1;
}

BalloonInfo *qmp_query_balloon(Error **errp)
{
BalloonInfo *info;

if (!have_balloon(errp)) {
if (kvm_enabled() && !kvm_has_sync_mmu()) {
error_set(errp, QERR_KVM_MISSING_CAP, "synchronous MMU", "balloon");
return NULL;
}

info = g_malloc0(sizeof(*info));
balloon_stat_fn(balloon_opaque, info);

if (qemu_balloon_status(info) == 0) {
error_set(errp, QERR_DEVICE_NOT_ACTIVE, "balloon");
qapi_free_BalloonInfo(info);
return NULL;
}

return info;
}

void qmp_balloon(int64_t target, Error **errp)
void qmp_balloon(int64_t value, Error **errp)
{
if (!have_balloon(errp)) {
if (kvm_enabled() && !kvm_has_sync_mmu()) {
error_set(errp, QERR_KVM_MISSING_CAP, "synchronous MMU", "balloon");
return;
}

if (target <= 0) {
if (value <= 0) {
error_set(errp, QERR_INVALID_PARAMETER_VALUE, "target", "a size");
return;
}

trace_balloon_event(balloon_opaque, target);
balloon_event_fn(balloon_opaque, target);

if (qemu_balloon(value) == 0) {
error_set(errp, QERR_DEVICE_NOT_ACTIVE, "balloon");
}
}
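To illustrate the guard-helper shape that have_balloon() gives the two entry points in the hunk above, here is a small self-contained sketch; the Error type, error_set(), and the caller are simplified stand-ins, not QEMU's real error API:

#include <stdbool.h>
#include <stdio.h>

typedef struct {
    const char *msg;
} Error;

static void error_set(Error **errp, const char *msg)
{
    static Error err;
    err.msg = msg;
    if (errp) {
        *errp = &err;
    }
}

static bool device_present;

/* One helper performs every precondition check and reports the failure,
 * so each public entry point needs only a single early return. */
static bool have_device(Error **errp)
{
    if (!device_present) {
        error_set(errp, "No device has been activated");
        return false;
    }
    return true;
}

int main(void)
{
    Error *err = NULL;
    if (!have_device(&err)) {
        fprintf(stderr, "%s\n", err->msg);
        return 1;
    }
    printf("device queried\n");
    return 0;
}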
|
||||
|
@@ -23,7 +23,6 @@
#include "migration/block.h"
#include "migration/migration.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-backend.h"
#include <assert.h>

#define BLOCK_SIZE (1 << 20)
@@ -304,7 +303,7 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
nr_sectors, blk_mig_read_cb, blk);

bdrv_reset_dirty_bitmap(bs, bmds->dirty_bitmap, cur_sector, nr_sectors);
bdrv_reset_dirty(bs, cur_sector, nr_sectors);
qemu_mutex_unlock_iothread();

bmds->cur_sector = cur_sector + nr_sectors;
@@ -497,8 +496,7 @@ static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
g_free(blk);
}

bdrv_reset_dirty_bitmap(bmds->bs, bmds->dirty_bitmap, sector,
nr_sectors);
bdrv_reset_dirty(bmds->bs, sector, nr_sectors);
break;
}
sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
@@ -784,7 +782,6 @@ static int block_load(QEMUFile *f, void *opaque, int version_id)
char device_name[256];
int64_t addr;
BlockDriverState *bs, *bs_prev = NULL;
BlockBackend *blk;
uint8_t *buf;
int64_t total_sectors = 0;
int nr_sectors;
@@ -802,13 +799,12 @@ static int block_load(QEMUFile *f, void *opaque, int version_id)
qemu_get_buffer(f, (uint8_t *)device_name, len);
device_name[len] = '\0';

blk = blk_by_name(device_name);
if (!blk) {
bs = bdrv_find(device_name);
if (!bs) {
fprintf(stderr, "Error unknown block device %s\n",
device_name);
return -EINVAL;
}
bs = blk_bs(blk);

if (bs != bs_prev) {
bs_prev = bs;
|
block.c: 413 changes
@@ -97,10 +97,6 @@ static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states =
|
||||
static QLIST_HEAD(, BlockDriver) bdrv_drivers =
|
||||
QLIST_HEAD_INITIALIZER(bdrv_drivers);
|
||||
|
||||
static void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
|
||||
int nr_sectors);
|
||||
static void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
|
||||
int nr_sectors);
|
||||
/* If non-zero, use only whitelisted block drivers */
|
||||
static int use_bdrv_whitelist;
|
||||
|
||||
@@ -233,7 +229,7 @@ size_t bdrv_opt_mem_align(BlockDriverState *bs)
|
||||
}
|
||||
|
||||
/* check if the path starts with "<protocol>:" */
|
||||
int path_has_protocol(const char *path)
|
||||
static int path_has_protocol(const char *path)
|
||||
{
|
||||
const char *p;
|
||||
|
||||
@@ -307,32 +303,15 @@ void path_combine(char *dest, int dest_size,
|
||||
}
|
||||
}
|
||||
|
||||
void bdrv_get_full_backing_filename_from_filename(const char *backed,
|
||||
const char *backing,
|
||||
char *dest, size_t sz,
|
||||
Error **errp)
|
||||
void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
|
||||
{
|
||||
if (backing[0] == '\0' || path_has_protocol(backing) ||
|
||||
path_is_absolute(backing))
|
||||
{
|
||||
pstrcpy(dest, sz, backing);
|
||||
} else if (backed[0] == '\0' || strstart(backed, "json:", NULL)) {
|
||||
error_setg(errp, "Cannot use relative backing file names for '%s'",
|
||||
backed);
|
||||
if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
|
||||
pstrcpy(dest, sz, bs->backing_file);
|
||||
} else {
|
||||
path_combine(dest, sz, backed, backing);
|
||||
path_combine(dest, sz, bs->filename, bs->backing_file);
|
||||
}
|
||||
}
|
||||
|
||||
void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz,
|
||||
Error **errp)
|
||||
{
|
||||
char *backed = bs->exact_filename[0] ? bs->exact_filename : bs->filename;
|
||||
|
||||
bdrv_get_full_backing_filename_from_filename(backed, bs->backing_file,
|
||||
dest, sz, errp);
|
||||
}
|
||||
|
||||
void bdrv_register(BlockDriver *bdrv)
|
||||
{
|
||||
/* Block drivers without coroutine functions need emulation */
|
||||
@@ -508,8 +487,9 @@ int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp)
|
||||
Error *local_err = NULL;
|
||||
int ret;
|
||||
|
||||
drv = bdrv_find_protocol(filename, true, errp);
|
||||
drv = bdrv_find_protocol(filename, true);
|
||||
if (drv == NULL) {
|
||||
error_setg(errp, "Could not find protocol for file '%s'", filename);
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
@@ -568,40 +548,6 @@ void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Try to get @bs's logical and physical block size.
|
||||
* On success, store them in @bsz struct and return 0.
|
||||
* On failure return -errno.
|
||||
* @bs must not be empty.
|
||||
*/
|
||||
int bdrv_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
|
||||
{
|
||||
BlockDriver *drv = bs->drv;
|
||||
|
||||
if (drv && drv->bdrv_probe_blocksizes) {
|
||||
return drv->bdrv_probe_blocksizes(bs, bsz);
|
||||
}
|
||||
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
/**
|
||||
* Try to get @bs's geometry (cyls, heads, sectors).
|
||||
* On success, store them in @geo struct and return 0.
|
||||
* On failure return -errno.
|
||||
* @bs must not be empty.
|
||||
*/
|
||||
int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo)
|
||||
{
|
||||
BlockDriver *drv = bs->drv;
|
||||
|
||||
if (drv && drv->bdrv_probe_geometry) {
|
||||
return drv->bdrv_probe_geometry(bs, geo);
|
||||
}
|
||||
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
/*
|
||||
* Create a uniquely-named empty temporary file.
|
||||
* Return 0 upon success, otherwise a negative errno value.
|
||||
@@ -661,8 +607,7 @@ static BlockDriver *find_hdev_driver(const char *filename)
|
||||
}
|
||||
|
||||
BlockDriver *bdrv_find_protocol(const char *filename,
|
||||
bool allow_protocol_prefix,
|
||||
Error **errp)
|
||||
bool allow_protocol_prefix)
|
||||
{
|
||||
BlockDriver *drv1;
|
||||
char protocol[128];
|
||||
@@ -700,49 +645,15 @@ BlockDriver *bdrv_find_protocol(const char *filename,
|
||||
return drv1;
|
||||
}
|
||||
}
|
||||
|
||||
error_setg(errp, "Unknown protocol '%s'", protocol);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Guess image format by probing its contents.
|
||||
* This is not a good idea when your image is raw (CVE-2008-2004), but
|
||||
* we do it anyway for backward compatibility.
|
||||
*
|
||||
* @buf contains the image's first @buf_size bytes.
|
||||
* @buf_size is the buffer size in bytes (generally BLOCK_PROBE_BUF_SIZE,
|
||||
* but can be smaller if the image file is smaller)
|
||||
* @filename is its filename.
|
||||
*
|
||||
* For all block drivers, call the bdrv_probe() method to get its
|
||||
* probing score.
|
||||
* Return the first block driver with the highest probing score.
|
||||
*/
|
||||
BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
|
||||
const char *filename)
|
||||
{
|
||||
int score_max = 0, score;
|
||||
BlockDriver *drv = NULL, *d;
|
||||
|
||||
QLIST_FOREACH(d, &bdrv_drivers, list) {
|
||||
if (d->bdrv_probe) {
|
||||
score = d->bdrv_probe(buf, buf_size, filename);
|
||||
if (score > score_max) {
|
||||
score_max = score;
|
||||
drv = d;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return drv;
|
||||
}
|
||||
|
||||
static int find_image_format(BlockDriverState *bs, const char *filename,
|
||||
BlockDriver **pdrv, Error **errp)
|
||||
{
|
||||
BlockDriver *drv;
|
||||
uint8_t buf[BLOCK_PROBE_BUF_SIZE];
|
||||
int score, score_max;
|
||||
BlockDriver *drv1, *drv;
|
||||
uint8_t buf[2048];
|
||||
int ret = 0;
|
||||
|
||||
/* Return the raw BlockDriver * to scsi-generic devices or empty drives */
|
||||
@@ -759,7 +670,17 @@ static int find_image_format(BlockDriverState *bs, const char *filename,
|
||||
return ret;
|
||||
}
|
||||
|
||||
drv = bdrv_probe_all(buf, ret, filename);
|
||||
score_max = 0;
|
||||
drv = NULL;
|
||||
QLIST_FOREACH(drv1, &bdrv_drivers, list) {
|
||||
if (drv1->bdrv_probe) {
|
||||
score = drv1->bdrv_probe(buf, ret, filename);
|
||||
if (score > score_max) {
|
||||
score_max = score;
|
||||
drv = drv1;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!drv) {
|
||||
error_setg(errp, "Could not determine image format: No compatible "
|
||||
"driver found");
|
||||
@@ -1006,6 +927,7 @@ static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
|
||||
bs->zero_beyond_eof = true;
|
||||
open_flags = bdrv_open_flags(bs, flags);
|
||||
bs->read_only = !(open_flags & BDRV_O_RDWR);
|
||||
bs->growable = !!(flags & BDRV_O_PROTOCOL);
|
||||
|
||||
if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
|
||||
error_setg(errp,
|
||||
@@ -1065,13 +987,6 @@ static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
|
||||
goto free_and_fail;
|
||||
}
|
||||
|
||||
if (bs->encrypted) {
|
||||
error_report("Encrypted images are deprecated");
|
||||
error_printf("Support for them will be removed in a future release.\n"
|
||||
"You can use 'qemu-img convert' to convert your image"
|
||||
" to an unencrypted one.\n");
|
||||
}
|
||||
|
||||
ret = refresh_total_sectors(bs, bs->total_sectors);
|
||||
if (ret < 0) {
|
||||
error_setg_errno(errp, -ret, "Could not refresh total sector count");
|
||||
@@ -1178,8 +1093,9 @@ static int bdrv_fill_options(QDict **options, const char **pfilename, int flags,
|
||||
} else {
|
||||
if (!drvname && protocol) {
|
||||
if (filename) {
|
||||
drv = bdrv_find_protocol(filename, parse_filename, errp);
|
||||
drv = bdrv_find_protocol(filename, parse_filename);
|
||||
if (!drv) {
|
||||
error_setg(errp, "Unknown protocol");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@@ -1241,7 +1157,7 @@ void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd)

    bdrv_op_block_all(bs->backing_hd, bs->backing_blocker);
    /* Otherwise we won't be able to commit due to check in bdrv_commit */
    bdrv_op_unblock(bs->backing_hd, BLOCK_OP_TYPE_COMMIT_TARGET,
    bdrv_op_unblock(bs->backing_hd, BLOCK_OP_TYPE_COMMIT,
                    bs->backing_blocker);
out:
    bdrv_refresh_limits(bs, NULL);
@@ -1279,14 +1195,7 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
        QDECREF(options);
        goto free_exit;
    } else {
        bdrv_get_full_backing_filename(bs, backing_filename, PATH_MAX,
                                       &local_err);
        if (local_err) {
            ret = -EINVAL;
            error_propagate(errp, local_err);
            QDECREF(options);
            goto free_exit;
        }
        bdrv_get_full_backing_filename(bs, backing_filename, PATH_MAX);
    }

    if (!bs->drv || !bs->drv->supports_backing) {
@@ -1405,7 +1314,7 @@ int bdrv_append_temp_snapshot(BlockDriverState *bs, int flags, Error **errp)

    opts = qemu_opts_create(bdrv_qcow2.create_opts, NULL, 0,
                            &error_abort);
    qemu_opt_set_number(opts, BLOCK_OPT_SIZE, total_size, &error_abort);
    qemu_opt_set_number(opts, BLOCK_OPT_SIZE, total_size);
    ret = bdrv_create(&bdrv_qcow2, tmp_filename, opts, &local_err);
    qemu_opts_del(opts);
    if (ret < 0) {
@@ -1550,7 +1459,6 @@ int bdrv_open(BlockDriverState **pbs, const char *filename,
    }

    /* Image format probing */
    bs->probed = !drv;
    if (!drv && file) {
        ret = find_image_format(file, filename, &drv, &local_err);
        if (ret < 0) {
@@ -1926,6 +1834,7 @@ void bdrv_close(BlockDriverState *bs)
    bs->encrypted = 0;
    bs->valid_key = 0;
    bs->sg = 0;
    bs->growable = 0;
    bs->zero_beyond_eof = false;
    QDECREF(bs->options);
    bs->options = NULL;
@@ -2247,6 +2156,7 @@ int bdrv_commit(BlockDriverState *bs)
    int n, ro, open_flags;
    int ret = 0;
    uint8_t *buf = NULL;
    char filename[PATH_MAX];

    if (!drv)
        return -ENOMEDIUM;
@@ -2255,12 +2165,14 @@ int bdrv_commit(BlockDriverState *bs)
        return -ENOTSUP;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT_SOURCE, NULL) ||
        bdrv_op_is_blocked(bs->backing_hd, BLOCK_OP_TYPE_COMMIT_TARGET, NULL)) {
    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT, NULL) ||
        bdrv_op_is_blocked(bs->backing_hd, BLOCK_OP_TYPE_COMMIT, NULL)) {
        return -EBUSY;
    }

    ro = bs->backing_hd->read_only;
    /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */
    pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
    open_flags = bs->backing_hd->open_flags;

    if (ro) {
@@ -2685,17 +2597,25 @@ exit:
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
    int64_t len;

    if (size > INT_MAX) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
    if (!bdrv_is_inserted(bs))
        return -ENOMEDIUM;
    }

    if (offset < 0) {
    if (bs->growable)
        return 0;

    len = bdrv_getlength(bs);

    if (offset < 0)
        return -EIO;

    if ((offset > len) || (len - offset < size))
        return -EIO;
    }

    return 0;
}
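The byte-range check above is deliberately written as (offset > len) || (len - offset < size) rather than offset + size > len, so that very large values cannot wrap around. A self-contained sketch of the same idea, with invented numbers:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Overflow-safe variant, mirroring the check above: compare against the
 * remaining space (len - offset) instead of computing offset + size,
 * which could wrap around for large inputs. */
static bool request_in_bounds(int64_t offset, uint64_t size, int64_t len)
{
    if (offset < 0 || len < 0) {
        return false;
    }
    if (offset > len) {
        return false;
    }
    return (uint64_t)(len - offset) >= size;
}

int main(void)
{
    int64_t len = 1 << 20;                                    /* 1 MiB image */
    printf("%d\n", request_in_bounds(len - 512, 512, len));   /* 1: fits exactly */
    printf("%d\n", request_in_bounds(len - 512, 1024, len));  /* 0: runs past EOF */
    printf("%d\n", request_in_bounds(INT64_MAX, 4096, len));  /* 0: no wraparound */
    return 0;
}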
@@ -2703,7 +2623,7 @@ static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
    if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

@@ -2790,7 +2710,7 @@ static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
|
||||
.iov_len = nb_sectors * BDRV_SECTOR_SIZE,
|
||||
};
|
||||
|
||||
if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
|
||||
if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@@ -2858,10 +2778,13 @@ int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
|
||||
}
|
||||
|
||||
for (;;) {
|
||||
nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
|
||||
nb_sectors = target_sectors - sector_num;
|
||||
if (nb_sectors <= 0) {
|
||||
return 0;
|
||||
}
|
||||
if (nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
|
||||
nb_sectors = INT_MAX / BDRV_SECTOR_SIZE;
|
||||
}
|
||||
ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
|
||||
if (ret < 0) {
|
||||
error_report("error getting block status at sector %" PRId64 ": %s",
|
||||
@@ -3074,10 +2997,10 @@ static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
|
||||
}
|
||||
|
||||
/* Forward the request to the BlockDriver */
|
||||
if (!bs->zero_beyond_eof) {
|
||||
if (!(bs->zero_beyond_eof && bs->growable)) {
|
||||
ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
|
||||
} else {
|
||||
/* Read zeros after EOF */
|
||||
/* Read zeros after EOF of growable BDSes */
|
||||
int64_t total_sectors, max_nb_sectors;
|
||||
|
||||
total_sectors = bdrv_nb_sectors(bs);
|
||||
@@ -3088,16 +3011,18 @@ static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
|
||||
|
||||
max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
|
||||
align >> BDRV_SECTOR_BITS);
|
||||
if (nb_sectors < max_nb_sectors) {
|
||||
ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
|
||||
} else if (max_nb_sectors > 0) {
|
||||
if (max_nb_sectors > 0) {
|
||||
QEMUIOVector local_qiov;
|
||||
size_t local_sectors;
|
||||
|
||||
max_nb_sectors = MIN(max_nb_sectors, SIZE_MAX / BDRV_SECTOR_BITS);
|
||||
local_sectors = MIN(max_nb_sectors, nb_sectors);
|
||||
|
||||
qemu_iovec_init(&local_qiov, qiov->niov);
|
||||
qemu_iovec_concat(&local_qiov, qiov, 0,
|
||||
max_nb_sectors * BDRV_SECTOR_SIZE);
|
||||
local_sectors * BDRV_SECTOR_SIZE);
|
||||
|
||||
ret = drv->bdrv_co_readv(bs, sector_num, max_nb_sectors,
|
||||
ret = drv->bdrv_co_readv(bs, sector_num, local_sectors,
|
||||
&local_qiov);
|
||||
|
||||
qemu_iovec_destroy(&local_qiov);
|
||||
@@ -3118,19 +3043,6 @@ out:
    return ret;
}

static inline uint64_t bdrv_get_align(BlockDriverState *bs)
{
    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    return MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
}

static inline bool bdrv_req_is_aligned(BlockDriverState *bs,
                                       int64_t offset, size_t bytes)
{
    int64_t align = bdrv_get_align(bs);
    return !(offset & (align - 1) || (bytes & (align - 1)));
}
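bdrv_req_is_aligned() relies on the alignment being a power of two, so x & (align - 1) is x modulo align. A self-contained sketch of that check, assuming an illustrative 512-byte alignment:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* For a power-of-two alignment, (x & (align - 1)) == 0 iff x is a multiple
 * of align -- the same trick bdrv_req_is_aligned() uses above. */
static bool is_aligned(int64_t offset, uint64_t bytes, uint64_t align)
{
    return !((offset & (align - 1)) || (bytes & (align - 1)));
}

int main(void)
{
    uint64_t align = 512;                           /* typical request_alignment */
    printf("%d\n", is_aligned(4096, 512, align));   /* 1: both multiples of 512 */
    printf("%d\n", is_aligned(4100, 512, align));   /* 0: offset not aligned */
    printf("%d\n", is_aligned(4096, 520, align));   /* 0: length not aligned */
    return 0;
}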

/*
 * Handle a read request in coroutine context
 */
@@ -3141,7 +3053,8 @@ static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
|
||||
BlockDriver *drv = bs->drv;
|
||||
BdrvTrackedRequest req;
|
||||
|
||||
uint64_t align = bdrv_get_align(bs);
|
||||
/* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
|
||||
uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
|
||||
uint8_t *head_buf = NULL;
|
||||
uint8_t *tail_buf = NULL;
|
||||
QEMUIOVector local_qiov;
|
||||
@@ -3151,10 +3064,8 @@ static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
|
||||
if (!drv) {
|
||||
return -ENOMEDIUM;
|
||||
}
|
||||
|
||||
ret = bdrv_check_byte_request(bs, offset, bytes);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
if (bdrv_check_byte_request(bs, offset, bytes)) {
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (bs->copy_on_read) {
|
||||
@@ -3210,7 +3121,7 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
|
||||
int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
|
||||
BdrvRequestFlags flags)
|
||||
{
|
||||
if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
|
||||
if (nb_sectors < 0 || nb_sectors > (UINT_MAX >> BDRV_SECTOR_BITS)) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@@ -3235,7 +3146,10 @@ int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
                                 BDRV_REQ_COPY_ON_READ);
}

#define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768
/* if no limit is specified in the BlockLimits use a default
 * of 32768 512-byte sectors (16 MiB) per request.
 */
#define MAX_WRITE_ZEROES_DEFAULT 32768

static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
@@ -3245,8 +3159,8 @@ static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    struct iovec iov = {0};
    int ret = 0;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_write_zeroes,
                                        BDRV_REQUEST_MAX_SECTORS);
    int max_write_zeroes = bs->bl.max_write_zeroes ?
                           bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT;

    while (nb_sectors > 0 && !ret) {
        int num = nb_sectors;
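Both initializations of max_write_zeroes above treat a zero field in BlockLimits as "no driver-specific limit". The MIN_NON_ZERO form expresses that directly; the macro below paraphrases its semantics (it is not copied from the QEMU headers) in a runnable sketch:

#include <stdio.h>

/* Paraphrased semantics: zero means "no limit", so it never wins;
 * otherwise take the smaller of the two values. */
#define MIN_NON_ZERO(a, b) \
    ((a) == 0 ? (b) : ((b) == 0 ? (a) : ((a) < (b) ? (a) : (b))))

int main(void)
{
    int request_max = 32768;            /* generic cap in 512-byte sectors */

    /* driver reports no write_zeroes limit -> fall back to the generic cap */
    printf("%d\n", MIN_NON_ZERO(0, request_max));       /* prints 32768 */

    /* a driver limit of 2048 sectors wins over the generic cap */
    printf("%d\n", MIN_NON_ZERO(2048, request_max));    /* prints 2048 */
    return 0;
}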
@@ -3281,9 +3195,6 @@ static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
|
||||
|
||||
if (ret == -ENOTSUP) {
|
||||
/* Fall back to bounce buffer if write zeroes is unsupported */
|
||||
int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length,
|
||||
MAX_WRITE_ZEROES_BOUNCE_BUFFER);
|
||||
num = MIN(num, max_xfer_len);
|
||||
iov.iov_len = num * BDRV_SECTOR_SIZE;
|
||||
if (iov.iov_base == NULL) {
|
||||
iov.iov_base = qemu_try_blockalign(bs, num * BDRV_SECTOR_SIZE);
|
||||
@@ -3300,7 +3211,7 @@ static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
|
||||
/* Keep bounce buffer around if it is big enough for all
|
||||
* all future requests.
|
||||
*/
|
||||
if (num < max_xfer_len) {
|
||||
if (num < max_write_zeroes) {
|
||||
qemu_vfree(iov.iov_base);
|
||||
iov.iov_base = NULL;
|
||||
}
|
||||
@@ -3368,7 +3279,7 @@ static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
|
||||
|
||||
block_acct_highest_sector(&bs->stats, sector_num, nb_sectors);
|
||||
|
||||
if (ret >= 0) {
|
||||
if (bs->growable && ret >= 0) {
|
||||
bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
|
||||
}
|
||||
|
||||
@@ -3383,7 +3294,8 @@ static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
|
||||
BdrvRequestFlags flags)
|
||||
{
|
||||
BdrvTrackedRequest req;
|
||||
uint64_t align = bdrv_get_align(bs);
|
||||
/* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
|
||||
uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
|
||||
uint8_t *head_buf = NULL;
|
||||
uint8_t *tail_buf = NULL;
|
||||
QEMUIOVector local_qiov;
|
||||
@@ -3396,10 +3308,8 @@ static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
|
||||
if (bs->read_only) {
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
ret = bdrv_check_byte_request(bs, offset, bytes);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
if (bdrv_check_byte_request(bs, offset, bytes)) {
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/* throttling disk I/O */
|
||||
@@ -3482,10 +3392,6 @@ static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
|
||||
bytes = ROUND_UP(bytes, align);
|
||||
}
|
||||
|
||||
if (use_local_qiov) {
|
||||
/* Local buffer may have non-zero data. */
|
||||
flags &= ~BDRV_REQ_ZERO_WRITE;
|
||||
}
|
||||
ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
|
||||
use_local_qiov ? &local_qiov : qiov,
|
||||
flags);
|
||||
@@ -3506,7 +3412,7 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
|
||||
int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
|
||||
BdrvRequestFlags flags)
|
||||
{
|
||||
if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
|
||||
if (nb_sectors < 0 || nb_sectors > (INT_MAX >> BDRV_SECTOR_BITS)) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@@ -3526,32 +3432,14 @@ int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
|
||||
int64_t sector_num, int nb_sectors,
|
||||
BdrvRequestFlags flags)
|
||||
{
|
||||
int ret;
|
||||
|
||||
trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);
|
||||
|
||||
if (!(bs->open_flags & BDRV_O_UNMAP)) {
|
||||
flags &= ~BDRV_REQ_MAY_UNMAP;
|
||||
}
|
||||
if (bdrv_req_is_aligned(bs, sector_num << BDRV_SECTOR_BITS,
|
||||
nb_sectors << BDRV_SECTOR_BITS)) {
|
||||
ret = bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
|
||||
BDRV_REQ_ZERO_WRITE | flags);
|
||||
} else {
|
||||
uint8_t *buf;
|
||||
QEMUIOVector local_qiov;
|
||||
size_t bytes = nb_sectors << BDRV_SECTOR_BITS;
|
||||
|
||||
buf = qemu_memalign(bdrv_opt_mem_align(bs), bytes);
|
||||
memset(buf, 0, bytes);
|
||||
qemu_iovec_init(&local_qiov, 1);
|
||||
qemu_iovec_add(&local_qiov, buf, bytes);
|
||||
|
||||
ret = bdrv_co_do_writev(bs, sector_num, nb_sectors, &local_qiov,
|
||||
BDRV_REQ_ZERO_WRITE | flags);
|
||||
qemu_vfree(buf);
|
||||
}
|
||||
return ret;
|
||||
return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
|
||||
BDRV_REQ_ZERO_WRITE | flags);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -3782,36 +3670,6 @@ int bdrv_set_key(BlockDriverState *bs, const char *key)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Provide an encryption key for @bs.
|
||||
* If @key is non-null:
|
||||
* If @bs is not encrypted, fail.
|
||||
* Else if the key is invalid, fail.
|
||||
* Else set @bs's key to @key, replacing the existing key, if any.
|
||||
* If @key is null:
|
||||
* If @bs is encrypted and still lacks a key, fail.
|
||||
* Else do nothing.
|
||||
* On failure, store an error object through @errp if non-null.
|
||||
*/
|
||||
void bdrv_add_key(BlockDriverState *bs, const char *key, Error **errp)
|
||||
{
|
||||
if (key) {
|
||||
if (!bdrv_is_encrypted(bs)) {
|
||||
error_setg(errp, "Device '%s' is not encrypted",
|
||||
bdrv_get_device_name(bs));
|
||||
} else if (bdrv_set_key(bs, key) < 0) {
|
||||
error_set(errp, QERR_INVALID_PASSWORD);
|
||||
}
|
||||
} else {
|
||||
if (bdrv_key_required(bs)) {
|
||||
error_set(errp, ERROR_CLASS_DEVICE_ENCRYPTED,
|
||||
"'%s' (%s) is encrypted",
|
||||
bdrv_get_device_name(bs),
|
||||
bdrv_get_encrypted_filename(bs));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const char *bdrv_get_format_name(BlockDriverState *bs)
|
||||
{
|
||||
return bs->drv ? bs->drv->format_name : NULL;
|
||||
@@ -3854,6 +3712,15 @@ void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
|
||||
g_free(formats);
|
||||
}
|
||||
|
||||
/* This function is to find block backend bs */
|
||||
/* TODO convert callers to blk_by_name(), then remove */
|
||||
BlockDriverState *bdrv_find(const char *name)
|
||||
{
|
||||
BlockBackend *blk = blk_by_name(name);
|
||||
|
||||
return blk ? blk_bs(blk) : NULL;
|
||||
}
|
||||
|
||||
/* This function is to find a node in the bs graph */
|
||||
BlockDriverState *bdrv_find_node(const char *node_name)
|
||||
{
|
||||
@@ -3926,14 +3793,6 @@ bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base)
|
||||
return top != NULL;
|
||||
}
|
||||
|
||||
BlockDriverState *bdrv_next_node(BlockDriverState *bs)
|
||||
{
|
||||
if (!bs) {
|
||||
return QTAILQ_FIRST(&graph_bdrv_states);
|
||||
}
|
||||
return QTAILQ_NEXT(bs, node_list);
|
||||
}
|
||||
|
||||
BlockDriverState *bdrv_next(BlockDriverState *bs)
|
||||
{
|
||||
if (!bs) {
|
||||
@@ -3942,11 +3801,6 @@ BlockDriverState *bdrv_next(BlockDriverState *bs)
|
||||
return QTAILQ_NEXT(bs, device_list);
|
||||
}
|
||||
|
||||
const char *bdrv_get_node_name(const BlockDriverState *bs)
|
||||
{
|
||||
return bs->node_name;
|
||||
}
|
||||
|
||||
/* TODO check what callers really want: bs->node_name or blk_name() */
|
||||
const char *bdrv_get_device_name(const BlockDriverState *bs)
|
||||
{
|
||||
@@ -4266,18 +4120,12 @@ int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
|
||||
const uint8_t *buf, int nb_sectors)
|
||||
{
|
||||
BlockDriver *drv = bs->drv;
|
||||
int ret;
|
||||
|
||||
if (!drv) {
|
||||
if (!drv)
|
||||
return -ENOMEDIUM;
|
||||
}
|
||||
if (!drv->bdrv_write_compressed) {
|
||||
if (!drv->bdrv_write_compressed)
|
||||
return -ENOTSUP;
|
||||
}
|
||||
ret = bdrv_check_request(bs, sector_num, nb_sectors);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
if (bdrv_check_request(bs, sector_num, nb_sectors))
|
||||
return -EIO;
|
||||
|
||||
assert(QLIST_EMPTY(&bs->dirty_bitmaps));
|
||||
|
||||
@@ -4652,8 +4500,6 @@ static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
|
||||
}
|
||||
}
|
||||
|
||||
block_acct_merge_done(&bs->stats, BLOCK_ACCT_WRITE, num_reqs - outidx - 1);
|
||||
|
||||
return outidx + 1;
|
||||
}
|
||||
|
||||
@@ -5189,18 +5035,20 @@ static void coroutine_fn bdrv_discard_co_entry(void *opaque)
|
||||
rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
|
||||
}
|
||||
|
||||
/* if no limit is specified in the BlockLimits use a default
|
||||
* of 32768 512-byte sectors (16 MiB) per request.
|
||||
*/
|
||||
#define MAX_DISCARD_DEFAULT 32768
|
||||
|
||||
int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
|
||||
int nb_sectors)
|
||||
{
|
||||
int max_discard, ret;
|
||||
int max_discard;
|
||||
|
||||
if (!bs->drv) {
|
||||
return -ENOMEDIUM;
|
||||
}
|
||||
|
||||
ret = bdrv_check_request(bs, sector_num, nb_sectors);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
} else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
|
||||
return -EIO;
|
||||
} else if (bs->read_only) {
|
||||
return -EROFS;
|
||||
}
|
||||
@@ -5216,7 +5064,7 @@ int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
|
||||
return 0;
|
||||
}
|
||||
|
||||
max_discard = MIN_NON_ZERO(bs->bl.max_discard, BDRV_REQUEST_MAX_SECTORS);
|
||||
max_discard = bs->bl.max_discard ? bs->bl.max_discard : MAX_DISCARD_DEFAULT;
|
||||
while (nb_sectors > 0) {
|
||||
int ret;
|
||||
int num = nb_sectors;
|
||||
@@ -5505,20 +5353,8 @@ void bdrv_dirty_iter_init(BlockDriverState *bs,
|
||||
hbitmap_iter_init(hbi, bitmap->bitmap, 0);
|
||||
}
|
||||
|
||||
void bdrv_set_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap,
|
||||
int64_t cur_sector, int nr_sectors)
|
||||
{
|
||||
hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors);
|
||||
}
|
||||
|
||||
void bdrv_reset_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap,
|
||||
int64_t cur_sector, int nr_sectors)
|
||||
{
|
||||
hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors);
|
||||
}
|
||||
|
||||
static void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
|
||||
int nr_sectors)
|
||||
void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
|
||||
int nr_sectors)
|
||||
{
|
||||
BdrvDirtyBitmap *bitmap;
|
||||
QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
|
||||
@@ -5526,8 +5362,7 @@ static void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
|
||||
}
|
||||
}
|
||||
|
||||
static void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
|
||||
int nr_sectors)
|
||||
void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors)
|
||||
{
|
||||
BdrvDirtyBitmap *bitmap;
|
||||
QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
|
||||
@@ -5692,8 +5527,9 @@ void bdrv_img_create(const char *filename, const char *fmt,
|
||||
return;
|
||||
}
|
||||
|
||||
proto_drv = bdrv_find_protocol(filename, true, errp);
|
||||
proto_drv = bdrv_find_protocol(filename, true);
|
||||
if (!proto_drv) {
|
||||
error_setg(errp, "Unknown protocol '%s'", filename);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -5714,22 +5550,18 @@ void bdrv_img_create(const char *filename, const char *fmt,
|
||||
|
||||
/* Create parameter list with default values */
|
||||
opts = qemu_opts_create(create_opts, NULL, 0, &error_abort);
|
||||
qemu_opt_set_number(opts, BLOCK_OPT_SIZE, img_size, &error_abort);
|
||||
qemu_opt_set_number(opts, BLOCK_OPT_SIZE, img_size);
|
||||
|
||||
/* Parse -o options */
|
||||
if (options) {
|
||||
qemu_opts_do_parse(opts, options, NULL, &local_err);
|
||||
if (local_err) {
|
||||
error_report_err(local_err);
|
||||
local_err = NULL;
|
||||
if (qemu_opts_do_parse(opts, options, NULL) != 0) {
|
||||
error_setg(errp, "Invalid options for file format '%s'", fmt);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
if (base_filename) {
|
||||
qemu_opt_set(opts, BLOCK_OPT_BACKING_FILE, base_filename, &local_err);
|
||||
if (local_err) {
|
||||
if (qemu_opt_set(opts, BLOCK_OPT_BACKING_FILE, base_filename)) {
|
||||
error_setg(errp, "Backing file not supported for file format '%s'",
|
||||
fmt);
|
||||
goto out;
|
||||
@@ -5737,8 +5569,7 @@ void bdrv_img_create(const char *filename, const char *fmt,
|
||||
}
|
||||
|
||||
if (base_fmt) {
|
||||
qemu_opt_set(opts, BLOCK_OPT_BACKING_FMT, base_fmt, &local_err);
|
||||
if (local_err) {
|
||||
if (qemu_opt_set(opts, BLOCK_OPT_BACKING_FMT, base_fmt)) {
|
||||
error_setg(errp, "Backing file format not supported for file "
|
||||
"format '%s'", fmt);
|
||||
goto out;
|
||||
@@ -5770,26 +5601,16 @@ void bdrv_img_create(const char *filename, const char *fmt,
|
||||
if (size == -1) {
|
||||
if (backing_file) {
|
||||
BlockDriverState *bs;
|
||||
char *full_backing = g_new0(char, PATH_MAX);
|
||||
int64_t size;
|
||||
int back_flags;
|
||||
|
||||
bdrv_get_full_backing_filename_from_filename(filename, backing_file,
|
||||
full_backing, PATH_MAX,
|
||||
&local_err);
|
||||
if (local_err) {
|
||||
g_free(full_backing);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* backing files always opened read-only */
|
||||
back_flags =
|
||||
flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
|
||||
|
||||
bs = NULL;
|
||||
ret = bdrv_open(&bs, full_backing, NULL, NULL, back_flags,
|
||||
ret = bdrv_open(&bs, backing_file, NULL, NULL, back_flags,
|
||||
backing_drv, &local_err);
|
||||
g_free(full_backing);
|
||||
if (ret < 0) {
|
||||
goto out;
|
||||
}
|
||||
@@ -5801,7 +5622,7 @@ void bdrv_img_create(const char *filename, const char *fmt,
|
||||
goto out;
|
||||
}
|
||||
|
||||
qemu_opt_set_number(opts, BLOCK_OPT_SIZE, size, &error_abort);
|
||||
qemu_opt_set_number(opts, BLOCK_OPT_SIZE, size);
|
||||
|
||||
bdrv_unref(bs);
|
||||
} else {
|
||||
@@ -5811,8 +5632,8 @@ void bdrv_img_create(const char *filename, const char *fmt,
|
||||
}
|
||||
|
||||
if (!quiet) {
|
||||
printf("Formatting '%s', fmt=%s", filename, fmt);
|
||||
qemu_opts_print(opts, " ");
|
||||
printf("Formatting '%s', fmt=%s ", filename, fmt);
|
||||
qemu_opts_print(opts);
|
||||
puts("");
|
||||
}
|
||||
|
||||
|
@@ -20,7 +20,6 @@ block-obj-$(CONFIG_GLUSTERFS) += gluster.o
block-obj-$(CONFIG_ARCHIPELAGO) += archipelago.o
block-obj-$(CONFIG_LIBSSH2) += ssh.o
block-obj-y += accounting.o
block-obj-y += write-threshold.o

common-obj-y += stream.o
common-obj-y += commit.o
@@ -37,6 +36,5 @@ gluster.o-libs := $(GLUSTERFS_LIBS)
ssh.o-cflags := $(LIBSSH2_CFLAGS)
ssh.o-libs := $(LIBSSH2_LIBS)
archipelago.o-libs := $(ARCHIPELAGO_LIBS)
dmg.o-libs := $(BZIP2_LIBS)
qcow.o-libs := -lz
linux-aio.o-libs := -laio

@@ -24,7 +24,6 @@

#include "block/accounting.h"
#include "block/block_int.h"
#include "qemu/timer.h"

void block_acct_start(BlockAcctStats *stats, BlockAcctCookie *cookie,
                      int64_t bytes, enum BlockAcctType type)
@@ -32,7 +31,7 @@ void block_acct_start(BlockAcctStats *stats, BlockAcctCookie *cookie,
    assert(type < BLOCK_MAX_IOTYPE);

    cookie->bytes = bytes;
    cookie->start_time_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    cookie->start_time_ns = get_clock();
    cookie->type = type;
}

@@ -42,8 +41,7 @@ void block_acct_done(BlockAcctStats *stats, BlockAcctCookie *cookie)

    stats->nr_bytes[cookie->type] += cookie->bytes;
    stats->nr_ops[cookie->type]++;
    stats->total_time_ns[cookie->type] +=
        qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - cookie->start_time_ns;
    stats->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
}


@@ -54,10 +52,3 @@ void block_acct_highest_sector(BlockAcctStats *stats, int64_t sector_num,
        stats->wr_highest_sector = sector_num + nb_sectors - 1;
    }
}

void block_acct_merge_done(BlockAcctStats *stats, enum BlockAcctType type,
                           int num_requests)
{
    assert(type < BLOCK_MAX_IOTYPE);
    stats->merged[type] += num_requests;
}

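The two variants of the accounting helpers above (what appears to be block/accounting.c in the tree) differ only in which clock source they read; the pattern is the same either way: stamp a start time into a per-request cookie when the request is issued, then add the elapsed time and byte count to running totals when it completes. A self-contained sketch of that pattern using plain POSIX clock_gettime() rather than QEMU's timer API (names and numbers are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

typedef struct {
    int64_t start_ns;          /* stamped when the request is issued */
    int64_t bytes;
} AcctCookie;

typedef struct {
    int64_t total_time_ns;     /* accumulated over completed requests */
    int64_t nr_bytes;
    int64_t nr_ops;
} AcctStats;

static int64_t now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static void acct_start(AcctCookie *c, int64_t bytes)
{
    c->start_ns = now_ns();
    c->bytes = bytes;
}

static void acct_done(AcctStats *s, const AcctCookie *c)
{
    s->total_time_ns += now_ns() - c->start_ns;
    s->nr_bytes += c->bytes;
    s->nr_ops++;
}

int main(void)
{
    AcctStats stats = {0};
    AcctCookie cookie;

    acct_start(&cookie, 4096);
    usleep(1000);                       /* stand-in for the actual I/O */
    acct_done(&stats, &cookie);

    printf("%lld ops, %lld bytes, %lld ns\n",
           (long long)stats.nr_ops, (long long)stats.nr_bytes,
           (long long)stats.total_time_ns);
    return 0;
}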
@@ -291,7 +291,7 @@ static int qemu_archipelago_init(BDRVArchipelagoState *s)

    ret = qemu_archipelago_xseg_init(s);
    if (ret < 0) {
        error_report("Cannot initialize XSEG. Aborting...");
        error_report("Cannot initialize XSEG. Aborting...\n");
        goto err_exit;
    }

@@ -645,7 +645,7 @@ static int qemu_archipelago_create_volume(Error **errp, const char *volname,

    target = xseg_get_target(xseg, req);
    if (!target) {
        error_setg(errp, "Cannot get XSEG target.");
        error_setg(errp, "Cannot get XSEG target.\n");
        goto err_exit;
    }
    memcpy(target, volname, targetlen);
@@ -889,7 +889,7 @@ static BlockAIOCB *qemu_archipelago_aio_rw(BlockDriverState *bs,
    return &aio_cb->common;

err_exit:
    error_report("qemu_archipelago_aio_rw(): I/O Error");
    error_report("qemu_archipelago_aio_rw(): I/O Error\n");
    qemu_aio_unref(aio_cb);
    return NULL;
}

@@ -360,7 +360,6 @@ static void coroutine_fn backup_run(void *opaque)
|
||||
hbitmap_free(job->bitmap);
|
||||
|
||||
bdrv_iostatus_disable(target);
|
||||
bdrv_op_unblock_all(target, job->common.blocker);
|
||||
|
||||
data = g_malloc(sizeof(*data));
|
||||
data->ret = ret;
|
||||
@@ -380,11 +379,6 @@ void backup_start(BlockDriverState *bs, BlockDriverState *target,
|
||||
assert(target);
|
||||
assert(cb);
|
||||
|
||||
if (bs == target) {
|
||||
error_setg(errp, "Source and target cannot be the same");
|
||||
return;
|
||||
}
|
||||
|
||||
if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
|
||||
on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
|
||||
!bdrv_iostatus_is_enabled(bs)) {
|
||||
@@ -392,26 +386,6 @@ void backup_start(BlockDriverState *bs, BlockDriverState *target,
|
||||
return;
|
||||
}
|
||||
|
||||
if (!bdrv_is_inserted(bs)) {
|
||||
error_setg(errp, "Device is not inserted: %s",
|
||||
bdrv_get_device_name(bs));
|
||||
return;
|
||||
}
|
||||
|
||||
if (!bdrv_is_inserted(target)) {
|
||||
error_setg(errp, "Device is not inserted: %s",
|
||||
bdrv_get_device_name(target));
|
||||
return;
|
||||
}
|
||||
|
||||
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
|
||||
return;
|
||||
}
|
||||
|
||||
len = bdrv_getlength(bs);
|
||||
if (len < 0) {
|
||||
error_setg_errno(errp, -len, "unable to get length for '%s'",
|
||||
@@ -425,8 +399,6 @@ void backup_start(BlockDriverState *bs, BlockDriverState *target,
|
||||
return;
|
||||
}
|
||||
|
||||
bdrv_op_block_all(target, job->common.blocker);
|
||||
|
||||
job->on_source_error = on_source_error;
|
||||
job->on_target_error = on_target_error;
|
||||
job->target = target;
|
||||
|
105
block/blkdebug.c
@@ -472,14 +472,12 @@ static BlockAIOCB *inject_error(BlockDriverState *bs,
|
||||
int error = rule->options.inject.error;
|
||||
struct BlkdebugAIOCB *acb;
|
||||
QEMUBH *bh;
|
||||
bool immediately = rule->options.inject.immediately;
|
||||
|
||||
if (rule->options.inject.once) {
|
||||
QSIMPLEQ_REMOVE(&s->active_rules, rule, BlkdebugRule, active_next);
|
||||
remove_rule(rule);
|
||||
QSIMPLEQ_INIT(&s->active_rules);
|
||||
}
|
||||
|
||||
if (immediately) {
|
||||
if (rule->options.inject.immediately) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@@ -723,50 +721,93 @@ static int64_t blkdebug_getlength(BlockDriverState *bs)
|
||||
|
||||
static void blkdebug_refresh_filename(BlockDriverState *bs)
|
||||
{
|
||||
BDRVBlkdebugState *s = bs->opaque;
|
||||
struct BlkdebugRule *rule;
|
||||
QDict *opts;
|
||||
const QDictEntry *e;
|
||||
bool force_json = false;
|
||||
QList *inject_error_list = NULL, *set_state_list = NULL;
|
||||
QList *suspend_list = NULL;
|
||||
int event;
|
||||
|
||||
for (e = qdict_first(bs->options); e; e = qdict_next(bs->options, e)) {
|
||||
if (strcmp(qdict_entry_key(e), "config") &&
|
||||
strcmp(qdict_entry_key(e), "x-image") &&
|
||||
strcmp(qdict_entry_key(e), "image") &&
|
||||
strncmp(qdict_entry_key(e), "image.", strlen("image.")))
|
||||
{
|
||||
force_json = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (force_json && !bs->file->full_open_options) {
|
||||
if (!bs->file->full_open_options) {
|
||||
/* The config file cannot be recreated, so creating a plain filename
|
||||
* is impossible */
|
||||
return;
|
||||
}
|
||||
|
||||
if (!force_json && bs->file->exact_filename[0]) {
|
||||
snprintf(bs->exact_filename, sizeof(bs->exact_filename),
|
||||
"blkdebug:%s:%s",
|
||||
qdict_get_try_str(bs->options, "config") ?: "",
|
||||
bs->file->exact_filename);
|
||||
}
|
||||
|
||||
opts = qdict_new();
|
||||
qdict_put_obj(opts, "driver", QOBJECT(qstring_from_str("blkdebug")));
|
||||
|
||||
QINCREF(bs->file->full_open_options);
|
||||
qdict_put_obj(opts, "image", QOBJECT(bs->file->full_open_options));
|
||||
|
||||
for (e = qdict_first(bs->options); e; e = qdict_next(bs->options, e)) {
|
||||
if (strcmp(qdict_entry_key(e), "x-image") &&
|
||||
strcmp(qdict_entry_key(e), "image") &&
|
||||
strncmp(qdict_entry_key(e), "image.", strlen("image.")))
|
||||
{
|
||||
qobject_incref(qdict_entry_value(e));
|
||||
qdict_put_obj(opts, qdict_entry_key(e), qdict_entry_value(e));
|
||||
for (event = 0; event < BLKDBG_EVENT_MAX; event++) {
|
||||
QLIST_FOREACH(rule, &s->rules[event], next) {
|
||||
if (rule->action == ACTION_INJECT_ERROR) {
|
||||
QDict *inject_error = qdict_new();
|
||||
|
||||
qdict_put_obj(inject_error, "event", QOBJECT(qstring_from_str(
|
||||
BlkdebugEvent_lookup[rule->event])));
|
||||
qdict_put_obj(inject_error, "state",
|
||||
QOBJECT(qint_from_int(rule->state)));
|
||||
qdict_put_obj(inject_error, "errno", QOBJECT(qint_from_int(
|
||||
rule->options.inject.error)));
|
||||
qdict_put_obj(inject_error, "sector", QOBJECT(qint_from_int(
|
||||
rule->options.inject.sector)));
|
||||
qdict_put_obj(inject_error, "once", QOBJECT(qbool_from_int(
|
||||
rule->options.inject.once)));
|
||||
qdict_put_obj(inject_error, "immediately",
|
||||
QOBJECT(qbool_from_int(
|
||||
rule->options.inject.immediately)));
|
||||
|
||||
if (!inject_error_list) {
|
||||
inject_error_list = qlist_new();
|
||||
}
|
||||
|
||||
qlist_append_obj(inject_error_list, QOBJECT(inject_error));
|
||||
} else if (rule->action == ACTION_SET_STATE) {
|
||||
QDict *set_state = qdict_new();
|
||||
|
||||
qdict_put_obj(set_state, "event", QOBJECT(qstring_from_str(
|
||||
BlkdebugEvent_lookup[rule->event])));
|
||||
qdict_put_obj(set_state, "state",
|
||||
QOBJECT(qint_from_int(rule->state)));
|
||||
qdict_put_obj(set_state, "new_state", QOBJECT(qint_from_int(
|
||||
rule->options.set_state.new_state)));
|
||||
|
||||
if (!set_state_list) {
|
||||
set_state_list = qlist_new();
|
||||
}
|
||||
|
||||
qlist_append_obj(set_state_list, QOBJECT(set_state));
|
||||
} else if (rule->action == ACTION_SUSPEND) {
|
||||
QDict *suspend = qdict_new();
|
||||
|
||||
qdict_put_obj(suspend, "event", QOBJECT(qstring_from_str(
|
||||
BlkdebugEvent_lookup[rule->event])));
|
||||
qdict_put_obj(suspend, "state",
|
||||
QOBJECT(qint_from_int(rule->state)));
|
||||
qdict_put_obj(suspend, "tag", QOBJECT(qstring_from_str(
|
||||
rule->options.suspend.tag)));
|
||||
|
||||
if (!suspend_list) {
|
||||
suspend_list = qlist_new();
|
||||
}
|
||||
|
||||
qlist_append_obj(suspend_list, QOBJECT(suspend));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (inject_error_list) {
|
||||
qdict_put_obj(opts, "inject-error", QOBJECT(inject_error_list));
|
||||
}
|
||||
if (set_state_list) {
|
||||
qdict_put_obj(opts, "set-state", QOBJECT(set_state_list));
|
||||
}
|
||||
if (suspend_list) {
|
||||
qdict_put_obj(opts, "suspend", QOBJECT(suspend_list));
|
||||
}
|
||||
|
||||
bs->full_open_options = opts;
|
||||
}
|
||||
|
||||
|
@@ -31,16 +31,6 @@ struct BlockBackend {
|
||||
void *dev_opaque;
|
||||
};
|
||||
|
||||
typedef struct BlockBackendAIOCB {
|
||||
BlockAIOCB common;
|
||||
QEMUBH *bh;
|
||||
int ret;
|
||||
} BlockBackendAIOCB;
|
||||
|
||||
static const AIOCBInfo block_backend_aiocb_info = {
|
||||
.aiocb_size = sizeof(BlockBackendAIOCB),
|
||||
};
|
||||
|
||||
static void drive_info_del(DriveInfo *dinfo);
|
||||
|
||||
/* All the BlockBackends (except for hidden ones) */
|
||||
@@ -101,40 +91,6 @@ BlockBackend *blk_new_with_bs(const char *name, Error **errp)
|
||||
return blk;
|
||||
}
|
||||
|
||||
/*
|
||||
* Calls blk_new_with_bs() and then calls bdrv_open() on the BlockDriverState.
|
||||
*
|
||||
* Just as with bdrv_open(), after having called this function the reference to
|
||||
* @options belongs to the block layer (even on failure).
|
||||
*
|
||||
* TODO: Remove @filename and @flags; it should be possible to specify a whole
|
||||
* BDS tree just by specifying the @options QDict (or @reference,
|
||||
* alternatively). At the time of adding this function, this is not possible,
|
||||
* though, so callers of this function have to be able to specify @filename and
|
||||
* @flags.
|
||||
*/
|
||||
BlockBackend *blk_new_open(const char *name, const char *filename,
|
||||
const char *reference, QDict *options, int flags,
|
||||
Error **errp)
|
||||
{
|
||||
BlockBackend *blk;
|
||||
int ret;
|
||||
|
||||
blk = blk_new_with_bs(name, errp);
|
||||
if (!blk) {
|
||||
QDECREF(options);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
ret = bdrv_open(&blk->bs, filename, reference, options, flags, NULL, errp);
|
||||
if (ret < 0) {
|
||||
blk_unref(blk);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return blk;
|
||||
}
|
||||
|
||||
static void blk_delete(BlockBackend *blk)
|
||||
{
|
||||
assert(!blk->refcnt);
|
||||
@@ -145,7 +101,7 @@ static void blk_delete(BlockBackend *blk)
|
||||
bdrv_unref(blk->bs);
|
||||
blk->bs = NULL;
|
||||
}
|
||||
/* Avoid double-remove after blk_hide_on_behalf_of_hmp_drive_del() */
|
||||
/* Avoid double-remove after blk_hide_on_behalf_of_do_drive_del() */
|
||||
if (blk->name[0]) {
|
||||
QTAILQ_REMOVE(&blk_backends, blk, link);
|
||||
}
|
||||
@@ -206,7 +162,7 @@ BlockBackend *blk_next(BlockBackend *blk)
|
||||
/*
|
||||
* Return @blk's name, a non-null string.
|
||||
* Wart: the name is empty iff @blk has been hidden with
|
||||
* blk_hide_on_behalf_of_hmp_drive_del().
|
||||
* blk_hide_on_behalf_of_do_drive_del().
|
||||
*/
|
||||
const char *blk_name(BlockBackend *blk)
|
||||
{
|
||||
@@ -282,7 +238,7 @@ BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
|
||||
* Strictly for use by do_drive_del().
|
||||
* TODO get rid of it!
|
||||
*/
|
||||
void blk_hide_on_behalf_of_hmp_drive_del(BlockBackend *blk)
|
||||
void blk_hide_on_behalf_of_do_drive_del(BlockBackend *blk)
|
||||
{
|
||||
QTAILQ_REMOVE(&blk_backends, blk, link);
|
||||
blk->name[0] = 0;
|
||||
@@ -304,6 +260,9 @@ int blk_attach_dev(BlockBackend *blk, void *dev)
|
||||
blk_ref(blk);
|
||||
blk->dev = dev;
|
||||
bdrv_iostatus_reset(blk->bs);
|
||||
|
||||
/* We're expecting I/O from the device so bump up coroutine pool size */
|
||||
qemu_coroutine_adjust_pool_size(COROUTINE_POOL_RESERVATION);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -331,6 +290,7 @@ void blk_detach_dev(BlockBackend *blk, void *dev)
|
||||
blk->dev_ops = NULL;
|
||||
blk->dev_opaque = NULL;
|
||||
bdrv_set_guest_block_size(blk->bs, 512);
|
||||
qemu_coroutine_adjust_pool_size(-COROUTINE_POOL_RESERVATION);
|
||||
blk_unref(blk);
|
||||
}
|
||||
|
||||
@@ -438,137 +398,39 @@ void blk_iostatus_enable(BlockBackend *blk)
|
||||
bdrv_iostatus_enable(blk->bs);
|
||||
}
|
||||
|
||||
static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
|
||||
size_t size)
|
||||
{
|
||||
int64_t len;
|
||||
|
||||
if (size > INT_MAX) {
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (!blk_is_inserted(blk)) {
|
||||
return -ENOMEDIUM;
|
||||
}
|
||||
|
||||
len = blk_getlength(blk);
|
||||
if (len < 0) {
|
||||
return len;
|
||||
}
|
||||
|
||||
if (offset < 0) {
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (offset > len || len - offset < size) {
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int blk_check_request(BlockBackend *blk, int64_t sector_num,
|
||||
int nb_sectors)
|
||||
{
|
||||
if (sector_num < 0 || sector_num > INT64_MAX / BDRV_SECTOR_SIZE) {
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
return blk_check_byte_request(blk, sector_num * BDRV_SECTOR_SIZE,
|
||||
nb_sectors * BDRV_SECTOR_SIZE);
|
||||
}
|
||||
|
||||
int blk_read(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
|
||||
int nb_sectors)
|
||||
{
|
||||
int ret = blk_check_request(blk, sector_num, nb_sectors);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
return bdrv_read(blk->bs, sector_num, buf, nb_sectors);
|
||||
}
|
||||
|
||||
int blk_read_unthrottled(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
|
||||
int nb_sectors)
|
||||
{
|
||||
int ret = blk_check_request(blk, sector_num, nb_sectors);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
return bdrv_read_unthrottled(blk->bs, sector_num, buf, nb_sectors);
|
||||
}
|
||||
|
||||
int blk_write(BlockBackend *blk, int64_t sector_num, const uint8_t *buf,
|
||||
int nb_sectors)
|
||||
{
|
||||
int ret = blk_check_request(blk, sector_num, nb_sectors);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
return bdrv_write(blk->bs, sector_num, buf, nb_sectors);
|
||||
}
|
||||
|
||||
static void error_callback_bh(void *opaque)
|
||||
{
|
||||
struct BlockBackendAIOCB *acb = opaque;
|
||||
qemu_bh_delete(acb->bh);
|
||||
acb->common.cb(acb->common.opaque, acb->ret);
|
||||
qemu_aio_unref(acb);
|
||||
}
|
||||
|
||||
static BlockAIOCB *abort_aio_request(BlockBackend *blk, BlockCompletionFunc *cb,
|
||||
void *opaque, int ret)
|
||||
{
|
||||
struct BlockBackendAIOCB *acb;
|
||||
QEMUBH *bh;
|
||||
|
||||
acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
|
||||
acb->ret = ret;
|
||||
|
||||
bh = aio_bh_new(blk_get_aio_context(blk), error_callback_bh, acb);
|
||||
acb->bh = bh;
|
||||
qemu_bh_schedule(bh);
|
||||
|
||||
return &acb->common;
|
||||
}
|
||||
|
||||
BlockAIOCB *blk_aio_write_zeroes(BlockBackend *blk, int64_t sector_num,
|
||||
int nb_sectors, BdrvRequestFlags flags,
|
||||
BlockCompletionFunc *cb, void *opaque)
|
||||
{
|
||||
int ret = blk_check_request(blk, sector_num, nb_sectors);
|
||||
if (ret < 0) {
|
||||
return abort_aio_request(blk, cb, opaque, ret);
|
||||
}
|
||||
|
||||
return bdrv_aio_write_zeroes(blk->bs, sector_num, nb_sectors, flags,
|
||||
cb, opaque);
|
||||
}
|
||||
|
||||
int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
|
||||
{
|
||||
int ret = blk_check_byte_request(blk, offset, count);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
return bdrv_pread(blk->bs, offset, buf, count);
|
||||
}
|
||||
|
||||
int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count)
|
||||
{
|
||||
int ret = blk_check_byte_request(blk, offset, count);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
return bdrv_pwrite(blk->bs, offset, buf, count);
|
||||
}
|
||||
|
||||
@@ -582,20 +444,10 @@ void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
|
||||
bdrv_get_geometry(blk->bs, nb_sectors_ptr);
|
||||
}
|
||||
|
||||
int64_t blk_nb_sectors(BlockBackend *blk)
|
||||
{
|
||||
return bdrv_nb_sectors(blk->bs);
|
||||
}
|
||||
|
||||
BlockAIOCB *blk_aio_readv(BlockBackend *blk, int64_t sector_num,
|
||||
QEMUIOVector *iov, int nb_sectors,
|
||||
BlockCompletionFunc *cb, void *opaque)
|
||||
{
|
||||
int ret = blk_check_request(blk, sector_num, nb_sectors);
|
||||
if (ret < 0) {
|
||||
return abort_aio_request(blk, cb, opaque, ret);
|
||||
}
|
||||
|
||||
return bdrv_aio_readv(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
|
||||
}
|
||||
|
||||
@@ -603,11 +455,6 @@ BlockAIOCB *blk_aio_writev(BlockBackend *blk, int64_t sector_num,
|
||||
QEMUIOVector *iov, int nb_sectors,
|
||||
BlockCompletionFunc *cb, void *opaque)
|
||||
{
|
||||
int ret = blk_check_request(blk, sector_num, nb_sectors);
|
||||
if (ret < 0) {
|
||||
return abort_aio_request(blk, cb, opaque, ret);
|
||||
}
|
||||
|
||||
return bdrv_aio_writev(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
|
||||
}
|
||||
|
||||
@@ -621,11 +468,6 @@ BlockAIOCB *blk_aio_discard(BlockBackend *blk,
|
||||
int64_t sector_num, int nb_sectors,
|
||||
BlockCompletionFunc *cb, void *opaque)
|
||||
{
|
||||
int ret = blk_check_request(blk, sector_num, nb_sectors);
|
||||
if (ret < 0) {
|
||||
return abort_aio_request(blk, cb, opaque, ret);
|
||||
}
|
||||
|
||||
return bdrv_aio_discard(blk->bs, sector_num, nb_sectors, cb, opaque);
|
||||
}
|
||||
|
||||
@@ -641,15 +483,6 @@ void blk_aio_cancel_async(BlockAIOCB *acb)
|
||||
|
||||
int blk_aio_multiwrite(BlockBackend *blk, BlockRequest *reqs, int num_reqs)
|
||||
{
|
||||
int i, ret;
|
||||
|
||||
for (i = 0; i < num_reqs; i++) {
|
||||
ret = blk_check_request(blk, reqs[i].sector, reqs[i].nb_sectors);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return bdrv_aio_multiwrite(blk->bs, reqs, num_reqs);
|
||||
}
|
||||
|
||||
@@ -664,21 +497,6 @@ BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
|
||||
return bdrv_aio_ioctl(blk->bs, req, buf, cb, opaque);
|
||||
}
|
||||
|
||||
int blk_co_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
|
||||
{
|
||||
int ret = blk_check_request(blk, sector_num, nb_sectors);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
return bdrv_co_discard(blk->bs, sector_num, nb_sectors);
|
||||
}
|
||||
|
||||
int blk_co_flush(BlockBackend *blk)
|
||||
{
|
||||
return bdrv_co_flush(blk->bs);
|
||||
}
|
||||
|
||||
int blk_flush(BlockBackend *blk)
|
||||
{
|
||||
return bdrv_flush(blk->bs);
|
||||
@@ -731,11 +549,6 @@ void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
|
||||
bdrv_set_enable_write_cache(blk->bs, wce);
|
||||
}
|
||||
|
||||
void blk_invalidate_cache(BlockBackend *blk, Error **errp)
|
||||
{
|
||||
bdrv_invalidate_cache(blk->bs, errp);
|
||||
}
|
||||
|
||||
int blk_is_inserted(BlockBackend *blk)
|
||||
{
|
||||
return bdrv_is_inserted(blk->bs);
|
||||
@@ -756,11 +569,6 @@ int blk_get_flags(BlockBackend *blk)
|
||||
return bdrv_get_flags(blk->bs);
|
||||
}
|
||||
|
||||
int blk_get_max_transfer_length(BlockBackend *blk)
|
||||
{
|
||||
return blk->bs->bl.max_transfer_length;
|
||||
}
|
||||
|
||||
void blk_set_guest_block_size(BlockBackend *blk, int align)
|
||||
{
|
||||
bdrv_set_guest_block_size(blk->bs, align);
|
||||
@@ -801,29 +609,6 @@ void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
|
||||
bdrv_set_aio_context(blk->bs, new_context);
|
||||
}
|
||||
|
||||
void blk_add_aio_context_notifier(BlockBackend *blk,
|
||||
void (*attached_aio_context)(AioContext *new_context, void *opaque),
|
||||
void (*detach_aio_context)(void *opaque), void *opaque)
|
||||
{
|
||||
bdrv_add_aio_context_notifier(blk->bs, attached_aio_context,
|
||||
detach_aio_context, opaque);
|
||||
}
|
||||
|
||||
void blk_remove_aio_context_notifier(BlockBackend *blk,
|
||||
void (*attached_aio_context)(AioContext *,
|
||||
void *),
|
||||
void (*detach_aio_context)(void *),
|
||||
void *opaque)
|
||||
{
|
||||
bdrv_remove_aio_context_notifier(blk->bs, attached_aio_context,
|
||||
detach_aio_context, opaque);
|
||||
}
|
||||
|
||||
void blk_add_close_notifier(BlockBackend *blk, Notifier *notify)
|
||||
{
|
||||
bdrv_add_close_notifier(blk->bs, notify);
|
||||
}
|
||||
|
||||
void blk_io_plug(BlockBackend *blk)
|
||||
{
|
||||
bdrv_io_plug(blk->bs);
|
||||
@@ -844,61 +629,3 @@ void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
|
||||
{
|
||||
return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
|
||||
}
|
||||
|
||||
int coroutine_fn blk_co_write_zeroes(BlockBackend *blk, int64_t sector_num,
|
||||
int nb_sectors, BdrvRequestFlags flags)
|
||||
{
|
||||
int ret = blk_check_request(blk, sector_num, nb_sectors);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
return bdrv_co_write_zeroes(blk->bs, sector_num, nb_sectors, flags);
|
||||
}
|
||||
|
||||
int blk_write_compressed(BlockBackend *blk, int64_t sector_num,
|
||||
const uint8_t *buf, int nb_sectors)
|
||||
{
|
||||
int ret = blk_check_request(blk, sector_num, nb_sectors);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
return bdrv_write_compressed(blk->bs, sector_num, buf, nb_sectors);
|
||||
}
|
||||
|
||||
int blk_truncate(BlockBackend *blk, int64_t offset)
|
||||
{
|
||||
return bdrv_truncate(blk->bs, offset);
|
||||
}
|
||||
|
||||
int blk_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
|
||||
{
|
||||
int ret = blk_check_request(blk, sector_num, nb_sectors);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
return bdrv_discard(blk->bs, sector_num, nb_sectors);
|
||||
}
|
||||
|
||||
int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
|
||||
int64_t pos, int size)
|
||||
{
|
||||
return bdrv_save_vmstate(blk->bs, buf, pos, size);
|
||||
}
|
||||
|
||||
int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
|
||||
{
|
||||
return bdrv_load_vmstate(blk->bs, buf, pos, size);
|
||||
}
|
||||
|
||||
int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
|
||||
{
|
||||
return bdrv_probe_blocksizes(blk->bs, bsz);
|
||||
}
|
||||
|
||||
int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
|
||||
{
|
||||
return bdrv_probe_geometry(blk->bs, geo);
|
||||
}
|
||||
|
506
block/dmg.c
@@ -26,10 +26,6 @@
|
||||
#include "qemu/bswap.h"
|
||||
#include "qemu/module.h"
|
||||
#include <zlib.h>
|
||||
#ifdef CONFIG_BZIP2
|
||||
#include <bzlib.h>
|
||||
#endif
|
||||
#include <glib.h>
|
||||
|
||||
enum {
|
||||
/* Limit chunk sizes to prevent unreasonable amounts of memory being used
|
||||
@@ -59,9 +55,6 @@ typedef struct BDRVDMGState {
|
||||
uint8_t *compressed_chunk;
|
||||
uint8_t *uncompressed_chunk;
|
||||
z_stream zstream;
|
||||
#ifdef CONFIG_BZIP2
|
||||
bz_stream bzstream;
|
||||
#endif
|
||||
} BDRVDMGState;
|
||||
|
||||
static int dmg_probe(const uint8_t *buf, int buf_size, const char *filename)
|
||||
@@ -107,16 +100,6 @@ static int read_uint32(BlockDriverState *bs, int64_t offset, uint32_t *result)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline uint64_t buff_read_uint64(const uint8_t *buffer, int64_t offset)
|
||||
{
|
||||
return be64_to_cpu(*(uint64_t *)&buffer[offset]);
|
||||
}
|
||||
|
||||
static inline uint32_t buff_read_uint32(const uint8_t *buffer, int64_t offset)
|
||||
{
|
||||
return be32_to_cpu(*(uint32_t *)&buffer[offset]);
|
||||
}
|
||||
|
||||
/* Increase max chunk sizes, if necessary. This function is used to calculate
|
||||
* the buffer sizes needed for compressed/uncompressed chunk I/O.
|
||||
*/
|
||||
@@ -129,7 +112,6 @@ static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,
|
||||
|
||||
switch (s->types[chunk]) {
|
||||
case 0x80000005: /* zlib compressed */
|
||||
case 0x80000006: /* bzip2 compressed */
|
||||
compressed_size = s->lengths[chunk];
|
||||
uncompressed_sectors = s->sectorcounts[chunk];
|
||||
break;
|
||||
@@ -137,9 +119,7 @@ static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,
|
||||
uncompressed_sectors = (s->lengths[chunk] + 511) / 512;
|
||||
break;
|
||||
case 2: /* zero */
|
||||
/* as the all-zeroes block may be large, it is treated specially: the
|
||||
* sector is not copied from a large buffer, a simple memset is used
|
||||
* instead. Therefore uncompressed_sectors does not need to be set. */
|
||||
uncompressed_sectors = s->sectorcounts[chunk];
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -151,372 +131,163 @@ static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,
|
||||
}
|
||||
}
|
||||
|
||||
static int64_t dmg_find_koly_offset(BlockDriverState *file_bs, Error **errp)
|
||||
{
|
||||
int64_t length;
|
||||
int64_t offset = 0;
|
||||
uint8_t buffer[515];
|
||||
int i, ret;
|
||||
|
||||
/* bdrv_getlength returns a multiple of block size (512), rounded up. Since
|
||||
* dmg images can have odd sizes, try to look for the "koly" magic which
|
||||
* marks the begin of the UDIF trailer (512 bytes). This magic can be found
|
||||
* in the last 511 bytes of the second-last sector or the first 4 bytes of
|
||||
* the last sector (search space: 515 bytes) */
|
||||
length = bdrv_getlength(file_bs);
|
||||
if (length < 0) {
|
||||
error_setg_errno(errp, -length,
|
||||
"Failed to get file size while reading UDIF trailer");
|
||||
return length;
|
||||
} else if (length < 512) {
|
||||
error_setg(errp, "dmg file must be at least 512 bytes long");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (length > 511 + 512) {
|
||||
offset = length - 511 - 512;
|
||||
}
|
||||
length = length < 515 ? length : 515;
|
||||
ret = bdrv_pread(file_bs, offset, buffer, length);
|
||||
if (ret < 0) {
|
||||
error_setg_errno(errp, -ret, "Failed while reading UDIF trailer");
|
||||
return ret;
|
||||
}
|
||||
for (i = 0; i < length - 3; i++) {
|
||||
if (buffer[i] == 'k' && buffer[i+1] == 'o' &&
|
||||
buffer[i+2] == 'l' && buffer[i+3] == 'y') {
|
||||
return offset + i;
|
||||
}
|
||||
}
|
||||
error_setg(errp, "Could not locate UDIF trailer in dmg file");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* used when building the sector table */
|
||||
typedef struct DmgHeaderState {
|
||||
/* used internally by dmg_read_mish_block to remember offsets of blocks
|
||||
* across calls */
|
||||
uint64_t data_fork_offset;
|
||||
/* exported for dmg_open */
|
||||
uint32_t max_compressed_size;
|
||||
uint32_t max_sectors_per_chunk;
|
||||
} DmgHeaderState;
|
||||
|
||||
static bool dmg_is_known_block_type(uint32_t entry_type)
|
||||
{
|
||||
switch (entry_type) {
|
||||
case 0x00000001: /* uncompressed */
|
||||
case 0x00000002: /* zeroes */
|
||||
case 0x80000005: /* zlib */
|
||||
#ifdef CONFIG_BZIP2
|
||||
case 0x80000006: /* bzip2 */
|
||||
#endif
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
static int dmg_read_mish_block(BDRVDMGState *s, DmgHeaderState *ds,
|
||||
uint8_t *buffer, uint32_t count)
|
||||
{
|
||||
uint32_t type, i;
|
||||
int ret;
|
||||
size_t new_size;
|
||||
uint32_t chunk_count;
|
||||
int64_t offset = 0;
|
||||
uint64_t data_offset;
|
||||
uint64_t in_offset = ds->data_fork_offset;
|
||||
uint64_t out_offset;
|
||||
|
||||
type = buff_read_uint32(buffer, offset);
|
||||
/* skip data that is not a valid MISH block (invalid magic or too small) */
|
||||
if (type != 0x6d697368 || count < 244) {
|
||||
/* assume success for now */
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* chunk offsets are relative to this sector number */
|
||||
out_offset = buff_read_uint64(buffer, offset + 8);
|
||||
|
||||
/* location in data fork for (compressed) blob (in bytes) */
|
||||
data_offset = buff_read_uint64(buffer, offset + 0x18);
|
||||
in_offset += data_offset;
|
||||
|
||||
/* move to begin of chunk entries */
|
||||
offset += 204;
|
||||
|
||||
chunk_count = (count - 204) / 40;
|
||||
new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
|
||||
s->types = g_realloc(s->types, new_size / 2);
|
||||
s->offsets = g_realloc(s->offsets, new_size);
|
||||
s->lengths = g_realloc(s->lengths, new_size);
|
||||
s->sectors = g_realloc(s->sectors, new_size);
|
||||
s->sectorcounts = g_realloc(s->sectorcounts, new_size);
|
||||
|
||||
for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) {
|
||||
s->types[i] = buff_read_uint32(buffer, offset);
|
||||
if (!dmg_is_known_block_type(s->types[i])) {
|
||||
chunk_count--;
|
||||
i--;
|
||||
offset += 40;
|
||||
continue;
|
||||
}
|
||||
|
||||
/* sector number */
|
||||
s->sectors[i] = buff_read_uint64(buffer, offset + 8);
|
||||
s->sectors[i] += out_offset;
|
||||
|
||||
/* sector count */
|
||||
s->sectorcounts[i] = buff_read_uint64(buffer, offset + 0x10);
|
||||
|
||||
/* all-zeroes sector (type 2) does not need to be "uncompressed" and can
|
||||
* therefore be unbounded. */
|
||||
if (s->types[i] != 2 && s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) {
|
||||
error_report("sector count %" PRIu64 " for chunk %" PRIu32
|
||||
" is larger than max (%u)",
|
||||
s->sectorcounts[i], i, DMG_SECTORCOUNTS_MAX);
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* offset in (compressed) data fork */
|
||||
s->offsets[i] = buff_read_uint64(buffer, offset + 0x18);
|
||||
s->offsets[i] += in_offset;
|
||||
|
||||
/* length in (compressed) data fork */
|
||||
s->lengths[i] = buff_read_uint64(buffer, offset + 0x20);
|
||||
|
||||
if (s->lengths[i] > DMG_LENGTHS_MAX) {
|
||||
error_report("length %" PRIu64 " for chunk %" PRIu32
|
||||
" is larger than max (%u)",
|
||||
s->lengths[i], i, DMG_LENGTHS_MAX);
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
update_max_chunk_size(s, i, &ds->max_compressed_size,
|
||||
&ds->max_sectors_per_chunk);
|
||||
offset += 40;
|
||||
}
|
||||
s->n_chunks += chunk_count;
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int dmg_read_resource_fork(BlockDriverState *bs, DmgHeaderState *ds,
|
||||
uint64_t info_begin, uint64_t info_length)
|
||||
{
|
||||
BDRVDMGState *s = bs->opaque;
|
||||
int ret;
|
||||
uint32_t count, rsrc_data_offset;
|
||||
uint8_t *buffer = NULL;
|
||||
uint64_t info_end;
|
||||
uint64_t offset;
|
||||
|
||||
/* read offset from begin of resource fork (info_begin) to resource data */
|
||||
ret = read_uint32(bs, info_begin, &rsrc_data_offset);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
} else if (rsrc_data_offset > info_length) {
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* read length of resource data */
|
||||
ret = read_uint32(bs, info_begin + 8, &count);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
} else if (count == 0 || rsrc_data_offset + count > info_length) {
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* begin of resource data (consisting of one or more resources) */
|
||||
offset = info_begin + rsrc_data_offset;
|
||||
|
||||
/* end of resource data (there is possibly a following resource map
|
||||
* which will be ignored). */
|
||||
info_end = offset + count;
|
||||
|
||||
/* read offsets (mish blocks) from one or more resources in resource data */
|
||||
while (offset < info_end) {
|
||||
/* size of following resource */
|
||||
ret = read_uint32(bs, offset, &count);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
} else if (count == 0 || count > info_end - offset) {
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
offset += 4;
|
||||
|
||||
buffer = g_realloc(buffer, count);
|
||||
ret = bdrv_pread(bs->file, offset, buffer, count);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
|
||||
ret = dmg_read_mish_block(s, ds, buffer, count);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
/* advance offset by size of resource */
|
||||
offset += count;
|
||||
}
|
||||
ret = 0;
|
||||
|
||||
fail:
|
||||
g_free(buffer);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int dmg_read_plist_xml(BlockDriverState *bs, DmgHeaderState *ds,
|
||||
uint64_t info_begin, uint64_t info_length)
|
||||
{
|
||||
BDRVDMGState *s = bs->opaque;
|
||||
int ret;
|
||||
uint8_t *buffer = NULL;
|
||||
char *data_begin, *data_end;
|
||||
|
||||
/* Have at least some length to avoid NULL for g_malloc. Attempt to set a
|
||||
* safe upper cap on the data length. A test sample had a XML length of
|
||||
* about 1 MiB. */
|
||||
if (info_length == 0 || info_length > 16 * 1024 * 1024) {
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
buffer = g_malloc(info_length + 1);
|
||||
buffer[info_length] = '\0';
|
||||
ret = bdrv_pread(bs->file, info_begin, buffer, info_length);
|
||||
if (ret != info_length) {
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* look for <data>...</data>. The data is 284 (0x11c) bytes after base64
|
||||
* decode. The actual data element has 431 (0x1af) bytes which includes tabs
|
||||
* and line feeds. */
|
||||
data_end = (char *)buffer;
|
||||
while ((data_begin = strstr(data_end, "<data>")) != NULL) {
|
||||
guchar *mish;
|
||||
gsize out_len = 0;
|
||||
|
||||
data_begin += 6;
|
||||
data_end = strstr(data_begin, "</data>");
|
||||
/* malformed XML? */
|
||||
if (data_end == NULL) {
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
*data_end++ = '\0';
|
||||
mish = g_base64_decode(data_begin, &out_len);
|
||||
ret = dmg_read_mish_block(s, ds, mish, (uint32_t)out_len);
|
||||
g_free(mish);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
ret = 0;
|
||||
|
||||
fail:
|
||||
g_free(buffer);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
Error **errp)
|
||||
{
|
||||
BDRVDMGState *s = bs->opaque;
|
||||
DmgHeaderState ds;
|
||||
uint64_t rsrc_fork_offset, rsrc_fork_length;
|
||||
uint64_t plist_xml_offset, plist_xml_length;
|
||||
uint64_t info_begin, info_end, last_in_offset, last_out_offset;
|
||||
uint32_t count, tmp;
|
||||
uint32_t max_compressed_size = 1, max_sectors_per_chunk = 1, i;
|
||||
int64_t offset;
|
||||
int ret;
|
||||
|
||||
bs->read_only = 1;
|
||||
s->n_chunks = 0;
|
||||
s->offsets = s->lengths = s->sectors = s->sectorcounts = NULL;
|
||||
/* used by dmg_read_mish_block to keep track of the current I/O position */
|
||||
ds.data_fork_offset = 0;
|
||||
ds.max_compressed_size = 1;
|
||||
ds.max_sectors_per_chunk = 1;
|
||||
|
||||
/* locate the UDIF trailer */
|
||||
offset = dmg_find_koly_offset(bs->file, errp);
|
||||
/* read offset of info blocks */
|
||||
offset = bdrv_getlength(bs->file);
|
||||
if (offset < 0) {
|
||||
ret = offset;
|
||||
goto fail;
|
||||
}
|
||||
offset -= 0x1d8;
|
||||
|
||||
/* offset of data fork (DataForkOffset) */
|
||||
ret = read_uint64(bs, offset + 0x18, &ds.data_fork_offset);
|
||||
ret = read_uint64(bs, offset, &info_begin);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
} else if (ds.data_fork_offset > offset) {
|
||||
} else if (info_begin == 0) {
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* offset of resource fork (RsrcForkOffset) */
|
||||
ret = read_uint64(bs, offset + 0x28, &rsrc_fork_offset);
|
||||
ret = read_uint32(bs, info_begin, &tmp);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
ret = read_uint64(bs, offset + 0x30, &rsrc_fork_length);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
if (rsrc_fork_offset >= offset ||
|
||||
rsrc_fork_length > offset - rsrc_fork_offset) {
|
||||
} else if (tmp != 0x100) {
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
/* offset of property list (XMLOffset) */
|
||||
ret = read_uint64(bs, offset + 0xd8, &plist_xml_offset);
|
||||
|
||||
ret = read_uint32(bs, info_begin + 4, &count);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
ret = read_uint64(bs, offset + 0xe0, &plist_xml_length);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
if (plist_xml_offset >= offset ||
|
||||
plist_xml_length > offset - plist_xml_offset) {
|
||||
} else if (count == 0) {
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
ret = read_uint64(bs, offset + 0x1ec, (uint64_t *)&bs->total_sectors);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
if (bs->total_sectors < 0) {
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
if (rsrc_fork_length != 0) {
|
||||
ret = dmg_read_resource_fork(bs, &ds,
|
||||
rsrc_fork_offset, rsrc_fork_length);
|
||||
info_end = info_begin + count;
|
||||
|
||||
offset = info_begin + 0x100;
|
||||
|
||||
/* read offsets */
|
||||
last_in_offset = last_out_offset = 0;
|
||||
while (offset < info_end) {
|
||||
uint32_t type;
|
||||
|
||||
ret = read_uint32(bs, offset, &count);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
} else if (count == 0) {
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
offset += 4;
|
||||
|
||||
ret = read_uint32(bs, offset, &type);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
} else if (plist_xml_length != 0) {
|
||||
ret = dmg_read_plist_xml(bs, &ds, plist_xml_offset, plist_xml_length);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
|
||||
if (type == 0x6d697368 && count >= 244) {
|
||||
size_t new_size;
|
||||
uint32_t chunk_count;
|
||||
|
||||
offset += 4;
|
||||
offset += 200;
|
||||
|
||||
chunk_count = (count - 204) / 40;
|
||||
new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
|
||||
s->types = g_realloc(s->types, new_size / 2);
|
||||
s->offsets = g_realloc(s->offsets, new_size);
|
||||
s->lengths = g_realloc(s->lengths, new_size);
|
||||
s->sectors = g_realloc(s->sectors, new_size);
|
||||
s->sectorcounts = g_realloc(s->sectorcounts, new_size);
|
||||
|
||||
for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) {
|
||||
ret = read_uint32(bs, offset, &s->types[i]);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
offset += 4;
|
||||
if (s->types[i] != 0x80000005 && s->types[i] != 1 &&
|
||||
s->types[i] != 2) {
|
||||
if (s->types[i] == 0xffffffff && i > 0) {
|
||||
last_in_offset = s->offsets[i - 1] + s->lengths[i - 1];
|
||||
last_out_offset = s->sectors[i - 1] +
|
||||
s->sectorcounts[i - 1];
|
||||
}
|
||||
chunk_count--;
|
||||
i--;
|
||||
offset += 36;
|
||||
continue;
|
||||
}
|
||||
offset += 4;
|
||||
|
||||
ret = read_uint64(bs, offset, &s->sectors[i]);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
s->sectors[i] += last_out_offset;
|
||||
offset += 8;
|
||||
|
||||
ret = read_uint64(bs, offset, &s->sectorcounts[i]);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
offset += 8;
|
||||
|
||||
if (s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) {
|
||||
error_report("sector count %" PRIu64 " for chunk %" PRIu32
|
||||
" is larger than max (%u)",
|
||||
s->sectorcounts[i], i, DMG_SECTORCOUNTS_MAX);
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
ret = read_uint64(bs, offset, &s->offsets[i]);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
s->offsets[i] += last_in_offset;
|
||||
offset += 8;
|
||||
|
||||
ret = read_uint64(bs, offset, &s->lengths[i]);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
offset += 8;
|
||||
|
||||
if (s->lengths[i] > DMG_LENGTHS_MAX) {
|
||||
error_report("length %" PRIu64 " for chunk %" PRIu32
|
||||
" is larger than max (%u)",
|
||||
s->lengths[i], i, DMG_LENGTHS_MAX);
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
update_max_chunk_size(s, i, &max_compressed_size,
|
||||
&max_sectors_per_chunk);
|
||||
}
|
||||
s->n_chunks += chunk_count;
|
||||
}
|
||||
} else {
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* initialize zlib engine */
|
||||
s->compressed_chunk = qemu_try_blockalign(bs->file,
|
||||
ds.max_compressed_size + 1);
|
||||
max_compressed_size + 1);
|
||||
s->uncompressed_chunk = qemu_try_blockalign(bs->file,
|
||||
512 * ds.max_sectors_per_chunk);
|
||||
512 * max_sectors_per_chunk);
|
||||
if (s->compressed_chunk == NULL || s->uncompressed_chunk == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto fail;
|
||||
@@ -578,16 +349,13 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
|
||||
if (!is_sector_in_chunk(s, s->current_chunk, sector_num)) {
|
||||
int ret;
|
||||
uint32_t chunk = search_chunk(s, sector_num);
|
||||
#ifdef CONFIG_BZIP2
|
||||
uint64_t total_out;
|
||||
#endif
|
||||
|
||||
if (chunk >= s->n_chunks) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
s->current_chunk = s->n_chunks;
|
||||
switch (s->types[chunk]) { /* block entry type */
|
||||
switch (s->types[chunk]) {
|
||||
case 0x80000005: { /* zlib compressed */
|
||||
/* we need to buffer, because only the chunk as whole can be
|
||||
* inflated. */
|
||||
@@ -611,34 +379,6 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
|
||||
return -1;
|
||||
}
|
||||
break; }
|
||||
#ifdef CONFIG_BZIP2
|
||||
case 0x80000006: /* bzip2 compressed */
|
||||
/* we need to buffer, because only the chunk as whole can be
|
||||
* inflated. */
|
||||
ret = bdrv_pread(bs->file, s->offsets[chunk],
|
||||
s->compressed_chunk, s->lengths[chunk]);
|
||||
if (ret != s->lengths[chunk]) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
ret = BZ2_bzDecompressInit(&s->bzstream, 0, 0);
|
||||
if (ret != BZ_OK) {
|
||||
return -1;
|
||||
}
|
||||
s->bzstream.next_in = (char *)s->compressed_chunk;
|
||||
s->bzstream.avail_in = (unsigned int) s->lengths[chunk];
|
||||
s->bzstream.next_out = (char *)s->uncompressed_chunk;
|
||||
s->bzstream.avail_out = (unsigned int) 512 * s->sectorcounts[chunk];
|
||||
ret = BZ2_bzDecompress(&s->bzstream);
|
||||
total_out = ((uint64_t)s->bzstream.total_out_hi32 << 32) +
|
||||
s->bzstream.total_out_lo32;
|
||||
BZ2_bzDecompressEnd(&s->bzstream);
|
||||
if (ret != BZ_STREAM_END ||
|
||||
total_out != 512 * s->sectorcounts[chunk]) {
|
||||
return -1;
|
||||
}
|
||||
break;
|
||||
#endif /* CONFIG_BZIP2 */
|
||||
case 1: /* copy */
|
||||
ret = bdrv_pread(bs->file, s->offsets[chunk],
|
||||
s->uncompressed_chunk, s->lengths[chunk]);
|
||||
@@ -647,8 +387,7 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
|
||||
}
|
||||
break;
|
||||
case 2: /* zero */
|
||||
/* see dmg_read, it is treated specially. No buffer needs to be
|
||||
* pre-filled, the zeroes can be set directly. */
|
||||
memset(s->uncompressed_chunk, 0, 512 * s->sectorcounts[chunk]);
|
||||
break;
|
||||
}
|
||||
s->current_chunk = chunk;
|
||||
@@ -667,13 +406,6 @@ static int dmg_read(BlockDriverState *bs, int64_t sector_num,
|
||||
if (dmg_read_chunk(bs, sector_num + i) != 0) {
|
||||
return -1;
|
||||
}
|
||||
/* Special case: current chunk is all zeroes. Do not perform a memcpy as
|
||||
* s->uncompressed_chunk may be too small to cover the large all-zeroes
|
||||
* section. dmg_read_chunk is called to find s->current_chunk */
|
||||
if (s->types[s->current_chunk] == 2) { /* all zeroes block entry */
|
||||
memset(buf + i * 512, 0, 512);
|
||||
continue;
|
||||
}
|
||||
sector_offset_in_chunk = sector_num + i - s->sectors[s->current_chunk];
|
||||
memcpy(buf + i * 512,
|
||||
s->uncompressed_chunk + sector_offset_in_chunk * 512, 512);
|
||||
|
@@ -56,7 +56,6 @@ typedef struct IscsiLun {
|
||||
uint64_t num_blocks;
|
||||
int events;
|
||||
QEMUTimer *nop_timer;
|
||||
QEMUTimer *event_timer;
|
||||
uint8_t lbpme;
|
||||
uint8_t lbprz;
|
||||
uint8_t has_write_same;
|
||||
@@ -66,7 +65,6 @@ typedef struct IscsiLun {
|
||||
unsigned long *allocationmap;
|
||||
int cluster_sectors;
|
||||
bool use_16_for_rw;
|
||||
bool write_protected;
|
||||
} IscsiLun;
|
||||
|
||||
typedef struct IscsiTask {
|
||||
@@ -96,7 +94,6 @@ typedef struct IscsiAIOCB {
|
||||
#endif
|
||||
} IscsiAIOCB;
|
||||
|
||||
#define EVENT_INTERVAL 250
|
||||
#define NOP_INTERVAL 5000
|
||||
#define MAX_NOP_FAILURES 3
|
||||
#define ISCSI_CMD_RETRIES ARRAY_SIZE(iscsi_retry_times)
|
||||
@@ -258,30 +255,21 @@ static void
|
||||
iscsi_set_events(IscsiLun *iscsilun)
|
||||
{
|
||||
struct iscsi_context *iscsi = iscsilun->iscsi;
|
||||
int ev = iscsi_which_events(iscsi);
|
||||
int ev;
|
||||
|
||||
/* We always register a read handler. */
|
||||
ev = POLLIN;
|
||||
ev |= iscsi_which_events(iscsi);
|
||||
if (ev != iscsilun->events) {
|
||||
aio_set_fd_handler(iscsilun->aio_context,
|
||||
iscsi_get_fd(iscsi),
|
||||
(ev & POLLIN) ? iscsi_process_read : NULL,
|
||||
iscsi_process_read,
|
||||
(ev & POLLOUT) ? iscsi_process_write : NULL,
|
||||
iscsilun);
|
||||
iscsilun->events = ev;
|
||||
|
||||
}
|
||||
|
||||
/* newer versions of libiscsi may return zero events. In this
|
||||
* case start a timer to ensure we are able to return to service
|
||||
* once this situation changes. */
|
||||
if (!ev) {
|
||||
timer_mod(iscsilun->event_timer,
|
||||
qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + EVENT_INTERVAL);
|
||||
}
|
||||
}
|
||||
|
||||
static void iscsi_timed_set_events(void *opaque)
|
||||
{
|
||||
IscsiLun *iscsilun = opaque;
|
||||
iscsi_set_events(iscsilun);
|
||||
iscsilun->events = ev;
|
||||
}
|
||||
|
||||
static void
|
||||
@@ -1225,11 +1213,6 @@ static void iscsi_detach_aio_context(BlockDriverState *bs)
|
||||
timer_free(iscsilun->nop_timer);
|
||||
iscsilun->nop_timer = NULL;
|
||||
}
|
||||
if (iscsilun->event_timer) {
|
||||
timer_del(iscsilun->event_timer);
|
||||
timer_free(iscsilun->event_timer);
|
||||
iscsilun->event_timer = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static void iscsi_attach_aio_context(BlockDriverState *bs,
|
||||
@@ -1246,11 +1229,6 @@ static void iscsi_attach_aio_context(BlockDriverState *bs,
|
||||
iscsi_nop_timed_event, iscsilun);
|
||||
timer_mod(iscsilun->nop_timer,
|
||||
qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL);
|
||||
|
||||
/* Prepare a timer for a delayed call to iscsi_set_events */
|
||||
iscsilun->event_timer = aio_timer_new(iscsilun->aio_context,
|
||||
QEMU_CLOCK_REALTIME, SCALE_MS,
|
||||
iscsi_timed_set_events, iscsilun);
|
||||
}
|
||||
|
||||
static bool iscsi_is_write_protected(IscsiLun *iscsilun)
|
||||
@@ -1290,6 +1268,10 @@ out:
|
||||
/*
|
||||
* We support iscsi url's on the form
|
||||
* iscsi://[<username>%<password>@]<host>[:<port>]/<targetname>/<lun>
|
||||
*
|
||||
* Note: flags are currently not used by iscsi_open. If this function
|
||||
* is changed such that flags are used, please examine iscsi_reopen_prepare()
|
||||
* to see if needs to be changed as well.
|
||||
*/
|
||||
static int iscsi_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
Error **errp)
|
||||
@@ -1347,7 +1329,7 @@ static int iscsi_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (iscsi_url->user[0] != '\0') {
|
||||
if (iscsi_url->user != NULL) {
|
||||
ret = iscsi_set_initiator_username_pwd(iscsi, iscsi_url->user,
|
||||
iscsi_url->passwd);
|
||||
if (ret != 0) {
|
||||
@@ -1403,10 +1385,9 @@ static int iscsi_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
scsi_free_scsi_task(task);
|
||||
task = NULL;
|
||||
|
||||
iscsilun->write_protected = iscsi_is_write_protected(iscsilun);
|
||||
/* Check the write protect flag of the LUN if we want to write */
|
||||
if (iscsilun->type == TYPE_DISK && (flags & BDRV_O_RDWR) &&
|
||||
iscsilun->write_protected) {
|
||||
iscsi_is_write_protected(iscsilun)) {
|
||||
error_setg(errp, "Cannot open a write protected LUN as read-write");
|
||||
ret = -EACCES;
|
||||
goto out;
|
||||
@@ -1560,17 +1541,13 @@ static void iscsi_refresh_limits(BlockDriverState *bs, Error **errp)
|
||||
sector_limits_lun2qemu(iscsilun->bl.opt_xfer_len, iscsilun);
|
||||
}
|
||||
|
||||
/* Note that this will not re-establish a connection with an iSCSI target - it
|
||||
* is effectively a NOP. */
|
||||
/* Since iscsi_open() ignores bdrv_flags, there is nothing to do here in
|
||||
* prepare. Note that this will not re-establish a connection with an iSCSI
|
||||
* target - it is effectively a NOP. */
|
||||
static int iscsi_reopen_prepare(BDRVReopenState *state,
|
||||
BlockReopenQueue *queue, Error **errp)
|
||||
{
|
||||
IscsiLun *iscsilun = state->bs->opaque;
|
||||
|
||||
if (state->flags & BDRV_O_RDWR && iscsilun->write_protected) {
|
||||
error_setg(errp, "Cannot open a write protected LUN as read-write");
|
||||
return -EACCES;
|
||||
}
|
||||
/* NOP */
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -35,14 +35,14 @@ struct qemu_laiocb {
|
||||
size_t nbytes;
|
||||
QEMUIOVector *qiov;
|
||||
bool is_read;
|
||||
QSIMPLEQ_ENTRY(qemu_laiocb) next;
|
||||
QLIST_ENTRY(qemu_laiocb) node;
|
||||
};
|
||||
|
||||
typedef struct {
|
||||
struct iocb *iocbs[MAX_QUEUED_IO];
|
||||
int plugged;
|
||||
unsigned int n;
|
||||
bool blocked;
|
||||
QSIMPLEQ_HEAD(, qemu_laiocb) pending;
|
||||
unsigned int size;
|
||||
unsigned int idx;
|
||||
} LaioQueue;
|
||||
|
||||
struct qemu_laio_state {
|
||||
@@ -59,8 +59,6 @@ struct qemu_laio_state {
|
||||
int event_max;
|
||||
};
|
||||
|
||||
static void ioq_submit(struct qemu_laio_state *s);
|
||||
|
||||
static inline ssize_t io_event_ret(struct io_event *ev)
|
||||
{
|
||||
return (ssize_t)(((uint64_t)ev->res2 << 32) | ev->res);
|
||||
@@ -137,10 +135,6 @@ static void qemu_laio_completion_bh(void *opaque)
|
||||
|
||||
qemu_laio_process_completion(s, laiocb);
|
||||
}
|
||||
|
||||
if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
|
||||
ioq_submit(s);
|
||||
}
|
||||
}
|
||||
|
||||
static void qemu_laio_completion_cb(EventNotifier *e)
|
||||
@@ -178,41 +172,50 @@ static const AIOCBInfo laio_aiocb_info = {
|
||||
|
||||
static void ioq_init(LaioQueue *io_q)
|
||||
{
|
||||
QSIMPLEQ_INIT(&io_q->pending);
|
||||
io_q->size = MAX_QUEUED_IO;
|
||||
io_q->idx = 0;
|
||||
io_q->plugged = 0;
|
||||
io_q->n = 0;
|
||||
io_q->blocked = false;
|
||||
}
|
||||
|
||||
static void ioq_submit(struct qemu_laio_state *s)
|
||||
static int ioq_submit(struct qemu_laio_state *s)
|
||||
{
|
||||
int ret, len;
|
||||
struct qemu_laiocb *aiocb;
|
||||
struct iocb *iocbs[MAX_QUEUED_IO];
|
||||
QSIMPLEQ_HEAD(, qemu_laiocb) completed;
|
||||
int ret, i = 0;
|
||||
int len = s->io_q.idx;
|
||||
|
||||
do {
|
||||
len = 0;
|
||||
QSIMPLEQ_FOREACH(aiocb, &s->io_q.pending, next) {
|
||||
iocbs[len++] = &aiocb->iocb;
|
||||
if (len == MAX_QUEUED_IO) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
ret = io_submit(s->ctx, len, s->io_q.iocbs);
|
||||
} while (i++ < 3 && ret == -EAGAIN);
|
||||
|
||||
ret = io_submit(s->ctx, len, iocbs);
|
||||
if (ret == -EAGAIN) {
|
||||
break;
|
||||
}
|
||||
if (ret < 0) {
|
||||
abort();
|
||||
}
|
||||
/* empty io queue */
|
||||
s->io_q.idx = 0;
|
||||
|
||||
s->io_q.n -= ret;
|
||||
aiocb = container_of(iocbs[ret - 1], struct qemu_laiocb, iocb);
|
||||
QSIMPLEQ_SPLIT_AFTER(&s->io_q.pending, aiocb, next, &completed);
|
||||
} while (ret == len && !QSIMPLEQ_EMPTY(&s->io_q.pending));
|
||||
s->io_q.blocked = (s->io_q.n > 0);
|
||||
if (ret < 0) {
|
||||
i = 0;
|
||||
} else {
|
||||
i = ret;
|
||||
}
|
||||
|
||||
for (; i < len; i++) {
|
||||
struct qemu_laiocb *laiocb =
|
||||
container_of(s->io_q.iocbs[i], struct qemu_laiocb, iocb);
|
||||
|
||||
laiocb->ret = (ret < 0) ? ret : -EIO;
|
||||
qemu_laio_process_completion(s, laiocb);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void ioq_enqueue(struct qemu_laio_state *s, struct iocb *iocb)
|
||||
{
|
||||
unsigned int idx = s->io_q.idx;
|
||||
|
||||
s->io_q.iocbs[idx++] = iocb;
|
||||
s->io_q.idx = idx;
|
||||
|
||||
/* submit immediately if queue is full */
|
||||
if (idx == s->io_q.size) {
|
||||
ioq_submit(s);
|
||||
}
|
||||
}
|
||||
|
||||
void laio_io_plug(BlockDriverState *bs, void *aio_ctx)
|
||||
@@ -222,19 +225,22 @@ void laio_io_plug(BlockDriverState *bs, void *aio_ctx)
|
||||
s->io_q.plugged++;
|
||||
}
|
||||
|
||||
void laio_io_unplug(BlockDriverState *bs, void *aio_ctx, bool unplug)
|
||||
int laio_io_unplug(BlockDriverState *bs, void *aio_ctx, bool unplug)
|
||||
{
|
||||
struct qemu_laio_state *s = aio_ctx;
|
||||
int ret = 0;
|
||||
|
||||
assert(s->io_q.plugged > 0 || !unplug);
|
||||
|
||||
if (unplug && --s->io_q.plugged > 0) {
|
||||
return;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
|
||||
ioq_submit(s);
|
||||
if (s->io_q.idx > 0) {
|
||||
ret = ioq_submit(s);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
BlockAIOCB *laio_submit(BlockDriverState *bs, void *aio_ctx, int fd,
|
||||
@@ -270,11 +276,12 @@ BlockAIOCB *laio_submit(BlockDriverState *bs, void *aio_ctx, int fd,
|
||||
}
|
||||
io_set_eventfd(&laiocb->iocb, event_notifier_get_fd(&s->e));
|
||||
|
||||
QSIMPLEQ_INSERT_TAIL(&s->io_q.pending, laiocb, next);
|
||||
s->io_q.n++;
|
||||
if (!s->io_q.blocked &&
|
||||
(!s->io_q.plugged || s->io_q.n >= MAX_QUEUED_IO)) {
|
||||
ioq_submit(s);
|
||||
if (!s->io_q.plugged) {
|
||||
if (io_submit(s->ctx, 1, &iocbs) < 0) {
|
||||
goto out_free_aiocb;
|
||||
}
|
||||
} else {
|
||||
ioq_enqueue(s, iocbs);
|
||||
}
|
||||
return &laiocb->common;
|
||||
|
||||
|
@@ -128,8 +128,7 @@ static void mirror_write_complete(void *opaque, int ret)
|
||||
BlockDriverState *source = s->common.bs;
|
||||
BlockErrorAction action;
|
||||
|
||||
bdrv_set_dirty_bitmap(source, s->dirty_bitmap, op->sector_num,
|
||||
op->nb_sectors);
|
||||
bdrv_set_dirty(source, op->sector_num, op->nb_sectors);
|
||||
action = mirror_error_action(s, false, -ret);
|
||||
if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
|
||||
s->ret = ret;
|
||||
@@ -146,8 +145,7 @@ static void mirror_read_complete(void *opaque, int ret)
|
||||
BlockDriverState *source = s->common.bs;
|
||||
BlockErrorAction action;
|
||||
|
||||
bdrv_set_dirty_bitmap(source, s->dirty_bitmap, op->sector_num,
|
||||
op->nb_sectors);
|
||||
bdrv_set_dirty(source, op->sector_num, op->nb_sectors);
|
||||
action = mirror_error_action(s, true, -ret);
|
||||
if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
|
||||
s->ret = ret;
|
||||
@@ -288,8 +286,7 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
|
||||
next_sector += sectors_per_chunk;
|
||||
}
|
||||
|
||||
bdrv_reset_dirty_bitmap(source, s->dirty_bitmap, sector_num,
|
||||
nb_sectors);
|
||||
bdrv_reset_dirty(source, sector_num, nb_sectors);
|
||||
|
||||
/* Copy the dirty cluster. */
|
||||
s->in_flight++;
|
||||
@@ -378,8 +375,7 @@ static void coroutine_fn mirror_run(void *opaque)
|
||||
int64_t sector_num, end, sectors_per_chunk, length;
|
||||
uint64_t last_pause_ns;
|
||||
BlockDriverInfo bdi;
|
||||
char backing_filename[2]; /* we only need 2 characters because we are only
|
||||
checking for a NULL string */
|
||||
char backing_filename[1024];
|
||||
int ret = 0;
|
||||
int n;
|
||||
|
||||
@@ -446,7 +442,7 @@ static void coroutine_fn mirror_run(void *opaque)
|
||||
|
||||
assert(n > 0);
|
||||
if (ret == 1) {
|
||||
bdrv_set_dirty_bitmap(bs, s->dirty_bitmap, sector_num, n);
|
||||
bdrv_set_dirty(bs, sector_num, n);
|
||||
sector_num = next;
|
||||
} else {
|
||||
sector_num += n;
|
||||
|
@@ -43,23 +43,20 @@ static void nbd_recv_coroutines_enter_all(NbdClientSession *s)
|
||||
}
|
||||
}
|
||||
|
||||
static void nbd_teardown_connection(BlockDriverState *bs)
|
||||
static void nbd_teardown_connection(NbdClientSession *client)
|
||||
{
|
||||
NbdClientSession *client = nbd_get_client_session(bs);
|
||||
|
||||
/* finish any pending coroutines */
|
||||
shutdown(client->sock, 2);
|
||||
nbd_recv_coroutines_enter_all(client);
|
||||
|
||||
nbd_client_detach_aio_context(bs);
|
||||
nbd_client_session_detach_aio_context(client);
|
||||
closesocket(client->sock);
|
||||
client->sock = -1;
|
||||
}
|
||||
|
||||
static void nbd_reply_ready(void *opaque)
|
||||
{
|
||||
BlockDriverState *bs = opaque;
|
||||
NbdClientSession *s = nbd_get_client_session(bs);
|
||||
NbdClientSession *s = opaque;
|
||||
uint64_t i;
|
||||
int ret;
|
||||
|
||||
@@ -92,40 +89,28 @@ static void nbd_reply_ready(void *opaque)
|
||||
}
|
||||
|
||||
fail:
|
||||
nbd_teardown_connection(bs);
|
||||
nbd_teardown_connection(s);
|
||||
}
|
||||
|
||||
static void nbd_restart_write(void *opaque)
|
||||
{
|
||||
BlockDriverState *bs = opaque;
|
||||
NbdClientSession *s = opaque;
|
||||
|
||||
qemu_coroutine_enter(nbd_get_client_session(bs)->send_coroutine, NULL);
|
||||
qemu_coroutine_enter(s->send_coroutine, NULL);
|
||||
}
|
||||
|
||||
static int nbd_co_send_request(BlockDriverState *bs,
|
||||
struct nbd_request *request,
|
||||
QEMUIOVector *qiov, int offset)
|
||||
static int nbd_co_send_request(NbdClientSession *s,
|
||||
struct nbd_request *request,
|
||||
QEMUIOVector *qiov, int offset)
|
||||
{
|
||||
NbdClientSession *s = nbd_get_client_session(bs);
|
||||
AioContext *aio_context;
|
||||
int rc, ret, i;
|
||||
int rc, ret;
|
||||
|
||||
qemu_co_mutex_lock(&s->send_mutex);
|
||||
|
||||
for (i = 0; i < MAX_NBD_REQUESTS; i++) {
|
||||
if (s->recv_coroutine[i] == NULL) {
|
||||
s->recv_coroutine[i] = qemu_coroutine_self();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
assert(i < MAX_NBD_REQUESTS);
|
||||
request->handle = INDEX_TO_HANDLE(s, i);
|
||||
s->send_coroutine = qemu_coroutine_self();
|
||||
aio_context = bdrv_get_aio_context(bs);
|
||||
|
||||
aio_context = bdrv_get_aio_context(s->bs);
|
||||
aio_set_fd_handler(aio_context, s->sock,
|
||||
nbd_reply_ready, nbd_restart_write, bs);
|
||||
nbd_reply_ready, nbd_restart_write, s);
|
||||
if (qiov) {
|
||||
if (!s->is_unix) {
|
||||
socket_set_cork(s->sock, 1);
|
||||
@@ -144,7 +129,7 @@ static int nbd_co_send_request(BlockDriverState *bs,
|
||||
} else {
|
||||
rc = nbd_send_request(s->sock, request);
|
||||
}
|
||||
aio_set_fd_handler(aio_context, s->sock, nbd_reply_ready, NULL, bs);
|
||||
aio_set_fd_handler(aio_context, s->sock, nbd_reply_ready, NULL, s);
|
||||
s->send_coroutine = NULL;
|
||||
qemu_co_mutex_unlock(&s->send_mutex);
|
||||
return rc;
|
||||
@@ -179,6 +164,8 @@ static void nbd_co_receive_reply(NbdClientSession *s,
|
||||
static void nbd_coroutine_start(NbdClientSession *s,
|
||||
struct nbd_request *request)
|
||||
{
|
||||
int i;
|
||||
|
||||
/* Poor man semaphore. The free_sema is locked when no other request
|
||||
* can be accepted, and unlocked after receiving one reply. */
|
||||
if (s->in_flight >= MAX_NBD_REQUESTS - 1) {
|
||||
@@ -187,7 +174,15 @@ static void nbd_coroutine_start(NbdClientSession *s,
|
||||
}
|
||||
s->in_flight++;
|
||||
|
||||
/* s->recv_coroutine[i] is set as soon as we get the send_lock. */
|
||||
for (i = 0; i < MAX_NBD_REQUESTS; i++) {
|
||||
if (s->recv_coroutine[i] == NULL) {
|
||||
s->recv_coroutine[i] = qemu_coroutine_self();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
assert(i < MAX_NBD_REQUESTS);
|
||||
request->handle = INDEX_TO_HANDLE(s, i);
|
||||
}
|
||||
|
||||
static void nbd_coroutine_end(NbdClientSession *s,
|
||||
@@ -200,11 +195,10 @@ static void nbd_coroutine_end(NbdClientSession *s,
|
||||
}
|
||||
}
|
||||
|
||||
static int nbd_co_readv_1(BlockDriverState *bs, int64_t sector_num,
|
||||
static int nbd_co_readv_1(NbdClientSession *client, int64_t sector_num,
|
||||
int nb_sectors, QEMUIOVector *qiov,
|
||||
int offset)
|
||||
{
|
||||
NbdClientSession *client = nbd_get_client_session(bs);
|
||||
struct nbd_request request = { .type = NBD_CMD_READ };
|
||||
struct nbd_reply reply;
|
||||
ssize_t ret;
|
||||
@@ -213,7 +207,7 @@ static int nbd_co_readv_1(BlockDriverState *bs, int64_t sector_num,
|
||||
request.len = nb_sectors * 512;
|
||||
|
||||
nbd_coroutine_start(client, &request);
|
||||
ret = nbd_co_send_request(bs, &request, NULL, 0);
|
||||
ret = nbd_co_send_request(client, &request, NULL, 0);
|
||||
if (ret < 0) {
|
||||
reply.error = -ret;
|
||||
} else {
|
||||
@@ -224,16 +218,15 @@ static int nbd_co_readv_1(BlockDriverState *bs, int64_t sector_num,
|
||||
|
||||
}
|
||||
|
||||
static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num,
|
||||
static int nbd_co_writev_1(NbdClientSession *client, int64_t sector_num,
|
||||
int nb_sectors, QEMUIOVector *qiov,
|
||||
int offset)
|
||||
{
|
||||
NbdClientSession *client = nbd_get_client_session(bs);
|
||||
struct nbd_request request = { .type = NBD_CMD_WRITE };
|
||||
struct nbd_reply reply;
|
||||
ssize_t ret;
|
||||
|
||||
if (!bdrv_enable_write_cache(bs) &&
|
||||
if (!bdrv_enable_write_cache(client->bs) &&
|
||||
(client->nbdflags & NBD_FLAG_SEND_FUA)) {
|
||||
request.type |= NBD_CMD_FLAG_FUA;
|
||||
}
|
||||
@@ -242,7 +235,7 @@ static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num,
|
||||
request.len = nb_sectors * 512;
|
||||
|
||||
nbd_coroutine_start(client, &request);
|
||||
ret = nbd_co_send_request(bs, &request, qiov, offset);
|
||||
ret = nbd_co_send_request(client, &request, qiov, offset);
|
||||
if (ret < 0) {
|
||||
reply.error = -ret;
|
||||
} else {
|
||||
@@ -256,13 +249,14 @@ static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num,
|
||||
* remain aligned to 4K. */
|
||||
#define NBD_MAX_SECTORS 2040
|
||||
|
||||
int nbd_client_co_readv(BlockDriverState *bs, int64_t sector_num,
|
||||
int nb_sectors, QEMUIOVector *qiov)
|
||||
int nbd_client_session_co_readv(NbdClientSession *client, int64_t sector_num,
|
||||
int nb_sectors, QEMUIOVector *qiov)
|
||||
{
|
||||
int offset = 0;
|
||||
int ret;
|
||||
while (nb_sectors > NBD_MAX_SECTORS) {
|
||||
ret = nbd_co_readv_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset);
|
||||
ret = nbd_co_readv_1(client, sector_num,
|
||||
NBD_MAX_SECTORS, qiov, offset);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
@@ -270,16 +264,17 @@ int nbd_client_co_readv(BlockDriverState *bs, int64_t sector_num,
|
||||
sector_num += NBD_MAX_SECTORS;
|
||||
nb_sectors -= NBD_MAX_SECTORS;
|
||||
}
|
||||
return nbd_co_readv_1(bs, sector_num, nb_sectors, qiov, offset);
|
||||
return nbd_co_readv_1(client, sector_num, nb_sectors, qiov, offset);
|
||||
}
|
||||
|
||||
int nbd_client_co_writev(BlockDriverState *bs, int64_t sector_num,
|
||||
int nb_sectors, QEMUIOVector *qiov)
|
||||
int nbd_client_session_co_writev(NbdClientSession *client, int64_t sector_num,
|
||||
int nb_sectors, QEMUIOVector *qiov)
|
||||
{
|
||||
int offset = 0;
|
||||
int ret;
|
||||
while (nb_sectors > NBD_MAX_SECTORS) {
|
||||
ret = nbd_co_writev_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset);
|
||||
ret = nbd_co_writev_1(client, sector_num,
|
||||
NBD_MAX_SECTORS, qiov, offset);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
@@ -287,12 +282,11 @@ int nbd_client_co_writev(BlockDriverState *bs, int64_t sector_num,
|
||||
sector_num += NBD_MAX_SECTORS;
|
||||
nb_sectors -= NBD_MAX_SECTORS;
|
||||
}
|
||||
return nbd_co_writev_1(bs, sector_num, nb_sectors, qiov, offset);
|
||||
return nbd_co_writev_1(client, sector_num, nb_sectors, qiov, offset);
|
||||
}
|
||||
|
||||
int nbd_client_co_flush(BlockDriverState *bs)
|
||||
int nbd_client_session_co_flush(NbdClientSession *client)
|
||||
{
|
||||
NbdClientSession *client = nbd_get_client_session(bs);
|
||||
struct nbd_request request = { .type = NBD_CMD_FLUSH };
|
||||
struct nbd_reply reply;
|
||||
ssize_t ret;
|
||||
@@ -309,7 +303,7 @@ int nbd_client_co_flush(BlockDriverState *bs)
|
||||
request.len = 0;
|
||||
|
||||
nbd_coroutine_start(client, &request);
|
||||
ret = nbd_co_send_request(bs, &request, NULL, 0);
|
||||
ret = nbd_co_send_request(client, &request, NULL, 0);
|
||||
if (ret < 0) {
|
||||
reply.error = -ret;
|
||||
} else {
|
||||
@@ -319,10 +313,9 @@ int nbd_client_co_flush(BlockDriverState *bs)
|
||||
return -reply.error;
|
||||
}
|
||||
|
||||
int nbd_client_co_discard(BlockDriverState *bs, int64_t sector_num,
|
||||
int nb_sectors)
|
||||
int nbd_client_session_co_discard(NbdClientSession *client, int64_t sector_num,
|
||||
int nb_sectors)
|
||||
{
|
||||
NbdClientSession *client = nbd_get_client_session(bs);
|
||||
struct nbd_request request = { .type = NBD_CMD_TRIM };
|
||||
struct nbd_reply reply;
|
||||
ssize_t ret;
|
||||
@@ -334,7 +327,7 @@ int nbd_client_co_discard(BlockDriverState *bs, int64_t sector_num,
|
||||
request.len = nb_sectors * 512;
|
||||
|
||||
nbd_coroutine_start(client, &request);
|
||||
ret = nbd_co_send_request(bs, &request, NULL, 0);
|
||||
ret = nbd_co_send_request(client, &request, NULL, 0);
|
||||
if (ret < 0) {
|
||||
reply.error = -ret;
|
||||
} else {
|
||||
@@ -345,48 +338,51 @@ int nbd_client_co_discard(BlockDriverState *bs, int64_t sector_num,
|
||||
|
||||
}
|
||||
|
||||
void nbd_client_detach_aio_context(BlockDriverState *bs)
|
||||
void nbd_client_session_detach_aio_context(NbdClientSession *client)
|
||||
{
|
||||
aio_set_fd_handler(bdrv_get_aio_context(bs),
|
||||
nbd_get_client_session(bs)->sock, NULL, NULL, NULL);
|
||||
aio_set_fd_handler(bdrv_get_aio_context(client->bs), client->sock,
|
||||
NULL, NULL, NULL);
|
||||
}
|
||||
|
||||
void nbd_client_attach_aio_context(BlockDriverState *bs,
|
||||
AioContext *new_context)
|
||||
void nbd_client_session_attach_aio_context(NbdClientSession *client,
|
||||
AioContext *new_context)
|
||||
{
|
||||
aio_set_fd_handler(new_context, nbd_get_client_session(bs)->sock,
|
||||
nbd_reply_ready, NULL, bs);
|
||||
aio_set_fd_handler(new_context, client->sock,
|
||||
nbd_reply_ready, NULL, client);
|
||||
}
|
||||
|
||||
void nbd_client_close(BlockDriverState *bs)
|
||||
void nbd_client_session_close(NbdClientSession *client)
|
||||
{
|
||||
NbdClientSession *client = nbd_get_client_session(bs);
|
||||
struct nbd_request request = {
|
||||
.type = NBD_CMD_DISC,
|
||||
.from = 0,
|
||||
.len = 0
|
||||
};
|
||||
|
||||
if (!client->bs) {
|
||||
return;
|
||||
}
|
||||
if (client->sock == -1) {
|
||||
return;
|
||||
}
|
||||
|
||||
nbd_send_request(client->sock, &request);
|
||||
|
||||
nbd_teardown_connection(bs);
|
||||
nbd_teardown_connection(client);
|
||||
client->bs = NULL;
|
||||
}
|
||||
|
||||
int nbd_client_init(BlockDriverState *bs, int sock, const char *export,
|
||||
Error **errp)
|
||||
int nbd_client_session_init(NbdClientSession *client, BlockDriverState *bs,
|
||||
int sock, const char *export)
|
||||
{
|
||||
NbdClientSession *client = nbd_get_client_session(bs);
|
||||
int ret;
|
||||
|
||||
/* NBD handshake */
|
||||
logout("session init %s\n", export);
|
||||
qemu_set_block(sock);
|
||||
ret = nbd_receive_negotiate(sock, export,
|
||||
&client->nbdflags, &client->size, errp);
|
||||
&client->nbdflags, &client->size,
|
||||
&client->blocksize);
|
||||
if (ret < 0) {
|
||||
logout("Failed to negotiate with the NBD server\n");
|
||||
closesocket(sock);
|
||||
@@ -395,12 +391,13 @@ int nbd_client_init(BlockDriverState *bs, int sock, const char *export,
|
||||
|
||||
qemu_co_mutex_init(&client->send_mutex);
|
||||
qemu_co_mutex_init(&client->free_sema);
|
||||
client->bs = bs;
|
||||
client->sock = sock;
|
||||
|
||||
/* Now that we're connected, set the socket to be non-blocking and
|
||||
* kick the reply mechanism. */
|
||||
qemu_set_nonblock(sock);
|
||||
nbd_client_attach_aio_context(bs, bdrv_get_aio_context(bs));
|
||||
nbd_client_session_attach_aio_context(client, bdrv_get_aio_context(bs));
|
||||
|
||||
logout("Established connection with NBD server\n");
|
||||
return 0;
|
||||
|
@@ -20,6 +20,7 @@ typedef struct NbdClientSession {
|
||||
int sock;
|
||||
uint32_t nbdflags;
|
||||
off_t size;
|
||||
size_t blocksize;
|
||||
|
||||
CoMutex send_mutex;
|
||||
CoMutex free_sema;
|
||||
@@ -30,24 +31,24 @@ typedef struct NbdClientSession {
|
||||
struct nbd_reply reply;
|
||||
|
||||
bool is_unix;
|
||||
|
||||
BlockDriverState *bs;
|
||||
} NbdClientSession;
|
||||
|
||||
NbdClientSession *nbd_get_client_session(BlockDriverState *bs);
|
||||
int nbd_client_session_init(NbdClientSession *client, BlockDriverState *bs,
|
||||
int sock, const char *export_name);
|
||||
void nbd_client_session_close(NbdClientSession *client);
|
||||
|
||||
int nbd_client_init(BlockDriverState *bs, int sock, const char *export_name,
|
||||
Error **errp);
|
||||
void nbd_client_close(BlockDriverState *bs);
|
||||
int nbd_client_session_co_discard(NbdClientSession *client, int64_t sector_num,
|
||||
int nb_sectors);
|
||||
int nbd_client_session_co_flush(NbdClientSession *client);
|
||||
int nbd_client_session_co_writev(NbdClientSession *client, int64_t sector_num,
|
||||
int nb_sectors, QEMUIOVector *qiov);
|
||||
int nbd_client_session_co_readv(NbdClientSession *client, int64_t sector_num,
|
||||
int nb_sectors, QEMUIOVector *qiov);
|
||||
|
||||
int nbd_client_co_discard(BlockDriverState *bs, int64_t sector_num,
|
||||
int nb_sectors);
|
||||
int nbd_client_co_flush(BlockDriverState *bs);
|
||||
int nbd_client_co_writev(BlockDriverState *bs, int64_t sector_num,
|
||||
int nb_sectors, QEMUIOVector *qiov);
|
||||
int nbd_client_co_readv(BlockDriverState *bs, int64_t sector_num,
|
||||
int nb_sectors, QEMUIOVector *qiov);
|
||||
|
||||
void nbd_client_detach_aio_context(BlockDriverState *bs);
|
||||
void nbd_client_attach_aio_context(BlockDriverState *bs,
|
||||
AioContext *new_context);
|
||||
void nbd_client_session_detach_aio_context(NbdClientSession *client);
|
||||
void nbd_client_session_attach_aio_context(NbdClientSession *client,
|
||||
AioContext *new_context);
|
||||
|
||||
#endif /* NBD_CLIENT_H */
|
||||
|
50
block/nbd.c
@@ -215,8 +215,7 @@ static void nbd_config(BDRVNBDState *s, QDict *options, char **export,
|
||||
}
|
||||
|
||||
if (!qemu_opt_get(s->socket_opts, "port")) {
|
||||
qemu_opt_set_number(s->socket_opts, "port", NBD_DEFAULT_PORT,
|
||||
&error_abort);
|
||||
qemu_opt_set_number(s->socket_opts, "port", NBD_DEFAULT_PORT);
|
||||
}
|
||||
|
||||
*export = g_strdup(qdict_get_try_str(options, "export"));
|
||||
@@ -225,12 +224,6 @@ static void nbd_config(BDRVNBDState *s, QDict *options, char **export,
|
||||
}
|
||||
}
|
||||
|
||||
NbdClientSession *nbd_get_client_session(BlockDriverState *bs)
|
||||
{
|
||||
BDRVNBDState *s = bs->opaque;
|
||||
return &s->client;
|
||||
}
|
||||
|
||||
static int nbd_establish_connection(BlockDriverState *bs, Error **errp)
|
||||
{
|
||||
BDRVNBDState *s = bs->opaque;
|
||||
@@ -248,7 +241,7 @@ static int nbd_establish_connection(BlockDriverState *bs, Error **errp)
|
||||
/* Failed to establish connection */
|
||||
if (sock < 0) {
|
||||
logout("Failed to establish connection to NBD server\n");
|
||||
return -EIO;
|
||||
return -errno;
|
||||
}
|
||||
|
||||
return sock;
|
||||
@@ -274,12 +267,11 @@ static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
*/
|
||||
sock = nbd_establish_connection(bs, errp);
|
||||
if (sock < 0) {
|
||||
g_free(export);
|
||||
return sock;
|
||||
}
|
||||
|
||||
/* NBD handshake */
|
||||
result = nbd_client_init(bs, sock, export, errp);
|
||||
result = nbd_client_session_init(&s->client, bs, sock, export);
|
||||
g_free(export);
|
||||
return result;
|
||||
}
|
||||
@@ -287,30 +279,35 @@ static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
static int nbd_co_readv(BlockDriverState *bs, int64_t sector_num,
|
||||
int nb_sectors, QEMUIOVector *qiov)
|
||||
{
|
||||
return nbd_client_co_readv(bs, sector_num, nb_sectors, qiov);
|
||||
BDRVNBDState *s = bs->opaque;
|
||||
|
||||
return nbd_client_session_co_readv(&s->client, sector_num,
|
||||
nb_sectors, qiov);
|
||||
}
|
||||
|
||||
static int nbd_co_writev(BlockDriverState *bs, int64_t sector_num,
|
||||
int nb_sectors, QEMUIOVector *qiov)
|
||||
{
|
||||
return nbd_client_co_writev(bs, sector_num, nb_sectors, qiov);
|
||||
BDRVNBDState *s = bs->opaque;
|
||||
|
||||
return nbd_client_session_co_writev(&s->client, sector_num,
|
||||
nb_sectors, qiov);
|
||||
}
|
||||
|
||||
static int nbd_co_flush(BlockDriverState *bs)
|
||||
{
|
||||
return nbd_client_co_flush(bs);
|
||||
}
|
||||
BDRVNBDState *s = bs->opaque;
|
||||
|
||||
static void nbd_refresh_limits(BlockDriverState *bs, Error **errp)
|
||||
{
|
||||
bs->bl.max_discard = UINT32_MAX >> BDRV_SECTOR_BITS;
|
||||
bs->bl.max_transfer_length = UINT32_MAX >> BDRV_SECTOR_BITS;
|
||||
return nbd_client_session_co_flush(&s->client);
|
||||
}
|
||||
|
||||
static int nbd_co_discard(BlockDriverState *bs, int64_t sector_num,
|
||||
int nb_sectors)
|
||||
{
|
||||
return nbd_client_co_discard(bs, sector_num, nb_sectors);
|
||||
BDRVNBDState *s = bs->opaque;
|
||||
|
||||
return nbd_client_session_co_discard(&s->client, sector_num,
|
||||
nb_sectors);
|
||||
}
|
||||
|
||||
static void nbd_close(BlockDriverState *bs)
|
||||
@@ -318,7 +315,7 @@ static void nbd_close(BlockDriverState *bs)
|
||||
BDRVNBDState *s = bs->opaque;
|
||||
|
||||
qemu_opts_del(s->socket_opts);
|
||||
nbd_client_close(bs);
|
||||
nbd_client_session_close(&s->client);
|
||||
}
|
||||
|
||||
static int64_t nbd_getlength(BlockDriverState *bs)
|
||||
@@ -330,13 +327,17 @@ static int64_t nbd_getlength(BlockDriverState *bs)
|
||||
|
||||
static void nbd_detach_aio_context(BlockDriverState *bs)
|
||||
{
|
||||
nbd_client_detach_aio_context(bs);
|
||||
BDRVNBDState *s = bs->opaque;
|
||||
|
||||
nbd_client_session_detach_aio_context(&s->client);
|
||||
}
|
||||
|
||||
static void nbd_attach_aio_context(BlockDriverState *bs,
|
||||
AioContext *new_context)
|
||||
{
|
||||
nbd_client_attach_aio_context(bs, new_context);
|
||||
BDRVNBDState *s = bs->opaque;
|
||||
|
||||
nbd_client_session_attach_aio_context(&s->client, new_context);
|
||||
}
|
||||
|
||||
static void nbd_refresh_filename(BlockDriverState *bs)
|
||||
@@ -395,7 +396,6 @@ static BlockDriver bdrv_nbd = {
|
||||
.bdrv_close = nbd_close,
|
||||
.bdrv_co_flush_to_os = nbd_co_flush,
|
||||
.bdrv_co_discard = nbd_co_discard,
|
||||
.bdrv_refresh_limits = nbd_refresh_limits,
|
||||
.bdrv_getlength = nbd_getlength,
|
||||
.bdrv_detach_aio_context = nbd_detach_aio_context,
|
||||
.bdrv_attach_aio_context = nbd_attach_aio_context,
|
||||
@@ -413,7 +413,6 @@ static BlockDriver bdrv_nbd_tcp = {
|
||||
.bdrv_close = nbd_close,
|
||||
.bdrv_co_flush_to_os = nbd_co_flush,
|
||||
.bdrv_co_discard = nbd_co_discard,
|
||||
.bdrv_refresh_limits = nbd_refresh_limits,
|
||||
.bdrv_getlength = nbd_getlength,
|
||||
.bdrv_detach_aio_context = nbd_detach_aio_context,
|
||||
.bdrv_attach_aio_context = nbd_attach_aio_context,
|
||||
@@ -431,7 +430,6 @@ static BlockDriver bdrv_nbd_unix = {
|
||||
.bdrv_close = nbd_close,
|
||||
.bdrv_co_flush_to_os = nbd_co_flush,
|
||||
.bdrv_co_discard = nbd_co_discard,
|
||||
.bdrv_refresh_limits = nbd_refresh_limits,
|
||||
.bdrv_getlength = nbd_getlength,
|
||||
.bdrv_detach_aio_context = nbd_detach_aio_context,
|
||||
.bdrv_attach_aio_context = nbd_attach_aio_context,
|
||||
|
51
block/qapi.c
@@ -24,7 +24,6 @@
|
||||
|
||||
#include "block/qapi.h"
|
||||
#include "block/block_int.h"
|
||||
#include "block/write-threshold.h"
|
||||
#include "qmp-commands.h"
|
||||
#include "qapi-visit.h"
|
||||
#include "qapi/qmp-output-visitor.h"
|
||||
@@ -41,13 +40,6 @@ BlockDeviceInfo *bdrv_block_device_info(BlockDriverState *bs)
|
||||
info->encrypted = bs->encrypted;
|
||||
info->encryption_key_missing = bdrv_key_required(bs);
|
||||
|
||||
info->cache = g_new(BlockdevCacheInfo, 1);
|
||||
*info->cache = (BlockdevCacheInfo) {
|
||||
.writeback = bdrv_enable_write_cache(bs),
|
||||
.direct = !!(bs->open_flags & BDRV_O_NOCACHE),
|
||||
.no_flush = !!(bs->open_flags & BDRV_O_NO_FLUSH),
|
||||
};
|
||||
|
||||
if (bs->node_name[0]) {
|
||||
info->has_node_name = true;
|
||||
info->node_name = g_strdup(bs->node_name);
|
||||
@@ -90,8 +82,6 @@ BlockDeviceInfo *bdrv_block_device_info(BlockDriverState *bs)
|
||||
info->iops_size = cfg.op_size;
|
||||
}
|
||||
|
||||
info->write_threshold = bdrv_write_threshold_get(bs);
|
||||
|
||||
return info;
|
||||
}
|
||||
|
||||
@@ -178,6 +168,7 @@ void bdrv_query_image_info(BlockDriverState *bs,
|
||||
{
|
||||
int64_t size;
|
||||
const char *backing_filename;
|
||||
char backing_filename2[1024];
|
||||
BlockDriverInfo bdi;
|
||||
int ret;
|
||||
Error *err = NULL;
|
||||
@@ -213,16 +204,10 @@ void bdrv_query_image_info(BlockDriverState *bs,
|
||||
|
||||
backing_filename = bs->backing_file;
|
||||
if (backing_filename[0] != '\0') {
|
||||
char *backing_filename2 = g_malloc0(PATH_MAX);
|
||||
info->backing_filename = g_strdup(backing_filename);
|
||||
info->has_backing_filename = true;
|
||||
bdrv_get_full_backing_filename(bs, backing_filename2, PATH_MAX, &err);
|
||||
if (err) {
|
||||
error_propagate(errp, err);
|
||||
qapi_free_ImageInfo(info);
|
||||
g_free(backing_filename2);
|
||||
return;
|
||||
}
|
||||
bdrv_get_full_backing_filename(bs, backing_filename2,
|
||||
sizeof(backing_filename2));
|
||||
|
||||
if (strcmp(backing_filename, backing_filename2) != 0) {
|
||||
info->full_backing_filename =
|
||||
@@ -234,7 +219,6 @@ void bdrv_query_image_info(BlockDriverState *bs,
|
||||
info->backing_filename_format = g_strdup(bs->backing_format);
|
||||
info->has_backing_filename_format = true;
|
||||
}
|
||||
g_free(backing_filename2);
|
||||
}
|
||||
|
||||
ret = bdrv_query_snapshot_info_list(bs, &info->snapshots, &err);
|
||||
@@ -316,8 +300,7 @@ static void bdrv_query_info(BlockBackend *blk, BlockInfo **p_info,
|
||||
qapi_free_BlockInfo(info);
|
||||
}
|
||||
|
||||
static BlockStats *bdrv_query_stats(const BlockDriverState *bs,
|
||||
bool query_backing)
|
||||
static BlockStats *bdrv_query_stats(const BlockDriverState *bs)
|
||||
{
|
||||
BlockStats *s;
|
||||
|
||||
@@ -328,18 +311,11 @@ static BlockStats *bdrv_query_stats(const BlockDriverState *bs,
|
||||
s->device = g_strdup(bdrv_get_device_name(bs));
|
||||
}
|
||||
|
||||
if (bdrv_get_node_name(bs)[0]) {
|
||||
s->has_node_name = true;
|
||||
s->node_name = g_strdup(bdrv_get_node_name(bs));
|
||||
}
|
||||
|
||||
s->stats = g_malloc0(sizeof(*s->stats));
|
||||
s->stats->rd_bytes = bs->stats.nr_bytes[BLOCK_ACCT_READ];
|
||||
s->stats->wr_bytes = bs->stats.nr_bytes[BLOCK_ACCT_WRITE];
|
||||
s->stats->rd_operations = bs->stats.nr_ops[BLOCK_ACCT_READ];
|
||||
s->stats->wr_operations = bs->stats.nr_ops[BLOCK_ACCT_WRITE];
|
||||
s->stats->rd_merged = bs->stats.merged[BLOCK_ACCT_READ];
|
||||
s->stats->wr_merged = bs->stats.merged[BLOCK_ACCT_WRITE];
|
||||
s->stats->wr_highest_offset =
|
||||
bs->stats.wr_highest_sector * BDRV_SECTOR_SIZE;
|
||||
s->stats->flush_operations = bs->stats.nr_ops[BLOCK_ACCT_FLUSH];
|
||||
@@ -349,12 +325,12 @@ static BlockStats *bdrv_query_stats(const BlockDriverState *bs,
|
||||
|
||||
if (bs->file) {
|
||||
s->has_parent = true;
|
||||
s->parent = bdrv_query_stats(bs->file, query_backing);
|
||||
s->parent = bdrv_query_stats(bs->file);
|
||||
}
|
||||
|
||||
if (query_backing && bs->backing_hd) {
|
||||
if (bs->backing_hd) {
|
||||
s->has_backing = true;
|
||||
s->backing = bdrv_query_stats(bs->backing_hd, query_backing);
|
||||
s->backing = bdrv_query_stats(bs->backing_hd);
|
||||
}
|
||||
|
||||
return s;
|
||||
@@ -385,22 +361,17 @@ BlockInfoList *qmp_query_block(Error **errp)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
BlockStatsList *qmp_query_blockstats(bool has_query_nodes,
|
||||
bool query_nodes,
|
||||
Error **errp)
|
||||
BlockStatsList *qmp_query_blockstats(Error **errp)
|
||||
{
|
||||
BlockStatsList *head = NULL, **p_next = &head;
|
||||
BlockDriverState *bs = NULL;
|
||||
|
||||
/* Just to be safe if query_nodes is not always initialized */
|
||||
query_nodes = has_query_nodes && query_nodes;
|
||||
|
||||
while ((bs = query_nodes ? bdrv_next_node(bs) : bdrv_next(bs))) {
|
||||
while ((bs = bdrv_next(bs))) {
|
||||
BlockStatsList *info = g_malloc0(sizeof(*info));
|
||||
AioContext *ctx = bdrv_get_aio_context(bs);
|
||||
|
||||
aio_context_acquire(ctx);
|
||||
info->value = bdrv_query_stats(bs, !query_nodes);
|
||||
info->value = bdrv_query_stats(bs);
|
||||
aio_context_release(ctx);
|
||||
|
||||
*p_next = info;
|
||||
@@ -414,7 +385,7 @@ BlockStatsList *qmp_query_blockstats(bool has_query_nodes,
|
||||
|
||||
static char *get_human_readable_size(char *buf, int buf_size, int64_t size)
|
||||
{
|
||||
static const char suffixes[NB_SUFFIXES] = {'K', 'M', 'G', 'T'};
|
||||
static const char suffixes[NB_SUFFIXES] = "KMGT";
|
||||
int64_t base;
|
||||
int i;
|
||||
|
||||
|
@@ -215,7 +215,7 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
    /* read the backing file name */
    if (header.backing_file_offset != 0) {
        len = header.backing_file_size;
        if (len > 1023 || len >= sizeof(bs->backing_file)) {
        if (len > 1023) {
            error_setg(errp, "Backing file name too long");
            ret = -EINVAL;
            goto fail;
@@ -253,9 +253,7 @@ static int qcow2_cache_find_entry_to_replace(Qcow2Cache *c)

        /* Give newer hits priority */
        /* TODO Check how to optimize the replacement strategy */
        if (c->entries[i].cache_hits > 1) {
            c->entries[i].cache_hits /= 2;
        }
        c->entries[i].cache_hits /= 2;
    }

    if (min_index == -1) {
@@ -1640,7 +1640,7 @@ static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
    for (i = 0; i < l1_size; i++) {
        uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
        bool l2_dirty = false;
        uint64_t l2_refcount;
        int l2_refcount;

        if (!l2_offset) {
            /* unallocated */
@@ -1651,14 +1651,6 @@ static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
            continue;
        }

        if (offset_into_cluster(s, l2_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                    PRIx64 " unaligned (L1 index: %#x)",
                                    l2_offset, i);
            ret = -EIO;
            goto fail;
        }

        if (is_active_l1) {
            /* get active L2 tables from cache */
            ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
@@ -1672,9 +1664,9 @@ static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
            goto fail;
        }

        ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                 &l2_refcount);
        if (ret < 0) {
        l2_refcount = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits);
        if (l2_refcount < 0) {
            ret = l2_refcount;
            goto fail;
        }

@@ -1707,8 +1699,7 @@ static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                /* For shared L2 tables, set the refcount accordingly (it is
                 * already 1 and needs to be l2_refcount) */
                ret = qcow2_update_cluster_refcount(bs,
                        offset >> s->cluster_bits,
                        refcount_diff(1, l2_refcount), false,
                        offset >> s->cluster_bits, l2_refcount - 1,
                        QCOW2_DISCARD_OTHER);
                if (ret < 0) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
@@ -1718,19 +1709,6 @@ static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                }
            }

            if (offset_into_cluster(s, offset)) {
                qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                        "%#" PRIx64 " unaligned (L2 offset: %#"
                                        PRIx64 ", L2 index: %#x)", offset,
                                        l2_offset, j);
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                ret = -EIO;
                goto fail;
            }

            ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size);
            if (ret < 0) {
                if (!preallocated) {
File diff suppressed because it is too large
@@ -702,7 +702,7 @@ int qcow2_snapshot_load_tmp(BlockDriverState *bs,
    sn = &s->snapshots[snapshot_index];

    /* Allocate and read in the snapshot's L1 table */
    if (sn->l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
    if (sn->l1_size > QCOW_MAX_L1_SIZE) {
        error_setg(errp, "Snapshot L1 table too large");
        return -EFBIG;
    }
156
block/qcow2.c
@@ -140,7 +140,6 @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
|
||||
return 3;
|
||||
}
|
||||
bs->backing_format[ext.len] = '\0';
|
||||
s->image_backing_format = g_strdup(bs->backing_format);
|
||||
#ifdef DEBUG_EXT
|
||||
printf("Qcow2: Got format extension %s\n", bs->backing_format);
|
||||
#endif
|
||||
@@ -678,16 +677,13 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
}
|
||||
|
||||
/* Check support for various header values */
|
||||
if (header.refcount_order > 6) {
|
||||
error_setg(errp, "Reference count entry width too large; may not "
|
||||
"exceed 64 bits");
|
||||
ret = -EINVAL;
|
||||
if (header.refcount_order != 4) {
|
||||
report_unsupported(bs, errp, "%d bit reference counts",
|
||||
1 << header.refcount_order);
|
||||
ret = -ENOTSUP;
|
||||
goto fail;
|
||||
}
|
||||
s->refcount_order = header.refcount_order;
|
||||
s->refcount_bits = 1 << s->refcount_order;
|
||||
s->refcount_max = UINT64_C(1) << (s->refcount_bits - 1);
|
||||
s->refcount_max += s->refcount_max - 1;
|
||||
|
||||
if (header.crypt_method > QCOW_CRYPT_AES) {
|
||||
error_setg(errp, "Unsupported encryption method: %" PRIu32,
|
||||
@@ -743,7 +739,7 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
}
|
||||
|
||||
/* read the level 1 table */
|
||||
if (header.l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
|
||||
if (header.l1_size > QCOW_MAX_L1_SIZE) {
|
||||
error_setg(errp, "Active L1 table too large");
|
||||
ret = -EFBIG;
|
||||
goto fail;
|
||||
@@ -872,8 +868,7 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
/* read the backing file name */
|
||||
if (header.backing_file_offset != 0) {
|
||||
len = header.backing_file_size;
|
||||
if (len > MIN(1023, s->cluster_size - header.backing_file_offset) ||
|
||||
len >= sizeof(bs->backing_file)) {
|
||||
if (len > MIN(1023, s->cluster_size - header.backing_file_offset)) {
|
||||
error_setg(errp, "Backing file name too long");
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
@@ -885,7 +880,6 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
goto fail;
|
||||
}
|
||||
bs->backing_file[len] = '\0';
|
||||
s->image_backing_file = g_strdup(bs->backing_file);
|
||||
}
|
||||
|
||||
/* Internal snapshots */
|
||||
@@ -1459,9 +1453,6 @@ static void qcow2_close(BlockDriverState *bs)
|
||||
g_free(s->unknown_header_fields);
|
||||
cleanup_unknown_header_ext(bs);
|
||||
|
||||
g_free(s->image_backing_file);
|
||||
g_free(s->image_backing_format);
|
||||
|
||||
g_free(s->cluster_cache);
|
||||
qemu_vfree(s->cluster_data);
|
||||
qcow2_refcount_close(bs);
|
||||
@@ -1627,10 +1618,9 @@ int qcow2_update_header(BlockDriverState *bs)
|
||||
}
|
||||
|
||||
/* Backing file format header extension */
|
||||
if (s->image_backing_format) {
|
||||
if (*bs->backing_format) {
|
||||
ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BACKING_FORMAT,
|
||||
s->image_backing_format,
|
||||
strlen(s->image_backing_format),
|
||||
bs->backing_format, strlen(bs->backing_format),
|
||||
buflen);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
@@ -1688,8 +1678,8 @@ int qcow2_update_header(BlockDriverState *bs)
|
||||
buflen -= ret;
|
||||
|
||||
/* Backing file name */
|
||||
if (s->image_backing_file) {
|
||||
size_t backing_file_len = strlen(s->image_backing_file);
|
||||
if (*bs->backing_file) {
|
||||
size_t backing_file_len = strlen(bs->backing_file);
|
||||
|
||||
if (buflen < backing_file_len) {
|
||||
ret = -ENOSPC;
|
||||
@@ -1697,7 +1687,7 @@ int qcow2_update_header(BlockDriverState *bs)
|
||||
}
|
||||
|
||||
/* Using strncpy is ok here, since buf is not NUL-terminated. */
|
||||
strncpy(buf, s->image_backing_file, buflen);
|
||||
strncpy(buf, bs->backing_file, buflen);
|
||||
|
||||
header->backing_file_offset = cpu_to_be64(buf - ((char*) header));
|
||||
header->backing_file_size = cpu_to_be32(backing_file_len);
|
||||
@@ -1718,17 +1708,9 @@ fail:
|
||||
static int qcow2_change_backing_file(BlockDriverState *bs,
|
||||
const char *backing_file, const char *backing_fmt)
|
||||
{
|
||||
BDRVQcowState *s = bs->opaque;
|
||||
|
||||
pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
|
||||
pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
|
||||
|
||||
g_free(s->image_backing_file);
|
||||
g_free(s->image_backing_format);
|
||||
|
||||
s->image_backing_file = backing_file ? g_strdup(bs->backing_file) : NULL;
|
||||
s->image_backing_format = backing_fmt ? g_strdup(bs->backing_format) : NULL;
|
||||
|
||||
return qcow2_update_header(bs);
|
||||
}
|
||||
|
||||
@@ -1797,7 +1779,7 @@ static int preallocate(BlockDriverState *bs)
|
||||
static int qcow2_create2(const char *filename, int64_t total_size,
|
||||
const char *backing_file, const char *backing_format,
|
||||
int flags, size_t cluster_size, PreallocMode prealloc,
|
||||
QemuOpts *opts, int version, int refcount_order,
|
||||
QemuOpts *opts, int version,
|
||||
Error **errp)
|
||||
{
|
||||
/* Calculate cluster_bits */
|
||||
@@ -1830,21 +1812,9 @@ static int qcow2_create2(const char *filename, int64_t total_size,
|
||||
int ret;
|
||||
|
||||
if (prealloc == PREALLOC_MODE_FULL || prealloc == PREALLOC_MODE_FALLOC) {
|
||||
/* Note: The following calculation does not need to be exact; if it is a
|
||||
* bit off, either some bytes will be "leaked" (which is fine) or we
|
||||
* will need to increase the file size by some bytes (which is fine,
|
||||
* too, as long as the bulk is allocated here). Therefore, using
|
||||
* floating point arithmetic is fine. */
|
||||
int64_t meta_size = 0;
|
||||
uint64_t nreftablee, nrefblocke, nl1e, nl2e;
|
||||
int64_t aligned_total_size = align_offset(total_size, cluster_size);
|
||||
int refblock_bits, refblock_size;
|
||||
/* refcount entry size in bytes */
|
||||
double rces = (1 << refcount_order) / 8.;
|
||||
|
||||
/* see qcow2_open() */
|
||||
refblock_bits = cluster_bits - (refcount_order - 3);
|
||||
refblock_size = 1 << refblock_bits;
|
||||
|
||||
/* header: 1 cluster */
|
||||
meta_size += cluster_size;
|
||||
@@ -1869,27 +1839,26 @@ static int qcow2_create2(const char *filename, int64_t total_size,
|
||||
* c = cluster size
|
||||
* y1 = number of refcount blocks entries
|
||||
* y2 = meta size including everything
|
||||
* rces = refcount entry size in bytes
|
||||
* then,
|
||||
* y1 = (y2 + a)/c
|
||||
* y2 = y1 * rces + y1 * rces * sizeof(u64) / c + m
|
||||
* y2 = y1 * sizeof(u16) + y1 * sizeof(u16) * sizeof(u64) / c + m
|
||||
* we can get y1:
|
||||
* y1 = (a + m) / (c - rces - rces * sizeof(u64) / c)
|
||||
* y1 = (a + m) / (c - sizeof(u16) - sizeof(u16) * sizeof(u64) / c)
|
||||
*/
|
||||
nrefblocke = (aligned_total_size + meta_size + cluster_size)
|
||||
/ (cluster_size - rces - rces * sizeof(uint64_t)
|
||||
/ cluster_size);
|
||||
meta_size += DIV_ROUND_UP(nrefblocke, refblock_size) * cluster_size;
|
||||
nrefblocke = (aligned_total_size + meta_size + cluster_size) /
|
||||
(cluster_size - sizeof(uint16_t) -
|
||||
1.0 * sizeof(uint16_t) * sizeof(uint64_t) / cluster_size);
|
||||
nrefblocke = align_offset(nrefblocke, cluster_size / sizeof(uint16_t));
|
||||
meta_size += nrefblocke * sizeof(uint16_t);
|
||||
|
||||
/* total size of refcount tables */
|
||||
nreftablee = nrefblocke / refblock_size;
|
||||
nreftablee = nrefblocke * sizeof(uint16_t) / cluster_size;
|
||||
nreftablee = align_offset(nreftablee, cluster_size / sizeof(uint64_t));
|
||||
meta_size += nreftablee * sizeof(uint64_t);
|
||||
|
||||
qemu_opt_set_number(opts, BLOCK_OPT_SIZE,
|
||||
aligned_total_size + meta_size, &error_abort);
|
||||
qemu_opt_set(opts, BLOCK_OPT_PREALLOC, PreallocMode_lookup[prealloc],
|
||||
&error_abort);
|
||||
aligned_total_size + meta_size);
|
||||
qemu_opt_set(opts, BLOCK_OPT_PREALLOC, PreallocMode_lookup[prealloc]);
|
||||
}
|
||||
|
||||
ret = bdrv_create_file(filename, opts, &local_err);
|
||||
@@ -1918,7 +1887,7 @@ static int qcow2_create2(const char *filename, int64_t total_size,
|
||||
.l1_size = cpu_to_be32(0),
|
||||
.refcount_table_offset = cpu_to_be64(cluster_size),
|
||||
.refcount_table_clusters = cpu_to_be32(1),
|
||||
.refcount_order = cpu_to_be32(refcount_order),
|
||||
.refcount_order = cpu_to_be32(4),
|
||||
.header_length = cpu_to_be32(sizeof(*header)),
|
||||
};
|
||||
|
||||
@@ -2037,8 +2006,6 @@ static int qcow2_create(const char *filename, QemuOpts *opts, Error **errp)
|
||||
size_t cluster_size = DEFAULT_CLUSTER_SIZE;
|
||||
PreallocMode prealloc;
|
||||
int version = 3;
|
||||
uint64_t refcount_bits = 16;
|
||||
int refcount_order;
|
||||
Error *local_err = NULL;
|
||||
int ret;
|
||||
|
||||
@@ -2093,28 +2060,8 @@ static int qcow2_create(const char *filename, QemuOpts *opts, Error **errp)
|
||||
goto finish;
|
||||
}
|
||||
|
||||
refcount_bits = qemu_opt_get_number_del(opts, BLOCK_OPT_REFCOUNT_BITS,
|
||||
refcount_bits);
|
||||
if (refcount_bits > 64 || !is_power_of_2(refcount_bits)) {
|
||||
error_setg(errp, "Refcount width must be a power of two and may not "
|
||||
"exceed 64 bits");
|
||||
ret = -EINVAL;
|
||||
goto finish;
|
||||
}
|
||||
|
||||
if (version < 3 && refcount_bits != 16) {
|
||||
error_setg(errp, "Different refcount widths than 16 bits require "
|
||||
"compatibility level 1.1 or above (use compat=1.1 or "
|
||||
"greater)");
|
||||
ret = -EINVAL;
|
||||
goto finish;
|
||||
}
|
||||
|
||||
refcount_order = ffs(refcount_bits) - 1;
|
||||
|
||||
ret = qcow2_create2(filename, size, backing_file, backing_fmt, flags,
|
||||
cluster_size, prealloc, opts, version, refcount_order,
|
||||
&local_err);
|
||||
cluster_size, prealloc, opts, version, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
}
|
||||
@@ -2530,8 +2477,7 @@ static ImageInfoSpecific *qcow2_get_specific_info(BlockDriverState *bs)
|
||||
};
|
||||
if (s->qcow_version == 2) {
|
||||
*spec_info->qcow2 = (ImageInfoSpecificQCow2){
|
||||
.compat = g_strdup("0.10"),
|
||||
.refcount_bits = s->refcount_bits,
|
||||
.compat = g_strdup("0.10"),
|
||||
};
|
||||
} else if (s->qcow_version == 3) {
|
||||
*spec_info->qcow2 = (ImageInfoSpecificQCow2){
|
||||
@@ -2542,7 +2488,6 @@ static ImageInfoSpecific *qcow2_get_specific_info(BlockDriverState *bs)
|
||||
.corrupt = s->incompatible_features &
|
||||
QCOW2_INCOMPAT_CORRUPT,
|
||||
.has_corrupt = true,
|
||||
.refcount_bits = s->refcount_bits,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -2575,12 +2520,15 @@ static int qcow2_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov,
|
||||
{
|
||||
BDRVQcowState *s = bs->opaque;
|
||||
int64_t total_sectors = bs->total_sectors;
|
||||
int growable = bs->growable;
|
||||
bool zero_beyond_eof = bs->zero_beyond_eof;
|
||||
int ret;
|
||||
|
||||
BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE);
|
||||
bs->growable = 1;
|
||||
bs->zero_beyond_eof = false;
|
||||
ret = bdrv_pwritev(bs, qcow2_vm_state_offset(s) + pos, qiov);
|
||||
bs->growable = growable;
|
||||
bs->zero_beyond_eof = zero_beyond_eof;
|
||||
|
||||
/* bdrv_co_do_writev will have increased the total_sectors value to include
|
||||
@@ -2595,12 +2543,15 @@ static int qcow2_load_vmstate(BlockDriverState *bs, uint8_t *buf,
|
||||
int64_t pos, int size)
|
||||
{
|
||||
BDRVQcowState *s = bs->opaque;
|
||||
int growable = bs->growable;
|
||||
bool zero_beyond_eof = bs->zero_beyond_eof;
|
||||
int ret;
|
||||
|
||||
BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD);
|
||||
bs->growable = 1;
|
||||
bs->zero_beyond_eof = false;
|
||||
ret = bdrv_pread(bs, qcow2_vm_state_offset(s) + pos, buf, size);
|
||||
bs->growable = growable;
|
||||
bs->zero_beyond_eof = zero_beyond_eof;
|
||||
|
||||
return ret;
|
||||
@@ -2695,8 +2646,8 @@ static int qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts,
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!strcmp(desc->name, BLOCK_OPT_COMPAT_LEVEL)) {
|
||||
compat = qemu_opt_get(opts, BLOCK_OPT_COMPAT_LEVEL);
|
||||
if (!strcmp(desc->name, "compat")) {
|
||||
compat = qemu_opt_get(opts, "compat");
|
||||
if (!compat) {
|
||||
/* preserve default */
|
||||
} else if (!strcmp(compat, "0.10")) {
|
||||
@@ -2707,37 +2658,33 @@ static int qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts,
|
||||
fprintf(stderr, "Unknown compatibility level %s.\n", compat);
|
||||
return -EINVAL;
|
||||
}
|
||||
} else if (!strcmp(desc->name, BLOCK_OPT_PREALLOC)) {
|
||||
} else if (!strcmp(desc->name, "preallocation")) {
|
||||
fprintf(stderr, "Cannot change preallocation mode.\n");
|
||||
return -ENOTSUP;
|
||||
} else if (!strcmp(desc->name, BLOCK_OPT_SIZE)) {
|
||||
new_size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0);
|
||||
} else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FILE)) {
|
||||
backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE);
|
||||
} else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FMT)) {
|
||||
backing_format = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT);
|
||||
} else if (!strcmp(desc->name, BLOCK_OPT_ENCRYPT)) {
|
||||
encrypt = qemu_opt_get_bool(opts, BLOCK_OPT_ENCRYPT,
|
||||
s->crypt_method);
|
||||
} else if (!strcmp(desc->name, "size")) {
|
||||
new_size = qemu_opt_get_size(opts, "size", 0);
|
||||
} else if (!strcmp(desc->name, "backing_file")) {
|
||||
backing_file = qemu_opt_get(opts, "backing_file");
|
||||
} else if (!strcmp(desc->name, "backing_fmt")) {
|
||||
backing_format = qemu_opt_get(opts, "backing_fmt");
|
||||
} else if (!strcmp(desc->name, "encryption")) {
|
||||
encrypt = qemu_opt_get_bool(opts, "encryption", s->crypt_method);
|
||||
if (encrypt != !!s->crypt_method) {
|
||||
fprintf(stderr, "Changing the encryption flag is not "
|
||||
"supported.\n");
|
||||
return -ENOTSUP;
|
||||
}
|
||||
} else if (!strcmp(desc->name, BLOCK_OPT_CLUSTER_SIZE)) {
|
||||
cluster_size = qemu_opt_get_size(opts, BLOCK_OPT_CLUSTER_SIZE,
|
||||
} else if (!strcmp(desc->name, "cluster_size")) {
|
||||
cluster_size = qemu_opt_get_size(opts, "cluster_size",
|
||||
cluster_size);
|
||||
if (cluster_size != s->cluster_size) {
|
||||
fprintf(stderr, "Changing the cluster size is not "
|
||||
"supported.\n");
|
||||
return -ENOTSUP;
|
||||
}
|
||||
} else if (!strcmp(desc->name, BLOCK_OPT_LAZY_REFCOUNTS)) {
|
||||
lazy_refcounts = qemu_opt_get_bool(opts, BLOCK_OPT_LAZY_REFCOUNTS,
|
||||
} else if (!strcmp(desc->name, "lazy_refcounts")) {
|
||||
lazy_refcounts = qemu_opt_get_bool(opts, "lazy_refcounts",
|
||||
lazy_refcounts);
|
||||
} else if (!strcmp(desc->name, BLOCK_OPT_REFCOUNT_BITS)) {
|
||||
error_report("Cannot change refcount entry width");
|
||||
return -ENOTSUP;
|
||||
} else {
|
||||
/* if this assertion fails, this probably means a new option was
|
||||
* added without having it covered here */
|
||||
@@ -2765,9 +2712,8 @@ static int qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts,
|
||||
}
|
||||
|
||||
if (backing_file || backing_format) {
|
||||
ret = qcow2_change_backing_file(bs,
|
||||
backing_file ?: s->image_backing_file,
|
||||
backing_format ?: s->image_backing_format);
|
||||
ret = qcow2_change_backing_file(bs, backing_file ?: bs->backing_file,
|
||||
backing_format ?: bs->backing_format);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
@@ -2908,12 +2854,6 @@ static QemuOptsList qcow2_create_opts = {
|
||||
.help = "Postpone refcount updates",
|
||||
.def_value_str = "off"
|
||||
},
|
||||
{
|
||||
.name = BLOCK_OPT_REFCOUNT_BITS,
|
||||
.type = QEMU_OPT_NUMBER,
|
||||
.help = "Width of a reference count entry in bits",
|
||||
.def_value_str = "16"
|
||||
},
|
||||
{ /* end of list */ }
|
||||
}
|
||||
};
|
||||
|
@@ -213,11 +213,6 @@ typedef struct Qcow2DiscardRegion {
|
||||
QTAILQ_ENTRY(Qcow2DiscardRegion) next;
|
||||
} Qcow2DiscardRegion;
|
||||
|
||||
typedef uint64_t Qcow2GetRefcountFunc(const void *refcount_array,
|
||||
uint64_t index);
|
||||
typedef void Qcow2SetRefcountFunc(void *refcount_array,
|
||||
uint64_t index, uint64_t value);
|
||||
|
||||
typedef struct BDRVQcowState {
|
||||
int cluster_bits;
|
||||
int cluster_size;
|
||||
@@ -263,11 +258,6 @@ typedef struct BDRVQcowState {
|
||||
int qcow_version;
|
||||
bool use_lazy_refcounts;
|
||||
int refcount_order;
|
||||
int refcount_bits;
|
||||
uint64_t refcount_max;
|
||||
|
||||
Qcow2GetRefcountFunc *get_refcount;
|
||||
Qcow2SetRefcountFunc *set_refcount;
|
||||
|
||||
bool discard_passthrough[QCOW2_DISCARD_MAX];
|
||||
|
||||
@@ -283,14 +273,19 @@ typedef struct BDRVQcowState {
|
||||
QLIST_HEAD(, Qcow2UnknownHeaderExtension) unknown_header_ext;
|
||||
QTAILQ_HEAD (, Qcow2DiscardRegion) discards;
|
||||
bool cache_discards;
|
||||
|
||||
/* Backing file path and format as stored in the image (this is not the
|
||||
* effective path/format, which may be the result of a runtime option
|
||||
* override) */
|
||||
char *image_backing_file;
|
||||
char *image_backing_format;
|
||||
} BDRVQcowState;
|
||||
|
||||
/* XXX: use std qcow open function ? */
|
||||
typedef struct QCowCreateState {
|
||||
int cluster_size;
|
||||
int cluster_bits;
|
||||
uint16_t *refcount_block;
|
||||
uint64_t *refcount_table;
|
||||
int64_t l1_table_offset;
|
||||
int64_t refcount_table_offset;
|
||||
int64_t refcount_block_offset;
|
||||
} QCowCreateState;
|
||||
|
||||
struct QCowAIOCB;
|
||||
|
||||
typedef struct Qcow2COWRegion {
|
||||
@@ -473,11 +468,6 @@ static inline uint64_t l2meta_cow_end(QCowL2Meta *m)
|
||||
+ (m->cow_end.nb_sectors << BDRV_SECTOR_BITS);
|
||||
}
|
||||
|
||||
static inline uint64_t refcount_diff(uint64_t r1, uint64_t r2)
|
||||
{
|
||||
return r1 > r2 ? r1 - r2 : r2 - r1;
|
||||
}
|
||||
|
||||
// FIXME Need qcow2_ prefix to global functions
|
||||
|
||||
/* qcow2.c functions */
|
||||
@@ -497,12 +487,10 @@ void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset,
|
||||
int qcow2_refcount_init(BlockDriverState *bs);
|
||||
void qcow2_refcount_close(BlockDriverState *bs);
|
||||
|
||||
int qcow2_get_refcount(BlockDriverState *bs, int64_t cluster_index,
|
||||
uint64_t *refcount);
|
||||
int qcow2_get_refcount(BlockDriverState *bs, int64_t cluster_index);
|
||||
|
||||
int qcow2_update_cluster_refcount(BlockDriverState *bs, int64_t cluster_index,
|
||||
uint64_t addend, bool decrease,
|
||||
enum qcow2_discard_type type);
|
||||
int addend, enum qcow2_discard_type type);
|
||||
|
||||
int64_t qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size);
|
||||
int qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
|
||||
|
@@ -440,11 +440,6 @@ static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
s->l2_mask = s->table_nelems - 1;
|
||||
s->l1_shift = s->l2_shift + ffs(s->table_nelems) - 1;
|
||||
|
||||
/* Header size calculation must not overflow uint32_t */
|
||||
if (s->header.header_size > UINT32_MAX / s->header.cluster_size) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if ((s->header.features & QED_F_BACKING_FILE)) {
|
||||
if ((uint64_t)s->header.backing_filename_offset +
|
||||
s->header.backing_filename_size >
|
||||
|
@@ -133,6 +133,7 @@ typedef struct QEDAIOCB {
|
||||
int bh_ret; /* final return status for completion bh */
|
||||
QSIMPLEQ_ENTRY(QEDAIOCB) next; /* next request */
|
||||
int flags; /* QED_AIOCB_* bits ORed together */
|
||||
bool *finished; /* signal for cancel completion */
|
||||
uint64_t end_pos; /* request end on block device, in bytes */
|
||||
|
||||
/* User scatter-gather list */
|
||||
|
@@ -41,7 +41,7 @@ BlockAIOCB *laio_submit(BlockDriverState *bs, void *aio_ctx, int fd,
|
||||
void laio_detach_aio_context(void *s, AioContext *old_context);
|
||||
void laio_attach_aio_context(void *s, AioContext *new_context);
|
||||
void laio_io_plug(BlockDriverState *bs, void *aio_ctx);
|
||||
void laio_io_unplug(BlockDriverState *bs, void *aio_ctx, bool unplug);
|
||||
int laio_io_unplug(BlockDriverState *bs, void *aio_ctx, bool unplug);
|
||||
#endif
|
||||
|
||||
#ifdef _WIN32
|
||||
|
@@ -56,15 +56,11 @@
|
||||
#include <linux/cdrom.h>
|
||||
#include <linux/fd.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/hdreg.h>
|
||||
#ifdef __s390__
|
||||
#include <asm/dasd.h>
|
||||
#endif
|
||||
#ifndef FS_NOCOW_FL
|
||||
#define FS_NOCOW_FL 0x00800000 /* Do not cow file */
|
||||
#endif
|
||||
#endif
|
||||
#if defined(CONFIG_FALLOCATE_PUNCH_HOLE) || defined(CONFIG_FALLOCATE_ZERO_RANGE)
|
||||
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
|
||||
#include <linux/falloc.h>
|
||||
#endif
|
||||
#if defined (__FreeBSD__) || defined(__FreeBSD_kernel__)
|
||||
@@ -151,7 +147,6 @@ typedef struct BDRVRawState {
|
||||
bool has_discard:1;
|
||||
bool has_write_zeroes:1;
|
||||
bool discard_zeroes:1;
|
||||
bool has_fallocate;
|
||||
bool needs_alignment;
|
||||
} BDRVRawState;
|
||||
|
||||
@@ -222,85 +217,11 @@ static int raw_normalize_devicepath(const char **filename)
|
||||
}
|
||||
#endif
/*
* Get logical block size via ioctl. On success store it in @sector_size_p.
*/
static int probe_logical_blocksize(int fd, unsigned int *sector_size_p)
{
unsigned int sector_size;
bool success = false;
errno = ENOTSUP;
/* Try a few ioctls to get the right size */
#ifdef BLKSSZGET
if (ioctl(fd, BLKSSZGET, &sector_size) >= 0) {
*sector_size_p = sector_size;
success = true;
}
#endif
#ifdef DKIOCGETBLOCKSIZE
if (ioctl(fd, DKIOCGETBLOCKSIZE, &sector_size) >= 0) {
*sector_size_p = sector_size;
success = true;
}
#endif
#ifdef DIOCGSECTORSIZE
if (ioctl(fd, DIOCGSECTORSIZE, &sector_size) >= 0) {
*sector_size_p = sector_size;
success = true;
}
#endif
return success ? 0 : -errno;
}
|
||||
|
||||
/**
|
||||
* Get physical block size of @fd.
|
||||
* On success, store it in @blk_size and return 0.
|
||||
* On failure, return -errno.
|
||||
*/
|
||||
static int probe_physical_blocksize(int fd, unsigned int *blk_size)
|
||||
{
|
||||
#ifdef BLKPBSZGET
|
||||
if (ioctl(fd, BLKPBSZGET, blk_size) < 0) {
|
||||
return -errno;
|
||||
}
|
||||
return 0;
|
||||
#else
|
||||
return -ENOTSUP;
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Check if read is allowed with given memory buffer and length.
|
||||
*
|
||||
* This function is used to check O_DIRECT memory buffer and request alignment.
|
||||
*/
|
||||
static bool raw_is_io_aligned(int fd, void *buf, size_t len)
|
||||
{
|
||||
ssize_t ret = pread(fd, buf, len, 0);
|
||||
|
||||
if (ret >= 0) {
|
||||
return true;
|
||||
}
|
||||
|
||||
#ifdef __linux__
|
||||
/* The Linux kernel returns EINVAL for misaligned O_DIRECT reads. Ignore
|
||||
* other errors (e.g. real I/O error), which could happen on a failed
|
||||
* drive, since we only care about probing alignment.
|
||||
*/
|
||||
if (errno != EINVAL) {
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static void raw_probe_alignment(BlockDriverState *bs, int fd, Error **errp)
|
||||
{
|
||||
BDRVRawState *s = bs->opaque;
|
||||
char *buf;
|
||||
unsigned int sector_size;
|
||||
|
||||
/* For /dev/sg devices the alignment is not really used.
|
||||
With buffered I/O, we don't have any restrictions. */
|
||||
@@ -310,12 +231,25 @@ static void raw_probe_alignment(BlockDriverState *bs, int fd, Error **errp)
|
||||
return;
|
||||
}
|
||||
|
||||
/* Try a few ioctls to get the right size */
|
||||
bs->request_alignment = 0;
|
||||
s->buf_align = 0;
|
||||
/* Let's try to use the logical blocksize for the alignment. */
|
||||
if (probe_logical_blocksize(fd, &bs->request_alignment) < 0) {
|
||||
bs->request_alignment = 0;
|
||||
|
||||
#ifdef BLKSSZGET
|
||||
if (ioctl(fd, BLKSSZGET, &sector_size) >= 0) {
|
||||
bs->request_alignment = sector_size;
|
||||
}
|
||||
#endif
|
||||
#ifdef DKIOCGETBLOCKSIZE
|
||||
if (ioctl(fd, DKIOCGETBLOCKSIZE, &sector_size) >= 0) {
|
||||
bs->request_alignment = sector_size;
|
||||
}
|
||||
#endif
|
||||
#ifdef DIOCGSECTORSIZE
|
||||
if (ioctl(fd, DIOCGSECTORSIZE, &sector_size) >= 0) {
|
||||
bs->request_alignment = sector_size;
|
||||
}
|
||||
#endif
|
||||
#ifdef CONFIG_XFS
|
||||
if (s->is_xfs) {
|
||||
struct dioattr da;
|
||||
@@ -332,7 +266,7 @@ static void raw_probe_alignment(BlockDriverState *bs, int fd, Error **errp)
|
||||
size_t align;
|
||||
buf = qemu_memalign(MAX_BLOCKSIZE, 2 * MAX_BLOCKSIZE);
|
||||
for (align = 512; align <= MAX_BLOCKSIZE; align <<= 1) {
|
||||
if (raw_is_io_aligned(fd, buf + align, MAX_BLOCKSIZE)) {
|
||||
if (pread(fd, buf + align, MAX_BLOCKSIZE, 0) >= 0) {
|
||||
s->buf_align = align;
|
||||
break;
|
||||
}
|
||||
@@ -344,7 +278,7 @@ static void raw_probe_alignment(BlockDriverState *bs, int fd, Error **errp)
|
||||
size_t align;
|
||||
buf = qemu_memalign(s->buf_align, MAX_BLOCKSIZE);
|
||||
for (align = 512; align <= MAX_BLOCKSIZE; align <<= 1) {
|
||||
if (raw_is_io_aligned(fd, buf, align)) {
|
||||
if (pread(fd, buf, align, 0) >= 0) {
|
||||
bs->request_alignment = align;
|
||||
break;
|
||||
}
|
||||
@@ -503,14 +437,6 @@ static int raw_open_common(BlockDriverState *bs, QDict *options,
|
||||
error_setg_errno(errp, -ret, "Could not set AIO state");
|
||||
goto fail;
|
||||
}
|
||||
if (!s->use_aio && (bdrv_flags & BDRV_O_NATIVE_AIO)) {
|
||||
error_printf("WARNING: aio=native was specified for '%s', but "
|
||||
"it requires cache.direct=on, which was not "
|
||||
"specified. Falling back to aio=threads.\n"
|
||||
" This will become an error condition in "
|
||||
"future QEMU versions.\n",
|
||||
bs->filename);
|
||||
}
|
||||
#endif
|
||||
|
||||
s->has_discard = true;
|
||||
@@ -526,7 +452,6 @@ static int raw_open_common(BlockDriverState *bs, QDict *options,
|
||||
}
|
||||
if (S_ISREG(st.st_mode)) {
|
||||
s->discard_zeroes = true;
|
||||
s->has_fallocate = true;
|
||||
}
|
||||
if (S_ISBLK(st.st_mode)) {
|
||||
#ifdef BLKDISCARDZEROES
|
||||
@@ -728,86 +653,6 @@ static void raw_refresh_limits(BlockDriverState *bs, Error **errp)
|
||||
bs->bl.opt_mem_alignment = s->buf_align;
|
||||
}
|
||||
|
||||
static int check_for_dasd(int fd)
|
||||
{
|
||||
#ifdef BIODASDINFO2
|
||||
struct dasd_information2_t info = {0};
|
||||
|
||||
return ioctl(fd, BIODASDINFO2, &info);
|
||||
#else
|
||||
return -1;
|
||||
#endif
|
||||
}
|
||||
|
||||
/**
|
||||
* Try to get @bs's logical and physical block size.
|
||||
* On success, store them in @bsz and return zero.
|
||||
* On failure, return negative errno.
|
||||
*/
|
||||
static int hdev_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
|
||||
{
|
||||
BDRVRawState *s = bs->opaque;
|
||||
int ret;
|
||||
|
||||
/* If DASD, get blocksizes */
|
||||
if (check_for_dasd(s->fd) < 0) {
|
||||
return -ENOTSUP;
|
||||
}
|
||||
ret = probe_logical_blocksize(s->fd, &bsz->log);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
return probe_physical_blocksize(s->fd, &bsz->phys);
|
||||
}
|
||||
|
||||
/**
|
||||
* Try to get @bs's geometry: cyls, heads, sectors.
|
||||
* On success, store them in @geo and return 0.
|
||||
* On failure return -errno.
|
||||
* (Allows block driver to assign default geometry values that guest sees)
|
||||
*/
|
||||
#ifdef __linux__
|
||||
static int hdev_probe_geometry(BlockDriverState *bs, HDGeometry *geo)
|
||||
{
|
||||
BDRVRawState *s = bs->opaque;
|
||||
struct hd_geometry ioctl_geo = {0};
|
||||
uint32_t blksize;
|
||||
|
||||
/* If DASD, get its geometry */
|
||||
if (check_for_dasd(s->fd) < 0) {
|
||||
return -ENOTSUP;
|
||||
}
|
||||
if (ioctl(s->fd, HDIO_GETGEO, &ioctl_geo) < 0) {
|
||||
return -errno;
|
||||
}
|
||||
/* HDIO_GETGEO may return success even though geo contains zeros
|
||||
(e.g. certain multipath setups) */
|
||||
if (!ioctl_geo.heads || !ioctl_geo.sectors || !ioctl_geo.cylinders) {
|
||||
return -ENOTSUP;
|
||||
}
|
||||
/* Do not return a geometry for partition */
|
||||
if (ioctl_geo.start != 0) {
|
||||
return -ENOTSUP;
|
||||
}
|
||||
geo->heads = ioctl_geo.heads;
|
||||
geo->sectors = ioctl_geo.sectors;
|
||||
if (!probe_physical_blocksize(s->fd, &blksize)) {
|
||||
/* overwrite cyls: HDIO_GETGEO result is incorrect for big drives */
|
||||
geo->cylinders = bdrv_nb_sectors(bs) / (blksize / BDRV_SECTOR_SIZE)
|
||||
/ (geo->heads * geo->sectors);
|
||||
return 0;
|
||||
}
|
||||
geo->cylinders = ioctl_geo.cylinders;
|
||||
|
||||
return 0;
|
||||
}
|
||||
#else /* __linux__ */
|
||||
static int hdev_probe_geometry(BlockDriverState *bs, HDGeometry *geo)
|
||||
{
|
||||
return -ENOTSUP;
|
||||
}
|
||||
#endif
|
||||
|
||||
static ssize_t handle_aiocb_ioctl(RawPosixAIOData *aiocb)
|
||||
{
|
||||
int ret;
|
||||
@@ -1048,112 +893,42 @@ static int xfs_discard(BDRVRawState *s, int64_t offset, uint64_t bytes)
|
||||
}
|
||||
#endif
|
||||
|
||||
static int translate_err(int err)
|
||||
static ssize_t handle_aiocb_write_zeroes(RawPosixAIOData *aiocb)
|
||||
{
|
||||
if (err == -ENODEV || err == -ENOSYS || err == -EOPNOTSUPP ||
|
||||
err == -ENOTTY) {
|
||||
err = -ENOTSUP;
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_FALLOCATE
|
||||
static int do_fallocate(int fd, int mode, off_t offset, off_t len)
|
||||
{
|
||||
do {
|
||||
if (fallocate(fd, mode, offset, len) == 0) {
|
||||
return 0;
|
||||
}
|
||||
} while (errno == EINTR);
|
||||
return translate_err(-errno);
|
||||
}
|
||||
#endif
|
||||
|
||||
static ssize_t handle_aiocb_write_zeroes_block(RawPosixAIOData *aiocb)
|
||||
{
|
||||
int ret = -ENOTSUP;
|
||||
int ret = -EOPNOTSUPP;
|
||||
BDRVRawState *s = aiocb->bs->opaque;
|
||||
|
||||
if (!s->has_write_zeroes) {
|
||||
if (s->has_write_zeroes == 0) {
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
if (aiocb->aio_type & QEMU_AIO_BLKDEV) {
|
||||
#ifdef BLKZEROOUT
|
||||
do {
|
||||
uint64_t range[2] = { aiocb->aio_offset, aiocb->aio_nbytes };
|
||||
if (ioctl(aiocb->aio_fildes, BLKZEROOUT, range) == 0) {
|
||||
return 0;
|
||||
}
|
||||
} while (errno == EINTR);
|
||||
do {
|
||||
uint64_t range[2] = { aiocb->aio_offset, aiocb->aio_nbytes };
|
||||
if (ioctl(aiocb->aio_fildes, BLKZEROOUT, range) == 0) {
|
||||
return 0;
|
||||
}
|
||||
} while (errno == EINTR);
|
||||
|
||||
ret = translate_err(-errno);
|
||||
ret = -errno;
|
||||
#endif
|
||||
} else {
|
||||
#ifdef CONFIG_XFS
|
||||
if (s->is_xfs) {
|
||||
return xfs_write_zeroes(s, aiocb->aio_offset, aiocb->aio_nbytes);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
if (ret == -ENOTSUP) {
|
||||
if (ret == -ENODEV || ret == -ENOSYS || ret == -EOPNOTSUPP ||
|
||||
ret == -ENOTTY) {
|
||||
s->has_write_zeroes = false;
|
||||
ret = -ENOTSUP;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t handle_aiocb_write_zeroes(RawPosixAIOData *aiocb)
|
||||
{
|
||||
#if defined(CONFIG_FALLOCATE) || defined(CONFIG_XFS)
|
||||
BDRVRawState *s = aiocb->bs->opaque;
|
||||
#endif
|
||||
|
||||
if (aiocb->aio_type & QEMU_AIO_BLKDEV) {
|
||||
return handle_aiocb_write_zeroes_block(aiocb);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_XFS
|
||||
if (s->is_xfs) {
|
||||
return xfs_write_zeroes(s, aiocb->aio_offset, aiocb->aio_nbytes);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_FALLOCATE_ZERO_RANGE
|
||||
if (s->has_write_zeroes) {
|
||||
int ret = do_fallocate(s->fd, FALLOC_FL_ZERO_RANGE,
|
||||
aiocb->aio_offset, aiocb->aio_nbytes);
|
||||
if (ret == 0 || ret != -ENOTSUP) {
|
||||
return ret;
|
||||
}
|
||||
s->has_write_zeroes = false;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
|
||||
if (s->has_discard && s->has_fallocate) {
|
||||
int ret = do_fallocate(s->fd,
|
||||
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
|
||||
aiocb->aio_offset, aiocb->aio_nbytes);
|
||||
if (ret == 0) {
|
||||
ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes);
|
||||
if (ret == 0 || ret != -ENOTSUP) {
|
||||
return ret;
|
||||
}
|
||||
s->has_fallocate = false;
|
||||
} else if (ret != -ENOTSUP) {
|
||||
return ret;
|
||||
} else {
|
||||
s->has_discard = false;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_FALLOCATE
|
||||
if (s->has_fallocate && aiocb->aio_offset >= bdrv_getlength(aiocb->bs)) {
|
||||
int ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes);
|
||||
if (ret == 0 || ret != -ENOTSUP) {
|
||||
return ret;
|
||||
}
|
||||
s->has_fallocate = false;
|
||||
}
|
||||
#endif
|
||||
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
static ssize_t handle_aiocb_discard(RawPosixAIOData *aiocb)
|
||||
{
|
||||
int ret = -EOPNOTSUPP;
|
||||
@@ -1182,14 +957,21 @@ static ssize_t handle_aiocb_discard(RawPosixAIOData *aiocb)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
|
||||
ret = do_fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
|
||||
aiocb->aio_offset, aiocb->aio_nbytes);
|
||||
do {
|
||||
if (fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
|
||||
aiocb->aio_offset, aiocb->aio_nbytes) == 0) {
|
||||
return 0;
|
||||
}
|
||||
} while (errno == EINTR);
|
||||
|
||||
ret = -errno;
|
||||
#endif
|
||||
}
|
||||
|
||||
ret = translate_err(ret);
|
||||
if (ret == -ENOTSUP) {
|
||||
if (ret == -ENODEV || ret == -ENOSYS || ret == -EOPNOTSUPP ||
|
||||
ret == -ENOTTY) {
|
||||
s->has_discard = false;
|
||||
ret = -ENOTSUP;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
@@ -1202,7 +984,7 @@ static int aio_worker(void *arg)
|
||||
switch (aiocb->aio_type & QEMU_AIO_TYPE_MASK) {
|
||||
case QEMU_AIO_READ:
|
||||
ret = handle_aiocb_rw(aiocb);
|
||||
if (ret >= 0 && ret < aiocb->aio_nbytes) {
|
||||
if (ret >= 0 && ret < aiocb->aio_nbytes && aiocb->bs->growable) {
|
||||
iov_memset(aiocb->aio_iov, aiocb->aio_niov, ret,
|
||||
0, aiocb->aio_nbytes - ret);
|
||||
|
||||
@@ -1530,20 +1312,7 @@ again:
|
||||
if (size == 0)
|
||||
#endif
|
||||
#if defined(__APPLE__) && defined(__MACH__)
|
||||
{
|
||||
uint64_t sectors = 0;
|
||||
uint32_t sector_size = 0;
|
||||
|
||||
if (ioctl(fd, DKIOCGETBLOCKCOUNT, &sectors) == 0
|
||||
&& ioctl(fd, DKIOCGETBLOCKSIZE, &sector_size) == 0) {
|
||||
size = sectors * sector_size;
|
||||
} else {
|
||||
size = lseek(fd, 0LL, SEEK_END);
|
||||
if (size < 0) {
|
||||
return -errno;
|
||||
}
|
||||
}
|
||||
}
|
||||
size = LLONG_MAX;
|
||||
#else
|
||||
size = lseek(fd, 0LL, SEEK_END);
|
||||
if (size < 0) {
|
||||
@@ -2154,7 +1923,7 @@ static int fd_open(BlockDriverState *bs)
|
||||
return 0;
|
||||
last_media_present = (s->fd >= 0);
|
||||
if (s->fd >= 0 &&
|
||||
(qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->fd_open_time) >= FD_OPEN_TIMEOUT) {
|
||||
(get_clock() - s->fd_open_time) >= FD_OPEN_TIMEOUT) {
|
||||
qemu_close(s->fd);
|
||||
s->fd = -1;
|
||||
#ifdef DEBUG_FLOPPY
|
||||
@@ -2163,7 +1932,7 @@ static int fd_open(BlockDriverState *bs)
|
||||
}
|
||||
if (s->fd < 0) {
|
||||
if (s->fd_got_error &&
|
||||
(qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->fd_error_time) < FD_OPEN_TIMEOUT) {
|
||||
(get_clock() - s->fd_error_time) < FD_OPEN_TIMEOUT) {
|
||||
#ifdef DEBUG_FLOPPY
|
||||
printf("No floppy (open delayed)\n");
|
||||
#endif
|
||||
@@ -2171,7 +1940,7 @@ static int fd_open(BlockDriverState *bs)
|
||||
}
|
||||
s->fd = qemu_open(bs->filename, s->open_flags & ~O_NONBLOCK);
|
||||
if (s->fd < 0) {
|
||||
s->fd_error_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
|
||||
s->fd_error_time = get_clock();
|
||||
s->fd_got_error = 1;
|
||||
if (last_media_present)
|
||||
s->fd_media_changed = 1;
|
||||
@@ -2186,7 +1955,7 @@ static int fd_open(BlockDriverState *bs)
|
||||
}
|
||||
if (!last_media_present)
|
||||
s->fd_media_changed = 1;
|
||||
s->fd_open_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
|
||||
s->fd_open_time = get_clock();
|
||||
s->fd_got_error = 0;
|
||||
return 0;
|
||||
}
|
||||
@@ -2349,8 +2118,6 @@ static BlockDriver bdrv_host_device = {
|
||||
.bdrv_get_info = raw_get_info,
|
||||
.bdrv_get_allocated_file_size
|
||||
= raw_get_allocated_file_size,
|
||||
.bdrv_probe_blocksizes = hdev_probe_blocksizes,
|
||||
.bdrv_probe_geometry = hdev_probe_geometry,
|
||||
|
||||
.bdrv_detach_aio_context = raw_detach_aio_context,
|
||||
.bdrv_attach_aio_context = raw_attach_aio_context,
|
||||
@@ -2395,8 +2162,6 @@ static int floppy_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
s->fd = -1;
|
||||
s->fd_media_changed = 1;
|
||||
|
||||
error_report("Host floppy pass-through is deprecated");
|
||||
error_printf("Support for it will be removed in a future release.\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -101,7 +101,7 @@ static int aio_worker(void *arg)
|
||||
switch (aiocb->aio_type & QEMU_AIO_TYPE_MASK) {
|
||||
case QEMU_AIO_READ:
|
||||
count = handle_aiocb_rw(aiocb);
|
||||
if (count < aiocb->aio_nbytes) {
|
||||
if (count < aiocb->aio_nbytes && aiocb->bs->growable) {
|
||||
/* A short read means that we have reached EOF. Pad the buffer
|
||||
* with zeros for bytes after EOF. */
|
||||
iov_memset(aiocb->aio_iov, aiocb->aio_niov, count,
|
||||
|
@@ -58,58 +58,8 @@ static int coroutine_fn raw_co_readv(BlockDriverState *bs, int64_t sector_num,
|
||||
static int coroutine_fn raw_co_writev(BlockDriverState *bs, int64_t sector_num,
|
||||
int nb_sectors, QEMUIOVector *qiov)
|
||||
{
|
||||
void *buf = NULL;
|
||||
BlockDriver *drv;
|
||||
QEMUIOVector local_qiov;
|
||||
int ret;
|
||||
|
||||
if (bs->probed && sector_num == 0) {
|
||||
/* As long as these conditions are true, we can't get partial writes to
|
||||
* the probe buffer and can just directly check the request. */
|
||||
QEMU_BUILD_BUG_ON(BLOCK_PROBE_BUF_SIZE != 512);
|
||||
QEMU_BUILD_BUG_ON(BDRV_SECTOR_SIZE != 512);
|
||||
|
||||
if (nb_sectors == 0) {
|
||||
/* qemu_iovec_to_buf() would fail, but we want to return success
|
||||
* instead of -EINVAL in this case. */
|
||||
return 0;
|
||||
}
|
||||
|
||||
buf = qemu_try_blockalign(bs->file, 512);
|
||||
if (!buf) {
|
||||
ret = -ENOMEM;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
ret = qemu_iovec_to_buf(qiov, 0, buf, 512);
|
||||
if (ret != 512) {
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
drv = bdrv_probe_all(buf, 512, NULL);
|
||||
if (drv != bs->drv) {
|
||||
ret = -EPERM;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* Use the checked buffer, a malicious guest might be overwriting its
|
||||
* original buffer in the background. */
|
||||
qemu_iovec_init(&local_qiov, qiov->niov + 1);
|
||||
qemu_iovec_add(&local_qiov, buf, 512);
|
||||
qemu_iovec_concat(&local_qiov, qiov, 512, qiov->size - 512);
|
||||
qiov = &local_qiov;
|
||||
}
|
||||
|
||||
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
|
||||
ret = bdrv_co_writev(bs->file, sector_num, nb_sectors, qiov);
|
||||
|
||||
fail:
|
||||
if (qiov == &local_qiov) {
|
||||
qemu_iovec_destroy(&local_qiov);
|
||||
}
|
||||
qemu_vfree(buf);
|
||||
return ret;
|
||||
return bdrv_co_writev(bs->file, sector_num, nb_sectors, qiov);
|
||||
}
|
||||
|
||||
static int64_t coroutine_fn raw_co_get_block_status(BlockDriverState *bs,
|
||||
@@ -208,18 +158,6 @@ static int raw_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
Error **errp)
|
||||
{
|
||||
bs->sg = bs->file->sg;
|
||||
|
||||
if (bs->probed && !bdrv_is_read_only(bs)) {
|
||||
fprintf(stderr,
|
||||
"WARNING: Image format was not specified for '%s' and probing "
|
||||
"guessed raw.\n"
|
||||
" Automatically detecting the format is dangerous for "
|
||||
"raw images, write operations on block 0 will be restricted.\n"
|
||||
" Specify the 'raw' format explicitly to remove the "
|
||||
"restrictions.\n",
|
||||
bs->file->filename);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -235,16 +173,6 @@ static int raw_probe(const uint8_t *buf, int buf_size, const char *filename)
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int raw_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
|
||||
{
|
||||
return bdrv_probe_blocksizes(bs->file, bsz);
|
||||
}
|
||||
|
||||
static int raw_probe_geometry(BlockDriverState *bs, HDGeometry *geo)
|
||||
{
|
||||
return bdrv_probe_geometry(bs->file, geo);
|
||||
}
|
||||
|
||||
BlockDriver bdrv_raw = {
|
||||
.format_name = "raw",
|
||||
.bdrv_probe = &raw_probe,
|
||||
@@ -262,8 +190,6 @@ BlockDriver bdrv_raw = {
|
||||
.has_variable_length = true,
|
||||
.bdrv_get_info = &raw_get_info,
|
||||
.bdrv_refresh_limits = &raw_refresh_limits,
|
||||
.bdrv_probe_blocksizes = &raw_probe_blocksizes,
|
||||
.bdrv_probe_geometry = &raw_probe_geometry,
|
||||
.bdrv_is_inserted = &raw_is_inserted,
|
||||
.bdrv_media_changed = &raw_media_changed,
|
||||
.bdrv_eject = &raw_eject,
|
||||
|
@@ -459,7 +459,7 @@ static int qemu_rbd_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
clientname = qemu_rbd_parse_clientname(conf, clientname_buf);
|
||||
r = rados_create(&s->cluster, clientname);
|
||||
if (r < 0) {
|
||||
error_setg(errp, "error initializing");
|
||||
error_setg(&local_err, "error initializing");
|
||||
goto failed_opts;
|
||||
}
|
||||
|
||||
@@ -495,19 +495,19 @@ static int qemu_rbd_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
|
||||
r = rados_connect(s->cluster);
|
||||
if (r < 0) {
|
||||
error_setg(errp, "error connecting");
|
||||
error_setg(&local_err, "error connecting");
|
||||
goto failed_shutdown;
|
||||
}
|
||||
|
||||
r = rados_ioctx_create(s->cluster, pool, &s->io_ctx);
|
||||
if (r < 0) {
|
||||
error_setg(errp, "error opening pool %s", pool);
|
||||
error_setg(&local_err, "error opening pool %s", pool);
|
||||
goto failed_shutdown;
|
||||
}
|
||||
|
||||
r = rbd_open(s->io_ctx, s->name, &s->image, s->snap);
|
||||
if (r < 0) {
|
||||
error_setg(errp, "error reading header from %s", s->name);
|
||||
error_setg(&local_err, "error reading header from %s", s->name);
|
||||
goto failed_open;
|
||||
}
212
block/sheepdog.c
@@ -37,7 +37,6 @@
|
||||
#define SD_OP_READ_VDIS 0x15
|
||||
#define SD_OP_FLUSH_VDI 0x16
|
||||
#define SD_OP_DEL_VDI 0x17
|
||||
#define SD_OP_GET_CLUSTER_DEFAULT 0x18
|
||||
|
||||
#define SD_FLAG_CMD_WRITE 0x01
|
||||
#define SD_FLAG_CMD_COW 0x02
|
||||
@@ -92,7 +91,6 @@
|
||||
#define SD_NR_VDIS (1U << 24)
|
||||
#define SD_DATA_OBJ_SIZE (UINT64_C(1) << 22)
|
||||
#define SD_MAX_VDI_SIZE (SD_DATA_OBJ_SIZE * MAX_DATA_OBJS)
|
||||
#define SD_DEFAULT_BLOCK_SIZE_SHIFT 22
|
||||
/*
|
||||
* For erasure coding, we use at most SD_EC_MAX_STRIP for data strips and
|
||||
* (SD_EC_MAX_STRIP - 1) for parity strips
|
||||
@@ -169,8 +167,7 @@ typedef struct SheepdogVdiReq {
|
||||
uint32_t base_vdi_id;
|
||||
uint8_t copies;
|
||||
uint8_t copy_policy;
|
||||
uint8_t store_policy;
|
||||
uint8_t block_size_shift;
|
||||
uint8_t reserved[2];
|
||||
uint32_t snapid;
|
||||
uint32_t type;
|
||||
uint32_t pad[2];
|
||||
@@ -189,21 +186,6 @@ typedef struct SheepdogVdiRsp {
|
||||
uint32_t pad[5];
|
||||
} SheepdogVdiRsp;
|
||||
|
||||
typedef struct SheepdogClusterRsp {
|
||||
uint8_t proto_ver;
|
||||
uint8_t opcode;
|
||||
uint16_t flags;
|
||||
uint32_t epoch;
|
||||
uint32_t id;
|
||||
uint32_t data_length;
|
||||
uint32_t result;
|
||||
uint8_t nr_copies;
|
||||
uint8_t copy_policy;
|
||||
uint8_t block_size_shift;
|
||||
uint8_t __pad1;
|
||||
uint32_t __pad2[6];
|
||||
} SheepdogClusterRsp;
|
||||
|
||||
typedef struct SheepdogInode {
|
||||
char name[SD_MAX_VDI_LEN];
|
||||
char tag[SD_MAX_VDI_TAG_LEN];
|
||||
@@ -545,7 +527,6 @@ static SheepdogAIOCB *sd_aio_setup(BlockDriverState *bs, QEMUIOVector *qiov,
|
||||
return acb;
|
||||
}
|
||||
|
||||
/* Return -EIO in case of error, file descriptor on success */
|
||||
static int connect_to_sdog(BDRVSheepdogState *s, Error **errp)
|
||||
{
|
||||
int fd;
|
||||
@@ -565,14 +546,11 @@ static int connect_to_sdog(BDRVSheepdogState *s, Error **errp)
|
||||
|
||||
if (fd >= 0) {
|
||||
qemu_set_nonblock(fd);
|
||||
} else {
|
||||
fd = -EIO;
|
||||
}
|
||||
|
||||
return fd;
|
||||
}
|
||||
|
||||
/* Return 0 on success and -errno in case of error */
|
||||
static coroutine_fn int send_co_req(int sockfd, SheepdogReq *hdr, void *data,
|
||||
unsigned int *wlen)
|
||||
{
|
||||
@@ -581,13 +559,11 @@ static coroutine_fn int send_co_req(int sockfd, SheepdogReq *hdr, void *data,
|
||||
ret = qemu_co_send(sockfd, hdr, sizeof(*hdr));
|
||||
if (ret != sizeof(*hdr)) {
|
||||
error_report("failed to send a req, %s", strerror(errno));
|
||||
ret = -socket_error();
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = qemu_co_send(sockfd, data, *wlen);
|
||||
if (ret != *wlen) {
|
||||
ret = -socket_error();
|
||||
error_report("failed to send a req, %s", strerror(errno));
|
||||
}
|
||||
|
||||
@@ -662,11 +638,6 @@ out:
|
||||
srco->finished = true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Send the request to the sheep in a synchronous manner.
|
||||
*
|
||||
* Return 0 on success, -errno in case of error.
|
||||
*/
|
||||
static int do_req(int sockfd, AioContext *aio_context, SheepdogReq *hdr,
|
||||
void *data, unsigned int *wlen, unsigned int *rlen)
|
||||
{
|
||||
@@ -755,7 +726,8 @@ static coroutine_fn void reconnect_to_sdog(void *opaque)
|
||||
s->fd = get_sheep_fd(s, &local_err);
|
||||
if (s->fd < 0) {
|
||||
DPRINTF("Wait for connection to be established\n");
|
||||
error_report_err(local_err);
|
||||
error_report("%s", error_get_pretty(local_err));
|
||||
error_free(local_err);
|
||||
co_aio_sleep_ns(bdrv_get_aio_context(s->bs), QEMU_CLOCK_REALTIME,
|
||||
1000000000ULL);
|
||||
}
|
||||
@@ -1311,7 +1283,8 @@ static int reload_inode(BDRVSheepdogState *s, uint32_t snapid, const char *tag)
|
||||
|
||||
fd = connect_to_sdog(s, &local_err);
|
||||
if (fd < 0) {
|
||||
error_report_err(local_err);
|
||||
error_report("%s", error_get_pretty(local_err));;
|
||||
error_free(local_err);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
@@ -1319,7 +1292,8 @@ static int reload_inode(BDRVSheepdogState *s, uint32_t snapid, const char *tag)
|
||||
|
||||
ret = find_vdi_name(s, s->name, snapid, tag, &vid, false, &local_err);
|
||||
if (ret) {
|
||||
error_report_err(local_err);
|
||||
error_report("%s", error_get_pretty(local_err));;
|
||||
error_free(local_err);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -1570,7 +1544,6 @@ static int do_sd_create(BDRVSheepdogState *s, uint32_t *vdi_id, int snapshot,
|
||||
hdr.vdi_size = s->inode.vdi_size;
|
||||
hdr.copy_policy = s->inode.copy_policy;
|
||||
hdr.copies = s->inode.nr_copies;
|
||||
hdr.block_size_shift = s->inode.block_size_shift;
|
||||
|
||||
ret = do_req(fd, s->aio_context, (SheepdogReq *)&hdr, buf, &wlen, &rlen);
|
||||
|
||||
@@ -1596,12 +1569,9 @@ static int do_sd_create(BDRVSheepdogState *s, uint32_t *vdi_id, int snapshot,
|
||||
static int sd_prealloc(const char *filename, Error **errp)
|
||||
{
|
||||
BlockDriverState *bs = NULL;
|
||||
BDRVSheepdogState *base = NULL;
|
||||
unsigned long buf_size;
|
||||
uint32_t idx, max_idx;
|
||||
uint32_t object_size;
|
||||
int64_t vdi_size;
|
||||
void *buf = NULL;
|
||||
void *buf = g_malloc0(SD_DATA_OBJ_SIZE);
|
||||
int ret;
|
||||
|
||||
ret = bdrv_open(&bs, filename, NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
|
||||
@@ -1615,24 +1585,18 @@ static int sd_prealloc(const char *filename, Error **errp)
|
||||
ret = vdi_size;
|
||||
goto out;
|
||||
}
|
||||
|
||||
base = bs->opaque;
|
||||
object_size = (UINT32_C(1) << base->inode.block_size_shift);
|
||||
buf_size = MIN(object_size, SD_DATA_OBJ_SIZE);
|
||||
buf = g_malloc0(buf_size);
|
||||
|
||||
max_idx = DIV_ROUND_UP(vdi_size, buf_size);
|
||||
max_idx = DIV_ROUND_UP(vdi_size, SD_DATA_OBJ_SIZE);
|
||||
|
||||
for (idx = 0; idx < max_idx; idx++) {
|
||||
/*
|
||||
* The created image can be a cloned image, so we need to read
|
||||
* a data from the source image.
|
||||
*/
|
||||
ret = bdrv_pread(bs, idx * buf_size, buf, buf_size);
|
||||
ret = bdrv_pread(bs, idx * SD_DATA_OBJ_SIZE, buf, SD_DATA_OBJ_SIZE);
|
||||
if (ret < 0) {
|
||||
goto out;
|
||||
}
|
||||
ret = bdrv_pwrite(bs, idx * buf_size, buf, buf_size);
|
||||
ret = bdrv_pwrite(bs, idx * SD_DATA_OBJ_SIZE, buf, SD_DATA_OBJ_SIZE);
|
||||
if (ret < 0) {
|
||||
goto out;
|
||||
}
|
||||
@@ -1705,27 +1669,6 @@ static int parse_redundancy(BDRVSheepdogState *s, const char *opt)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int parse_block_size_shift(BDRVSheepdogState *s, QemuOpts *opt)
|
||||
{
|
||||
struct SheepdogInode *inode = &s->inode;
|
||||
uint64_t object_size;
|
||||
int obj_order;
|
||||
|
||||
object_size = qemu_opt_get_size_del(opt, BLOCK_OPT_OBJECT_SIZE, 0);
|
||||
if (object_size) {
|
||||
if ((object_size - 1) & object_size) { /* not a power of 2? */
|
||||
return -EINVAL;
|
||||
}
|
||||
obj_order = ffs(object_size) - 1;
|
||||
if (obj_order < 20 || obj_order > 31) {
|
||||
return -EINVAL;
|
||||
}
|
||||
inode->block_size_shift = (uint8_t)obj_order;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sd_create(const char *filename, QemuOpts *opts,
|
||||
Error **errp)
|
||||
{
|
||||
@@ -1736,7 +1679,6 @@ static int sd_create(const char *filename, QemuOpts *opts,
|
||||
BDRVSheepdogState *s;
|
||||
char tag[SD_MAX_VDI_TAG_LEN];
|
||||
uint32_t snapid;
|
||||
uint64_t max_vdi_size;
|
||||
bool prealloc = false;
|
||||
|
||||
s = g_new0(BDRVSheepdogState, 1);
|
||||
@@ -1775,11 +1717,10 @@ static int sd_create(const char *filename, QemuOpts *opts,
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
ret = parse_block_size_shift(s, opts);
|
||||
if (ret < 0) {
|
||||
error_setg(errp, "Invalid object_size."
|
||||
" obect_size needs to be power of 2"
|
||||
" and be limited from 2^20 to 2^31");
|
||||
|
||||
if (s->inode.vdi_size > SD_MAX_VDI_SIZE) {
|
||||
error_setg(errp, "too big image size");
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -1789,7 +1730,7 @@ static int sd_create(const char *filename, QemuOpts *opts,
|
||||
BlockDriver *drv;
|
||||
|
||||
/* Currently, only Sheepdog backing image is supported. */
|
||||
drv = bdrv_find_protocol(backing_file, true, NULL);
|
||||
drv = bdrv_find_protocol(backing_file, true);
|
||||
if (!drv || strcmp(drv->protocol_name, "sheepdog") != 0) {
|
||||
error_setg(errp, "backing_file must be a sheepdog image");
|
||||
ret = -EINVAL;
|
||||
@@ -1816,51 +1757,6 @@ static int sd_create(const char *filename, QemuOpts *opts,
|
||||
}
|
||||
|
||||
s->aio_context = qemu_get_aio_context();
|
||||
|
||||
/* if block_size_shift is not specified, get cluster default value */
|
||||
if (s->inode.block_size_shift == 0) {
|
||||
SheepdogVdiReq hdr;
|
||||
SheepdogClusterRsp *rsp = (SheepdogClusterRsp *)&hdr;
|
||||
Error *local_err = NULL;
|
||||
int fd;
|
||||
unsigned int wlen = 0, rlen = 0;
|
||||
|
||||
fd = connect_to_sdog(s, &local_err);
|
||||
if (fd < 0) {
|
||||
error_report("%s", error_get_pretty(local_err));
|
||||
error_free(local_err);
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
}
|
||||
|
||||
memset(&hdr, 0, sizeof(hdr));
|
||||
hdr.opcode = SD_OP_GET_CLUSTER_DEFAULT;
|
||||
hdr.proto_ver = SD_PROTO_VER;
|
||||
|
||||
ret = do_req(fd, s->aio_context, (SheepdogReq *)&hdr,
|
||||
NULL, &wlen, &rlen);
|
||||
closesocket(fd);
|
||||
if (ret) {
|
||||
error_setg_errno(errp, -ret, "failed to get cluster default");
|
||||
goto out;
|
||||
}
|
||||
if (rsp->result == SD_RES_SUCCESS) {
|
||||
s->inode.block_size_shift = rsp->block_size_shift;
|
||||
} else {
|
||||
s->inode.block_size_shift = SD_DEFAULT_BLOCK_SIZE_SHIFT;
|
||||
}
|
||||
}
|
||||
|
||||
max_vdi_size = (UINT64_C(1) << s->inode.block_size_shift) * MAX_DATA_OBJS;
|
||||
|
||||
if (s->inode.vdi_size > max_vdi_size) {
|
||||
error_setg(errp, "An image is too large."
|
||||
" The maximum image size is %"PRIu64 "GB",
|
||||
max_vdi_size / 1024 / 1024 / 1024);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = do_sd_create(s, &vid, 0, errp);
|
||||
if (ret) {
|
||||
goto out;
|
||||
@@ -1889,7 +1785,8 @@ static void sd_close(BlockDriverState *bs)
|
||||
|
||||
fd = connect_to_sdog(s, &local_err);
|
||||
if (fd < 0) {
|
||||
error_report_err(local_err);
|
||||
error_report("%s", error_get_pretty(local_err));;
|
||||
error_free(local_err);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -1930,20 +1827,19 @@ static int sd_truncate(BlockDriverState *bs, int64_t offset)
|
||||
BDRVSheepdogState *s = bs->opaque;
|
||||
int ret, fd;
|
||||
unsigned int datalen;
|
||||
uint64_t max_vdi_size;
|
||||
|
||||
max_vdi_size = (UINT64_C(1) << s->inode.block_size_shift) * MAX_DATA_OBJS;
|
||||
if (offset < s->inode.vdi_size) {
|
||||
error_report("shrinking is not supported");
|
||||
return -EINVAL;
|
||||
} else if (offset > max_vdi_size) {
|
||||
} else if (offset > SD_MAX_VDI_SIZE) {
|
||||
error_report("too big image size");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
fd = connect_to_sdog(s, &local_err);
|
||||
if (fd < 0) {
|
||||
error_report_err(local_err);
|
||||
error_report("%s", error_get_pretty(local_err));;
|
||||
error_free(local_err);
|
||||
return fd;
|
||||
}
|
||||
|
||||
@@ -2016,7 +1912,8 @@ static bool sd_delete(BDRVSheepdogState *s)
|
||||
|
||||
fd = connect_to_sdog(s, &local_err);
|
||||
if (fd < 0) {
|
||||
error_report_err(local_err);
|
||||
error_report("%s", error_get_pretty(local_err));;
|
||||
error_free(local_err);
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -2063,7 +1960,8 @@ static int sd_create_branch(BDRVSheepdogState *s)
|
||||
deleted = sd_delete(s);
|
||||
ret = do_sd_create(s, &vid, !deleted, &local_err);
|
||||
if (ret) {
|
||||
error_report_err(local_err);
|
||||
error_report("%s", error_get_pretty(local_err));;
|
||||
error_free(local_err);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -2071,7 +1969,8 @@ static int sd_create_branch(BDRVSheepdogState *s)
|
||||
|
||||
fd = connect_to_sdog(s, &local_err);
|
||||
if (fd < 0) {
|
||||
error_report_err(local_err);
|
||||
error_report("%s", error_get_pretty(local_err));;
|
||||
error_free(local_err);
|
||||
ret = fd;
|
||||
goto out;
|
||||
}
|
||||
@@ -2114,10 +2013,9 @@ static int coroutine_fn sd_co_rw_vector(void *p)
|
||||
SheepdogAIOCB *acb = p;
|
||||
int ret = 0;
|
||||
unsigned long len, done = 0, total = acb->nb_sectors * BDRV_SECTOR_SIZE;
|
||||
unsigned long idx;
|
||||
uint32_t object_size;
|
||||
unsigned long idx = acb->sector_num * BDRV_SECTOR_SIZE / SD_DATA_OBJ_SIZE;
|
||||
uint64_t oid;
|
||||
uint64_t offset;
|
||||
uint64_t offset = (acb->sector_num * BDRV_SECTOR_SIZE) % SD_DATA_OBJ_SIZE;
|
||||
BDRVSheepdogState *s = acb->common.bs->opaque;
|
||||
SheepdogInode *inode = &s->inode;
|
||||
AIOReq *aio_req;
|
||||
@@ -2134,10 +2032,6 @@ static int coroutine_fn sd_co_rw_vector(void *p)
|
||||
}
|
||||
}
|
||||
|
||||
object_size = (UINT32_C(1) << inode->block_size_shift);
|
||||
idx = acb->sector_num * BDRV_SECTOR_SIZE / object_size;
|
||||
offset = (acb->sector_num * BDRV_SECTOR_SIZE) % object_size;
|
||||
|
||||
/*
|
||||
* Make sure we don't free the aiocb before we are done with all requests.
|
||||
* This additional reference is dropped at the end of this function.
|
||||
@@ -2151,7 +2045,7 @@ static int coroutine_fn sd_co_rw_vector(void *p)
|
||||
|
||||
oid = vid_to_data_oid(inode->data_vdi_id[idx], idx);
|
||||
|
||||
len = MIN(total - done, object_size - offset);
|
||||
len = MIN(total - done, SD_DATA_OBJ_SIZE - offset);
|
||||
|
||||
switch (acb->aiocb_type) {
|
||||
case AIOCB_READ_UDATA:
|
||||
@@ -2175,7 +2069,7 @@ static int coroutine_fn sd_co_rw_vector(void *p)
|
||||
* We discard the object only when the whole object is
|
||||
* 1) allocated 2) trimmed. Otherwise, simply skip it.
|
||||
*/
|
||||
if (len != object_size || inode->data_vdi_id[idx] == 0) {
|
||||
if (len != SD_DATA_OBJ_SIZE || inode->data_vdi_id[idx] == 0) {
|
||||
goto done;
|
||||
}
|
||||
break;
|
||||
@@ -2223,7 +2117,7 @@ static coroutine_fn int sd_co_writev(BlockDriverState *bs, int64_t sector_num,
|
||||
int64_t offset = (sector_num + nb_sectors) * BDRV_SECTOR_SIZE;
|
||||
BDRVSheepdogState *s = bs->opaque;
|
||||
|
||||
if (offset > s->inode.vdi_size) {
|
||||
if (bs->growable && offset > s->inode.vdi_size) {
|
||||
ret = sd_truncate(bs, offset);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
@@ -2324,7 +2218,8 @@ static int sd_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info)
|
||||
/* refresh inode. */
|
||||
fd = connect_to_sdog(s, &local_err);
|
||||
if (fd < 0) {
|
||||
error_report_err(local_err);
|
||||
error_report("%s", error_get_pretty(local_err));;
|
||||
error_free(local_err);
|
||||
ret = fd;
|
||||
goto cleanup;
|
||||
}
|
||||
@@ -2339,8 +2234,10 @@ static int sd_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info)
|
||||
|
||||
ret = do_sd_create(s, &new_vid, 1, &local_err);
|
||||
if (ret < 0) {
|
||||
error_report("failed to create inode for snapshot: %s",
|
||||
error_get_pretty(local_err));
|
||||
error_report("%s", error_get_pretty(local_err));;
|
||||
error_free(local_err);
|
||||
error_report("failed to create inode for snapshot. %s",
|
||||
strerror(errno));
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
@@ -2439,7 +2336,8 @@ static int sd_snapshot_list(BlockDriverState *bs, QEMUSnapshotInfo **psn_tab)
|
||||
|
||||
fd = connect_to_sdog(s, &local_err);
|
||||
if (fd < 0) {
|
||||
error_report_err(local_err);
|
||||
error_report("%s", error_get_pretty(local_err));;
|
||||
error_free(local_err);
|
||||
ret = fd;
|
||||
goto out;
|
||||
}
|
||||
@@ -2468,7 +2366,8 @@ static int sd_snapshot_list(BlockDriverState *bs, QEMUSnapshotInfo **psn_tab)
|
||||
|
||||
fd = connect_to_sdog(s, &local_err);
|
||||
if (fd < 0) {
|
||||
error_report_err(local_err);
|
||||
error_report("%s", error_get_pretty(local_err));;
|
||||
error_free(local_err);
|
||||
ret = fd;
|
||||
goto out;
|
||||
}
|
||||
@@ -2527,19 +2426,19 @@ static int do_load_save_vmstate(BDRVSheepdogState *s, uint8_t *data,
|
||||
uint64_t offset;
|
||||
uint32_t vdi_index;
|
||||
uint32_t vdi_id = load ? s->inode.parent_vdi_id : s->inode.vdi_id;
|
||||
uint32_t object_size = (UINT32_C(1) << s->inode.block_size_shift);
|
||||
|
||||
fd = connect_to_sdog(s, &local_err);
|
||||
if (fd < 0) {
|
||||
error_report_err(local_err);
|
||||
error_report("%s", error_get_pretty(local_err));;
|
||||
error_free(local_err);
|
||||
return fd;
|
||||
}
|
||||
|
||||
while (remaining) {
|
||||
vdi_index = pos / object_size;
|
||||
offset = pos % object_size;
|
||||
vdi_index = pos / SD_DATA_OBJ_SIZE;
|
||||
offset = pos % SD_DATA_OBJ_SIZE;
|
||||
|
||||
data_len = MIN(remaining, object_size - offset);
|
||||
data_len = MIN(remaining, SD_DATA_OBJ_SIZE - offset);
|
||||
|
||||
vmstate_oid = vid_to_vmstate_oid(vdi_id, vdi_index);
|
||||
|
||||
@@ -2626,11 +2525,10 @@ sd_co_get_block_status(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
|
||||
{
|
||||
BDRVSheepdogState *s = bs->opaque;
|
||||
SheepdogInode *inode = &s->inode;
|
||||
uint32_t object_size = (UINT32_C(1) << inode->block_size_shift);
|
||||
uint64_t offset = sector_num * BDRV_SECTOR_SIZE;
|
||||
unsigned long start = offset / object_size,
|
||||
unsigned long start = offset / SD_DATA_OBJ_SIZE,
|
||||
end = DIV_ROUND_UP((sector_num + nb_sectors) *
|
||||
BDRV_SECTOR_SIZE, object_size);
|
||||
BDRV_SECTOR_SIZE, SD_DATA_OBJ_SIZE);
|
||||
unsigned long idx;
|
||||
int64_t ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
|
||||
|
||||
@@ -2649,7 +2547,7 @@ sd_co_get_block_status(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
|
||||
}
|
||||
}
|
||||
|
||||
*pnum = (idx - start) * object_size / BDRV_SECTOR_SIZE;
|
||||
*pnum = (idx - start) * SD_DATA_OBJ_SIZE / BDRV_SECTOR_SIZE;
|
||||
if (*pnum > nb_sectors) {
|
||||
*pnum = nb_sectors;
|
||||
}
|
||||
@@ -2660,15 +2558,14 @@ static int64_t sd_get_allocated_file_size(BlockDriverState *bs)
|
||||
{
|
||||
BDRVSheepdogState *s = bs->opaque;
|
||||
SheepdogInode *inode = &s->inode;
|
||||
uint32_t object_size = (UINT32_C(1) << inode->block_size_shift);
|
||||
unsigned long i, last = DIV_ROUND_UP(inode->vdi_size, object_size);
|
||||
unsigned long i, last = DIV_ROUND_UP(inode->vdi_size, SD_DATA_OBJ_SIZE);
|
||||
uint64_t size = 0;
|
||||
|
||||
for (i = 0; i < last; i++) {
|
||||
if (inode->data_vdi_id[i] == 0) {
|
||||
continue;
|
||||
}
|
||||
size += object_size;
|
||||
size += SD_DATA_OBJ_SIZE;
|
||||
}
|
||||
return size;
|
||||
}
|
||||
@@ -2697,11 +2594,6 @@ static QemuOptsList sd_create_opts = {
|
||||
.type = QEMU_OPT_STRING,
|
||||
.help = "Redundancy of the image"
|
||||
},
|
||||
{
|
||||
.name = BLOCK_OPT_OBJECT_SIZE,
|
||||
.type = QEMU_OPT_SIZE,
|
||||
.help = "Object size of the image"
|
||||
},
|
||||
{ /* end of list */ }
|
||||
}
|
||||
};
30
block/vdi.c
@@ -53,7 +53,6 @@
|
||||
#include "block/block_int.h"
|
||||
#include "qemu/module.h"
|
||||
#include "migration/migration.h"
|
||||
#include "block/coroutine.h"
|
||||
|
||||
#if defined(CONFIG_UUID)
|
||||
#include <uuid/uuid.h>
|
||||
@@ -197,8 +196,6 @@ typedef struct {
|
||||
/* VDI header (converted to host endianness). */
|
||||
VdiHeader header;
|
||||
|
||||
CoMutex write_lock;
|
||||
|
||||
Error *migration_blocker;
|
||||
} BDRVVdiState;
|
||||
|
||||
@@ -507,8 +504,6 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
"vdi", bdrv_get_device_name(bs), "live migration");
|
||||
migrate_add_blocker(s->migration_blocker);
|
||||
|
||||
qemu_co_mutex_init(&s->write_lock);
|
||||
|
||||
return 0;
|
||||
|
||||
fail_free_bmap:
|
||||
@@ -644,31 +639,11 @@ static int vdi_co_write(BlockDriverState *bs,
|
||||
buf, n_sectors * SECTOR_SIZE);
|
||||
memset(block + (sector_in_block + n_sectors) * SECTOR_SIZE, 0,
|
||||
(s->block_sectors - n_sectors - sector_in_block) * SECTOR_SIZE);
|
||||
|
||||
/* Note that this coroutine does not yield anywhere from reading the
|
||||
* bmap entry until here, so in regards to all the coroutines trying
|
||||
* to write to this cluster, the one doing the allocation will
|
||||
* always be the first to try to acquire the lock.
|
||||
* Therefore, it is also the first that will actually be able to
|
||||
* acquire the lock and thus the padded cluster is written before
|
||||
* the other coroutines can write to the affected area. */
|
||||
qemu_co_mutex_lock(&s->write_lock);
|
||||
ret = bdrv_write(bs->file, offset, block, s->block_sectors);
|
||||
qemu_co_mutex_unlock(&s->write_lock);
|
||||
} else {
|
||||
uint64_t offset = s->header.offset_data / SECTOR_SIZE +
|
||||
(uint64_t)bmap_entry * s->block_sectors +
|
||||
sector_in_block;
|
||||
qemu_co_mutex_lock(&s->write_lock);
|
||||
/* This lock is only used to make sure the following write operation
|
||||
* is executed after the write issued by the coroutine allocating
|
||||
* this cluster, therefore we do not need to keep it locked.
|
||||
* As stated above, the allocating coroutine will always try to lock
|
||||
* the mutex before all the other concurrent accesses to that
|
||||
* cluster, therefore at this point we can be absolutely certain
|
||||
* that that write operation has returned (there may be other writes
|
||||
* in flight, but they do not concern this very operation). */
|
||||
qemu_co_mutex_unlock(&s->write_lock);
|
||||
ret = bdrv_write(bs->file, offset, buf, n_sectors);
|
||||
}
|
||||
|
||||
@@ -877,6 +852,11 @@ static QemuOptsList vdi_create_opts = {
|
||||
.def_value_str = "off"
|
||||
},
|
||||
#endif
|
||||
{
|
||||
.name = BLOCK_OPT_NOCOW,
|
||||
.type = QEMU_OPT_BOOL,
|
||||
.help = "Turn off copy-on-write (valid only on btrfs)"
|
||||
},
|
||||
/* TODO: An additional option to set UUID values might be useful. */
|
||||
{ /* end of list */ }
|
||||
}
31
block/vhdx.c
@@ -1109,9 +1109,8 @@ static coroutine_fn int vhdx_co_readv(BlockDriverState *bs, int64_t sector_num,
/* check the payload block state */
switch (s->bat[sinfo.bat_idx] & VHDX_BAT_STATE_BIT_MASK) {
case PAYLOAD_BLOCK_NOT_PRESENT: /* fall through */
case PAYLOAD_BLOCK_UNDEFINED:
case PAYLOAD_BLOCK_UNMAPPED:
case PAYLOAD_BLOCK_UNMAPPED_v095:
case PAYLOAD_BLOCK_UNDEFINED: /* fall through */
case PAYLOAD_BLOCK_UNMAPPED: /* fall through */
case PAYLOAD_BLOCK_ZERO:
/* return zero */
qemu_iovec_memset(&hd_qiov, 0, 0, sinfo.bytes_avail);
@@ -1174,18 +1173,7 @@ static void vhdx_update_bat_table_entry(BlockDriverState *bs, BDRVVHDXState *s,
{
/* The BAT entry is a uint64, with 44 bits for the file offset in units of
* 1MB, and 3 bits for the block state. */
if ((state == PAYLOAD_BLOCK_ZERO) ||
(state == PAYLOAD_BLOCK_UNDEFINED) ||
(state == PAYLOAD_BLOCK_NOT_PRESENT) ||
(state == PAYLOAD_BLOCK_UNMAPPED)) {
s->bat[sinfo->bat_idx] = 0; /* For PAYLOAD_BLOCK_ZERO, the
FileOffsetMB field is denoted as
'reserved' in the v1.0 spec. If it is
non-zero, MS Hyper-V will fail to read
the disk image */
} else {
s->bat[sinfo->bat_idx] = sinfo->file_offset;
}
s->bat[sinfo->bat_idx] = sinfo->file_offset;

s->bat[sinfo->bat_idx] |= state & VHDX_BAT_STATE_BIT_MASK;

@@ -1289,11 +1277,11 @@ static coroutine_fn int vhdx_co_writev(BlockDriverState *bs, int64_t sector_num,
sectors_to_write += iov2.iov_len >> BDRV_SECTOR_BITS;
}
}

/* fall through */
case PAYLOAD_BLOCK_NOT_PRESENT: /* fall through */
case PAYLOAD_BLOCK_UNMAPPED:
case PAYLOAD_BLOCK_UNMAPPED_v095:
case PAYLOAD_BLOCK_UNDEFINED:
case PAYLOAD_BLOCK_UNMAPPED: /* fall through */
case PAYLOAD_BLOCK_UNDEFINED: /* fall through */
bat_prior_offset = sinfo.file_offset;
ret = vhdx_allocate_block(bs, s, &sinfo.file_offset);
if (ret < 0) {
@@ -1785,7 +1773,7 @@ static int vhdx_create(const char *filename, QemuOpts *opts, Error **errp)
log_size = qemu_opt_get_size_del(opts, VHDX_BLOCK_OPT_LOG_SIZE, 0);
block_size = qemu_opt_get_size_del(opts, VHDX_BLOCK_OPT_BLOCK_SIZE, 0);
type = qemu_opt_get_del(opts, BLOCK_OPT_SUBFMT);
use_zero_blocks = qemu_opt_get_bool_del(opts, VHDX_BLOCK_OPT_ZERO, true);
use_zero_blocks = qemu_opt_get_bool_del(opts, VHDX_BLOCK_OPT_ZERO, false);

if (image_size > VHDX_MAX_IMAGE_SIZE) {
error_setg_errno(errp, EINVAL, "Image size too large; max of 64TB");
@@ -1947,9 +1935,7 @@ static QemuOptsList vhdx_create_opts = {
{
.name = VHDX_BLOCK_OPT_ZERO,
.type = QEMU_OPT_BOOL,
.help = "Force use of payload blocks of type 'ZERO'. "\
"Non-standard, but default. Do not set to 'off' when "\
"using 'qemu-img convert' with subformat=dynamic."
.help = "Force use of payload blocks of type 'ZERO'. Non-standard."
},
{ NULL }
}
@@ -1967,7 +1953,6 @@ static BlockDriver bdrv_vhdx = {
.bdrv_create = vhdx_create,
.bdrv_get_info = vhdx_get_info,
.bdrv_check = vhdx_check,
.bdrv_has_zero_init = bdrv_has_zero_init_1,

.create_opts = &vhdx_create_opts,
};

@@ -226,8 +226,7 @@ typedef struct QEMU_PACKED VHDXLogDataSector {
#define PAYLOAD_BLOCK_NOT_PRESENT 0
#define PAYLOAD_BLOCK_UNDEFINED 1
#define PAYLOAD_BLOCK_ZERO 2
#define PAYLOAD_BLOCK_UNMAPPED 3
#define PAYLOAD_BLOCK_UNMAPPED_v095 5
#define PAYLOAD_BLOCK_UNMAPPED 5
#define PAYLOAD_BLOCK_FULLY_PRESENT 6
#define PAYLOAD_BLOCK_PARTIALLY_PRESENT 7
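The vhdx_update_bat_table_entry() hunk above stores each BAT entry as a single uint64: a 1 MB-aligned file offset (so its low bits are zero) OR'ed with a 3-bit payload block state masked by VHDX_BAT_STATE_BIT_MASK. A small sketch of that packing, with illustrative helper names (bat_entry_pack and friends are not QEMU functions):

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BAT_STATE_MASK   UINT64_C(0x7)        /* low 3 bits: payload block state */
#define BAT_OFFSET_ALIGN (UINT64_C(1) << 20)  /* offsets are kept 1 MB aligned */

/* Pack a 1 MB-aligned file offset and a block state into one BAT entry. */
static uint64_t bat_entry_pack(uint64_t file_offset_bytes, unsigned state)
{
    assert((file_offset_bytes & (BAT_OFFSET_ALIGN - 1)) == 0);
    return file_offset_bytes | ((uint64_t)state & BAT_STATE_MASK);
}

static uint64_t bat_entry_offset(uint64_t entry)
{
    return entry & ~(BAT_OFFSET_ALIGN - 1);
}

static unsigned bat_entry_state(uint64_t entry)
{
    return (unsigned)(entry & BAT_STATE_MASK);
}

int main(void)
{
    uint64_t entry = bat_entry_pack(3 * BAT_OFFSET_ALIGN, 6);

    printf("offset=%" PRIu64 " state=%u\n",
           bat_entry_offset(entry), bat_entry_state(entry));
    return 0;
}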
106
block/vmdk.c
@@ -28,7 +28,6 @@
|
||||
#include "qemu/module.h"
|
||||
#include "migration/migration.h"
|
||||
#include <zlib.h>
|
||||
#include <glib.h>
|
||||
|
||||
#define VMDK3_MAGIC (('C' << 24) | ('O' << 16) | ('W' << 8) | 'D')
|
||||
#define VMDK4_MAGIC (('K' << 24) | ('D' << 16) | ('M' << 8) | 'V')
|
||||
@@ -557,16 +556,8 @@ static char *vmdk_read_desc(BlockDriverState *file, uint64_t desc_offset,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (size < 4) {
|
||||
/* Both descriptor file and sparse image must be much larger than 4
|
||||
* bytes, also callers of vmdk_read_desc want to compare the first 4
|
||||
* bytes with VMDK4_MAGIC, let's error out if less is read. */
|
||||
error_setg(errp, "File is too small, not a valid image");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
size = MIN(size, (1 << 20) - 1); /* avoid unbounded allocation */
|
||||
buf = g_malloc(size + 1);
|
||||
size = MIN(size, 1 << 20); /* avoid unbounded allocation */
|
||||
buf = g_malloc0(size + 1);
|
||||
|
||||
ret = bdrv_pread(file, desc_offset, buf, size);
|
||||
if (ret < 0) {
|
||||
@@ -574,7 +565,6 @@ static char *vmdk_read_desc(BlockDriverState *file, uint64_t desc_offset,
|
||||
g_free(buf);
|
||||
return NULL;
|
||||
}
|
||||
buf[ret] = 0;
|
||||
|
||||
return buf;
|
||||
}
|
||||
@@ -645,7 +635,6 @@ static int vmdk_open_vmdk4(BlockDriverState *bs,
|
||||
bs->file->total_sectors * 512 - 1536,
|
||||
&footer, sizeof(footer));
|
||||
if (ret < 0) {
|
||||
error_setg_errno(errp, -ret, "Failed to read footer");
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -657,7 +646,6 @@ static int vmdk_open_vmdk4(BlockDriverState *bs,
|
||||
le32_to_cpu(footer.eos_marker.size) != 0 ||
|
||||
le32_to_cpu(footer.eos_marker.type) != MARKER_END_OF_STREAM)
|
||||
{
|
||||
error_setg(errp, "Invalid footer");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@@ -688,7 +676,6 @@ static int vmdk_open_vmdk4(BlockDriverState *bs,
|
||||
l1_entry_sectors = le32_to_cpu(header.num_gtes_per_gt)
|
||||
* le64_to_cpu(header.granularity);
|
||||
if (l1_entry_sectors == 0) {
|
||||
error_setg(errp, "L1 entry size is invalid");
|
||||
return -EINVAL;
|
||||
}
|
||||
l1_size = (le64_to_cpu(header.capacity) + l1_entry_sectors - 1)
|
||||
@@ -785,44 +772,41 @@ static int vmdk_parse_extents(const char *desc, BlockDriverState *bs,
|
||||
const char *desc_file_path, Error **errp)
|
||||
{
|
||||
int ret;
|
||||
int matches;
|
||||
char access[11];
|
||||
char type[11];
|
||||
char fname[512];
|
||||
const char *p = desc;
|
||||
int64_t sectors = 0;
|
||||
int64_t flat_offset;
|
||||
char *extent_path;
|
||||
char extent_path[PATH_MAX];
|
||||
BlockDriverState *extent_file;
|
||||
BDRVVmdkState *s = bs->opaque;
|
||||
VmdkExtent *extent;
|
||||
|
||||
while (*p) {
|
||||
/* parse extent line in one of below formats:
|
||||
*
|
||||
/* parse extent line:
|
||||
* RW [size in sectors] FLAT "file-name.vmdk" OFFSET
|
||||
* or
|
||||
* RW [size in sectors] SPARSE "file-name.vmdk"
|
||||
* RW [size in sectors] VMFS "file-name.vmdk"
|
||||
* RW [size in sectors] VMFSSPARSE "file-name.vmdk"
|
||||
*/
|
||||
flat_offset = -1;
|
||||
matches = sscanf(p, "%10s %" SCNd64 " %10s \"%511[^\n\r\"]\" %" SCNd64,
|
||||
access, §ors, type, fname, &flat_offset);
|
||||
if (matches < 4 || strcmp(access, "RW")) {
|
||||
ret = sscanf(p, "%10s %" SCNd64 " %10s \"%511[^\n\r\"]\" %" SCNd64,
|
||||
access, §ors, type, fname, &flat_offset);
|
||||
if (ret < 4 || strcmp(access, "RW")) {
|
||||
goto next_line;
|
||||
} else if (!strcmp(type, "FLAT")) {
|
||||
if (matches != 5 || flat_offset < 0) {
|
||||
if (ret != 5 || flat_offset < 0) {
|
||||
error_setg(errp, "Invalid extent lines: \n%s", p);
|
||||
return -EINVAL;
|
||||
}
|
||||
} else if (!strcmp(type, "VMFS")) {
|
||||
if (matches == 4) {
|
||||
if (ret == 4) {
|
||||
flat_offset = 0;
|
||||
} else {
|
||||
error_setg(errp, "Invalid extent lines:\n%s", p);
|
||||
return -EINVAL;
|
||||
}
|
||||
} else if (matches != 4) {
|
||||
} else if (ret != 4) {
|
||||
error_setg(errp, "Invalid extent lines:\n%s", p);
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -834,20 +818,11 @@ static int vmdk_parse_extents(const char *desc, BlockDriverState *bs,
|
||||
goto next_line;
|
||||
}
|
||||
|
||||
if (!path_is_absolute(fname) && !path_has_protocol(fname) &&
|
||||
!desc_file_path[0])
|
||||
{
|
||||
error_setg(errp, "Cannot use relative extent paths with VMDK "
|
||||
"descriptor file '%s'", bs->file->filename);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
extent_path = g_malloc0(PATH_MAX);
|
||||
path_combine(extent_path, PATH_MAX, desc_file_path, fname);
|
||||
path_combine(extent_path, sizeof(extent_path),
|
||||
desc_file_path, fname);
|
||||
extent_file = NULL;
|
||||
ret = bdrv_open(&extent_file, extent_path, NULL, NULL,
|
||||
bs->open_flags | BDRV_O_PROTOCOL, NULL, errp);
|
||||
g_free(extent_path);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
@@ -919,7 +894,7 @@ static int vmdk_open_desc_file(BlockDriverState *bs, int flags, char *buf,
|
||||
}
|
||||
s->create_type = g_strdup(ct);
|
||||
s->desc_offset = 0;
|
||||
ret = vmdk_parse_extents(buf, bs, bs->file->exact_filename, errp);
|
||||
ret = vmdk_parse_extents(buf, bs, bs->file->filename, errp);
|
||||
exit:
|
||||
return ret;
|
||||
}
|
||||
@@ -927,7 +902,7 @@ exit:
|
||||
static int vmdk_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
Error **errp)
|
||||
{
|
||||
char *buf;
|
||||
char *buf = NULL;
|
||||
int ret;
|
||||
BDRVVmdkState *s = bs->opaque;
|
||||
uint32_t magic;
|
||||
@@ -1563,7 +1538,7 @@ static int vmdk_write(BlockDriverState *bs, int64_t sector_num,
|
||||
/* update CID on the first write every time the virtual disk is
|
||||
* opened */
|
||||
if (!s->cid_updated) {
|
||||
ret = vmdk_write_cid(bs, g_random_int());
|
||||
ret = vmdk_write_cid(bs, time(NULL));
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
@@ -1797,15 +1772,10 @@ static int vmdk_create(const char *filename, QemuOpts *opts, Error **errp)
|
||||
int ret = 0;
|
||||
bool flat, split, compress;
|
||||
GString *ext_desc_lines;
|
||||
char *path = g_malloc0(PATH_MAX);
|
||||
char *prefix = g_malloc0(PATH_MAX);
|
||||
char *postfix = g_malloc0(PATH_MAX);
|
||||
char *desc_line = g_malloc0(BUF_SIZE);
|
||||
char *ext_filename = g_malloc0(PATH_MAX);
|
||||
char *desc_filename = g_malloc0(PATH_MAX);
|
||||
char path[PATH_MAX], prefix[PATH_MAX], postfix[PATH_MAX];
|
||||
const int64_t split_size = 0x80000000; /* VMDK has constant split size */
|
||||
const char *desc_extent_line;
|
||||
char *parent_desc_line = g_malloc0(BUF_SIZE);
|
||||
char parent_desc_line[BUF_SIZE] = "";
|
||||
uint32_t parent_cid = 0xffffffff;
|
||||
uint32_t number_heads = 16;
|
||||
bool zeroed_grain = false;
|
||||
@@ -1898,19 +1868,8 @@ static int vmdk_create(const char *filename, QemuOpts *opts, Error **errp)
|
||||
}
|
||||
if (backing_file) {
|
||||
BlockDriverState *bs = NULL;
|
||||
char *full_backing = g_new0(char, PATH_MAX);
|
||||
bdrv_get_full_backing_filename_from_filename(filename, backing_file,
|
||||
full_backing, PATH_MAX,
|
||||
&local_err);
|
||||
if (local_err) {
|
||||
g_free(full_backing);
|
||||
error_propagate(errp, local_err);
|
||||
ret = -ENOENT;
|
||||
goto exit;
|
||||
}
|
||||
ret = bdrv_open(&bs, full_backing, NULL, NULL, BDRV_O_NO_BACKING, NULL,
|
||||
ret = bdrv_open(&bs, backing_file, NULL, NULL, BDRV_O_NO_BACKING, NULL,
|
||||
errp);
|
||||
g_free(full_backing);
|
||||
if (ret != 0) {
|
||||
goto exit;
|
||||
}
|
||||
@@ -1921,27 +1880,33 @@ static int vmdk_create(const char *filename, QemuOpts *opts, Error **errp)
|
||||
}
|
||||
parent_cid = vmdk_read_cid(bs, 0);
|
||||
bdrv_unref(bs);
|
||||
snprintf(parent_desc_line, BUF_SIZE,
|
||||
snprintf(parent_desc_line, sizeof(parent_desc_line),
|
||||
"parentFileNameHint=\"%s\"", backing_file);
|
||||
}
|
||||
|
||||
/* Create extents */
|
||||
filesize = total_size;
|
||||
while (filesize > 0) {
|
||||
char desc_line[BUF_SIZE];
|
||||
char ext_filename[PATH_MAX];
|
||||
char desc_filename[PATH_MAX];
|
||||
int64_t size = filesize;
|
||||
|
||||
if (split && size > split_size) {
|
||||
size = split_size;
|
||||
}
|
||||
if (split) {
|
||||
snprintf(desc_filename, PATH_MAX, "%s-%c%03d%s",
|
||||
snprintf(desc_filename, sizeof(desc_filename), "%s-%c%03d%s",
|
||||
prefix, flat ? 'f' : 's', ++idx, postfix);
|
||||
} else if (flat) {
|
||||
snprintf(desc_filename, PATH_MAX, "%s-flat%s", prefix, postfix);
|
||||
snprintf(desc_filename, sizeof(desc_filename), "%s-flat%s",
|
||||
prefix, postfix);
|
||||
} else {
|
||||
snprintf(desc_filename, PATH_MAX, "%s%s", prefix, postfix);
|
||||
snprintf(desc_filename, sizeof(desc_filename), "%s%s",
|
||||
prefix, postfix);
|
||||
}
|
||||
snprintf(ext_filename, PATH_MAX, "%s%s", path, desc_filename);
|
||||
snprintf(ext_filename, sizeof(ext_filename), "%s%s",
|
||||
path, desc_filename);
|
||||
|
||||
if (vmdk_create_extent(ext_filename, size,
|
||||
flat, compress, zeroed_grain, opts, errp)) {
|
||||
@@ -1951,13 +1916,13 @@ static int vmdk_create(const char *filename, QemuOpts *opts, Error **errp)
|
||||
filesize -= size;
|
||||
|
||||
/* Format description line */
|
||||
snprintf(desc_line, BUF_SIZE,
|
||||
snprintf(desc_line, sizeof(desc_line),
|
||||
desc_extent_line, size / BDRV_SECTOR_SIZE, desc_filename);
|
||||
g_string_append(ext_desc_lines, desc_line);
|
||||
}
|
||||
/* generate descriptor file */
|
||||
desc = g_strdup_printf(desc_template,
|
||||
g_random_int(),
|
||||
(uint32_t)time(NULL),
|
||||
parent_cid,
|
||||
fmt,
|
||||
parent_desc_line,
|
||||
@@ -2006,13 +1971,6 @@ exit:
|
||||
g_free(backing_file);
|
||||
g_free(fmt);
|
||||
g_free(desc);
|
||||
g_free(path);
|
||||
g_free(prefix);
|
||||
g_free(postfix);
|
||||
g_free(desc_line);
|
||||
g_free(ext_filename);
|
||||
g_free(desc_filename);
|
||||
g_free(parent_desc_line);
|
||||
g_string_free(ext_desc_lines, true);
|
||||
return ret;
|
||||
}
|
||||
|
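The vmdk_parse_extents() hunk above parses one descriptor extent line per iteration with a single sscanf() call and then checks how many fields matched (4 for SPARSE/VMFS-style lines, 5 for FLAT lines that carry an offset). A self-contained example using the same format string on a sample line (the file name is made up for illustration):

#include <inttypes.h>
#include <stdio.h>

/* Parse one VMDK descriptor extent line of the form
 *   RW <sectors> SPARSE "file-name.vmdk"
 * with the sscanf pattern shown in vmdk_parse_extents() above. */
int main(void)
{
    const char *line = "RW 4192256 SPARSE \"disk-s001.vmdk\"";
    char access[11], type[11], fname[512];
    int64_t sectors = 0, flat_offset = -1;
    int matches;

    matches = sscanf(line, "%10s %" SCNd64 " %10s \"%511[^\n\r\"]\" %" SCNd64,
                     access, &sectors, type, fname, &flat_offset);
    printf("matches=%d access=%s sectors=%" PRId64 " type=%s file=%s\n",
           matches, access, sectors, type, fname);
    return 0;
}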
141
block/vpc.c
@@ -46,7 +46,6 @@ enum vhd_type {
|
||||
#define VHD_TIMESTAMP_BASE 946684800
|
||||
|
||||
#define VHD_MAX_SECTORS (65535LL * 255 * 255)
|
||||
#define VHD_MAX_GEOMETRY (65535LL * 16 * 255)
|
||||
|
||||
// always big-endian
|
||||
typedef struct vhd_footer {
|
||||
@@ -66,7 +65,7 @@ typedef struct vhd_footer {
|
||||
char creator_os[4]; // "Wi2k"
|
||||
|
||||
uint64_t orig_size;
|
||||
uint64_t current_size;
|
||||
uint64_t size;
|
||||
|
||||
uint16_t cyls;
|
||||
uint8_t heads;
|
||||
@@ -216,12 +215,13 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
bs->total_sectors = (int64_t)
|
||||
be16_to_cpu(footer->cyls) * footer->heads * footer->secs_per_cyl;
|
||||
|
||||
/* Images that have exactly the maximum geometry are probably bigger and
|
||||
* would be truncated if we adhered to the geometry for them. Rely on
|
||||
* footer->current_size for them. */
|
||||
if (bs->total_sectors == VHD_MAX_GEOMETRY) {
|
||||
bs->total_sectors = be64_to_cpu(footer->current_size) /
|
||||
BDRV_SECTOR_SIZE;
|
||||
/* images created with disk2vhd report a far higher virtual size
|
||||
* than expected with the cyls * heads * sectors_per_cyl formula.
|
||||
* use the footer->size instead if the image was created with
|
||||
* disk2vhd.
|
||||
*/
|
||||
if (!strncmp(footer->creator_app, "d2v", 4)) {
|
||||
bs->total_sectors = be64_to_cpu(footer->size) / BDRV_SECTOR_SIZE;
|
||||
}
|
||||
|
||||
/* Allow a maximum disk size of approximately 2 TB */
|
||||
@@ -376,6 +376,38 @@ static inline int64_t get_sector_offset(BlockDriverState *bs,
|
||||
bdrv_pwrite_sync(bs->file, bitmap_offset, bitmap, s->bitmap_size);
|
||||
}
|
||||
|
||||
// printf("sector: %" PRIx64 ", index: %x, offset: %x, bioff: %" PRIx64 ", bloff: %" PRIx64 "\n",
|
||||
// sector_num, pagetable_index, pageentry_index,
|
||||
// bitmap_offset, block_offset);
|
||||
|
||||
// disabled by reason
|
||||
#if 0
|
||||
#ifdef CACHE
|
||||
if (bitmap_offset != s->last_bitmap)
|
||||
{
|
||||
lseek(s->fd, bitmap_offset, SEEK_SET);
|
||||
|
||||
s->last_bitmap = bitmap_offset;
|
||||
|
||||
// Scary! Bitmap is stored as big endian 32bit entries,
|
||||
// while we used to look it up byte by byte
|
||||
read(s->fd, s->pageentry_u8, 512);
|
||||
for (i = 0; i < 128; i++)
|
||||
be32_to_cpus(&s->pageentry_u32[i]);
|
||||
}
|
||||
|
||||
if ((s->pageentry_u8[pageentry_index / 8] >> (pageentry_index % 8)) & 1)
|
||||
return -1;
|
||||
#else
|
||||
lseek(s->fd, bitmap_offset + (pageentry_index / 8), SEEK_SET);
|
||||
|
||||
read(s->fd, &bitmap_entry, 1);
|
||||
|
||||
if ((bitmap_entry >> (pageentry_index % 8)) & 1)
|
||||
return -1; // not allocated
|
||||
#endif
|
||||
#endif
|
||||
|
||||
return block_offset;
|
||||
}
|
||||
|
||||
@@ -565,49 +597,6 @@ static coroutine_fn int vpc_co_write(BlockDriverState *bs, int64_t sector_num,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int64_t coroutine_fn vpc_co_get_block_status(BlockDriverState *bs,
|
||||
int64_t sector_num, int nb_sectors, int *pnum)
|
||||
{
|
||||
BDRVVPCState *s = bs->opaque;
|
||||
VHDFooter *footer = (VHDFooter*) s->footer_buf;
|
||||
int64_t start, offset;
|
||||
bool allocated;
|
||||
int n;
|
||||
|
||||
if (be32_to_cpu(footer->type) == VHD_FIXED) {
|
||||
*pnum = nb_sectors;
|
||||
return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID | BDRV_BLOCK_DATA |
|
||||
(sector_num << BDRV_SECTOR_BITS);
|
||||
}
|
||||
|
||||
offset = get_sector_offset(bs, sector_num, 0);
|
||||
start = offset;
|
||||
allocated = (offset != -1);
|
||||
*pnum = 0;
|
||||
|
||||
do {
|
||||
/* All sectors in a block are contiguous (without using the bitmap) */
|
||||
n = ROUND_UP(sector_num + 1, s->block_size / BDRV_SECTOR_SIZE)
|
||||
- sector_num;
|
||||
n = MIN(n, nb_sectors);
|
||||
|
||||
*pnum += n;
|
||||
sector_num += n;
|
||||
nb_sectors -= n;
|
||||
/* *pnum can't be greater than one block for allocated
|
||||
* sectors since there is always a bitmap in between. */
|
||||
if (allocated) {
|
||||
return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | start;
|
||||
}
|
||||
if (nb_sectors == 0) {
|
||||
break;
|
||||
}
|
||||
offset = get_sector_offset(bs, sector_num, 0);
|
||||
} while (offset == -1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Calculates the number of cylinders, heads and sectors per cylinder
|
||||
* based on a given number of sectors. This is the algorithm described
|
||||
@@ -625,20 +614,26 @@ static int calculate_geometry(int64_t total_sectors, uint16_t* cyls,
|
||||
{
|
||||
uint32_t cyls_times_heads;
|
||||
|
||||
total_sectors = MIN(total_sectors, VHD_MAX_GEOMETRY);
|
||||
/* Allow a maximum disk size of approximately 2 TB */
|
||||
if (total_sectors > 65535LL * 255 * 255) {
|
||||
return -EFBIG;
|
||||
}
|
||||
|
||||
if (total_sectors >= 65535LL * 16 * 63) {
|
||||
if (total_sectors > 65535 * 16 * 63) {
|
||||
*secs_per_cyl = 255;
|
||||
*heads = 16;
|
||||
if (total_sectors > 65535 * 16 * 255) {
|
||||
*heads = 255;
|
||||
} else {
|
||||
*heads = 16;
|
||||
}
|
||||
cyls_times_heads = total_sectors / *secs_per_cyl;
|
||||
} else {
|
||||
*secs_per_cyl = 17;
|
||||
cyls_times_heads = total_sectors / *secs_per_cyl;
|
||||
*heads = (cyls_times_heads + 1023) / 1024;
|
||||
|
||||
if (*heads < 4) {
|
||||
if (*heads < 4)
|
||||
*heads = 4;
|
||||
}
|
||||
|
||||
if (cyls_times_heads >= (*heads * 1024) || *heads > 16) {
|
||||
*secs_per_cyl = 31;
|
||||
@@ -794,28 +789,20 @@ static int vpc_create(const char *filename, QemuOpts *opts, Error **errp)
|
||||
* Calculate matching total_size and geometry. Increase the number of
|
||||
* sectors requested until we get enough (or fail). This ensures that
|
||||
* qemu-img convert doesn't truncate images, but rather rounds up.
|
||||
*
|
||||
* If the image size can't be represented by a spec conform CHS geometry,
|
||||
* we set the geometry to 65535 x 16 x 255 (CxHxS) sectors and use
|
||||
* the image size from the VHD footer to calculate total_sectors.
|
||||
*/
|
||||
total_sectors = MIN(VHD_MAX_GEOMETRY, total_size / BDRV_SECTOR_SIZE);
|
||||
total_sectors = total_size / BDRV_SECTOR_SIZE;
|
||||
for (i = 0; total_sectors > (int64_t)cyls * heads * secs_per_cyl; i++) {
|
||||
calculate_geometry(total_sectors + i, &cyls, &heads, &secs_per_cyl);
|
||||
}
|
||||
|
||||
if ((int64_t)cyls * heads * secs_per_cyl == VHD_MAX_GEOMETRY) {
|
||||
total_sectors = total_size / BDRV_SECTOR_SIZE;
|
||||
/* Allow a maximum disk size of approximately 2 TB */
|
||||
if (total_sectors > VHD_MAX_SECTORS) {
|
||||
if (calculate_geometry(total_sectors + i, &cyls, &heads,
|
||||
&secs_per_cyl))
|
||||
{
|
||||
ret = -EFBIG;
|
||||
goto out;
|
||||
}
|
||||
} else {
|
||||
total_sectors = (int64_t)cyls * heads * secs_per_cyl;
|
||||
total_size = total_sectors * BDRV_SECTOR_SIZE;
|
||||
}
|
||||
|
||||
total_sectors = (int64_t) cyls * heads * secs_per_cyl;
|
||||
total_size = total_sectors * BDRV_SECTOR_SIZE;
|
||||
|
||||
/* Prepare the Hard Disk Footer */
|
||||
memset(buf, 0, 1024);
|
||||
|
||||
@@ -837,7 +824,7 @@ static int vpc_create(const char *filename, QemuOpts *opts, Error **errp)
|
||||
footer->major = cpu_to_be16(0x0005);
|
||||
footer->minor = cpu_to_be16(0x0003);
|
||||
footer->orig_size = cpu_to_be64(total_size);
|
||||
footer->current_size = cpu_to_be64(total_size);
|
||||
footer->size = cpu_to_be64(total_size);
|
||||
footer->cyls = cpu_to_be16(cyls);
|
||||
footer->heads = heads;
|
||||
footer->secs_per_cyl = secs_per_cyl;
|
||||
@@ -902,6 +889,11 @@ static QemuOptsList vpc_create_opts = {
|
||||
"Type of virtual hard disk format. Supported formats are "
|
||||
"{dynamic (default) | fixed} "
|
||||
},
|
||||
{
|
||||
.name = BLOCK_OPT_NOCOW,
|
||||
.type = QEMU_OPT_BOOL,
|
||||
.help = "Turn off copy-on-write (valid only on btrfs)"
|
||||
},
|
||||
{ /* end of list */ }
|
||||
}
|
||||
};
|
||||
@@ -916,9 +908,8 @@ static BlockDriver bdrv_vpc = {
|
||||
.bdrv_reopen_prepare = vpc_reopen_prepare,
|
||||
.bdrv_create = vpc_create,
|
||||
|
||||
.bdrv_read = vpc_co_read,
|
||||
.bdrv_write = vpc_co_write,
|
||||
.bdrv_co_get_block_status = vpc_co_get_block_status,
|
||||
.bdrv_read = vpc_co_read,
|
||||
.bdrv_write = vpc_co_write,
|
||||
|
||||
.bdrv_get_info = vpc_get_info,
|
||||
|
||||
|
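calculate_geometry() earlier in this file's diff follows the CHS algorithm suggested by the VHD specification: disks at or above 65535 x 16 x 63 sectors get 255 sectors per cylinder and 16 heads, smaller disks start from 17 sectors per cylinder and scale the head count up. A self-contained sketch along those lines (it mirrors the branches visible in the hunk plus the spec's follow-up steps, so treat it as an approximation rather than a verbatim copy of the driver code):

#include <stdint.h>
#include <stdio.h>

/* Approximate CHS calculation for VHD images, per the algorithm in the
 * VHD spec appendix; caps the addressable size at 65535 x 16 x 255 sectors. */
static void chs_from_sectors(int64_t total_sectors, uint16_t *cyls,
                             uint8_t *heads, uint8_t *secs_per_cyl)
{
    uint32_t cyls_times_heads;

    if (total_sectors > 65535LL * 16 * 255) {
        total_sectors = 65535LL * 16 * 255;
    }

    if (total_sectors >= 65535LL * 16 * 63) {
        *secs_per_cyl = 255;
        *heads = 16;
        cyls_times_heads = total_sectors / *secs_per_cyl;
    } else {
        *secs_per_cyl = 17;
        cyls_times_heads = total_sectors / *secs_per_cyl;
        *heads = (cyls_times_heads + 1023) / 1024;
        if (*heads < 4) {
            *heads = 4;
        }
        if (cyls_times_heads >= (uint32_t)*heads * 1024 || *heads > 16) {
            *secs_per_cyl = 31;
            *heads = 16;
            cyls_times_heads = total_sectors / *secs_per_cyl;
        }
        if (cyls_times_heads >= (uint32_t)*heads * 1024) {
            *secs_per_cyl = 63;
            *heads = 16;
            cyls_times_heads = total_sectors / *secs_per_cyl;
        }
    }
    *cyls = cyls_times_heads / *heads;
}

int main(void)
{
    uint16_t cyls;
    uint8_t heads, secs;

    chs_from_sectors(4194304 /* 2 GiB in 512-byte sectors */, &cyls, &heads, &secs);
    printf("C/H/S = %u/%u/%u\n", cyls, heads, secs); /* prints 4161/16/63 */
    return 0;
}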
@@ -2909,8 +2909,8 @@ static int enable_write_target(BDRVVVFATState *s, Error **errp)
|
||||
|
||||
array_init(&(s->commits), sizeof(commit_t));
|
||||
|
||||
s->qcow_filename = g_malloc(PATH_MAX);
|
||||
ret = get_tmp_filename(s->qcow_filename, PATH_MAX);
|
||||
s->qcow_filename = g_malloc(1024);
|
||||
ret = get_tmp_filename(s->qcow_filename, 1024);
|
||||
if (ret < 0) {
|
||||
error_setg_errno(errp, -ret, "can't create temporary file");
|
||||
goto err;
|
||||
@@ -2924,9 +2924,8 @@ static int enable_write_target(BDRVVVFATState *s, Error **errp)
|
||||
}
|
||||
|
||||
opts = qemu_opts_create(bdrv_qcow->create_opts, NULL, 0, &error_abort);
|
||||
qemu_opt_set_number(opts, BLOCK_OPT_SIZE, s->sector_count * 512,
|
||||
&error_abort);
|
||||
qemu_opt_set(opts, BLOCK_OPT_BACKING_FILE, "fat:", &error_abort);
|
||||
qemu_opt_set_number(opts, BLOCK_OPT_SIZE, s->sector_count * 512);
|
||||
qemu_opt_set(opts, BLOCK_OPT_BACKING_FILE, "fat:");
|
||||
|
||||
ret = bdrv_create(bdrv_qcow, s->qcow_filename, opts, errp);
|
||||
qemu_opts_del(opts);
|
||||
|
@@ -1,125 +0,0 @@
|
||||
/*
|
||||
* QEMU System Emulator block write threshold notification
|
||||
*
|
||||
* Copyright Red Hat, Inc. 2014
|
||||
*
|
||||
* Authors:
|
||||
* Francesco Romani <fromani@redhat.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU LGPL, version 2 or later.
|
||||
* See the COPYING.LIB file in the top-level directory.
|
||||
*/
|
||||
|
||||
#include "block/block_int.h"
|
||||
#include "block/coroutine.h"
|
||||
#include "block/write-threshold.h"
|
||||
#include "qemu/notify.h"
|
||||
#include "qapi-event.h"
|
||||
#include "qmp-commands.h"
|
||||
|
||||
|
||||
uint64_t bdrv_write_threshold_get(const BlockDriverState *bs)
|
||||
{
|
||||
return bs->write_threshold_offset;
|
||||
}
|
||||
|
||||
bool bdrv_write_threshold_is_set(const BlockDriverState *bs)
|
||||
{
|
||||
return bs->write_threshold_offset > 0;
|
||||
}
|
||||
|
||||
static void write_threshold_disable(BlockDriverState *bs)
|
||||
{
|
||||
if (bdrv_write_threshold_is_set(bs)) {
|
||||
notifier_with_return_remove(&bs->write_threshold_notifier);
|
||||
bs->write_threshold_offset = 0;
|
||||
}
|
||||
}
|
||||
|
||||
uint64_t bdrv_write_threshold_exceeded(const BlockDriverState *bs,
|
||||
const BdrvTrackedRequest *req)
|
||||
{
|
||||
if (bdrv_write_threshold_is_set(bs)) {
|
||||
if (req->offset > bs->write_threshold_offset) {
|
||||
return (req->offset - bs->write_threshold_offset) + req->bytes;
|
||||
}
|
||||
if ((req->offset + req->bytes) > bs->write_threshold_offset) {
|
||||
return (req->offset + req->bytes) - bs->write_threshold_offset;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int coroutine_fn before_write_notify(NotifierWithReturn *notifier,
|
||||
void *opaque)
|
||||
{
|
||||
BdrvTrackedRequest *req = opaque;
|
||||
BlockDriverState *bs = req->bs;
|
||||
uint64_t amount = 0;
|
||||
|
||||
amount = bdrv_write_threshold_exceeded(bs, req);
|
||||
if (amount > 0) {
|
||||
qapi_event_send_block_write_threshold(
|
||||
bs->node_name,
|
||||
amount,
|
||||
bs->write_threshold_offset,
|
||||
&error_abort);
|
||||
|
||||
/* autodisable to avoid flooding the monitor */
|
||||
write_threshold_disable(bs);
|
||||
}
|
||||
|
||||
return 0; /* should always let other notifiers run */
|
||||
}
|
||||
|
||||
static void write_threshold_register_notifier(BlockDriverState *bs)
|
||||
{
|
||||
bs->write_threshold_notifier.notify = before_write_notify;
|
||||
notifier_with_return_list_add(&bs->before_write_notifiers,
|
||||
&bs->write_threshold_notifier);
|
||||
}
|
||||
|
||||
static void write_threshold_update(BlockDriverState *bs,
|
||||
int64_t threshold_bytes)
|
||||
{
|
||||
bs->write_threshold_offset = threshold_bytes;
|
||||
}
|
||||
|
||||
void bdrv_write_threshold_set(BlockDriverState *bs, uint64_t threshold_bytes)
|
||||
{
|
||||
if (bdrv_write_threshold_is_set(bs)) {
|
||||
if (threshold_bytes > 0) {
|
||||
write_threshold_update(bs, threshold_bytes);
|
||||
} else {
|
||||
write_threshold_disable(bs);
|
||||
}
|
||||
} else {
|
||||
if (threshold_bytes > 0) {
|
||||
/* avoid multiple registration */
|
||||
write_threshold_register_notifier(bs);
|
||||
write_threshold_update(bs, threshold_bytes);
|
||||
}
|
||||
/* discard bogus disable request */
|
||||
}
|
||||
}
|
||||
|
||||
void qmp_block_set_write_threshold(const char *node_name,
|
||||
uint64_t threshold_bytes,
|
||||
Error **errp)
|
||||
{
|
||||
BlockDriverState *bs;
|
||||
AioContext *aio_context;
|
||||
|
||||
bs = bdrv_find_node(node_name);
|
||||
if (!bs) {
|
||||
error_setg(errp, "Device '%s' not found", node_name);
|
||||
return;
|
||||
}
|
||||
|
||||
aio_context = bdrv_get_aio_context(bs);
|
||||
aio_context_acquire(aio_context);
|
||||
|
||||
bdrv_write_threshold_set(bs, threshold_bytes);
|
||||
|
||||
aio_context_release(aio_context);
|
||||
}
|
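The deleted bdrv_write_threshold_exceeded() above reports how much of a write request lies above the configured threshold: the full request length plus the gap if the request starts above the threshold, only the overshoot if it merely crosses it, and zero otherwise. A small standalone version of that computation (threshold_exceeded is an illustrative name, not a QEMU function):

#include <stdint.h>
#include <stdio.h>

/* Bytes of a [offset, offset + bytes) write that fall above the threshold;
 * 0 if the request stays below it or no threshold is set. */
static uint64_t threshold_exceeded(uint64_t threshold, uint64_t offset,
                                   uint64_t bytes)
{
    if (threshold == 0) {
        return 0;
    }
    if (offset > threshold) {
        return (offset - threshold) + bytes;
    }
    if (offset + bytes > threshold) {
        return (offset + bytes) - threshold;
    }
    return 0;
}

int main(void)
{
    printf("%llu\n", (unsigned long long)threshold_exceeded(1000, 900, 50));  /* 0   */
    printf("%llu\n", (unsigned long long)threshold_exceeded(1000, 900, 200)); /* 100 */
    printf("%llu\n", (unsigned long long)threshold_exceeded(1000, 1100, 50)); /* 150 */
    return 0;
}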
@@ -10,7 +10,6 @@
|
||||
*/
|
||||
|
||||
#include "sysemu/blockdev.h"
|
||||
#include "sysemu/block-backend.h"
|
||||
#include "hw/block/block.h"
|
||||
#include "monitor/monitor.h"
|
||||
#include "qapi/qmp/qerror.h"
|
||||
@@ -47,9 +46,8 @@ void qmp_nbd_server_start(SocketAddress *addr, Error **errp)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Hook into the BlockBackend notifiers to close the export when the
|
||||
* backend is closed.
|
||||
/* Hook into the BlockDriverState notifiers to close the export when
|
||||
* the file is closed.
|
||||
*/
|
||||
typedef struct NBDCloseNotifier {
|
||||
Notifier n;
|
||||
@@ -75,7 +73,7 @@ static void nbd_close_notifier(Notifier *n, void *data)
|
||||
void qmp_nbd_server_add(const char *device, bool has_writable, bool writable,
|
||||
Error **errp)
|
||||
{
|
||||
BlockBackend *blk;
|
||||
BlockDriverState *bs;
|
||||
NBDExport *exp;
|
||||
NBDCloseNotifier *n;
|
||||
|
||||
@@ -89,12 +87,12 @@ void qmp_nbd_server_add(const char *device, bool has_writable, bool writable,
|
||||
return;
|
||||
}
|
||||
|
||||
blk = blk_by_name(device);
|
||||
if (!blk) {
|
||||
bs = bdrv_find(device);
|
||||
if (!bs) {
|
||||
error_set(errp, QERR_DEVICE_NOT_FOUND, device);
|
||||
return;
|
||||
}
|
||||
if (!blk_is_inserted(blk)) {
|
||||
if (!bdrv_is_inserted(bs)) {
|
||||
error_set(errp, QERR_DEVICE_HAS_NO_MEDIUM, device);
|
||||
return;
|
||||
}
|
||||
@@ -102,22 +100,18 @@ void qmp_nbd_server_add(const char *device, bool has_writable, bool writable,
|
||||
if (!has_writable) {
|
||||
writable = false;
|
||||
}
|
||||
if (blk_is_read_only(blk)) {
|
||||
if (bdrv_is_read_only(bs)) {
|
||||
writable = false;
|
||||
}
|
||||
|
||||
exp = nbd_export_new(blk, 0, -1, writable ? 0 : NBD_FLAG_READ_ONLY, NULL,
|
||||
errp);
|
||||
if (!exp) {
|
||||
return;
|
||||
}
|
||||
exp = nbd_export_new(bs, 0, -1, writable ? 0 : NBD_FLAG_READ_ONLY, NULL);
|
||||
|
||||
nbd_export_set_name(exp, device);
|
||||
|
||||
n = g_new0(NBDCloseNotifier, 1);
|
||||
n->n.notify = nbd_close_notifier;
|
||||
n->exp = exp;
|
||||
blk_add_close_notifier(blk, &n->n);
|
||||
bdrv_add_close_notifier(bs, &n->n);
|
||||
QTAILQ_INSERT_TAIL(&close_notifiers, n, next);
|
||||
}
|
||||
|
||||
|
535
blockdev.c
File diff suppressed because it is too large
113
bootdevice.c
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
* QEMU Boot Device Implement
|
||||
*
|
||||
* Copyright (c) 2014 HUAWEI TECHNOLOGIES CO., LTD.
|
||||
* Copyright (c) 2014 HUAWEI TECHNOLOGIES CO.,LTD.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
@@ -25,7 +25,6 @@
|
||||
#include "sysemu/sysemu.h"
|
||||
#include "qapi/visitor.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "hw/hw.h"
|
||||
|
||||
typedef struct FWBootEntry FWBootEntry;
|
||||
|
||||
@@ -38,80 +37,6 @@ struct FWBootEntry {
|
||||
|
||||
static QTAILQ_HEAD(, FWBootEntry) fw_boot_order =
|
||||
QTAILQ_HEAD_INITIALIZER(fw_boot_order);
|
||||
static QEMUBootSetHandler *boot_set_handler;
|
||||
static void *boot_set_opaque;
|
||||
|
||||
void qemu_register_boot_set(QEMUBootSetHandler *func, void *opaque)
|
||||
{
|
||||
boot_set_handler = func;
|
||||
boot_set_opaque = opaque;
|
||||
}
|
||||
|
||||
void qemu_boot_set(const char *boot_order, Error **errp)
|
||||
{
|
||||
Error *local_err = NULL;
|
||||
|
||||
if (!boot_set_handler) {
|
||||
error_setg(errp, "no function defined to set boot device list for"
|
||||
" this architecture");
|
||||
return;
|
||||
}
|
||||
|
||||
validate_bootdevices(boot_order, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
}
|
||||
|
||||
boot_set_handler(boot_set_opaque, boot_order, errp);
|
||||
}
|
||||
|
||||
void validate_bootdevices(const char *devices, Error **errp)
|
||||
{
|
||||
/* We just do some generic consistency checks */
|
||||
const char *p;
|
||||
int bitmap = 0;
|
||||
|
||||
for (p = devices; *p != '\0'; p++) {
|
||||
/* Allowed boot devices are:
|
||||
* a-b: floppy disk drives
|
||||
* c-f: IDE disk drives
|
||||
* g-m: machine implementation dependent drives
|
||||
* n-p: network devices
|
||||
* It's up to each machine implementation to check if the given boot
|
||||
* devices match the actual hardware implementation and firmware
|
||||
* features.
|
||||
*/
|
||||
if (*p < 'a' || *p > 'p') {
|
||||
error_setg(errp, "Invalid boot device '%c'", *p);
|
||||
return;
|
||||
}
|
||||
if (bitmap & (1 << (*p - 'a'))) {
|
||||
error_setg(errp, "Boot device '%c' was given twice", *p);
|
||||
return;
|
||||
}
|
||||
bitmap |= 1 << (*p - 'a');
|
||||
}
|
||||
}
|
||||
|
||||
void restore_boot_order(void *opaque)
|
||||
{
|
||||
char *normal_boot_order = opaque;
|
||||
static int first = 1;
|
||||
|
||||
/* Restore boot order and remove ourselves after the first boot */
|
||||
if (first) {
|
||||
first = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
if (boot_set_handler) {
|
||||
qemu_boot_set(normal_boot_order, &error_abort);
|
||||
}
|
||||
|
||||
qemu_unregister_reset(restore_boot_order, normal_boot_order);
|
||||
g_free(normal_boot_order);
|
||||
}
|
||||
|
||||
void check_boot_index(int32_t bootindex, Error **errp)
|
||||
{
|
||||
@@ -212,9 +137,7 @@ char *get_boot_devices_list(size_t *size, bool ignore_suffixes)
|
||||
char *list = NULL;
|
||||
|
||||
QTAILQ_FOREACH(i, &fw_boot_order, link) {
|
||||
char *devpath = NULL, *suffix = NULL;
|
||||
char *bootpath;
|
||||
char *d;
|
||||
char *devpath = NULL, *bootpath;
|
||||
size_t len;
|
||||
|
||||
if (i->dev) {
|
||||
@@ -222,26 +145,20 @@ char *get_boot_devices_list(size_t *size, bool ignore_suffixes)
|
||||
assert(devpath);
|
||||
}
|
||||
|
||||
if (!ignore_suffixes) {
|
||||
if (i->dev) {
|
||||
d = qdev_get_own_fw_dev_path_from_handler(i->dev->parent_bus,
|
||||
i->dev);
|
||||
if (d) {
|
||||
assert(!i->suffix);
|
||||
suffix = d;
|
||||
} else {
|
||||
suffix = g_strdup(i->suffix);
|
||||
}
|
||||
} else {
|
||||
suffix = g_strdup(i->suffix);
|
||||
}
|
||||
}
|
||||
if (i->suffix && !ignore_suffixes && devpath) {
|
||||
size_t bootpathlen = strlen(devpath) + strlen(i->suffix) + 1;
|
||||
|
||||
bootpath = g_strdup_printf("%s%s",
|
||||
devpath ? devpath : "",
|
||||
suffix ? suffix : "");
|
||||
g_free(devpath);
|
||||
g_free(suffix);
|
||||
bootpath = g_malloc(bootpathlen);
|
||||
snprintf(bootpath, bootpathlen, "%s%s", devpath, i->suffix);
|
||||
g_free(devpath);
|
||||
} else if (devpath) {
|
||||
bootpath = devpath;
|
||||
} else if (!ignore_suffixes) {
|
||||
assert(i->suffix);
|
||||
bootpath = g_strdup(i->suffix);
|
||||
} else {
|
||||
bootpath = g_strdup("");
|
||||
}
|
||||
|
||||
if (total) {
|
||||
list[total-1] = '\n';
|
||||
|
@@ -351,10 +351,8 @@ static inline void init_thread(struct target_pt_regs *_regs, struct image_info *
|
||||
|
||||
_regs->gpr[1] = infop->start_stack;
|
||||
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
|
||||
get_user_u64(entry, infop->entry);
|
||||
entry += infop->load_addr;
|
||||
get_user_u64(toc, infop->entry + 8);
|
||||
toc += infop->load_addr;
|
||||
entry = ldq_raw(infop->entry) + infop->load_addr;
|
||||
toc = ldq_raw(infop->entry + 8) + infop->load_addr;
|
||||
_regs->gpr[2] = toc;
|
||||
infop->entry = entry;
|
||||
#endif
|
||||
@@ -367,9 +365,8 @@ static inline void init_thread(struct target_pt_regs *_regs, struct image_info *
|
||||
get_user_ual(_regs->gpr[3], pos);
|
||||
pos += sizeof(abi_ulong);
|
||||
_regs->gpr[4] = pos;
|
||||
for (tmp = 1; tmp != 0; pos += sizeof(abi_ulong)) {
|
||||
get_user_ual(tmp, pos);
|
||||
}
|
||||
for (tmp = 1; tmp != 0; pos += sizeof(abi_ulong))
|
||||
tmp = ldl(pos);
|
||||
_regs->gpr[5] = pos;
|
||||
}
|
||||
|
||||
|
@@ -908,12 +908,12 @@ int main(int argc, char **argv)
|
||||
cpu_exec_init_all();
|
||||
/* NOTE: we need to init the CPU at this stage to get
|
||||
qemu_host_page_size */
|
||||
cpu = cpu_init(cpu_model);
|
||||
if (!cpu) {
|
||||
env = cpu_init(cpu_model);
|
||||
if (!env) {
|
||||
fprintf(stderr, "Unable to find CPU definition\n");
|
||||
exit(1);
|
||||
}
|
||||
env = cpu->env_ptr;
|
||||
cpu = ENV_GET_CPU(env);
|
||||
#if defined(TARGET_SPARC) || defined(TARGET_PPC)
|
||||
cpu_reset(cpu);
|
||||
#endif
|
||||
|
158
configure
vendored
@@ -309,11 +309,10 @@ rbd=""
|
||||
smartcard_nss=""
|
||||
libusb=""
|
||||
usb_redir=""
|
||||
opengl=""
|
||||
glx=""
|
||||
zlib="yes"
|
||||
lzo=""
|
||||
snappy=""
|
||||
bzip2=""
|
||||
guest_agent=""
|
||||
guest_agent_with_vss="no"
|
||||
vss_win32_sdk=""
|
||||
@@ -327,7 +326,7 @@ seccomp=""
|
||||
glusterfs=""
|
||||
glusterfs_discard="no"
|
||||
glusterfs_zerofill="no"
|
||||
archipelago="no"
|
||||
archipelago=""
|
||||
gtk=""
|
||||
gtkabi=""
|
||||
vte=""
|
||||
@@ -1027,9 +1026,9 @@ for opt do
|
||||
;;
|
||||
--enable-vhost-scsi) vhost_scsi="yes"
|
||||
;;
|
||||
--disable-opengl) opengl="no"
|
||||
--disable-glx) glx="no"
|
||||
;;
|
||||
--enable-opengl) opengl="yes"
|
||||
--enable-glx) glx="yes"
|
||||
;;
|
||||
--disable-rbd) rbd="no"
|
||||
;;
|
||||
@@ -1061,10 +1060,6 @@ for opt do
|
||||
;;
|
||||
--enable-snappy) snappy="yes"
|
||||
;;
|
||||
--disable-bzip2) bzip2="no"
|
||||
;;
|
||||
--enable-bzip2) bzip2="yes"
|
||||
;;
|
||||
--enable-guest-agent) guest_agent="yes"
|
||||
;;
|
||||
--disable-guest-agent) guest_agent="no"
|
||||
@@ -1379,8 +1374,6 @@ Advanced options (experts only):
|
||||
--enable-usb-redir enable usb network redirection support
|
||||
--enable-lzo enable the support of lzo compression library
|
||||
--enable-snappy enable the support of snappy compression library
|
||||
--enable-bzip2 enable the support of bzip2 compression library (for
|
||||
reading bzip2-compressed dmg images)
|
||||
--disable-guest-agent disable building of the QEMU Guest Agent
|
||||
--enable-guest-agent enable building of the QEMU Guest Agent
|
||||
--with-vss-sdk=SDK-path enable Windows VSS support in QEMU Guest Agent
|
||||
@@ -1826,24 +1819,6 @@ EOF
|
||||
fi
|
||||
fi
|
||||
|
||||
##########################################
|
||||
# bzip2 check
|
||||
|
||||
if test "$bzip2" != "no" ; then
|
||||
cat > $TMPC << EOF
|
||||
#include <bzlib.h>
|
||||
int main(void) { BZ2_bzlibVersion(); return 0; }
|
||||
EOF
|
||||
if compile_prog "" "-lbz2" ; then
|
||||
bzip2="yes"
|
||||
else
|
||||
if test "$bzip2" = "yes"; then
|
||||
feature_not_found "libbzip2" "Install libbzip2 devel"
|
||||
fi
|
||||
bzip2="no"
|
||||
fi
|
||||
fi
|
||||
|
||||
##########################################
|
||||
# libseccomp check
|
||||
|
||||
@@ -1855,7 +1830,7 @@ if test "$seccomp" != "no" ; then
|
||||
seccomp="yes"
|
||||
else
|
||||
if test "$seccomp" = "yes"; then
|
||||
feature_not_found "libseccomp" "Install libseccomp devel >= 2.1.1"
|
||||
feature_not_found "libseccomp" "Install libseccomp devel >= 2.1.0"
|
||||
fi
|
||||
seccomp="no"
|
||||
fi
|
||||
@@ -1894,32 +1869,6 @@ EOF
|
||||
#if !defined(HVM_MAX_VCPUS)
|
||||
# error HVM_MAX_VCPUS not defined
|
||||
#endif
|
||||
int main(void) {
|
||||
xc_interface *xc;
|
||||
xs_daemon_open();
|
||||
xc = xc_interface_open(0, 0, 0);
|
||||
xc_hvm_set_mem_type(0, 0, HVMMEM_ram_ro, 0, 0);
|
||||
xc_gnttab_open(NULL, 0);
|
||||
xc_domain_add_to_physmap(0, 0, XENMAPSPACE_gmfn, 0, 0);
|
||||
xc_hvm_inject_msi(xc, 0, 0xf0000000, 0x00000000);
|
||||
xc_hvm_create_ioreq_server(xc, 0, 0, NULL);
|
||||
return 0;
|
||||
}
|
||||
EOF
|
||||
compile_prog "" "$xen_libs"
|
||||
then
|
||||
xen_ctrl_version=450
|
||||
xen=yes
|
||||
|
||||
elif
|
||||
cat > $TMPC <<EOF &&
|
||||
#include <xenctrl.h>
|
||||
#include <xenstore.h>
|
||||
#include <stdint.h>
|
||||
#include <xen/hvm/hvm_info_table.h>
|
||||
#if !defined(HVM_MAX_VCPUS)
|
||||
# error HVM_MAX_VCPUS not defined
|
||||
#endif
|
||||
int main(void) {
|
||||
xc_interface *xc;
|
||||
xs_daemon_open();
|
||||
@@ -2084,15 +2033,6 @@ if test "$sparse" != "no" ; then
|
||||
fi
|
||||
fi
|
||||
|
||||
##########################################
|
||||
# X11 probe
|
||||
x11_cflags=
|
||||
x11_libs=-lX11
|
||||
if $pkg_config --exists "x11"; then
|
||||
x11_cflags=`$pkg_config --cflags x11`
|
||||
x11_libs=`$pkg_config --libs x11`
|
||||
fi
|
||||
|
||||
##########################################
|
||||
# GTK probe
|
||||
|
||||
@@ -2120,8 +2060,7 @@ if test "$gtk" != "no"; then
|
||||
gtk_cflags=`$pkg_config --cflags $gtkpackage`
|
||||
gtk_libs=`$pkg_config --libs $gtkpackage`
|
||||
if $pkg_config --exists "$gtkx11package >= $gtkversion"; then
|
||||
gtk_cflags="$gtk_cflags $x11_cflags"
|
||||
gtk_libs="$gtk_libs $x11_libs"
|
||||
gtk_libs="$gtk_libs -lX11"
|
||||
fi
|
||||
libs_softmmu="$gtk_libs $libs_softmmu"
|
||||
gtk="yes"
|
||||
@@ -2246,9 +2185,8 @@ if test "$sdl" = "yes" ; then
|
||||
#endif
|
||||
int main(void) { return 0; }
|
||||
EOF
|
||||
if compile_prog "$sdl_cflags $x11_cflags" "$sdl_libs $x11_libs" ; then
|
||||
sdl_cflags="$sdl_cflags $x11_cflags"
|
||||
sdl_libs="$sdl_libs $x11_libs"
|
||||
if compile_prog "$sdl_cflags" "$sdl_libs" ; then
|
||||
sdl_libs="$sdl_libs -lX11"
|
||||
fi
|
||||
libs_softmmu="$sdl_libs $libs_softmmu"
|
||||
fi
|
||||
@@ -2789,7 +2727,7 @@ fi
|
||||
if test "$modules" = yes; then
|
||||
shacmd_probe="sha1sum sha1 shasum"
|
||||
for c in $shacmd_probe; do
|
||||
if has $c; then
|
||||
if which $c >/dev/null 2>&1; then
|
||||
shacmd="$c"
|
||||
break
|
||||
fi
|
||||
@@ -3118,35 +3056,23 @@ fi
|
||||
libs_softmmu="$libs_softmmu $fdt_libs"
|
||||
|
||||
##########################################
|
||||
# opengl probe (for sdl2, milkymist-tmu2)
|
||||
|
||||
# GLX probe, used by milkymist-tmu2
|
||||
# this is temporary, code will be switched to egl mid-term.
|
||||
cat > $TMPC << EOF
|
||||
if test "$glx" != "no" ; then
|
||||
glx_libs="-lGL -lX11"
|
||||
cat > $TMPC << EOF
|
||||
#include <X11/Xlib.h>
|
||||
#include <GL/gl.h>
|
||||
#include <GL/glx.h>
|
||||
int main(void) { glBegin(0); glXQueryVersion(0,0,0); return 0; }
|
||||
EOF
|
||||
if compile_prog "" "-lGL -lX11" ; then
|
||||
have_glx=yes
|
||||
else
|
||||
have_glx=no
|
||||
fi
|
||||
|
||||
if test "$opengl" != "no" ; then
|
||||
opengl_pkgs="gl"
|
||||
if $pkg_config $opengl_pkgs x11 && test "$have_glx" = "yes"; then
|
||||
opengl_cflags="$($pkg_config --cflags $opengl_pkgs) $x11_cflags"
|
||||
opengl_libs="$($pkg_config --libs $opengl_pkgs) $x11_libs"
|
||||
opengl=yes
|
||||
if compile_prog "" "-lGL -lX11" ; then
|
||||
glx=yes
|
||||
else
|
||||
if test "$opengl" = "yes" ; then
|
||||
feature_not_found "opengl" "Install GL devel (e.g. MESA)"
|
||||
if test "$glx" = "yes" ; then
|
||||
feature_not_found "glx" "Install GL devel (e.g. MESA)"
|
||||
fi
|
||||
opengl_cflags=""
|
||||
opengl_libs=""
|
||||
opengl=no
|
||||
glx_libs=
|
||||
glx=no
|
||||
fi
|
||||
fi
|
||||
|
||||
@@ -3168,12 +3094,6 @@ EOF
|
||||
archipelago="yes"
|
||||
libs_tools="$archipelago_libs $libs_tools"
|
||||
libs_softmmu="$archipelago_libs $libs_softmmu"
|
||||
|
||||
echo "WARNING: Please check the licenses of QEMU and libxseg carefully."
|
||||
echo "GPLv3 versions of libxseg may not be compatible with QEMU's"
|
||||
echo "license and therefore prevent redistribution."
|
||||
echo
|
||||
echo "To disable Archipelago, use --disable-archipelago"
|
||||
else
|
||||
if test "$archipelago" = "yes" ; then
|
||||
feature_not_found "Archipelago backend support" "Install libxseg devel"
|
||||
@@ -3389,22 +3309,6 @@ if compile_prog "" "" ; then
|
||||
fallocate_punch_hole=yes
|
||||
fi
|
||||
|
||||
# check that fallocate supports range zeroing inside the file
|
||||
fallocate_zero_range=no
|
||||
cat > $TMPC << EOF
|
||||
#include <fcntl.h>
|
||||
#include <linux/falloc.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
fallocate(0, FALLOC_FL_ZERO_RANGE, 0, 0);
|
||||
return 0;
|
||||
}
|
||||
EOF
|
||||
if compile_prog "" "" ; then
|
||||
fallocate_zero_range=yes
|
||||
fi
|
||||
|
||||
# check for posix_fallocate
|
||||
posix_fallocate=no
|
||||
cat > $TMPC << EOF
|
||||
@@ -4379,9 +4283,6 @@ if test -n "$sparc_cpu"; then
|
||||
echo "Target Sparc Arch $sparc_cpu"
|
||||
fi
|
||||
echo "xen support $xen"
|
||||
if test "$xen" = "yes" ; then
|
||||
echo "xen ctrl version $xen_ctrl_version"
|
||||
fi
|
||||
echo "brlapi support $brlapi"
|
||||
echo "bluez support $bluez"
|
||||
echo "Documentation $docs"
|
||||
@@ -4419,7 +4320,7 @@ echo "xfsctl support $xfs"
|
||||
echo "nss used $smartcard_nss"
|
||||
echo "libusb $libusb"
|
||||
echo "usb net redir $usb_redir"
|
||||
echo "OpenGL support $opengl"
|
||||
echo "GLX support $glx"
|
||||
echo "libiscsi support $libiscsi"
|
||||
echo "libnfs support $libnfs"
|
||||
echo "build guest agent $guest_agent"
|
||||
@@ -4439,7 +4340,6 @@ echo "vhdx $vhdx"
|
||||
echo "Quorum $quorum"
|
||||
echo "lzo support $lzo"
|
||||
echo "snappy support $snappy"
|
||||
echo "bzip2 support $bzip2"
|
||||
echo "NUMA host support $numa"
|
||||
|
||||
if test "$sdl_too_old" = "yes"; then
|
||||
@@ -4638,9 +4538,6 @@ fi
|
||||
if test "$fallocate_punch_hole" = "yes" ; then
|
||||
echo "CONFIG_FALLOCATE_PUNCH_HOLE=y" >> $config_host_mak
|
||||
fi
|
||||
if test "$fallocate_zero_range" = "yes" ; then
|
||||
echo "CONFIG_FALLOCATE_ZERO_RANGE=y" >> $config_host_mak
|
||||
fi
|
||||
if test "$posix_fallocate" = "yes" ; then
|
||||
echo "CONFIG_POSIX_FALLOCATE=y" >> $config_host_mak
|
||||
fi
|
||||
@@ -4785,10 +4682,9 @@ if test "$usb_redir" = "yes" ; then
|
||||
echo "CONFIG_USB_REDIR=y" >> $config_host_mak
|
||||
fi
|
||||
|
||||
if test "$opengl" = "yes" ; then
|
||||
echo "CONFIG_OPENGL=y" >> $config_host_mak
|
||||
echo "OPENGL_CFLAGS=$opengl_cflags" >> $config_host_mak
|
||||
echo "OPENGL_LIBS=$opengl_libs" >> $config_host_mak
|
||||
if test "$glx" = "yes" ; then
|
||||
echo "CONFIG_GLX=y" >> $config_host_mak
|
||||
echo "GLX_LIBS=$glx_libs" >> $config_host_mak
|
||||
fi
|
||||
|
||||
if test "$lzo" = "yes" ; then
|
||||
@@ -4799,11 +4695,6 @@ if test "$snappy" = "yes" ; then
|
||||
echo "CONFIG_SNAPPY=y" >> $config_host_mak
|
||||
fi
|
||||
|
||||
if test "$bzip2" = "yes" ; then
|
||||
echo "CONFIG_BZIP2=y" >> $config_host_mak
|
||||
echo "BZIP2_LIBS=-lbz2" >> $config_host_mak
|
||||
fi
|
||||
|
||||
if test "$libiscsi" = "yes" ; then
|
||||
echo "CONFIG_LIBISCSI=m" >> $config_host_mak
|
||||
echo "LIBISCSI_CFLAGS=$libiscsi_cflags" >> $config_host_mak
|
||||
@@ -5018,7 +4909,6 @@ echo "QEMU_CFLAGS=$QEMU_CFLAGS" >> $config_host_mak
|
||||
echo "QEMU_INCLUDES=$QEMU_INCLUDES" >> $config_host_mak
|
||||
if test "$sparse" = "yes" ; then
|
||||
echo "CC := REAL_CC=\"\$(CC)\" cgcc" >> $config_host_mak
|
||||
echo "CPP := REAL_CC=\"\$(CPP)\" cgcc" >> $config_host_mak
|
||||
echo "CXX := REAL_CC=\"\$(CXX)\" cgcc" >> $config_host_mak
|
||||
echo "HOST_CC := REAL_CC=\"\$(HOST_CC)\" cgcc" >> $config_host_mak
|
||||
echo "QEMU_CFLAGS += -Wbitwise -Wno-transparent-union -Wno-old-initializer -Wno-non-pointer-null" >> $config_host_mak
|
||||
@@ -5274,9 +5164,7 @@ case "$target_name" in
|
||||
\( "$target_name" = "ppcemb" -a "$cpu" = "ppc64" \) -o \
|
||||
\( "$target_name" = "mipsel" -a "$cpu" = "mips" \) -o \
|
||||
\( "$target_name" = "x86_64" -a "$cpu" = "i386" \) -o \
|
||||
\( "$target_name" = "i386" -a "$cpu" = "x86_64" \) -o \
|
||||
\( "$target_name" = "x86_64" -a "$cpu" = "x32" \) -o \
|
||||
\( "$target_name" = "i386" -a "$cpu" = "x32" \) \) ; then
|
||||
\( "$target_name" = "i386" -a "$cpu" = "x86_64" \) \) ; then
|
||||
echo "CONFIG_KVM=y" >> $config_target_mak
|
||||
if test "$vhost_net" = "yes" ; then
|
||||
echo "CONFIG_VHOST_NET=y" >> $config_target_mak
|
||||
|
@@ -25,6 +25,7 @@
|
||||
#include <stdlib.h>
|
||||
#include <setjmp.h>
|
||||
#include <stdint.h>
|
||||
#include <pthread.h>
|
||||
#include <ucontext.h>
|
||||
#include "qemu-common.h"
|
||||
#include "block/coroutine_int.h"
|
||||
@@ -47,8 +48,15 @@ typedef struct {
|
||||
/**
|
||||
* Per-thread coroutine bookkeeping
|
||||
*/
|
||||
static __thread CoroutineUContext leader;
|
||||
static __thread Coroutine *current;
|
||||
typedef struct {
|
||||
/** Currently executing coroutine */
|
||||
Coroutine *current;
|
||||
|
||||
/** The default coroutine */
|
||||
CoroutineUContext leader;
|
||||
} CoroutineThreadState;
|
||||
|
||||
static pthread_key_t thread_state_key;
|
||||
|
||||
/*
|
||||
* va_args to makecontext() must be type 'int', so passing
|
||||
@@ -60,6 +68,36 @@ union cc_arg {
|
||||
int i[2];
|
||||
};
|
||||
|
||||
static CoroutineThreadState *coroutine_get_thread_state(void)
|
||||
{
|
||||
CoroutineThreadState *s = pthread_getspecific(thread_state_key);
|
||||
|
||||
if (!s) {
|
||||
s = g_malloc0(sizeof(*s));
|
||||
s->current = &s->leader.base;
|
||||
pthread_setspecific(thread_state_key, s);
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
static void qemu_coroutine_thread_cleanup(void *opaque)
|
||||
{
|
||||
CoroutineThreadState *s = opaque;
|
||||
|
||||
g_free(s);
|
||||
}
|
||||
|
||||
static void __attribute__((constructor)) coroutine_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = pthread_key_create(&thread_state_key, qemu_coroutine_thread_cleanup);
|
||||
if (ret != 0) {
|
||||
fprintf(stderr, "unable to create leader key: %s\n", strerror(errno));
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
static void coroutine_trampoline(int i0, int i1)
|
||||
{
|
||||
union cc_arg arg;
|
||||
@@ -155,23 +193,15 @@ void qemu_coroutine_delete(Coroutine *co_)
|
||||
g_free(co);
|
||||
}
|
||||
|
||||
/* This function is marked noinline to prevent GCC from inlining it
|
||||
* into coroutine_trampoline(). If we allow it to do that then it
|
||||
* hoists the code to get the address of the TLS variable "current"
|
||||
* out of the while() loop. This is an invalid transformation because
|
||||
* the sigsetjmp() call may be called when running thread A but
|
||||
* return in thread B, and so we might be in a different thread
|
||||
* context each time round the loop.
|
||||
*/
|
||||
CoroutineAction __attribute__((noinline))
|
||||
qemu_coroutine_switch(Coroutine *from_, Coroutine *to_,
|
||||
CoroutineAction action)
|
||||
CoroutineAction qemu_coroutine_switch(Coroutine *from_, Coroutine *to_,
|
||||
CoroutineAction action)
|
||||
{
|
||||
CoroutineUContext *from = DO_UPCAST(CoroutineUContext, base, from_);
|
||||
CoroutineUContext *to = DO_UPCAST(CoroutineUContext, base, to_);
|
||||
CoroutineThreadState *s = coroutine_get_thread_state();
|
||||
int ret;
|
||||
|
||||
current = to_;
|
||||
s->current = to_;
|
||||
|
||||
ret = sigsetjmp(from->env, 0);
|
||||
if (ret == 0) {
|
||||
@@ -182,13 +212,14 @@ qemu_coroutine_switch(Coroutine *from_, Coroutine *to_,
|
||||
|
||||
Coroutine *qemu_coroutine_self(void)
|
||||
{
|
||||
if (!current) {
|
||||
current = &leader.base;
|
||||
}
|
||||
return current;
|
||||
CoroutineThreadState *s = coroutine_get_thread_state();
|
||||
|
||||
return s->current;
|
||||
}
|
||||
|
||||
bool qemu_in_coroutine(void)
|
||||
{
|
||||
return current && current->caller;
|
||||
CoroutineThreadState *s = pthread_getspecific(thread_state_key);
|
||||
|
||||
return s && s->current->caller;
|
||||
}
|
||||
|
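The coroutine-ucontext.c hunks above replace plain __thread variables with a CoroutineThreadState that is created lazily and looked up through a pthread key, so each thread gets its own current-coroutine pointer and the state is freed by the key's destructor when the thread exits. A minimal sketch of that lazily-created per-thread state pattern (ThreadState and get_thread_state are illustrative names):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    int counter;
} ThreadState;

static pthread_key_t state_key;

static void state_destroy(void *opaque)
{
    free(opaque);
}

/* Register the key (and its destructor) once, before main() runs. */
static void __attribute__((constructor)) state_init(void)
{
    if (pthread_key_create(&state_key, state_destroy) != 0) {
        abort();
    }
}

/* Allocate this thread's state on first use and remember it in the key. */
static ThreadState *get_thread_state(void)
{
    ThreadState *s = pthread_getspecific(state_key);

    if (!s) {
        s = calloc(1, sizeof(*s));
        pthread_setspecific(state_key, s);
    }
    return s;
}

int main(void)
{
    get_thread_state()->counter++;
    printf("counter=%d\n", get_thread_state()->counter);
    return 0;
}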
69
cpu-exec.c
@@ -24,9 +24,6 @@
|
||||
#include "qemu/atomic.h"
|
||||
#include "sysemu/qtest.h"
|
||||
#include "qemu/timer.h"
|
||||
#include "exec/address-spaces.h"
|
||||
#include "exec/memory-internal.h"
|
||||
#include "qemu/rcu.h"
|
||||
|
||||
/* -icount align implementation. */
|
||||
|
||||
@@ -64,7 +61,8 @@ static void align_clocks(SyncClocks *sc, const CPUState *cpu)
|
||||
sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
|
||||
sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
|
||||
if (nanosleep(&sleep_delay, &rem_delay) < 0) {
|
||||
sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
|
||||
sc->diff_clk -= (sleep_delay.tv_sec - rem_delay.tv_sec) * 1000000000LL;
|
||||
sc->diff_clk -= sleep_delay.tv_nsec - rem_delay.tv_nsec;
|
||||
} else {
|
||||
sc->diff_clk = 0;
|
||||
}
|
||||
@@ -103,8 +101,10 @@ static void init_delay_params(SyncClocks *sc,
|
||||
if (!icount_align_option) {
|
||||
return;
|
||||
}
|
||||
sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
|
||||
sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
|
||||
sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
|
||||
sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
|
||||
sc->realtime_clock +
|
||||
cpu_get_clock_offset();
|
||||
sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
|
||||
if (sc->diff_clk < max_delay) {
|
||||
max_delay = sc->diff_clk;
|
||||
@@ -144,33 +144,6 @@ void cpu_resume_from_signal(CPUState *cpu, void *puc)
|
||||
cpu->exception_index = -1;
|
||||
siglongjmp(cpu->jmp_env, 1);
|
||||
}
|
||||
|
||||
void cpu_reload_memory_map(CPUState *cpu)
|
||||
{
|
||||
AddressSpaceDispatch *d;
|
||||
|
||||
if (qemu_in_vcpu_thread()) {
|
||||
/* Do not let the guest prolong the critical section as much as it
|
||||
* as it desires.
|
||||
*
|
||||
* Currently, this is prevented by the I/O thread's periodinc kicking
|
||||
* of the VCPU thread (iothread_requesting_mutex, qemu_cpu_kick_thread)
|
||||
* but this will go away once TCG's execution moves out of the global
|
||||
* mutex.
|
||||
*
|
||||
* This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
|
||||
* only protects cpu->as->dispatch. Since we reload it below, we can
|
||||
* split the critical section.
|
||||
*/
|
||||
rcu_read_unlock();
|
||||
rcu_read_lock();
|
||||
}
|
||||
|
||||
/* The CPU and TLB are protected by the iothread lock. */
|
||||
d = atomic_rcu_read(&cpu->as->dispatch);
|
||||
cpu->memory_dispatch = d;
|
||||
tlb_flush(cpu, 1);
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Execute a TB, and fix up the CPU state afterwards if necessary */
|
||||
@@ -195,9 +168,7 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
|
||||
}
|
||||
#endif /* DEBUG_DISAS */
|
||||
|
||||
cpu->can_do_io = 0;
|
||||
next_tb = tcg_qemu_tb_exec(env, tb_ptr);
|
||||
cpu->can_do_io = 1;
|
||||
trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK),
|
||||
next_tb & TB_EXIT_MASK);
|
||||
|
||||
@@ -231,19 +202,14 @@ static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
|
||||
{
|
||||
CPUState *cpu = ENV_GET_CPU(env);
|
||||
TranslationBlock *tb;
|
||||
target_ulong pc = orig_tb->pc;
|
||||
target_ulong cs_base = orig_tb->cs_base;
|
||||
uint64_t flags = orig_tb->flags;
|
||||
|
||||
/* Should never happen.
|
||||
We only end up here when an existing TB is too long. */
|
||||
if (max_cycles > CF_COUNT_MASK)
|
||||
max_cycles = CF_COUNT_MASK;
|
||||
|
||||
/* tb_gen_code can flush our orig_tb, invalidate it now */
|
||||
tb_phys_invalidate(orig_tb, -1);
|
||||
tb = tb_gen_code(cpu, pc, cs_base, flags,
|
||||
max_cycles | CF_NOCACHE);
|
||||
tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
|
||||
max_cycles);
|
||||
cpu->current_tb = tb;
|
||||
/* execute the generated code */
|
||||
trace_exec_tb_nocache(tb, tb->pc);
|
||||
@@ -382,13 +348,12 @@ int cpu_exec(CPUArchState *env)
|
||||
* an instruction scheduling constraint on modern architectures. */
|
||||
smp_mb();
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
if (unlikely(exit_request)) {
|
||||
cpu->exit_request = 1;
|
||||
}
|
||||
|
||||
cc->cpu_exec_enter(cpu);
|
||||
cpu->exception_index = -1;
|
||||
|
||||
/* Calculate difference between guest clock and host clock.
|
||||
* This delay includes the delay of the last cycle, so
|
||||
@@ -408,7 +373,6 @@ int cpu_exec(CPUArchState *env)
|
||||
if (ret == EXCP_DEBUG) {
|
||||
cpu_handle_debug_exception(env);
|
||||
}
|
||||
cpu->exception_index = -1;
|
||||
break;
|
||||
} else {
|
||||
#if defined(CONFIG_USER_ONLY)
|
||||
@@ -419,7 +383,6 @@ int cpu_exec(CPUArchState *env)
|
||||
cc->do_interrupt(cpu);
|
||||
#endif
|
||||
ret = cpu->exception_index;
|
||||
cpu->exception_index = -1;
|
||||
break;
|
||||
#else
|
||||
cc->do_interrupt(cpu);
|
||||
@@ -526,22 +489,28 @@ int cpu_exec(CPUArchState *env)
|
||||
* interrupt_request) which we will handle
|
||||
* next time around the loop.
|
||||
*/
|
||||
tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
|
||||
next_tb = 0;
|
||||
break;
|
||||
case TB_EXIT_ICOUNT_EXPIRED:
|
||||
{
|
||||
/* Instruction counter expired. */
|
||||
int insns_left = cpu->icount_decr.u32;
|
||||
int insns_left;
|
||||
tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
|
||||
insns_left = cpu->icount_decr.u32;
|
||||
if (cpu->icount_extra && insns_left >= 0) {
|
||||
/* Refill decrementer and continue execution. */
|
||||
cpu->icount_extra += insns_left;
|
||||
insns_left = MIN(0xffff, cpu->icount_extra);
|
||||
if (cpu->icount_extra > 0xffff) {
|
||||
insns_left = 0xffff;
|
||||
} else {
|
||||
insns_left = cpu->icount_extra;
|
||||
}
|
||||
cpu->icount_extra -= insns_left;
|
||||
cpu->icount_decr.u16.low = insns_left;
|
||||
} else {
|
||||
if (insns_left > 0) {
|
||||
/* Execute remaining instructions. */
|
||||
tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
|
||||
cpu_exec_nocache(env, insns_left, tb);
|
||||
align_clocks(&sc, cpu);
|
||||
}
|
||||
@@ -568,7 +537,6 @@ int cpu_exec(CPUArchState *env)
|
||||
cpu = current_cpu;
|
||||
env = cpu->env_ptr;
|
||||
cc = CPU_GET_CLASS(cpu);
|
||||
cpu->can_do_io = 1;
|
||||
#ifdef TARGET_I386
|
||||
x86_cpu = X86_CPU(cpu);
|
||||
#endif
|
||||
@@ -580,7 +548,6 @@ int cpu_exec(CPUArchState *env)
|
||||
} /* for(;;) */
|
||||
|
||||
cc->cpu_exec_exit(cpu);
|
||||
rcu_read_unlock();
|
||||
|
||||
/* fail safe : never use current_cpu outside cpu_exec() */
|
||||
current_cpu = NULL;
|
||||
|
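The TB_EXIT_ICOUNT_EXPIRED hunk above refills the 16-bit icount decrementer from the larger icount_extra budget: at most 0xffff instructions are moved into the low half-word and subtracted from the remaining budget. A tiny standalone sketch of that refill step (icount_refill is an illustrative name):

#include <stdint.h>
#include <stdio.h>

/* Refill a 16-bit instruction-count decrementer from a larger "extra"
 * budget, as in the icount hunk above. */
static void icount_refill(uint16_t *low, int64_t *extra)
{
    int64_t insns_left = *extra > 0xffff ? 0xffff : *extra;

    *extra -= insns_left;
    *low = insns_left;
}

int main(void)
{
    uint16_t low = 0;
    int64_t extra = 100000;

    icount_refill(&low, &extra);
    printf("low=%u extra=%lld\n", low, (long long)extra); /* low=65535 extra=34465 */
    return 0;
}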
76
cpus.c
@@ -136,7 +136,8 @@ typedef struct TimersState {

static TimersState timers_state;

int64_t cpu_get_icount_raw(void)
/* Return the virtual CPU time, based on the instruction counter. */
static int64_t cpu_get_icount_locked(void)
{
int64_t icount;
CPUState *cpu = current_cpu;
@@ -144,18 +145,10 @@ int64_t cpu_get_icount_raw(void)
icount = timers_state.qemu_icount;
if (cpu) {
if (!cpu_can_do_io(cpu)) {
fprintf(stderr, "Bad icount read\n");
exit(1);
fprintf(stderr, "Bad clock read\n");
}
icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
}
return icount;
}

/* Return the virtual CPU time, based on the instruction counter. */
static int64_t cpu_get_icount_locked(void)
{
int64_t icount = cpu_get_icount_raw();
return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount);
}

@@ -229,6 +222,23 @@ int64_t cpu_get_clock(void)
return ti;
}

/* return the offset between the host clock and virtual CPU clock */
int64_t cpu_get_clock_offset(void)
{
int64_t ti;
unsigned start;

do {
start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
ti = timers_state.cpu_clock_offset;
if (!timers_state.cpu_ticks_enabled) {
ti -= get_clock();
}
} while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

return -ti;
}

/* enable cpu_get_ticks()
* Caller must hold BQL which server as mutex for vm_clock_seqlock.
*/
@@ -307,7 +317,7 @@ static void icount_adjust(void)
static void icount_adjust_rt(void *opaque)
{
timer_mod(icount_rt_timer,
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
icount_adjust();
}

@@ -335,7 +345,7 @@ static void icount_warp_rt(void *opaque)

seqlock_write_lock(&timers_state.vm_clock_seqlock);
if (runstate_is_running()) {
int64_t clock = cpu_get_clock_locked();
int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
int64_t warp_delta;

warp_delta = clock - vm_clock_warp_start;
@@ -344,8 +354,9 @@ static void icount_warp_rt(void *opaque)
* In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
* far ahead of real time.
*/
int64_t cur_time = cpu_get_clock_locked();
int64_t cur_icount = cpu_get_icount_locked();
int64_t delta = clock - cur_icount;
int64_t delta = cur_time - cur_icount;
warp_delta = MIN(warp_delta, delta);
}
timers_state.qemu_icount_bias += warp_delta;
@@ -412,7 +423,7 @@ void qemu_clock_warp(QEMUClockType type)
}

/* We want to use the earliest deadline from ALL vm_clocks */
clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
if (deadline < 0) {
return;
@@ -430,8 +441,8 @@ void qemu_clock_warp(QEMUClockType type)
* sleep in icount mode if there is a pending QEMU_CLOCK_VIRTUAL
* timer; rather time could just advance to the next QEMU_CLOCK_VIRTUAL
* event. Instead, we do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL
* after some "real" time, (related to the time left until the next
* event) has passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this.
* after some e"real" time, (related to the time left until the next
* event) has passed. The QEMU_CLOCK_REALTIME timer will do this.
* This avoids that the warps are visible externally; for example,
* you will not be sending network packets continuously instead of
* every 100ms.
@@ -505,8 +516,8 @@ void configure_icount(QemuOpts *opts, Error **errp)
return;
}
icount_align_option = qemu_opt_get_bool(opts, "align", false);
icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
icount_warp_rt, NULL);
icount_warp_timer = timer_new_ns(QEMU_CLOCK_REALTIME,
icount_warp_rt, NULL);
if (strcmp(option, "auto") != 0) {
errno = 0;
icount_time_shift = strtol(option, &rem_str, 0);
@@ -530,10 +541,10 @@ void configure_icount(QemuOpts *opts, Error **errp)
the virtual time trigger catches emulated time passing too fast.
Realtime triggers occur even when idle, so use them less frequently
than VM triggers. */
icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
icount_adjust_rt, NULL);
icount_rt_timer = timer_new_ms(QEMU_CLOCK_REALTIME,
icount_adjust_rt, NULL);
timer_mod(icount_rt_timer,
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
icount_adjust_vm, NULL);
timer_mod(icount_vm_timer,
@@ -778,7 +789,7 @@ static void qemu_tcg_init_cpu_signals(void)

static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static unsigned iothread_requesting_mutex;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

@@ -927,7 +938,6 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
qemu_mutex_lock(&qemu_global_mutex);
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
cpu->can_do_io = 1;
current_cpu = cpu;

r = kvm_init_vcpu(cpu);
@@ -968,7 +978,6 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
qemu_mutex_lock_iothread();
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
cpu->can_do_io = 1;

sigemptyset(&waitset);
sigaddset(&waitset, SIG_IPI);
@@ -1011,7 +1020,6 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
CPU_FOREACH(cpu) {
cpu->thread_id = qemu_get_thread_id();
cpu->created = true;
cpu->can_do_io = 1;
}
qemu_cond_signal(&qemu_cpu_cond);

@@ -1025,9 +1033,6 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
}
}

/* process any pending work */
exit_request = 1;

while (1) {
tcg_exec_all();

@@ -1111,23 +1116,22 @@ bool qemu_cpu_is_self(CPUState *cpu)
return qemu_thread_is_self(cpu->thread);
}

bool qemu_in_vcpu_thread(void)
static bool qemu_in_vcpu_thread(void)
{
return current_cpu && qemu_cpu_is_self(current_cpu);
}

void qemu_mutex_lock_iothread(void)
{
atomic_inc(&iothread_requesting_mutex);
if (!tcg_enabled() || !first_cpu || !first_cpu->thread) {
if (!tcg_enabled()) {
qemu_mutex_lock(&qemu_global_mutex);
atomic_dec(&iothread_requesting_mutex);
} else {
iothread_requesting_mutex = true;
if (qemu_mutex_trylock(&qemu_global_mutex)) {
qemu_cpu_kick_thread(first_cpu);
qemu_mutex_lock(&qemu_global_mutex);
}
atomic_dec(&iothread_requesting_mutex);
iothread_requesting_mutex = false;
qemu_cond_broadcast(&qemu_io_proceeded_cond);
}
}
@@ -1353,7 +1357,7 @@ static int tcg_cpu_exec(CPUArchState *env)
}
ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
tcg_time += profile_getclock() - ti;
qemu_time += profile_getclock() - ti;
#endif
if (use_icount) {
/* Fold pending instructions back into the
@@ -1474,7 +1478,6 @@ void qmp_memsave(int64_t addr, int64_t size, const char *filename,
uint32_t l;
CPUState *cpu;
uint8_t buf[1024];
int64_t orig_addr = addr, orig_size = size;

if (!has_cpu) {
cpu_index = 0;
@@ -1498,8 +1501,7 @@ void qmp_memsave(int64_t addr, int64_t size, const char *filename,
if (l > size)
l = size;
if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
" specified", orig_addr, orig_size);
error_setg(errp, "Invalid addr 0x%016" PRIx64 "specified", addr);
goto exit;
}
if (fwrite(buf, 1, l, f) != l) {
16 cputlb.c
@@ -243,12 +243,8 @@ static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
}

/* Add a new TLB entry. At most one entry for a given virtual address
* is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
* supplied size is only used by tlb_flush_page.
*
* Called from TCG-generated code, which is under an RCU read-side
* critical section.
*/
is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
supplied size is only used by tlb_flush_page. */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
hwaddr paddr, int prot,
int mmu_idx, target_ulong size)
@@ -269,12 +265,12 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr,
}

sz = size;
section = address_space_translate_for_iotlb(cpu, paddr, &xlat, &sz);
section = address_space_translate_for_iotlb(cpu->as, paddr,
&xlat, &sz);
assert(sz >= TARGET_PAGE_SIZE);

#if defined(DEBUG_TLB)
qemu_log_mask(CPU_LOG_MMU,
"tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
" prot=%x idx=%d\n",
vaddr, paddr, prot, mmu_idx);
#endif
@@ -350,7 +346,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
cpu_ldub_code(env1, addr);
}
pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
mr = iotlb_to_region(cpu, pd);
mr = iotlb_to_region(cpu->as, pd);
if (memory_region_is_unassigned(mr)) {
CPUClass *cc = CPU_GET_CLASS(cpu);
@@ -5,6 +5,8 @@ include usb.mak
CONFIG_SERIAL=y
CONFIG_I8254=y
CONFIG_PCKBD=y
CONFIG_VGA=y
CONFIG_VGA_PCI=y
CONFIG_VGA_CIRRUS=y
CONFIG_IDE_CORE=y
CONFIG_IDE_QDEV=y

@@ -32,10 +32,7 @@ CONFIG_DS1338=y
CONFIG_PFLASH_CFI01=y
CONFIG_PFLASH_CFI02=y
CONFIG_MICRODRIVE=y
CONFIG_USB=y
CONFIG_USB_MUSB=y
CONFIG_USB_EHCI_SYSBUS=y
CONFIG_PLATFORM_BUS=y

CONFIG_ARM11MPCORE=y
CONFIG_A9MPCORE=y
@@ -81,23 +78,13 @@ CONFIG_NSERIES=y
CONFIG_REALVIEW=y
CONFIG_ZAURUS=y
CONFIG_ZYNQ=y
CONFIG_STM32F2XX_TIMER=y
CONFIG_STM32F2XX_USART=y
CONFIG_STM32F2XX_SYSCFG=y
CONFIG_STM32F205_SOC=y

CONFIG_VERSATILE_PCI=y
CONFIG_VERSATILE_I2C=y

CONFIG_PCI_GENERIC=y

CONFIG_SDHCI=y
CONFIG_INTEGRATOR_DEBUG=y

CONFIG_ALLWINNER_A10_PIT=y
CONFIG_ALLWINNER_A10_PIC=y
CONFIG_ALLWINNER_A10=y

CONFIG_XIO3130=y
CONFIG_IOH3420=y
CONFIG_I82801B11=y

@@ -3,7 +3,9 @@
include pci.mak
include sound.mak
include usb.mak
CONFIG_VGA=y
CONFIG_QXL=$(CONFIG_SPICE)
CONFIG_VGA_PCI=y
CONFIG_VGA_ISA=y
CONFIG_VGA_CIRRUS=y
CONFIG_VMWARE_VGA=y
@@ -26,6 +28,7 @@ CONFIG_APPLESMC=y
CONFIG_I8259=y
CONFIG_PFLASH_CFI01=y
CONFIG_TPM_TIS=$(CONFIG_TPM)
CONFIG_PCI_HOTPLUG_OLD=y
CONFIG_MC146818RTC=y
CONFIG_PAM=y
CONFIG_PCI_PIIX=y
@@ -42,6 +45,3 @@ CONFIG_IOAPIC=y
CONFIG_ICC_BUS=y
CONFIG_PVPANIC=y
CONFIG_MEM_HOTPLUG=y
CONFIG_XIO3130=y
CONFIG_IOH3420=y
CONFIG_I82801B11=y

@@ -2,7 +2,7 @@

CONFIG_LM32=y
CONFIG_MILKYMIST=y
CONFIG_MILKYMIST_TMU2=$(CONFIG_OPENGL)
CONFIG_MILKYMIST_TMU2=$(CONFIG_GLX)
CONFIG_FRAMEBUFFER=y
CONFIG_PTIMER=y
CONFIG_PFLASH_CFI01=y

@@ -4,6 +4,8 @@ include pci.mak
include sound.mak
include usb.mak
CONFIG_ESP=y
CONFIG_VGA=y
CONFIG_VGA_PCI=y
CONFIG_VGA_ISA=y
CONFIG_VGA_ISA_MM=y
CONFIG_VGA_CIRRUS=y

@@ -4,6 +4,8 @@ include pci.mak
include sound.mak
include usb.mak
CONFIG_ESP=y
CONFIG_VGA=y
CONFIG_VGA_PCI=y
CONFIG_VGA_ISA=y
CONFIG_VGA_ISA_MM=y
CONFIG_VGA_CIRRUS=y

@@ -4,6 +4,8 @@ include pci.mak
include sound.mak
include usb.mak
CONFIG_ESP=y
CONFIG_VGA=y
CONFIG_VGA_PCI=y
CONFIG_VGA_ISA=y
CONFIG_VGA_ISA_MM=y
CONFIG_VGA_CIRRUS=y

@@ -4,6 +4,8 @@ include pci.mak
include sound.mak
include usb.mak
CONFIG_ESP=y
CONFIG_VGA=y
CONFIG_VGA_PCI=y
CONFIG_VGA_ISA=y
CONFIG_VGA_ISA_MM=y
CONFIG_VGA_CIRRUS=y

@@ -30,9 +30,3 @@ CONFIG_IPACK=y
CONFIG_WDT_IB6300ESB=y
CONFIG_PCI_TESTDEV=y
CONFIG_NVME_PCI=y
CONFIG_SD=y
CONFIG_SDHCI=y
CONFIG_EDU=y
CONFIG_VGA=y
CONFIG_VGA_PCI=y
CONFIG_IVSHMEM=$(CONFIG_KVM)

@@ -6,6 +6,8 @@ include usb.mak
CONFIG_ISA_MMIO=y
CONFIG_ESCC=y
CONFIG_M48T59=y
CONFIG_VGA=y
CONFIG_VGA_PCI=y
CONFIG_SERIAL=y
CONFIG_PARALLEL=y
CONFIG_I8254=y
@@ -38,11 +40,11 @@ CONFIG_PTIMER=y
CONFIG_I8259=y
CONFIG_XILINX=y
CONFIG_XILINX_ETHLITE=y
CONFIG_OPENPIC=y
CONFIG_PREP=y
CONFIG_MAC=y
CONFIG_E500=y
CONFIG_OPENPIC_KVM=$(and $(CONFIG_E500),$(CONFIG_KVM))
CONFIG_PLATFORM_BUS=y
CONFIG_ETSEC=y
CONFIG_LIBDECNUMBER=y
# For PReP

@@ -6,6 +6,8 @@ include usb.mak
CONFIG_ISA_MMIO=y
CONFIG_ESCC=y
CONFIG_M48T59=y
CONFIG_VGA=y
CONFIG_VGA_PCI=y
CONFIG_SERIAL=y
CONFIG_PARALLEL=y
CONFIG_I8254=y
@@ -38,17 +40,23 @@ CONFIG_PTIMER=y
CONFIG_I8259=y
CONFIG_XILINX=y
CONFIG_XILINX_ETHLITE=y
CONFIG_OPENPIC=y
CONFIG_PSERIES=y
CONFIG_PREP=y
CONFIG_MAC=y
CONFIG_E500=y
CONFIG_OPENPIC_KVM=$(and $(CONFIG_E500),$(CONFIG_KVM))
CONFIG_PLATFORM_BUS=y
CONFIG_ETSEC=y
CONFIG_LIBDECNUMBER=y
# For pSeries
CONFIG_XICS=$(CONFIG_PSERIES)
CONFIG_XICS_KVM=$(and $(CONFIG_PSERIES),$(CONFIG_KVM))
# For PReP
CONFIG_I82378=y
CONFIG_I8259=y
CONFIG_I8254=y
CONFIG_PCSPK=y
CONFIG_I82374=y
CONFIG_I8257=y
CONFIG_MC146818RTC=y
CONFIG_ISA_TESTDEV=y

@@ -4,6 +4,8 @@ include pci.mak
include sound.mak
include usb.mak
CONFIG_M48T59=y
CONFIG_VGA=y
CONFIG_VGA_PCI=y
CONFIG_SERIAL=y
CONFIG_I8257=y
CONFIG_OPENPIC=y
@@ -13,4 +15,5 @@ CONFIG_PTIMER=y
CONFIG_I8259=y
CONFIG_XILINX=y
CONFIG_XILINX_ETHLITE=y
CONFIG_OPENPIC=y
CONFIG_LIBDECNUMBER=y

@@ -1,5 +1,3 @@
CONFIG_PCI=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO=y
CONFIG_SCLPCONSOLE=y
CONFIG_S390_FLIC=y

@@ -5,6 +5,8 @@ include usb.mak
CONFIG_ISA_MMIO=y
CONFIG_M48T59=y
CONFIG_PTIMER=y
CONFIG_VGA=y
CONFIG_VGA_PCI=y
CONFIG_SERIAL=y
CONFIG_PARALLEL=y
CONFIG_PCKBD=y

@@ -1,4 +1,3 @@
CONFIG_USB=y
CONFIG_USB_TABLET_WACOM=y
CONFIG_USB_STORAGE_BOT=y
CONFIG_USB_STORAGE_UAS=y

@@ -3,7 +3,9 @@
include pci.mak
include sound.mak
include usb.mak
CONFIG_VGA=y
CONFIG_QXL=$(CONFIG_SPICE)
CONFIG_VGA_PCI=y
CONFIG_VGA_ISA=y
CONFIG_VGA_CIRRUS=y
CONFIG_VMWARE_VGA=y
@@ -26,6 +28,7 @@ CONFIG_APPLESMC=y
CONFIG_I8259=y
CONFIG_PFLASH_CFI01=y
CONFIG_TPM_TIS=$(CONFIG_TPM)
CONFIG_PCI_HOTPLUG_OLD=y
CONFIG_MC146818RTC=y
CONFIG_PAM=y
CONFIG_PCI_PIIX=y
@@ -42,6 +45,3 @@ CONFIG_IOAPIC=y
CONFIG_ICC_BUS=y
CONFIG_PVPANIC=y
CONFIG_MEM_HOTPLUG=y
CONFIG_XIO3130=y
CONFIG_IOH3420=y
CONFIG_I82801B11=y
@@ -30,7 +30,7 @@
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"

static DriveInfo *add_init_drive(const char *optstr)
DriveInfo *add_init_drive(const char *optstr)
{
DriveInfo *dinfo;
QemuOpts *opts;
@@ -50,7 +50,7 @@ static DriveInfo *add_init_drive(const char *optstr)
return dinfo;
}

void hmp_drive_add(Monitor *mon, const QDict *qdict)
void drive_hot_add(Monitor *mon, const QDict *qdict)
{
DriveInfo *dinfo = NULL;
const char *opts = qdict_get_str(qdict, "opts");
@@ -69,8 +69,9 @@ void hmp_drive_add(Monitor *mon, const QDict *qdict)
monitor_printf(mon, "OK\n");
break;
default:
monitor_printf(mon, "Can't hot-add drive to type %d\n", dinfo->type);
goto err;
if (pci_drive_hot_add(mon, qdict, dinfo)) {
goto err;
}
}
return;
@@ -24,7 +24,7 @@
#include "sysemu/device_tree.h"
#include "sysemu/sysemu.h"
#include "hw/loader.h"
#include "hw/boards.h"
#include "qemu/option.h"
#include "qemu/config-file.h"

#include <libfdt.h>
@@ -245,7 +245,8 @@ uint32_t qemu_fdt_alloc_phandle(void *fdt)
* which phandle id to start allocting phandles.
*/
if (!phandle) {
phandle = machine_phandle_start(current_machine);
phandle = qemu_opt_get_number(qemu_get_machine_opts(),
"phandle_start", 0);
}

if (!phandle) {
@@ -323,7 +324,6 @@ int qemu_fdt_setprop_sized_cells_from_array(void *fdt,
uint64_t value;
int cellnum, vnum, ncells;
uint32_t hival;
int ret;

propcells = g_new0(uint32_t, numvalues * 2);

@@ -331,23 +331,18 @@ int qemu_fdt_setprop_sized_cells_from_array(void *fdt,
for (vnum = 0; vnum < numvalues; vnum++) {
ncells = values[vnum * 2];
if (ncells != 1 && ncells != 2) {
ret = -1;
goto out;
return -1;
}
value = values[vnum * 2 + 1];
hival = cpu_to_be32(value >> 32);
if (ncells > 1) {
propcells[cellnum++] = hival;
} else if (hival != 0) {
ret = -1;
goto out;
return -1;
}
propcells[cellnum++] = cpu_to_be32(value);
}

ret = qemu_fdt_setprop(fdt, node_path, property, propcells,
cellnum * sizeof(uint32_t));
out:
g_free(propcells);
return ret;
return qemu_fdt_setprop(fdt, node_path, property, propcells,
cellnum * sizeof(uint32_t));
}
|
@@ -67,8 +67,7 @@ static void vixl_init(FILE *f) {
|
||||
int print_insn_arm_a64(uint64_t addr, disassemble_info *info)
|
||||
{
|
||||
uint8_t bytes[INSN_SIZE];
|
||||
uint32_t instrval;
|
||||
const Instruction *instr;
|
||||
uint32_t instr;
|
||||
int status;
|
||||
|
||||
status = info->read_memory_func(addr, bytes, INSN_SIZE, info);
|
||||
@@ -81,10 +80,8 @@ int print_insn_arm_a64(uint64_t addr, disassemble_info *info)
|
||||
vixl_init(info->stream);
|
||||
}
|
||||
|
||||
instrval = bytes[0] | bytes[1] << 8 | bytes[2] << 16 | bytes[3] << 24;
|
||||
instr = reinterpret_cast<const Instruction *>(&instrval);
|
||||
vixl_disasm->MapCodeAddress(addr, instr);
|
||||
vixl_decoder->Decode(instr);
|
||||
instr = bytes[0] | bytes[1] << 8 | bytes[2] << 16 | bytes[3] << 24;
|
||||
vixl_decoder->Decode(reinterpret_cast<Instruction*>(&instr));
|
||||
|
||||
return INSN_SIZE;
|
||||
}
|
||||
|
128 disas/arm.c
@@ -1549,6 +1549,10 @@ enum map_type {
|
||||
MAP_DATA
|
||||
};
|
||||
|
||||
enum map_type last_type;
|
||||
int last_mapping_sym = -1;
|
||||
bfd_vma last_mapping_addr = 0;
|
||||
|
||||
/* Decode a bitfield of the form matching regexp (N(-N)?,)*N(-N)?.
|
||||
Returns pointer to following character of the format string and
|
||||
fills in *VALUEP and *WIDTHP with the extracted value and number of
|
||||
@@ -3874,11 +3878,135 @@ print_insn_arm (bfd_vma pc, struct disassemble_info *info)
|
||||
int is_data = false;
|
||||
unsigned int size = 4;
|
||||
void (*printer) (bfd_vma, struct disassemble_info *, long);
|
||||
#if 0
|
||||
bfd_boolean found = false;
|
||||
|
||||
if (info->disassembler_options)
|
||||
{
|
||||
parse_disassembler_options (info->disassembler_options);
|
||||
|
||||
/* To avoid repeated parsing of these options, we remove them here. */
|
||||
info->disassembler_options = NULL;
|
||||
}
|
||||
|
||||
/* First check the full symtab for a mapping symbol, even if there
|
||||
are no usable non-mapping symbols for this address. */
|
||||
if (info->symtab != NULL
|
||||
&& bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
|
||||
{
|
||||
bfd_vma addr;
|
||||
int n;
|
||||
int last_sym = -1;
|
||||
enum map_type type = MAP_ARM;
|
||||
|
||||
if (pc <= last_mapping_addr)
|
||||
last_mapping_sym = -1;
|
||||
is_thumb = (last_type == MAP_THUMB);
|
||||
found = false;
|
||||
/* Start scanning at the start of the function, or wherever
|
||||
we finished last time. */
|
||||
n = info->symtab_pos + 1;
|
||||
if (n < last_mapping_sym)
|
||||
n = last_mapping_sym;
|
||||
|
||||
/* Scan up to the location being disassembled. */
|
||||
for (; n < info->symtab_size; n++)
|
||||
{
|
||||
addr = bfd_asymbol_value (info->symtab[n]);
|
||||
if (addr > pc)
|
||||
break;
|
||||
if ((info->section == NULL
|
||||
|| info->section == info->symtab[n]->section)
|
||||
&& get_sym_code_type (info, n, &type))
|
||||
{
|
||||
last_sym = n;
|
||||
found = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (!found)
|
||||
{
|
||||
n = info->symtab_pos;
|
||||
if (n < last_mapping_sym - 1)
|
||||
n = last_mapping_sym - 1;
|
||||
|
||||
/* No mapping symbol found at this address. Look backwards
|
||||
for a preceding one. */
|
||||
for (; n >= 0; n--)
|
||||
{
|
||||
if (get_sym_code_type (info, n, &type))
|
||||
{
|
||||
last_sym = n;
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
last_mapping_sym = last_sym;
|
||||
last_type = type;
|
||||
is_thumb = (last_type == MAP_THUMB);
|
||||
is_data = (last_type == MAP_DATA);
|
||||
|
||||
/* Look a little bit ahead to see if we should print out
|
||||
two or four bytes of data. If there's a symbol,
|
||||
mapping or otherwise, after two bytes then don't
|
||||
print more. */
|
||||
if (is_data)
|
||||
{
|
||||
size = 4 - (pc & 3);
|
||||
for (n = last_sym + 1; n < info->symtab_size; n++)
|
||||
{
|
||||
addr = bfd_asymbol_value (info->symtab[n]);
|
||||
if (addr > pc)
|
||||
{
|
||||
if (addr - pc < size)
|
||||
size = addr - pc;
|
||||
break;
|
||||
}
|
||||
}
|
||||
/* If the next symbol is after three bytes, we need to
|
||||
print only part of the data, so that we can use either
|
||||
.byte or .short. */
|
||||
if (size == 3)
|
||||
size = (pc & 1) ? 1 : 2;
|
||||
}
|
||||
}
|
||||
|
||||
if (info->symbols != NULL)
|
||||
{
|
||||
if (bfd_asymbol_flavour (*info->symbols) == bfd_target_coff_flavour)
|
||||
{
|
||||
coff_symbol_type * cs;
|
||||
|
||||
cs = coffsymbol (*info->symbols);
|
||||
is_thumb = ( cs->native->u.syment.n_sclass == C_THUMBEXT
|
||||
|| cs->native->u.syment.n_sclass == C_THUMBSTAT
|
||||
|| cs->native->u.syment.n_sclass == C_THUMBLABEL
|
||||
|| cs->native->u.syment.n_sclass == C_THUMBEXTFUNC
|
||||
|| cs->native->u.syment.n_sclass == C_THUMBSTATFUNC);
|
||||
}
|
||||
else if (bfd_asymbol_flavour (*info->symbols) == bfd_target_elf_flavour
|
||||
&& !found)
|
||||
{
|
||||
/* If no mapping symbol has been found then fall back to the type
|
||||
of the function symbol. */
|
||||
elf_symbol_type * es;
|
||||
unsigned int type;
|
||||
|
||||
es = *(elf_symbol_type **)(info->symbols);
|
||||
type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
|
||||
|
||||
is_thumb = (type == STT_ARM_TFUNC) || (type == STT_ARM_16BIT);
|
||||
}
|
||||
}
|
||||
#else
|
||||
int little;
|
||||
|
||||
little = (info->endian == BFD_ENDIAN_LITTLE);
|
||||
is_thumb |= (pc & 1);
|
||||
pc &= ~(bfd_vma)1;
|
||||
#endif
|
||||
|
||||
if (force_thumb)
|
||||
is_thumb = true;
|
||||
|
13 disas/cris.c
@@ -1210,10 +1210,21 @@ cris_cc_strings[] =
"le",
"a",
/* This is a placeholder. In v0, this would be "ext". In v32, this
is "sb". */
is "sb". See cris_conds15. */
"wf"
};

/* Different names and semantics for condition 1111 (0xf). */
const struct cris_cond15 cris_cond15s[] =
{
/* FIXME: In what version did condition "ext" disappear? */
{"ext", cris_ver_v0_3},
{"wf", cris_ver_v10},
{"sb", cris_ver_v32p},
{NULL, 0}
};


/*
* Local variables:
* eval: (c-set-style "gnu")
@@ -2,7 +2,7 @@
The code in this directory is a subset of libvixl:
https://github.com/armvixl/vixl
(specifically, it is the set of files needed for disassembly only,
taken from libvixl 1.7).
taken from libvixl 1.6).
Bugfixes should preferably be sent upstream initially.

The disassembler does not currently support the entire A64 instruction
@@ -151,21 +151,21 @@ class CPURegister {
|
||||
return Aliases(other) && (size_ == other.size_);
|
||||
}
|
||||
|
||||
bool IsZero() const {
|
||||
inline bool IsZero() const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return IsRegister() && (code_ == kZeroRegCode);
|
||||
}
|
||||
|
||||
bool IsSP() const {
|
||||
inline bool IsSP() const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return IsRegister() && (code_ == kSPRegInternalCode);
|
||||
}
|
||||
|
||||
bool IsRegister() const {
|
||||
inline bool IsRegister() const {
|
||||
return type_ == kRegister;
|
||||
}
|
||||
|
||||
bool IsFPRegister() const {
|
||||
inline bool IsFPRegister() const {
|
||||
return type_ == kFPRegister;
|
||||
}
|
||||
|
||||
@@ -179,7 +179,7 @@ class CPURegister {
|
||||
const FPRegister& S() const;
|
||||
const FPRegister& D() const;
|
||||
|
||||
bool IsSameSizeAndType(const CPURegister& other) const {
|
||||
inline bool IsSameSizeAndType(const CPURegister& other) const {
|
||||
return (size_ == other.size_) && (type_ == other.type_);
|
||||
}
|
||||
|
||||
@@ -198,7 +198,7 @@ class CPURegister {
|
||||
class Register : public CPURegister {
|
||||
public:
|
||||
Register() : CPURegister() {}
|
||||
explicit Register(const CPURegister& other)
|
||||
inline explicit Register(const CPURegister& other)
|
||||
: CPURegister(other.code(), other.size(), other.type()) {
|
||||
VIXL_ASSERT(IsValidRegister());
|
||||
}
|
||||
@@ -213,6 +213,10 @@ class Register : public CPURegister {
|
||||
static const Register& WRegFromCode(unsigned code);
|
||||
static const Register& XRegFromCode(unsigned code);
|
||||
|
||||
// V8 compatibility.
|
||||
static const int kNumRegisters = kNumberOfRegisters;
|
||||
static const int kNumAllocatableRegisters = kNumberOfRegisters - 1;
|
||||
|
||||
private:
|
||||
static const Register wregisters[];
|
||||
static const Register xregisters[];
|
||||
@@ -221,12 +225,12 @@ class Register : public CPURegister {
|
||||
|
||||
class FPRegister : public CPURegister {
|
||||
public:
|
||||
FPRegister() : CPURegister() {}
|
||||
explicit FPRegister(const CPURegister& other)
|
||||
inline FPRegister() : CPURegister() {}
|
||||
inline explicit FPRegister(const CPURegister& other)
|
||||
: CPURegister(other.code(), other.size(), other.type()) {
|
||||
VIXL_ASSERT(IsValidFPRegister());
|
||||
}
|
||||
FPRegister(unsigned code, unsigned size)
|
||||
inline FPRegister(unsigned code, unsigned size)
|
||||
: CPURegister(code, size, kFPRegister) {}
|
||||
|
||||
bool IsValid() const {
|
||||
@@ -237,6 +241,10 @@ class FPRegister : public CPURegister {
|
||||
static const FPRegister& SRegFromCode(unsigned code);
|
||||
static const FPRegister& DRegFromCode(unsigned code);
|
||||
|
||||
// V8 compatibility.
|
||||
static const int kNumRegisters = kNumberOfFPRegisters;
|
||||
static const int kNumAllocatableRegisters = kNumberOfFPRegisters - 1;
|
||||
|
||||
private:
|
||||
static const FPRegister sregisters[];
|
||||
static const FPRegister dregisters[];
|
||||
@@ -304,23 +312,23 @@ bool AreSameSizeAndType(const CPURegister& reg1,
|
||||
// Lists of registers.
|
||||
class CPURegList {
|
||||
public:
|
||||
explicit CPURegList(CPURegister reg1,
|
||||
CPURegister reg2 = NoCPUReg,
|
||||
CPURegister reg3 = NoCPUReg,
|
||||
CPURegister reg4 = NoCPUReg)
|
||||
inline explicit CPURegList(CPURegister reg1,
|
||||
CPURegister reg2 = NoCPUReg,
|
||||
CPURegister reg3 = NoCPUReg,
|
||||
CPURegister reg4 = NoCPUReg)
|
||||
: list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
|
||||
size_(reg1.size()), type_(reg1.type()) {
|
||||
VIXL_ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
|
||||
VIXL_ASSERT(IsValid());
|
||||
}
|
||||
|
||||
CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
|
||||
inline CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
|
||||
: list_(list), size_(size), type_(type) {
|
||||
VIXL_ASSERT(IsValid());
|
||||
}
|
||||
|
||||
CPURegList(CPURegister::RegisterType type, unsigned size,
|
||||
unsigned first_reg, unsigned last_reg)
|
||||
inline CPURegList(CPURegister::RegisterType type, unsigned size,
|
||||
unsigned first_reg, unsigned last_reg)
|
||||
: size_(size), type_(type) {
|
||||
VIXL_ASSERT(((type == CPURegister::kRegister) &&
|
||||
(last_reg < kNumberOfRegisters)) ||
|
||||
@@ -332,7 +340,7 @@ class CPURegList {
|
||||
VIXL_ASSERT(IsValid());
|
||||
}
|
||||
|
||||
CPURegister::RegisterType type() const {
|
||||
inline CPURegister::RegisterType type() const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return type_;
|
||||
}
|
||||
@@ -358,13 +366,13 @@ class CPURegList {
|
||||
}
|
||||
|
||||
// Variants of Combine and Remove which take a single register.
|
||||
void Combine(const CPURegister& other) {
|
||||
inline void Combine(const CPURegister& other) {
|
||||
VIXL_ASSERT(other.type() == type_);
|
||||
VIXL_ASSERT(other.size() == size_);
|
||||
Combine(other.code());
|
||||
}
|
||||
|
||||
void Remove(const CPURegister& other) {
|
||||
inline void Remove(const CPURegister& other) {
|
||||
VIXL_ASSERT(other.type() == type_);
|
||||
VIXL_ASSERT(other.size() == size_);
|
||||
Remove(other.code());
|
||||
@@ -372,51 +380,24 @@ class CPURegList {
|
||||
|
||||
// Variants of Combine and Remove which take a single register by its code;
|
||||
// the type and size of the register is inferred from this list.
|
||||
void Combine(int code) {
|
||||
inline void Combine(int code) {
|
||||
VIXL_ASSERT(IsValid());
|
||||
VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
|
||||
list_ |= (UINT64_C(1) << code);
|
||||
}
|
||||
|
||||
void Remove(int code) {
|
||||
inline void Remove(int code) {
|
||||
VIXL_ASSERT(IsValid());
|
||||
VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
|
||||
list_ &= ~(UINT64_C(1) << code);
|
||||
}
|
||||
|
||||
static CPURegList Union(const CPURegList& list_1, const CPURegList& list_2) {
|
||||
VIXL_ASSERT(list_1.type_ == list_2.type_);
|
||||
VIXL_ASSERT(list_1.size_ == list_2.size_);
|
||||
return CPURegList(list_1.type_, list_1.size_, list_1.list_ | list_2.list_);
|
||||
}
|
||||
static CPURegList Union(const CPURegList& list_1,
|
||||
const CPURegList& list_2,
|
||||
const CPURegList& list_3);
|
||||
static CPURegList Union(const CPURegList& list_1,
|
||||
const CPURegList& list_2,
|
||||
const CPURegList& list_3,
|
||||
const CPURegList& list_4);
|
||||
|
||||
static CPURegList Intersection(const CPURegList& list_1,
|
||||
const CPURegList& list_2) {
|
||||
VIXL_ASSERT(list_1.type_ == list_2.type_);
|
||||
VIXL_ASSERT(list_1.size_ == list_2.size_);
|
||||
return CPURegList(list_1.type_, list_1.size_, list_1.list_ & list_2.list_);
|
||||
}
|
||||
static CPURegList Intersection(const CPURegList& list_1,
|
||||
const CPURegList& list_2,
|
||||
const CPURegList& list_3);
|
||||
static CPURegList Intersection(const CPURegList& list_1,
|
||||
const CPURegList& list_2,
|
||||
const CPURegList& list_3,
|
||||
const CPURegList& list_4);
|
||||
|
||||
RegList list() const {
|
||||
inline RegList list() const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return list_;
|
||||
}
|
||||
|
||||
void set_list(RegList new_list) {
|
||||
inline void set_list(RegList new_list) {
|
||||
VIXL_ASSERT(IsValid());
|
||||
list_ = new_list;
|
||||
}
|
||||
@@ -436,38 +417,38 @@ class CPURegList {
|
||||
static CPURegList GetCallerSaved(unsigned size = kXRegSize);
|
||||
static CPURegList GetCallerSavedFP(unsigned size = kDRegSize);
|
||||
|
||||
bool IsEmpty() const {
|
||||
inline bool IsEmpty() const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return list_ == 0;
|
||||
}
|
||||
|
||||
bool IncludesAliasOf(const CPURegister& other) const {
|
||||
inline bool IncludesAliasOf(const CPURegister& other) const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return (type_ == other.type()) && ((other.Bit() & list_) != 0);
|
||||
}
|
||||
|
||||
bool IncludesAliasOf(int code) const {
|
||||
inline bool IncludesAliasOf(int code) const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return ((code & list_) != 0);
|
||||
}
|
||||
|
||||
int Count() const {
|
||||
inline int Count() const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return CountSetBits(list_, kRegListSizeInBits);
|
||||
}
|
||||
|
||||
unsigned RegisterSizeInBits() const {
|
||||
inline unsigned RegisterSizeInBits() const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return size_;
|
||||
}
|
||||
|
||||
unsigned RegisterSizeInBytes() const {
|
||||
inline unsigned RegisterSizeInBytes() const {
|
||||
int size_in_bits = RegisterSizeInBits();
|
||||
VIXL_ASSERT((size_in_bits % 8) == 0);
|
||||
return size_in_bits / 8;
|
||||
}
|
||||
|
||||
unsigned TotalSizeInBytes() const {
|
||||
inline unsigned TotalSizeInBytes() const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return RegisterSizeInBytes() * Count();
|
||||
}
|
||||
@@ -606,10 +587,8 @@ class Label {
|
||||
VIXL_ASSERT(!IsLinked() || IsBound());
|
||||
}
|
||||
|
||||
bool IsBound() const { return location_ >= 0; }
|
||||
bool IsLinked() const { return !links_.empty(); }
|
||||
|
||||
ptrdiff_t location() const { return location_; }
|
||||
inline bool IsBound() const { return location_ >= 0; }
|
||||
inline bool IsLinked() const { return !links_.empty(); }
|
||||
|
||||
private:
|
||||
// The list of linked instructions is stored in a stack-like structure. We
|
||||
@@ -668,20 +647,22 @@ class Label {
|
||||
std::stack<ptrdiff_t> * links_extended_;
|
||||
};
|
||||
|
||||
void Bind(ptrdiff_t location) {
|
||||
inline ptrdiff_t location() const { return location_; }
|
||||
|
||||
inline void Bind(ptrdiff_t location) {
|
||||
// Labels can only be bound once.
|
||||
VIXL_ASSERT(!IsBound());
|
||||
location_ = location;
|
||||
}
|
||||
|
||||
void AddLink(ptrdiff_t instruction) {
|
||||
inline void AddLink(ptrdiff_t instruction) {
|
||||
// If a label is bound, the assembler already has the information it needs
|
||||
// to write the instruction, so there is no need to add it to links_.
|
||||
VIXL_ASSERT(!IsBound());
|
||||
links_.push(instruction);
|
||||
}
|
||||
|
||||
ptrdiff_t GetAndRemoveNextLink() {
|
||||
inline ptrdiff_t GetAndRemoveNextLink() {
|
||||
VIXL_ASSERT(IsLinked());
|
||||
ptrdiff_t link = links_.top();
|
||||
links_.pop();
|
||||
@@ -864,14 +845,14 @@ class Assembler {
|
||||
|
||||
// Return the address of an offset in the buffer.
|
||||
template <typename T>
|
||||
T GetOffsetAddress(ptrdiff_t offset) {
|
||||
inline T GetOffsetAddress(ptrdiff_t offset) {
|
||||
VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
|
||||
return buffer_->GetOffsetAddress<T>(offset);
|
||||
}
|
||||
|
||||
// Return the address of a bound label.
|
||||
template <typename T>
|
||||
T GetLabelAddress(const Label * label) {
|
||||
inline T GetLabelAddress(const Label * label) {
|
||||
VIXL_ASSERT(label->IsBound());
|
||||
VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
|
||||
return GetOffsetAddress<T>(label->location());
|
||||
@@ -879,14 +860,14 @@ class Assembler {
|
||||
|
||||
// Return the address of the cursor.
|
||||
template <typename T>
|
||||
T GetCursorAddress() {
|
||||
inline T GetCursorAddress() {
|
||||
VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
|
||||
return GetOffsetAddress<T>(CursorOffset());
|
||||
}
|
||||
|
||||
// Return the address of the start of the buffer.
|
||||
template <typename T>
|
||||
T GetStartAddress() {
|
||||
inline T GetStartAddress() {
|
||||
VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
|
||||
return GetOffsetAddress<T>(0);
|
||||
}
|
||||
@@ -1093,20 +1074,20 @@ class Assembler {
|
||||
|
||||
// Bfm aliases.
|
||||
// Bitfield insert.
|
||||
void bfi(const Register& rd,
|
||||
const Register& rn,
|
||||
unsigned lsb,
|
||||
unsigned width) {
|
||||
inline void bfi(const Register& rd,
|
||||
const Register& rn,
|
||||
unsigned lsb,
|
||||
unsigned width) {
|
||||
VIXL_ASSERT(width >= 1);
|
||||
VIXL_ASSERT(lsb + width <= rn.size());
|
||||
bfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
|
||||
}
|
||||
|
||||
// Bitfield extract and insert low.
|
||||
void bfxil(const Register& rd,
|
||||
const Register& rn,
|
||||
unsigned lsb,
|
||||
unsigned width) {
|
||||
inline void bfxil(const Register& rd,
|
||||
const Register& rn,
|
||||
unsigned lsb,
|
||||
unsigned width) {
|
||||
VIXL_ASSERT(width >= 1);
|
||||
VIXL_ASSERT(lsb + width <= rn.size());
|
||||
bfm(rd, rn, lsb, lsb + width - 1);
|
||||
@@ -1114,92 +1095,92 @@ class Assembler {
|
||||
|
||||
// Sbfm aliases.
|
||||
// Arithmetic shift right.
|
||||
void asr(const Register& rd, const Register& rn, unsigned shift) {
|
||||
inline void asr(const Register& rd, const Register& rn, unsigned shift) {
|
||||
VIXL_ASSERT(shift < rd.size());
|
||||
sbfm(rd, rn, shift, rd.size() - 1);
|
||||
}
|
||||
|
||||
// Signed bitfield insert with zero at right.
|
||||
void sbfiz(const Register& rd,
|
||||
const Register& rn,
|
||||
unsigned lsb,
|
||||
unsigned width) {
|
||||
inline void sbfiz(const Register& rd,
|
||||
const Register& rn,
|
||||
unsigned lsb,
|
||||
unsigned width) {
|
||||
VIXL_ASSERT(width >= 1);
|
||||
VIXL_ASSERT(lsb + width <= rn.size());
|
||||
sbfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
|
||||
}
|
||||
|
||||
// Signed bitfield extract.
|
||||
void sbfx(const Register& rd,
|
||||
const Register& rn,
|
||||
unsigned lsb,
|
||||
unsigned width) {
|
||||
inline void sbfx(const Register& rd,
|
||||
const Register& rn,
|
||||
unsigned lsb,
|
||||
unsigned width) {
|
||||
VIXL_ASSERT(width >= 1);
|
||||
VIXL_ASSERT(lsb + width <= rn.size());
|
||||
sbfm(rd, rn, lsb, lsb + width - 1);
|
||||
}
|
||||
|
||||
// Signed extend byte.
|
||||
void sxtb(const Register& rd, const Register& rn) {
|
||||
inline void sxtb(const Register& rd, const Register& rn) {
|
||||
sbfm(rd, rn, 0, 7);
|
||||
}
|
||||
|
||||
// Signed extend halfword.
|
||||
void sxth(const Register& rd, const Register& rn) {
|
||||
inline void sxth(const Register& rd, const Register& rn) {
|
||||
sbfm(rd, rn, 0, 15);
|
||||
}
|
||||
|
||||
// Signed extend word.
|
||||
void sxtw(const Register& rd, const Register& rn) {
|
||||
inline void sxtw(const Register& rd, const Register& rn) {
|
||||
sbfm(rd, rn, 0, 31);
|
||||
}
|
||||
|
||||
// Ubfm aliases.
|
||||
// Logical shift left.
|
||||
void lsl(const Register& rd, const Register& rn, unsigned shift) {
|
||||
inline void lsl(const Register& rd, const Register& rn, unsigned shift) {
|
||||
unsigned reg_size = rd.size();
|
||||
VIXL_ASSERT(shift < reg_size);
|
||||
ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
|
||||
}
|
||||
|
||||
// Logical shift right.
|
||||
void lsr(const Register& rd, const Register& rn, unsigned shift) {
|
||||
inline void lsr(const Register& rd, const Register& rn, unsigned shift) {
|
||||
VIXL_ASSERT(shift < rd.size());
|
||||
ubfm(rd, rn, shift, rd.size() - 1);
|
||||
}
|
||||
|
||||
// Unsigned bitfield insert with zero at right.
|
||||
void ubfiz(const Register& rd,
|
||||
const Register& rn,
|
||||
unsigned lsb,
|
||||
unsigned width) {
|
||||
inline void ubfiz(const Register& rd,
|
||||
const Register& rn,
|
||||
unsigned lsb,
|
||||
unsigned width) {
|
||||
VIXL_ASSERT(width >= 1);
|
||||
VIXL_ASSERT(lsb + width <= rn.size());
|
||||
ubfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
|
||||
}
|
||||
|
||||
// Unsigned bitfield extract.
|
||||
void ubfx(const Register& rd,
|
||||
const Register& rn,
|
||||
unsigned lsb,
|
||||
unsigned width) {
|
||||
inline void ubfx(const Register& rd,
|
||||
const Register& rn,
|
||||
unsigned lsb,
|
||||
unsigned width) {
|
||||
VIXL_ASSERT(width >= 1);
|
||||
VIXL_ASSERT(lsb + width <= rn.size());
|
||||
ubfm(rd, rn, lsb, lsb + width - 1);
|
||||
}
|
||||
|
||||
// Unsigned extend byte.
|
||||
void uxtb(const Register& rd, const Register& rn) {
|
||||
inline void uxtb(const Register& rd, const Register& rn) {
|
||||
ubfm(rd, rn, 0, 7);
|
||||
}
|
||||
|
||||
// Unsigned extend halfword.
|
||||
void uxth(const Register& rd, const Register& rn) {
|
||||
inline void uxth(const Register& rd, const Register& rn) {
|
||||
ubfm(rd, rn, 0, 15);
|
||||
}
|
||||
|
||||
// Unsigned extend word.
|
||||
void uxtw(const Register& rd, const Register& rn) {
|
||||
inline void uxtw(const Register& rd, const Register& rn) {
|
||||
ubfm(rd, rn, 0, 31);
|
||||
}
|
||||
|
||||
@@ -1249,7 +1230,7 @@ class Assembler {
|
||||
void cneg(const Register& rd, const Register& rn, Condition cond);
|
||||
|
||||
// Rotate right.
|
||||
void ror(const Register& rd, const Register& rs, unsigned shift) {
|
||||
inline void ror(const Register& rd, const Register& rs, unsigned shift) {
|
||||
extr(rd, rs, rs, shift);
|
||||
}
|
||||
|
||||
@@ -1514,19 +1495,6 @@ class Assembler {
|
||||
// Load-acquire register.
|
||||
void ldar(const Register& rt, const MemOperand& src);
|
||||
|
||||
// Prefetch memory.
|
||||
void prfm(PrefetchOperation op, const MemOperand& addr,
|
||||
LoadStoreScalingOption option = PreferScaledOffset);
|
||||
|
||||
// Prefetch memory (with unscaled offset).
|
||||
void prfum(PrefetchOperation op, const MemOperand& addr,
|
||||
LoadStoreScalingOption option = PreferUnscaledOffset);
|
||||
|
||||
// Prefetch memory in the literal pool.
|
||||
void prfm(PrefetchOperation op, RawLiteral* literal);
|
||||
|
||||
// Prefetch from pc + imm19 << 2.
|
||||
void prfm(PrefetchOperation op, int imm19);
|
||||
|
||||
// Move instructions. The default shift of -1 indicates that the move
|
||||
// instruction will calculate an appropriate 16-bit immediate and left shift
|
||||
@@ -1670,21 +1638,12 @@ class Assembler {
|
||||
// FP round to integer (nearest with ties to away).
|
||||
void frinta(const FPRegister& fd, const FPRegister& fn);
|
||||
|
||||
// FP round to integer (implicit rounding).
|
||||
void frinti(const FPRegister& fd, const FPRegister& fn);
|
||||
|
||||
// FP round to integer (toward minus infinity).
|
||||
void frintm(const FPRegister& fd, const FPRegister& fn);
|
||||
|
||||
// FP round to integer (nearest with ties to even).
|
||||
void frintn(const FPRegister& fd, const FPRegister& fn);
|
||||
|
||||
// FP round to integer (toward plus infinity).
|
||||
void frintp(const FPRegister& fd, const FPRegister& fn);
|
||||
|
||||
// FP round to integer (exact, implicit rounding).
|
||||
void frintx(const FPRegister& fd, const FPRegister& fn);
|
||||
|
||||
// FP round to integer (towards zero).
|
||||
void frintz(const FPRegister& fd, const FPRegister& fn);
|
||||
|
||||
@@ -1746,16 +1705,16 @@ class Assembler {
|
||||
|
||||
// Emit generic instructions.
|
||||
// Emit raw instructions into the instruction stream.
|
||||
void dci(Instr raw_inst) { Emit(raw_inst); }
|
||||
inline void dci(Instr raw_inst) { Emit(raw_inst); }
|
||||
|
||||
// Emit 32 bits of data into the instruction stream.
|
||||
void dc32(uint32_t data) {
|
||||
inline void dc32(uint32_t data) {
|
||||
VIXL_ASSERT(buffer_monitor_ > 0);
|
||||
buffer_->Emit32(data);
|
||||
}
|
||||
|
||||
// Emit 64 bits of data into the instruction stream.
|
||||
void dc64(uint64_t data) {
|
||||
inline void dc64(uint64_t data) {
|
||||
VIXL_ASSERT(buffer_monitor_ > 0);
|
||||
buffer_->Emit64(data);
|
||||
}
|
||||
@@ -1890,14 +1849,14 @@ class Assembler {
|
||||
}
|
||||
}
|
||||
|
||||
static Instr ImmS(unsigned imms, unsigned reg_size) {
|
||||
static inline Instr ImmS(unsigned imms, unsigned reg_size) {
|
||||
VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(imms)) ||
|
||||
((reg_size == kWRegSize) && is_uint5(imms)));
|
||||
USE(reg_size);
|
||||
return imms << ImmS_offset;
|
||||
}
|
||||
|
||||
static Instr ImmR(unsigned immr, unsigned reg_size) {
|
||||
static inline Instr ImmR(unsigned immr, unsigned reg_size) {
|
||||
VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
|
||||
((reg_size == kWRegSize) && is_uint5(immr)));
|
||||
USE(reg_size);
|
||||
@@ -1905,7 +1864,7 @@ class Assembler {
|
||||
return immr << ImmR_offset;
|
||||
}
|
||||
|
||||
static Instr ImmSetBits(unsigned imms, unsigned reg_size) {
|
||||
static inline Instr ImmSetBits(unsigned imms, unsigned reg_size) {
|
||||
VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
|
||||
VIXL_ASSERT(is_uint6(imms));
|
||||
VIXL_ASSERT((reg_size == kXRegSize) || is_uint6(imms + 3));
|
||||
@@ -1913,7 +1872,7 @@ class Assembler {
|
||||
return imms << ImmSetBits_offset;
|
||||
}
|
||||
|
||||
static Instr ImmRotate(unsigned immr, unsigned reg_size) {
|
||||
static inline Instr ImmRotate(unsigned immr, unsigned reg_size) {
|
||||
VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
|
||||
VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
|
||||
((reg_size == kWRegSize) && is_uint5(immr)));
|
||||
@@ -1921,12 +1880,12 @@ class Assembler {
|
||||
return immr << ImmRotate_offset;
|
||||
}
|
||||
|
||||
static Instr ImmLLiteral(int imm19) {
|
||||
static inline Instr ImmLLiteral(int imm19) {
|
||||
VIXL_ASSERT(is_int19(imm19));
|
||||
return truncate_to_int19(imm19) << ImmLLiteral_offset;
|
||||
}
|
||||
|
||||
static Instr BitN(unsigned bitn, unsigned reg_size) {
|
||||
static inline Instr BitN(unsigned bitn, unsigned reg_size) {
|
||||
VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
|
||||
VIXL_ASSERT((reg_size == kXRegSize) || (bitn == 0));
|
||||
USE(reg_size);
|
||||
@@ -1984,11 +1943,6 @@ class Assembler {
|
||||
return shift_amount << ImmShiftLS_offset;
|
||||
}
|
||||
|
||||
static Instr ImmPrefetchOperation(int imm5) {
|
||||
VIXL_ASSERT(is_uint5(imm5));
|
||||
return imm5 << ImmPrefetchOperation_offset;
|
||||
}
|
||||
|
||||
static Instr ImmException(int imm16) {
|
||||
VIXL_ASSERT(is_uint16(imm16));
|
||||
return imm16 << ImmException_offset;
|
||||
@@ -2049,32 +2003,12 @@ class Assembler {
|
||||
return scale << FPScale_offset;
|
||||
}
|
||||
|
||||
// Immediate field checking helpers.
|
||||
static bool IsImmAddSub(int64_t immediate);
|
||||
static bool IsImmConditionalCompare(int64_t immediate);
|
||||
static bool IsImmFP32(float imm);
|
||||
static bool IsImmFP64(double imm);
|
||||
static bool IsImmLogical(uint64_t value,
|
||||
unsigned width,
|
||||
unsigned* n = NULL,
|
||||
unsigned* imm_s = NULL,
|
||||
unsigned* imm_r = NULL);
|
||||
static bool IsImmLSPair(int64_t offset, LSDataSize size);
|
||||
static bool IsImmLSScaled(int64_t offset, LSDataSize size);
|
||||
static bool IsImmLSUnscaled(int64_t offset);
|
||||
static bool IsImmMovn(uint64_t imm, unsigned reg_size);
|
||||
static bool IsImmMovz(uint64_t imm, unsigned reg_size);
|
||||
|
||||
// Size of the code generated since label to the current position.
|
||||
size_t SizeOfCodeGeneratedSince(Label* label) const {
|
||||
VIXL_ASSERT(label->IsBound());
|
||||
return buffer_->OffsetFrom(label->location());
|
||||
}
|
||||
|
||||
size_t SizeOfCodeGenerated() const {
|
||||
return buffer_->CursorOffset();
|
||||
}
|
||||
|
||||
size_t BufferCapacity() const { return buffer_->capacity(); }
|
||||
|
||||
size_t RemainingBufferSpace() const { return buffer_->RemainingBytes(); }
|
||||
@@ -2091,7 +2025,7 @@ class Assembler {
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef VIXL_DEBUG
|
||||
#ifdef DEBUG
|
||||
void AcquireBuffer() {
|
||||
VIXL_ASSERT(buffer_monitor_ >= 0);
|
||||
buffer_monitor_++;
|
||||
@@ -2103,16 +2037,16 @@ class Assembler {
|
||||
}
|
||||
#endif
|
||||
|
||||
PositionIndependentCodeOption pic() const {
|
||||
inline PositionIndependentCodeOption pic() {
|
||||
return pic_;
|
||||
}
|
||||
|
||||
bool AllowPageOffsetDependentCode() const {
|
||||
inline bool AllowPageOffsetDependentCode() {
|
||||
return (pic() == PageOffsetDependentCode) ||
|
||||
(pic() == PositionDependentCode);
|
||||
}
|
||||
|
||||
static const Register& AppropriateZeroRegFor(const CPURegister& reg) {
|
||||
static inline const Register& AppropriateZeroRegFor(const CPURegister& reg) {
|
||||
return reg.Is64Bits() ? xzr : wzr;
|
||||
}
|
||||
|
||||
@@ -2122,15 +2056,14 @@ class Assembler {
|
||||
const MemOperand& addr,
|
||||
LoadStoreOp op,
|
||||
LoadStoreScalingOption option = PreferScaledOffset);
|
||||
static bool IsImmLSUnscaled(int64_t offset);
|
||||
static bool IsImmLSScaled(int64_t offset, LSDataSize size);
|
||||
|
||||
void LoadStorePair(const CPURegister& rt,
|
||||
const CPURegister& rt2,
|
||||
const MemOperand& addr,
|
||||
LoadStorePairOp op);
|
||||
|
||||
void Prefetch(PrefetchOperation op,
|
||||
const MemOperand& addr,
|
||||
LoadStoreScalingOption option = PreferScaledOffset);
|
||||
static bool IsImmLSPair(int64_t offset, LSDataSize size);
|
||||
|
||||
// TODO(all): The third parameter should be passed by reference but gcc 4.8.2
|
||||
// reports a bogus uninitialised warning then.
|
||||
@@ -2144,12 +2077,18 @@ class Assembler {
|
||||
unsigned imm_s,
|
||||
unsigned imm_r,
|
||||
LogicalOp op);
|
||||
static bool IsImmLogical(uint64_t value,
|
||||
unsigned width,
|
||||
unsigned* n = NULL,
|
||||
unsigned* imm_s = NULL,
|
||||
unsigned* imm_r = NULL);
|
||||
|
||||
void ConditionalCompare(const Register& rn,
|
||||
const Operand& operand,
|
||||
StatusFlags nzcv,
|
||||
Condition cond,
|
||||
ConditionalCompareOp op);
|
||||
static bool IsImmConditionalCompare(int64_t immediate);
|
||||
|
||||
void AddSubWithCarry(const Register& rd,
|
||||
const Register& rn,
|
||||
@@ -2157,6 +2096,8 @@ class Assembler {
|
||||
FlagsUpdate S,
|
||||
AddSubWithCarryOp op);
|
||||
|
||||
static bool IsImmFP32(float imm);
|
||||
static bool IsImmFP64(double imm);
|
||||
|
||||
// Functions for emulating operands not directly supported by the instruction
|
||||
// set.
|
||||
@@ -2174,6 +2115,7 @@ class Assembler {
|
||||
const Operand& operand,
|
||||
FlagsUpdate S,
|
||||
AddSubOp op);
|
||||
static bool IsImmAddSub(int64_t immediate);
|
||||
|
||||
// Find an appropriate LoadStoreOp or LoadStorePairOp for the specified
|
||||
// registers. Only simple loads are supported; sign- and zero-extension (such
|
||||
@@ -2238,12 +2180,6 @@ class Assembler {
|
||||
const FPRegister& fa,
|
||||
FPDataProcessing3SourceOp op);
|
||||
|
||||
// Encode the specified MemOperand for the specified access size and scaling
|
||||
// preference.
|
||||
Instr LoadStoreMemOperand(const MemOperand& addr,
|
||||
LSDataSize size,
|
||||
LoadStoreScalingOption option);
|
||||
|
||||
// Link the current (not-yet-emitted) instruction to the specified label, then
|
||||
// return an offset to be encoded in the instruction. If the label is not yet
|
||||
// bound, an offset of 0 is returned.
|
||||
@@ -2269,7 +2205,7 @@ class Assembler {
|
||||
CodeBuffer* buffer_;
|
||||
PositionIndependentCodeOption pic_;
|
||||
|
||||
#ifdef VIXL_DEBUG
|
||||
#ifdef DEBUG
|
||||
int64_t buffer_monitor_;
|
||||
#endif
|
||||
};
|
||||
@@ -2303,7 +2239,7 @@ class CodeBufferCheckScope {
|
||||
AssertPolicy assert_policy = kMaximumSize)
|
||||
: assm_(assm) {
|
||||
if (check_policy == kCheck) assm->EnsureSpaceFor(size);
|
||||
#ifdef VIXL_DEBUG
|
||||
#ifdef DEBUG
|
||||
assm->bind(&start_);
|
||||
size_ = size;
|
||||
assert_policy_ = assert_policy;
|
||||
@@ -2315,7 +2251,7 @@ class CodeBufferCheckScope {
|
||||
|
||||
// This is a shortcut for CodeBufferCheckScope(assm, 0, kNoCheck, kNoAssert).
|
||||
explicit CodeBufferCheckScope(Assembler* assm) : assm_(assm) {
|
||||
#ifdef VIXL_DEBUG
|
||||
#ifdef DEBUG
|
||||
size_ = 0;
|
||||
assert_policy_ = kNoAssert;
|
||||
assm->AcquireBuffer();
|
||||
@@ -2323,7 +2259,7 @@ class CodeBufferCheckScope {
|
||||
}
|
||||
|
||||
~CodeBufferCheckScope() {
|
||||
#ifdef VIXL_DEBUG
|
||||
#ifdef DEBUG
|
||||
assm_->ReleaseBuffer();
|
||||
switch (assert_policy_) {
|
||||
case kNoAssert: break;
|
||||
@@ -2341,7 +2277,7 @@ class CodeBufferCheckScope {
|
||||
|
||||
protected:
|
||||
Assembler* assm_;
|
||||
#ifdef VIXL_DEBUG
|
||||
#ifdef DEBUG
|
||||
Label start_;
|
||||
size_t size_;
|
||||
AssertPolicy assert_policy_;
|
||||
|
@@ -31,6 +31,12 @@ namespace vixl {
|
||||
|
||||
const unsigned kNumberOfRegisters = 32;
|
||||
const unsigned kNumberOfFPRegisters = 32;
|
||||
// Callee saved registers are x21-x30(lr).
|
||||
const int kNumberOfCalleeSavedRegisters = 10;
|
||||
const int kFirstCalleeSavedRegisterIndex = 21;
|
||||
// Callee saved FP registers are d8-d15.
|
||||
const int kNumberOfCalleeSavedFPRegisters = 8;
|
||||
const int kFirstCalleeSavedFPRegisterIndex = 8;
|
||||
|
||||
#define REGISTER_CODE_LIST(R) \
|
||||
R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
|
||||
@@ -47,6 +53,7 @@ V_(Ra, 14, 10, Bits) /* Third source register. */ \
|
||||
V_(Rt, 4, 0, Bits) /* Load/store register. */ \
|
||||
V_(Rt2, 14, 10, Bits) /* Load/store second register. */ \
|
||||
V_(Rs, 20, 16, Bits) /* Exclusive access status. */ \
|
||||
V_(PrefetchMode, 4, 0, Bits) \
|
||||
\
|
||||
/* Common bits */ \
|
||||
V_(SixtyFourBits, 31, 31, Bits) \
|
||||
@@ -102,10 +109,6 @@ V_(ImmLSUnsigned, 21, 10, Bits) \
|
||||
V_(ImmLSPair, 21, 15, SignedBits) \
|
||||
V_(SizeLS, 31, 30, Bits) \
|
||||
V_(ImmShiftLS, 12, 12, Bits) \
|
||||
V_(ImmPrefetchOperation, 4, 0, Bits) \
|
||||
V_(PrefetchHint, 4, 3, Bits) \
|
||||
V_(PrefetchTarget, 2, 1, Bits) \
|
||||
V_(PrefetchStream, 0, 0, Bits) \
|
||||
\
|
||||
/* Other immediates */ \
|
||||
V_(ImmUncondBranch, 25, 0, SignedBits) \
|
||||
@@ -266,29 +269,6 @@ enum BarrierType {
|
||||
BarrierAll = 3
|
||||
};
|
||||
|
||||
enum PrefetchOperation {
|
||||
PLDL1KEEP = 0x00,
|
||||
PLDL1STRM = 0x01,
|
||||
PLDL2KEEP = 0x02,
|
||||
PLDL2STRM = 0x03,
|
||||
PLDL3KEEP = 0x04,
|
||||
PLDL3STRM = 0x05,
|
||||
|
||||
PLIL1KEEP = 0x08,
|
||||
PLIL1STRM = 0x09,
|
||||
PLIL2KEEP = 0x0a,
|
||||
PLIL2STRM = 0x0b,
|
||||
PLIL3KEEP = 0x0c,
|
||||
PLIL3STRM = 0x0d,
|
||||
|
||||
PSTL1KEEP = 0x10,
|
||||
PSTL1STRM = 0x11,
|
||||
PSTL2KEEP = 0x12,
|
||||
PSTL2STRM = 0x13,
|
||||
PSTL3KEEP = 0x14,
|
||||
PSTL3STRM = 0x15
|
||||
};
|
||||
|
||||
// System/special register names.
|
||||
// This information is not encoded as one field but as the concatenation of
|
||||
// multiple fields (Op0<0>, Op1, Crn, Crm, Op2).
|
||||
@@ -625,12 +605,6 @@ enum LoadStoreAnyOp {
|
||||
LoadStoreAnyFixed = 0x08000000
|
||||
};
|
||||
|
||||
// Any load pair or store pair.
|
||||
enum LoadStorePairAnyOp {
|
||||
LoadStorePairAnyFMask = 0x3a000000,
|
||||
LoadStorePairAnyFixed = 0x28000000
|
||||
};
|
||||
|
||||
#define LOAD_STORE_PAIR_OP_LIST(V) \
|
||||
V(STP, w, 0x00000000), \
|
||||
V(LDP, w, 0x00400000), \
|
||||
@@ -729,6 +703,17 @@ enum LoadLiteralOp {
|
||||
V(LD, R, d, 0xC4400000)
|
||||
|
||||
|
||||
// Load/store unscaled offset.
|
||||
enum LoadStoreUnscaledOffsetOp {
|
||||
LoadStoreUnscaledOffsetFixed = 0x38000000,
|
||||
LoadStoreUnscaledOffsetFMask = 0x3B200C00,
|
||||
LoadStoreUnscaledOffsetMask = 0xFFE00C00,
|
||||
#define LOAD_STORE_UNSCALED(A, B, C, D) \
|
||||
A##U##B##_##C = LoadStoreUnscaledOffsetFixed | D
|
||||
LOAD_STORE_OP_LIST(LOAD_STORE_UNSCALED)
|
||||
#undef LOAD_STORE_UNSCALED
|
||||
};
|
||||
|
||||
// Load/store (post, pre, offset and unsigned.)
|
||||
enum LoadStoreOp {
|
||||
LoadStoreOpMask = 0xC4C00000,
|
||||
@@ -739,18 +724,6 @@ enum LoadStoreOp {
|
||||
PRFM = 0xC0800000
|
||||
};
|
||||
|
||||
// Load/store unscaled offset.
|
||||
enum LoadStoreUnscaledOffsetOp {
|
||||
LoadStoreUnscaledOffsetFixed = 0x38000000,
|
||||
LoadStoreUnscaledOffsetFMask = 0x3B200C00,
|
||||
LoadStoreUnscaledOffsetMask = 0xFFE00C00,
|
||||
PRFUM = LoadStoreUnscaledOffsetFixed | PRFM,
|
||||
#define LOAD_STORE_UNSCALED(A, B, C, D) \
|
||||
A##U##B##_##C = LoadStoreUnscaledOffsetFixed | D
|
||||
LOAD_STORE_OP_LIST(LOAD_STORE_UNSCALED)
|
||||
#undef LOAD_STORE_UNSCALED
|
||||
};
|
||||
|
||||
// Load/store post index.
|
||||
enum LoadStorePostIndex {
|
||||
LoadStorePostIndexFixed = 0x38000400,
|
||||
|
@@ -108,7 +108,7 @@ class DecoderVisitor {
}

private:
const VisitorConstness constness_;
VisitorConstness constness_;
};

@@ -34,7 +34,6 @@ Disassembler::Disassembler() {
buffer_ = reinterpret_cast<char*>(malloc(buffer_size_));
buffer_pos_ = 0;
own_buffer_ = true;
code_address_offset_ = 0;
}


@@ -43,7 +42,6 @@ Disassembler::Disassembler(char* text_buffer, int buffer_size) {
buffer_ = text_buffer;
buffer_pos_ = 0;
own_buffer_ = false;
code_address_offset_ = 0;
}


@@ -741,25 +739,9 @@ void Disassembler::VisitMoveWideImmediate(const Instruction* instr) {
// shift calculation.
switch (instr->Mask(MoveWideImmediateMask)) {
case MOVN_w:
case MOVN_x:
if ((instr->ImmMoveWide()) || (instr->ShiftMoveWide() == 0)) {
if ((instr->SixtyFourBits() == 0) && (instr->ImmMoveWide() == 0xffff)) {
mnemonic = "movn";
} else {
mnemonic = "mov";
form = "'Rd, 'IMoveNeg";
}
} else {
mnemonic = "movn";
}
break;
case MOVN_x: mnemonic = "movn"; break;
case MOVZ_w:
case MOVZ_x:
if ((instr->ImmMoveWide()) || (instr->ShiftMoveWide() == 0))
mnemonic = "mov";
else
mnemonic = "movz";
break;
case MOVZ_x: mnemonic = "movz"; break;
case MOVK_w:
case MOVK_x: mnemonic = "movk"; form = "'Rd, 'IMoveLSL"; break;
default: VIXL_UNREACHABLE();
@@ -824,7 +806,7 @@ void Disassembler::VisitLoadStoreUnsignedOffset(const Instruction* instr) {
case A##_unsigned: mnemonic = B; form = C ", ['Xns'ILU]"; break;
LOAD_STORE_LIST(LS_UNSIGNEDOFFSET)
#undef LS_UNSIGNEDOFFSET
case PRFM_unsigned: mnemonic = "prfm"; form = "'PrefOp, ['Xns'ILU]";
case PRFM_unsigned: mnemonic = "prfm"; form = "'PrefOp, ['Xn'ILU]";
}
Format(instr, mnemonic, form);
}
@@ -851,7 +833,6 @@ void Disassembler::VisitLoadStoreUnscaledOffset(const Instruction* instr) {
const char *form_x = "'Xt, ['Xns'ILS]";
const char *form_s = "'St, ['Xns'ILS]";
const char *form_d = "'Dt, ['Xns'ILS]";
const char *form_prefetch = "'PrefOp, ['Xns'ILS]";

switch (instr->Mask(LoadStoreUnscaledOffsetMask)) {
case STURB_w: mnemonic = "sturb"; break;
@@ -871,7 +852,6 @@ void Disassembler::VisitLoadStoreUnscaledOffset(const Instruction* instr) {
case LDURSH_x: form = form_x; // Fall through.
case LDURSH_w: mnemonic = "ldursh"; break;
case LDURSW_x: mnemonic = "ldursw"; form = form_x; break;
case PRFUM: mnemonic = "prfum"; form = form_prefetch; break;
default: form = "(LoadStoreUnscaledOffset)";
}
Format(instr, mnemonic, form);
@@ -892,11 +872,6 @@ void Disassembler::VisitLoadLiteral(const Instruction* instr) {
form = "'Xt, 'ILLiteral 'LValue";
break;
}
case PRFM_lit: {
mnemonic = "prfm";
form = "'PrefOp, 'ILLiteral 'LValue";
break;
}
default: mnemonic = "unimplemented";
}
Format(instr, mnemonic, form);
@@ -1369,7 +1344,7 @@ void Disassembler::AppendPCRelativeOffsetToOutput(const Instruction* instr,
void Disassembler::AppendAddressToOutput(const Instruction* instr,
const void* addr) {
USE(instr);
AppendToOutput("(addr 0x%" PRIxPTR ")", reinterpret_cast<uintptr_t>(addr));
AppendToOutput("(addr %p)", addr);
}


@@ -1385,40 +1360,6 @@ void Disassembler::AppendDataAddressToOutput(const Instruction* instr,
}


void Disassembler::AppendCodeRelativeAddressToOutput(const Instruction* instr,
const void* addr) {
USE(instr);
int64_t rel_addr = CodeRelativeAddress(addr);
if (rel_addr >= 0) {
AppendToOutput("(addr 0x%" PRIx64 ")", rel_addr);
} else {
AppendToOutput("(addr -0x%" PRIx64 ")", -rel_addr);
}
}


void Disassembler::AppendCodeRelativeCodeAddressToOutput(
const Instruction* instr, const void* addr) {
AppendCodeRelativeAddressToOutput(instr, addr);
}


void Disassembler::AppendCodeRelativeDataAddressToOutput(
const Instruction* instr, const void* addr) {
AppendCodeRelativeAddressToOutput(instr, addr);
}


void Disassembler::MapCodeAddress(int64_t base_address,
const Instruction* instr_address) {
set_code_address_offset(
base_address - reinterpret_cast<intptr_t>(instr_address));
}
int64_t Disassembler::CodeRelativeAddress(const void* addr) {
return reinterpret_cast<intptr_t>(addr) + code_address_offset();
}


void Disassembler::Format(const Instruction* instr, const char* mnemonic,
const char* format) {
VIXL_ASSERT(mnemonic != NULL);
@@ -1545,20 +1486,16 @@ int Disassembler::SubstituteImmediateField(const Instruction* instr,
VIXL_ASSERT(format[0] == 'I');

switch (format[1]) {
case 'M': { // IMoveImm, IMoveNeg or IMoveLSL.
if (format[5] == 'L') {
case 'M': { // IMoveImm or IMoveLSL.
if (format[5] == 'I') {
uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide());
AppendToOutput("#0x%" PRIx64, imm);
} else {
VIXL_ASSERT(format[5] == 'L');
AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide());
if (instr->ShiftMoveWide() > 0) {
AppendToOutput(", lsl #%" PRId64, 16 * instr->ShiftMoveWide());
}
} else {
VIXL_ASSERT((format[5] == 'I') || (format[5] == 'N'));
uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide());
if (format[5] == 'N')
imm = ~imm;
if (!instr->SixtyFourBits())
imm &= UINT64_C(0xffffffff);
AppendToOutput("#0x%" PRIx64, imm);
}
return 8;
}
@@ -1697,31 +1634,14 @@ int Disassembler::SubstituteLiteralField(const Instruction* instr,
VIXL_ASSERT(strncmp(format, "LValue", 6) == 0);
USE(format);

const void * address = instr->LiteralAddress<const void *>();
switch (instr->Mask(LoadLiteralMask)) {
case LDR_w_lit:
case LDR_x_lit:
case LDRSW_x_lit:
case LDR_s_lit:
case LDR_d_lit:
AppendCodeRelativeDataAddressToOutput(instr, address);
AppendDataAddressToOutput(instr, instr->LiteralAddress());
break;
case PRFM_lit: {
// Use the prefetch hint to decide how to print the address.
switch (instr->PrefetchHint()) {
case 0x0: // PLD: prefetch for load.
case 0x2: // PST: prepare for store.
AppendCodeRelativeDataAddressToOutput(instr, address);
break;
case 0x1: // PLI: preload instructions.
AppendCodeRelativeCodeAddressToOutput(instr, address);
break;
case 0x3: // Unallocated hint.
AppendCodeRelativeAddressToOutput(instr, address);
break;
}
break;
}
default:
VIXL_UNREACHABLE();
}
@@ -1781,22 +1701,17 @@ int Disassembler::SubstitutePCRelAddressField(const Instruction* instr,
(strcmp(format, "AddrPCRelPage") == 0)); // Used by `adrp`.

int64_t offset = instr->ImmPCRel();
const Instruction * base = instr;

// Compute the target address based on the effective address (after applying
// code_address_offset). This is required for correct behaviour of adrp.
const Instruction* base = instr + code_address_offset();
if (format[9] == 'P') {
offset *= kPageSize;
base = AlignDown(base, kPageSize);
}
// Strip code_address_offset before printing, so we can use the
// semantically-correct AppendCodeRelativeAddressToOutput.
const void* target =
reinterpret_cast<const void*>(base + offset - code_address_offset());

const void* target = reinterpret_cast<const void*>(base + offset);
AppendPCRelativeOffsetToOutput(instr, offset);
AppendToOutput(" ");
AppendCodeRelativeAddressToOutput(instr, target);
AppendAddressToOutput(instr, target);
return 13;
}

@@ -1823,7 +1738,7 @@ int Disassembler::SubstituteBranchTargetField(const Instruction* instr,

AppendPCRelativeOffsetToOutput(instr, offset);
AppendToOutput(" ");
AppendCodeRelativeCodeAddressToOutput(instr, target_address);
AppendCodeAddressToOutput(instr, target_address);

return 8;
}
@@ -1890,26 +1805,13 @@ int Disassembler::SubstitutePrefetchField(const Instruction* instr,
VIXL_ASSERT(format[0] == 'P');
USE(format);

static const char* hints[] = {"ld", "li", "st"};
static const char* stream_options[] = {"keep", "strm"};
int prefetch_mode = instr->PrefetchMode();

unsigned hint = instr->PrefetchHint();
unsigned target = instr->PrefetchTarget() + 1;
unsigned stream = instr->PrefetchStream();
const char* ls = (prefetch_mode & 0x10) ? "st" : "ld";
int level = (prefetch_mode >> 1) + 1;
const char* ks = (prefetch_mode & 1) ? "strm" : "keep";

if ((hint >= (sizeof(hints) / sizeof(hints[0]))) || (target > 3)) {
// Unallocated prefetch operations.
int prefetch_mode = instr->ImmPrefetchOperation();
AppendToOutput("#0b%c%c%c%c%c",
(prefetch_mode & (1 << 4)) ? '1' : '0',
(prefetch_mode & (1 << 3)) ? '1' : '0',
(prefetch_mode & (1 << 2)) ? '1' : '0',
(prefetch_mode & (1 << 1)) ? '1' : '0',
(prefetch_mode & (1 << 0)) ? '1' : '0');
} else {
VIXL_ASSERT(stream < (sizeof(stream_options) / sizeof(stream_options[0])));
AppendToOutput("p%sl%d%s", hints[hint], target, stream_options[stream]);
}
AppendToOutput("p%sl%d%s", ls, level, ks);
return 6;
}

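The VisitMoveWideImmediate() and SubstituteImmediateField() hunks above differ mainly in how the "mov" alias for MOVN is printed: one side negates the shifted 16-bit payload (the 'IMoveNeg substitution) and masks it to 32 bits for W-register forms. A hedged, standalone sketch of that computation; the names are local to this example and not part of the diff:

#include <cstdint>
#include <cstdio>

// Immediate printed for the "mov" alias of a MOVN encoding.
static uint64_t MovnAliasImmediate(uint64_t imm16, unsigned shift,
                                   bool sixty_four_bits) {
  uint64_t imm = ~(imm16 << (16 * shift));
  if (!sixty_four_bits) {
    imm &= UINT64_C(0xffffffff);
  }
  return imm;
}

int main() {
  // movn w0, #0x1, lsl #16 is shown as "mov w0, #0xfffeffff".
  printf("#0x%llx\n",
         (unsigned long long)MovnAliasImmediate(1, 1, false));
  return 0;
}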
@@ -43,7 +43,7 @@ class Disassembler: public DecoderVisitor {
char* GetOutput();

// Declare all Visitor functions.
#define DECLARE(A) virtual void Visit##A(const Instruction* instr);
#define DECLARE(A) void Visit##A(const Instruction* instr);
VISITOR_LIST(DECLARE)
#undef DECLARE

@@ -65,45 +65,23 @@ class Disassembler: public DecoderVisitor {

// Prints an address, in the general case. It can be code or data. This is
// used for example to print the target address of an ADR instruction.
virtual void AppendCodeRelativeAddressToOutput(const Instruction* instr,
const void* addr);
virtual void AppendAddressToOutput(const Instruction* instr,
const void* addr);

// Prints the address of some code.
// This is used for example to print the target address of a branch to an
// immediate offset.
// A sub-class can for example override this method to lookup the address and
// print an appropriate name.
virtual void AppendCodeRelativeCodeAddressToOutput(const Instruction* instr,
const void* addr);
virtual void AppendCodeAddressToOutput(const Instruction* instr,
const void* addr);

// Prints the address of some data.
// This is used for example to print the source address of a load literal
// instruction.
virtual void AppendCodeRelativeDataAddressToOutput(const Instruction* instr,
const void* addr);

// Same as the above, but for addresses that are not relative to the code
// buffer. They are currently not used by VIXL.
virtual void AppendAddressToOutput(const Instruction* instr,
const void* addr);
virtual void AppendCodeAddressToOutput(const Instruction* instr,
const void* addr);
virtual void AppendDataAddressToOutput(const Instruction* instr,
const void* addr);

public:
// Get/Set the offset that should be added to code addresses when printing
// code-relative addresses in the AppendCodeRelative<Type>AddressToOutput()
// helpers.
// Below is an example of how a branch immediate instruction in memory at
// address 0xb010200 would disassemble with different offsets.
// Base address | Disassembly
// 0x0 | 0xb010200: b #+0xcc (addr 0xb0102cc)
// 0x10000 | 0xb000200: b #+0xcc (addr 0xb0002cc)
// 0xb010200 | 0x0: b #+0xcc (addr 0xcc)
void MapCodeAddress(int64_t base_address, const Instruction* instr_address);
int64_t CodeRelativeAddress(const void* instr);

private:
void Format(
const Instruction* instr, const char* mnemonic, const char* format);
@@ -123,40 +101,32 @@ class Disassembler: public DecoderVisitor {
int SubstitutePrefetchField(const Instruction* instr, const char* format);
int SubstituteBarrierField(const Instruction* instr, const char* format);

bool RdIsZROrSP(const Instruction* instr) const {
inline bool RdIsZROrSP(const Instruction* instr) const {
return (instr->Rd() == kZeroRegCode);
}

bool RnIsZROrSP(const Instruction* instr) const {
inline bool RnIsZROrSP(const Instruction* instr) const {
return (instr->Rn() == kZeroRegCode);
}

bool RmIsZROrSP(const Instruction* instr) const {
inline bool RmIsZROrSP(const Instruction* instr) const {
return (instr->Rm() == kZeroRegCode);
}

bool RaIsZROrSP(const Instruction* instr) const {
inline bool RaIsZROrSP(const Instruction* instr) const {
return (instr->Ra() == kZeroRegCode);
}

bool IsMovzMovnImm(unsigned reg_size, uint64_t value);

int64_t code_address_offset() const { return code_address_offset_; }

protected:
void ResetOutput();
void AppendToOutput(const char* string, ...) PRINTF_CHECK(2, 3);

void set_code_address_offset(int64_t code_address_offset) {
code_address_offset_ = code_address_offset;
}

char* buffer_;
uint32_t buffer_pos_;
uint32_t buffer_size_;
bool own_buffer_;

int64_t code_address_offset_;
};

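The comment block above documents MapCodeAddress()/CodeRelativeAddress(), which exist only on one side of this comparison. A hypothetical usage sketch, assuming the usual VIXL Decoder/Disassembler pairing and include paths; the 0x10000 base address is made up for illustration:

#include <cstdio>
#include "a64/decoder-a64.h"
#include "a64/disasm-a64.h"

// Disassemble [start, end), printing addresses as if 'start' were loaded at
// 0x10000 rather than at its current location in the host buffer.
void DisassembleAt(const vixl::Instruction* start,
                   const vixl::Instruction* end) {
  vixl::Decoder decoder;
  vixl::Disassembler disasm;
  decoder.AppendVisitor(&disasm);
  disasm.MapCodeAddress(0x10000, start);
  for (const vixl::Instruction* instr = start; instr < end;
       instr = instr->NextInstruction()) {
    decoder.Decode(instr);
    printf("%s\n", disasm.GetOutput());
  }
}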
@@ -30,20 +30,6 @@
namespace vixl {


// Floating-point infinity values.
const float kFP32PositiveInfinity = rawbits_to_float(0x7f800000);
const float kFP32NegativeInfinity = rawbits_to_float(0xff800000);
const double kFP64PositiveInfinity =
rawbits_to_double(UINT64_C(0x7ff0000000000000));
const double kFP64NegativeInfinity =
rawbits_to_double(UINT64_C(0xfff0000000000000));


// The default NaN values (for FPCR.DN=1).
const double kFP64DefaultNaN = rawbits_to_double(UINT64_C(0x7ff8000000000000));
const float kFP32DefaultNaN = rawbits_to_float(0x7fc00000);


static uint64_t RotateRight(uint64_t value,
unsigned int rotate,
unsigned int width) {
@@ -68,55 +54,6 @@ static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
}


bool Instruction::IsLoad() const {
if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
return false;
}

if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
return Mask(LoadStorePairLBit) != 0;
} else {
LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
switch (op) {
case LDRB_w:
case LDRH_w:
case LDR_w:
case LDR_x:
case LDRSB_w:
case LDRSB_x:
case LDRSH_w:
case LDRSH_x:
case LDRSW_x:
case LDR_s:
case LDR_d: return true;
default: return false;
}
}
}


bool Instruction::IsStore() const {
if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
return false;
}

if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
return Mask(LoadStorePairLBit) == 0;
} else {
LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
switch (op) {
case STRB_w:
case STRH_w:
case STR_w:
case STR_x:
case STR_s:
case STR_d: return true;
default: return false;
}
}
}


// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case. Specifically, where the constraints on imm_s are
// not met.
@@ -96,17 +96,6 @@ const unsigned kDoubleExponentBits = 11;
const unsigned kFloatMantissaBits = 23;
const unsigned kFloatExponentBits = 8;

// Floating-point infinity values.
extern const float kFP32PositiveInfinity;
extern const float kFP32NegativeInfinity;
extern const double kFP64PositiveInfinity;
extern const double kFP64NegativeInfinity;

// The default NaN values (for FPCR.DN=1).
extern const double kFP64DefaultNaN;
extern const float kFP32DefaultNaN;


enum LSDataSize {
LSByte = 0,
LSHalfword = 1,
@@ -151,33 +140,33 @@ enum Reg31Mode {

class Instruction {
public:
Instr InstructionBits() const {
inline Instr InstructionBits() const {
return *(reinterpret_cast<const Instr*>(this));
}

void SetInstructionBits(Instr new_instr) {
inline void SetInstructionBits(Instr new_instr) {
*(reinterpret_cast<Instr*>(this)) = new_instr;
}

int Bit(int pos) const {
inline int Bit(int pos) const {
return (InstructionBits() >> pos) & 1;
}

uint32_t Bits(int msb, int lsb) const {
inline uint32_t Bits(int msb, int lsb) const {
return unsigned_bitextract_32(msb, lsb, InstructionBits());
}

int32_t SignedBits(int msb, int lsb) const {
inline int32_t SignedBits(int msb, int lsb) const {
int32_t bits = *(reinterpret_cast<const int32_t*>(this));
return signed_bitextract_32(msb, lsb, bits);
}

Instr Mask(uint32_t mask) const {
inline Instr Mask(uint32_t mask) const {
return InstructionBits() & mask;
}

#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
int64_t Name() const { return Func(HighBit, LowBit); }
inline int64_t Name() const { return Func(HighBit, LowBit); }
INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
#undef DEFINE_GETTER

@@ -193,64 +182,56 @@ class Instruction {
float ImmFP32() const;
double ImmFP64() const;

LSDataSize SizeLSPair() const {
inline LSDataSize SizeLSPair() const {
return CalcLSPairDataSize(
static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
}

// Helpers.
bool IsCondBranchImm() const {
inline bool IsCondBranchImm() const {
return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
}

bool IsUncondBranchImm() const {
inline bool IsUncondBranchImm() const {
return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
}

bool IsCompareBranch() const {
inline bool IsCompareBranch() const {
return Mask(CompareBranchFMask) == CompareBranchFixed;
}

bool IsTestBranch() const {
inline bool IsTestBranch() const {
return Mask(TestBranchFMask) == TestBranchFixed;
}

bool IsPCRelAddressing() const {
inline bool IsPCRelAddressing() const {
return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
}

bool IsLogicalImmediate() const {
inline bool IsLogicalImmediate() const {
return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
}

bool IsAddSubImmediate() const {
inline bool IsAddSubImmediate() const {
return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
}

bool IsAddSubExtended() const {
inline bool IsAddSubExtended() const {
return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
}

bool IsLoadOrStore() const {
inline bool IsLoadOrStore() const {
return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
}

bool IsLoad() const;
bool IsStore() const;

bool IsLoadLiteral() const {
// This includes PRFM_lit.
return Mask(LoadLiteralFMask) == LoadLiteralFixed;
}

bool IsMovn() const {
inline bool IsMovn() const {
return (Mask(MoveWideImmediateMask) == MOVN_x) ||
(Mask(MoveWideImmediateMask) == MOVN_w);
}

// Indicate whether Rd can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rd field.
Reg31Mode RdMode() const {
inline Reg31Mode RdMode() const {
// The following instructions use sp or wsp as Rd:
// Add/sub (immediate) when not setting the flags.
// Add/sub (extended) when not setting the flags.
@@ -279,7 +260,7 @@ class Instruction {

// Indicate whether Rn can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rn field.
Reg31Mode RnMode() const {
inline Reg31Mode RnMode() const {
// The following instructions use sp or wsp as Rn:
// All loads and stores.
// Add/sub (immediate).
@@ -291,7 +272,7 @@ class Instruction {
return Reg31IsZeroRegister;
}

ImmBranchType BranchType() const {
inline ImmBranchType BranchType() const {
if (IsCondBranchImm()) {
return CondBranchType;
} else if (IsUncondBranchImm()) {
@@ -315,66 +296,55 @@ class Instruction {
// Patch a literal load instruction to load from 'source'.
void SetImmLLiteral(const Instruction* source);

// Calculate the address of a literal referred to by a load-literal
// instruction, and return it as the specified type.
//
// The literal itself is safely mutable only if the backing buffer is safely
// mutable.
template <typename T>
T LiteralAddress() const {
uint64_t base_raw = reinterpret_cast<uintptr_t>(this);
ptrdiff_t offset = ImmLLiteral() << kLiteralEntrySizeLog2;
uint64_t address_raw = base_raw + offset;

// Cast the address using a C-style cast. A reinterpret_cast would be
// appropriate, but it can't cast one integral type to another.
T address = (T)(address_raw);

// Assert that the address can be represented by the specified type.
VIXL_ASSERT((uint64_t)(address) == address_raw);

return address;
inline uint8_t* LiteralAddress() const {
int offset = ImmLLiteral() << kLiteralEntrySizeLog2;
const uint8_t* address = reinterpret_cast<const uint8_t*>(this) + offset;
// Note that the result is safely mutable only if the backing buffer is
// safely mutable.
return const_cast<uint8_t*>(address);
}

uint32_t Literal32() const {
inline uint32_t Literal32() const {
uint32_t literal;
memcpy(&literal, LiteralAddress<const void*>(), sizeof(literal));
memcpy(&literal, LiteralAddress(), sizeof(literal));

return literal;
}

uint64_t Literal64() const {
inline uint64_t Literal64() const {
uint64_t literal;
memcpy(&literal, LiteralAddress<const void*>(), sizeof(literal));
memcpy(&literal, LiteralAddress(), sizeof(literal));

return literal;
}

float LiteralFP32() const {
inline float LiteralFP32() const {
return rawbits_to_float(Literal32());
}

double LiteralFP64() const {
inline double LiteralFP64() const {
return rawbits_to_double(Literal64());
}

const Instruction* NextInstruction() const {
inline const Instruction* NextInstruction() const {
return this + kInstructionSize;
}

const Instruction* InstructionAtOffset(int64_t offset) const {
inline const Instruction* InstructionAtOffset(int64_t offset) const {
VIXL_ASSERT(IsWordAligned(this + offset));
return this + offset;
}

template<typename T> static Instruction* Cast(T src) {
template<typename T> static inline Instruction* Cast(T src) {
return reinterpret_cast<Instruction*>(src);
}

template<typename T> static const Instruction* CastConst(T src) {
template<typename T> static inline const Instruction* CastConst(T src) {
return reinterpret_cast<const Instruction*>(src);
}

private:
int ImmBranch() const;
inline int ImmBranch() const;

void SetPCRelImmTarget(const Instruction* target);
void SetBranchImmTarget(const Instruction* target);
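The LiteralAddress() change above is the main behavioural difference in this header: one side returns a raw uint8_t* while the other is a template that casts the literal-pool address to the requested type and asserts it fits. A small sketch of reading a word literal through the helpers both sides share; the include path is an assumption:

#include <cstdint>
#include "a64/instructions-a64.h"

// Returns the 32-bit literal referenced by an LDR (literal) instruction.
// The caller is assumed to have checked instr->IsLoadLiteral() and that the
// load is a W-register form; Literal32() memcpy()s from the literal pool, so
// unaligned pools are fine.
uint32_t ReadWordLiteral(const vixl::Instruction* instr) {
  return instr->Literal32();
}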
@@ -58,7 +58,7 @@ const int KBytes = 1024;
const int MBytes = 1024 * KBytes;

#define VIXL_ABORT() printf("in %s, line %i", __FILE__, __LINE__); abort()
#ifdef VIXL_DEBUG
#ifdef DEBUG
#define VIXL_ASSERT(condition) assert(condition)
#define VIXL_CHECK(condition) VIXL_ASSERT(condition)
#define VIXL_UNIMPLEMENTED() printf("UNIMPLEMENTED\t"); VIXL_ABORT()
@@ -135,17 +135,4 @@ bool IsPowerOf2(int64_t value) {
return (value != 0) && ((value & (value - 1)) == 0);
}


unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size) {
VIXL_ASSERT((reg_size % 8) == 0);
int count = 0;
for (unsigned i = 0; i < (reg_size / 16); i++) {
if ((imm & 0xffff) == 0) {
count++;
}
imm >>= 16;
}
return count;
}

} // namespace vixl
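CountClearHalfWords() above (present on only one side of this comparison) counts the 16-bit chunks of an immediate that are entirely zero, which is what the movz/movn selection heuristics elsewhere in VIXL build on. A quick check, assuming the libvixl headers are on the include path:

#include <cstdint>
#include <cstdio>
#include "utils.h"

int main() {
  // Three of the four half-words of this 64-bit value are zero, so this
  // prints 3.
  printf("%u\n", vixl::CountClearHalfWords(UINT64_C(0x0000ffff00000000), 64));
  return 0;
}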
@@ -166,8 +166,6 @@ int CountSetBits(uint64_t value, int width);
uint64_t LowestSetBit(uint64_t value);
bool IsPowerOf2(int64_t value);

unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);

// Pointer alignment
// TODO: rename/refactor to make it specific to instructions.
template<typename T>
@@ -176,14 +174,14 @@ bool IsWordAligned(T pointer) {
return ((intptr_t)(pointer) & 3) == 0;
}

// Increment a pointer (up to 64 bits) until it has the specified alignment.
// Increment a pointer until it has the specified alignment.
template<class T>
T AlignUp(T pointer, size_t alignment) {
// Use C-style casts to get static_cast behaviour for integral types (T), and
// reinterpret_cast behaviour for other types.

uint64_t pointer_raw = (uint64_t)pointer;
VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));
uintptr_t pointer_raw = (uintptr_t)pointer;
VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(pointer_raw));

size_t align_step = (alignment - pointer_raw) % alignment;
VIXL_ASSERT((pointer_raw + align_step) % alignment == 0);
@@ -191,14 +189,14 @@ T AlignUp(T pointer, size_t alignment) {
return (T)(pointer_raw + align_step);
}

// Decrement a pointer (up to 64 bits) until it has the specified alignment.
// Decrement a pointer until it has the specified alignment.
template<class T>
T AlignDown(T pointer, size_t alignment) {
// Use C-style casts to get static_cast behaviour for integral types (T), and
// reinterpret_cast behaviour for other types.

uint64_t pointer_raw = (uint64_t)pointer;
VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));
uintptr_t pointer_raw = (uintptr_t)pointer;
VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(pointer_raw));

size_t align_step = pointer_raw % alignment;
VIXL_ASSERT((pointer_raw - align_step) % alignment == 0);
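As the comments above explain, AlignUp()/AlignDown() use C-style casts so the same template works for pointers and for plain integers. A hedged usage sketch; the include path and example values are assumptions:

#include <cstdint>
#include "utils.h"

void AlignmentExamples(uint8_t* p) {
  // First 16-byte boundary at or above p; the result has the same type as p.
  uint8_t* aligned_ptr = vixl::AlignUp(p, 16);
  // Works for integral values too: 0x12345 rounds down to 0x12000.
  uint64_t page_base = vixl::AlignDown(UINT64_C(0x12345), 0x1000);
  (void)aligned_ptr;
  (void)page_base;
}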
@@ -275,7 +275,7 @@ enum microblaze_instr_type {

#define MAX_OPCODES 280

static struct op_code_struct {
struct op_code_struct {
const char *name;
short inst_type; /* registers and immediate values involved */
short inst_offset_type; /* immediate vals offset from PC? (= 1 for branches) */
@@ -567,9 +567,10 @@ static struct op_code_struct {
};

/* prefix for register names */
static const char register_prefix[] = "r";
static const char fsl_register_prefix[] = "rfsl";
static const char pvr_register_prefix[] = "rpvr";
char register_prefix[] = "r";
char special_register_prefix[] = "spr";
char fsl_register_prefix[] = "rfsl";
char pvr_register_prefix[] = "rpvr";


/* #defines for valid immediate range */
@@ -737,9 +738,7 @@ get_field_special (long instr, struct op_code_struct * op)
default :
{
if ( ((((instr & IMM_MASK) >> IMM_LOW) ^ op->immval_mask) & 0xE000) == REG_PVR_MASK) {
sprintf(tmpstr, "%s%u", pvr_register_prefix,
(unsigned short)(((instr & IMM_MASK) >> IMM_LOW) ^
op->immval_mask) ^ REG_PVR_MASK);
sprintf(tmpstr, "%spvr%d", register_prefix, (unsigned short)(((instr & IMM_MASK) >> IMM_LOW) ^ op->immval_mask) ^ REG_PVR_MASK);
return(strdup(tmpstr));
} else {
strcpy(spr, "pc");
10
disas/mips.c
@@ -3511,7 +3511,6 @@ struct mips_cp0sel_name
const char * const name;
};

#if 0
/* The mips16 registers. */
static const unsigned int mips16_to_32_reg_map[] =
{
@@ -3519,7 +3518,7 @@ static const unsigned int mips16_to_32_reg_map[] =
};

#define mips16_reg_names(rn) mips_gpr_names[mips16_to_32_reg_map[rn]]
#endif


static const char * const mips_gpr_names_numeric[32] =
{
@@ -3802,6 +3801,13 @@ static const char * const mips_hwr_names_mips3264r2[32] =
"$24", "$25", "$26", "$27", "$28", "$29", "$30", "$31"
};

static const char * const mips_msa_control_names_numeric[32] = {
"$0", "$1", "$2", "$3", "$4", "$5", "$6", "$7",
"$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15",
"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23",
"$24", "$25", "$26", "$27", "$28", "$29", "$30", "$31"
};

static const char * const mips_msa_control_names_mips3264r2[32] = {
"MSAIR", "MSACSR", "$2", "$3", "$4", "$5", "$6", "$7",
"$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15",
35
disas/s390.c
@@ -106,6 +106,10 @@ struct s390_opcode
static const struct s390_opcode s390_opcodes[];
static const int s390_num_opcodes;

/* A opcode format table for the .insn pseudo mnemonic. */
static const struct s390_opcode s390_opformats[];
static const int s390_num_opformats;

/* Values defined for the flags field of a struct powerpc_opcode. */

/* The operands table is an array of struct s390_operand. */
@@ -840,6 +844,37 @@ static const struct s390_operand s390_operands[] =
#define MASK_SIY_DRI { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
/* QEMU-END */

/* The opcode formats table (blueprints for .insn pseudo mnemonic). */

static const struct s390_opcode s390_opformats[] =
{
{ "e", OP8(0x00LL), MASK_E, INSTR_E, 3, 0 },
{ "ri", OP8(0x00LL), MASK_RI_RI, INSTR_RI_RI, 3, 0 },
{ "rie", OP8(0x00LL), MASK_RIE_RRP, INSTR_RIE_RRP, 3, 0 },
{ "ril", OP8(0x00LL), MASK_RIL_RP, INSTR_RIL_RP, 3, 0 },
{ "rilu", OP8(0x00LL), MASK_RIL_RU, INSTR_RIL_RU, 3, 0 },
{ "rr", OP8(0x00LL), MASK_RR_RR, INSTR_RR_RR, 3, 0 },
{ "rre", OP8(0x00LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0 },
{ "rrf", OP8(0x00LL), MASK_RRF_RURR, INSTR_RRF_RURR, 3, 0 },
{ "rs", OP8(0x00LL), MASK_RS_RRRD, INSTR_RS_RRRD, 3, 0 },
{ "rse", OP8(0x00LL), MASK_RSE_RRRD, INSTR_RSE_RRRD, 3, 0 },
{ "rsi", OP8(0x00LL), MASK_RSI_RRP, INSTR_RSI_RRP, 3, 0 },
{ "rsy", OP8(0x00LL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 3, 3 },
{ "rx", OP8(0x00LL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0 },
{ "rxe", OP8(0x00LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 3, 0 },
{ "rxf", OP8(0x00LL), MASK_RXF_RRRDR, INSTR_RXF_RRRDR,3, 0 },
{ "rxy", OP8(0x00LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 3, 3 },
{ "s", OP8(0x00LL), MASK_S_RD, INSTR_S_RD, 3, 0 },
{ "si", OP8(0x00LL), MASK_SI_URD, INSTR_SI_URD, 3, 0 },
{ "siy", OP8(0x00LL), MASK_SIY_URD, INSTR_SIY_URD, 3, 3 },
{ "ss", OP8(0x00LL), MASK_SS_RRRDRD, INSTR_SS_RRRDRD,3, 0 },
{ "sse", OP8(0x00LL), MASK_SSE_RDRD, INSTR_SSE_RDRD, 3, 0 },
{ "ssf", OP8(0x00LL), MASK_SSF_RRDRD, INSTR_SSF_RRDRD,3, 0 },
};

static const int s390_num_opformats =
sizeof (s390_opformats) / sizeof (s390_opformats[0]);

/* include "s390-opc.tab" generated from opcodes/s390-opc.txt rev 1.17 */
/* The opcode table. This file was generated by s390-mkopc.
@@ -332,7 +332,7 @@ typedef struct

#ifdef DEFINE_TABLE

static const sh_opcode_info sh_table[] =
const sh_opcode_info sh_table[] =
{
/* 0111nnnni8*1.... add #<imm>,<REG_N> */{"add",{A_IMM,A_REG_N},{HEX_7,REG_N,IMM0_8}, arch_sh1_up},
Some files were not shown because too many files have changed in this diff