Compare commits

204 Commits

Author SHA1 Message Date
Michael Roth
0982a56a55 Update version for 2.11.2 release
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-26 22:27:39 -05:00
Philippe Mathieu-Daudé
aba59705d9 usb/dev-mtp: Fix use of uninitialized values
This fixes:

  hw/usb/dev-mtp.c:971:5: warning: 4th function call argument is an uninitialized value
      trace_usb_mtp_op_get_partial_object(s->dev.addr, o->handle, o->path,
                                           c->argv[1], c->argv[2]);
                                                       ^~~~~~~~~~
and:

  hw/usb/dev-mtp.c:981:12: warning: Assigned value is garbage or undefined
      offset = c->argv[1];
               ^ ~~~~~~~~~~

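As a minimal, self-contained sketch of the pattern behind the fix (the struct
and field names here are illustrative, not the actual dev-mtp code), the idea
is to only read argument slots that the control request actually carried:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct control_cmd {
        int argc;              /* number of valid entries in argv[] */
        uint32_t argv[5];
    };

    static void get_partial_object(const struct control_cmd *c)
    {
        uint32_t offset = 0;
        uint32_t length = UINT32_MAX;

        /* guard against reading argv slots the request never filled in */
        if (c->argc > 1) {
            offset = c->argv[1];
        }
        if (c->argc > 2) {
            length = c->argv[2];
        }
        printf("offset=%" PRIu32 " length=%" PRIu32 "\n", offset, length);
    }
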
Reported-by: Clang Static Analyzer
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20180604151421.23385-3-f4bug@amsat.org
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
(cherry picked from commit 62713a2e50)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-21 10:18:22 -05:00
Philippe Mathieu-Daudé
17e3fcbc51 usb: correctly handle Zero Length Packets
USB Specification Revision 2.0, §5.5.3:
  "The Data stage of a control transfer from an endpoint to the host is complete when the endpoint does one of the following:
  • Has transferred exactly the amount of data specified during the Setup stage
  • Transfers a packet with a payload size less than wMaxPacketSize or transfers a zero-length packet"

hw/usb/redirect.c:802:9: warning: Declared variable-length array (VLA) has zero size
        uint8_t buf[size];
        ^~~~~~~~~~~ ~~~~

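A minimal standalone sketch of the zero-length guard (illustrative names, not
the redirect.c code): handling the zero-length packet first also keeps the
VLA from being declared with size 0.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static void copy_packet(void *dst, const void *src, size_t size)
    {
        if (size == 0) {
            return;            /* zero-length packet: nothing to transfer */
        }

        uint8_t buf[size];     /* VLA is now guaranteed to have size > 0 */
        memcpy(buf, src, size);
        memcpy(dst, buf, size);
    }
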
Reported-by: Clang Static Analyzer
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20180604151421.23385-2-f4bug@amsat.org
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
(cherry picked from commit bf78fb1c1b)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-21 10:18:16 -05:00
Philippe Mathieu-Daudé
9e4fa091ee gdbstub: fix off-by-one in gdb_handle_packet()
memtohex() adds an extra trailing NUL character.

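For illustration only (simplified signature, not the gdbstub code): an encoder
that NUL-terminates its output needs a buffer of 2 * len + 1 bytes, which is
exactly the off-by-one this commit accounts for.

    #include <stddef.h>
    #include <stdint.h>

    static void memtohex(char *dst, const uint8_t *src, size_t len)
    {
        static const char hexchars[] = "0123456789abcdef";

        for (size_t i = 0; i < len; i++) {
            dst[i * 2]     = hexchars[src[i] >> 4];
            dst[i * 2 + 1] = hexchars[src[i] & 0xf];
        }
        dst[len * 2] = '\0';   /* extra trailing NUL: size the buffer for it */
    }
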
Reported-by: AddressSanitizer
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20180408145933.1149-1-f4bug@amsat.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
(cherry picked from commit 9005774b27)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-21 10:18:10 -05:00
Michael Roth
a8e4217b0c Merge tag 'tags/s390x-20180621-211-stable' into stable-2.11-staging
update s390-ccw.img for stable

Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-21 10:16:52 -05:00
Cornelia Huck
728d6c602e pc-bios/s390-ccw.img: update image for stable
Contains the following commits:
- s390: Do not pass unofficial IPL type to the guest
- s390-ccw: force diag 308 subcode to unsigned long
- pc-bios/s390-ccw: struct tpi_info must be declared as aligned(4)

Signed-off-by: Cornelia Huck <cohuck@redhat.com>
2018-06-21 04:22:15 -04:00
Marc-André Lureau
1e13e7d93b tpm: lookup cancel path under tpm device class
Since Linux commit 313d21eeab9282e, tpm devices have their own device
class "tpm" and the cancel path must be looked up under
/sys/class/tpm/ instead of /sys/class/misc/.

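A rough sketch of the lookup order (GLib-based, illustrative only, not the
actual qemu code): try the per-class location first and fall back to the
legacy "misc" one.

    #include <glib.h>

    static char *tpm_find_cancel_path(const char *dev_name)
    {
        char *path;

        /* Linux >= 4.0 (commit 313d21eeab92): tpm has its own device class */
        path = g_strdup_printf("/sys/class/tpm/%s/device/cancel", dev_name);
        if (g_file_test(path, G_FILE_TEST_EXISTS)) {
            return path;
        }
        g_free(path);

        /* older kernels registered TPM devices under "misc" */
        return g_strdup_printf("/sys/class/misc/%s/device/cancel", dev_name);
    }
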
Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Reviewed-by: Stefan Berger <stefanb@linux.vnet.ibm.com>
Signed-off-by: Stefan Berger <stefanb@linux.vnet.ibm.com>
(cherry picked from commit 05b71fb207)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:08 -05:00
Marc-André Lureau
a36591fa62 tpm-passthrough: don't save guessed cancel_path in options
The value is later unneeded, and may leak if the free visitor doesn't
consider it since has_cancel_path is false. And for consistency with
"path" it shouldn't be returned in get_tpm_options().

Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Reviewed-by: Stefan Berger <stefanb@linux.vnet.ibm.com>
Signed-off-by: Stefan Berger <stefanb@linux.vnet.ibm.com>
(cherry picked from commit 69c07db046)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:08 -05:00
Christian Borntraeger
0f04a8bba6 s390-ccw-virtio: allow for systems larger than 7.999TB
KVM does not allow memory regions > KVM_MEM_MAX_NR_PAGES, basically
limiting the memory per slot to 8TB-4k. As memory slots on s390/kvm must
be a multiple of 1MB, we need to start a new memory region if we cross
8TB-1M.

With that (and optimistic overcommitment in the kernel) I was able to
start a 24TB guest on a 1TB system.

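A simplified sketch of the splitting idea (constants and names are
illustrative, not the s390-virtio-ccw code): register RAM in chunks that never
exceed the per-slot limit.

    #include <stdint.h>
    #include <stdio.h>

    /* 8TB - 1MB: largest slot size that is both below the KVM limit and a
     * multiple of 1MB */
    #define SLOT_MAX_BYTES ((8ULL << 40) - (1ULL << 20))

    static void register_ram(uint64_t ram_size)
    {
        uint64_t offset = 0;

        while (ram_size) {
            uint64_t chunk = ram_size < SLOT_MAX_BYTES ? ram_size
                                                       : SLOT_MAX_BYTES;
            printf("memory region at %#llx, size %#llx\n",
                   (unsigned long long)offset, (unsigned long long)chunk);
            offset += chunk;
            ram_size -= chunk;
        }
    }
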
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Message-Id: <20171211122146.162430-1-borntraeger@de.ibm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
[CH: 1UL -> 1ULL in KVM_MEM_MAX_NR_PAGES; build fix on 32 bit hosts]
Signed-off-by: Cornelia Huck <cohuck@redhat.com>

(cherry picked from commit bb223055b9)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:08 -05:00
Daniel P. Berrangé
2ab0ce6e8d crypto: ensure we use a predictable TLS priority setting
The TLS test cert generation relies on a fixed set of algorithms that are
only usable under GNUTLS' default priority setting. When building QEMU
with a custom distro specific priority setting, this can cause the TLS
tests to fail. By forcing the tests to always use "NORMAL" priority we
can make them more robust.

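A minimal sketch of how a test can pin the priority string with GnuTLS
(assuming a session has already been created; this is not the qemu test code
itself):

    #include <gnutls/gnutls.h>

    static int pin_test_priority(gnutls_session_t session)
    {
        const char *err_pos = NULL;

        /* ignore any distro-wide crypto policy and use the documented
         * "NORMAL" algorithm set, so the fixed test certs keep working */
        return gnutls_priority_set_direct(session, "NORMAL", &err_pos);
    }
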
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
(cherry picked from commit 057ad0b469)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:08 -05:00
Daniel P. Berrange
8bbb76e050 qapi: ensure stable sort ordering when checking QAPI entities
Some early python 3.x versions will have different default
ordering when calling the 'values()' method on a dict, compared
to python 2.x and later 3.x versions. Explicitly sort the items
to get a stable ordering.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
Message-Id: <20180116134217.8725-8-berrange@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
(cherry picked from commit f7a5376d4b)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:07 -05:00
Shannon Zhao
6622e94897 arm_gicv3_kvm: kvm_dist_get/put_priority: skip the registers banked by GICR_IPRIORITYR
While the for_each_dist_irq_reg loop starts from GIC_INTERNAL, it forgot to
offset the data array and index. This will overlap the GICR registers'
value and leave the registers of the last GIC_INTERNAL irqs without an update.

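A simplified sketch of the indexing fix (illustrative names, not the
arm_gicv3_kvm code): when the loop starts at GIC_INTERNAL but the destination
array only holds SPI state, the destination index must be rebased.

    #include <stdint.h>

    #define GIC_INTERNAL 32

    static void put_priority(uint8_t *spi_prio, const uint8_t *regs, int num_irq)
    {
        for (int irq = GIC_INTERNAL; irq < num_irq; irq++) {
            /* regs[] is indexed by absolute irq number, while spi_prio[]
             * only covers irqs >= GIC_INTERNAL, hence the offset */
            spi_prio[irq - GIC_INTERNAL] = regs[irq];
        }
    }
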
Fixes: 367b9f527b
Cc: qemu-stable@nongnu.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Shannon Zhao <zhaoshenglong@huawei.com>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
(cherry picked from commit 1dcf367519)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:07 -05:00
Eric Blake
99e051b290 iotests: Add test 221 to catch qemu-img map regression
Although qemu-img creates aligned files (by rounding up), it
must also gracefully handle files that are not sector-aligned.
Test that the bug fixed in the previous patch does not recur.

It's a bit annoying that we can see the (implicit) hole past
the end of the file on to the next sector boundary, so if we
ever reach the point where we report a byte-accurate size rather
than our current behavior of always rounding up, this test will
probably need a slight modification.

Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
(cherry picked from commit c6a9d2f6f9)
 Conflicts:
	tests/qemu-iotests/group
* drop context dep on tests not present in 2.11
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:07 -05:00
Eric Blake
9c5a8433ce qemu-img: Fix assert when mapping unaligned raw file
Commit a290f085 exposed a latent bug in qemu-img map introduced
during the conversion of block status to be byte-based.  Earlier in
commit 5e344dd8, the internal interface get_block_status() switched
to take byte-based parameters, but still called a sector-based
block layer function; as such, rounding was added in the lone
caller to obey the contract.  However, commit 237d78f8 changed
get_block_status() to truly be byte-based, at which point rounding
to sector boundaries can result in calling bdrv_block_status() with
'bytes == 0' (a coding error) when the boundary between data and a
hole falls mid-sector (true for the past-EOF implicit hole present
in POSIX files).  Fix things by removing the rounding that is now
no longer necessary.

See also https://bugzilla.redhat.com/1589738

Fixes: 237d78f8
Reported-by: Dan Kenigsberg <danken@redhat.com>
Reported-by: Nir Soffer <nsoffer@redhat.com>
Reported-by: Maor Lipchuk <mlipchuk@redhat.com>
CC: qemu-stable@nongnu.org
Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
(cherry picked from commit e0b371ed5e)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:07 -05:00
linzhecheng
d8a919ffc7 vhost-user: delete net client if necessary
If qemu_new_net_client() creates new net clients but an error happens later,
the clients are left in the global net_clients list and can't be used any
more, so we need to clean them up.

Cc: qemu-stable@nongnu.org
Signed-off-by: linzhecheng <linzhecheng@huawei.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
(cherry picked from commit c67daf4a24)
 Conflicts:
	net/vhost-user.c
* drop functional dep on 4d0cf552
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:07 -05:00
Brijesh Singh
f4e93bbd2e tap: set vhostfd passed from qemu cli to non-blocking
A guest boot hangs while probing the network interface when
iommu_platform=on is used.

The following qemu cli hangs without this patch:

# $QEMU \
  -netdev tap,fd=3,id=hostnet0,vhost=on,vhostfd=4 3<>/dev/tap67 4<>/dev/host-net \
  -device virtio-net-pci,netdev=hostnet0,id=net0,iommu_platform=on,disable-legacy=on \
  ...

Commit: c471ad0e9b (vhost_net: device IOTLB support) took care of
setting vhostfd to non-blocking when QEMU opens /dev/host-net, but if
the fd is passed from the qemu cli then we need to ensure that the fd is set
to non-blocking.

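The underlying operation is the standard POSIX way of making an already-open
fd non-blocking; a minimal sketch (not the qemu helper itself):

    #include <fcntl.h>

    static int set_nonblocking(int fd)
    {
        int flags = fcntl(fd, F_GETFL);

        if (flags < 0) {
            return -1;
        }
        /* keep the existing flags and add O_NONBLOCK */
        return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
    }
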
Fixes: c471ad0e9b ("vhost_net: device IOTLB support")
Cc: qemu-stable@nongnu.org
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Jason Wang <jasowang@redhat.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
(cherry picked from commit d542800d1e)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:07 -05:00
Konrad Rzeszutek Wilk
afa43eb63e i386: define the AMD 'virt-ssbd' CPUID feature bit (CVE-2018-3639)
AMD Zen exposes the Intel equivalent of Speculative Store Bypass Disable
via the 0x80000008_EBX[25] CPUID feature bit.

This needs to be exposed to the guest OS to allow it to protect
against CVE-2018-3639.

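For context, a small x86-only sketch of how a guest would detect the bit this
patch lets QEMU advertise (compiler cpuid intrinsics, not qemu code):

    #include <stdbool.h>
    #include <cpuid.h>

    static bool cpu_has_virt_ssbd(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx)) {
            return false;
        }
        return ebx & (1u << 25);   /* VIRT_SSBD: 0x80000008_EBX[25] */
    }
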
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20180521215424.13520-3-berrange@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
(cherry picked from commit 403503b162)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:07 -05:00
Konrad Rzeszutek Wilk
73521f60f4 i386: Define the Virt SSBD MSR and handling of it (CVE-2018-3639)
"Some AMD processors only support a non-architectural means of enabling
speculative store bypass disable (SSBD).  To allow a simplified view of
this to a guest, an architectural definition has been created through a new
CPUID bit, 0x80000008_EBX[25], and a new MSR, 0xc001011f.  With this, a
hypervisor can virtualize the existence of this definition and provide an
architectural method for using SSBD to a guest.

Add the new CPUID feature, the new MSR and update the existing SSBD
support to use this MSR when present." (from x86/speculation: Add virtualized
speculative store bypass disable support in Linux).

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20180521215424.13520-4-berrange@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
(cherry picked from commit cfeea0c021)
 Conflicts:
	target/i386/kvm.c
	target/i386/machine.c
* drop context dep on b77146e9a
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:07 -05:00
Daniel P. Berrangé
4ce0b750a2 i386: define the 'ssbd' CPUID feature bit (CVE-2018-3639)
New microcode introduces the "Speculative Store Bypass Disable"
CPUID feature bit. This needs to be exposed to the guest OS to allow
it to protect against CVE-2018-3639.

Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Message-Id: <20180521215424.13520-2-berrange@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
(cherry picked from commit d19d1f9659)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:07 -05:00
Gerd Hoffmann
84cfae0b6f vga: fix region calculation
Typically the scanline length and the line offset are identical.  But
in case they are not, our calculation for region_end is incorrect.  Using
line_offset is fine for all scanlines, except the last one where we have
to use the actual scanline length.

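A minimal sketch of the corrected calculation (parameter names are
illustrative): every scanline except the last advances by line_offset, and
only the bytes actually scanned out count for the last one.

    #include <stdint.h>

    static uint64_t region_end(uint64_t base, unsigned int lines,
                               unsigned int line_offset,
                               unsigned int scanline_len)
    {
        if (lines == 0) {
            return base;
        }
        /* the last line contributes scanline_len, not a full line_offset */
        return base + (uint64_t)(lines - 1) * line_offset + scanline_len;
    }
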
Fixes: CVE-2018-7550
Reported-by: Ross Lagerwall <ross.lagerwall@citrix.com>
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Prasad J Pandit <pjp@fedoraproject.org>
Tested-by: Ross Lagerwall <ross.lagerwall@citrix.com>
Message-id: 20180309143704.13420-1-kraxel@redhat.com
(cherry picked from commit 7cdc61becd)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:07 -05:00
Alberto Garcia
d80b60fc24 throttle: Fix crash on reopen
The throttle block filter can be reopened, and with this it is
possible to change the throttle group that the filter belongs to.

The way the code does that is the following:

  - On throttle_reopen_prepare(): create a new ThrottleGroupMember
    and attach it to the new throttle group.

  - On throttle_reopen_commit(): detach the old ThrottleGroupMember,
    delete it and replace it with the new one.

The problem with this is that by replacing the ThrottleGroupMember the
previous value of io_limits_disabled is lost, causing an assertion
failure in throttle_co_drain_end().

This problem can be reproduced by reopening a throttle node:

   $QEMU -monitor stdio \
   -object throttle-group,id=tg0,x-iops-total=1000 \
   -blockdev node-name=hd0,driver=qcow2,file.driver=file,file.filename=hd.qcow2 \
   -blockdev node-name=root,driver=throttle,throttle-group=tg0,file=hd0,read-only=on

   (qemu) block_stream root
   block/throttle.c:214: throttle_co_drain_end: Assertion `tgm->io_limits_disabled' failed.

Since we only want to change the throttle group on reopen there's no
need to create a ThrottleGroupMember and discard the old one. It's
easier if we simply detach it from its current group and attach it to
the new one.

Signed-off-by: Alberto Garcia <berto@igalia.com>
Message-id: 20180608151536.7378-1-berto@igalia.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit bc33c047d1)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:06 -05:00
Max Reitz
f647a4fc44 iotests: Add case for a corrupted inactive image
Reviewed-by: John Snow <jsnow@redhat.com>
Tested-by: Jeff Cody <jcody@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180606193702.7113-4-mreitz@redhat.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit c50abd175a)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:06 -05:00
Max Reitz
f91f44fcd1 qcow2: Do not mark inactive images corrupt
When signaling a corruption on a read-only image, qcow2 already makes
fatal events non-fatal (i.e., they will not result in the image being
closed, and the image header's corrupt flag will not be set).  This is
necessary because we cannot set the corrupt flag on read-only images,
and it is possible because further corruption of read-only images is
impossible.

Inactive images are effectively read-only, too, so we should do the same
for them.  bdrv_is_writable() can tell us whether an image can actually
be written to, so use its result instead of !bs->read_only.

(Otherwise, the assert(!(bs->open_flags & BDRV_O_INACTIVE)) in
bdrv_co_pwritev() will fail, crashing qemu.)

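A toy sketch of the decision this commit changes (simplified fields, not the
real BlockDriverState): base the "can this corruption be fatal" check on
actual writability rather than on read_only alone.

    #include <stdbool.h>

    struct bdrv_state {
        bool read_only;
        bool inactive;          /* stands in for BDRV_O_INACTIVE */
    };

    static bool is_writable(const struct bdrv_state *bs)
    {
        return !bs->read_only && !bs->inactive;
    }

    static bool corruption_is_fatal(const struct bdrv_state *bs, bool fatal)
    {
        /* never escalate on images we are not allowed to write to */
        return fatal && is_writable(bs);
    }
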
Cc: qemu-stable@nongnu.org
Signed-off-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180606193702.7113-3-mreitz@redhat.com
Reviewed-by: John Snow <jsnow@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit ddf3b47ef4)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:06 -05:00
Max Reitz
62891cad12 block: Make bdrv_is_writable() public
This is a useful function for the whole block layer, so make it public.
At the same time, users outside of block.c probably do not need to make
use of the reopen functionality, so rename the current function to
bdrv_is_writable_after_reopen() and create a new bdrv_is_writable() function
that just passes NULL to it for the reopen queue.

Cc: qemu-stable@nongnu.org
Signed-off-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180606193702.7113-2-mreitz@redhat.com
Reviewed-by: John Snow <jsnow@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit cc02214097)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:06 -05:00
Shannon Zhao
5d639531b7 arm_gicv3_kvm: kvm_dist_get/put: skip the registers banked by GICR
While we skip the GIC_INTERNAL irqs, we don't change the register offset
accordingly. This will overlap the GICR registers' value and leave the
registers of the last GIC_INTERNAL irqs without an update.

Fix this by skipping the registers banked by GICR.

Also for migration compatibility if the migration source (old version
qemu) doesn't send gicd_no_migration_shift_bug = 1 to destination, then
we shift the data of PPI to get the right data for SPI.

Fixes: 367b9f527b
Cc: qemu-stable@nongnu.org
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Shannon Zhao <zhaoshenglong@huawei.com>
Message-id: 1527816987-16108-1-git-send-email-zhaoshenglong@huawei.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
(cherry picked from commit 910e204841)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:06 -05:00
John Snow
deabb454dc ahci: fix PxCI register race
Fixes: https://bugs.launchpad.net/qemu/+bug/1769189

AHCI presently signals completion prior to the PxCI register being
cleared to indicate completion. If a guest driver attempts to issue
a new command in its IRQ handler, it might be surprised to learn there
is still a command pending.

In the case of Windows 10's boot driver, it will actually poll the IRQ
register hoping to find out when the command is done running -- which
will never happen, as there isn't a command running.

Fix this: clear PxCI in ahci_cmd_done and not in the asynchronous BH.
Because it now runs synchronously, we don't need to check if the command
is actually done by spying on the ATA registers. We know it's done.

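A simplified sketch of the ordering fix (register names follow the AHCI spec,
the rest is illustrative): clear the PxCI bit for the slot before raising the
completion interrupt, so a driver issuing a new command from its IRQ handler
sees a free slot.

    #include <stdint.h>

    struct ahci_port {
        uint32_t px_ci;         /* PxCI: commands issued */
        uint32_t px_is;         /* PxIS: interrupt status */
    };

    static void cmd_done(struct ahci_port *p, unsigned int slot)
    {
        p->px_ci &= ~(1u << slot);   /* command is no longer pending... */
        p->px_is |= 1u << 0;         /* ...and only then signal completion */
    }
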
CC: qemu-stable <qemu-stable@nongnu.org>
Reported-by: François Guerraz <kubrick@fgv6.net>
Tested-by: Bruce Rogers <brogers@suse.com>
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
Message-id: 20180531004323.4611-3-jsnow@redhat.com
Signed-off-by: John Snow <jsnow@redhat.com>
(cherry picked from commit 5694c7eacc)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:06 -05:00
John Thomson
16238325f1 Fix libusb-1.0.22 deprecated libusb_set_debug with libusb_set_option
libusb-1.0.22 marked libusb_set_debug as deprecated; it is replaced with
libusb_set_option(libusb_context, LIBUSB_OPTION_LOG_LEVEL, libusb_log_level);

details here: 539f22e2fd

Warning here:

  CC      hw/usb/host-libusb.o
/builds/xen/src/qemu-xen/hw/usb/host-libusb.c: In function 'usb_host_init':
/builds/xen/src/qemu-xen/hw/usb/host-libusb.c:250:5: error: 'libusb_set_debug' is deprecated: Use libusb_set_option instead [-Werror=deprecated-declarations]
     libusb_set_debug(ctx, loglevel);
     ^~~~~~~~~~~~~~~~
In file included from /builds/xen/src/qemu-xen/hw/usb/host-libusb.c:40:0:
/usr/include/libusb-1.0/libusb.h:1300:18: note: declared here
 void LIBUSB_CALL libusb_set_debug(libusb_context *ctx, int level);
                  ^~~~~~~~~~~~~~~~
cc1: all warnings being treated as errors
make: *** [/builds/xen/src/qemu-xen/rules.mak:66: hw/usb/host-libusb.o] Error 1
make: Leaving directory '/builds/xen/src/xen/tools/qemu-xen-build'

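The version-conditional call pattern, as a sketch (assuming the libusb-1.0
include path is configured; the cut-over value 0x01000106 corresponds to
libusb 1.0.22):

    #include <libusb.h>

    static void set_usb_log_level(libusb_context *ctx, int level)
    {
    #if LIBUSB_API_VERSION >= 0x01000106   /* libusb >= 1.0.22 */
        libusb_set_option(ctx, LIBUSB_OPTION_LOG_LEVEL, level);
    #else
        libusb_set_debug(ctx, level);      /* deprecated in 1.0.22 */
    #endif
    }
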
Signed-off-by: John Thomson <git@johnthomson.fastmail.com.au>
Message-id: 20180405132046.4968-1-git@johnthomson.fastmail.com.au
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
(cherry picked from commit 9d8fa0df49)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:06 -05:00
Shannon Zhao
e55c6b6795 arm_gicv3_kvm: increase clroffset accordingly
It forgot to increase clroffset during the loop, so it only clears the
first 4 bytes.

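A minimal sketch of the loop shape (illustrative, not the arm_gicv3_kvm code):
when clearing a register range in 4-byte chunks, the destination offset has to
advance along with the loop.

    #include <stdint.h>

    static void clear_range(uint32_t clroffset, int nregs,
                            void (*write_reg)(uint32_t offset, uint32_t val))
    {
        for (int i = 0; i < nregs; i++) {
            write_reg(clroffset, 0);
            clroffset += 4;     /* the increment this commit adds */
        }
    }
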
Fixes: 367b9f527b
Cc: qemu-stable@nongnu.org
Signed-off-by: Shannon Zhao <zhaoshenglong@huawei.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Message-id: 1527047633-12368-1-git-send-email-zhaoshenglong@huawei.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
(cherry picked from commit 34ffacae08)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:06 -05:00
Peter Xu
8da7a171c0 intel-iommu: rework the page walk logic
This patch fixes a small window during which the DMA page table might
be incomplete or invalid when the guest sends domain/context
invalidations to a device.  This can cause random DMA errors for
assigned devices.

This is a major change to the VT-d shadow page walking logic. It
includes but is not limited to:

- For each VTDAddressSpace, now we maintain what IOVA ranges we have
  mapped and what we have not.  With that information, now we only send
  MAP or UNMAP when necessary.  Say, we don't send MAP notifies if we
  know we have already mapped the range, meanwhile we don't send UNMAP
  notifies if we know we never mapped the range at all.

- Introduce vtd_sync_shadow_page_table[_range] APIs so that we can call
  in any places to resync the shadow page table for a device.

- When we receive domain/context invalidation, we should not really run
  the replay logic, instead we use the new sync shadow page table API to
  resync the whole shadow page table without unmapping the whole
  region.  After this change, we'll only do the page walk once for each
  domain invalidation (before this, it can be multiple, depending on the
  number of notifiers per address space).

While at it, the page walking logic is also refactored to be simpler.

CC: QEMU Stable <qemu-stable@nongnu.org>
Reported-by: Jintack Lim <jintack@cs.columbia.edu>
Tested-by: Jintack Lim <jintack@cs.columbia.edu>
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit 63b88968f1)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:06 -05:00
Peter Xu
bdb6f9b6bc util: implement simple iova tree
Introduce a simplest iova tree implementation based on GTree.

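A rough sketch of the idea (types and comparator are illustrative, not the
util/iova-tree.c API): keep IOVA ranges in a GTree whose comparator treats
overlapping ranges as equal, so lookups by address find the covering range.

    #include <glib.h>
    #include <stdint.h>

    typedef struct {
        uint64_t iova;
        uint64_t size;
    } IOVARange;

    static gint iova_range_compare(gconstpointer a, gconstpointer b,
                                   gpointer user_data)
    {
        const IOVARange *r1 = a, *r2 = b;

        if (r1->iova + r1->size - 1 < r2->iova) {
            return -1;          /* r1 lies entirely below r2 */
        }
        if (r1->iova > r2->iova + r2->size - 1) {
            return 1;           /* r1 lies entirely above r2 */
        }
        return 0;               /* overlapping ranges compare equal */
    }

    static GTree *iova_tree_new(void)
    {
        return g_tree_new_full(iova_range_compare, NULL, g_free, NULL);
    }
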
CC: QEMU Stable <qemu-stable@nongnu.org>
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit eecf5eedbd)
 Conflicts:
	util/Makefile.objs
* drop context dep
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:05 -05:00
Peter Xu
e2c44fc580 intel-iommu: trace domain id during page walk
This patch only modifies the trace points.

Previously we were tracing page walk levels.  They are redundant since
we have page mask (size) already.  Now we trace something much more
useful which is the domain ID of the page walking.  That can be very
useful when we trace more than one devices on the same system, so that
we can know which map is for which domain.

CC: QEMU Stable <qemu-stable@nongnu.org>
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit d118c06ebb)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:05 -05:00
Peter Xu
e8aa1dc816 intel-iommu: pass in address space when page walk
We pass in the VTDAddressSpace too.  It'll be used in the follow up
patches.

CC: QEMU Stable <qemu-stable@nongnu.org>
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit 2f764fa87d)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:05 -05:00
Peter Xu
04e0e11628 intel-iommu: introduce vtd_page_walk_info
During the recursive page walking of IOVA page tables, some stack
variables are constant and never change during the whole page
walking procedure.  Isolate them into a struct so that we don't need to
pass those constants down the stack over and over again.

CC: QEMU Stable <qemu-stable@nongnu.org>
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit fe215b0cbb)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:05 -05:00
Peter Xu
21962e6958 intel-iommu: only do page walk for MAP notifiers
For UNMAP-only IOMMU notifiers, we don't need to walk the page tables.
Speed that procedure up by skipping the page table walk.  That should
boost performance for UNMAP-only notifiers like vhost.

CC: QEMU Stable <qemu-stable@nongnu.org>
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit 4f8a62a933)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:05 -05:00
Peter Xu
3585497e86 intel-iommu: add iommu lock
SECURITY IMPLICATION: this patch fixes a potential race when multiple
threads access the IOMMU IOTLB cache.

Add a per-iommu big lock to protect IOMMU status.  Currently the only
thing to be protected is the IOTLB/context cache, since that can be
accessed even without BQL, e.g., in IO dataplane.

Note that we don't need to protect device page tables since that's fully
controlled by the guest kernel.  However, there is still a possibility that
malicious drivers will program the device to not obey the rule.  In that
case QEMU can't really do anything useful, instead the guest itself will
be responsible for all uncertainties.

CC: QEMU Stable <qemu-stable@nongnu.org>
Reported-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit 1d9efa73e1)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:05 -05:00
Peter Xu
99fc962b62 intel-iommu: remove IntelIOMMUNotifierNode
That is not really necessary.  Remove that node struct and put the
list entry directly into VTDAddressSpace.  It simplifies the code a lot.
While at it, rename the old notifiers_list into vtd_as_with_notifiers.

CC: QEMU Stable <qemu-stable@nongnu.org>
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit b4a4ba0d68)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:05 -05:00
Peter Xu
6c403b72b4 intel-iommu: send PSI always even if across PDEs
SECURITY IMPLICATION: without this patch, any guest with both assigned
device and a vIOMMU might encounter stale IO page mappings even if guest
has already unmapped the page, which may lead to guest memory
corruption.  The stale mappings will only be limited to the guest's own
memory range, so it should not affect the host memory or other guests on
the host.

During IOVA page table walking, there is a special case when the PSI
covers one whole PDE (Page Directory Entry, which contains 512 Page
Table Entries) or more.  In the past, we skip that entry and we don't
notify the IOMMU notifiers.  This is not correct.  We should send UNMAP
notification to registered UNMAP notifiers in this case.

For UNMAP only notifiers, this might cause IOTLBs cached in the devices
even if they were already invalid.  For MAP/UNMAP notifiers like
vfio-pci, this will cause stale page mappings.

This special case doesn't trigger often, but it is very easy to be
triggered by nested device assignments, since in that case we'll
possibly map the whole L2 guest RAM region into the device's IOVA
address space (several GBs at least), which is far bigger than normal
kernel driver usages of the device (tens of MBs normally).

Without this patch applied to L1 QEMU, nested device assignment to L2
guests will dump some errors like:

qemu-system-x86_64: VFIO_MAP_DMA: -17
qemu-system-x86_64: vfio_dma_map(0x557305420c30, 0xad000, 0x1000,
                    0x7f89a920d000) = -17 (File exists)

CC: QEMU Stable <qemu-stable@nongnu.org>
Acked-by: Jason Wang <jasowang@redhat.com>
[peterx: rewrite the commit message]
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

(cherry picked from commit 36d2d52bdb)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:05 -05:00
Prasad Singamsetty
0b250250b7 intel-iommu: Extend address width to 48 bits
The current implementation of the Intel IOMMU code only supports a 39-bit
iova address width. This patch provides a new parameter (x-aw-bits)
for intel-iommu to extend its address width to 48 bits but keeping the
default the same (39 bits). The reason for not changing the default
is to avoid potential compatibility problems with live migration of
intel-iommu enabled QEMU guest. The only valid values for 'x-aw-bits'
parameter are 39 and 48.

After enabling larger address width (48), we should be able to map
larger iova addresses in the guest. For example, a QEMU guest that
is configured with large memory (>= 1TB). To check whether the 48-bit
address width is enabled, we can grep the guest dmesg output for the line:
"DMAR: Host address width 48".

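A small sketch of the property validation described above (macro names are
assumptions for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    #define AW_39BIT 39
    #define AW_48BIT 48

    static bool check_aw_bits(unsigned int aw_bits)
    {
        if (aw_bits != AW_39BIT && aw_bits != AW_48BIT) {
            fprintf(stderr, "x-aw-bits must be either %d or %d\n",
                    AW_39BIT, AW_48BIT);
            return false;
        }
        return true;
    }
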
Signed-off-by: Prasad Singamsetty <prasad.singamsety@oracle.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit 37f51384ae)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:05 -05:00
Prasad Singamsetty
d45364e6d4 intel-iommu: Redefine macros to enable supporting 48 bit address width
The current implementation of the Intel IOMMU code only supports a 39-bit
host/iova address width, so a number of macros use hard-coded values based
on that. This patch redefines them so they can be used with
variable address widths. This patch doesn't add any new functionality
but enables adding support for 48 bit address width.

Signed-off-by: Prasad Singamsetty <prasad.singamsety@oracle.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit 92e5d85e83)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:05 -05:00
Jan Kiszka
7128bcb221 hw/intc/arm_gicv3: Fix APxR<n> register dispatching
There was a nasty flip in identifying which register group an access is
targeting. The issue caused spuriously raised priorities of the guest
when handing CPUs over in the Jailhouse hypervisor.

Cc: qemu-stable@nongnu.org
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Message-id: 28b927d3-da58-bce4-cc13-bfec7f9b1cb9@siemens.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
(cherry picked from commit 887aae10f6)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:04 -05:00
Michal Privoznik
2e70085eb7 console: Avoid segfault in screendump
After f771c5440e it is possible to select the device and
head to take a screendump from. And even though we check whether the
provided head number falls within range, it may still happen that
the console has no surface yet, leading to SIGSEGV:

  qemu.git $ ./x86_64-softmmu/qemu-system-x86_64 \
    -qmp stdio \
    -device virtio-vga,id=video0,max_outputs=4

  {"execute":"qmp_capabilities"}
  {"execute":"screendump", "arguments":{"filename":"/tmp/screen.ppm", "device":"video0", "head":1}}
  Segmentation fault

 #0  0x00005628249dda88 in ppm_save (filename=0x56282826cbc0 "/tmp/screen.ppm", ds=0x0, errp=0x7fff52a6fae0) at ui/console.c:304
 #1  0x00005628249ddd9b in qmp_screendump (filename=0x56282826cbc0 "/tmp/screen.ppm", has_device=true, device=0x5628276902d0 "video0", has_head=true, head=1, errp=0x7fff52a6fae0) at ui/console.c:375
 #2  0x00005628247740df in qmp_marshal_screendump (args=0x562828265e00, ret=0x7fff52a6fb68, errp=0x7fff52a6fb60) at qapi/qapi-commands-ui.c:110

Here, @ds from frame #0 (or @surface from frame #1) is
dereferenced at the very beginning of ppm_save(). And because
it is NULL, the crash happens.

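A toy sketch of the guard that avoids the NULL dereference (simplified types,
not the ui/console.c code):

    #include <stdio.h>

    struct console {
        struct surface *surface;    /* NULL until the first mode set */
    };

    static int screendump(struct console *con, const char *filename)
    {
        struct surface *ds = con ? con->surface : NULL;

        if (ds == NULL) {
            fprintf(stderr, "no surface for this console/head yet\n");
            return -1;      /* report an error instead of crashing */
        }
        /* ... hand 'ds' to the PPM writer here ... */
        (void)filename;
        return 0;
    }
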
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Message-id: cb05bb1909daa6ba62145c0194aafa05a14ed3d1.1526569138.git.mprivozn@redhat.com
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
(cherry picked from commit 08d9864fa4)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:04 -05:00
Cornelia Huck
b68ef5920a s390x/ccw: make sure all ccw devices are properly reset
Thomas reported that the subchannel for a 3270 device that ended up
in a broken state (status pending even though not enabled) did not
get out of that state even after a reboot (which involves a subsystem
reset). The reason for this is that the 3270 device did not define
a reset handler.

Let's fix this by introducing a base reset handler (set up for all
ccw devices) that resets the subchannel and have virtio-ccw call
its virtio-specific reset procedure in addition to that.

CC: qemu-stable@nongnu.org
Reported-by: Thomas Huth <thuth@redhat.com>
Suggested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Tested-by: Thomas Huth <thuth@redhat.com>
Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: Halil Pasic <pasic@linux.ibm.com>
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
(cherry picked from commit 838fb84f83)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:04 -05:00
Cornelia Huck
b2ca602627 virtio-ccw: common reset handler
All the different virtio ccw devices use the same reset handler,
so let's move setting it into the base virtio ccw device class.

CC: qemu-stable@nongnu.org
Reviewed-by: Thomas Huth <thuth@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Halil Pasic <pasic@linux.ibm.com>
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
(cherry picked from commit 0c53057adb)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:04 -05:00
Nia Alarie
f7f5398dc7 s390x/virtio: Convert virtio-ccw from *_exit to *_unrealize
Signed-off-by: Nia Alarie <nia.alarie@gmail.com>
Message-Id: <20180307162958.11232-1-nia.alarie@gmail.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
(cherry picked from commit 24118af846)
*prereq for 0c53057adb
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:04 -05:00
Philippe Mathieu-Daudé
18f59bbc76 qdev: add helpers to be more explicit when using abstract QOM parent functions
The QOM API learning curve is quite steep, in particular when devices inherit
from an abstract parent.
To be more explicit about when a device class changes the parent hooks, add a
few helpers, in the hope that a device class_init() will be easier to understand.

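The shape of such a helper, sketched with deliberately simplified types (the
real helpers and their signatures live in qdev; this only shows the
save-parent-then-override idea):

    typedef struct Device Device;
    typedef void (*RealizeFn)(Device *dev);

    typedef struct {
        RealizeFn realize;
    } DeviceClass;

    static void device_class_set_parent_realize(DeviceClass *dc,
                                                RealizeFn child_realize,
                                                RealizeFn *parent_realize)
    {
        *parent_realize = dc->realize;   /* keep the parent hook callable */
        dc->realize = child_realize;     /* install the subclass override */
    }
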
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-Id: <20180114020412.26160-3-f4bug@amsat.org>
Reviewed-by: Laurent Vivier <laurent@vivier.eu>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit 46795cf2e2)
*prereq for 0c53057adb
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:04 -05:00
Philippe Mathieu-Daudé
0db89523a6 qdev: rename typedef qdev_resetfn() -> DeviceReset()
Following the DeviceRealize and DeviceUnrealize typedefs,
this unifies the new QOM API a bit.

Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-Id: <20180114020412.26160-2-f4bug@amsat.org>
Reviewed-by: Laurent Vivier <laurent@vivier.eu>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit b850f664a1)
*prereq for 0c53057adb
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:04 -05:00
Thomas Huth
ce4e7b03a7 pc-bios/s390-ccw: struct tpi_info must be declared as aligned(4)
I've run into a compilation error today with the current version of GCC 8:

In file included from s390-ccw.h:49,
                 from main.c:12:
cio.h:128:1: error: alignment 1 of 'struct tpi_info' is less than 4 [-Werror=packed-not-aligned]
 } __attribute__ ((packed));
 ^
cc1: all warnings being treated as errors

Since the struct tpi_info contains an element ("struct subchannel_id schid")
which is marked as aligned(4), we've got to mark the struct tpi_info as
aligned(4), too.

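The attribute change, sketched with simplified field names (the real layout is
in pc-bios/s390-ccw/cio.h):

    #include <stdint.h>

    struct subchannel_id {
        uint32_t raw;
    } __attribute__ ((aligned(4)));

    struct tpi_info {
        struct subchannel_id schid;   /* member demands 4-byte alignment */
        uint32_t intparm;
        uint32_t flags;
    } __attribute__ ((packed, aligned(4)));  /* aligned(4) is what the fix adds */
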
CC: qemu-stable@nongnu.org
Signed-off-by: Thomas Huth <thuth@redhat.com>
Message-Id: <1525774672-11913-1-git-send-email-thuth@redhat.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
(cherry picked from commit a6e4385dea)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:04 -05:00
Cornelia Huck
435a31ff9a s390x/css: disabled subchannels cannot be status pending
The 3270 code will try to post an attention interrupt when the
3270 emulator (e.g. x3270) attaches. If the guest has not yet
enabled the subchannel for the 3270 device, we will present a spurious
cc 1 (status pending) when it uses msch on it later on, e.g. when
trying to enable the subchannel.

To fix this, just don't do anything in css_conditional_io_interrupt()
if the subchannel is not enabled. The 3270 code will work fine with
that, and the other user of this function (virtio-ccw) never
attempts to post an interrupt for a disabled device to begin with.

CC: qemu-stable@nongnu.org
Reported-by: Thomas Huth <thuth@redhat.com>
Tested-by: Thomas Huth <thuth@redhat.com>
Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Acked-by: Halil Pasic <pasic@linux.ibm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
(cherry picked from commit 6e9c893ecd)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:04 -05:00
Fam Zheng
720db5deeb raw: Check byte range uniformly
We don't verify the request range against s->size in the I/O callbacks
except for raw_co_pwritev. This is inconsistent (especially for
raw_co_pwrite_zeroes and raw_co_pdiscard), so fix them; in the meanwhile,
make the helper reusable by the coming new callbacks.

Note that in most cases the block layer already verifies the request
byte range against our reported image length, before invoking the driver
callbacks.  The exception is during image creating, after
blk_set_allow_write_beyond_eof(blk, true) is called. But in that case,
the requests are not directly from the user or guest. So there is no
visible behavior change in adding the check code.

The int64_t -> uint64_t inconsistency, as shown by the type casting, is
pre-existing due to the interface.

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
Message-id: 20180601092648.24614-3-famz@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
(cherry picked from commit 3844553852)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:04 -05:00
Michael Walle
979e7ea7eb lm32: take BQL before writing IP/IM register
Writing to these registers may raise an interrupt request. Actually,
this prevents the milkymist board from starting.

Cc: qemu-stable@nongnu.org
Signed-off-by: Michael Walle <michael@walle.cc>
Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
(cherry picked from commit 81e9cbd0ca)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:03 -05:00
Max Reitz
15fb5dbf90 iotests: Add test for -U/force-share conflicts
Signed-off-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180502202051.15493-4-mreitz@redhat.com
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit 4e7d73c5fb)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:03 -05:00
Max Reitz
5704f53ca9 qemu-img: Use only string options in img_open_opts
img_open_opts() takes a QemuOpts and converts them to a QDict, so all
values therein are strings.  Then it may try to call qdict_get_bool(),
however, which will fail with a segmentation fault every time:

$ ./qemu-img info -U --image-opts \
    driver=file,filename=/dev/null,force-share=off
[1]    27869 segmentation fault (core dumped)  ./qemu-img info -U
--image-opts driver=file,filename=/dev/null,force-share=off

Fix this by using qdict_get_str() and comparing the value as a string.
Also, when adding a force-share value to the QDict, add it as a string
so it fits the rest of the dict.

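A minimal sketch of the string comparison this implies (illustrative helper,
not the qemu-img code):

    #include <stdbool.h>
    #include <string.h>

    static bool force_share_requested(const char *value)
    {
        /* values converted from QemuOpts are strings; NULL means unset */
        return value != NULL && strcmp(value, "on") == 0;
    }
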
Cc: qemu-stable@nongnu.org
Signed-off-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180502202051.15493-3-mreitz@redhat.com
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit 4615f87832)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:03 -05:00
Max Reitz
dbacc54944 qemu-io: Use purely string blockdev options
Currently, qemu-io only uses string-valued blockdev options (as all are
converted directly from QemuOpts) -- with one exception: -U adds the
force-share option as a boolean.  This in itself is already a bit
questionable, but a real issue is that it also assumes the value already
existing in the options QDict would be a boolean, which is wrong.

That has the following effect:

$ ./qemu-io -r -U --image-opts \
    driver=file,filename=/dev/null,force-share=off
[1]    15200 segmentation fault (core dumped)  ./qemu-io -r -U
--image-opts driver=file,filename=/dev/null,force-share=off

Since @opts is converted from QemuOpts, the value must be a string, and
we have to compare it as such.  Consequently, it makes sense to also set
it as a string instead of a boolean.

Cc: qemu-stable@nongnu.org
Signed-off-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180502202051.15493-2-mreitz@redhat.com
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit 2a01c01f9e)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:03 -05:00
Max Reitz
324bef41d3 iotests: Add test for rebasing with relative paths
Signed-off-by: Max Reitz <mreitz@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Message-id: 20180509182002.8044-3-mreitz@redhat.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit 28036a7f70)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:03 -05:00
Max Reitz
1bec53c6f4 qemu-img: Resolve relative backing paths in rebase
Currently, rebase interprets a relative path for the new backing image
as follows:
(1) Open the new backing image with the given relative path (thus relative to
    qemu-img's working directory).
(2) Write it directly into the overlay's backing path field (thus
    relative to the overlay).

If the overlay is not in qemu-img's working directory, the two
interpretations will differ, which may either lead to an error somewhere
(either rebase fails because it cannot open the new backing image, or
your overlay becomes unusable because its backing path does not point to
a file), or, even worse, it may result in your rebase being performed
for a different backing file than what your overlay will point to after
the rebase.

Fix this by interpreting the target backing path as relative to the
overlay, like qemu-img does everywhere else.

Buglink: https://bugzilla.redhat.com/show_bug.cgi?id=1569835
Cc: qemu-stable@nongnu.org
Signed-off-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180509182002.8044-2-mreitz@redhat.com
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit d16699b646)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:03 -05:00
Olaf Hering
088a22455b configure: recognize more rpmbuild macros
Extend the list of recognized, but ignored options from rpms %configure
macro. This fixes build on hosts running SUSE Linux.

Cc: qemu-stable@nongnu.org
Signed-off-by: Olaf Hering <olaf@aepfle.de>
Message-Id: <20180418075045.27393-1-olaf@aepfle.de>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit 181ce1d05c)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:03 -05:00
Gerd Hoffmann
ecd54d6670 qxl: fix local renderer crash
Make sure we only ask the spice local renderer for display updates in
case we have a valid primary surface.  Without that, spice is confused
and throws errors in case a display update request (triggered by
screendump for example) happens in parallel to a mode switch and hits
the race window where the old primary surface is gone and the new isn't
established yet.

Cc: qemu-stable@nongnu.org
Fixes: https://bugzilla.redhat.com//show_bug.cgi?id=1567733
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-id: 20180427115528.345-1-kraxel@redhat.com
(cherry picked from commit 5bd5c27c7d)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:03 -05:00
Greg Kurz
1378cb9db5 spapr: don't advertise radix GTSE if max-compat-cpu < power9
On a POWER9 host, if a guest runs in pre POWER9 compat mode, it necessarily
uses the hash MMU mode. In this case, we shouldn't advertise radix GTSE in
the ibm,arch-vec-5-platform-support DT property as the current code does.
The first reason is that it doesn't make sense, and the second one is that it
causes the CAS-negotiated options subsection to be migrated. This breaks
backward migration to QEMU 2.7 and older versions on POWER8 hosts:

qemu-system-ppc64: error while loading state for instance 0x0 of device
 'spapr'
qemu-system-ppc64: load of migration failed: No such file or directory

This patch hence initializes CPUs a bit earlier so that we can check the
requested compat mode, and doesn't set OV5_MMU_RADIX_GTSE for power8 and
older.

Signed-off-by: Greg Kurz <groug@kaod.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
(cherry picked from commit 0550b1206a)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:03 -05:00
Greg Kurz
0ab2f8e36d target/ppc: always set PPC_MEM_TLBIE in pre 2.8 migration hack
The pseries-2.7 and older machine types require CPUPPCState::insns_flags
to be strictly equal between source and destination. This checking is
abusive and breaks migration of KVM guests when the host CPU models
are different, even if they are compatible enough to allow the guest
to run transparently. This buggy behaviour was fixed for pseries-2.8
and we added some hacks to allow backward migration of older machine
types. These hacks assume that the CPU belongs to the POWER8 family,
which was true for most KVM based setup we cared about at the time.
But now POWER9 systems are coming, and backward migration of pre 2.8
guests running in POWER8 architected mode from a POWER9 host to a
POWER8 host is broken:

qemu-system-ppc64: error while loading state for instance 0x0 of device
 'cpu'
qemu-system-ppc64: load of migration failed: Invalid argument

This happens because POWER9 doesn't set PPC_MEM_TLBIE in insns_flags,
while POWER8 does. Let's force PPC_MEM_TLBIE in the migration hack to
fix the issue. This is an acceptable hack because these old machine
types only support CPU models that do set PPC_MEM_TLBIE.

Signed-off-by: Greg Kurz <groug@kaod.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
(cherry picked from commit bce009645b)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:03 -05:00
Peter Maydell
67bacd5be4 target/arm: Implement v8M VLLDM and VLSTM
For v8M the instructions VLLDM and VLSTM support lazy saving
and restoring of the secure floating-point registers. Even
if the floating point extension is not implemented, these
instructions must act as NOPs in Secure state, so they can
be used as part of the secure-to-nonsecure call sequence.

Fixes: https://bugs.launchpad.net/qemu/+bug/1768295
Cc: qemu-stable@nongnu.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180503105730.5958-1-peter.maydell@linaro.org
(cherry picked from commit b1e5336a98)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:02 -05:00
Henry Wertz
a27d261fe2 tcg/arm: Fix memory barrier encoding
I found with qemu 2.11.x or newer that I would get an illegal instruction
error running some Intel binaries on my ARM chromebook.  On investigation,
I found it was quitting on memory barriers.

qemu instruction:
mb $0x31
was translating as:
0x604050cc:  5bf07ff5  blpl     #0x600250a8

After patch it gives:
0x604050cc:  f57ff05b  dmb      ish

In short, I found INSN_DMB_ISH (memory barrier for ARMv7) appeared to be
correct based on online docs, but due to some endian-related shenanigans it
had to be byte-swapped to suit qemu; it appears INSN_DMB_MCR (memory
barrier for ARMv6) should also be byte-swapped (and this patch does so).
I have not checked for correctness of aarch64's barrier instruction.

Cc: qemu-stable@nongnu.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Henry Wertz <hwertz10@gmail.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
(cherry picked from commit 3f814b8037)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:02 -05:00
Cornelia Huck
834a846eee s390-ccw: force diag 308 subcode to unsigned long
We currently pass an integer as the subcode parameter. However,
the upper bits of the register containing the subcode need to
be 0, which is not guaranteed unless we explicitly specify the
subcode to be an unsigned long value.

Fixes: d046c51dad ("pc-bios/s390-ccw: Get device address via diag 308/6")
Cc: qemu-stable@nongnu.org
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Tested-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Thomas Huth <thuth@redhat.com>
(cherry picked from commit 63d8b5ace3)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:02 -05:00
Viktor Mihajlovski
0304f75f61 s390: Do not pass unofficial IPL type to the guest
IPL over a virtio-scsi device requires special handling not
available in the real architecture. For this purpose the IPL
type 0xFF has been chosen as means of communication between
QEMU and the pc-bios. However, a guest OS could be confused
by seeing an unknown IPL type.

This change sets the IPL parameter type to 0x02 (CCW) to prevent
this. Pre-existing Linux has looked up the IPL parameters only in
the case of FCP IPL. This means that the behavior should stay
the same even if Linux checks for the IPL type unconditionally.

Signed-off-by: Viktor Mihajlovski <mihajlov@linux.vnet.ibm.com>
Message-Id: <1522940844-12336-4-git-send-email-mihajlov@linux.vnet.ibm.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
(cherry picked from commit e8c7ef288a)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:02 -05:00
Eric Blake
955b77f56e nbd/client: Fix error messages during NBD_INFO_BLOCK_SIZE
A missing space makes for poor error messages, and sizes can't
go negative.  Also, we missed diagnosing a server that sends
a maximum block size less than the minimum.

Fixes: 081dd1fe
CC: qemu-stable@nongnu.org
Signed-off-by: Eric Blake <eblake@redhat.com>
Message-Id: <20180501154654.943782-1-eblake@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
(cherry picked from commit e475d108f1)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:02 -05:00
Jason Andryuk
f3f01bab82 ccid: Fix dwProtocols advertisement of T=0
Commit d7d218ef02 attempted to change
dwProtocols to only advertise support for T=0 and not T=1.  The change
was incorrect as it changed 0x00000003 to 0x00010000.

lsusb -v in a linux guest shows:
"dwProtocols         65536  (Invalid values detected)", though the
smart card could still be accessed.  Windows 7 does not detect inserted
smart cards and logs the following error in the Event Logs:

    Source: Smart Card Service
    Event ID: 610
    Smart Card Reader 'QEMU QEMU USB CCID 0' rejected IOCTL SET_PROTOCOL:
    Incorrect function. If this error persists, your smart card or reader
    may not be functioning correctly

    Command Header: 03 00 00 00

Setting to 0x00000001 fixes the Windows issue.

Signed-off-by: Jason Andryuk <jandryuk@gmail.com>
Message-id: 20180420183219.20722-1-jandryuk@gmail.com
Cc: qemu-stable@nongnu.org
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
(cherry picked from commit 0ee86bb6c5)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:02 -05:00
Geert Uytterhoeven
5648a81dd2 device_tree: Increase FDT_MAX_SIZE to 1 MiB
It is not uncommon for a contemporary FDT to be larger than 64 KiB,
leading to failures loading the device tree from sysfs:

    qemu-system-aarch64: qemu_fdt_setprop: Couldn't set ...: FDT_ERR_NOSPACE

Hence increase the limit to 1 MiB, like on PPC.

For reference, the largest arm64 DTB created from the Linux sources is
ca. 75 KiB large (100 KiB when built with symbols/fixup support).

Cc: qemu-stable@nongnu.org
Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
Message-id: 1523541337-23919-1-git-send-email-geert+renesas@glider.be
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
(cherry picked from commit 14ec3cbd7c)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:02 -05:00
Peter Maydell
0580c63b4f hw/char/cmsdk-apb-uart.c: Correctly clear INTSTATUS bits on writes
The CMSDK APB UART INTSTATUS register bits are all write-one-to-clear.
We were getting this correct for the TXO and RXO bits (which need
special casing because their state lives in the STATE register),
but had forgotten to handle the normal bits for RX and TX which
we do store in our s->intstatus field.

Perform the W1C operation on the bits in s->intstatus too.

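Write-one-to-clear in its simplest form, as a sketch (not the cmsdk-apb-uart
code):

    #include <stdint.h>

    static void intstatus_write(uint32_t *intstatus, uint32_t value)
    {
        /* W1C: only the bit positions written as 1 are cleared */
        *intstatus &= ~value;
    }
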
Fixes: https://bugs.launchpad.net/qemu/+bug/1760262
Cc: qemu-stable@nongnu.org
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20180410134203.17552-1-peter.maydell@linaro.org
(cherry picked from commit 6670b494fd)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:02 -05:00
Richard Henderson
b17ed3e1d2 tcg: Introduce tcg_set_insn_start_param
The parameters for tcg_gen_insn_start are target_ulong, which may be split
into two TCGArg parameters for storage in the opcode on 32-bit hosts.

Fixes the ARM target and its direct use of tcg_set_insn_param, which would
set the wrong argument in the 64-on-32 case.

Cc: qemu-stable@nongnu.org
Reported-by: alarson@ddci.com
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180410003558.2470-1-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
(cherry picked from commit 9743cd5736)
 Conflicts:
	target/arm/translate.h
	tcg/tcg.h
* rework to avoid functional dependency on 15fa08f

Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:02 -05:00
Philippe Mathieu-Daudé
44633a272b hw/block/pflash_cfi: fix off-by-one error
ASAN reported:

    hw/block/pflash_cfi02.c:245:33: runtime error: index 82 out of bounds for type 'uint8_t [82]'

Since the 'cfi_len' member is not used, remove it to keep the code safer.

Cc: qemu-stable@nongnu.org
Reported-by: AddressSanitizer
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
(cherry picked from commit 07c13a7172)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:01 -05:00
Greg Kurz
8999a5945f vfio-ccw: fix memory leaks in vfio_ccw_realize()
If the subchannel is already attached or if vfio_get_device() fails, the
code jumps to the 'out_device_err' label and doesn't free the string it
has just allocated.

The code should be reworked so that vcdev->vdev.name only gets set when
the device has been attached, and freed when it is about to be detached.
This could be achieved with the addition of a vfio_ccw_get_device()
function that would be the counterpart of vfio_put_device(). But this is
a more elaborate cleanup that should be done in a follow-up. For now,
let's just add calls to g_free() on the buggy error paths.

Signed-off-by: Greg Kurz <groug@kaod.org>
Message-Id: <152311222681.203086.8874800175539040298.stgit@bahia>
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
(cherry picked from commit be4d026f64)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:01 -05:00
Peter Maydell
28895d021e cpus.c: ensure running CPU recalculates icount deadlines on timer expiry
When we run in TCG icount mode, we calculate the number of instructions
to execute using tcg_get_icount_limit(), which ensures that we stop
execution at the next timer deadline. However there is a bug where
currently we do not recalculate that limit if the guest reprograms
a timer so that the next deadline moves closer, and so we will
continue execution until the original limit and fire the timer
later than we should.

Fix this bug in qemu_timer_notify_cb(): if we are currently running
a VCPU in icount mode, we simply need to kick it out of the main
loop and back to tcg_cpu_exec(), where it will recalculate the
icount limit. If we are not currently running a VCPU, then we
retain the existing logic for waking up a halted CPU.

Cc: qemu-stable@nongnu.org
Fixes: https://bugs.launchpad.net/qemu/+bug/1754038
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-id: 20180406123838.21249-1-peter.maydell@linaro.org
(cherry picked from commit c52e7132d7)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:01 -05:00
Kevin Wolf
12fc0de2ab gluster: Fix blockdev-add with server.N.type=unix
The legacy command line interface gets the socket path from an option
called 'socket'. QAPI in contrast uses SocketAddress, where the
corresponding option is called 'path'.

Fix the gluster block driver to accept both 'socket' and 'path', with
'path' being the preferred syntax.

https://bugzilla.redhat.com/show_bug.cgi?id=1545155

Cc: qemu-stable@nongnu.org
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-id: 20180403110810.25624-1-kwolf@redhat.com
Signed-off-by: Jeff Cody <jcody@redhat.com>
(cherry picked from commit 9dae635afa)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:01 -05:00
Greg Kurz
bb8d4bb3cc exec: fix memory leak in find_max_supported_pagesize()
The string returned by object_property_get_str() is dynamically allocated.

Signed-off-by: Greg Kurz <groug@kaod.org>
Message-Id: <152231458624.69730.1752893648612848392.stgit@bahia.lan>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
(cherry picked from commit 72a841d2a4)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:01 -05:00
Alexandro Sanchez Bach
2b73d5d348 target/i386: Fix andn instruction
In commit 7073fbada7, the `andn` instruction
was implemented via `tcg_gen_andc` but passes the operands in the wrong
order:
- X86 defines `andn dest,src1,src2` as: dest = ~src1 & src2
- TCG defines `andc dest,src1,src2` as: dest = src1 & ~src2

The following simple test shows the issue:

    #include <stdio.h>
    #include <stdint.h>

    int main(void) {
        uint32_t ret = 0;
        __asm (
            "mov $0xFF00, %%ecx\n"
            "mov $0x0F0F, %%eax\n"
            "andn %%ecx, %%eax, %%ecx\n"
            "mov %%ecx, %0\n"
          : "=r" (ret));
        printf("%08X\n", ret);
        return 0;
    }

This patch fixes the problem by simply swapping the order of the last two
arguments in `tcg_gen_andc_tl`.
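
A tiny standalone sketch of the operand-order issue (independent of TCG, for
illustration only):

    #include <stdio.h>

    /* x86 ANDN computes ~src1 & src2, while a generic andc(dest, a, b)
     * computes a & ~b, so the last two operands must be swapped. */
    static unsigned andc(unsigned a, unsigned b) { return a & ~b; }

    int main(void)
    {
        unsigned src1 = 0x0F0F, src2 = 0xFF00;
        unsigned expected = ~src1 & src2;      /* what ANDN should produce */
        unsigned wrong    = andc(src1, src2);  /* operands in the wrong order */
        unsigned right    = andc(src2, src1);  /* the fix: swap the last two  */

        printf("expected=%04X wrong=%04X right=%04X\n", expected, wrong, right);
        return 0;
    }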

Reported-by: Alexandro Sanchez Bach <alexandro@phi.nz>
Signed-off-by: Alexandro Sanchez Bach <alexandro@phi.nz>
Cc: qemu-stable@nongnu.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit 5cd10051c2)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:01 -05:00
Richard Henderson
5aac5f6aee tcg: Mark muluh_i64 and mulsh_i64 as 64-bit ops
Failure to do so results in the tcg optimizer sign-extending
any constant fold from 32-bits.  This turns out to be visible
in the RISC-V testsuite using a host that emits these opcodes
(e.g. any non-x86_64).

Reported-by: Michael Clark <mjc@sifive.com>
Reviewed-by: Emilio G. Cota <cota@braap.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
(cherry picked from commit f2f1dde751)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:01 -05:00
Max Reitz
c793a0debd iotests: Test preallocated truncate of 2G image
Signed-off-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180228131315.30194-3-mreitz@redhat.com
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit 733d1dce0f)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:01 -05:00
Max Reitz
d315353bed block/file-posix: Fix fully preallocated truncate
Storing the lseek() result in an int results in it overflowing when the
file is at least 2 GB big.  Then, we have a 50 % chance of the result
being "negative" and thus thinking an error occurred when actually
everything went just fine.

So we should use the correct type for storing the result: off_t.
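
A standalone sketch of the truncation, assuming a 64-bit off_t; the value is
hard-coded, and on typical two's-complement systems the int copy wraps to a
negative number:

    #include <stdio.h>
    #include <sys/types.h>

    int main(void)
    {
        off_t result = 2147483648LL;    /* what lseek() returns for a 2 GiB file */
        int   truncated = (int)result;  /* the old code stored it like this */

        if (truncated < 0) {
            printf("looks like an error: %d\n", truncated);
        }
        printf("correct off_t value: %lld\n", (long long)result);
        return 0;
    }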

Reported-by: Daniel P. Berrange <berrange@redhat.com>
Buglink: https://bugzilla.redhat.com/show_bug.cgi?id=1549231
Cc: qemu-stable@nongnu.org
Signed-off-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180228131315.30194-2-mreitz@redhat.com
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit 82b45e0a0b)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:01 -05:00
Michal Privoznik
332969a64d qemu-pr-helper: Actually allow users to specify pidfile
Due to an incorrect specification of arguments to getopt_long(), any
attempt to set the pidfile resulted in:

1) the default being leaked
2) the @pidfile variable being set to NULL (because optarg is
NULL without this patch).
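
For illustration only (option names and the default path are made up), a
standalone sketch of a getopt_long() option that takes an argument and frees
the old default so nothing leaks:

    #include <getopt.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(int argc, char **argv)
    {
        /* An option with an argument must be declared with required_argument
         * (and a ':' in the short-option string); otherwise optarg is NULL. */
        static const struct option long_opts[] = {
            { "pidfile", required_argument, NULL, 'f' },
            { 0, 0, 0, 0 },
        };
        char *pidfile = strdup("/run/example.pid");   /* made-up default */
        int c;

        while ((c = getopt_long(argc, argv, "f:", long_opts, NULL)) != -1) {
            if (c == 'f') {
                free(pidfile);              /* do not leak the default */
                pidfile = strdup(optarg);   /* optarg is non-NULL here */
            }
        }
        printf("pidfile: %s\n", pidfile);
        free(pidfile);
        return 0;
    }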

Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
Message-Id: <6f10cd53d361a395aa0e85a9311ec4e9a8fc11e5.1521868451.git.mprivozn@redhat.com>
Cc: qemu-stable@nongnu.org
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit f8e1a98964)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:01 -05:00
Greg Kurz
db9c0d907e virtio_net: flush uncompleted TX on reset
If the backend could not transmit a packet right away for some reason,
the packet is queued for asynchronous sending. The corresponding vq
element is tracked in the async_tx.elem field of the VirtIONetQueue,
for later freeing when the transmission is complete.

If a reset happens before completion, virtio_net_tx_complete() will push
async_tx.elem back to the guest anyway, and we end up with the inuse flag
of the vq being equal to -1. The next call to virtqueue_pop() is then
likely to fail with "Virtqueue size exceeded".

This can be reproduced easily by starting a guest with a hubport backend
that is not connected to a functional network, eg,

 -device virtio-net-pci,netdev=hub0 -netdev hubport,id=hub0,hubid=0

and no other -netdev hubport,hubid=0 on the command line.

The appropriate fix is to ensure that such an asynchronous transmission
cannot survive a device reset. So for all queues, we first try to send
the packet again, and eventually we purge it if the backend still could
not deliver it.

CC: qemu-stable@nongnu.org
Reported-by: R. Nageswara Sastry <nasastry@in.ibm.com>
Buglink: https://github.com/open-power-host-os/qemu/issues/37
Signed-off-by: Greg Kurz <groug@kaod.org>
Tested-by: R. Nageswara Sastry <nasastry@in.ibm.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
(cherry picked from commit 94b52958b7)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:00 -05:00
Victor Kamensky
a053477b7c arm/translate-a64: treat DISAS_UPDATE as variant of DISAS_EXIT
In the OE project, a 4.15 Linux kernel boot hang was observed under
single-cpu aarch64 qemu. Kernel code was in a loop waiting for the
vtimer to arrive, spinning in TCG generated blocks, while the interrupt
was pending unprocessed. This happened because when qemu tried to
handle the vtimer interrupt the target had interrupts disabled; as a
result the flag indicating TCG exit, cpu->icount_decr.u16.high,
was cleared, but the arm_cpu_exec_interrupt function did not call
arm_cpu_do_interrupt to process the interrupt. Later, when the target
reenabled interrupts, it happened without an exit into the main loop, so
the following code, which waited for the result of interrupt handling,
ran in an infinite loop.

To solve the problem, instructions that operate on CPU sys state
(i.e. enable/disable interrupts), and are marked as DISAS_UPDATE,
should be considered a variant of DISAS_EXIT and should be
forced to exit back to the main loop so qemu has a chance to
process pending CPU state updates, including pending
interrupts.

This change brings consistency with how DISAS_UPDATE is treated
in aarch32 case.

CC: Peter Maydell <peter.maydell@linaro.org>
CC: Alex Bennée <alex.bennee@linaro.org>
CC: qemu-stable@nongnu.org
Suggested-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Victor Kamensky <kamensky@cisco.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 1521526368-1996-1-git-send-email-kamensky@cisco.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
(cherry picked from commit a75a52d624)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:00 -05:00
Kevin Wolf
7c22c5a3a7 tests/multiboot: Add .gitignore
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Jack Schwartz <jack.schwartz@oracle.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
(cherry picked from commit e2679395d5)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:00 -05:00
Kevin Wolf
678a75b8b5 tests/multiboot: Add tests for the a.out kludge
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Jack Schwartz <jack.schwartz@oracle.com>
(cherry picked from commit 1c8c426fb4)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:00 -05:00
Kevin Wolf
2392a86d2d tests/multiboot: Test exit code for every qemu run
Testing the exit code only once after a whole group of tests has
completed is not enough, it catches errors only in the very last qemu
invocation. We need to have the check after each qemu run.

The logging and diff with the reference output is still done once per
group to keep things more managable. This is not a problem because the
log file accumulates the output of all runs.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Jack Schwartz <jack.schwartz@oracle.com>
(cherry picked from commit 49713c413a)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:00 -05:00
Kevin Wolf
32ccaed1ae multiboot: Check validity of mh_header_addr
I couldn't find a case where this prevents something bad from happening
that isn't already caught by other checks, but let's err on the safe
side and check that mh_header_addr is as expected.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Jack Schwartz <jack.schwartz@oracle.com>
(cherry picked from commit dbf2dce7aa)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:00 -05:00
Kevin Wolf
31b0eb0dad multiboot: Reject kernels exceeding the address space
The code path where mh_load_end_addr is non-zero in the Multiboot
header checks that mh_load_end_addr >= mh_load_addr and so
mb_load_size is checked.  However, mb_load_size is not checked when it is
calculated from the file size, i.e. when mh_load_end_addr is 0.

If the kernel binary size is larger than can fit in the address space
after load_addr, we ended up with a kernel_size that is smaller than
load_size, which means that we read the file into a too small buffer.

Add a check to reject kernel files with such Multiboot headers.
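
An illustrative standalone sketch of such a check; the constants and names
here are made up and this is not the actual multiboot.c code:

    #include <stdint.h>
    #include <stdio.h>

    /* Reject a kernel whose file-derived load size does not fit into the
     * 32-bit address space above load_addr. */
    static int load_size_valid(uint32_t load_addr, uint64_t file_size)
    {
        uint64_t space = 0x100000000ULL - load_addr;  /* bytes above load_addr */

        if (file_size > space) {
            fprintf(stderr, "kernel does not fit into the address space\n");
            return 0;
        }
        return 1;
    }

    int main(void)
    {
        printf("%d\n", load_size_valid(0xFFF00000u, 0x200000));  /* rejected */
        printf("%d\n", load_size_valid(0x00100000u, 0x200000));  /* accepted */
        return 0;
    }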

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Jack Schwartz <jack.schwartz@oracle.com>
(cherry picked from commit b17a9054a0)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:00 -05:00
Jack Schwartz
bbdcfd2dbc multiboot: fprintf(stderr...) -> error_report()
Change all fprintf(stderr...) calls in hw/i386/multiboot.c to call
error_report() instead, including the mb_debug macro.  Remove the "\n"
from strings passed to all modified calls, since error_report() appends
one.

Signed-off-by: Jack Schwartz <jack.schwartz@oracle.com>
Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
(cherry picked from commit 4b9006a41e)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:00 -05:00
Jack Schwartz
ddf965ef9d multiboot: Use header names when displaying fields
Refer to field names when displaying fields in printf and debug statements.

Signed-off-by: Jack Schwartz <jack.schwartz@oracle.com>
Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
(cherry picked from commit ce5eb6dc4d)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:00 -05:00
Jack Schwartz
a030730621 multiboot: Remove unused variables from multiboot.c
Remove unused variables: mh_mode_type, mh_width, mh_height, mh_depth

Signed-off-by: Jack Schwartz <jack.schwartz@oracle.com>
Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
Reviewed-by: Prasad J Pandit <pjp@fedoraproject.org>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
(cherry picked from commit 7a2e43cc96)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:00 -05:00
Jack Schwartz
059a6962b2 multiboot: bss_end_addr can be zero
The multiboot spec (https://www.gnu.org/software/grub/manual/multiboot/),
section 3.1.3, allows for bss_end_addr to be zero.

A zero bss_end_addr signifies there is no .bss section.

Suggested-by: Daniel Kiper <daniel.kiper@oracle.com>
Signed-off-by: Jack Schwartz <jack.schwartz@oracle.com>
Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
Reviewed-by: Prasad J Pandit <pjp@fedoraproject.org>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
(cherry picked from commit 2a8fcd119e)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:45:00 -05:00
Peter Lieven
ab2dfe8d17 migration/block: reset dirty bitmap before read in bulk phase
Reset the dirty bitmap before reading to make sure we don't miss
any new data.

Cc: qemu-stable@nongnu.org
Signed-off-by: Peter Lieven <pl@kamp.de>
Message-Id: <1520507908-16743-3-git-send-email-pl@kamp.de>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
(cherry picked from commit 86b124bc76)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:44:59 -05:00
Paolo Bonzini
cebb7fea7c memory: fix flatview_access_valid RCU read lock/unlock imbalance
Fixes: 11e732a5ed
Reported-by: Cornelia Huck <cohuck@redhat.com>
Reported-by: luigi burdo <intermediadc@hotmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Tested-by: Cornelia Huck <cohuck@redhat.com>
Tested-by: Thomas Huth <thuth@redhat.com>
Message-id: 20180307130238.19358-1-pbonzini@redhat.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
(cherry picked from commit b39b61e410)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:44:59 -05:00
Paolo Bonzini
aedaf01f8c address_space_rw: address_space_to_flatview needs RCU lock
address_space_rw is calling address_space_to_flatview but it can
be called outside the RCU lock.  To fix it, transform flatview_rw
into address_space_rw, since flatview_rw is otherwise unused.

Reviewed-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit db84fd973e)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:44:59 -05:00
Paolo Bonzini
a7f4d8a105 address_space_map: address_space_to_flatview needs RCU lock
address_space_map is calling address_space_to_flatview but it can
be called outside the RCU lock.  The function itself is calling
rcu_read_lock/rcu_read_unlock, just in the wrong place, so the
fix is easy.

Reviewed-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit ad0c60fa57)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:44:59 -05:00
Paolo Bonzini
fa876bc97a address_space_access_valid: address_space_to_flatview needs RCU lock
address_space_access_valid is calling address_space_to_flatview but it can
be called outside the RCU lock.  To fix it, push the rcu_read_lock/unlock
pair up from flatview_access_valid to address_space_access_valid.
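
The pattern, reduced to a standalone sketch with stand-in functions (not the
real QEMU RCU API usage):

    #include <stdio.h>

    /* The lookup that dereferences RCU-protected state must sit inside the
     * rcu_read_lock()/rcu_read_unlock() pair, so the pair is pushed up into
     * the public entry point instead of the internal helper. */
    static void rcu_read_lock(void)       { puts("rcu_read_lock"); }
    static void rcu_read_unlock(void)     { puts("rcu_read_unlock"); }
    static int  lookup_and_validate(void) { puts("lookup under RCU"); return 1; }

    static int public_entry_point(void)
    {
        int ok;

        rcu_read_lock();                /* taken before the flatview lookup */
        ok = lookup_and_validate();
        rcu_read_unlock();
        return ok;
    }

    int main(void)
    {
        return public_entry_point() ? 0 : 1;
    }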

Reviewed-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit 11e732a5ed)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:44:59 -05:00
Paolo Bonzini
f77c231202 address_space_read: address_space_to_flatview needs RCU lock
address_space_read is calling address_space_to_flatview but it can
be called outside the RCU lock.  To fix it, push the rcu_read_lock/unlock
pair up from flatview_read_full to address_space_read's constant size
fast path and address_space_read_full.

Reviewed-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit b2a44fcad7)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:44:59 -05:00
Paolo Bonzini
df04d1f16a address_space_write: address_space_to_flatview needs RCU lock
address_space_write is calling address_space_to_flatview but it can
be called outside the RCU lock.  To fix it, push the rcu_read_lock/unlock
pair up from flatview_write to address_space_write.

Reviewed-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit 4c6ebbb364)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:44:59 -05:00
Paolo Bonzini
ac25a3257f memory: inline some performance-sensitive accessors
These accessors are called from inlined functions, and the call sequence
is much more expensive than just inlining the access.  Move the
struct declaration to memory-internal.h so that exec.c and memory.c
can both use an inline function.

Reviewed-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit 785a507ec7)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:44:59 -05:00
Paolo Bonzini
8e8b73992c openpic_kvm: drop address_space_to_flatview call
The MemoryListener is registered on address_space_memory, there is
not much to assert.  This currently works because the callback
is invoked only once when the listener is registered, but section->fv
is the _new_ FlatView, not the old one on later calls and that
would break.

This confines address_space_to_flatview to exec.c and memory.c.

Acked-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit 80d2b933f9)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:44:59 -05:00
KONRAD Frederic
499211829d sparc: fix leon3 casa instruction when MMU is disabled
Since the commit af7a06bac7:
`casa [..](10), .., ..` (and probably others alternate space instructions)
triggers a data access exception when the MMU is disabled.

When we enter get_asi(...) dc->mem_idx is set to MMU_PHYS_IDX when the MMU
is disabled. Just keep mem_idx unchanged in this case so we pass through the
MMU when it is disabled.

Signed-off-by: KONRAD Frederic <frederic.konrad@adacore.com>
Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
(cherry picked from commit 6e10f37c86)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:44:58 -05:00
Max Filippov
e1f5a04d17 linux-user: fix target_mprotect/target_munmap error return values
target_mprotect/target_munmap return value goes through get_errno at the
call site, thus the functions must either set errno to host error code
and return -1 or return negative guest error code. Do the latter.
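
As a standalone illustration of that convention (the function name is made
up):

    #include <errno.h>
    #include <stdio.h>

    /* The caller feeds the return value through a get_errno()-style helper,
     * so failures are reported as a negative error code, not errno plus -1. */
    static long target_mprotect_like(int should_fail)
    {
        if (should_fail) {
            return -ENOMEM;     /* negative guest error code */
        }
        return 0;
    }

    int main(void)
    {
        long ret = target_mprotect_like(1);

        printf("ret = %ld (%s)\n", ret, ret < 0 ? "error" : "ok");
        return 0;
    }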

Cc: qemu-stable@nongnu.org
Cc: Riku Voipio <riku.voipio@iki.fi>
Cc: Laurent Vivier <laurent@vivier.eu>
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Reviewed-by: Laurent Vivier <laurent@vivier.eu>
Message-Id: <20180228221609.11265-8-jcmvbkbc@gmail.com>
Signed-off-by: Laurent Vivier <laurent@vivier.eu>
(cherry picked from commit 78cf339039)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:44:58 -05:00
Max Filippov
8fc971ed1b linux-user: fix assertion in shmdt
shmdt fails to call mmap_lock/mmap_unlock around page_set_flags,
resulting in the following assertion:
  page_set_flags: Assertion `have_mmap_lock()' failed.

Wrap shmdt internals into mmap_lock/mmap_unlock.

Cc: qemu-stable@nongnu.org
Cc: Riku Voipio <riku.voipio@iki.fi>
Cc: Laurent Vivier <laurent@vivier.eu>
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Reviewed-by: Laurent Vivier <laurent@vivier.eu>
Message-Id: <20180228221609.11265-7-jcmvbkbc@gmail.com>
Signed-off-by: Laurent Vivier <laurent@vivier.eu>
(cherry picked from commit 3c5f6a5f88)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:44:58 -05:00
Max Filippov
1801fabd9b linux-user: fix mmap/munmap/mprotect/mremap/shmat
In linux-user QEMU that runs for a target with TARGET_ABI_BITS bigger
than L1_MAP_ADDR_SPACE_BITS an assertion in page_set_flags fires when
mmap, munmap, mprotect, mremap or shmat is called for an address outside
the guest address space. mmap and mprotect should return ENOMEM in such
case.

Change definition of GUEST_ADDR_MAX to always be the last valid guest
address. Account for this change in open_self_maps.
Add macro guest_addr_valid that verifies if the guest address is valid.
Add function guest_range_valid that verifies if address range is within
guest address space and does not wrap around. Use that macro in
mmap/munmap/mprotect/mremap/shmat for error checking.
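
A standalone sketch of such a range check; the limit below is made up for a
32-bit guest and is not the real GUEST_ADDR_MAX:

    #include <stdint.h>
    #include <stdio.h>

    #define EXAMPLE_GUEST_ADDR_MAX 0xFFFFFFFFULL   /* last valid guest address */

    /* A range is valid only if it ends at or below the last valid guest
     * address and does not wrap around. */
    static int guest_range_valid(uint64_t start, uint64_t len)
    {
        if (len == 0) {
            return start <= EXAMPLE_GUEST_ADDR_MAX;
        }
        return start <= EXAMPLE_GUEST_ADDR_MAX &&
               len - 1 <= EXAMPLE_GUEST_ADDR_MAX - start;
    }

    int main(void)
    {
        printf("%d\n", guest_range_valid(0xFFFFF000ULL, 0x1000));   /* 1: fits   */
        printf("%d\n", guest_range_valid(0xFFFFF000ULL, 0x2000));   /* 0: beyond */
        return 0;
    }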

Cc: qemu-stable@nongnu.org
Cc: Riku Voipio <riku.voipio@iki.fi>
Cc: Laurent Vivier <laurent@vivier.eu>
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Reviewed-by: Laurent Vivier <laurent@vivier.eu>
Message-Id: <20180307215010.30706-1-jcmvbkbc@gmail.com>
Signed-off-by: Laurent Vivier <laurent@vivier.eu>
(cherry picked from commit ebf9a3630c)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:44:58 -05:00
Max Filippov
2ac88347fa target/xtensa: dump correct physical registers
xtensa_cpu_dump_state outputs CPU physical registers as is, without
synchronization from current window. That may result in different values
printed for the current window and corresponding physical registers.
Synchronize physical registers from window before dumping.

Cc: qemu-stable@nongnu.org
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
(cherry picked from commit b55b1afda9)
 Conflicts:
	target/xtensa/translate.c
* drop context dependencies
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:44:58 -05:00
Mark Cave-Ayland
1f4637953e loader: don't perform overlapping address check for memory region ROM images
All memory region ROM images have a base address of 0 which causes the overlapping
address check to fail if more than one memory region ROM image is present, or an
existing ROM image is loaded at address 0.

Make sure that we ignore the overlapping address check in
rom_check_and_register_reset() if this is a memory region ROM image. In particular
this fixes the "rom: requested regions overlap" error on startup when trying to
run qemu-system-sparc with a -kernel image since commit 7497638642: "tcx: switch to
load_image_mr() and remove prom_addr hack".

Suggested-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
(cherry picked from commit ca316c1152)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:44:58 -05:00
Stefan Berger
e2fc495b4a tpm: Set the flags of the CMD_INIT command to 0
The flags of the CMD_INIT control channel command were not
initialized properly. Fix this and set to 0.

Signed-off-by: Stefan Berger <stefanb@linux.vnet.ibm.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
(cherry picked from commit 3027058764)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:44:58 -05:00
Kevin Wolf
4f81878152 rbd: Fix use after free in qemu_rbd_set_keypairs() error path
If we want to include the invalid option name in the error message, we
can't free the string earlier than that.

Cc: qemu-stable@nongnu.org
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
(cherry picked from commit 71c87815f9)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:44:58 -05:00
Alberto Garcia
6f36bd3969 specs/qcow2: Fix documentation of the compressed cluster descriptor
This patch fixes several mistakes in the documentation of the
compressed cluster descriptor:

1) the documentation claims that the cluster descriptor contains the
   number of sectors used to store the compressed data, but what it
   actually contains is the number of sectors *minus one* or, in other
   words, the number of additional sectors after the first one.

2) the width of the fields is incorrectly specified. The number of bits
   used by each field is

      x = 62 - (cluster_bits - 8)   for the offset field
      y = (cluster_bits - 8)        for the size field

   So the offset field's location is [0, x-1], not [0, x] as stated.

3) the size field does not contain the size of the compressed data,
   but rather the number of sectors where that data is stored. The
   compressed data starts at the exact point specified in the offset
   field and ends when there's enough data to produce a cluster of
   decompressed data. Both points can be in the middle of a sector,
   allowing several compressed clusters to be stored next to one
   another, sharing sectors if necessary.
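
A small standalone example working through the field widths given in point 2
above, for cluster_bits = 16; the descriptor contents are made up:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned cluster_bits = 16;               /* 64 KiB clusters */
        unsigned x = 62 - (cluster_bits - 8);     /* offset field width: 54 */
        unsigned y = cluster_bits - 8;            /* size field width: 8 */

        /* Made-up descriptor: offset 0x12345, size field 3 (i.e. 4 sectors). */
        uint64_t descriptor = ((uint64_t)3 << x) | 0x12345;
        uint64_t offset  = descriptor & (((uint64_t)1 << x) - 1);
        uint64_t sectors = ((descriptor >> x) & (((uint64_t)1 << y) - 1)) + 1;

        printf("x=%u y=%u offset=0x%llx sectors=%llu\n",
               x, y, (unsigned long long)offset, (unsigned long long)sectors);
        return 0;
    }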

Cc: qemu-stable@nongnu.org
Signed-off-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
(cherry picked from commit 156b46ded3)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:44:58 -05:00
Eric Blake
0430aa089c nbd: Honor server's advertised minimum block size
Commit 79ba8c98 (v2.7) changed the setting of request_alignment
to occur only during bdrv_refresh_limits(), rather than at at
bdrv_open() time; but at the time, NBD was unaffected, because
it still used sector-based callbacks, so the block layer
defaulted NBD to use 512 request_alignment.

Later, commit 70c4fb26 (also v2.7) changed NBD to use byte-based
callbacks, without setting request_alignment.  This resulted in
NBD using request_alignment of 1, which works great when the
server supports it (as is the case for qemu-nbd), but falls apart
miserably if the server requires alignment (but only if qemu
actually sends a sub-sector request; qemu-io can do it, but
most qemu operations still perform on sectors or larger).

Even later, the NBD protocol was updated to document that clients
should learn the server's minimum alignment during NBD_OPT_GO;
and recommended that clients should assume a minimum size of 512
unless the server understands NBD_OPT_GO and replied with a smaller
size.  Commit 081dd1fe (v2.10) attempted to do that, by assigning
request_alignment to whatever was learned from the server; but
it has two flaws: the assignment is done during bdrv_open() so
it gets unconditionally wiped out back to 1 during any later
bdrv_refresh_limits(); and the code is not using a default of 512
when the server did not report a minimum size.

Fix these issues by moving the assignment to request_alignment
to the right function, and by using a sane default when the
server does not advertise a minimum size.

CC: qemu-stable@nongnu.org
Signed-off-by: Eric Blake <eblake@redhat.com>
Message-Id: <20180215032905.27146-1-eblake@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy<vsementsov@virtuozzo.com>
(cherry picked from commit fd8d372dd3)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:44:58 -05:00
Greg Kurz
327d4645a0 spapr: make pseries-2.11 the default machine type
The spapr capability framework was introduced in QEMU 2.12. It allows
explicit control over how host features are exposed to the
guest. This is especially needed to handle migration between
heterogeneous hosts (eg, POWER8 to POWER9). It is also used to expose fixes/
workarounds against speculative execution vulnerabilities to guests.
The framework was hence backported to QEMU 2.11.1, especially these
commits:

0fac4aa930 spapr: Add pseries-2.12 machine type
9070f408f4 spapr: Treat Hardware Transactional Memory (HTM) as an
 optional capability

0fac4aa930 has the confusing effect of making pseries-2.12 the default
machine type for QEMU 2.11.1, instead of the expected pseries-2.11. This
patch changes the default machine back to pseries-2.11.

Unfortunately, 9070f408f4 enforces the HTM capability for pseries-2.11
to be enabled by default, ie, when not passing cap-htm on the command
line. This breaks several 'make check' testcases that run qemu-system-ppc64
with TCG.

The only sane way to fix this is to adapt the impacted testcases so that
they all pass cap-htm=off in this case. This patch does that as well.

Signed-off-by: Greg Kurz <groug@kaod.org>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 20:44:16 -05:00
Tiwei Bie
b940928fa0 virtio-balloon: unref the memory region before continuing
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Cc: qemu-stable@nongnu.org
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit b86107ab43)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 15:20:44 -05:00
Laszlo Ersek
c3c44a4fb0 pci-bridge/i82801b11: clear bridge registers on platform reset
The "i82801b11-bridge" device model is a descendant of "base-pci-bridge"
(TYPE_PCI_BRIDGE). However, unlike other similar devices, such as

- pci-bridge,
- pcie-pci-bridge,
- PCIE Root Port,
- xio3130 switch upstream and downstream ports,
- dec-21154-p2p-bridge,
- pbm-bridge,
- xilinx-pcie-root,

"i82801b11-bridge" does not clear the bridge specific registers at
platform reset.

This is a problem because devices on "i82801b11-bridge" continue to
respond to config space cycles after platform reset, when addressed with
the bus number that was previously programmed into the secondary bus
number register of "i82801b11-bridge". This error breaks OVMF's search for
extra (PXB) root buses, for example.

The device class reset method for "i82801b11-bridge" is currently NULL;
set it directly to pci_bridge_reset(), like the last three bridge models
in the above listing do.

Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Marcel Apfelbaum <marcel@redhat.com>
Cc: qemu-stable@nongnu.org
Ref: https://bugzilla.redhat.com/show_bug.cgi?id=1541839
Signed-off-by: Laszlo Ersek <lersek@redhat.com>
Reviewed-by: Marcel Apfelbaum <marcel@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit ed247f40db)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 15:20:44 -05:00
Murilo Opsfelder Araujo
82c5191f9c block/ssh: fix possible segmentation fault when .desc is not null-terminated
This patch prevents a possible segmentation fault when .desc members are checked
against NULL.

The ssh_runtime_opts was added by commit
8a6a80896d ("block/ssh: Use QemuOpts for runtime
options").

This fix was inspired by
http://lists.nongnu.org/archive/html/qemu-devel/2018-01/msg00883.html.

Fixes: 8a6a80896d ("block/ssh: Use QemuOpts for runtime options")
Cc: Max Reitz <mreitz@redhat.com>
Cc: Eric Blake <eblake@redhat.com>
Signed-off-by: Murilo Opsfelder Araujo <muriloo@linux.vnet.ibm.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
Signed-off-by: Jeff Cody <jcody@redhat.com>
(cherry picked from commit fbd5c4c0db)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-06-20 15:20:44 -05:00
Greg Kurz
72cc467aab spapr: register dummy ICPs later
Some older machine types create more ICPs than needed. We hence
need to register up to xics_max_server_number() dummy ICPs to
accommodate the migration of these machine types.

Recent VSMT rework changed xics_max_server_number() to return

    DIV_ROUND_UP(max_cpus * spapr->vsmt, smp_threads)

instead of

    DIV_ROUND_UP(max_cpus * kvmppc_smt_threads(), smp_threads);

The change is okay but it requires spapr->vsmt to be set, which
isn't the case with the current code. This causes the formula to
return zero and we don't create dummy ICPs. This breaks migration
of older guests as reported here:

    https://bugzilla.redhat.com/show_bug.cgi?id=1549087

The dummy ICP workaround doesn't really have a dependency on XICS
itself. But it does depend on proper VCPU id numbering and it must
be applied before creating vCPUs (ie, creating real ICPs). So this
patch moves the workaround to spapr_init_cpus(), which already
assumes VSMT to be set.

Fixes: 72194664c8 ("spapr: use spapr->vsmt to compute VCPU ids")
Reported-by: Lukas Doktor <ldoktor@redhat.com>
Signed-off-by: Greg Kurz <groug@kaod.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
(cherry picked from commit 72fdd4de8e)
Signed-off-by: Greg Kurz <groug@kaod.org>
2018-05-22 12:59:00 +02:00
Greg Kurz
184da186db spapr: fix missing CPU core nodes in DT when running with TCG
Commit 5d0fb1508e "spapr: consolidate the VCPU id numbering logic
in a single place" introduced a helper to detect thread0 of a virtual
core based on its VCPU id. This is used to create CPU core nodes in
the DT, but it is broken in TCG.

$ qemu-system-ppc64 -nographic -accel tcg -machine dumpdtb=dtb.bin \
                    -smp cores=16,maxcpus=16,threads=1
$ dtc -f -O dts dtb.bin | grep POWER8
                PowerPC,POWER8@0 {
                PowerPC,POWER8@8 {

instead of the expected 16 cores that we get with KVM:

$ dtc -f -O dts dtb.bin | grep POWER8
                PowerPC,POWER8@0 {
                PowerPC,POWER8@8 {
                PowerPC,POWER8@10 {
                PowerPC,POWER8@18 {
                PowerPC,POWER8@20 {
                PowerPC,POWER8@28 {
                PowerPC,POWER8@30 {
                PowerPC,POWER8@38 {
                PowerPC,POWER8@40 {
                PowerPC,POWER8@48 {
                PowerPC,POWER8@50 {
                PowerPC,POWER8@58 {
                PowerPC,POWER8@60 {
                PowerPC,POWER8@68 {
                PowerPC,POWER8@70 {
                PowerPC,POWER8@78 {

This happens because spapr_get_vcpu_id() maps VCPU ids to
cs->cpu_index in TCG mode. This confuses the code in
spapr_is_thread0_in_vcore(), since it assumes thread0 VCPU
ids to have a spapr->vsmt spacing.

    spapr_get_vcpu_id(cpu) % spapr->vsmt == 0

Actually, there's no real reason to expose cs->cpu_index instead
of the VCPU id, since we also generate it with TCG. Also we already
set it explicitly in spapr_set_vcpu_id(), so there's no real reason
either to call kvm_arch_vcpu_id() with KVM.

This patch unifies spapr_get_vcpu_id() to always return the computed
VCPU id both in TCG and KVM. This is one step forward towards KVM<->TCG
migration.

Fixes: 5d0fb1508e
Reported-by: Cédric Le Goater <clg@kaod.org>
Signed-off-by: Greg Kurz <groug@kaod.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
(cherry picked from commit b1a568c1c2)
Signed-off-by: Greg Kurz <groug@kaod.org>
2018-05-22 12:59:00 +02:00
Greg Kurz
e676038e60 spapr: consolidate the VCPU id numbering logic in a single place
Several places in the code need to calculate a VCPU id:

    (cpu_index / smp_threads) * spapr->vsmt + cpu_index % smp_threads
    (core_id / smp_threads) * spapr->vsmt (1 user)
    index * spapr->vsmt (2 users)

or guess that the VCPU id of a given VCPU is the first thread of a virtual
core:

    index % spapr->vsmt != 0

Even if the numbering logic isn't that complex, it is rather fragile to
have these assumptions open-coded in several places, as recent
VSMT-related issues have shown.

This patch moves the VCPU id formula to a single function to be called
everywhere the code needs to compute one. It also adds a helper to
guess if a VCPU is the first thread of a VCORE.
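
A worked standalone example of that formula and helper, with made-up values
(vsmt = 8, smp_threads = 4):

    #include <stdio.h>

    static const int vsmt = 8, smp_threads = 4;

    /* VCPU id derived from a CPU index, as in the first formula above. */
    static int vcpu_id(int cpu_index)
    {
        return (cpu_index / smp_threads) * vsmt + cpu_index % smp_threads;
    }

    /* Thread 0 of a virtual core sits on a multiple of the vsmt spacing. */
    static int is_thread0_in_vcore(int id)
    {
        return id % vsmt == 0;
    }

    int main(void)
    {
        for (int i = 0; i < 8; i++) {
            int id = vcpu_id(i);
            printf("cpu_index=%d -> vcpu_id=%d%s\n", i, id,
                   is_thread0_in_vcore(id) ? " (thread0)" : "");
        }
        return 0;
    }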

Signed-off-by: Greg Kurz <groug@kaod.org>
[dwg: Rename spapr_is_vcore() to spapr_is_thread0_in_vcore() for clarity]
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
(cherry picked from commit 5d0fb1508e)
Signed-off-by: Greg Kurz <groug@kaod.org>
2018-05-22 12:58:59 +02:00
Greg Kurz
094706e69f spapr: rename spapr_vcpu_id() to spapr_get_vcpu_id()
The spapr_vcpu_id() function is an accessor actually. Let's rename it
for symmetry with the recently added spapr_set_vcpu_id() helper.

The motivation behind this is that a later patch will consolidate
the VCPU id formula in a function and spapr_vcpu_id looks like an
appropriate name.

Signed-off-by: Greg Kurz <groug@kaod.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
(cherry picked from commit 14bb4486c8)
Signed-off-by: Greg Kurz <groug@kaod.org>
2018-05-22 12:58:59 +02:00
David Gibson
8c0ec3c398 target/ppc: Clarify compat mode max_threads value
We recently had some discussions that were sidetracked for a while, because
nearly everyone misapprehended the purpose of the 'max_threads' field in
the compatibility modes table.  It's all about guest expectations, not host
expectations or support (that's handled elsewhere).

In an attempt to avoid a repeat of that confusion, rename the field to
'max_vthreads' and add an explanatory comment.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Laurent Vivier <lvivier@redhat.com>
Reviewed-by: Greg Kurz <groug@kaod.org>
Reviewed-by: Jose Ricardo Ziviani <joserz@linux.vnet.ibm.com>
(cherry picked from commit abbc124753)
Signed-off-by: Greg Kurz <groug@kaod.org>
2018-05-22 12:58:59 +02:00
Greg Kurz
a8074a5895 spapr: move VCPU calculation to core machine code
The VCPU ids are currently computed and assigned to each individual
CPU threads in spapr_cpu_core_realize(). But the numbering logic
of VCPU ids is actually a machine-level concept, and many places
in hw/ppc/spapr.c also have to compute VCPU ids out of CPU indexes.

The current formula used in spapr_cpu_core_realize() is:

    vcpu_id = (cc->core_id * spapr->vsmt / smp_threads) + i

where:

    cc->core_id is a multiple of smp_threads
    cpu_index = cc->core_id + i
    0 <= i < smp_threads

So we have:

    cpu_index % smp_threads == i
    cc->core_id / smp_threads == cpu_index / smp_threads

hence:

    vcpu_id =
        (cpu_index / smp_threads) * spapr->vsmt + cpu_index % smp_threads;

This formula was used before VSMT, back when VCPU ids were computed
at the target emulation level. It has the advantage of being usable
to derive a VCPU id out of a CPU index only, which makes it suitable for
all the places where the machine code has to compute a VCPU id.

This patch introduces an accessor to set the VCPU id in a PowerPCCPU object
using the above formula. It is a first step to consolidate all the VCPU id
logic in a single place.

Signed-off-by: Greg Kurz <groug@kaod.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
(cherry picked from commit 648edb6475)
Signed-off-by: Greg Kurz <groug@kaod.org>
2018-05-22 12:58:59 +02:00
Greg Kurz
cc86c46758 spapr: use spapr->vsmt to compute VCPU ids
Since the introduction of VSMT in 2.11, the spacing of VCPU ids
between cores is controllable through a machine property instead
of being only dictated by the SMT mode of the host:

    cpu->vcpu_id = (cc->core_id * spapr->vsmt / smp_threads) + i

Until recently, the machine code would try to change the SMT mode
of the host to be equal to VSMT or exit. This allowed the rest of
the code to assume that kvmppc_smt_threads() == spapr->vsmt is
always true.

Recent commit "8904e5a75005 spapr: Adjust default VSMT value for
better migration compatibility" relaxed the rule. If the VSMT
mode cannot be set in KVM for some reasons, but the requested
CPU topology is compatible with the current SMT mode, then we
let the guest run with kvmppc_smt_threads() != spapr->vsmt.

This breaks quite a few places in the code, in particular when
calculating DRC indexes.

This is what happens on a POWER host with subcores-per-core=2 (ie,
supports up to SMT4) when passing the following topology:

    -smp threads=4,maxcpus=16 \
    -device host-spapr-cpu-core,core-id=4,id=core1 \
    -device host-spapr-cpu-core,core-id=8,id=core2

qemu-system-ppc64: warning: Failed to set KVM's VSMT mode to 8 (errno -22)

This is expected since KVM is limited to SMT4, but the guest is started
anyway because this topology can run on SMT4 even with a VSMT8 spacing.

But when we look at the DT, things get nastier:

cpus {
        ...
        ibm,drc-indexes = <0x4 0x10000000 0x10000004 0x10000008 0x1000000c>;

This means that we have the following association:

 CPU core device |     DRC    | VCPU id
-----------------+------------+---------
   boot core     | 0x10000000 | 0
   core1         | 0x10000004 | 4
   core2         | 0x10000008 | 8
   core3         | 0x1000000c | 12

But since the spacing of VCPU ids is 8, the DRC for core1 points to a
VCPU that doesn't exist, the DRC for core2 points to the first VCPU of
core1 and so on...

        ...

        PowerPC,POWER8@0 {
                ...
                ibm,my-drc-index = <0x10000000>;
                ...
        };

        PowerPC,POWER8@8 {
                ...
                ibm,my-drc-index = <0x10000008>;
                ...
        };

        PowerPC,POWER8@10 {
                ...

No ibm,my-drc-index property for this core since 0x10000010 doesn't
exist in ibm,drc-indexes above.

                ...
        };
};

...

interrupt-controller {
        ...
        ibm,interrupt-server-ranges = <0x0 0x10>;

With a spacing of 8, the highest VCPU id for the given topology should be:
        16 * 8 / 4 = 32 and not 16

        ...
        linux,phandle = <0x7e7323b8>;
        interrupt-controller;
};

And CPU hot-plug/unplug is broken:

(qemu) device_del core1
pseries-hotplug-cpu: Cannot find CPU (drc index 10000004) to remove

(qemu) device_del core2
cpu 4 (hwid 8) Ready to die...
cpu 5 (hwid 9) Ready to die...
cpu 6 (hwid 10) Ready to die...
cpu 7 (hwid 11) Ready to die...

These are the VCPU ids of core1 actually

(qemu) device_add host-spapr-cpu-core,core-id=12,id=core3
(qemu) device_del core3
pseries-hotplug-cpu: Cannot find CPU (drc index 1000000c) to remove

This patches all the code in hw/ppc/spapr.c to assume the VSMT
spacing when manipulating VCPU ids.

Fixes: 8904e5a750
Signed-off-by: Greg Kurz <groug@kaod.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
(cherry picked from commit 72194664c8)
Signed-off-by: Greg Kurz <groug@kaod.org>
2018-05-22 12:58:59 +02:00
Laurent Vivier
c30b366d00 spapr: set vsmt to MAX(8, smp_threads)
We silently ignore the value of smp_threads when we set
the default VSMT value, and if smp_threads is greater than VSMT
the kernel gets into trouble later.

Fixes: 8904e5a750
("spapr: Adjust default VSMT value for better migration compatibility")

Signed-off-by: Laurent Vivier <lvivier@redhat.com>
Reviewed-by: Greg Kurz <groug@kaod.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
(cherry picked from commit 4ad64cbd0c)
Signed-off-by: Greg Kurz <groug@kaod.org>
2018-05-22 12:58:59 +02:00
David Gibson
af65bce409 spapr: Adjust default VSMT value for better migration compatibility
fa98fbfc "PC: KVM: Support machine option to set VSMT mode" introduced the
"vsmt" parameter for the pseries machine type, which controls the spacing
of the vcpu ids of thread 0 for each virtual core.  This was done to bring
some consistency and stability to how that was done, while still allowing
backwards compatibility for migration and otherwise.

The default value we used for vsmt was set to the max of the host's
advertised default number of threads and the number of vthreads per vcore
in the guest.  This was done to continue running without extra parameters
on older KVM versions which don't allow the VSMT value to be changed.

Unfortunately, even that reduced leakage of host configuration
into guest-visible configuration still breaks things.  Specifically, a guest
with 4 (or fewer) vthreads/vcore will get a different vsmt value when
running on a POWER8 (vsmt==8) and POWER9 (vsmt==4) host.  That means the
vcpu ids don't line up so you can't migrate between them, though you should
be able to.

Long term we really want to make vsmt == smp_threads for sufficiently
new machine types.  However, that means that qemu will then require a
sufficiently recent KVM (one which supports changing VSMT) - that's still
not widely enough deployed to be really comfortable to do.

In the meantime we need some default that will work as often as
possible.  This patch changes that default to 8 in all circumstances.
This does change guest visible behaviour (including for existing
machine versions) for many cases - just not the most common/important
case.

Following is case by case justification for why this is still the least
worst option.  Note that any of the old behaviours can still be duplicated
after this patch, it's just that it requires manual intervention by
setting the vsmt property on the command line.

KVM HV on POWER8 host:
   This is the overwhelmingly common case in production setups, and is
   unchanged by design.  POWER8 hosts will advertise a default VSMT mode
   of 8, and > 8 vthreads/vcore isn't permitted

KVM HV on POWER7 host:
   Will break, but POWER7s allowing KVM were never released to the public.

KVM HV on POWER9 host:
   Not yet released to the public, breaking this now will reduce other
   breakage later.

KVM HV on PowerPC 970:
   Will theoretically break it, but it was barely supported to begin with
   and already required various user visible hacks to work.  Also so old
   that I just don't care.

TCG:
   This is the nastiest one; it means migration of TCG guests (without
   manual vsmt setting) will break.  Since TCG is rarely used in production
   I think this is worth it for the other benefits.  It does also remove
   one more barrier to TCG<->KVM migration which could be interesting for
   debugging applications.

KVM PR:
   As with TCG, this will break migration of existing configurations,
   without adding extra manual vsmt options.  As with TCG, it is rare in
   production so I think the benefits outweigh breakages.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Laurent Vivier <lvivier@redhat.com>
Reviewed-by: Jose Ricardo Ziviani <joserz@linux.vnet.ibm.com>
Reviewed-by: Greg Kurz <groug@kaod.org>
(cherry picked from commit 8904e5a750)
Signed-off-by: Greg Kurz <groug@kaod.org>
2018-05-22 12:58:59 +02:00
David Gibson
ad484114d1 spapr: Allow some cases where we can't set VSMT mode in the kernel
At present if we require a vsmt mode that's not equal to the kernel's
default, and the kernel doesn't let us change it (e.g. because it's an old
kernel without support) then we always fail.

But in fact we can cope with the kernel having a different vsmt as long as
  a) it's >= the actual number of vthreads/vcore (so that guest threads
     that are supposed to be on the same core act like it)
  b) it's a submultiple of the requested vsmt mode (so that guest threads
     spaced by the vsmt value will act like they're on different cores)

Allowing this case gives us a bit more freedom to adjust the vsmt behaviour
without breaking existing cases.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Laurent Vivier <lvivier@redhat.com>
Tested-by: Greg Kurz <groug@kaod.org>
Reviewed-by: Greg Kurz <groug@kaod.org>
(cherry picked from commit 1f20f2e0ee)
Signed-off-by: Greg Kurz <groug@kaod.org>
2018-05-22 12:58:59 +02:00
Gerd Hoffmann
0a30cae50a sdl: workaround bug in sdl 2.0.8 headers
https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=892087

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Message-id: 20180307154258.9313-1-kraxel@redhat.com
(cherry picked from commit 2ca5c43091)
Signed-off-by: Greg Kurz <groug@kaod.org>
2018-05-22 12:58:05 +02:00
Paolo Bonzini
5892b5a9e3 memfd: fix configure test
Recent glibc added memfd_create in sys/mman.h.  This conflicts with
the definition in util/memfd.c:

    /builddir/build/BUILD/qemu-2.11.0-rc1/util/memfd.c:40:12: error: static declaration of memfd_create follows non-static declaration

Fix the configure test, and remove the sys/memfd.h inclusion since the
file actually does not exist---it is a typo in the memfd_create(2) man
page.

Cc: Marc-André Lureau <marcandre.lureau@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit 75e5b70e6b)
Signed-off-by: Greg Kurz <groug@kaod.org>
2018-05-22 11:30:18 +02:00
Michael Roth
7c1beb52ed Update version for 2.11.1 release
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-14 14:41:05 -06:00
Greg Kurz
00e9fba2be spapr: add missing break in h_get_cpu_characteristics()
Detected by Coverity (CID 1385702). This fixes the recently added hypercall
to let guests properly apply Spectre and Meltdown workarounds.

Fixes: c59704b254 "target/ppc/spapr: Add H-Call H_GET_CPU_CHARACTERISTICS"
Reported-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kurz <groug@kaod.org>
Reviewed-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
(cherry picked from commit fa86f59234)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-12 19:39:27 -06:00
linzhecheng
63112b16a6 vga: check the validation of memory addr when draw text
Start a VM with qemu-kvm -enable-kvm -vnc :66 -smp 1 -m 1024 -hda
redhat_5.11.qcow2 -device pcnet -vga cirrus,
then use a VNC client to connect to the VM; executing the code below in the
guest OS will lead to a qemu crash:

#include <stdlib.h>
#include <time.h>
#include <sys/io.h>

int main(void)
{
    iopl(3);                              /* allow direct port I/O */
    srand(time(NULL));
    while (1) {
        int a = rand() % 0x100;           /* random value to write */
        int b = 0x3c0 + (rand() % 0x20);  /* random VGA I/O port   */
        outb(a, b);
    }
    return 0;
}

The above code writes to the VGA registers randomly.
Writing VGA CRT controller registers index 0x0C or 0x0D
(the start address register) modifies the
display memory address of the upper left pixel
or character of the screen. That address may be out of the
range of the VGA RAM, so we should validate the memory address
when reading or writing it to avoid a segfault.
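
A standalone sketch of that validation; the sizes and names are invented and
this is not the cirrus code:

    #include <stdint.h>
    #include <stdio.h>

    #define EXAMPLE_VRAM_SIZE (4 * 1024 * 1024)

    static uint8_t vram[EXAMPLE_VRAM_SIZE];

    /* Any display start address derived from guest-writable CRT registers
     * must be range-checked before it is dereferenced. */
    static uint8_t read_display_byte(uint32_t start_addr)
    {
        if (start_addr >= EXAMPLE_VRAM_SIZE) {
            return 0xFF;                 /* out of range: refuse, don't fault */
        }
        return vram[start_addr];
    }

    int main(void)
    {
        printf("%02X\n", read_display_byte(0x1000));       /* in range */
        printf("%02X\n", read_display_byte(0xFFFFFFFFu));  /* guest-chosen junk */
        return 0;
    }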

Signed-off-by: linzhecheng <linzhecheng@huawei.com>
Message-id: 20180111132724.13744-1-linzhecheng@huawei.com
Fixes: CVE-2018-5683
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
(cherry picked from commit 191f59dc17)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-12 19:19:25 -06:00
linzhecheng
30c3b4823c input: fix memory leak
If kbd_queue is not empty and queue_count >= queue_limit,
we should free evt.

Change-Id: Ieeacf90d5e7e370a40452ec79031912d8b864d83
Signed-off-by: linzhecheng <linzhecheng@huawei.com>
Message-id: 20171225023730.5512-1-linzhecheng@huawei.com
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
(cherry picked from commit fca4774a96)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-12 19:19:15 -06:00
Daniel P. Berrangé
88ab85384d ui: correctly advance output buffer when writing SASL data
In this previous commit:

  commit 8f61f1c5a6
  Author: Daniel P. Berrange <berrange@redhat.com>
  Date:   Mon Dec 18 19:12:20 2017 +0000

    ui: track how much decoded data we consumed when doing SASL encoding

I attempted to fix a flaw with tracking how much data had actually been
processed when encoding with SASL. With that flaw, the VNC server could
mistakenly discard queued data that had not been sent.

The fix was not quite right though, because it merely decremented the
vs->output.offset value. This is effectively discarding data from the
end of the pending output buffer. We actually need to discard data from
the start of the pending output buffer. We also want to free memory that
is no longer required. The correct way to handle this is to use the
buffer_advance() helper method instead of directly manipulating the
offset value.
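
The difference, reduced to a standalone sketch with a toy buffer type (not
QEMU's Buffer API):

    #include <stdio.h>
    #include <string.h>

    typedef struct {
        char   data[64];
        size_t offset;                  /* number of pending bytes */
    } ToyBuffer;

    /* Drop len bytes from the START of the pending data (what the fix does),
     * instead of shrinking offset, which silently drops bytes from the END. */
    static void toy_buffer_advance(ToyBuffer *buf, size_t len)
    {
        memmove(buf->data, buf->data + len, buf->offset - len);
        buf->offset -= len;
    }

    int main(void)
    {
        ToyBuffer buf = { "ABCDEF", 6 };

        toy_buffer_advance(&buf, 2);                     /* "AB" were sent */
        printf("%.*s\n", (int)buf.offset, buf.data);     /* prints "CDEF" */
        return 0;
    }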

Reported-by: Laszlo Ersek <lersek@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Laszlo Ersek <lersek@redhat.com>
Message-id: 20180201155841.27509-1-berrange@redhat.com
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
(cherry picked from commit 627ebec208)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-12 18:34:49 -06:00
Daniel P. Berrange
64653b7fbe ui: avoid sign extension using client width/height
Pixman returns a signed int for the image width/height, but the VNC
protocol only permits an unsigned int16. Effective framebuffer size
is determined by the guest, limited by the video RAM size, so the
dimensions are unlikely to exceed the range of an unsigned int16,
but this is not currently validated.

With the current use of 'int' for client width/height, the calculation
of offsets in vnc_update_throttle_offset() suffers from integer size
promotion and sign extension, causing coverity warnings

*** CID 1385147:  Integer handling issues  (SIGN_EXTENSION)
/ui/vnc.c: 979 in vnc_update_throttle_offset()
973      * than that the client would already suffering awful audio
974      * glitches, so dropping samples is no worse really).
975      */
976     static void vnc_update_throttle_offset(VncState *vs)
977     {
978         size_t offset =
>>>     CID 1385147:  Integer handling issues  (SIGN_EXTENSION)
>>>     Suspicious implicit sign extension:
    "vs->client_pf.bytes_per_pixel" with type "unsigned char" (8 bits,
    unsigned) is promoted in "vs->client_width * vs->client_height *
    vs->client_pf.bytes_per_pixel" to type "int" (32 bits, signed), then
    sign-extended to type "unsigned long" (64 bits, unsigned).  If
    "vs->client_width * vs->client_height * vs->client_pf.bytes_per_pixel"
    is greater than 0x7FFFFFFF, the upper bits of the result will all be 1.
979             vs->client_width * vs->client_height * vs->client_pf.bytes_per_pixel;

Change client_width / client_height to be a size_t to avoid sign
extension and integer promotion. Then validate that dimensions are in
range wrt the RFB protocol u16 limits.

Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
Message-id: 20180118155254.17053-1-berrange@redhat.com
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
(cherry picked from commit 4c956bd81e)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-12 18:34:43 -06:00
Daniel P. Berrange
9a26ca6b94 ui: fix misleading comments & return types of VNC I/O helper methods
While the QIOChannel APIs for reading/writing data return ssize_t, with negative
value indicating an error, the VNC code passes this return value through the
vnc_client_io_error() method. This detects the error condition, disconnects the
client and returns 0 to indicate error. Thus all the VNC helper methods should
return size_t (unsigned), and misleading comments which refer to the possibility
of negative return values need fixing.

Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-id: 20171218191228.31018-14-berrange@redhat.com
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
(cherry picked from commit 30b80fd526)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-12 18:34:38 -06:00
Daniel P. Berrange
172f4e5a31 ui: add trace events related to VNC client throttling
The VNC client throttling is quite subtle so will benefit from having trace
points available for live debugging.

Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-id: 20171218191228.31018-13-berrange@redhat.com
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
(cherry picked from commit 6aa22a2918)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-12 18:34:32 -06:00
Daniel P. Berrange
0c85a40e71 ui: place a hard cap on VNC server output buffer size
The previous patches fix problems with throttling of forced framebuffer updates
and audio data capture that would cause the QEMU output buffer size to grow
without bound. Those fixes are graceful in that once the client catches up with
reading data from the server, everything continues operating normally.

There is some data which the server sends to the client that is impractical to
throttle. Specifically there are various pseudo framebuffer update encodings to
inform the client of things like desktop resizes, pointer changes, audio
playback start/stop, LED state and so on. These generally only involve sending
a very small amount of data to the client, but a malicious guest might be able
to do things that trigger these changes at a very high rate. Throttling them is
not practical as missed or delayed events would cause broken behaviour for the
client.

This patch thus takes a more forceful approach of setting an absolute upper
bound on the amount of data we permit to be present in the output buffer at
any time. The previous patch set a threshold for throttling the output buffer
by allowing an amount of data equivalent to one complete framebuffer update and
one second's worth of audio data. On top of this it allowed for one further
forced framebuffer update to be queued.

To be conservative, we thus take that throttling threshold and multiply it by
5 to form an absolute upper bound. If this bound is hit during vnc_write() we
forcibly disconnect the client, refusing to queue further data. This limit is
high enough that it should never be hit unless a malicious client is trying to
exploit the server, or the network is completely saturated, preventing any sending
of data on the socket.
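
In sketch form (limit, factor and field names as described above; code
illustrative, not the exact hunk):

    void vnc_write(VncState *vs, const void *data, size_t len)
    {
        /* Absolute upper bound: 5x the normal throttling threshold. */
        if (vs->disconnecting ||
            vs->output.offset + len > vs->throttle_output_offset * 5) {
            vnc_disconnect_start(vs);   /* refuse to queue further data */
            return;
        }
        buffer_reserve(&vs->output, len);
        buffer_append(&vs->output, data, len);
    }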

This completes the fix for CVE-2017-15124 started in the previous patches.

Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-id: 20171218191228.31018-12-berrange@redhat.com
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
(cherry picked from commit f887cf165d)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-12 18:34:26 -06:00
Daniel P. Berrange
f9e53c77ea ui: fix VNC client throttling when forced update is requested
The VNC server must throttle data sent to the client to prevent the 'output'
buffer size growing without bound, if the client stops reading data off the
socket (either maliciously or due to stalled/slow network connection).

The current throttling is very crude because it simply checks whether the
output buffer offset is zero. This check is disabled if the client has requested
a forced update, because we want to send these as soon as possible.

As a result, the VNC client can cause QEMU to allocate arbitrary amounts of RAM.
They can first start something in the guest that triggers lots of framebuffer
updates, e.g. playing a YouTube video, then repeatedly send full framebuffer update
requests, but never read data back from the server. This can easily make QEMU's
VNC server send buffer consume 100MB of RAM per second, until the OOM killer
starts reaping processes (hopefully the rogue QEMU process, but it might pick
others...).

To address this we make the throttling more intelligent, so we can throttle
full updates. When we get a forced update request, we keep track of exactly how
much data we put on the output buffer. We will not process a subsequent forced
update request until this data has been fully sent on the wire. We always allow
one forced update request to be in flight, regardless of what data is queued
for incremental updates or audio data. The slight complication is that we do
not initially know how much data an update will send, as this is done in the
background by the VNC job thread. So we must track the fact that the job thread
has an update pending, and not process any further updates until this job
has been completed & put data on the output buffer.
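
Roughly, the bookkeeping described above looks like this (field names are
illustrative, not necessarily those used in the patch):

    /* When the job thread queues the forced update, remember how much
     * output must drain before another forced update may be processed. */
    vs->force_update_offset = vs->output.offset;

    /* As socket writes complete, count the sent bytes against it. */
    if (vs->force_update_offset > ret) {
        vs->force_update_offset -= ret;
    } else {
        vs->force_update_offset = 0;  /* forced update fully on the wire */
    }

    /* A new forced update request is only honoured once this hits zero. */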

This unbounded memory growth affects all VNC server configurations supported by
QEMU, with no workaround possible. The mitigating factor is that it can only be
triggered by a client that has authenticated with the VNC server, and who is
able to trigger a large quantity of framebuffer updates or audio samples from
the guest OS. Mostly they'll just succeed in getting the OOM killer to kill
their own QEMU process, but it's possible other processes can get taken out as
collateral damage.

This is a more general variant of the similar unbounded memory usage flaw in
the websockets server, that was previously assigned CVE-2017-15268, and fixed
in 2.11 by:

  commit a7b20a8efa
  Author: Daniel P. Berrange <berrange@redhat.com>
  Date:   Mon Oct 9 14:43:42 2017 +0100

    io: monitor encoutput buffer size from websocket GSource

This new general memory usage flaw has been assigned CVE-2017-15124, and is
partially fixed by this patch.

Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-id: 20171218191228.31018-11-berrange@redhat.com
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
(cherry picked from commit ada8d2e436)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-12 18:34:21 -06:00
Daniel P. Berrange
f9c8767828 ui: fix VNC client throttling when audio capture is active
The VNC server must throttle data sent to the client to prevent the 'output'
buffer size growing without bound, if the client stops reading data off the
socket (either maliciously or due to stalled/slow network connection).

The current throttling is very crude because it simply checks whether the
output buffer offset is zero. This check must be disabled if audio capture is
enabled, because when streaming audio the output buffer offset will rarely be
zero due to queued audio data, and so this would starve framebuffer updates.

As a result, the VNC client can cause QEMU to allocate arbitrary amounts of RAM.
They can first start something in the guest that triggers lots of framebuffer
updates, e.g. playing a YouTube video, then enable audio capture and simply never
read data back from the server. This can easily make QEMU's VNC server send
buffer consume 100MB of RAM per second, until the OOM killer starts reaping
processes (hopefully the rogue QEMU process, but it might pick others...).

To address this we make the throttling more intelligent, so we can throttle
when audio capture is active too. To determine how to throttle incremental
updates or audio data, we calculate a size threshold. Normally the threshold is
the approximate number of bytes associated with a single complete framebuffer
update, i.e. width * height * bytes per pixel. We'll send incremental updates
until we hit this threshold, at which point we'll stop sending updates until
data has been written to the wire, causing the output buffer offset to fall
back below the threshold.

If audio capture is enabled, we increase the size of the threshold to also
allow for up to one second's worth of audio data samples, i.e. nchannels * bytes
per sample * frequency. This allows the output buffer to have a mixture of
incremental framebuffer updates and audio data queued, but once the threshold
is exceeded, audio data will be dropped and incremental updates will be
throttled.
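
Roughly, the threshold described above (a sketch, not the literal code; the
per-sample size helper is hypothetical):

    /* One complete framebuffer update... */
    size_t threshold = vs->client_width * vs->client_height *
                       vs->client_pf.bytes_per_pixel;

    /* ...plus up to one second of audio samples when capture is active. */
    if (vs->audio_cap) {
        threshold += vs->as.freq * vs->as.nchannels *
                     audio_sample_bytes(&vs->as);  /* hypothetical helper */
    }
    vs->throttle_output_offset = threshold;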

This unbounded memory growth affects all VNC server configurations supported by
QEMU, with no workaround possible. The mitigating factor is that it can only be
triggered by a client that has authenticated with the VNC server, and who is
able to trigger a large quantity of framebuffer updates or audio samples from
the guest OS. Mostly they'll just succeed in getting the OOM killer to kill
their own QEMU process, but it's possible other processes can get taken out as
collateral damage.

This is a more general variant of the similar unbounded memory usage flaw in
the websockets server, that was previously assigned CVE-2017-15268, and fixed
in 2.11 by:

  commit a7b20a8efa
  Author: Daniel P. Berrange <berrange@redhat.com>
  Date:   Mon Oct 9 14:43:42 2017 +0100

    io: monitor encoutput buffer size from websocket GSource

This new general memory usage flaw has been assigned CVE-2017-15124, and is
partially fixed by this patch.

Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-id: 20171218191228.31018-10-berrange@redhat.com
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
(cherry picked from commit e2b72cb6e0)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-12 18:34:14 -06:00
Daniel P. Berrange
5af9f2504f ui: refactor code for determining if an update should be sent to the client
The logic for determining if it is possible to send an update to the client
will become more complicated shortly, so pull it out into a separate method
for easier extension later.

Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-id: 20171218191228.31018-9-berrange@redhat.com
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
(cherry picked from commit 0bad834228)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-12 18:34:09 -06:00
Daniel P. Berrange
2e6571e671 ui: correctly reset framebuffer update state after processing dirty regions
According to the RFB protocol, a client sends one or more framebuffer update
requests to the server. The server can reply with a single framebuffer update
response, that covers all previously received requests. Once the client has
read this update from the server, it may send further framebuffer update
requests to monitor future changes. The client is free to delay sending the
framebuffer update request if it needs to throttle the amount of data it is
reading from the server.

The QEMU VNC server, however, has never correctly handled the framebuffer
update requests. Once QEMU has received an update request, it will continue to
send client updates forever, even if the client hasn't asked for further
updates. This prevents the client from throttling back data it gets from the
server. This change fixes the flawed logic such that after a set of updates are
sent out, QEMU waits for a further update request before sending more data.

Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-id: 20171218191228.31018-8-berrange@redhat.com
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
(cherry picked from commit 728a7ac954)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-12 18:34:04 -06:00
Daniel P. Berrange
126617e6f8 ui: introduce enum to track VNC client framebuffer update request state
Currently the VNC server tracks whether a client has requested an incremental
or forced update with two boolean flags. There are only really 3 distinct
states to track, so create an enum to more accurately reflect permitted states.
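
In sketch form, the three states look like this (enumerator names
illustrative):

    typedef enum {
        VNC_STATE_UPDATE_NONE,        /* nothing requested by the client */
        VNC_STATE_UPDATE_INCREMENTAL, /* client asked for changed regions */
        VNC_STATE_UPDATE_FORCE,       /* client asked for a full update */
    } VncStateUpdate;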

Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-id: 20171218191228.31018-7-berrange@redhat.com
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
(cherry picked from commit fef1bbadfb)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-12 18:33:55 -06:00
Daniel P. Berrange
8a9c5c34ac ui: track how much decoded data we consumed when doing SASL encoding
When we encode data for writing with SASL, we encode the entire pending output
buffer. The subsequent write, however, may not be able to send the full encoded
data in one go, particularly with a slow network. So we delay setting the
output buffer offset back to zero until all the SASL encoded data is sent.

Between encoding the data and completing sending of the SASL encoded data,
however, more data might have been placed on the pending output buffer. So it
is not valid to set offset back to zero. Instead we must keep track of how much
data we consumed during encoding and subtract only that amount.
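
A sketch of the bookkeeping (field names approximate):

    /* Record how many raw output bytes this SASL encode consumed. */
    vs->sasl.encodedRawLength = vs->output.offset;

    /* Later, once every encoded byte has been written to the socket,
     * discard only those raw bytes; anything appended to the output
     * buffer after the encode stays queued for the next round. */
    if (vs->sasl.encodedOffset == vs->sasl.encodedLength) {
        buffer_advance(&vs->output, vs->sasl.encodedRawLength);
        vs->sasl.encodedLength = 0;
    }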

With the current bug we would be throwing away some pending data without having
sent it at all. By sheer luck this did not previously cause any serious problem
because appending data to the send buffer is always an atomic action, so we
only ever throw away complete RFB protocol messages. In the case of frame buffer
updates we'd catch up fairly quickly, so no obvious problem was visible.

Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-id: 20171218191228.31018-6-berrange@redhat.com
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
(cherry picked from commit 8f61f1c5a6)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-12 18:33:50 -06:00
Daniel P. Berrange
616d64ac06 ui: avoid pointless VNC updates if framebuffer isn't dirty
The vnc_update_client() method checks the 'has_dirty' flag to see if there are
dirty regions that are pending to send to the client. Regardless of this flag,
if a forced update is requested, updates must be sent. For unknown reasons
though, the code also tries to send updates if audio capture is enabled. This
makes no sense as audio capture state does not impact framebuffer contents, so
this check is removed.

Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-id: 20171218191228.31018-5-berrange@redhat.com
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
(cherry picked from commit 3541b08475)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-12 18:33:39 -06:00
Daniel P. Berrange
a7b2537f8a ui: remove redundant indentation in vnc_client_update
Now that previous dead / unreachable code has been removed, we can simplify
the indentation in the vnc_client_update method.

Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-id: 20171218191228.31018-4-berrange@redhat.com
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
(cherry picked from commit b939eb89b6)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-12 18:33:33 -06:00
Daniel P. Berrange
de1e7a91c8 ui: remove unreachable code in vnc_update_client
A previous commit:

  commit 5a8be0f73d
  Author: Gerd Hoffmann <kraxel@redhat.com>
  Date:   Wed Jul 13 12:21:20 2016 +0200

    vnc: make sure we finish disconnect

Added a check for vs->disconnecting at the very start of the
vnc_update_client method. This means that the very next "if"
statement check for !vs->disconnecting always evaluates true,
and is thus redundant. This in turn means the vs->disconnecting
check at the very end of the method never evaluates true, and
is thus unreachable code.

Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-id: 20171218191228.31018-3-berrange@redhat.com
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
(cherry picked from commit c53df96161)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-12 18:33:28 -06:00
Daniel P. Berrange
0181686a98 ui: remove 'sync' parameter from vnc_update_client
There is only one caller of vnc_update_client and that always passes false
for the 'sync' parameter.

Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-id: 20171218191228.31018-2-berrange@redhat.com
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
(cherry picked from commit 6af998db05)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-12 18:33:22 -06:00
Greg Kurz
a3fd64f2fe migration: incoming postcopy advise sanity checks
If postcopy-ram was set on the source but not on the destination,
migration doesn't occur, the destination prints an error and boots
the guest:

qemu-system-ppc64: Expected vmdescription section, but got 0

We end up with two running instances.

This behaviour was introduced in 2.11 by commit 58110f0acb "migration:
split common postcopy out of ram postcopy" to prepare ground for the
upcoming dirty bitmap postcopy support. It adds a new case where the
source may send an empty postcopy advise because dirty bitmap doesn't
need to check page sizes like RAM postcopy does.

If the source has enabled postcopy-ram, then it sends an advise with
the page size values. If the destination hasn't enabled postcopy-ram,
then loadvm_postcopy_handle_advise() leaves the page size values on
the stream and returns. This confuses qemu_loadvm_state() later on
and causes the destination to start execution.

As discussed several times, postcopy-ram should be enabled on both sides
to be functional. This patch changes the destination to perform some
extra checks on the advise length to ensure this is the case. Otherwise
an error is returned and migration is aborted.

Reported-by: Balamuruhan S <bala24@linux.vnet.ibm.com>
Signed-off-by: Greg Kurz <groug@kaod.org>
Reviewed-by: Daniel Henrique Barboza <danielhb@linux.vnet.ibm.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Message-Id: <151791621042.19120.3103118434734245776.stgit@bahia>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
(cherry picked from commit 875fcd013a)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-12 09:28:28 -06:00
Philippe Mathieu-Daudé
68d7e24475 target/sh4: add missing tcg_temp_free() in _decode_opc()
missed in c55497ecb8 and 852d481faf.

Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-Id: <20171205170013.22337-3-f4bug@amsat.org>
Reviewed-by: Aurelien Jarno <aurelien@aurel32.net>
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
(cherry picked from commit e691e0ed13)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-11 21:06:46 -06:00
Daniel Henrique Barboza
2095c5a2e3 migration/savevm.c: set MAX_VM_CMD_PACKAGED_SIZE to 1ul << 32
MAX_VM_CMD_PACKAGED_SIZE is a constant used in qemu_savevm_send_packaged
and loadvm_handle_cmd_packaged to determine whether a package is too
big to be sent or received. qemu_savevm_send_packaged is called inside
postcopy_start (migration/migration.c) to send the MigrationState
in a single blob to the destination, using the MIG_CMD_PACKAGED subcommand,
which will read it up using loadvm_handle_cmd_packaged. If the blob is
larger than MAX_VM_CMD_PACKAGED_SIZE, an error is thrown and the postcopy
migration is aborted. Both MAX_VM_CMD_PACKAGED_SIZE and MIG_CMD_PACKAGED
were introduced by commit 11cf1d984b ("MIG_CMD_PACKAGED: Send a packaged
chunk ..."). The constant has its original value of 1ul << 24 (16MB).

The current MAX_VM_CMD_PACKAGED_SIZE value is not enough to support postcopy
migration of bigger pseries guests. The blob size for a postcopy migration of
a pseries guest with the following setup:

qemu-system-ppc64 --nographic -vga none -machine pseries,accel=kvm -m 64G \
-smp 1,maxcpus=32 -device virtio-blk-pci,drive=rootdisk \
-drive file=f27.qcow2,if=none,cache=none,format=qcow2,id=rootdisk \
-netdev user,id=u1 -net nic,netdev=u1

comes to around 12MB. Bumping the RAM to 128G makes the blob grow to 20MB.
With 256G the blob goes to 37MB - more than twice the current maximum size.
At this moment the pseries machine can handle guests with up to 1TB of RAM,
which would make this postcopy blob approximately 128MB in size.

Following the discussions made in [1], there is a need to understand what
devices are aggressively consuming the blob in that manner and see if that
can be mitigated. Until then, we can set MAX_VM_CMD_PACKAGED_SIZE to the
maximum value allowed. Since the size is a 32 bit int variable, we can set
it as 1ul << 32, giving a maximum blob size of 4G that is enough to support
postcopy migration of 32TB RAM guests given the above constraints.

[1] https://lists.nongnu.org/archive/html/qemu-devel/2018-01/msg06313.html
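
The change itself is a one-line bump of the constant (sketch):

    /* was (1ul << 24), i.e. 16MB, since the constant was introduced */
    #define MAX_VM_CMD_PACKAGED_SIZE (1ul << 32)    /* 4GB */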

Signed-off-by: Daniel Henrique Barboza <danielhb@linux.vnet.ibm.com>
Reported-by: Balamuruhan S <bala24@linux.vnet.ibm.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
(cherry picked from commit ee555cdf4d)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-11 21:06:01 -06:00
Dr. David Alan Gilbert
c8847f5565 migration: Recover block devices if failure in device state
In e91d895 I added the new pause-before-switchover mechanism
to allow migration completion to be delayed; this changes the
last state prior to completion to MIGRATE_STATUS_DEVICE rather
than MIGRATE_STATUS_ACTIVE.

Fix the failure path in migration_completion to recover the block
devices if it fails in MIGRATE_STATUS_DEVICE, not just the
MIGRATE_STATUS_ACTIVE that it previously had.

This corresponds to rh bz:
  https://bugzilla.redhat.com/show_bug.cgi?id=1538494
whose symptom is an occasional source crash on a failed migration.

Fixes: e91d8951d5
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
(cherry picked from commit 6039dd5b1c)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-11 21:05:39 -06:00
Ross Lagerwall
b9eec804f4 migration: Don't leak IO channels
Since qemu_fopen_channel_{in,out}put take references on the underlying
IO channels, make sure to release our references to them.

Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com>
Message-Id: <20171101142526.1006-2-ross.lagerwall@citrix.com>
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
(cherry picked from commit 032b79f717)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-11 21:05:05 -06:00
Christian Borntraeger
b8aa511bc0 s390x/sclp: fix event mask handling
commit 67915de9f0 ("s390x/event-facility: variable-length event
masks") switched the sclp receive/send mask. This broke the sclp
lm console.

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Fixes: commit 67915de9f0 ("s390x/event-facility: variable-length event masks")
Cc: Cornelia Huck <cohuck@redhat.com>
Cc: Jason J. Herne <jjherne@linux.vnet.ibm.com>
Cc: qemu-stable@nongnu.org
Message-Id: <20180202094241.59537-1-borntraeger@de.ibm.com>
Reviewed-by: Claudio Imbrenda <imbrenda@linux.vnet.ibm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
(cherry picked from commit 869e676ae7)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-11 20:27:04 -06:00
linzhecheng
ab7b4f6734 memory: set ioeventfd_update_pending after address_space_update_ioeventfds
We should set ioeventfd_update_pending the same way as memory_region_update_pending.

Signed-off-by: linzhecheng <linzc@zju.edu.cn>
Message-Id: <1515934519-16158-1-git-send-email-linzc@zju.edu.cn>
Cc: qemu-stable@nongnu.org
Fixes: ade9c1aac5
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit 0b15209571)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-11 20:24:14 -06:00
Suraj Jitindar Singh
ed8b4ecc68 target/ppc/spapr: Add H-Call H_GET_CPU_CHARACTERISTICS
The new H-Call H_GET_CPU_CHARACTERISTICS is used by the guest to query
behaviours and available characteristics of the cpu.

Implement the handler for this new H-Call which formulates its response
based on the setting of the spapr_caps cap-cfpc, cap-sbbc and cap-ibs.

Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
(cherry picked from commit c59704b254)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-05 19:07:38 -06:00
Suraj Jitindar Singh
eab4b5170f target/ppc/spapr_caps: Add new tristate cap safe_indirect_branch
Add new tristate cap cap-ibs to represent the indirect branch
serialisation capability.

Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
(cherry picked from commit 4be8d4e7d9)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-05 19:07:38 -06:00
Suraj Jitindar Singh
d7aa3d0a0a target/ppc/spapr_caps: Add new tristate cap safe_bounds_check
Add new tristate cap cap-sbbc to represent the speculation barrier
bounds checking capability.

Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
(cherry picked from commit 09114fd817)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-05 19:07:38 -06:00
Suraj Jitindar Singh
3dc12273b7 target/ppc/spapr_caps: Add new tristate cap safe_cache
Add new tristate cap cap-cfpc to represent the cache flush on privilege
change capability.

Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
(cherry picked from commit 8f38eaf8f9)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-05 19:07:37 -06:00
Suraj Jitindar Singh
e9a8747cd2 target/ppc/spapr_caps: Add support for tristate spapr_capabilities
spapr_caps are used to represent the level of support for various
capabilities related to the spapr machine type. Currently there is
only support for boolean capabilities.

Add support for tristate capabilities by implementing their get/set
functions. These capabilities can have the values 0, 1 or 2
corresponding to broken, workaround and fixed.

Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
(cherry picked from commit 6898aed77f)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-05 19:07:37 -06:00
Suraj Jitindar Singh
49b1fa33a3 target/ppc/kvm: Add cap_ppc_safe_[cache/bounds_check/indirect_branch]
Add three new kvm capabilities used to represent the level of host support
for three corresponding workarounds.

Host support for each of the capabilities is queried through the
new ioctl KVM_PPC_GET_CPU_CHAR which returns four uint64 quantities. The
first two, character and behaviour, represent the available
characteristics of the cpu and the behaviour of the cpu respectively.
The second two, c_mask and b_mask, represent the mask of known bits for
the character and behaviour dwords respectively.

Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
[dwg: Correct some compile errors due to name change in final kernel
 patch version]
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>

(cherry picked from commit 8acc2ae5e9)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-05 19:07:37 -06:00
Suraj Jitindar Singh
43a29f0025 target/ppc/spapr_caps: Add macro to generate spapr_caps migration vmstate
The vmstate description and the contained needed function for migration
of spapr_caps is the same for each cap, with the name of the cap
substituted. As such introduce a macro to allow for easier generation of
these.

Convert the three existing spapr_caps (htm, vsx, and dfp) to use this
macro.

Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
(cherry picked from commit 1f63ebaa91)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-05 19:07:37 -06:00
Cédric Le Goater
d72e0a69ea target/ppc: introduce the PPC_BIT() macro
and use them in a couple of obvious places. Other macros will be used
in the model of the XIVE interrupt controller.
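
Such a macro typically follows the PowerPC big-endian bit numbering, where
bit 0 is the most significant bit of a 64-bit word (sketch of what it looks
like, not necessarily the exact upstream definition):

    #define PPC_BIT(bit)    (0x8000000000000000ULL >> (bit))
    #define PPC_BIT32(bit)  (0x80000000 >> (bit))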

Signed-off-by: Cédric Le Goater <clg@kaod.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
(cherry picked from commit 2a83f9976e)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-05 19:07:37 -06:00
Greg Kurz
4374cbca95 spapr: fix device tree properties when using compatibility mode
Commit 51f84465dd changed the compatibility mode setting logic:
- machine reset only sets compatibility mode for the boot CPU
- compatibility mode is set for other CPUs when they are put online
  by the guest with the "start-cpu" RTAS call

This causes a regression for machines started with max-compat-cpu:
the device tree nodes related to secondary CPU cores contain wrong
"cpu-version" and "ibm,pa-features" values, as shown below.

Guest started on a POWER8 host with:
     -smp cores=2 -machine pseries,max-cpu-compat=compat7

                        ibm,pa-features = [18 00 f6 3f c7 c0 80 f0 80 00
 00 00 00 00 00 00 00 00 80 00 80 00 80 00 00 00];
                        cpu-version = <0x4d0200>;

                               ^^^
                        second CPU core

                        ibm,pa-features = <0x600f63f 0xc70080c0>;
                        cpu-version = <0xf000003>;

                               ^^^
                          boot CPU core

The second core is advertised in raw POWER8 mode. This happens because
CAS assumes all CPUs to have the same compatibility mode. Since the
boot CPU already has the requested compatibility mode, the CAS code
does not set it for the secondary one, and exposes the bogus device
tree properties in the CAS response to the guest.

A similar situation is observed when hot-plugging a CPU core. The
related device tree properties are generated and exposed to guest
with the "ibm,configure-connector" RTAS before "start-cpu" is called.
The CPU core is advertised to the guest in raw mode as well.

In both cases, it boils down to the fact that "start-cpu" happens too
late. This can be fixed globally by propagating the compatibility mode
of the boot CPU to the other CPUs during reset.  For this to work, the
compatibility mode of the boot CPU must be set before the machine code
actually resets all CPUs.

It is not needed to set the compatibility mode in "start-cpu" anymore,
so the code is dropped.

Fixes: 51f84465dd
Signed-off-by: Greg Kurz <groug@kaod.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
(cherry picked from commit 9012a53f06)
 Conflicts:
	hw/ppc/spapr_cpu_core.c
	hw/ppc/spapr_rtas.c
* drop context dep on d6322252b3
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-05 18:55:26 -06:00
Jose Ricardo Ziviani
a1f33a5b93 ppc: Change Power9 compat table to support at most 8 threads/core
Increases the max smt mode to 8 for Power9. That's because KVM supports
smt emulation on this platform, so QEMU should allow users to use it as
well.

Today if we try to pass -smp ...,threads=8, QEMU will silently truncate
it to smt4 mode and may cause a crash if we try to perform a cpu
hotplug.

Signed-off-by: Jose Ricardo Ziviani <joserz@linux.vnet.ibm.com>
[dwg: Added an explanatory comment]
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>

(cherry picked from commit 03ee51d354)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-05 18:55:26 -06:00
Suraj Jitindar Singh
6a47136799 hw/ppc/spapr_caps: Rework spapr_caps to use uint8 internal representation
Currently spapr_caps are tied to boolean values (on or off). This patch
reworks the caps so that they can have any uint8 value. This allows more
capabilities with various values to be represented in the same way
internally. Capabilities are numbered in ascending order. The internal
representation of capability values is an array of uint8s in the
sPAPRMachineState, indexed by capability number.

Capabilities can have their own name, description, options, getter and
setter functions, type and allow functions. They also each have their own
section in the migration stream. Capabilities are only migrated if they
were explicitly set on the command line, with the assumption that
otherwise the default will match.

On migration we ensure that the capability value on the destination
is greater than or equal to the capability value from the source. So
long as this remains the case, the migration is considered
compatible and allowed to continue.

This patch implements generic getter and setter functions for boolean
capabilities. It also converts the existing cap-htm, cap-vsx and
cap-dfp capabilities to this new format.
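
Very roughly, each capability gets a descriptor plus a byte-sized value slot
(structure illustrative only, not the exact upstream layout):

    typedef struct sPAPRCapabilityInfo {
        const char *name;             /* e.g. "htm", "vsx", "dfp" */
        const char *description;
        int index;                    /* capability number */
        ObjectPropertyAccessor *get;  /* type-specific getter */
        ObjectPropertyAccessor *set;  /* type-specific setter */
        const char *type;             /* "bool" for now, tristate later */
    } sPAPRCapabilityInfo;

    /* Values live in the machine state, indexed by capability number. */
    uint8_t caps[SPAPR_CAP_NUM];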

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
(cherry picked from commit 4e5fe3688e)
 Conflicts:
	include/hw/ppc/spapr.h
*drop context dep on 60c6823b9b
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-05 18:54:33 -06:00
David Gibson
e4f4fa00eb spapr: Handle Decimal Floating Point (DFP) as an optional capability
Decimal Floating Point has been available on POWER7 and later (server)
cpus.  However, it can be disabled on the hypervisor, meaning that it's
not available to guests.

We currently handle this by conditionally advertising DFP support in the
device tree depending on whether the guest CPU model supports it - which
can also depend on what's allowed in the host for -cpu host.  That can lead
to confusion on migration, since host properties are silently affecting
guest visible properties.

This patch handles it by treating it as an optional capability for the
pseries machine type.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Greg Kurz <groug@kaod.org>
(cherry picked from commit 2d1fb9bc8e)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-05 18:53:54 -06:00
David Gibson
ff6f7e10c6 spapr: Handle VMX/VSX presence as an spapr capability flag
We currently have some conditionals in the spapr device tree code to decide
whether or not to advertise the availability of the VMX (aka Altivec) and
VSX vector extensions to the guest, based on whether the guest cpu has
those features.

This can lead to confusion and subtle failures on migration, since it makes
a guest visible change based only on host capabilities.  We now have a
better mechanism for this, in spapr capabilities flags, which explicitly
depend on user options rather than host capabilities.

Rework the advertisement of VSX and VMX based on a new VSX capability.  We
no longer bother with a conditional for VMX support, because every CPU
that's ever been supported by the pseries machine type supports VMX.

NOTE: Some userspace distributions (e.g. RHEL7.4) already rely on
availability of VSX in libc, so using cap-vsx=off may lead to a fatal
SIGILL in init.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Greg Kurz <groug@kaod.org>
(cherry picked from commit 2938664286)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-05 18:53:46 -06:00
David Gibson
7c578cbc37 target/ppc: Clean up probing of VMX, VSX and DFP availability on KVM
When constructing the "host" cpu class we modify whether the VMX and VSX
vector extensions and DFP (Decimal Floating Point) are available
based on whether KVM can support those instructions.  This can depend on
policy in the host kernel as well as on the actual host cpu capabilities.

However, the way we probe for this is not very nice: we explicitly check
the host's device tree.  That works in practice, but it's not really
correct, since the device tree is a property of the host kernel's platform
which we don't really know about.  We get away with it because the only
modern POWER platforms happen to encode VMX, VSX and DFP availability in
the device tree in the same way.

Arguably we should have an explicit KVM capability for this, but we haven't
needed one so far.  Barring specific KVM policies which don't yet exist,
each of these instruction classes will be available in the guest if and
only if they're available in the qemu userspace process.  We can determine
that from the ELF AUX vector we're supplied with.

Once reworked like this, there are no more callers for kvmppc_get_vmx() and
kvmppc_get_dfp() so remove them.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Greg Kurz <groug@kaod.org>
(cherry picked from commit 3f2ca480eb)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-05 18:53:42 -06:00
David Gibson
804e5ea9ed spapr: Validate capabilities on migration
Now that the "pseries" machine type implements optional capabilities (well,
one so far) there's the possibility of having different capabilities
available at either end of a migration.  Although arguably a user error,
it would be nice to catch this situation and fail as gracefully as we can.

This adds code to migrate the capabilities flags.  These aren't pulled
directly into the destination's configuration since what the user has
specified on the destination command line should take precedence.  However,
they are checked against the destination capabilities.

If the source was using a capability which is absent on the destination,
we fail the migration, since that could easily cause a guest crash or other
bad behaviour.  If the source lacked a capability which is present on the
destination we warn, but allow the migration to proceed.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Greg Kurz <groug@kaod.org>
(cherry picked from commit be85537d65)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-05 18:53:31 -06:00
David Gibson
9070f408f4 spapr: Treat Hardware Transactional Memory (HTM) as an optional capability
This adds an spapr capability bit for Hardware Transactional Memory.  It is
enabled by default for pseries-2.11 and earlier machine types, with POWER8
or later CPUs (as it must be, since earlier qemu versions would implicitly
allow it).  However it is disabled by default for the latest pseries-2.12
machine type.

This means that with the latest machine type, HTM will not be available,
regardless of CPU, unless it is explicitly enabled on the command line.
That change is made on the basis that:

 * This way running with -M pseries,accel=tcg will start with whatever cpu
   and will provide the same guest visible model as with accel=kvm.
     - More specifically, this means existing make check tests don't have
       to be modified to use cap-htm=off in order to run with TCG

 * We hope to add a new "HTM without suspend" feature in the not too
   distant future which could work on both POWER8 and POWER9 cpus, and
   could be enabled by default.

 * Best guesses suggest that future POWER cpus may well only support the
   HTM-without-suspend model, not the (frankly, horribly overcomplicated)
   POWER8 style HTM with suspend.

 * Anecdotal evidence suggests problems with HTM being enabled when it
   wasn't wanted are more common than being missing when it was.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Greg Kurz <groug@kaod.org>
(cherry picked from commit ee76a09fc7)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-05 18:53:23 -06:00
David Gibson
78a38cd47e spapr: Capabilities infrastructure
Because PAPR is a paravirtual environment access to certain CPU (or other)
facilities can be blocked by the hypervisor.  PAPR provides ways to
advertise in the device tree whether or not those features are available to
the guest.

In some places we automatically determine whether to make a feature
available based on whether our host can support it, in most cases this is
based on limitations in the available KVM implementation.

Although we correctly advertise this to the guest, it means that host
factors might make changes to the guest visible environment which is bad:
as well as generaly reducing reproducibility, it means that a migration
between different host environments can easily go bad.

We've mostly gotten away with it because the environments considered mature
enough to be well supported (basically, KVM on POWER8) have had consistent
feature availability.  But, it's still not right and some limitations on
POWER9 is going to make it more of an issue in future.

This introduces an infrastructure for defining "sPAPR capabilities".  These
are set by default based on the machine version, masked by the capabilities
of the chosen cpu, but can be overridden with machine properties.

The intention is at reset time we verify that the requested capabilities
can be supported on the host (considering TCG, KVM and/or host cpu
limitations).  If not we simply fail, rather than silently modifying the
advertised featureset to the guest.

This does mean that certain configurations that "worked" may now fail, but
such configurations were already more subtly broken.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Greg Kurz <groug@kaod.org>
(cherry picked from commit 33face6b89)
 Conflicts:
	include/hw/ppc/spapr.h
*drop context dep on 60c6823b9b
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-05 18:52:49 -06:00
David Gibson
0fac4aa930 spapr: Add pseries-2.12 machine type
While we're at it fix a couple of small errors in the 2.11 and 2.10 models
(they didn't have any real effect, but don't quite match the template).

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
(cherry picked from commit 2b6154120c)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-05 18:52:01 -06:00
Laurent Vivier
97d17551b5 spapr: don't initialize PATB entry if max-cpu-compat < power9
If KVM is enabled and the KVM MMU radix capability is available,
the partition table entry (patb_entry) for the radix mode is
initialized by default in ppc_spapr_reset().

It's a problem if we want to migrate the guest to a POWER8 host
while the kernel is not started to set the value to the one
expected for a POWER8 CPU.

The "-machine max-cpu-compat=power8" should allow to migrate
a POWER9 KVM host to a POWER8 KVM host, but because patb_entry
is set, the destination QEMU tries to enable radix mode on the
POWER8 host. This fails and cancels the migration:

    Process table config unsupported by the host
    error while loading state for instance 0x0 of device 'spapr'
    load of migration failed: Invalid argument

This patch doesn't set the PATB entry if the user provides
a CPU compatibility mode that doesn't support radix mode.

Signed-off-by: Laurent Vivier <lvivier@redhat.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
(cherry picked from commit 1481fe5fcf)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-04 23:38:26 -06:00
Peter Maydell
7e25155a7b linux-user/signal.c: Rename MC_* defines
The SPARC code in linux-user/signal.c defines a set of
MC_* constants. On some SPARC hosts these are also defined
by sys/ucontext.h, resulting in build failures:

linux-user/signal.c:2786:0: error: "MC_NGREG" redefined [-Werror]
 #define MC_NGREG 19

In file included from /usr/include/signal.h:302:0,
                 from include/qemu/osdep.h:86,
                 from linux-user/signal.c:19:
/usr/include/sparc64-linux-gnu/sys/ucontext.h:59:0: note: this is the location of the previous definition
 # define MC_NGREG __MC_NGREG

Rename all these constants to SPARC_MC_* to avoid the clash.

Cc: qemu-stable@nongnu.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 1517318239-15764-1-git-send-email-peter.maydell@linaro.org
(cherry picked from commit 8ebb314b95)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-01 15:22:33 -06:00
Greg Kurz
80277d7cd5 spapr_pci: fix MSI/MSIX selection
In various place we don't correctly check if the device supports MSI or
MSI-X. This can cause devices to be advertised with MSI support, even
if they only support MSI-X (like virtio-pci-* devices for example):

                ethernet@0 {
                        ibm,req#msi = <0x1>; <--- wrong!
			.
			ibm,loc-code = "qemu_virtio-net-pci:0000:00:00.0";
			.
			ibm,req#msi-x = <0x3>;
                };

Worse, this can also cause the "ibm,change-msi" RTAS call to corrupt the
PCI status and cause migration to fail:

  qemu-system-ppc64: get_pci_config_device: Bad config data: i=0x6
    read: 0 device: 10 cmask: 10 wmask: 0 w1cmask:0
                              ^^
           PCI_STATUS_CAP_LIST bit which is assumed to be constant

This patch changes spapr_populate_pci_child_dt() to properly check for
MSI support using msi_present(): this ensures that PCIDevice::msi_cap
was set by msi_init() and that msi_nr_vectors_allocated() will look at
the right place in the config space.

Checking PCIDevice::msix_entries_nr is enough for MSI-X but let's add
a call to msix_present() there as well for consistency.
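
In sketch form, the device tree population then keys off the presence
helpers (close to, but not necessarily identical to, the actual hunk):

    if (msi_present(dev)) {
        uint32_t max_msi = msi_nr_vectors_allocated(dev);
        if (max_msi) {
            _FDT(fdt_setprop_cell(fdt, offset, "ibm,req#msi", max_msi));
        }
    }
    if (msix_present(dev)) {
        uint32_t max_msix = dev->msix_entries_nr;
        if (max_msix) {
            _FDT(fdt_setprop_cell(fdt, offset, "ibm,req#msi-x", max_msix));
        }
    }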

It also changes rtas_ibm_change_msi() to select the appropriate MSI
type in Function 1 instead of always selecting plain MSI. This new
behaviour is compliant with LoPAPR 1.1, as described in "Table 71.
ibm,change-msi Argument Call Buffer":

  Function 1: If Number Outputs is equal to 3, request to set to a new
           number of MSIs (including set to 0).
           If the “ibm,change-msix-capable” property exists and Number
           Outputs is equal to 4, request is to set to a new number of
           MSI or MSI-X (platform choice) interrupts (including set to
           0).

Since MSI is the platform default (LoPAPR 6.2.3 MSI Option), let's
check for MSI support first.

And finally, it checks the input parameters are valid, as described in
LoPAPR 1.1 "R1–7.3.10.5.1–3":

  For the MSI option: The platform must return a Status of -3 (Parameter
  error) from ibm,change-msi, with no change in interrupt assignments if
  the PCI configuration address does not support MSI and Function 3 was
  requested (that is, the “ibm,req#msi” property must exist for the PCI
  configuration address in order to use Function 3), or does not support
  MSI-X and Function 4 is requested (that is, the “ibm,req#msi-x” property
  must exist for the PCI configuration address in order to use Function 4),
  or if neither MSIs nor MSI-Xs are supported and Function 1 is requested.

This ensures that the ret_intr_type variable contains a valid MSI type
for this device, and that spapr_msi_setmsg() won't corrupt the PCI status.

Signed-off-by: Greg Kurz <groug@kaod.org>
Reviewed-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
(cherry picked from commit 9cbe305b60)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-02-01 15:21:29 -06:00
Fam Zheng
50c6998c20 usb-storage: Fix share-rw option parsing
Because usb-storage creates an internal scsi device, we should propagate
options. We already do so for bootindex etc, but failed to take care of
share-rw. Fix it in an apparent way: add a new parameter to
scsi_bus_legacy_add_drive and pass in s->conf.share_rw.

Cc: qemu-stable@nongnu.org
Signed-off-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Message-id: 20180117005222.4781-1-famz@redhat.com
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
(cherry picked from commit 395b953959)
 Conflicts:
	hw/usb/dev-storage.c
* dropped context dep on ceff3e1f01
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-29 09:27:47 -06:00
Fam Zheng
dccdaacc3d osdep: Retry SETLK upon EINTR
We could hit lock failure if there is a signal that makes fcntl return
-1 and errno set to EINTR. In this case we should retry.

Cc: qemu-stable@nongnu.org
Signed-off-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
(cherry picked from commit f86428a1f4)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-29 09:14:03 -06:00
Christian Borntraeger
5683983e99 s390x/kvm: provide stfle.81
stfle.81 (ppa15) is a transparent facility that can be passed to the
guest without the need to implement hypervisor support. As this feature
can be provided by firmware we add it to all full models.

Cc: qemu-stable@nongnu.org
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Message-Id: <20180118085628.40798-4-borntraeger@de.ibm.com>
Reviewed-by: Halil Pasic <pasic@linux.vnet.ibm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
(cherry picked from commit 9f0d13f4f1)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-29 08:33:00 -06:00
Christian Borntraeger
4d79c0e434 s390x/kvm: Handle bpb feature
We need to handle the bpb control on reset and migration. Normally
stfle.82 is transparent (and the normal guest part works without
hypervisor activity). To prevent any issues we require full
host kernel support for this feature.

Cc: qemu-stable@nongnu.org
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Message-Id: <20180118085628.40798-3-borntraeger@de.ibm.com>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
[CH: 'Branch Prediction Blocking' -> 'Branch prediction blocking']
Signed-off-by: Cornelia Huck <cohuck@redhat.com>

(cherry picked from commit b073c87517)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-29 08:32:50 -06:00
Cornelia Huck
098132386d linux-headers: update
Update headers against 4.15-rc9.

Signed-off-by: Cornelia Huck <cohuck@redhat.com>
(cherry picked from commit 9cbb636270)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-29 08:32:23 -06:00
Eric Auger
61c8e67a66 linux-headers: update to 4.15-rc1
Update headers against v4.15-rc1.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
Message-id: 1511883692-11511-4-git-send-email-eric.auger@redhat.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
(cherry picked from commit dd8739669f)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-29 08:32:18 -06:00
Claudio Imbrenda
e7857ad997 s390x: fix storage attributes migration for non-small guests
Fix storage attribute migration so that it does not fail for guests
with more than a few GB of RAM.
With such guests, the index in the buffer would go out of bounds,
usually by large amounts, thus receiving -EFAULT from the kernel.
Migration itself would be successful, but storage attributes would then
not be migrated completely.

This patch fixes the out of bounds access, and thus migration of all
storage attributes when the guest has large amounts of memory.

Cc: qemu-stable@nongnu.org
Signed-off-by: Claudio Imbrenda <imbrenda@linux.vnet.ibm.com>
Fixes: 903fd80b03 ("s390x/migration: Storage attributes device")
Message-Id: <1516297904-18188-1-git-send-email-imbrenda@linux.vnet.ibm.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
(cherry picked from commit 46fa893355)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-29 08:28:08 -06:00
Peter Maydell
9327a8e2d6 linux-user: Fix locking order in fork_start()
Our locking order is that the tb lock should be taken
inside the mmap_lock, but fork_start() grabs locks the
other way around. This means that if a heavily multithreaded
guest process (such as Java) calls fork() it can deadlock,
with the thread that called fork() stuck in fork_start()
with the tb lock and waiting for the mmap lock, but some
other thread in tb_find() with the mmap lock and waiting
for the tb lock. The cpu_list_lock() should also always be
taken last, not first.

Fix this by making fork_start() grab the locks in the
right order. The order in which we drop locks doesn't
matter, so we leave fork_end() the way it is.
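
The corrected acquisition order, in sketch form (a sketch only; the exact
lock helper names may differ between releases):

    void fork_start(void)
    {
        /* mmap lock first, tb lock inside it, cpu list lock last */
        mmap_fork_start();
        qemu_mutex_lock(&tb_ctx.tb_lock);
        cpu_list_lock();
    }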

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Cc: qemu-stable@nongnu.org
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <1512397331-15238-1-git-send-email-peter.maydell@linaro.org>
Signed-off-by: Laurent Vivier <laurent@vivier.eu>
(cherry picked from commit 024949caf3)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-29 08:27:35 -06:00
Eduardo Habkost
8ebfafa796 i386: Add EPYC-IBPB CPU model
EPYC-IBPB is a copy of the EPYC CPU model with
just CPUID_8000_0008_EBX_IBPB added.

Cc: Jiri Denemark <jdenemar@redhat.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
Message-Id: <20180109154519.25634-7-ehabkost@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
(cherry picked from commit 6cfbc54e89)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-23 17:08:04 -06:00
Eduardo Habkost
61efbbf869 i386: Add new -IBRS versions of Intel CPU models
The new MSR IA32_SPEC_CTRL MSR was introduced by a recent Intel
microcode updated and can be used by OSes to mitigate
CVE-2017-5715.  Unfortunately we can't change the existing CPU
models without breaking existing setups, so users need to
explicitly update their VM configuration to use the new *-IBRS
CPU model if they want to expose IBRS to guests.

The new CPU models are simple copies of the existing CPU models,
with just CPUID_7_0_EDX_SPEC_CTRL added and model_id updated.
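
For example, a configuration that previously used one of the existing Intel
models would opt in to the IBRS variant explicitly (the exact model name
depends on which CPU model was in use):

    qemu-system-x86_64 -cpu Skylake-Client-IBRS ...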

Cc: Jiri Denemark <jdenemar@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
Message-Id: <20180109154519.25634-6-ehabkost@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
(cherry picked from commit ac96c41354)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-23 17:08:04 -06:00
Eduardo Habkost
1ade973f52 i386: Add FEAT_8000_0008_EBX CPUID feature word
Add the new feature word and the "ibpb" feature flag.

Based on a patch by Paolo Bonzini.

Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
Message-Id: <20180109154519.25634-5-ehabkost@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
(cherry picked from commit 1b3420e1c4)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-23 17:08:04 -06:00
Eduardo Habkost
803d42fa65 i386: Add spec-ctrl CPUID bit
Add the feature name and a CPUID_7_0_EDX_SPEC_CTRL macro.

Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
Message-Id: <20180109154519.25634-4-ehabkost@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
(cherry picked from commit a2381f0934)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-23 17:08:04 -06:00
Paolo Bonzini
cb2637a5ae i386: Add support for SPEC_CTRL MSR
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
Message-Id: <20180109154519.25634-3-ehabkost@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
(cherry picked from commit a33a2cfe2f)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-23 17:08:04 -06:00
Eduardo Habkost
4b220d88ba i386: Change X86CPUDefinition::model_id to const char*
It is valid to have a 48-character model ID on CPUID; however, the
definition of X86CPUDefinition::model_id is char[48], which can
make the compiler drop the null terminator from the string.

If a CPU model happens to have 48 bytes on model_id, "-cpu help"
will print garbage and the object_property_set_str() call at
x86_cpu_load_def() will read data outside the model_id array.

We could increase the array size to 49, but this would mean the
compiler would not issue a warning if a 49-char string is used by
mistake for model_id.

To make things simpler, simply change model_id to be const char*,
and validate the string length using an assert() on
x86_register_cpudef_type().
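
A sketch of the validation described (placed at class registration time):

    static void x86_register_cpudef_type(X86CPUDefinition *def)
    {
        /* CPUID leaves 0x80000002..0x80000004 carry at most 48 bytes of
         * model id; reject longer strings up front rather than silently
         * losing the terminating NUL. */
        assert(def->model_id);
        assert(strlen(def->model_id) <= 48);
        /* ... register the QOM type for this CPU model ... */
    }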

Reported-by: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
Message-Id: <20180109154519.25634-2-ehabkost@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
(cherry picked from commit 807e9869b8)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-23 17:08:04 -06:00
Marcel Apfelbaum
1027f3419b hw/pci-bridge: fix QEMU crash because of pcie-root-port
If we try to use more pcie_root_ports than available slots
and an IO hint is passed to the port, QEMU crashes because
we try to init the "IO hint" capability even if the device
is not created.
Fix it by checking for error before adding the capability,
so QEMU can fail gracefully.
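
The shape of the fix, as a hedged sketch (error plumbing approximated,
not copied verbatim from the patch):

    rpc->parent_realize(d, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;    /* the port was never created, so stop here */
    }
    /* only now is it safe to add the "IO hint" capability */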

Signed-off-by: Marcel Apfelbaum <marcel@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit fced4d00e6)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-23 16:45:21 -06:00
Stefan Hajnoczi
ccf82aee58 scsi-disk: release AioContext in unaligned WRITE SAME case
scsi_write_same_complete() can retry the write if the request was
unaligned.  Make sure to release the AioContext when that code path is
taken!

This patch fixes a hang when QEMU terminates after an unaligned WRITE
SAME request has been processed with dataplane.  The hang occurs because
iothread_stop_all() cannot acquire the AioContext lock that was leaked
by the IOThread in scsi_write_same_complete().
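
The pattern, as a simplified sketch (need_unaligned_retry and
resubmit_chunk are placeholders, not real QEMU symbols):

    AioContext *ctx = blk_get_aio_context(blk);

    aio_context_acquire(ctx);
    if (need_unaligned_retry) {
        resubmit_chunk();
        aio_context_release(ctx);    /* this release was missing */
        return;
    }
    /* ... complete the request ... */
    aio_context_release(ctx);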

Fixes: b9e413dd37 ("block: explicitly acquire aiocontext in aio callbacks that need it").
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: qemu-stable@nongnu.org
Reported-by: Cong Li <coli@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20180104142502.15175-1-stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit 24355b79bd)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-23 16:35:01 -06:00
Peter Maydell
1e14820884 hw/sd/ssi-sd: Reset SD card on controller reset
Since ssi-sd is still using the legacy SD card API, the SD
card created by sd_init() is not plugged into any bus. This
means that the controller has to reset it manually.

Failing to do this mostly didn't affect the guest since the
guest typically does a programmed SD card reset as part of
its SD controller driver initialization, but meant that
migration failed because it's only in sd_reset() that we
set up the wpgrps_size field.

In the case of ssi-sd, we have to implement an entire
reset function since there wasn't one previously, and
that requires a QOM cast macro that got omitted when this
device was QOMified.
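
Roughly, the new reset handler looks like this (sketch only; the real
patch also adds the SSI_SD() cast macro it uses):

    static void ssi_sd_reset(DeviceState *dev)
    {
        ssi_sd_state *s = SSI_SD(dev);

        /* ... reset the controller's own registers/state ... */

        /* The legacy sd_init() card is not on any bus: reset it by hand. */
        device_reset(DEVICE(s->sd));
    }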

Cc: qemu-stable@nongnu.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 1515506513-31961-4-git-send-email-peter.maydell@linaro.org
(cherry picked from commit 8046d44f3c)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-23 16:34:00 -06:00
Peter Maydell
a22b8096b6 hw/sd/milkymist-memcard: Reset SD card on controller reset
Since milkymist-memcard is still using the legacy SD card API,
the SD card created by sd_init() is not plugged into any bus.
This means that the controller has to reset it manually.

Failing to do this mostly didn't affect the guest since the
guest typically does a programmed SD card reset as part of
its SD controller driver initialization, but meant that
migration failed because it's only in sd_reset() that we
set up the wpgrps_size field.

Cc: qemu-stable@nongnu.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 1515506513-31961-3-git-send-email-peter.maydell@linaro.org
(cherry picked from commit 16bf0e0e7a)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-23 16:33:50 -06:00
Peter Maydell
3f44190cb6 hw/sd/pl181: Reset SD card on controller reset
Since pl181 is still using the legacy SD card API, the SD
card created by sd_init() is not plugged into any bus. This
means that the controller has to reset it manually.

Failing to do this mostly didn't affect the guest since the
guest typically does a programmed SD card reset as part of
its SD controller driver initialization, but meant that
migration failed because it's only in sd_reset() that we
set up the wpgrps_size field.

Cc: qemu-stable@nongnu.org
Fixes: https://bugs.launchpad.net/qemu/+bug/1739378
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 1515506513-31961-2-git-send-email-peter.maydell@linaro.org
(cherry picked from commit 0cb57cc701)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-23 16:33:39 -06:00
Jay Zhou
88bf4a70df vhost: remove assertion to prevent crash
QEMU will assert on vhost-user backed virtio device hotplug if QEMU is
using more RAM regions than VHOST_MEMORY_MAX_NREGIONS (for example if
it were started with a lot of DIMM devices).

Fix it by returning an error instead of asserting, and let callers of
vhost_set_mem_table() handle the error condition gracefully.
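
Schematically (paraphrased, not the literal diff), the hard assert in
the vhost-user memory-table path becomes:

    if (fd_num == VHOST_MEMORY_MAX_NREGIONS) {
        error_report("Failed preparing vhost-user memory table msg");
        return -1;
    }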

Cc: qemu-stable@nongnu.org
Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Jay Zhou <jianjay.zhou@huawei.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit f4bf56fb78)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-23 16:30:56 -06:00
Michael S. Tsirkin
f793121539 virtio_error: don't invoke status callbacks
Backends don't need to know which frontend requested a reset,
and notifying them from virtio_error is messy because
virtio_error itself might be invoked from a backend.

Let's just set the status directly.

Cc: qemu-stable@nongnu.org
Reported-by: Ilya Maximets <i.maximets@samsung.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit 8fc47c876d)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-15 18:18:05 -06:00
Peter Maydell
0af294d774 hw/intc/arm_gic: reserved register addresses are RAZ/WI
The GICv2 specification says that reserved register addresses
must be RAZ/WI; now that we implement external abort handling
for Arm CPUs this means we must return MEMTX_OK rather than
MEMTX_ERROR, to avoid generating a spurious guest data abort.
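
In MemTxResult terms the change amounts to (illustrative sketch):

    /* reserved GIC register: reads as zero, writes are ignored */
    *data = 0;
    return MEMTX_OK;    /* not MEMTX_ERROR, which would abort the guest */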

Cc: qemu-stable@nongnu.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 1513183941-24300-3-git-send-email-peter.maydell@linaro.org
Reviewed-by: Alistair Francis <alistair.francis@xilinx.com>
(cherry picked from commit 0cf0985201)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-11 15:10:57 -06:00
Peter Maydell
62425350b5 hw/intc/arm_gicv3: Make reserved register addresses RAZ/WI
The GICv3 specification says that reserved register addresses
should be RAZ/WI. This means we need to return MEMTX_OK, not MEMTX_ERROR,
because now that we support generating external aborts the
latter will cause an abort on new board models.

Cc: qemu-stable@nongnu.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 1513183941-24300-2-git-send-email-peter.maydell@linaro.org
Reviewed-by: Alistair Francis <alistair.francis@xilinx.com>
(cherry picked from commit f1945632b4)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-11 15:10:48 -06:00
Alex Williamson
d6f1448277 vfio: Fix vfio-kvm group registration
Commit 8c37faa475 ("vfio-pci, ppc64/spapr: Reorder group-to-container
attaching") moved registration of groups with the vfio-kvm device from
vfio_get_group() to vfio_connect_container(), but it missed the case
where a group is attached to an existing container and takes an early
exit.  Perhaps this is a less common case on ppc64/spapr, but on x86
(without viommu) all groups are connected to the same container and
thus only the first group gets registered with the vfio-kvm device.
This becomes a problem if we then hot-unplug the devices associated
with that first group and we end up with KVM being misinformed about
any vfio connections that might remain.  Fix by including the call to
vfio_kvm_device_add_group() in this early exit path.
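
Schematically, the early-exit branch in vfio_connect_container() now
reads (hedged sketch, details simplified):

    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            vfio_kvm_device_add_group(group);    /* previously missing */
            return 0;
        }
    }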

Fixes: 8c37faa475 ("vfio-pci, ppc64/spapr: Reorder group-to-container attaching")
Cc: qemu-stable@nongnu.org # qemu-2.10+
Reviewed-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: Peter Xu <peterx@redhat.com>
Tested-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Tested-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
(cherry picked from commit 2016986aed)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-09 11:09:36 -06:00
Fam Zheng
7f53a81073 block: Open backing image in force share mode for size probe
Management tools create overlays of running guests with qemu-img:

  $ qemu-img create -b /image/in/use.qcow2 -f qcow2 /overlay/image.qcow2

but this doesn't work anymore due to image locking:

    qemu-img: /overlay/image.qcow2: Failed to get shared "write" lock
    Is another process using the image?
    Could not open backing image to determine size.
Use the force share option to allow this use case again.

Cc: qemu-stable@nongnu.org
Signed-off-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
(cherry picked from commit cc954f01e3)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-09 10:49:44 -06:00
Kevin Wolf
2b0c34cf61 block: Call .drain_begin only once in bdrv_drain_all_begin()
bdrv_drain_all_begin() used to call the .bdrv_co_drain_begin() driver
callback inside its polling loop. This means that how many times it got
called for each node depended on how long it had to poll the event loop.

This is obviously not right and results in nodes that stay drained even
after bdrv_drain_all_end(), which calls .bdrv_co_drain_begin() once per
node.

Fix bdrv_drain_all_begin() to call the callback only once, too.

Cc: qemu-stable@nongnu.org
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
(cherry picked from commit 2da9b7d456)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-09 10:49:13 -06:00
Kevin Wolf
057364da77 block: Make bdrv_drain_invoke() recursive
This change separates bdrv_drain_invoke(), which calls the BlockDriver
drain callbacks, from bdrv_drain_recurse(). Instead, the function
performs its own recursion now.

One reason for this is that bdrv_drain_recurse() can be called multiple
times by bdrv_drain_all_begin(), but the callbacks may only be called
once. The separation is necessary to fix this bug.

The other reason is that we intend to go to a model where we call all
driver callbacks first, and only then start polling. This is not fully
achieved yet with this patch, as bdrv_drain_invoke() contains a
BDRV_POLL_WHILE() loop for the block driver callbacks, which can still
call callbacks for any unrelated event. It's a step in this direction
anyway.

Cc: qemu-stable@nongnu.org
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
(cherry picked from commit db0289b9b2)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-09 10:48:39 -06:00
Murilo Opsfelder Araujo
b9da3c1de7 block/nbd: fix segmentation fault when .desc is not null-terminated
The find_desc_by_name() from util/qemu-option.c relies on the .name not being
NULL to call strcmp(). This check becomes unsafe when the list is not
NULL-terminated, which is the case for nbd_runtime_opts in block/nbd.c, and can
result in a segmentation fault when strcmp() tries to access invalid memory:

    #0 0x00007fff8c75f7d4 in __strcmp_power9 () from /lib64/libc.so.6
    #1 0x00000000102d3ec8 in find_desc_by_name (desc=0x1036d6f0, name=0x28e46670 "server.path") at util/qemu-option.c:166
    #2 0x00000000102d93e0 in qemu_opts_absorb_qdict (opts=0x28e47a80, qdict=0x28e469a0, errp=0x7fffec247c98) at util/qemu-option.c:1026
    #3 0x000000001012a2e4 in nbd_open (bs=0x28e42290, options=0x28e469a0, flags=24578, errp=0x7fffec247d80) at block/nbd.c:406
    #4 0x00000000100144e8 in bdrv_open_driver (bs=0x28e42290, drv=0x1036e070 <bdrv_nbd_unix>, node_name=0x0, options=0x28e469a0, open_flags=24578, errp=0x7fffec247f50) at block.c:1135
    #5 0x0000000010015b04 in bdrv_open_common (bs=0x28e42290, file=0x0, options=0x28e469a0, errp=0x7fffec247f50) at block.c:1395

From gdb, the desc[i].name was not NULL and resulted in strcmp() accessing
invalid memory:

    >>> p desc[5]
    $8 = {
      name = 0x1037f098 "R27A",
      type = 1561964883,
      help = 0xc0bbb23e <error: Cannot access memory at address 0xc0bbb23e>,
      def_value_str = 0x2 <error: Cannot access memory at address 0x2>
    }
    >>> p desc[6]
    $9 = {
      name = 0x103dac78 <__gcov0.do_qemu_init_bdrv_nbd_init> "\001",
      type = 272101528,
      help = 0x29ec0b754403e31f <error: Cannot access memory at address 0x29ec0b754403e31f>,
      def_value_str = 0x81f343b9 <error: Cannot access memory at address 0x81f343b9>
    }

This patch fixes the segmentation fault in strcmp() by adding a NULL element at
the end of the nbd_runtime_opts.desc list, which is the common practice in most
other structs, such as runtime_opts in block/null.c. Thus, the desc[i].name != NULL
check becomes safe because it will not evaluate to true once the .desc list has
reached its end.
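
As a hedged sketch, the descriptor list simply gains an empty
terminating entry, which makes the .name != NULL loop test reliable:

    static QemuOptsList nbd_runtime_opts = {
        .name = "nbd",
        .head = QTAILQ_HEAD_INITIALIZER(nbd_runtime_opts.head),
        .desc = {
            /* ... the existing "host", "port", "path", ... entries ... */
            { /* end of list */ },
        },
    };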

Reported-by: R. Nageswara Sastry <nasastry@in.ibm.com>
Buglink: https://bugs.launchpad.net/qemu/+bug/1727259
Signed-off-by: Murilo Opsfelder Araujo <muriloo@linux.vnet.ibm.com>
Message-Id: <20180105133241.14141-2-muriloo@linux.vnet.ibm.com>
CC: qemu-stable@nongnu.org
Fixes: 7ccc44fd7d
Signed-off-by: Eric Blake <eblake@redhat.com>
(cherry picked from commit c4365735a7)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-09 10:41:11 -06:00
Paolo Bonzini
c184e17c75 qemu-pr-helper: miscellaneous fixes
1) Return a generic sense if TEST UNIT READY does not provide one;

2) Fix two mistakes in copying from the spec.

Cc: qemu-stable@nongnu.org
Reported-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit a4a9b6eaf3)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-08 07:52:41 -06:00
Markus Armbruster
5ba945f1cb qemu-options: Remove stray colons from output of --help
Commit 43f187a broke --help: it put colons into blank lines.  It
removed the colon from DEFHEADING(TITLE:) and added it back in the
macro expansion of DEFHEADING(TITLE), so hxtool can emit "@subsection
TITLE" more easily.  Trouble is it's added back even for the blank
lines made with DEFHEADING().

Put the colons back where they were before commit 43f187a, and strip
them in hxtool instead.

Cc: Paolo Bonzini <pbonzini@redhat.com>
CC: qemu-stable@nongnu.org
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Message-Id: <20171002140307.5292-2-armbru@redhat.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
(cherry picked from commit de6b4f908c)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-08 07:52:41 -06:00
Alex Bennée
ea311a9959 target/sh4: fix TCG leak during gusa sequence
This fixes bug #1735384 while running java under qemu-sh4. When debug
was enabled it showed a problem with TCG temps. Once fixed I was able
to run java -version normally.

Cc: qemu-stable@nongnu.org
Reported-by: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Suggested-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20171206093050.25308-1-alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Aurelien Jarno <aurelien@aurel32.net>
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
(cherry picked from commit 6d56fc6cc3)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-08 07:52:41 -06:00
Peter Lieven
fd89d93e85 block/iscsi: don't leave allocmap in an invalid state on UNMAP failure
we forgot to set the allocmap to invalid if an UNMAP call fails.

Cc: qemu-stable@nongnu.org
Signed-off-by: Peter Lieven <pl@kamp.de>
Message-Id: <1512733868-9009-2-git-send-email-pl@kamp.de>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit aef172ffdc)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-08 07:40:25 -06:00
Peter Maydell
817a9fcba8 target/i386: Fix handling of VEX prefixes
In commit e3af7c788b we
replaced direct calls to cpu_ld*_code() with calls
to the x86_ld*_code() wrappers which incorporate an
advance of s->pc. Unfortunately we didn't notice that
in one place the old code was deliberately not incrementing
s->pc:

@@ -4501,7 +4528,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             static const int pp_prefix[4] = {
                 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
             };
-            int vex3, vex2 = cpu_ldub_code(env, s->pc);
+            int vex3, vex2 = x86_ldub_code(env, s);

             if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
                 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,

This meant we were mishandling this set of instructions.
Remove the manual advance of s->pc for the "is VEX" case
(which is now done by x86_ldub_code()) and instead rewind
PC in the case where we decide that this isn't really VEX.
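
Roughly (sketch, not the exact upstream hunk):

    int vex3, vex2 = x86_ldub_code(env, s);    /* advances s->pc */

    if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
        /* Not a VEX prefix after all (LES/LDS): undo the advance. */
        s->pc--;
        break;
    }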

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Cc: qemu-stable@nongnu.org
Reported-by: Alexandro Sanchez Bach <alexandro@phi.nz>
Message-Id: <1513163959-17545-1-git-send-email-peter.maydell@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit cfcca361d7)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
2018-01-08 07:39:13 -06:00
5099 changed files with 150734 additions and 629301 deletions


@@ -1,27 +0,0 @@
env:
CIRRUS_CLONE_DEPTH: 1
freebsd_12_task:
freebsd_instance:
image: freebsd-12-0-release-amd64
cpu: 8
memory: 8G
install_script: pkg install -y
bison curl cyrus-sasl git glib gmake gnutls
nettle perl5 pixman pkgconf png usbredir
script:
- mkdir build
- cd build
- ../configure || { cat config.log; exit 1; }
- gmake -j8
- gmake -j8 V=1 check
macos_task:
osx_instance:
image: mojave-base
install_script:
- brew install pkg-config python glib pixman make sdl2
script:
- ./configure --python=/usr/local/bin/python3 || { cat config.log; exit 1; }
- gmake -j$(sysctl -n hw.ncpu)
- gmake check -j$(sysctl -n hw.ncpu)


@@ -1,10 +1,4 @@
# EditorConfig is a file format and collection of text editor plugins
# for maintaining consistent coding styles between different editors
# and IDEs. Most popular editors support this either natively or via
# plugin.
#
# Check https://editorconfig.org for details.
# http://editorconfig.org
root = true
[*]
@@ -12,23 +6,10 @@ end_of_line = lf
insert_final_newline = true
charset = utf-8
[*.mak]
indent_style = tab
indent_size = 8
file_type_emacs = makefile
[Makefile*]
indent_style = tab
indent_size = 8
file_type_emacs = makefile
[*.{c,h}]
indent_style = space
indent_size = 4
[*.{vert,frag}]
file_type_emacs = glsl
[*.json]
indent_style = space
file_type_emacs = python

.gitignore

@@ -1,4 +1,3 @@
/.doctrees
/config-devices.*
/config-all-devices.*
/config-all-disas.*
@@ -6,7 +5,6 @@
/config-target.*
/config.status
/config-temp
/elf2dmp
/trace-events-all
/trace/generated-events.h
/trace/generated-events.c
@@ -29,24 +27,16 @@
/libuser
/linux-headers/asm
/qga/qapi-generated
/qapi-gen-timestamp
/qapi/qapi-builtin-types.[ch]
/qapi/qapi-builtin-visit.[ch]
/qapi/qapi-commands-*.[ch]
/qapi/qapi-commands.[ch]
/qapi/qapi-emit-events.[ch]
/qapi/qapi-events-*.[ch]
/qapi/qapi-events.[ch]
/qapi/qapi-introspect.[ch]
/qapi/qapi-types-*.[ch]
/qapi/qapi-types.[ch]
/qapi/qapi-visit-*.[ch]
/qapi/qapi-visit.[ch]
/qapi/qapi-doc.texi
/qapi-generated
/qapi-types.[ch]
/qapi-visit.[ch]
/qapi-event.[ch]
/qmp-commands.h
/qmp-introspect.[ch]
/qmp-marshal.c
/qemu-doc.html
/qemu-doc.info
/qemu-doc.txt
/qemu-edid
/qemu-img
/qemu-nbd
/qemu-options.def
@@ -63,8 +53,8 @@
/qemu-version.h.tmp
/module_block.h
/scsi/qemu-pr-helper
/vscclient
/vhost-user-scsi
/vhost-user-blk
/fsdev/virtfs-proxy-helper
*.tmp
*.[1-9]
@@ -95,7 +85,6 @@
.sdk
*.gcda
*.gcno
*.gcov
/pc-bios/bios-pq/status
/pc-bios/vgabios-pq/status
/pc-bios/optionrom/linuxboot.asm
@@ -106,10 +95,6 @@
/pc-bios/optionrom/linuxboot_dma.bin
/pc-bios/optionrom/linuxboot_dma.raw
/pc-bios/optionrom/linuxboot_dma.img
/pc-bios/optionrom/pvh.asm
/pc-bios/optionrom/pvh.bin
/pc-bios/optionrom/pvh.raw
/pc-bios/optionrom/pvh.img
/pc-bios/optionrom/multiboot.asm
/pc-bios/optionrom/multiboot.bin
/pc-bios/optionrom/multiboot.raw
@@ -120,7 +105,6 @@
/pc-bios/optionrom/kvmvapic.img
/pc-bios/s390-ccw/s390-ccw.elf
/pc-bios/s390-ccw/s390-ccw.img
/docs/built
/docs/interop/qemu-ga-qapi.texi
/docs/interop/qemu-ga-ref.html
/docs/interop/qemu-ga-ref.info*
@@ -156,4 +140,3 @@ trace-dtrace-root.h
trace-dtrace-root.dtrace
trace-ust-all.h
trace-ust-all.c
/target/arm/decode-sve.inc.c


@@ -1,73 +0,0 @@
before_script:
- apt-get update -qq
- apt-get install -y -qq flex bison libglib2.0-dev libpixman-1-dev genisoimage
build-system1:
script:
- apt-get install -y -qq libgtk-3-dev libvte-dev nettle-dev libcacard-dev
libusb-dev libvde-dev libspice-protocol-dev libgl1-mesa-dev
- ./configure --enable-werror --target-list="aarch64-softmmu alpha-softmmu
cris-softmmu hppa-softmmu lm32-softmmu moxie-softmmu microblazeel-softmmu
mips64el-softmmu m68k-softmmu ppc-softmmu riscv64-softmmu sparc-softmmu"
- make -j2
- make -j2 check
build-system2:
script:
- apt-get install -y -qq libsdl2-dev libgcrypt-dev libbrlapi-dev libaio-dev
libfdt-dev liblzo2-dev librdmacm-dev libibverbs-dev libibumad-dev
- ./configure --enable-werror --target-list="tricore-softmmu unicore32-softmmu
microblaze-softmmu mips-softmmu riscv32-softmmu s390x-softmmu sh4-softmmu
sparc64-softmmu x86_64-softmmu xtensa-softmmu nios2-softmmu or1k-softmmu"
- make -j2
- make -j2 check
build-disabled:
script:
- ./configure --enable-werror --disable-rdma --disable-slirp --disable-curl
--disable-capstone --disable-live-block-migration --disable-glusterfs
--disable-replication --disable-coroutine-pool --disable-smartcard
--disable-guest-agent --disable-curses --disable-libxml2 --disable-tpm
--disable-qom-cast-debug --disable-spice --disable-vhost-vsock
--disable-vhost-net --disable-vhost-crypto --disable-vhost-user
--target-list="i386-softmmu ppc64-softmmu mips64-softmmu i386-linux-user"
- make -j2
- make -j2 check-qtest SPEED=slow
build-tcg-disabled:
script:
- apt-get install -y -qq clang libgtk-3-dev libbluetooth-dev libusb-dev
- ./configure --cc=clang --enable-werror --disable-tcg --audio-drv-list=""
- make -j2
- make check-unit
- make check-qapi-schema
- cd tests/qemu-iotests/
- ./check -raw 001 002 003 004 005 008 009 010 011 012 021 025 032 033 048
052 063 077 086 101 104 106 113 147 148 150 151 152 157 159 160
163 170 171 183 184 192 194 197 205 208 215 221 222 226 227 236
- ./check -qcow2 001 002 003 004 005 007 008 009 010 011 012 013 017 018 019
020 021 022 024 025 027 028 029 031 032 033 034 035 036 037 038
039 040 042 043 046 047 048 049 050 051 052 053 054 056 057 058
060 061 062 063 065 066 067 068 069 071 072 073 074 079 080 082
085 086 089 090 091 095 096 097 098 099 102 103 104 105 107 108
110 111 114 117 120 122 124 126 127 129 130 132 133 134 137 138
139 140 141 142 143 144 145 147 150 151 152 154 155 156 157 158
161 165 170 172 174 176 177 179 184 186 187 190 192 194 195 196
197 200 202 203 205 208 209 214 215 216 217 218 222 226 227 229 234
build-user:
script:
- ./configure --enable-werror --disable-system --disable-guest-agent
--disable-capstone --disable-slirp --disable-fdt
- make -j2
- make run-tcg-tests-i386-linux-user run-tcg-tests-x86_64-linux-user
build-clang:
script:
- apt-get install -y -qq clang libsdl2-dev
xfslibs-dev libiscsi-dev libnfs-dev libseccomp-dev gnutls-dev librbd-dev
- ./configure --cc=clang --cxx=clang++ --enable-werror
--target-list="alpha-softmmu arm-softmmu m68k-softmmu mips64-softmmu
ppc-softmmu s390x-softmmu x86_64-softmmu arm-linux-user"
- make -j2
- make -j2 check

.gitmodules

@@ -1,54 +1,42 @@
[submodule "roms/vgabios"]
path = roms/vgabios
url = git://git.qemu-project.org/vgabios.git/
[submodule "roms/seabios"]
path = roms/seabios
url = https://git.qemu.org/git/seabios.git/
url = git://git.qemu-project.org/seabios.git/
[submodule "roms/SLOF"]
path = roms/SLOF
url = https://git.qemu.org/git/SLOF.git
url = git://git.qemu-project.org/SLOF.git
[submodule "roms/ipxe"]
path = roms/ipxe
url = https://git.qemu.org/git/ipxe.git
url = git://git.qemu-project.org/ipxe.git
[submodule "roms/openbios"]
path = roms/openbios
url = https://git.qemu.org/git/openbios.git
url = git://git.qemu-project.org/openbios.git
[submodule "roms/openhackware"]
path = roms/openhackware
url = https://git.qemu.org/git/openhackware.git
url = git://git.qemu-project.org/openhackware.git
[submodule "roms/qemu-palcode"]
path = roms/qemu-palcode
url = https://git.qemu.org/git/qemu-palcode.git
url = git://github.com/rth7680/qemu-palcode.git
[submodule "roms/sgabios"]
path = roms/sgabios
url = https://git.qemu.org/git/sgabios.git
url = git://git.qemu-project.org/sgabios.git
[submodule "dtc"]
path = dtc
url = https://git.qemu.org/git/dtc.git
url = git://git.qemu-project.org/dtc.git
[submodule "roms/u-boot"]
path = roms/u-boot
url = https://git.qemu.org/git/u-boot.git
url = git://git.qemu-project.org/u-boot.git
[submodule "roms/skiboot"]
path = roms/skiboot
url = https://git.qemu.org/git/skiboot.git
url = git://git.qemu.org/skiboot.git
[submodule "roms/QemuMacDrivers"]
path = roms/QemuMacDrivers
url = https://git.qemu.org/git/QemuMacDrivers.git
url = git://git.qemu.org/QemuMacDrivers.git
[submodule "ui/keycodemapdb"]
path = ui/keycodemapdb
url = https://git.qemu.org/git/keycodemapdb.git
url = git://git.qemu.org/keycodemapdb.git
[submodule "capstone"]
path = capstone
url = https://git.qemu.org/git/capstone.git
[submodule "roms/seabios-hppa"]
path = roms/seabios-hppa
url = https://github.com/hdeller/seabios-hppa.git
[submodule "roms/u-boot-sam460ex"]
path = roms/u-boot-sam460ex
url = https://git.qemu.org/git/u-boot-sam460ex.git
[submodule "tests/fp/berkeley-testfloat-3"]
path = tests/fp/berkeley-testfloat-3
url = https://github.com/cota/berkeley-testfloat-3
[submodule "tests/fp/berkeley-softfloat-3"]
path = tests/fp/berkeley-softfloat-3
url = https://github.com/cota/berkeley-softfloat-3
[submodule "roms/edk2"]
path = roms/edk2
url = https://github.com/tianocore/edk2.git
url = git://git.qemu.org/capstone.git


@@ -1,51 +0,0 @@
#
# Common git-publish profiles that can be used to send patches to QEMU upstream.
#
# See https://github.com/stefanha/git-publish for more information
#
[gitpublishprofile "default"]
base = master
to = qemu-devel@nongnu.org
cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null
[gitpublishprofile "rfc"]
base = master
prefix = RFC PATCH
to = qemu-devel@nongnu.org
cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null
[gitpublishprofile "stable"]
base = master
to = qemu-devel@nongnu.org
cc = qemu-stable@nongnu.org
cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null
[gitpublishprofile "trivial"]
base = master
to = qemu-devel@nongnu.org
cc = qemu-trivial@nongnu.org
cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null
[gitpublishprofile "block"]
base = master
to = qemu-devel@nongnu.org
cc = qemu-block@nongnu.org
cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null
[gitpublishprofile "arm"]
base = master
to = qemu-devel@nongnu.org
cc = qemu-arm@nongnu.org
cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null
[gitpublishprofile "s390"]
base = master
to = qemu-devel@nongnu.org
cc = qemu-s390@nongnu.org
cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null
[gitpublishprofile "ppc"]
base = master
to = qemu-devel@nongnu.org
cc = qemu-ppc@nongnu.org
cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null


@@ -1,7 +1,6 @@
# This mailmap fixes up author names/addresses.
# The first section translates weird addresses from the original git import
# into proper addresses so that they are counted properly by git shortlog.
# This mailmap just translates the weird addresses from the original import into git
# into proper addresses so that they are counted properly in git shortlog output.
#
Andrzej Zaborowski <balrogg@gmail.com> balrog <balrog@c046a42c-6fe2-441c-8c8c-71466251a162>
Anthony Liguori <anthony@codemonkey.ws> aliguori <aliguori@c046a42c-6fe2-441c-8c8c-71466251a162>
Anthony Liguori <anthony@codemonkey.ws> Anthony Liguori <aliguori@us.ibm.com>
@@ -12,28 +11,10 @@ Fabrice Bellard <fabrice@bellard.org> bellard <bellard@c046a42c-6fe2-441c-8c8c-7
James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
Jocelyn Mayer <l_indien@magic.fr> j_mayer <j_mayer@c046a42c-6fe2-441c-8c8c-71466251a162>
Paul Brook <paul@codesourcery.com> pbrook <pbrook@c046a42c-6fe2-441c-8c8c-71466251a162>
Yongbok Kim <yongbok.kim@mips.com> <yongbok.kim@imgtec.com>
Aleksandar Markovic <amarkovic@wavecomp.com> <aleksandar.markovic@mips.com>
Aleksandar Markovic <amarkovic@wavecomp.com> <aleksandar.markovic@imgtec.com>
Paul Burton <pburton@wavecomp.com> <paul.burton@mips.com>
Paul Burton <pburton@wavecomp.com> <paul.burton@imgtec.com>
Paul Burton <pburton@wavecomp.com> <paul@archlinuxmips.org>
Paul Burton <paul.burton@mips.com> <paul.burton@imgtec.com>
Paul Burton <paul.burton@mips.com> <paul@archlinuxmips.org>
Thiemo Seufer <ths@networkno.de> ths <ths@c046a42c-6fe2-441c-8c8c-71466251a162>
malc <av1474@comtv.ru> malc <malc@c046a42c-6fe2-441c-8c8c-71466251a162>
# There is also a:
# (no author) <(no author)@c046a42c-6fe2-441c-8c8c-71466251a162>
# for the cvs2svn initialization commit e63c3dc74bf.
# Next, translate a few commits where mailman rewrote the From: line due
# to strict SPF, although we prefer to avoid adding more entries like that.
Ed Swierk <eswierk@skyportsystems.com> Ed Swierk via Qemu-devel <qemu-devel@nongnu.org>
Ian McKellar <ianloic@google.com> Ian McKellar via Qemu-devel <qemu-devel@nongnu.org>
Julia Suvorova <jusual@mail.ru> Julia Suvorova via Qemu-devel <qemu-devel@nongnu.org>
Justin Terry (VM) <juterry@microsoft.com> Justin Terry (VM) via Qemu-devel <qemu-devel@nongnu.org>
# Also list preferred name forms where people have changed their
# git author config, or had utf8/latin1 encoding issues.
Daniel P. Berrangé <berrange@redhat.com>
Reimar Döffinger <Reimar.Doeffinger@gmx.de>


@@ -7,11 +7,10 @@ env:
matrix:
- IMAGE=debian-amd64
TARGET_LIST=x86_64-softmmu,x86_64-linux-user
# currently disabled as the mxe.cc repos are down
# - IMAGE=debian-win32-cross
# TARGET_LIST=arm-softmmu,i386-softmmu,lm32-softmmu
# - IMAGE=debian-win64-cross
# TARGET_LIST=aarch64-softmmu,sparc64-softmmu,x86_64-softmmu
- IMAGE=debian-win32-cross
TARGET_LIST=arm-softmmu,i386-softmmu,lm32-softmmu
- IMAGE=debian-win64-cross
TARGET_LIST=aarch64-softmmu,sparc64-softmmu,x86_64-softmmu
- IMAGE=debian-armel-cross
TARGET_LIST=arm-softmmu,arm-linux-user,armeb-linux-user
- IMAGE=debian-armhf-cross
@@ -24,6 +23,8 @@ env:
TARGET_LIST=mips-softmmu,mipsel-linux-user
- IMAGE=debian-mips64el-cross
TARGET_LIST=mips64el-softmmu,mips64el-linux-user
- IMAGE=debian-powerpc-cross
TARGET_LIST=ppc-softmmu,ppcemb-softmmu,ppc-linux-user
- IMAGE=debian-ppc64el-cross
TARGET_LIST=ppc64-softmmu,ppc64-linux-user,ppc64abi32-linux-user
build:
@@ -36,5 +37,13 @@ build:
options: "-e HOME=/root"
ci:
- unset CC
# some targets require newer up to date packages, for example TARGET_LIST matching
# aarch64*-softmmu|arm*-softmmu|ppc*-softmmu|microblaze*-softmmu|mips64el-softmmu)
# see the configure script:
# error_exit "DTC (libfdt) version >= 1.4.2 not present. Your options:"
# " (1) Preferred: Install the DTC (libfdt) devel package"
# " (2) Fetch the DTC submodule, using:"
# " git submodule update --init dtc"
- dpkg --compare-versions `dpkg-query --showformat='${Version}' --show libfdt-dev` ge 1.4.2 || git submodule update --init dtc
- ./configure ${QEMU_CONFIGURE_OPTS} --target-list=${TARGET_LIST}
- make -j$(($(getconf _NPROCESSORS_ONLN) + 1))


@@ -1,13 +1,10 @@
# The current Travis default is a VM based 16.04 Xenial on GCE
# Additional builds with specific requirements for a full VM need to
# be added as additional matrix: entries later on
dist: xenial
sudo: false
language: c
python:
- "2.4"
compiler:
- gcc
cache: ccache
addons:
apt:
packages:
@@ -16,13 +13,12 @@ addons:
- libattr1-dev
- libbrlapi-dev
- libcap-ng-dev
- libgcc-4.8-dev
- libgnutls-dev
- libgtk-3-dev
- libiscsi-dev
- liblttng-ust-dev
- libncurses5-dev
- libnfs-dev
- libncurses5-dev
- libnss3-dev
- libpixman-1-dev
- libpng12-dev
@@ -34,15 +30,9 @@ addons:
- libssh2-1-dev
- liburcu-dev
- libusb-1.0-0-dev
- libvte-2.91-dev
- libvte-2.90-dev
- sparse
- uuid-dev
- gcovr
homebrew:
packages:
- glib
- pixman
# The channel name "irc.oftc.net#qemu" is encrypted against qemu/qemu
# to prevent IRC notifications from forks. This was created using:
@@ -53,194 +43,126 @@ notifications:
- secure: "F7GDRgjuOo5IUyRLqSkmDL7kvdU4UcH3Lm/W2db2JnDHTGCqgEdaYEYKciyCLZ57vOTsTsOgesN8iUT7hNHBd1KWKjZe9KDTZWppWRYVwAwQMzVeSOsbbU4tRoJ6Pp+3qhH1Z0eGYR9ZgKYAoTumDFgSAYRp4IscKS8jkoedOqM="
on_success: change
on_failure: always
env:
global:
- SRC_DIR="."
- BUILD_DIR="."
- BASE_CONFIG="--disable-docs --disable-tools"
- TEST_CMD="make check -j3 V=1"
# This is broadly a list of "mainline" softmmu targets which have support across the major distros
- MAIN_SOFTMMU_TARGETS="aarch64-softmmu,arm-softmmu,i386-softmmu,mips-softmmu,mips64-softmmu,ppc64-softmmu,riscv64-softmmu,s390x-softmmu,x86_64-softmmu"
- TEST_CMD="make check"
- MAKEFLAGS="-j3"
matrix:
- CONFIG=""
- CONFIG="--enable-debug --enable-debug-tcg --enable-trace-backends=log"
- CONFIG="--disable-linux-aio --disable-cap-ng --disable-attr --disable-brlapi --disable-uuid --disable-libusb"
- CONFIG="--enable-modules"
- CONFIG="--with-coroutine=ucontext"
- CONFIG="--with-coroutine=sigaltstack"
git:
# we want to do this ourselves
submodules: false
before_install:
- if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew update ; fi
- if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew install libffi gettext glib pixman ; fi
- wget -O - http://people.linaro.org/~alex.bennee/qemu-submodule-git-seed.tar.xz | tar -xvJ
- git submodule update --init --recursive
before_script:
- mkdir -p ${BUILD_DIR} && cd ${BUILD_DIR}
- ${SRC_DIR}/configure ${BASE_CONFIG} ${CONFIG} || { cat config.log && exit 1; }
- ./configure ${CONFIG}
script:
- make -j3 && ${TEST_CMD}
- make ${MAKEFLAGS} && ${TEST_CMD}
matrix:
include:
- env:
- CONFIG="--disable-system"
# we split the system builds as it takes a while to build them all
- env:
- CONFIG="--disable-user --target-list=${MAIN_SOFTMMU_TARGETS}"
- env:
- CONFIG="--disable-user --target-list-exclude=${MAIN_SOFTMMU_TARGETS}"
# Just build tools and run minimal unit and softfloat checks
- env:
- BASE_CONFIG="--enable-tools"
- CONFIG="--disable-user --disable-system"
- TEST_CMD="make check-unit check-softfloat -j3"
- env:
- CONFIG="--enable-debug --enable-debug-tcg --disable-user"
# TCG debug can be run just on it's own and is mostly agnostic to user/softmmu distinctions
- env:
- CONFIG="--enable-debug-tcg --disable-system"
- env:
- CONFIG="--disable-linux-aio --disable-cap-ng --disable-attr --disable-brlapi --disable-libusb --disable-replication --target-list=${MAIN_SOFTMMU_TARGETS}"
# Module builds are mostly of interest to major distros
- env:
- CONFIG="--enable-modules --target-list=${MAIN_SOFTMMU_TARGETS}"
# Alternate coroutines implementations are only really of interest to KVM users
# However we can't test against KVM on Travis so we can only run unit tests
- env:
- CONFIG="--with-coroutine=ucontext --disable-tcg"
- TEST_CMD="make check-unit -j3 V=1"
- env:
- CONFIG="--with-coroutine=sigaltstack --disable-tcg"
- TEST_CMD="make check-unit -j3 V=1"
# Check we can build docs and tools (out of tree)
- env:
- BUILD_DIR="out-of-tree/build/dir" SRC_DIR="../../.."
- BASE_CONFIG="--enable-tools --enable-docs"
- CONFIG="--target-list=x86_64-softmmu,aarch64-linux-user"
addons:
apt:
packages:
- python-sphinx
- texinfo
- perl
# Test with Clang for compile portability (Travis uses clang-5.0)
- env:
- CONFIG="--disable-system"
# Test with CLang for compile portability
- env: CONFIG=""
compiler: clang
- env:
- CONFIG="--disable-user --target-list=${MAIN_SOFTMMU_TARGETS}"
compiler: clang
- env:
- CONFIG="--disable-user --target-list-exclude=${MAIN_SOFTMMU_TARGETS}"
compiler: clang
# gprof/gcov are GCC features
- env:
- CONFIG="--enable-gprof --enable-gcov --disable-pie --target-list=${MAIN_SOFTMMU_TARGETS}"
after_success:
- ${SRC_DIR}/scripts/travis/coverage-summary.sh
- env: CONFIG="--enable-gprof --enable-gcov --disable-pie"
compiler: gcc
# We manually include builds which we disable "make check" for
- env:
- CONFIG="--without-default-devices --disable-user"
- TEST_CMD=""
# We manually include builds which we disable "make check" for
- env:
- CONFIG="--enable-debug --enable-tcg-interpreter"
- TEST_CMD=""
# We don't need to exercise every backend with every front-end
- env:
- CONFIG="--enable-trace-backends=log,simple,syslog --disable-system"
- TEST_CMD=""
- env:
- CONFIG="--enable-trace-backends=ftrace --target-list=x86_64-softmmu"
- TEST_CMD=""
- env:
- CONFIG="--enable-trace-backends=ust --target-list=x86_64-softmmu"
- TEST_CMD=""
# MacOSX builds
- env:
- CONFIG="--target-list=${MAIN_SOFTMMU_TARGETS}"
- env: CONFIG="--enable-debug --enable-tcg-interpreter"
TEST_CMD=""
compiler: gcc
- env: CONFIG="--enable-trace-backends=simple"
TEST_CMD=""
compiler: gcc
- env: CONFIG="--enable-trace-backends=ftrace"
TEST_CMD=""
compiler: gcc
- env: CONFIG="--enable-trace-backends=ust"
TEST_CMD=""
compiler: gcc
- env: CONFIG="--disable-tcg"
TEST_CMD=""
compiler: gcc
- env: CONFIG=""
os: osx
osx_image: xcode9.4
compiler: clang
- env:
- CONFIG="--target-list=i386-softmmu,ppc-softmmu,ppc64-softmmu,m68k-softmmu,x86_64-softmmu"
os: osx
osx_image: xcode10.2
compiler: clang
# Python builds
- env:
- CONFIG="--target-list=x86_64-softmmu"
language: python
python:
- "3.4"
- env:
- CONFIG="--target-list=x86_64-softmmu"
language: python
python:
- "3.6"
# Acceptance (Functional) tests
- env:
- CONFIG="--python=/usr/bin/python3 --target-list=x86_64-softmmu"
- TEST_CMD="make AVOCADO_SHOW=app check-acceptance"
# Plain Trusty System Build
- env: CONFIG="--disable-linux-user"
sudo: required
addons:
apt:
packages:
- python3-pip
- python3.5-venv
dist: trusty
compiler: gcc
before_install:
- sudo apt-get update -qq
- sudo apt-get build-dep -qq qemu
- wget -O - http://people.linaro.org/~alex.bennee/qemu-submodule-git-seed.tar.xz | tar -xvJ
- git submodule update --init --recursive
# Plain Trusty Linux User Build
- env: CONFIG="--disable-system"
sudo: required
addons:
dist: trusty
compiler: gcc
before_install:
- sudo apt-get update -qq
- sudo apt-get build-dep -qq qemu
- wget -O - http://people.linaro.org/~alex.bennee/qemu-submodule-git-seed.tar.xz | tar -xvJ
- git submodule update --init --recursive
# Trusty System build with latest stable clang
- sudo: required
addons:
dist: trusty
language: generic
compiler: none
env:
- COMPILER_NAME=clang CXX=clang++-3.9 CC=clang-3.9
- CONFIG="--disable-linux-user --cc=clang-3.9 --cxx=clang++-3.9"
before_install:
- wget -nv -O - http://llvm.org/apt/llvm-snapshot.gpg.key | sudo apt-key add -
- sudo apt-add-repository -y 'deb http://llvm.org/apt/trusty llvm-toolchain-trusty-3.9 main'
- sudo apt-get update -qq
- sudo apt-get install -qq -y clang-3.9
- sudo apt-get build-dep -qq qemu
- wget -O - http://people.linaro.org/~alex.bennee/qemu-submodule-git-seed.tar.xz | tar -xvJ
- git submodule update --init --recursive
before_script:
- ./configure ${CONFIG} || cat config.log
# Trusty Linux User build with latest stable clang
- sudo: required
addons:
dist: trusty
language: generic
compiler: none
env:
- COMPILER_NAME=clang CXX=clang++-3.9 CC=clang-3.9
- CONFIG="--disable-system --cc=clang-3.9 --cxx=clang++-3.9"
before_install:
- wget -nv -O - http://llvm.org/apt/llvm-snapshot.gpg.key | sudo apt-key add -
- sudo apt-add-repository -y 'deb http://llvm.org/apt/trusty llvm-toolchain-trusty-3.9 main'
- sudo apt-get update -qq
- sudo apt-get install -qq -y clang-3.9
- sudo apt-get build-dep -qq qemu
- wget -O - http://people.linaro.org/~alex.bennee/qemu-submodule-git-seed.tar.xz | tar -xvJ
- git submodule update --init --recursive
before_script:
- ./configure ${CONFIG} || cat config.log
# Using newer GCC with sanitizers
- addons:
apt:
update: true
sources:
# PPAs for newer toolchains
- ubuntu-toolchain-r-test
packages:
# Extra toolchains
- gcc-7
- g++-7
- gcc-5
- g++-5
# Build dependencies
- libaio-dev
- libattr1-dev
@@ -263,25 +185,14 @@ matrix:
- libssh2-1-dev
- liburcu-dev
- libusb-1.0-0-dev
- libvte-2.91-dev
- libvte-2.90-dev
- sparse
- uuid-dev
language: generic
compiler: none
env:
- COMPILER_NAME=gcc CXX=g++-7 CC=gcc-7
- CONFIG="--cc=gcc-7 --cxx=g++-7 --disable-pie --disable-linux-user"
- COMPILER_NAME=gcc CXX=g++-5 CC=gcc-5
- CONFIG="--cc=gcc-5 --cxx=g++-5 --disable-pie --disable-linux-user"
- TEST_CMD=""
before_script:
- ./configure ${CONFIG} --extra-cflags="-g3 -O0 -fsanitize=thread -fuse-ld=gold" || { cat config.log && exit 1; }
# Run check-tcg against linux-user
- env:
- CONFIG="--disable-system"
- TEST_CMD="make -j3 check-tcg V=1"
# Run check-tcg against softmmu targets
- env:
- CONFIG="--target-list=xtensa-softmmu,arm-softmmu"
- TEST_CMD="make -j3 check-tcg V=1"
- ./configure ${CONFIG} --extra-cflags="-g3 -O0 -fsanitize=thread -fuse-ld=gold" || cat config.log


@@ -124,23 +124,6 @@ We use traditional C-style /* */ comments and avoid // comments.
Rationale: The // form is valid in C99, so this is purely a matter of
consistency of style. The checkpatch script will warn you about this.
Multiline comment blocks should have a row of stars on the left,
and the initial /* and terminating */ both on their own lines:
/*
* like
* this
*/
This is the same format required by the Linux kernel coding style.
(Some of the existing comments in the codebase use the GNU Coding
Standards form which does not have stars on the left, or other
variations; avoid these when writing new comments, but don't worry
about converting to the preferred form unless you're editing that
comment anyway.)
Rationale: Consistency, and ease of visually picking out a multiline
comment from the surrounding code.
8. trace-events style
8.1 0x prefix


@@ -1,8 +1,8 @@
GNU LESSER GENERAL PUBLIC LICENSE
Version 2.1, February 1999
GNU LESSER GENERAL PUBLIC LICENSE
Version 2.1, February 1999
Copyright (C) 1991, 1999 Free Software Foundation, Inc.
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
@@ -10,7 +10,7 @@
as the successor of the GNU Library Public License, version 2, hence
the version number 2.1.]
Preamble
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
@@ -112,7 +112,7 @@ modification follow. Pay close attention to the difference between a
former contains code derived from the library, whereas the latter must
be combined with the library in order to run.
GNU LESSER GENERAL PUBLIC LICENSE
GNU LESSER GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License Agreement applies to any software library or other
@@ -146,7 +146,7 @@ such a program is covered only if its contents constitute a work based
on the Library (independent of the use of the Library in a tool for
writing it). Whether that is true depends on what the Library does
and what the program that uses the Library does.
1. You may copy and distribute verbatim copies of the Library's
complete source code as you receive it, in any medium, provided that
you conspicuously and appropriately publish on each copy an
@@ -432,7 +432,7 @@ decision will be guided by the two goals of preserving the free status
of all derivatives of our free software and of promoting the sharing
and reuse of software generally.
NO WARRANTY
NO WARRANTY
15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
@@ -455,7 +455,7 @@ FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGES.
END OF TERMS AND CONDITIONS
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Libraries
@@ -476,7 +476,7 @@ convey the exclusion of warranty; and each file should have at least the
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
version 2 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -485,7 +485,7 @@ convey the exclusion of warranty; and each file should have at least the
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Also add information on how to contact you by electronic and paper mail.
@@ -500,3 +500,5 @@ necessary. Here is a sample; alter the names:
Ty Coon, President of Vice
That's all there is to it!

COPYING.PYTHON

@@ -0,0 +1,270 @@
A. HISTORY OF THE SOFTWARE
==========================
Python was created in the early 1990s by Guido van Rossum at Stichting
Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands
as a successor of a language called ABC. Guido remains Python's
principal author, although it includes many contributions from others.
In 1995, Guido continued his work on Python at the Corporation for
National Research Initiatives (CNRI, see http://www.cnri.reston.va.us)
in Reston, Virginia where he released several versions of the
software.
In May 2000, Guido and the Python core development team moved to
BeOpen.com to form the BeOpen PythonLabs team. In October of the same
year, the PythonLabs team moved to Digital Creations (now Zope
Corporation, see http://www.zope.com). In 2001, the Python Software
Foundation (PSF, see http://www.python.org/psf/) was formed, a
non-profit organization created specifically to own Python-related
Intellectual Property. Zope Corporation is a sponsoring member of
the PSF.
All Python releases are Open Source (see http://www.opensource.org for
the Open Source Definition). Historically, most, but not all, Python
releases have also been GPL-compatible; the table below summarizes
the various releases.
Release Derived Year Owner GPL-
from compatible? (1)
0.9.0 thru 1.2 1991-1995 CWI yes
1.3 thru 1.5.2 1.2 1995-1999 CNRI yes
1.6 1.5.2 2000 CNRI no
2.0 1.6 2000 BeOpen.com no
1.6.1 1.6 2001 CNRI yes (2)
2.1 2.0+1.6.1 2001 PSF no
2.0.1 2.0+1.6.1 2001 PSF yes
2.1.1 2.1+2.0.1 2001 PSF yes
2.2 2.1.1 2001 PSF yes
2.1.2 2.1.1 2002 PSF yes
2.1.3 2.1.2 2002 PSF yes
2.2.1 2.2 2002 PSF yes
2.2.2 2.2.1 2002 PSF yes
2.2.3 2.2.2 2003 PSF yes
2.3 2.2.2 2002-2003 PSF yes
2.3.1 2.3 2002-2003 PSF yes
2.3.2 2.3.1 2002-2003 PSF yes
2.3.3 2.3.2 2002-2003 PSF yes
2.3.4 2.3.3 2004 PSF yes
2.3.5 2.3.4 2005 PSF yes
2.4 2.3 2004 PSF yes
2.4.1 2.4 2005 PSF yes
2.4.2 2.4.1 2005 PSF yes
2.4.3 2.4.2 2006 PSF yes
2.5 2.4 2006 PSF yes
2.7 2.6 2010 PSF yes
Footnotes:
(1) GPL-compatible doesn't mean that we're distributing Python under
the GPL. All Python licenses, unlike the GPL, let you distribute
a modified version without making your changes open source. The
GPL-compatible licenses make it possible to combine Python with
other software that is released under the GPL; the others don't.
(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,
because its license has a choice of law clause. According to
CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1
is "not incompatible" with the GPL.
Thanks to the many outside volunteers who have worked under Guido's
direction to make these releases possible.
B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
===============================================================
PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
--------------------------------------------
1. This LICENSE AGREEMENT is between the Python Software Foundation
("PSF"), and the Individual or Organization ("Licensee") accessing and
otherwise using this software ("Python") in source or binary form and
its associated documentation.
2. Subject to the terms and conditions of this License Agreement, PSF
hereby grants Licensee a nonexclusive, royalty-free, world-wide
license to reproduce, analyze, test, perform and/or display publicly,
prepare derivative works, distribute, and otherwise use Python
alone or in any derivative version, provided, however, that PSF's
License Agreement and PSF's notice of copyright, i.e., "Copyright (c)
2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation; All Rights
Reserved" are retained in Python alone or in any derivative version
prepared by Licensee.
3. In the event Licensee prepares a derivative work that is based on
or incorporates Python or any part thereof, and wants to make
the derivative work available to others as provided herein, then
Licensee hereby agrees to include in any such work a brief summary of
the changes made to Python.
4. PSF is making Python available to Licensee on an "AS IS"
basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.
5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
6. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.
7. Nothing in this License Agreement shall be deemed to create any
relationship of agency, partnership, or joint venture between PSF and
Licensee. This License Agreement does not grant permission to use PSF
trademarks or trade name in a trademark sense to endorse or promote
products or services of Licensee, or any third party.
8. By copying, installing or otherwise using Python, Licensee
agrees to be bound by the terms and conditions of this License
Agreement.
BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0
-------------------------------------------
BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1
1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an
office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the
Individual or Organization ("Licensee") accessing and otherwise using
this software in source or binary form and its associated
documentation ("the Software").
2. Subject to the terms and conditions of this BeOpen Python License
Agreement, BeOpen hereby grants Licensee a non-exclusive,
royalty-free, world-wide license to reproduce, analyze, test, perform
and/or display publicly, prepare derivative works, distribute, and
otherwise use the Software alone or in any derivative version,
provided, however, that the BeOpen Python License is retained in the
Software, alone or in any derivative version prepared by Licensee.
3. BeOpen is making the Software available to Licensee on an "AS IS"
basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.
4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS
AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY
DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
5. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.
6. This License Agreement shall be governed by and interpreted in all
respects by the law of the State of California, excluding conflict of
law provisions. Nothing in this License Agreement shall be deemed to
create any relationship of agency, partnership, or joint venture
between BeOpen and Licensee. This License Agreement does not grant
permission to use BeOpen trademarks or trade names in a trademark
sense to endorse or promote products or services of Licensee, or any
third party. As an exception, the "BeOpen Python" logos available at
http://www.pythonlabs.com/logos.html may be used according to the
permissions granted on that web page.
7. By copying, installing or otherwise using the software, Licensee
agrees to be bound by the terms and conditions of this License
Agreement.
CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1
---------------------------------------
1. This LICENSE AGREEMENT is between the Corporation for National
Research Initiatives, having an office at 1895 Preston White Drive,
Reston, VA 20191 ("CNRI"), and the Individual or Organization
("Licensee") accessing and otherwise using Python 1.6.1 software in
source or binary form and its associated documentation.
2. Subject to the terms and conditions of this License Agreement, CNRI
hereby grants Licensee a nonexclusive, royalty-free, world-wide
license to reproduce, analyze, test, perform and/or display publicly,
prepare derivative works, distribute, and otherwise use Python 1.6.1
alone or in any derivative version, provided, however, that CNRI's
License Agreement and CNRI's notice of copyright, i.e., "Copyright (c)
1995-2001 Corporation for National Research Initiatives; All Rights
Reserved" are retained in Python 1.6.1 alone or in any derivative
version prepared by Licensee. Alternately, in lieu of CNRI's License
Agreement, Licensee may substitute the following text (omitting the
quotes): "Python 1.6.1 is made available subject to the terms and
conditions in CNRI's License Agreement. This Agreement together with
Python 1.6.1 may be located on the Internet using the following
unique, persistent identifier (known as a handle): 1895.22/1013. This
Agreement may also be obtained from a proxy server on the Internet
using the following URL: http://hdl.handle.net/1895.22/1013".
3. In the event Licensee prepares a derivative work that is based on
or incorporates Python 1.6.1 or any part thereof, and wants to make
the derivative work available to others as provided herein, then
Licensee hereby agrees to include in any such work a brief summary of
the changes made to Python 1.6.1.
4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS"
basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.
5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,
OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
6. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.
7. This License Agreement shall be governed by the federal
intellectual property law of the United States, including without
limitation the federal copyright law, and, to the extent such
U.S. federal law does not apply, by the law of the Commonwealth of
Virginia, excluding Virginia's conflict of law provisions.
Notwithstanding the foregoing, with regard to derivative works based
on Python 1.6.1 that incorporate non-separable material that was
previously distributed under the GNU General Public License (GPL), the
law of the Commonwealth of Virginia shall govern this License
Agreement only as to issues arising under or with respect to
Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this
License Agreement shall be deemed to create any relationship of
agency, partnership, or joint venture between CNRI and Licensee. This
License Agreement does not grant permission to use CNRI trademarks or
trade name in a trademark sense to endorse or promote products or
services of Licensee, or any third party.
8. By clicking on the "ACCEPT" button where indicated, or by copying,
installing or otherwise using Python 1.6.1, Licensee agrees to be
bound by the terms and conditions of this License Agreement.
ACCEPT
CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2
--------------------------------------------------
Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,
The Netherlands. All rights reserved.
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation, and that the name of Stichting Mathematisch
Centrum or CWI not be used in advertising or publicity pertaining to
distribution of the software without specific, written prior
permission.
STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE
FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.


@@ -118,15 +118,6 @@ Please note that g_malloc will exit on allocation failure, so there
is no need to test for failure (as you would have to with malloc).
Calling g_malloc with a zero size is valid and will return NULL.
Prefer g_new(T, n) instead of g_malloc(sizeof(T) * n) for the following
reasons:
a. It catches multiplication overflowing size_t;
b. It returns T * instead of void *, letting compiler catch more type
errors.
Declarations like T *v = g_malloc(sizeof(*v)) are acceptable, though.
Memory allocated by qemu_memalign or qemu_blockalign must be freed with
qemu_vfree, since breaking this will cause problems on Win32.
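As a minimal C sketch of the allocation guidance above (the Example struct and helper names are hypothetical, not taken from QEMU):

#include <glib.h>

typedef struct Example {
    int value;
} Example;

/* Preferred: g_new() checks that n * sizeof(Example) does not overflow
 * size_t and yields an Example *, so the compiler can catch type errors. */
Example *make_examples(size_t n)
{
    Example *v = g_new(Example, n);   /* rather than g_malloc(sizeof(Example) * n) */
    /* g_malloc()/g_new() exit on allocation failure, so no NULL check is
     * needed; a zero-sized request simply returns NULL. */
    for (size_t i = 0; i < n; i++) {
        v[i].value = 0;
    }
    return v;
}

/* Still acceptable: a single object sized from the pointee. */
Example *make_one(void)
{
    Example *v = g_malloc(sizeof(*v));
    v->value = 0;
    return v;
}

Memory obtained this way is released with g_free(); only buffers from qemu_memalign() or qemu_blockalign() need qemu_vfree().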


@@ -1,36 +0,0 @@
# These are "proxy" symbols used to pass config-host.mak values
# down to Kconfig. See also MINIKCONF_ARGS in the Makefile:
# these two need to be kept in sync.
config KVM
bool
config LINUX
bool
config OPENGL
bool
config X11
bool
config SPICE
bool
config IVSHMEM
bool
config TPM
bool
config VHOST_USER
bool
config XEN
bool
config VIRTFS
bool
config PVRDMA
bool

File diff suppressed because it is too large

Makefile

@@ -6,13 +6,7 @@ BUILD_DIR=$(CURDIR)
# Before including a proper config-host.mak, assume we are in the source tree
SRC_PATH=.
UNCHECKED_GOALS := %clean TAGS cscope ctags dist \
html info pdf txt \
help check-help print-% \
docker docker-% vm-test vm-build-%
print-%:
@echo '$*=$($*)'
UNCHECKED_GOALS := %clean TAGS cscope ctags docker docker-% help
# All following code might depend on configuration variables
ifneq ($(wildcard config-host.mak),)
@@ -56,7 +50,7 @@ ifneq ($(realpath $(SRC_PATH)),$(realpath .))
ifneq ($(wildcard $(SRC_PATH)/config-host.mak),)
$(error This is an out of tree build but your source tree ($(SRC_PATH)) \
seems to have been used for an in-tree build. You can fix this by running \
"$(MAKE) distclean && rm -rf *-linux-user *-softmmu" in your source tree)
"make distclean && rm -rf *-linux-user *-softmmu" in your source tree)
endif
endif
@@ -67,7 +61,7 @@ CONFIG_ALL=y
-include config-all-devices.mak
-include config-all-disas.mak
config-host.mak: $(SRC_PATH)/configure $(SRC_PATH)/pc-bios $(SRC_PATH)/VERSION
config-host.mak: $(SRC_PATH)/configure $(SRC_PATH)/pc-bios
@echo $@ is out-of-date, running configure
@# TODO: The next lines include code which supports a smooth
@# transition from old configurations without config.status.
@@ -87,41 +81,11 @@ endif
include $(SRC_PATH)/rules.mak
# Create QEMU_PKGVERSION and FULL_VERSION strings
# If PKGVERSION is set, use that; otherwise get version and -dirty status from git
QEMU_PKGVERSION := $(if $(PKGVERSION),$(PKGVERSION),$(shell \
cd $(SRC_PATH); \
if test -e .git; then \
git describe --match 'v*' 2>/dev/null | tr -d '\n'; \
if ! git diff-index --quiet HEAD &>/dev/null; then \
echo "-dirty"; \
fi; \
fi))
# Either "version (pkgversion)", or just "version" if pkgversion not set
FULL_VERSION := $(if $(QEMU_PKGVERSION),$(VERSION) ($(QEMU_PKGVERSION)),$(VERSION))
GENERATED_FILES = qemu-version.h config-host.h qemu-options.def
GENERATED_QAPI_FILES = qapi/qapi-builtin-types.h qapi/qapi-builtin-types.c
GENERATED_QAPI_FILES += qapi/qapi-types.h qapi/qapi-types.c
GENERATED_QAPI_FILES += $(QAPI_MODULES:%=qapi/qapi-types-%.h)
GENERATED_QAPI_FILES += $(QAPI_MODULES:%=qapi/qapi-types-%.c)
GENERATED_QAPI_FILES += qapi/qapi-builtin-visit.h qapi/qapi-builtin-visit.c
GENERATED_QAPI_FILES += qapi/qapi-visit.h qapi/qapi-visit.c
GENERATED_QAPI_FILES += $(QAPI_MODULES:%=qapi/qapi-visit-%.h)
GENERATED_QAPI_FILES += $(QAPI_MODULES:%=qapi/qapi-visit-%.c)
GENERATED_QAPI_FILES += qapi/qapi-commands.h qapi/qapi-commands.c
GENERATED_QAPI_FILES += $(QAPI_MODULES:%=qapi/qapi-commands-%.h)
GENERATED_QAPI_FILES += $(QAPI_MODULES:%=qapi/qapi-commands-%.c)
GENERATED_QAPI_FILES += qapi/qapi-emit-events.h qapi/qapi-emit-events.c
GENERATED_QAPI_FILES += qapi/qapi-events.h qapi/qapi-events.c
GENERATED_QAPI_FILES += $(QAPI_MODULES:%=qapi/qapi-events-%.h)
GENERATED_QAPI_FILES += $(QAPI_MODULES:%=qapi/qapi-events-%.c)
GENERATED_QAPI_FILES += qapi/qapi-introspect.c qapi/qapi-introspect.h
GENERATED_QAPI_FILES += qapi/qapi-doc.texi
GENERATED_FILES += $(GENERATED_QAPI_FILES)
GENERATED_FILES += qmp-commands.h qapi-types.h qapi-visit.h qapi-event.h
GENERATED_FILES += qmp-marshal.c qapi-types.c qapi-visit.c qapi-event.c
GENERATED_FILES += qmp-introspect.h
GENERATED_FILES += qmp-introspect.c
GENERATED_FILES += trace/generated-tcg-tracers.h
@@ -159,7 +123,7 @@ tracetool-y += $(shell find $(SRC_PATH)/scripts/tracetool -name "*.py")
%/trace.h: %/trace.h-timestamp
@cmp $< $@ >/dev/null 2>&1 || cp $< $@
%/trace.h-timestamp: $(SRC_PATH)/%/trace-events $(tracetool-y) $(BUILD_DIR)/config-host.mak
%/trace.h-timestamp: $(SRC_PATH)/%/trace-events $(tracetool-y)
$(call quiet-command,$(TRACETOOL) \
--group=$(call trace-group-name,$@) \
--format=h \
@@ -168,7 +132,7 @@ tracetool-y += $(shell find $(SRC_PATH)/scripts/tracetool -name "*.py")
%/trace.c: %/trace.c-timestamp
@cmp $< $@ >/dev/null 2>&1 || cp $< $@
%/trace.c-timestamp: $(SRC_PATH)/%/trace-events $(tracetool-y) $(BUILD_DIR)/config-host.mak
%/trace.c-timestamp: $(SRC_PATH)/%/trace-events $(tracetool-y)
$(call quiet-command,$(TRACETOOL) \
--group=$(call trace-group-name,$@) \
--format=c \
@@ -177,7 +141,7 @@ tracetool-y += $(shell find $(SRC_PATH)/scripts/tracetool -name "*.py")
%/trace-ust.h: %/trace-ust.h-timestamp
@cmp $< $@ >/dev/null 2>&1 || cp $< $@
%/trace-ust.h-timestamp: $(SRC_PATH)/%/trace-events $(tracetool-y) $(BUILD_DIR)/config-host.mak
%/trace-ust.h-timestamp: $(SRC_PATH)/%/trace-events $(tracetool-y)
$(call quiet-command,$(TRACETOOL) \
--group=$(call trace-group-name,$@) \
--format=ust-events-h \
@@ -201,7 +165,7 @@ tracetool-y += $(shell find $(SRC_PATH)/scripts/tracetool -name "*.py")
trace-root.h: trace-root.h-timestamp
@cmp $< $@ >/dev/null 2>&1 || cp $< $@
trace-root.h-timestamp: $(SRC_PATH)/trace-events $(tracetool-y) $(BUILD_DIR)/config-host.mak
trace-root.h-timestamp: $(SRC_PATH)/trace-events $(tracetool-y)
$(call quiet-command,$(TRACETOOL) \
--group=root \
--format=h \
@@ -210,7 +174,7 @@ trace-root.h-timestamp: $(SRC_PATH)/trace-events $(tracetool-y) $(BUILD_DIR)/con
trace-root.c: trace-root.c-timestamp
@cmp $< $@ >/dev/null 2>&1 || cp $< $@
trace-root.c-timestamp: $(SRC_PATH)/trace-events $(tracetool-y) $(BUILD_DIR)/config-host.mak
trace-root.c-timestamp: $(SRC_PATH)/trace-events $(tracetool-y)
$(call quiet-command,$(TRACETOOL) \
--group=root \
--format=c \
@@ -219,7 +183,7 @@ trace-root.c-timestamp: $(SRC_PATH)/trace-events $(tracetool-y) $(BUILD_DIR)/con
trace-ust-root.h: trace-ust-root.h-timestamp
@cmp $< $@ >/dev/null 2>&1 || cp $< $@
trace-ust-root.h-timestamp: $(SRC_PATH)/trace-events $(tracetool-y) $(BUILD_DIR)/config-host.mak
trace-ust-root.h-timestamp: $(SRC_PATH)/trace-events $(tracetool-y)
$(call quiet-command,$(TRACETOOL) \
--group=root \
--format=ust-events-h \
@@ -228,7 +192,7 @@ trace-ust-root.h-timestamp: $(SRC_PATH)/trace-events $(tracetool-y) $(BUILD_DIR)
trace-ust-all.h: trace-ust-all.h-timestamp
@cmp $< $@ >/dev/null 2>&1 || cp $< $@
trace-ust-all.h-timestamp: $(trace-events-files) $(tracetool-y) $(BUILD_DIR)/config-host.mak
trace-ust-all.h-timestamp: $(trace-events-files) $(tracetool-y)
$(call quiet-command,$(TRACETOOL) \
--group=all \
--format=ust-events-h \
@@ -237,7 +201,7 @@ trace-ust-all.h-timestamp: $(trace-events-files) $(tracetool-y) $(BUILD_DIR)/con
trace-ust-all.c: trace-ust-all.c-timestamp
@cmp $< $@ >/dev/null 2>&1 || cp $< $@
trace-ust-all.c-timestamp: $(trace-events-files) $(tracetool-y) $(BUILD_DIR)/config-host.mak
trace-ust-all.c-timestamp: $(trace-events-files) $(tracetool-y)
$(call quiet-command,$(TRACETOOL) \
--group=all \
--format=ust-events-c \
@@ -262,30 +226,17 @@ KEYCODEMAP_GEN = $(SRC_PATH)/ui/keycodemapdb/tools/keymap-gen
KEYCODEMAP_CSV = $(SRC_PATH)/ui/keycodemapdb/data/keymaps.csv
KEYCODEMAP_FILES = \
ui/input-keymap-atset1-to-qcode.c \
ui/input-keymap-linux-to-qcode.c \
ui/input-keymap-qcode-to-atset1.c \
ui/input-keymap-qcode-to-atset2.c \
ui/input-keymap-qcode-to-atset3.c \
ui/input-keymap-qcode-to-linux.c \
ui/input-keymap-qcode-to-qnum.c \
ui/input-keymap-qcode-to-sun.c \
ui/input-keymap-qnum-to-qcode.c \
ui/input-keymap-usb-to-qcode.c \
ui/input-keymap-win32-to-qcode.c \
ui/input-keymap-x11-to-qcode.c \
ui/input-keymap-xorgevdev-to-qcode.c \
ui/input-keymap-xorgkbd-to-qcode.c \
ui/input-keymap-xorgxquartz-to-qcode.c \
ui/input-keymap-xorgxwin-to-qcode.c \
ui/input-keymap-osx-to-qcode.c \
$(NULL)
GENERATED_FILES += $(KEYCODEMAP_FILES)
ui/input-keymap-%.c: $(KEYCODEMAP_GEN) $(KEYCODEMAP_CSV) $(SRC_PATH)/ui/Makefile.objs
$(call quiet-command,\
stem=$* && src=$${stem%-to-*} dst=$${stem#*-to-} && \
src=$$(echo $@ | sed -E -e "s,^ui/input-keymap-(.+)-to-(.+)\.c$$,\1,") && \
dst=$$(echo $@ | sed -E -e "s,^ui/input-keymap-(.+)-to-(.+)\.c$$,\2,") && \
test -e $(KEYCODEMAP_GEN) && \
$(PYTHON) $(KEYCODEMAP_GEN) \
--lang glib2 \
@@ -308,33 +259,29 @@ $(call set-vpath, $(SRC_PATH))
LIBS+=-lz $(LIBS_TOOLS)
HELPERS-$(call land,$(CONFIG_SOFTMMU),$(CONFIG_LINUX)) = qemu-bridge-helper$(EXESUF)
HELPERS-$(CONFIG_LINUX) = qemu-bridge-helper$(EXESUF)
ifdef BUILD_DOCS
DOCS=qemu-doc.html qemu-doc.txt qemu.1 qemu-img.1 qemu-nbd.8 qemu-ga.8
DOCS+=docs/interop/qemu-qmp-ref.html docs/interop/qemu-qmp-ref.txt docs/interop/qemu-qmp-ref.7
DOCS+=docs/interop/qemu-ga-ref.html docs/interop/qemu-ga-ref.txt docs/interop/qemu-ga-ref.7
DOCS+=docs/qemu-block-drivers.7
DOCS+=docs/qemu-cpu-models.7
ifdef CONFIG_VIRTFS
DOCS+=fsdev/virtfs-proxy-helper.1
endif
ifdef CONFIG_TRACE_SYSTEMTAP
DOCS+=scripts/qemu-trace-stap.1
endif
else
DOCS=
endif
SUBDIR_MAKEFLAGS=$(if $(V),,--no-print-directory --quiet) BUILD_DIR=$(BUILD_DIR)
SUBDIR_DEVICES_MAK=$(patsubst %, %/config-devices.mak, $(filter %-softmmu, $(TARGET_DIRS)))
SUBDIR_DEVICES_MAK_DEP=$(patsubst %, %.d, $(SUBDIR_DEVICES_MAK))
SUBDIR_MAKEFLAGS=$(if $(V),,--no-print-directory) BUILD_DIR=$(BUILD_DIR)
SUBDIR_DEVICES_MAK=$(patsubst %, %/config-devices.mak, $(TARGET_DIRS))
SUBDIR_DEVICES_MAK_DEP=$(patsubst %, %-config-devices.mak.d, $(TARGET_DIRS))
ifeq ($(SUBDIR_DEVICES_MAK),)
config-all-devices.mak: config-host.mak
config-all-devices.mak:
$(call quiet-command,echo '# no devices' > $@,"GEN","$@")
else
config-all-devices.mak: $(SUBDIR_DEVICES_MAK) config-host.mak
config-all-devices.mak: $(SUBDIR_DEVICES_MAK)
$(call quiet-command, sed -n \
's|^\([^=]*\)=\(.*\)$$|\1:=$$(findstring y,$$(\1)\2)|p' \
$(SUBDIR_DEVICES_MAK) | sort -u > $@, \
@@ -343,27 +290,9 @@ endif
-include $(SUBDIR_DEVICES_MAK_DEP)
# This has to be kept in sync with Kconfig.host.
MINIKCONF_ARGS = \
$(CONFIG_MINIKCONF_MODE) \
$@ $*-config.devices.mak.d $< $(MINIKCONF_INPUTS) \
CONFIG_KVM=$(CONFIG_KVM) \
CONFIG_SPICE=$(CONFIG_SPICE) \
CONFIG_IVSHMEM=$(CONFIG_IVSHMEM) \
CONFIG_TPM=$(CONFIG_TPM) \
CONFIG_XEN=$(CONFIG_XEN) \
CONFIG_OPENGL=$(CONFIG_OPENGL) \
CONFIG_X11=$(CONFIG_X11) \
CONFIG_VHOST_USER=$(CONFIG_VHOST_USER) \
CONFIG_VIRTFS=$(CONFIG_VIRTFS) \
CONFIG_LINUX=$(CONFIG_LINUX) \
CONFIG_PVRDMA=$(CONFIG_PVRDMA)
MINIKCONF_INPUTS = $(SRC_PATH)/Kconfig.host $(SRC_PATH)/hw/Kconfig
MINIKCONF = $(PYTHON) $(SRC_PATH)/scripts/minikconf.py \
$(SUBDIR_DEVICES_MAK): %/config-devices.mak: default-configs/%.mak $(MINIKCONF_INPUTS) $(BUILD_DIR)/config-host.mak
$(call quiet-command, $(MINIKCONF) $(MINIKCONF_ARGS) > $@.tmp, "GEN", "$@.tmp")
%/config-devices.mak: default-configs/%.mak $(SRC_PATH)/scripts/make_device_config.sh
$(call quiet-command, \
$(SHELL) $(SRC_PATH)/scripts/make_device_config.sh $< $*-config-devices.mak.d $@ > $@.tmp,"GEN","$@.tmp")
$(call quiet-command, if test -f $@; then \
if cmp -s $@.old $@; then \
mv $@.tmp $@; \
@@ -374,7 +303,7 @@ $(SUBDIR_DEVICES_MAK): %/config-devices.mak: default-configs/%.mak $(MINIKCONF_I
else \
echo "WARNING: $@ out of date.";\
fi; \
echo "Run \"$(MAKE) defconfig\" to regenerate."; \
echo "Run \"make defconfig\" to regenerate."; \
rm $@.tmp; \
fi; \
else \
@@ -391,17 +320,13 @@ endif
dummy := $(call unnest-vars,, \
stub-obj-y \
authz-obj-y \
chardev-obj-y \
util-obj-y \
qga-obj-y \
elf2dmp-obj-y \
ivshmem-client-obj-y \
ivshmem-server-obj-y \
rdmacm-mux-obj-y \
libvhost-user-obj-y \
vhost-user-scsi-obj-y \
vhost-user-blk-obj-y \
qga-vss-dll-obj-y \
block-obj-y \
block-obj-m \
@@ -411,21 +336,30 @@ dummy := $(call unnest-vars,, \
io-obj-y \
common-obj-y \
common-obj-m \
ui-obj-y \
ui-obj-m \
audio-obj-y \
audio-obj-m \
trace-obj-y)
include $(SRC_PATH)/tests/Makefile.include
all: $(DOCS) $(if $(BUILD_DOCS),sphinxdocs) $(TOOLS) $(HELPERS-y) recurse-all modules
all: $(DOCS) $(TOOLS) $(HELPERS-y) recurse-all modules
qemu-version.h: FORCE
$(call quiet-command, \
(printf '#define QEMU_PKGVERSION "$(QEMU_PKGVERSION)"\n'; \
printf '#define QEMU_FULL_VERSION "$(FULL_VERSION)"\n'; \
) > $@.tmp)
(cd $(SRC_PATH); \
printf '#define QEMU_PKGVERSION '; \
if test -n "$(PKGVERSION)"; then \
printf '"$(PKGVERSION)"\n'; \
else \
if test -d .git; then \
printf '" ('; \
git describe --match 'v*' 2>/dev/null | tr -d '\n'; \
if ! git diff-index --quiet HEAD &>/dev/null; then \
printf -- '-dirty'; \
fi; \
printf ')"\n'; \
else \
printf '""\n'; \
fi; \
fi) > $@.tmp)
$(call quiet-command, if ! cmp -s $@ $@.tmp; then \
mv $@.tmp $@; \
else \
@@ -440,7 +374,6 @@ qemu-options.def: $(SRC_PATH)/qemu-options.hx $(SRC_PATH)/scripts/hxtool
SUBDIR_RULES=$(patsubst %,subdir-%, $(TARGET_DIRS))
SOFTMMU_SUBDIR_RULES=$(filter %-softmmu,$(SUBDIR_RULES))
$(SOFTMMU_SUBDIR_RULES): $(authz-obj-y)
$(SOFTMMU_SUBDIR_RULES): $(block-obj-y)
$(SOFTMMU_SUBDIR_RULES): $(crypto-obj-y)
$(SOFTMMU_SUBDIR_RULES): $(io-obj-y)
@@ -457,7 +390,7 @@ subdir-dtc: .git-submodule-status dtc/libfdt dtc/tests
$(call quiet-command,$(MAKE) $(DTC_MAKE_ARGS) CPPFLAGS="$(DTC_CPPFLAGS)" CFLAGS="$(DTC_CFLAGS)" LDFLAGS="$(LDFLAGS)" ARFLAGS="$(ARFLAGS)" CC="$(CC)" AR="$(AR)" LD="$(LD)" $(SUBDIR_MAKEFLAGS) libfdt/libfdt.a,)
dtc/%: .git-submodule-status
@mkdir -p $@
mkdir -p $@
# Overriding CFLAGS causes us to lose defines added in the sub-makefile.
# Not overriding CFLAGS leads to mis-matches between compilation modes.
@@ -474,9 +407,6 @@ CAP_CFLAGS += -DCAPSTONE_HAS_X86
subdir-capstone: .git-submodule-status
$(call quiet-command,$(MAKE) -C $(SRC_PATH)/capstone CAPSTONE_SHARED=no BUILDDIR="$(BUILD_DIR)/capstone" CC="$(CC)" AR="$(AR)" LD="$(LD)" RANLIB="$(RANLIB)" CFLAGS="$(CAP_CFLAGS)" $(SUBDIR_MAKEFLAGS) $(BUILD_DIR)/capstone/$(LIBCAPSTONE))
subdir-slirp: .git-submodule-status
$(call quiet-command,$(MAKE) -C $(SRC_PATH)/slirp BUILD_DIR="$(BUILD_DIR)/slirp" CC="$(CC)" AR="$(AR)" LD="$(LD)" RANLIB="$(RANLIB)" CFLAGS="$(QEMU_CFLAGS)")
$(SUBDIR_RULES): libqemuutil.a $(common-obj-y) $(chardev-obj-y) \
$(qom-obj-y) $(crypto-aes-obj-$(CONFIG_USER_ONLY))
@@ -498,7 +428,7 @@ Makefile: $(version-obj-y)
# Build libraries
libqemuutil.a: $(util-obj-y) $(trace-obj-y) $(stub-obj-y)
libvhost-user.a: $(libvhost-user-obj-y) $(util-obj-y) $(stub-obj-y)
libvhost-user.a: $(libvhost-user-obj-y)
######################################################################
@@ -506,20 +436,18 @@ COMMON_LDADDS = libqemuutil.a
qemu-img.o: qemu-img-cmds.h
qemu-img$(EXESUF): qemu-img.o $(authz-obj-y) $(block-obj-y) $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)
qemu-nbd$(EXESUF): qemu-nbd.o $(authz-obj-y) $(block-obj-y) $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)
qemu-io$(EXESUF): qemu-io.o $(authz-obj-y) $(block-obj-y) $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)
qemu-img$(EXESUF): qemu-img.o $(block-obj-y) $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)
qemu-nbd$(EXESUF): qemu-nbd.o $(block-obj-y) $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)
qemu-io$(EXESUF): qemu-io.o $(block-obj-y) $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)
qemu-bridge-helper$(EXESUF): qemu-bridge-helper.o $(COMMON_LDADDS)
qemu-keymap$(EXESUF): qemu-keymap.o ui/input-keymap.o $(COMMON_LDADDS)
qemu-edid$(EXESUF): qemu-edid.o hw/display/edid-generate.o $(COMMON_LDADDS)
fsdev/virtfs-proxy-helper$(EXESUF): fsdev/virtfs-proxy-helper.o fsdev/9p-marshal.o fsdev/9p-iov-marshal.o $(COMMON_LDADDS)
fsdev/virtfs-proxy-helper$(EXESUF): LIBS += -lcap
scsi/qemu-pr-helper$(EXESUF): scsi/qemu-pr-helper.o scsi/utils.o $(authz-obj-y) $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)
scsi/qemu-pr-helper$(EXESUF): scsi/qemu-pr-helper.o scsi/utils.o $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)
ifdef CONFIG_MPATH
scsi/qemu-pr-helper$(EXESUF): LIBS += -ludev -lmultipath -lmpathpersist
endif
@@ -533,37 +461,68 @@ qemu-ga$(EXESUF): QEMU_CFLAGS += -I qga/qapi-generated
qemu-keymap$(EXESUF): LIBS += $(XKBCOMMON_LIBS)
qemu-keymap$(EXESUF): QEMU_CFLAGS += $(XKBCOMMON_CFLAGS)
qapi-py = $(SRC_PATH)/scripts/qapi/commands.py \
$(SRC_PATH)/scripts/qapi/events.py \
$(SRC_PATH)/scripts/qapi/introspect.py \
$(SRC_PATH)/scripts/qapi/types.py \
$(SRC_PATH)/scripts/qapi/visit.py \
$(SRC_PATH)/scripts/qapi/common.py \
$(SRC_PATH)/scripts/qapi/doc.py \
$(SRC_PATH)/scripts/qapi-gen.py
gen-out-type = $(subst .,-,$(suffix $@))
qga/qapi-generated/qga-qapi-types.c qga/qapi-generated/qga-qapi-types.h \
qga/qapi-generated/qga-qapi-visit.c qga/qapi-generated/qga-qapi-visit.h \
qga/qapi-generated/qga-qapi-commands.h qga/qapi-generated/qga-qapi-commands.c \
qga/qapi-generated/qga-qapi-doc.texi: \
qga/qapi-generated/qapi-gen-timestamp ;
qga/qapi-generated/qapi-gen-timestamp: $(SRC_PATH)/qga/qapi-schema.json $(qapi-py)
$(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi-gen.py \
-o qga/qapi-generated -p "qga-" $<, \
"GEN","$(@:%-timestamp=%)")
@>$@
qapi-py = $(SRC_PATH)/scripts/qapi.py $(SRC_PATH)/scripts/ordereddict.py
qapi-modules = $(SRC_PATH)/qapi/qapi-schema.json \
$(QAPI_MODULES:%=$(SRC_PATH)/qapi/%.json)
qga/qapi-generated/qga-qapi-types.c qga/qapi-generated/qga-qapi-types.h :\
$(SRC_PATH)/qga/qapi-schema.json $(SRC_PATH)/scripts/qapi-types.py $(qapi-py)
$(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi-types.py \
$(gen-out-type) -o qga/qapi-generated -p "qga-" $<, \
"GEN","$@")
qga/qapi-generated/qga-qapi-visit.c qga/qapi-generated/qga-qapi-visit.h :\
$(SRC_PATH)/qga/qapi-schema.json $(SRC_PATH)/scripts/qapi-visit.py $(qapi-py)
$(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi-visit.py \
$(gen-out-type) -o qga/qapi-generated -p "qga-" $<, \
"GEN","$@")
qga/qapi-generated/qga-qmp-commands.h qga/qapi-generated/qga-qmp-marshal.c :\
$(SRC_PATH)/qga/qapi-schema.json $(SRC_PATH)/scripts/qapi-commands.py $(qapi-py)
$(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi-commands.py \
$(gen-out-type) -o qga/qapi-generated -p "qga-" $<, \
"GEN","$@")
$(GENERATED_QAPI_FILES): qapi-gen-timestamp ;
qapi-gen-timestamp: $(qapi-modules) $(qapi-py)
$(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi-gen.py \
-o "qapi" -b $<, \
"GEN","$(@:%-timestamp=%)")
@>$@
qapi-modules = $(SRC_PATH)/qapi-schema.json $(SRC_PATH)/qapi/common.json \
$(SRC_PATH)/qapi/block.json $(SRC_PATH)/qapi/block-core.json \
$(SRC_PATH)/qapi/char.json \
$(SRC_PATH)/qapi/crypto.json \
$(SRC_PATH)/qapi/introspect.json \
$(SRC_PATH)/qapi/migration.json \
$(SRC_PATH)/qapi/net.json \
$(SRC_PATH)/qapi/rocker.json \
$(SRC_PATH)/qapi/run-state.json \
$(SRC_PATH)/qapi/sockets.json \
$(SRC_PATH)/qapi/tpm.json \
$(SRC_PATH)/qapi/trace.json \
$(SRC_PATH)/qapi/transaction.json \
$(SRC_PATH)/qapi/ui.json
QGALIB_GEN=$(addprefix qga/qapi-generated/, qga-qapi-types.h qga-qapi-visit.h qga-qapi-commands.h)
qapi-types.c qapi-types.h :\
$(qapi-modules) $(SRC_PATH)/scripts/qapi-types.py $(qapi-py)
$(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi-types.py \
$(gen-out-type) -o "." -b $<, \
"GEN","$@")
qapi-visit.c qapi-visit.h :\
$(qapi-modules) $(SRC_PATH)/scripts/qapi-visit.py $(qapi-py)
$(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi-visit.py \
$(gen-out-type) -o "." -b $<, \
"GEN","$@")
qapi-event.c qapi-event.h :\
$(qapi-modules) $(SRC_PATH)/scripts/qapi-event.py $(qapi-py)
$(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi-event.py \
$(gen-out-type) -o "." $<, \
"GEN","$@")
qmp-commands.h qmp-marshal.c :\
$(qapi-modules) $(SRC_PATH)/scripts/qapi-commands.py $(qapi-py)
$(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi-commands.py \
$(gen-out-type) -o "." $<, \
"GEN","$@")
qmp-introspect.h qmp-introspect.c :\
$(qapi-modules) $(SRC_PATH)/scripts/qapi-introspect.py $(qapi-py)
$(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi-introspect.py \
$(gen-out-type) -o "." $<, \
"GEN","$@")
QGALIB_GEN=$(addprefix qga/qapi-generated/, qga-qapi-types.h qga-qapi-visit.h qga-qmp-commands.h)
$(qga-obj-y): $(QGALIB_GEN)
qemu-ga$(EXESUF): $(qga-obj-y) $(COMMON_LDADDS)
@@ -591,10 +550,6 @@ ifneq ($(EXESUF),)
qemu-ga: qemu-ga$(EXESUF) $(QGA_VSS_PROVIDER) $(QEMU_GA_MSI)
endif
elf2dmp$(EXESUF): LIBS += $(CURL_LIBS)
elf2dmp$(EXESUF): $(elf2dmp-obj-y)
$(call LINK, $^)
ifdef CONFIG_IVSHMEM
ivshmem-client$(EXESUF): $(ivshmem-client-obj-y) $(COMMON_LDADDS)
$(call LINK, $^)
@@ -603,36 +558,18 @@ ivshmem-server$(EXESUF): $(ivshmem-server-obj-y) $(COMMON_LDADDS)
endif
vhost-user-scsi$(EXESUF): $(vhost-user-scsi-obj-y) libvhost-user.a
$(call LINK, $^)
vhost-user-blk$(EXESUF): $(vhost-user-blk-obj-y) libvhost-user.a
$(call LINK, $^)
rdmacm-mux$(EXESUF): LIBS += "-libumad"
rdmacm-mux$(EXESUF): $(rdmacm-mux-obj-y) $(COMMON_LDADDS)
$(call LINK, $^)
module_block.h: $(SRC_PATH)/scripts/modules/module_block.py config-host.mak
$(call quiet-command,$(PYTHON) $< $@ \
$(addprefix $(SRC_PATH)/,$(patsubst %.mo,%.c,$(block-obj-m))), \
"GEN","$@")
ifdef CONFIG_GCOV
.PHONY: clean-coverage
clean-coverage:
$(call quiet-command, \
find . \( -name '*.gcda' -o -name '*.gcov' \) -type f -exec rm {} +, \
"CLEAN", "coverage files")
endif
clean:
# avoid old build problems by removing potentially incorrect old files
rm -f config.mak op-i386.h opc-i386.h gen-op-i386.h op-arm.h opc-arm.h gen-op-arm.h
rm -f qemu-options.def
rm -f *.msi
find . \( -name '*.so' -o -name '*.dll' -o -name '*.mo' -o -name '*.[oda]' \) -type f \
! -path ./roms/edk2/ArmPkg/Library/GccLto/liblto-aarch64.a \
! -path ./roms/edk2/ArmPkg/Library/GccLto/liblto-arm.a \
! -path ./roms/edk2/BaseTools/Source/Python/UPT/Dll/sqlite3.dll \
-exec rm {} +
find . \( -name '*.so' -o -name '*.dll' -o -name '*.mo' -o -name '*.[oda]' \) -type f -exec rm {} +
rm -f $(filter-out %.tlb,$(TOOLS)) $(HELPERS-y) qemu-ga TAGS cscope.* *.pod *~ */*~
rm -f fsdev/*.pod scsi/*.pod
rm -f qemu-img-cmds.h
@@ -641,13 +578,13 @@ clean:
rm -f trace/generated-tracers-dtrace.dtrace*
rm -f trace/generated-tracers-dtrace.h*
rm -f $(foreach f,$(GENERATED_FILES),$(f) $(f)-timestamp)
rm -f qapi-gen-timestamp
rm -rf qapi-generated
rm -rf qga/qapi-generated
for d in $(ALL_SUBDIRS); do \
if test -d $$d; then $(MAKE) -C $$d $@ || exit 1; fi; \
rm -f $$d/qemu-options.def; \
done
rm -f config-all-devices.mak
rm -f $(SUBDIR_DEVICES_MAK) config-all-devices.mak
VERSION ?= $(shell cat VERSION)
@@ -656,26 +593,9 @@ dist: qemu-$(VERSION).tar.bz2
qemu-%.tar.bz2:
$(SRC_PATH)/scripts/make-release "$(SRC_PATH)" "$(patsubst qemu-%.tar.bz2,%,$@)"
# Sphinx does not allow building manuals into the same directory as
# the source files, so if we're doing an in-tree QEMU build we must
# build the manuals into a subdirectory (and then install them from
# there for 'make install'). For an out-of-tree build we can just
# use the docs/ subdirectory in the build tree as normal.
ifeq ($(realpath $(SRC_PATH)),$(realpath .))
MANUAL_BUILDDIR := docs/built
else
MANUAL_BUILDDIR := docs
endif
define clean-manual =
rm -rf $(MANUAL_BUILDDIR)/$1/_static
rm -f $(MANUAL_BUILDDIR)/$1/objects.inv $(MANUAL_BUILDDIR)/$1/searchindex.js $(MANUAL_BUILDDIR)/$1/*.html
endef
distclean: clean
rm -f config-host.mak config-host.h* config-host.ld $(DOCS) qemu-options.texi qemu-img-cmds.texi qemu-monitor.texi qemu-monitor-info.texi
rm -f config-all-devices.mak config-all-disas.mak config.status
rm -f $(SUBDIR_DEVICES_MAK)
rm -f po/*.mo tests/qemu-iotests/common.env
rm -f roms/seabios/config.mak roms/vgabios/config.mak
rm -f qemu-doc.info qemu-doc.aux qemu-doc.cp qemu-doc.cps
@@ -691,57 +611,40 @@ distclean: clean
rm -f docs/interop/qemu-qmp-ref.pdf docs/interop/qemu-ga-ref.pdf
rm -f docs/interop/qemu-qmp-ref.html docs/interop/qemu-ga-ref.html
rm -f docs/qemu-block-drivers.7
rm -f docs/qemu-cpu-models.7
rm -rf .doctrees
$(call clean-manual,devel)
$(call clean-manual,interop)
for d in $(TARGET_DIRS); do \
rm -rf $$d || exit 1 ; \
done
rm -Rf .sdk
if test -f dtc/version_gen.h; then $(MAKE) $(DTC_MAKE_ARGS) clean; fi
KEYMAPS=da en-gb et fr fr-ch is lt no pt-br sv \
KEYMAPS=da en-gb et fr fr-ch is lt modifiers no pt-br sv \
ar de en-us fi fr-be hr it lv nl pl ru th \
de-ch es fo fr-ca hu ja mk pt sl tr \
common de-ch es fo fr-ca hu ja mk nl-be pt sl tr \
bepo cz
ifdef INSTALL_BLOBS
BLOBS=bios.bin bios-256k.bin sgabios.bin vgabios.bin vgabios-cirrus.bin \
vgabios-stdvga.bin vgabios-vmware.bin vgabios-qxl.bin vgabios-virtio.bin \
vgabios-ramfb.bin vgabios-bochs-display.bin \
acpi-dsdt.aml \
ppc_rom.bin openbios-sparc32 openbios-sparc64 openbios-ppc QEMU,tcx.bin QEMU,cgthree.bin \
pxe-e1000.rom pxe-eepro100.rom pxe-ne2k_pci.rom \
pxe-pcnet.rom pxe-rtl8139.rom pxe-virtio.rom \
efi-e1000.rom efi-eepro100.rom efi-ne2k_pci.rom \
efi-pcnet.rom efi-rtl8139.rom efi-virtio.rom \
efi-e1000e.rom efi-vmxnet3.rom \
bamboo.dtb canyonlands.dtb petalogix-s3adsp1800.dtb petalogix-ml605.dtb \
multiboot.bin linuxboot.bin linuxboot_dma.bin kvmvapic.bin pvh.bin \
qemu-icon.bmp qemu_logo_no_text.svg \
bamboo.dtb petalogix-s3adsp1800.dtb petalogix-ml605.dtb \
multiboot.bin linuxboot.bin linuxboot_dma.bin kvmvapic.bin \
s390-ccw.img s390-netboot.img \
spapr-rtas.bin slof.bin skiboot.lid \
palcode-clipper \
u-boot.e500 u-boot-sam460-20100605.bin \
qemu_vga.ndrv \
hppa-firmware.img
u-boot.e500 \
qemu_vga.ndrv
else
BLOBS=
endif
# Note that we manually filter-out the non-Sphinx documentation which
# is currently built into the docs/interop directory in the build tree.
define install-manual =
for d in $$(cd $(MANUAL_BUILDDIR) && find $1 -type d); do $(INSTALL_DIR) "$(DESTDIR)$(qemu_docdir)/$$d"; done
for f in $$(cd $(MANUAL_BUILDDIR) && find $1 -type f -a '!' '(' -name 'qemu-*-qapi.*' -o -name 'qemu-*-ref.*' ')' ); do $(INSTALL_DATA) "$(MANUAL_BUILDDIR)/$$f" "$(DESTDIR)$(qemu_docdir)/$$f"; done
endef
# Note that we deliberately do not install the "devel" manual: it is
# for QEMU developers, and not interesting to our users.
.PHONY: install-sphinxdocs
install-sphinxdocs: sphinxdocs
$(call install-manual,interop)
install-doc: $(DOCS) install-sphinxdocs
install-doc: $(DOCS)
$(INSTALL_DIR) "$(DESTDIR)$(qemu_docdir)"
$(INSTALL_DATA) qemu-doc.html "$(DESTDIR)$(qemu_docdir)"
$(INSTALL_DATA) qemu-doc.txt "$(DESTDIR)$(qemu_docdir)"
@@ -753,15 +656,11 @@ ifdef CONFIG_POSIX
$(INSTALL_DIR) "$(DESTDIR)$(mandir)/man7"
$(INSTALL_DATA) docs/interop/qemu-qmp-ref.7 "$(DESTDIR)$(mandir)/man7"
$(INSTALL_DATA) docs/qemu-block-drivers.7 "$(DESTDIR)$(mandir)/man7"
$(INSTALL_DATA) docs/qemu-cpu-models.7 "$(DESTDIR)$(mandir)/man7"
ifneq ($(TOOLS),)
$(INSTALL_DATA) qemu-img.1 "$(DESTDIR)$(mandir)/man1"
$(INSTALL_DIR) "$(DESTDIR)$(mandir)/man8"
$(INSTALL_DATA) qemu-nbd.8 "$(DESTDIR)$(mandir)/man8"
endif
ifdef CONFIG_TRACE_SYSTEMTAP
$(INSTALL_DATA) scripts/qemu-trace-stap.1 "$(DESTDIR)$(mandir)/man1"
endif
ifneq (,$(findstring qemu-ga,$(TOOLS)))
$(INSTALL_DATA) qemu-ga.8 "$(DESTDIR)$(mandir)/man8"
$(INSTALL_DATA) docs/interop/qemu-ga-ref.html "$(DESTDIR)$(qemu_docdir)"
@@ -784,7 +683,6 @@ ifneq (,$(findstring qemu-ga,$(TOOLS)))
endif
endif
ICON_SIZES=16x16 24x24 32x32 48x48 64x64 128x128 256x256 512x512
install: all $(if $(BUILD_DOCS),install-doc) install-datadir install-localstatedir
ifneq ($(TOOLS),)
@@ -801,29 +699,12 @@ endif
ifneq ($(HELPERS-y),)
$(call install-prog,$(HELPERS-y),$(DESTDIR)$(libexecdir))
endif
ifdef CONFIG_TRACE_SYSTEMTAP
$(INSTALL_PROG) "scripts/qemu-trace-stap" $(DESTDIR)$(bindir)
endif
ifneq ($(BLOBS),)
set -e; for x in $(BLOBS); do \
$(INSTALL_DATA) $(SRC_PATH)/pc-bios/$$x "$(DESTDIR)$(qemu_datadir)"; \
done
endif
for s in $(ICON_SIZES); do \
mkdir -p "$(DESTDIR)/$(qemu_icondir)/hicolor/$${s}/apps"; \
$(INSTALL_DATA) $(SRC_PATH)/ui/icons/qemu_$${s}.png \
"$(DESTDIR)/$(qemu_icondir)/hicolor/$${s}/apps/qemu.png"; \
done; \
mkdir -p "$(DESTDIR)/$(qemu_icondir)/hicolor/32x32/apps"; \
$(INSTALL_DATA) $(SRC_PATH)/ui/icons/qemu_32x32.bmp \
"$(DESTDIR)/$(qemu_icondir)/hicolor/32x32/apps/qemu.bmp"; \
mkdir -p "$(DESTDIR)/$(qemu_icondir)/hicolor/scalable/apps"; \
$(INSTALL_DATA) $(SRC_PATH)/ui/icons/qemu.svg \
"$(DESTDIR)/$(qemu_icondir)/hicolor/scalable/apps/qemu.svg"
mkdir -p "$(DESTDIR)/$(qemu_desktopdir)"
$(INSTALL_DATA) $(SRC_PATH)/ui/qemu.desktop \
"$(DESTDIR)/$(qemu_desktopdir)/qemu.desktop"
ifdef CONFIG_GTK
ifeq ($(CONFIG_GTK),y)
$(MAKE) -C po $@
endif
$(INSTALL_DIR) "$(DESTDIR)$(qemu_datadir)/keymaps"
@@ -892,23 +773,6 @@ docs/version.texi: $(SRC_PATH)/VERSION
%.pdf: %.texi docs/version.texi
$(call quiet-command,texi2pdf $(TEXI2PDFFLAGS) $< -o $@,"GEN","$@")
# Sphinx builds all its documentation at once in one invocation
# and handles "don't rebuild things unless necessary" itself.
# The '.doctrees' files are cached information to speed this up.
.PHONY: sphinxdocs
sphinxdocs: $(MANUAL_BUILDDIR)/devel/index.html $(MANUAL_BUILDDIR)/interop/index.html
# Canned command to build a single manual
build-manual = $(call quiet-command,sphinx-build $(if $(V),,-q) -W -n -b html -D version=$(VERSION) -D release="$(FULL_VERSION)" -d .doctrees/$1 $(SRC_PATH)/docs/$1 $(MANUAL_BUILDDIR)/$1 ,"SPHINX","$(MANUAL_BUILDDIR)/$1")
# We assume all RST files in the manual's directory are used in it
manual-deps = $(wildcard $(SRC_PATH)/docs/$1/*.rst) $(SRC_PATH)/docs/$1/conf.py $(SRC_PATH)/docs/conf.py
$(MANUAL_BUILDDIR)/devel/index.html: $(call manual-deps,devel)
$(call build-manual,devel)
$(MANUAL_BUILDDIR)/interop/index.html: $(call manual-deps,interop)
$(call build-manual,interop)
qemu-options.texi: $(SRC_PATH)/qemu-options.hx $(SRC_PATH)/scripts/hxtool
$(call quiet-command,sh $(SRC_PATH)/scripts/hxtool -t < $< > $@,"GEN","$@")
@@ -921,11 +785,13 @@ qemu-monitor-info.texi: $(SRC_PATH)/hmp-commands-info.hx $(SRC_PATH)/scripts/hxt
qemu-img-cmds.texi: $(SRC_PATH)/qemu-img-cmds.hx $(SRC_PATH)/scripts/hxtool
$(call quiet-command,sh $(SRC_PATH)/scripts/hxtool -t < $< > $@,"GEN","$@")
docs/interop/qemu-qmp-qapi.texi: qapi/qapi-doc.texi
@cp -p $< $@
docs/interop/qemu-qmp-qapi.texi docs/interop/qemu-ga-qapi.texi: $(SRC_PATH)/scripts/qapi2texi.py $(qapi-py)
docs/interop/qemu-ga-qapi.texi: qga/qapi-generated/qga-qapi-doc.texi
@cp -p $< $@
docs/interop/qemu-qmp-qapi.texi: $(qapi-modules)
$(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi2texi.py $< > $@,"GEN","$@")
docs/interop/qemu-ga-qapi.texi: $(SRC_PATH)/qga/qapi-schema.json
$(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi2texi.py $< > $@,"GEN","$@")
qemu.1: qemu-doc.texi qemu-options.texi qemu-monitor.texi qemu-monitor-info.texi
qemu.1: qemu-option-trace.texi
@@ -934,19 +800,16 @@ fsdev/virtfs-proxy-helper.1: fsdev/virtfs-proxy-helper.texi
qemu-nbd.8: qemu-nbd.texi qemu-option-trace.texi
qemu-ga.8: qemu-ga.texi
docs/qemu-block-drivers.7: docs/qemu-block-drivers.texi
docs/qemu-cpu-models.7: docs/qemu-cpu-models.texi
scripts/qemu-trace-stap.1: scripts/qemu-trace-stap.texi
html: qemu-doc.html docs/interop/qemu-qmp-ref.html docs/interop/qemu-ga-ref.html sphinxdocs
html: qemu-doc.html docs/interop/qemu-qmp-ref.html docs/interop/qemu-ga-ref.html
info: qemu-doc.info docs/interop/qemu-qmp-ref.info docs/interop/qemu-ga-ref.info
pdf: qemu-doc.pdf docs/interop/qemu-qmp-ref.pdf docs/interop/qemu-ga-ref.pdf
txt: qemu-doc.txt docs/interop/qemu-qmp-ref.txt docs/interop/qemu-ga-ref.txt
qemu-doc.html qemu-doc.info qemu-doc.pdf qemu-doc.txt: \
qemu-img.texi qemu-nbd.texi qemu-options.texi qemu-option-trace.texi \
qemu-deprecated.texi qemu-monitor.texi qemu-img-cmds.texi qemu-ga.texi \
qemu-monitor-info.texi docs/qemu-block-drivers.texi \
docs/qemu-cpu-models.texi
qemu-monitor.texi qemu-img-cmds.texi qemu-ga.texi \
qemu-monitor-info.texi docs/qemu-block-drivers.texi
docs/interop/qemu-ga-ref.dvi docs/interop/qemu-ga-ref.html \
docs/interop/qemu-ga-ref.info docs/interop/qemu-ga-ref.pdf \
@@ -958,19 +821,6 @@ docs/interop/qemu-qmp-ref.dvi docs/interop/qemu-qmp-ref.html \
docs/interop/qemu-qmp-ref.txt docs/interop/qemu-qmp-ref.7: \
docs/interop/qemu-qmp-ref.texi docs/interop/qemu-qmp-qapi.texi
$(filter %.1 %.7 %.8,$(DOCS)): scripts/texi2pod.pl
# Reports/Analysis
%/coverage-report.html:
@mkdir -p $*
$(call quiet-command,\
gcovr -r $(SRC_PATH) --object-directory $(BUILD_PATH) \
-p --html --html-details -o $@, \
"GEN", "coverage-report.html")
.PHONY: coverage-report
coverage-report: $(CURDIR)/reports/coverage/coverage-report.html
ifdef CONFIG_WIN32
@@ -1051,9 +901,6 @@ include $(SRC_PATH)/tests/vm/Makefile.include
help:
@echo 'Generic targets:'
@echo ' all - Build all'
ifdef CONFIG_MODULES
@echo ' modules - Build all modules'
endif
@echo ' dir/file.o - Build specified target only'
@echo ' install - Install QEMU, documentation and tools'
@echo ' ctags/TAGS - Generate tags file for editors'
@@ -1066,9 +913,6 @@ endif
echo '')
@echo 'Cleaning targets:'
@echo ' clean - Remove most generated files but keep the config'
ifdef CONFIG_GCOV
@echo ' clean-coverage - Remove coverage files'
endif
@echo ' distclean - Remove all generated files'
@echo ' dist - Build a distributable tarball'
@echo ''
@@ -1080,9 +924,6 @@ endif
@echo 'Documentation targets:'
@echo ' html info pdf txt'
@echo ' - Build documentation in specified format'
ifdef CONFIG_GCOV
@echo ' coverage-report - Create code coverage report'
endif
@echo ''
ifdef CONFIG_WIN32
@echo 'Windows targets:'
@@ -1092,5 +933,4 @@ ifdef QEMU_GA_MSI_ENABLED
endif
@echo ''
endif
@echo ' $(MAKE) [targets] (quiet build, default)'
@echo ' $(MAKE) V=1 [targets] (verbose build)'
@echo ' make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build'


@@ -1,20 +1,16 @@
#######################################################################
# Common libraries for tools and emulators
stub-obj-y = stubs/ util/ crypto/
stub-obj-y = stubs/ crypto/
util-obj-y = util/ qobject/ qapi/
util-obj-y += qmp-introspect.o qapi-types.o qapi-visit.o qapi-event.o
chardev-obj-y = chardev/
#######################################################################
# authz-obj-y is code used by both qemu system emulation and qemu-img
authz-obj-y = authz/
#######################################################################
# block-obj-y is code used by both qemu system emulation and qemu-img
block-obj-y = nbd/
block-obj-y += block.o blockjob.o job.o
block-obj-y += nbd/
block-obj-y += block.o blockjob.o
block-obj-y += block/ scsi/
block-obj-y += qemu-io-cmds.o
block-obj-$(CONFIG_REPLICATION) += replication.o
@@ -45,7 +41,6 @@ io-obj-y = io/
ifeq ($(CONFIG_SOFTMMU),y)
common-obj-y = blockdev.o blockdev-nbd.o block/
common-obj-y += bootdevice.o iothread.o
common-obj-y += job-qmp.o
common-obj-y += net/
common-obj-y += qdev-monitor.o device-hotplug.o
common-obj-$(CONFIG_WIN32) += os-win32.o
@@ -56,13 +51,11 @@ common-obj-$(CONFIG_LINUX) += fsdev/
common-obj-y += migration/
common-obj-y += audio/
common-obj-m += audio/
common-obj-y += hw/
common-obj-y += replay/
common-obj-y += ui/
common-obj-m += ui/
common-obj-y += bt-host.o bt-vhci.o
bt-host.o-cflags := $(BLUEZ_CFLAGS)
@@ -71,6 +64,8 @@ common-obj-y += vl.o
vl.o-cflags := $(GPROF_CFLAGS) $(SDL_CFLAGS)
common-obj-$(CONFIG_TPM) += tpm.o
common-obj-$(CONFIG_SLIRP) += slirp/
common-obj-y += backends/
common-obj-y += chardev/
@@ -83,8 +78,9 @@ common-obj-$(CONFIG_FDT) += device_tree.o
######################################################################
# qapi
common-obj-y += qmp-marshal.o
common-obj-y += qmp-introspect.o
common-obj-y += qmp.o hmp.o
common-obj-y += qapi/
endif
#######################################################################
@@ -101,96 +97,80 @@ version-obj-$(CONFIG_WIN32) += $(BUILD_DIR)/version.o
######################################################################
# tracing
util-obj-y += trace/
target-obj-y += trace/
######################################################################
# guest agent
# FIXME: a few definitions from qapi/qapi-types.o and
# qapi/qapi-visit.o are needed by libqemuutil.a. These should be
# extracted into a QAPI schema module, or perhaps a separate schema.
# FIXME: a few definitions from qapi-types.o/qapi-visit.o are needed
# by libqemuutil.a. These should be moved to a separate .json schema.
qga-obj-y = qga/
qga-vss-dll-obj-y = qga/
######################################################################
# contrib
elf2dmp-obj-y = contrib/elf2dmp/
ivshmem-client-obj-$(CONFIG_IVSHMEM) = contrib/ivshmem-client/
ivshmem-server-obj-$(CONFIG_IVSHMEM) = contrib/ivshmem-server/
libvhost-user-obj-y = contrib/libvhost-user/
vhost-user-scsi.o-cflags := $(LIBISCSI_CFLAGS)
vhost-user-scsi.o-libs := $(LIBISCSI_LIBS)
vhost-user-scsi-obj-y = contrib/vhost-user-scsi/
vhost-user-blk-obj-y = contrib/vhost-user-blk/
rdmacm-mux-obj-y = contrib/rdmacm-mux/
######################################################################
trace-events-subdirs =
trace-events-subdirs += accel/kvm
trace-events-subdirs += accel/tcg
trace-events-subdirs += audio
trace-events-subdirs += authz
trace-events-subdirs += util
trace-events-subdirs += crypto
trace-events-subdirs += io
trace-events-subdirs += migration
trace-events-subdirs += block
trace-events-subdirs += chardev
trace-events-subdirs += crypto
trace-events-subdirs += hw/9pfs
trace-events-subdirs += hw/acpi
trace-events-subdirs += hw/alpha
trace-events-subdirs += hw/arm
trace-events-subdirs += hw/audio
trace-events-subdirs += hw/block
trace-events-subdirs += hw/block/dataplane
trace-events-subdirs += hw/char
trace-events-subdirs += hw/display
trace-events-subdirs += hw/dma
trace-events-subdirs += hw/hppa
trace-events-subdirs += hw/i2c
trace-events-subdirs += hw/i386
trace-events-subdirs += hw/i386/xen
trace-events-subdirs += hw/ide
trace-events-subdirs += hw/input
trace-events-subdirs += hw/intc
trace-events-subdirs += hw/net
trace-events-subdirs += hw/virtio
trace-events-subdirs += hw/audio
trace-events-subdirs += hw/misc
trace-events-subdirs += hw/usb
trace-events-subdirs += hw/scsi
trace-events-subdirs += hw/nvram
trace-events-subdirs += hw/display
trace-events-subdirs += hw/input
trace-events-subdirs += hw/timer
trace-events-subdirs += hw/dma
trace-events-subdirs += hw/sparc
trace-events-subdirs += hw/sd
trace-events-subdirs += hw/isa
trace-events-subdirs += hw/mem
trace-events-subdirs += hw/misc
trace-events-subdirs += hw/misc/macio
trace-events-subdirs += hw/net
trace-events-subdirs += hw/nvram
trace-events-subdirs += hw/pci
trace-events-subdirs += hw/pci-host
trace-events-subdirs += hw/i386
trace-events-subdirs += hw/i386/xen
trace-events-subdirs += hw/9pfs
trace-events-subdirs += hw/ppc
trace-events-subdirs += hw/rdma
trace-events-subdirs += hw/rdma/vmw
trace-events-subdirs += hw/pci
trace-events-subdirs += hw/s390x
trace-events-subdirs += hw/scsi
trace-events-subdirs += hw/sd
trace-events-subdirs += hw/sparc
trace-events-subdirs += hw/sparc64
trace-events-subdirs += hw/timer
trace-events-subdirs += hw/tpm
trace-events-subdirs += hw/usb
trace-events-subdirs += hw/vfio
trace-events-subdirs += hw/virtio
trace-events-subdirs += hw/watchdog
trace-events-subdirs += hw/acpi
trace-events-subdirs += hw/arm
trace-events-subdirs += hw/alpha
trace-events-subdirs += hw/xen
trace-events-subdirs += hw/gpio
trace-events-subdirs += io
trace-events-subdirs += linux-user
trace-events-subdirs += migration
trace-events-subdirs += nbd
trace-events-subdirs += hw/ide
trace-events-subdirs += ui
trace-events-subdirs += audio
trace-events-subdirs += net
trace-events-subdirs += qapi
trace-events-subdirs += qom
trace-events-subdirs += scsi
trace-events-subdirs += target/arm
trace-events-subdirs += target/hppa
trace-events-subdirs += target/i386
trace-events-subdirs += target/mips
trace-events-subdirs += target/ppc
trace-events-subdirs += target/riscv
trace-events-subdirs += target/s390x
trace-events-subdirs += target/sparc
trace-events-subdirs += ui
trace-events-subdirs += util
trace-events-subdirs += target/s390x
trace-events-subdirs += target/ppc
trace-events-subdirs += qom
trace-events-subdirs += linux-user
trace-events-subdirs += qapi
trace-events-subdirs += accel/tcg
trace-events-subdirs += accel/kvm
trace-events-subdirs += nbd
trace-events-subdirs += scsi
trace-events-files = $(SRC_PATH)/trace-events $(trace-events-subdirs:%=$(SRC_PATH)/%/trace-events)


@@ -4,19 +4,16 @@ BUILD_DIR?=$(CURDIR)/..
include ../config-host.mak
include config-target.mak
include $(SRC_PATH)/rules.mak
ifdef CONFIG_SOFTMMU
include config-devices.mak
endif
include $(SRC_PATH)/rules.mak
$(call set-vpath, $(SRC_PATH):$(BUILD_DIR))
ifdef CONFIG_LINUX
QEMU_CFLAGS += -I../linux-headers
endif
QEMU_CFLAGS += -iquote .. -iquote $(SRC_PATH)/target/$(TARGET_BASE_ARCH) -DNEED_CPU_H
QEMU_CFLAGS += -I.. -I$(SRC_PATH)/target/$(TARGET_BASE_ARCH) -DNEED_CPU_H
QEMU_CFLAGS+=-iquote $(SRC_PATH)/include
QEMU_CFLAGS+=-I$(SRC_PATH)/include
ifdef CONFIG_USER_ONLY
# user emulator name
@@ -39,14 +36,11 @@ endif
PROGS=$(QEMU_PROG) $(QEMU_PROGW)
STPFILES=
# Makefile Tests
include $(SRC_PATH)/tests/tcg/Makefile.include
config-target.h: config-target.h-timestamp
config-target.h-timestamp: config-target.mak
ifdef CONFIG_TRACE_SYSTEMTAP
stap: $(QEMU_PROG).stp-installed $(QEMU_PROG).stp $(QEMU_PROG)-simpletrace.stp $(QEMU_PROG)-log.stp
stap: $(QEMU_PROG).stp-installed $(QEMU_PROG).stp $(QEMU_PROG)-simpletrace.stp
ifdef CONFIG_USER_ONLY
TARGET_TYPE=user
@@ -85,14 +79,6 @@ $(QEMU_PROG)-simpletrace.stp: $(BUILD_DIR)/trace-events-all $(tracetool-y)
--probe-prefix=qemu.$(TARGET_TYPE).$(TARGET_NAME) \
$< > $@,"GEN","$(TARGET_DIR)$(QEMU_PROG)-simpletrace.stp")
$(QEMU_PROG)-log.stp: $(BUILD_DIR)/trace-events-all $(tracetool-y)
$(call quiet-command,$(TRACETOOL) \
--group=all \
--format=log-stap \
--backends=$(TRACE_BACKENDS) \
--probe-prefix=qemu.$(TARGET_TYPE).$(TARGET_NAME) \
$< > $@,"GEN","$(TARGET_DIR)$(QEMU_PROG)-log.stp")
else
stap:
endif
@@ -103,17 +89,15 @@ all: $(PROGS) stap
# Dummy command so that make thinks it has done something
@true
obj-y += trace/
#########################################################
# cpu emulator library
obj-y += exec.o
obj-y += accel/
obj-$(CONFIG_TCG) += tcg/tcg.o tcg/tcg-op.o tcg/tcg-op-vec.o tcg/tcg-op-gvec.o
obj-$(CONFIG_TCG) += tcg/tcg-common.o tcg/optimize.o
obj-$(CONFIG_TCG) += tcg/tcg.o tcg/tcg-op.o tcg/optimize.o
obj-$(CONFIG_TCG) += tcg/tcg-common.o
obj-$(CONFIG_TCG_INTERPRETER) += tcg/tci.o
obj-$(CONFIG_TCG_INTERPRETER) += disas/tci.o
obj-$(CONFIG_TCG) += fpu/softfloat.o
obj-y += fpu/softfloat.o
obj-y += target/$(TARGET_BASE_ARCH)/
obj-y += disas.o
obj-$(call notempty,$(TARGET_XML_FILES)) += gdbstub-xml.o
@@ -151,11 +135,9 @@ ifdef CONFIG_SOFTMMU
obj-y += arch_init.o cpus.o monitor.o gdbstub.o balloon.o ioport.o numa.o
obj-y += qtest.o
obj-y += hw/
obj-y += qapi/
obj-y += memory.o
obj-y += memory_mapping.o
obj-y += dump.o
obj-$(TARGET_X86_64) += win_dump.o
obj-y += migration/ram.o
LIBS := $(libs_softmmu) $(LIBS)
@@ -170,12 +152,20 @@ GENERATED_FILES += hmp-commands.h hmp-commands-info.h
endif # CONFIG_SOFTMMU
# Workaround for http://gcc.gnu.org/PR55489, see configure.
%/translate.o: QEMU_CFLAGS += $(TRANSLATE_OPT_CFLAGS)
dummy := $(call unnest-vars,,obj-y)
all-obj-y := $(obj-y)
target-obj-y :=
block-obj-y :=
common-obj-y :=
chardev-obj-y :=
include $(SRC_PATH)/Makefile.objs
dummy := $(call unnest-vars,,target-obj-y)
target-obj-y-save := $(target-obj-y)
dummy := $(call unnest-vars,.., \
authz-obj-y \
block-obj-y \
block-obj-m \
chardev-obj-y \
@@ -185,17 +175,16 @@ dummy := $(call unnest-vars,.., \
io-obj-y \
common-obj-y \
common-obj-m)
target-obj-y := $(target-obj-y-save)
all-obj-y += $(common-obj-y)
all-obj-y += $(target-obj-y)
all-obj-y += $(qom-obj-y)
all-obj-$(CONFIG_SOFTMMU) += $(authz-obj-y)
all-obj-$(CONFIG_SOFTMMU) += $(block-obj-y) $(chardev-obj-y)
all-obj-$(CONFIG_USER_ONLY) += $(crypto-aes-obj-y)
all-obj-$(CONFIG_SOFTMMU) += $(crypto-obj-y)
all-obj-$(CONFIG_SOFTMMU) += $(io-obj-y)
ifdef CONFIG_SOFTMMU
$(QEMU_PROG_BUILD): config-devices.mak
endif
COMMON_LDADDS = ../libqemuutil.a
@@ -220,7 +209,6 @@ clean: clean-target
rm -f *.a *~ $(PROGS)
rm -f $(shell find . -name '*.[od]')
rm -f hmp-commands.h gdbstub-xml.c
rm -f trace/generated-helpers.c trace/generated-helpers.c-timestamp
ifdef CONFIG_TRACE_SYSTEMTAP
rm -f *.stp
endif
@@ -233,7 +221,6 @@ ifdef CONFIG_TRACE_SYSTEMTAP
$(INSTALL_DIR) "$(DESTDIR)$(qemu_datadir)/../systemtap/tapset"
$(INSTALL_DATA) $(QEMU_PROG).stp-installed "$(DESTDIR)$(qemu_datadir)/../systemtap/tapset/$(QEMU_PROG).stp"
$(INSTALL_DATA) $(QEMU_PROG)-simpletrace.stp "$(DESTDIR)$(qemu_datadir)/../systemtap/tapset/$(QEMU_PROG)-simpletrace.stp"
$(INSTALL_DATA) $(QEMU_PROG)-log.stp "$(DESTDIR)$(qemu_datadir)/../systemtap/tapset/$(QEMU_PROG)-log.stp"
endif
GENERATED_FILES += config-target.h

README

@@ -54,9 +54,9 @@ Submitting patches
The QEMU source code is maintained under the GIT version control system.
git clone https://git.qemu.org/git/qemu.git
git clone git://git.qemu.org/qemu.git
When submitting patches, one common approach is to use 'git
When submitting patches, the preferred approach is to use 'git
format-patch' and/or 'git send-email' to format & send the mail to the
qemu-devel@nongnu.org mailing list. All patches submitted must contain
a 'Signed-off-by' line from the author. Patches should follow the
@@ -68,39 +68,6 @@ the QEMU website
https://qemu.org/Contribute/SubmitAPatch
https://qemu.org/Contribute/TrivialPatches
The QEMU website is also maintained under source control.
git clone https://git.qemu.org/git/qemu-web.git
https://www.qemu.org/2017/02/04/the-new-qemu-website-is-up/
A 'git-publish' utility was created to make the above process less
cumbersome, and is highly recommended for making regular contributions,
or even just for sending consecutive patch series revisions. It also
requires a working 'git send-email' setup, and by default doesn't
automate everything, so you may want to go through the above steps
manually for once.
For installation instructions, please go to
https://github.com/stefanha/git-publish
The workflow with 'git-publish' is:
$ git checkout master -b my-feature
$ # work on new commits, add your 'Signed-off-by' lines to each
$ git publish
Your patch series will be sent and tagged as my-feature-v1 if you need to refer
back to it in the future.
Sending v2:
$ git checkout my-feature # same topic branch
$ # making changes to the commits (using 'git rebase', for example)
$ git publish
Your patch series will be sent with 'v2' tag in the subject and the git tip
will be tagged as my-feature-v2.
Bug reporting
=============


@@ -1 +1 @@
4.0.1
2.11.2


@@ -1,4 +1,4 @@
obj-$(CONFIG_SOFTMMU) += accel.o
obj-$(CONFIG_KVM) += kvm/
obj-y += kvm/
obj-$(CONFIG_TCG) += tcg/
obj-y += stubs/


@@ -26,6 +26,7 @@
#include "qemu/osdep.h"
#include "sysemu/accel.h"
#include "hw/boards.h"
#include "qemu-common.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
@@ -33,8 +34,6 @@
#include "hw/xen/xen.h"
#include "qom/object.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "qapi/error.h"
static const TypeInfo accel_type = {
.name = TYPE_ACCEL,
@@ -65,16 +64,14 @@ static int accel_init_machine(AccelClass *acc, MachineState *ms)
ms->accelerator = NULL;
*(acc->allowed) = false;
object_unref(OBJECT(accel));
} else {
object_set_accelerator_compat_props(acc->compat_props);
}
return ret;
}
void configure_accelerator(MachineState *ms, const char *progname)
void configure_accelerator(MachineState *ms)
{
const char *accel;
char **accel_list, **tmp;
const char *accel, *p;
char buf[10];
int ret;
bool accel_initialised = false;
bool init_failed = false;
@@ -82,28 +79,17 @@ void configure_accelerator(MachineState *ms, const char *progname)
accel = qemu_opt_get(qemu_get_machine_opts(), "accel");
if (accel == NULL) {
/* Select the default accelerator */
int pnlen = strlen(progname);
if (pnlen >= 3 && g_str_equal(&progname[pnlen - 3], "kvm")) {
/* If the program name ends with "kvm", we prefer KVM */
accel = "kvm:tcg";
} else {
#if defined(CONFIG_TCG)
accel = "tcg";
#elif defined(CONFIG_KVM)
accel = "kvm";
#else
error_report("No accelerator selected and"
" no default accelerator available");
exit(1);
#endif
}
/* Use the default "accelerator", tcg */
accel = "tcg";
}
accel_list = g_strsplit(accel, ":", 0);
for (tmp = accel_list; !accel_initialised && tmp && *tmp; tmp++) {
acc = accel_find(*tmp);
p = accel;
while (!accel_initialised && *p != '\0') {
if (*p == ':') {
p++;
}
p = get_opt_name(buf, sizeof(buf), p, ':');
acc = accel_find(buf);
if (!acc) {
continue;
}
@@ -121,7 +107,6 @@ void configure_accelerator(MachineState *ms, const char *progname)
accel_initialised = true;
}
}
g_strfreev(accel_list);
if (!accel_initialised) {
if (!init_failed) {
@@ -135,13 +120,10 @@ void configure_accelerator(MachineState *ms, const char *progname)
}
}
void accel_setup_post(MachineState *ms)
void accel_register_compat_props(AccelState *accel)
{
AccelState *accel = ms->accelerator;
AccelClass *acc = ACCEL_GET_CLASS(accel);
if (acc->setup_post) {
acc->setup_post(ms, accel);
}
AccelClass *class = ACCEL_GET_CLASS(accel);
register_compat_props_array(class->global_props);
}
static void register_accel_types(void)


@@ -1,2 +1 @@
obj-y += kvm-all.o
obj-$(call lnot,$(CONFIG_SEV)) += sev-stub.o
obj-$(CONFIG_KVM) += kvm-all.o


@@ -38,8 +38,6 @@
#include "qemu/event_notifier.h"
#include "trace.h"
#include "hw/irq.h"
#include "sysemu/sev.h"
#include "sysemu/balloon.h"
#include "hw/boards.h"
@@ -79,14 +77,13 @@ struct KVMState
int fd;
int vmfd;
int coalesced_mmio;
int coalesced_pio;
struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
bool coalesced_flush_in_progress;
int vcpu_events;
int robust_singlestep;
int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints;
struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
int many_ioeventfds;
int intx_set_mask;
@@ -102,14 +99,10 @@ struct KVMState
int nr_allocated_irq_routes;
unsigned long *used_gsi_bitmap;
unsigned int gsi_count;
QTAILQ_HEAD(, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
QTAILQ_HEAD(msi_hashtab, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
#endif
KVMMemoryListener memory_listener;
QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;
/* memory encryption */
void *memcrypt_handle;
int (*memcrypt_encrypt_data)(void *handle, uint8_t *ptr, uint64_t len);
};
KVMState *kvm_state;
@@ -145,26 +138,6 @@ int kvm_get_max_memslots(void)
return s->nr_slots;
}
bool kvm_memcrypt_enabled(void)
{
if (kvm_state && kvm_state->memcrypt_handle) {
return true;
}
return false;
}
int kvm_memcrypt_encrypt_data(uint8_t *ptr, uint64_t len)
{
if (kvm_state->memcrypt_handle &&
kvm_state->memcrypt_encrypt_data) {
return kvm_state->memcrypt_encrypt_data(kvm_state->memcrypt_handle,
ptr, len);
}
return 1;
}
static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
{
KVMState *s = kvm_state;
@@ -258,29 +231,24 @@ int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
return 0;
}
static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot)
{
KVMState *s = kvm_state;
struct kvm_userspace_memory_region mem;
int ret;
mem.slot = slot->slot | (kml->as_id << 16);
mem.guest_phys_addr = slot->start_addr;
mem.userspace_addr = (unsigned long)slot->ram;
mem.flags = slot->flags;
if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) {
if (slot->memory_size && mem.flags & KVM_MEM_READONLY) {
/* Set the slot size to 0 before setting the slot to the desired
* value. This is needed based on KVM commit 75d61fbc. */
mem.memory_size = 0;
kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}
mem.memory_size = slot->memory_size;
ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
slot->old_flags = mem.flags;
trace_kvm_set_user_memory(mem.slot, mem.flags, mem.guest_phys_addr,
mem.memory_size, mem.userspace_addr, ret);
return ret;
return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}
int kvm_destroy_vcpu(CPUState *cpu)
@@ -394,14 +362,17 @@ static int kvm_mem_flags(MemoryRegion *mr)
static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
MemoryRegion *mr)
{
int old_flags;
old_flags = mem->flags;
mem->flags = kvm_mem_flags(mr);
/* If nothing changed effectively, no need to issue ioctl */
if (mem->flags == mem->old_flags) {
if (mem->flags == old_flags) {
return 0;
}
return kvm_set_user_memory_region(kml, mem, false);
return kvm_set_user_memory_region(kml, mem);
}
static int kvm_section_update_flags(KVMMemoryListener *kml,
@@ -561,45 +532,6 @@ static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
}
}
static void kvm_coalesce_pio_add(MemoryListener *listener,
MemoryRegionSection *section,
hwaddr start, hwaddr size)
{
KVMState *s = kvm_state;
if (s->coalesced_pio) {
struct kvm_coalesced_mmio_zone zone;
zone.addr = start;
zone.size = size;
zone.pio = 1;
(void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
}
}
static void kvm_coalesce_pio_del(MemoryListener *listener,
MemoryRegionSection *section,
hwaddr start, hwaddr size)
{
KVMState *s = kvm_state;
if (s->coalesced_pio) {
struct kvm_coalesced_mmio_zone zone;
zone.addr = start;
zone.size = size;
zone.pio = 1;
(void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
}
}
static MemoryListener kvm_coalesced_pio_listener = {
.coalesced_io_add = kvm_coalesce_pio_add,
.coalesced_io_del = kvm_coalesce_pio_del,
};
int kvm_check_extension(KVMState *s, unsigned int extension)
{
int ret;
@@ -657,8 +589,6 @@ static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
.fd = fd,
};
trace_kvm_set_ioeventfd_mmio(fd, (uint64_t)addr, val, assign, size,
datamatch);
if (!kvm_enabled()) {
return -ENOSYS;
}
@@ -690,7 +620,6 @@ static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
.fd = fd,
};
int r;
trace_kvm_set_ioeventfd_pio(fd, addr, val, assign, size, datamatch);
if (!kvm_enabled()) {
return -ENOSYS;
}
@@ -797,8 +726,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
/* unregister the slot */
mem->memory_size = 0;
mem->flags = 0;
err = kvm_set_user_memory_region(kml, mem, false);
err = kvm_set_user_memory_region(kml, mem);
if (err) {
fprintf(stderr, "%s: error unregistering slot: %s\n",
__func__, strerror(-err));
@@ -814,7 +742,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
mem->ram = ram;
mem->flags = kvm_mem_flags(mr);
err = kvm_set_user_memory_region(kml, mem, true);
err = kvm_set_user_memory_region(kml, mem);
if (err) {
fprintf(stderr, "%s: error registering slot: %s\n", __func__,
strerror(-err));
@@ -1593,7 +1521,7 @@ static int kvm_init(MachineState *ms)
kvm_type = qemu_opt_get(qemu_get_machine_opts(), "kvm-type");
if (mc->kvm_type) {
type = mc->kvm_type(ms, kvm_type);
type = mc->kvm_type(kvm_type);
} else if (kvm_type) {
ret = -EINVAL;
fprintf(stderr, "Invalid argument kvm-type=%s\n", kvm_type);
@@ -1659,8 +1587,6 @@ static int kvm_init(MachineState *ms)
}
s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
s->coalesced_pio = s->coalesced_mmio &&
kvm_check_extension(s, KVM_CAP_COALESCED_PIO);
#ifdef KVM_CAP_VCPU_EVENTS
s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
@@ -1684,8 +1610,10 @@ static int kvm_init(MachineState *ms)
s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
}
#ifdef KVM_CAP_READONLY_MEM
kvm_readonly_mem_allowed =
(kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
#endif
kvm_eventfds_allowed =
(kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);
@@ -1704,20 +1632,6 @@ static int kvm_init(MachineState *ms)
kvm_state = s;
/*
* if memory encryption object is specified then initialize the memory
* encryption context.
*/
if (ms->memory_encryption) {
kvm_state->memcrypt_handle = sev_guest_init(ms->memory_encryption);
if (!kvm_state->memcrypt_handle) {
ret = -1;
goto err;
}
kvm_state->memcrypt_encrypt_data = sev_encrypt_data;
}
ret = kvm_arch_init(ms, s);
if (ret < 0) {
goto err;
@@ -1731,22 +1645,17 @@ static int kvm_init(MachineState *ms)
s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
}
s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;
s->memory_listener.listener.coalesced_mmio_add = kvm_coalesce_mmio_region;
s->memory_listener.listener.coalesced_mmio_del = kvm_uncoalesce_mmio_region;
kvm_memory_listener_register(s, &s->memory_listener,
&address_space_memory, 0);
memory_listener_register(&kvm_io_listener,
&address_space_io);
memory_listener_register(&kvm_coalesced_pio_listener,
&address_space_io);
s->many_ioeventfds = kvm_check_many_ioeventfds();
s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
if (!s->sync_mmu) {
qemu_balloon_inhibit(true);
}
return 0;
@@ -1825,13 +1734,7 @@ void kvm_flush_coalesced_mmio_buffer(void)
ent = &ring->coalesced_mmio[ring->first];
if (ent->pio == 1) {
address_space_rw(&address_space_io, ent->phys_addr,
MEMTXATTRS_UNSPECIFIED, ent->data,
ent->len, true);
} else {
cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
}
cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
smp_wmb();
ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
}
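
For readers unfamiliar with the kernel interface being exercised in the KVM hunks above: each slot update ultimately issues one KVM_SET_USER_MEMORY_REGION ioctl on the VM file descriptor, passing a struct kvm_userspace_memory_region, exactly as kvm_set_user_memory_region() does. A minimal stand-alone sketch of that call outside QEMU (slot number, guest physical address and the 2 MiB size are illustrative; error handling is trimmed):

#include <stddef.h>
#include <stdint.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

int main(void)
{
    int kvm = open("/dev/kvm", O_RDWR);
    int vm  = ioctl(kvm, KVM_CREATE_VM, 0);

    size_t size = 2 << 20;                        /* 2 MiB of guest RAM */
    void *host = mmap(NULL, size, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    struct kvm_userspace_memory_region mem = {
        .slot            = 0,
        .flags           = 0,                     /* e.g. KVM_MEM_LOG_DIRTY_PAGES */
        .guest_phys_addr = 0x100000,              /* illustrative GPA */
        .memory_size     = size,
        .userspace_addr  = (uint64_t)(uintptr_t)host,
    };
    return ioctl(vm, KVM_SET_USER_MEMORY_REGION, &mem);
}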


@@ -1,26 +0,0 @@
/*
* QEMU SEV stub
*
* Copyright Advanced Micro Devices 2018
*
* Authors:
* Brijesh Singh <brijesh.singh@amd.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "sysemu/sev.h"
int sev_encrypt_data(void *handle, uint8_t *ptr, uint64_t len)
{
abort();
}
void *sev_guest_init(const char *id)
{
return NULL;
}


@@ -1,4 +1,4 @@
# See docs/devel/tracing.txt for syntax documentation.
# Trace events for debugging and performance instrumentation
# kvm-all.c
kvm_ioctl(int type, void *arg) "type 0x%x, arg %p"
@@ -12,7 +12,4 @@ kvm_irqchip_commit_routes(void) ""
kvm_irqchip_add_msi_route(char *name, int vector, int virq) "dev %s vector %d virq %d"
kvm_irqchip_update_msi_route(int virq) "Updating MSI route virq=%d"
kvm_irqchip_release_virq(int virq) "virq %d"
kvm_set_ioeventfd_mmio(int fd, uint64_t addr, uint32_t val, bool assign, uint32_t size, bool datamatch) "fd: %d @0x%" PRIx64 " val=0x%x assign: %d size: %d match: %d"
kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint32_t val, bool assign, uint32_t size, bool datamatch) "fd: %d @0x%x val=0x%x assign: %d size: %d match: %d"
kvm_set_user_memory(uint32_t slot, uint32_t flags, uint64_t guest_phys_addr, uint64_t memory_size, uint64_t userspace_addr, int ret) "Slot#%d flags=0x%x gpa=0x%"PRIx64 " size=0x%"PRIx64 " ua=0x%"PRIx64 " ret=%d"


@@ -1,5 +1,3 @@
obj-$(call lnot,$(CONFIG_HAX)) += hax-stub.o
obj-$(call lnot,$(CONFIG_HVF)) += hvf-stub.o
obj-$(call lnot,$(CONFIG_WHPX)) += whpx-stub.o
obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
obj-$(call lnot,$(CONFIG_TCG)) += tcg-stub.o
obj-$(call lnot,$(CONFIG_HAX)) += hax-stub.o
obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
obj-$(call lnot,$(CONFIG_TCG)) += tcg-stub.o


@@ -1,31 +0,0 @@
/*
* QEMU HVF support
*
* Copyright 2017 Red Hat, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2 or later, as published by the Free Software Foundation,
* and may be copied, distributed, and modified under those terms.
*
* See the COPYING file in the top-level directory.
*
*/
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "sysemu/hvf.h"
int hvf_init_vcpu(CPUState *cpu)
{
return -ENOSYS;
}
int hvf_vcpu_exec(CPUState *cpu)
{
return -ENOSYS;
}
void hvf_vcpu_destroy(CPUState *cpu)
{
}


@@ -105,16 +105,6 @@ int kvm_on_sigbus(int code, void *addr)
return 1;
}
bool kvm_memcrypt_enabled(void)
{
return false;
}
int kvm_memcrypt_encrypt_data(uint8_t *ptr, uint64_t len)
{
return 1;
}
#ifndef CONFIG_USER_ONLY
int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
{


@@ -21,6 +21,10 @@ void tb_flush(CPUState *cpu)
{
}
void tb_unlock(void)
{
}
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
}


@@ -1,48 +0,0 @@
/*
* QEMU Windows Hypervisor Platform accelerator (WHPX) stub
*
* Copyright Microsoft Corp. 2017
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "sysemu/whpx.h"
int whpx_init_vcpu(CPUState *cpu)
{
return -1;
}
int whpx_vcpu_exec(CPUState *cpu)
{
return -1;
}
void whpx_destroy_vcpu(CPUState *cpu)
{
}
void whpx_vcpu_kick(CPUState *cpu)
{
}
void whpx_cpu_synchronize_state(CPUState *cpu)
{
}
void whpx_cpu_synchronize_post_reset(CPUState *cpu)
{
}
void whpx_cpu_synchronize_post_init(CPUState *cpu)
{
}
void whpx_cpu_synchronize_pre_loadvm(CPUState *cpu)
{
}


@@ -1,6 +1,6 @@
obj-$(CONFIG_SOFTMMU) += tcg-all.o
obj-$(CONFIG_SOFTMMU) += cputlb.o
obj-y += tcg-runtime.o tcg-runtime-gvec.o
obj-y += tcg-runtime.o
obj-y += cpu-exec.o cpu-exec-common.o translate-all.o
obj-y += translator.o


@@ -7,7 +7,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -18,37 +18,26 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "trace/mem.h"
#if DATA_SIZE == 16
# define SUFFIX o
# define DATA_TYPE Int128
# define BSWAP bswap128
# define SHIFT 4
#elif DATA_SIZE == 8
# define SUFFIX q
# define DATA_TYPE uint64_t
# define SDATA_TYPE int64_t
# define BSWAP bswap64
# define SHIFT 3
#elif DATA_SIZE == 4
# define SUFFIX l
# define DATA_TYPE uint32_t
# define SDATA_TYPE int32_t
# define BSWAP bswap32
# define SHIFT 2
#elif DATA_SIZE == 2
# define SUFFIX w
# define DATA_TYPE uint16_t
# define SDATA_TYPE int16_t
# define BSWAP bswap16
# define SHIFT 1
#elif DATA_SIZE == 1
# define SUFFIX b
# define DATA_TYPE uint8_t
# define SDATA_TYPE int8_t
# define BSWAP
# define SHIFT 0
#else
# error unsupported data size
#endif
@@ -59,37 +48,14 @@
# define ABI_TYPE uint32_t
#endif
#define ATOMIC_TRACE_RMW do { \
uint8_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, false); \
\
trace_guest_mem_before_exec(ENV_GET_CPU(env), addr, info); \
trace_guest_mem_before_exec(ENV_GET_CPU(env), addr, \
info | TRACE_MEM_ST); \
} while (0)
#define ATOMIC_TRACE_LD do { \
uint8_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, false); \
\
trace_guest_mem_before_exec(ENV_GET_CPU(env), addr, info); \
} while (0)
# define ATOMIC_TRACE_ST do { \
uint8_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, true); \
\
trace_guest_mem_before_exec(ENV_GET_CPU(env), addr, info); \
} while (0)
/* Define host-endian atomic operations. Note that END is used within
the ATOMIC_NAME macro, and redefined below. */
#if DATA_SIZE == 1
# define END
# define MEND _be /* either le or be would be fine */
#elif defined(HOST_WORDS_BIGENDIAN)
# define END _be
# define MEND _be
#else
# define END _le
# define MEND _le
#endif
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
@@ -97,27 +63,17 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
{
ATOMIC_MMU_DECLS;
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
DATA_TYPE ret;
ATOMIC_TRACE_RMW;
#if DATA_SIZE == 16
ret = atomic16_cmpxchg(haddr, cmpv, newv);
#else
ret = atomic_cmpxchg__nocheck(haddr, cmpv, newv);
#endif
DATA_TYPE ret = atomic_cmpxchg__nocheck(haddr, cmpv, newv);
ATOMIC_MMU_CLEANUP;
return ret;
}
#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
{
ATOMIC_MMU_DECLS;
DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
ATOMIC_TRACE_LD;
val = atomic16_read(haddr);
__atomic_load(haddr, &val, __ATOMIC_RELAXED);
ATOMIC_MMU_CLEANUP;
return val;
}
@@ -127,22 +83,16 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
{
ATOMIC_MMU_DECLS;
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
ATOMIC_TRACE_ST;
atomic16_set(haddr, val);
__atomic_store(haddr, &val, __ATOMIC_RELAXED);
ATOMIC_MMU_CLEANUP;
}
#endif
#else
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE val EXTRA_ARGS)
{
ATOMIC_MMU_DECLS;
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
DATA_TYPE ret;
ATOMIC_TRACE_RMW;
ret = atomic_xchg__nocheck(haddr, val);
DATA_TYPE ret = atomic_xchg__nocheck(haddr, val);
ATOMIC_MMU_CLEANUP;
return ret;
}
@@ -153,10 +103,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
{ \
ATOMIC_MMU_DECLS; \
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
DATA_TYPE ret; \
\
ATOMIC_TRACE_RMW; \
ret = atomic_##X(haddr, val); \
DATA_TYPE ret = atomic_##X(haddr, val); \
ATOMIC_MMU_CLEANUP; \
return ret; \
}
@@ -171,48 +118,9 @@ GEN_ATOMIC_HELPER(or_fetch)
GEN_ATOMIC_HELPER(xor_fetch)
#undef GEN_ATOMIC_HELPER
/* These helpers are, as a whole, full barriers. Within the helper,
* the leading barrier is explicit and the trailing barrier is within
* cmpxchg primitive.
*
* Trace this load + RMW loop as a single RMW op. This way, regardless
* of CF_PARALLEL's value, we'll trace just a read and a write.
*/
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE xval EXTRA_ARGS) \
{ \
ATOMIC_MMU_DECLS; \
XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
XDATA_TYPE cmp, old, new, val = xval; \
\
ATOMIC_TRACE_RMW; \
smp_mb(); \
cmp = atomic_read__nocheck(haddr); \
do { \
old = cmp; new = FN(old, val); \
cmp = atomic_cmpxchg__nocheck(haddr, old, new); \
} while (cmp != old); \
ATOMIC_MMU_CLEANUP; \
return RET; \
}
GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umin, MIN, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umax, MAX, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umin_fetch, MIN, DATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
#undef GEN_ATOMIC_HELPER_FN
#endif /* DATA SIZE >= 16 */
#undef END
#undef MEND
#if DATA_SIZE > 1
@@ -220,10 +128,8 @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
within the ATOMIC_NAME macro. */
#ifdef HOST_WORDS_BIGENDIAN
# define END _le
# define MEND _le
#else
# define END _be
# define MEND _be
#endif
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
@@ -231,27 +137,17 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
{
ATOMIC_MMU_DECLS;
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
DATA_TYPE ret;
ATOMIC_TRACE_RMW;
#if DATA_SIZE == 16
ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv));
#else
ret = atomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
#endif
DATA_TYPE ret = atomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
ATOMIC_MMU_CLEANUP;
return BSWAP(ret);
}
#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
{
ATOMIC_MMU_DECLS;
DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
ATOMIC_TRACE_LD;
val = atomic16_read(haddr);
__atomic_load(haddr, &val, __ATOMIC_RELAXED);
ATOMIC_MMU_CLEANUP;
return BSWAP(val);
}
@@ -261,23 +157,17 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
{
ATOMIC_MMU_DECLS;
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
ATOMIC_TRACE_ST;
val = BSWAP(val);
atomic16_set(haddr, val);
__atomic_store(haddr, &val, __ATOMIC_RELAXED);
ATOMIC_MMU_CLEANUP;
}
#endif
#else
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE val EXTRA_ARGS)
{
ATOMIC_MMU_DECLS;
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
ABI_TYPE ret;
ATOMIC_TRACE_RMW;
ret = atomic_xchg__nocheck(haddr, BSWAP(val));
ABI_TYPE ret = atomic_xchg__nocheck(haddr, BSWAP(val));
ATOMIC_MMU_CLEANUP;
return BSWAP(ret);
}
@@ -288,10 +178,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
{ \
ATOMIC_MMU_DECLS; \
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
DATA_TYPE ret; \
\
ATOMIC_TRACE_RMW; \
ret = atomic_##X(haddr, BSWAP(val)); \
DATA_TYPE ret = atomic_##X(haddr, BSWAP(val)); \
ATOMIC_MMU_CLEANUP; \
return BSWAP(ret); \
}
@@ -305,64 +192,54 @@ GEN_ATOMIC_HELPER(xor_fetch)
#undef GEN_ATOMIC_HELPER
/* These helpers are, as a whole, full barriers. Within the helper,
* the leading barrier is explicit and the trailing barrier is within
* cmpxchg primitive.
*
* Trace this load + RMW loop as a single RMW op. This way, regardless
* of CF_PARALLEL's value, we'll trace just a read and a write.
*/
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE xval EXTRA_ARGS) \
{ \
ATOMIC_MMU_DECLS; \
XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
XDATA_TYPE ldo, ldn, old, new, val = xval; \
\
ATOMIC_TRACE_RMW; \
smp_mb(); \
ldn = atomic_read__nocheck(haddr); \
do { \
ldo = ldn; old = BSWAP(ldo); new = FN(old, val); \
ldn = atomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new)); \
} while (ldo != ldn); \
ATOMIC_MMU_CLEANUP; \
return RET; \
}
GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umin, MIN, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umax, MAX, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umin_fetch, MIN, DATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
/* Note that for addition, we need to use a separate cmpxchg loop instead
of bswaps for the reverse-host-endian helpers. */
#define ADD(X, Y) (X + Y)
GEN_ATOMIC_HELPER_FN(fetch_add, ADD, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
#undef ADD
ABI_TYPE ATOMIC_NAME(fetch_add)(CPUArchState *env, target_ulong addr,
ABI_TYPE val EXTRA_ARGS)
{
ATOMIC_MMU_DECLS;
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
DATA_TYPE ldo, ldn, ret, sto;
#undef GEN_ATOMIC_HELPER_FN
ldo = atomic_read__nocheck(haddr);
while (1) {
ret = BSWAP(ldo);
sto = BSWAP(ret + val);
ldn = atomic_cmpxchg__nocheck(haddr, ldo, sto);
if (ldn == ldo) {
ATOMIC_MMU_CLEANUP;
return ret;
}
ldo = ldn;
}
}
ABI_TYPE ATOMIC_NAME(add_fetch)(CPUArchState *env, target_ulong addr,
ABI_TYPE val EXTRA_ARGS)
{
ATOMIC_MMU_DECLS;
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
DATA_TYPE ldo, ldn, ret, sto;
ldo = atomic_read__nocheck(haddr);
while (1) {
ret = BSWAP(ldo) + val;
sto = BSWAP(ret);
ldn = atomic_cmpxchg__nocheck(haddr, ldo, sto);
if (ldn == ldo) {
ATOMIC_MMU_CLEANUP;
return ret;
}
ldo = ldn;
}
}
#endif /* DATA_SIZE >= 16 */
#undef END
#undef MEND
#endif /* DATA_SIZE > 1 */
#undef ATOMIC_TRACE_ST
#undef ATOMIC_TRACE_LD
#undef ATOMIC_TRACE_RMW
#undef BSWAP
#undef ABI_TYPE
#undef DATA_TYPE
#undef SDATA_TYPE
#undef SUFFIX
#undef DATA_SIZE
#undef SHIFT
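
The "separate cmpxchg loop" mentioned in the comment above exists because an add on a value stored in the opposite byte order from the host cannot be expressed as a single host atomic instruction: the helper has to swap to host order, add, swap back, and retry if memory changed underneath it. A stand-alone sketch of that pattern for a 32-bit value, using GCC/Clang builtins (names and width are illustrative):

#include <stdint.h>

/* Atomically add 'val' to *haddr, where *haddr is kept in the opposite
 * byte order from the host; returns the previous value in host order. */
static uint32_t fetch_add_byteswapped(uint32_t *haddr, uint32_t val)
{
    uint32_t ldo = __atomic_load_n(haddr, __ATOMIC_RELAXED);
    for (;;) {
        uint32_t old_host = __builtin_bswap32(ldo);             /* memory -> host */
        uint32_t new_mem  = __builtin_bswap32(old_host + val);  /* host -> memory */
        if (__atomic_compare_exchange_n(haddr, &ldo, new_mem, false,
                                        __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)) {
            return old_host;   /* old value, as in the fetch_add helper above */
        }
        /* ldo now holds the value another thread stored; try again. */
    }
}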


@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -21,14 +21,17 @@
#include "cpu.h"
#include "sysemu/cpus.h"
#include "exec/exec-all.h"
#include "exec/memory-internal.h"
bool tcg_allowed;
/* exit the current TB, but without causing any exception to be raised */
void cpu_loop_exit_noexc(CPUState *cpu)
{
/* XXX: restore cpu registers saved in host registers */
cpu->exception_index = -1;
cpu_loop_exit(cpu);
siglongjmp(cpu->jmp_env, 1);
}
#if defined(CONFIG_SOFTMMU)
@@ -63,17 +66,15 @@ void cpu_reloading_memory_map(void)
void cpu_loop_exit(CPUState *cpu)
{
/* Undo the setting in cpu_tb_exec. */
cpu->can_do_io = 1;
siglongjmp(cpu->jmp_env, 1);
}
void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
{
if (pc) {
cpu_restore_state(cpu, pc, true);
cpu_restore_state(cpu, pc);
}
cpu_loop_exit(cpu);
siglongjmp(cpu->jmp_env, 1);
}
void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc)


@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -25,6 +25,7 @@
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/tb-lookup.h"
@@ -145,24 +146,19 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
uint8_t *tb_ptr = itb->tc.ptr;
qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
"Trace %d: %p ["
TARGET_FMT_lx "/" TARGET_FMT_lx "/%#x] %s\n",
cpu->cpu_index, itb->tc.ptr,
itb->cs_base, itb->pc, itb->flags,
"Trace %p [%d: " TARGET_FMT_lx "] %s\n",
itb->tc.ptr, cpu->cpu_index, itb->pc,
lookup_symbol(itb->pc));
#if defined(DEBUG_DISAS)
if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
&& qemu_log_in_addr_range(itb->pc)) {
qemu_log_lock();
int flags = 0;
if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
flags |= CPU_DUMP_FPU;
}
#if defined(TARGET_I386)
flags |= CPU_DUMP_CCOP;
log_cpu_state(cpu, CPU_DUMP_CCOP);
#else
log_cpu_state(cpu, 0);
#endif
log_cpu_state(cpu, flags);
qemu_log_unlock();
}
#endif /* DEBUG_DISAS */
@@ -212,20 +208,20 @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
We only end up here when an existing TB is too long. */
cflags |= MIN(max_cycles, CF_COUNT_MASK);
mmap_lock();
tb_lock();
tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base,
orig_tb->flags, cflags);
tb->orig_tb = orig_tb;
mmap_unlock();
tb_unlock();
/* execute the generated code */
trace_exec_tb_nocache(tb, tb->pc);
cpu_tb_exec(cpu, tb);
mmap_lock();
tb_lock();
tb_phys_invalidate(tb, -1);
mmap_unlock();
tcg_tb_remove(tb);
tb_remove(tb);
tb_unlock();
}
#endif
@@ -244,7 +240,12 @@ void cpu_exec_step_atomic(CPUState *cpu)
tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
if (tb == NULL) {
mmap_lock();
tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
tb_lock();
tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask);
if (likely(tb == NULL)) {
tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
}
tb_unlock();
mmap_unlock();
}
@@ -259,17 +260,15 @@ void cpu_exec_step_atomic(CPUState *cpu)
cpu_tb_exec(cpu, tb);
cc->cpu_exec_exit(cpu);
} else {
/*
/* We may have exited due to another problem here, so we need
* to reset any tb_locks we may have taken but didn't release.
* The mmap_lock is dropped by tb_gen_code if it runs out of
* memory.
*/
#ifndef CONFIG_SOFTMMU
tcg_debug_assert(!have_mmap_lock());
#endif
if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread();
}
assert_no_pages_locked();
tb_lock_reset();
}
if (in_exclusive_region) {
@@ -292,7 +291,7 @@ struct tb_desc {
uint32_t trace_vcpu_dstate;
};
static bool tb_lookup_cmp(const void *p, const void *d)
static bool tb_cmp(const void *p, const void *d)
{
const TranslationBlock *tb = p;
const struct tb_desc *desc = d;
@@ -335,12 +334,9 @@ TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
desc.trace_vcpu_dstate = *cpu->trace_dstate;
desc.pc = pc;
phys_pc = get_page_addr_code(desc.env, pc);
if (phys_pc == -1) {
return NULL;
}
desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
return qht_lookup(&tb_ctx.htable, tb_cmp, &desc, h);
}
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
@@ -354,43 +350,28 @@ void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
}
}
/* Called with tb_lock held. */
static inline void tb_add_jump(TranslationBlock *tb, int n,
TranslationBlock *tb_next)
{
uintptr_t old;
assert(n < ARRAY_SIZE(tb->jmp_list_next));
qemu_spin_lock(&tb_next->jmp_lock);
/* make sure the destination TB is valid */
if (tb_next->cflags & CF_INVALID) {
goto out_unlock_next;
if (tb->jmp_list_next[n]) {
/* Another thread has already done this while we were
* outside of the lock; nothing to do in this case */
return;
}
/* Atomically claim the jump destination slot only if it was NULL */
old = atomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL, (uintptr_t)tb_next);
if (old) {
goto out_unlock_next;
}
/* patch the native jump address */
tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);
/* add in TB jmp list */
tb->jmp_list_next[n] = tb_next->jmp_list_head;
tb_next->jmp_list_head = (uintptr_t)tb | n;
qemu_spin_unlock(&tb_next->jmp_lock);
qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
"Linking TBs %p [" TARGET_FMT_lx
"] index %d -> %p [" TARGET_FMT_lx "]\n",
tb->tc.ptr, tb->pc, n,
tb_next->tc.ptr, tb_next->pc);
return;
out_unlock_next:
qemu_spin_unlock(&tb_next->jmp_lock);
return;
/* patch the native jump address */
tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);
/* add in TB jmp circular list */
tb->jmp_list_next[n] = tb_next->jmp_list_first;
tb_next->jmp_list_first = (uintptr_t)tb | n;
}
static inline TranslationBlock *tb_find(CPUState *cpu,
@@ -400,11 +381,27 @@ static inline TranslationBlock *tb_find(CPUState *cpu,
TranslationBlock *tb;
target_ulong cs_base, pc;
uint32_t flags;
bool acquired_tb_lock = false;
tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
if (tb == NULL) {
/* mmap_lock is needed by tb_gen_code, and mmap_lock must be
* taken outside tb_lock. As system emulation is currently
* single threaded the locks are NOPs.
*/
mmap_lock();
tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
tb_lock();
acquired_tb_lock = true;
/* There's a chance that our desired tb has been translated while
* taking the locks so we check again inside the lock.
*/
tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask);
if (likely(tb == NULL)) {
/* if no translated code available, then translate it now */
tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
}
mmap_unlock();
/* We add the TB in the virtual pc hash table for the fast lookup */
atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
@@ -419,8 +416,17 @@ static inline TranslationBlock *tb_find(CPUState *cpu,
}
#endif
/* See if we can patch the calling TB. */
if (last_tb) {
tb_add_jump(last_tb, tb_exit, tb);
if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
if (!acquired_tb_lock) {
tb_lock();
acquired_tb_lock = true;
}
if (!(tb->cflags & CF_INVALID)) {
tb_add_jump(last_tb, tb_exit, tb);
}
}
if (acquired_tb_lock) {
tb_unlock();
}
return tb;
}
@@ -519,13 +525,19 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
TranslationBlock **last_tb)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
int32_t insns_left;
/* Clear the interrupt flag now since we're processing
* cpu->interrupt_request and cpu->exit_request.
* Ensure zeroing happens before reading cpu->exit_request or
* cpu->interrupt_request (see also smp_wmb in cpu_exit())
*/
atomic_mb_set(&cpu->icount_decr.u16.high, 0);
insns_left = atomic_read(&cpu->icount_decr.u32);
atomic_set(&cpu->icount_decr.u16.high, 0);
if (unlikely(insns_left < 0)) {
/* Ensure the zeroing of icount_decr comes before the next read
* of cpu->exit_request or cpu->interrupt_request.
*/
smp_mb();
}
if (unlikely(atomic_read(&cpu->interrupt_request))) {
int interrupt_request;
@@ -577,7 +589,6 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
else {
if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
replay_interrupt();
cpu->exception_index = -1;
*last_tb = NULL;
}
/* The target hook may have updated the 'cpu->interrupt_request';
@@ -599,9 +610,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
if (unlikely(atomic_read(&cpu->exit_request)
|| (use_icount && cpu->icount_decr.u16.low + cpu->icount_extra == 0))) {
atomic_set(&cpu->exit_request, 0);
if (cpu->exception_index == -1) {
cpu->exception_index = EXCP_INTERRUPT;
}
cpu->exception_index = EXCP_INTERRUPT;
return true;
}
@@ -696,13 +705,11 @@ int cpu_exec(CPUState *cpu)
g_assert(cpu == current_cpu);
g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
#ifndef CONFIG_SOFTMMU
tcg_debug_assert(!have_mmap_lock());
#endif
cpu->can_do_io = 1;
tb_lock_reset();
if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread();
}
assert_no_pages_locked();
}
/* if an exception is pending, we execute it here */
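
One detail worth noting in the tb_add_jump() change above: the new code claims the jump-destination slot lock-free, with a single compare-and-swap of a NULL jmp_dest entry, so only the first thread patches the native jump and any racing thread simply backs off. The same idiom in isolation (illustrative names, GCC/Clang builtins):

#include <stdbool.h>
#include <stddef.h>

/* Install 'next' into a one-shot slot; only the first caller succeeds. */
static bool claim_jump_slot(void **slot, void *next)
{
    void *expected = NULL;
    return __atomic_compare_exchange_n(slot, &expected, next, false,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

/* Usage sketch: if (claim_jump_slot(&slots[n], tb_next)) { patch the jump; } */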

File diff suppressed because it is too large


@@ -11,7 +11,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -98,23 +98,19 @@
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
size_t mmu_idx, size_t index,
target_ulong addr,
uintptr_t retaddr,
bool recheck,
MMUAccessType access_type)
uintptr_t retaddr)
{
CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, recheck,
access_type, DATA_SIZE);
return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, DATA_SIZE);
}
#endif
WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
uintptr_t mmu_idx = get_mmuidx(oi);
uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
target_ulong tlb_addr = entry->ADDR_READ;
unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
unsigned a_bits = get_alignment_bits(get_memop(oi));
uintptr_t haddr;
DATA_TYPE res;
@@ -125,14 +121,13 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
}
/* If the TLB entry is for a different page, reload and try again. */
if (!tlb_hit(tlb_addr, addr)) {
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
index = tlb_index(env, mmu_idx, addr);
entry = tlb_entry(env, mmu_idx, addr);
}
tlb_addr = entry->ADDR_READ;
tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
}
/* Handle an IO access. */
@@ -143,9 +138,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
/* ??? Note that the io helpers always read data in the target
byte ordering. We should push the LE/BE request down into io. */
res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr,
tlb_addr & TLB_RECHECK,
READ_ACCESS_TYPE);
res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr);
res = TGT_LE(res);
return res;
}
@@ -169,7 +162,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
return res;
}
haddr = addr + entry->addend;
haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
@@ -182,10 +175,9 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
uintptr_t mmu_idx = get_mmuidx(oi);
uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
target_ulong tlb_addr = entry->ADDR_READ;
unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
unsigned a_bits = get_alignment_bits(get_memop(oi));
uintptr_t haddr;
DATA_TYPE res;
@@ -196,14 +188,13 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
}
/* If the TLB entry is for a different page, reload and try again. */
if (!tlb_hit(tlb_addr, addr)) {
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
index = tlb_index(env, mmu_idx, addr);
entry = tlb_entry(env, mmu_idx, addr);
}
tlb_addr = entry->ADDR_READ;
tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
}
/* Handle an IO access. */
@@ -214,9 +205,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
/* ??? Note that the io helpers always read data in the target
byte ordering. We should push the LE/BE request down into io. */
res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr,
tlb_addr & TLB_RECHECK,
READ_ACCESS_TYPE);
res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr);
res = TGT_BE(res);
return res;
}
@@ -240,7 +229,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
return res;
}
haddr = addr + entry->addend;
haddr = addr + env->tlb_table[mmu_idx][index].addend;
res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
return res;
}
@@ -270,21 +259,18 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
size_t mmu_idx, size_t index,
DATA_TYPE val,
target_ulong addr,
uintptr_t retaddr,
bool recheck)
uintptr_t retaddr)
{
CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
return io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
recheck, DATA_SIZE);
return io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, DATA_SIZE);
}
void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
uintptr_t mmu_idx = get_mmuidx(oi);
uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
target_ulong tlb_addr = tlb_addr_write(entry);
unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
unsigned a_bits = get_alignment_bits(get_memop(oi));
uintptr_t haddr;
@@ -294,14 +280,12 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
}
/* If the TLB entry is for a different page, reload and try again. */
if (!tlb_hit(tlb_addr, addr)) {
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
if (!VICTIM_TLB_HIT(addr_write, addr)) {
tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
mmu_idx, retaddr);
index = tlb_index(env, mmu_idx, addr);
entry = tlb_entry(env, mmu_idx, addr);
tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
}
tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
tlb_addr = env->tlb_table[mmu_idx][index].addr_write & ~TLB_INVALID_MASK;
}
/* Handle an IO access. */
@@ -313,8 +297,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
/* ??? Note that the io helpers always read data in the target
byte ordering. We should push the LE/BE request down into io. */
val = TGT_LE(val);
glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr,
retaddr, tlb_addr & TLB_RECHECK);
glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr);
return;
}
@@ -322,18 +305,18 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
if (DATA_SIZE > 1
&& unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
>= TARGET_PAGE_SIZE)) {
int i;
target_ulong page2;
CPUTLBEntry *entry2;
int i, index2;
target_ulong page2, tlb_addr2;
do_unaligned_access:
/* Ensure the second page is in the TLB. Note that the first page
is already guaranteed to be filled, and that the second page
cannot evict the first. */
page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
entry2 = tlb_entry(env, mmu_idx, page2);
if (!tlb_hit_page(tlb_addr_write(entry2), page2)
index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
&& !VICTIM_TLB_HIT(addr_write, page2)) {
tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
mmu_idx, retaddr);
}
@@ -349,7 +332,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
return;
}
haddr = addr + entry->addend;
haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
@@ -361,10 +344,9 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
uintptr_t mmu_idx = get_mmuidx(oi);
uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
target_ulong tlb_addr = tlb_addr_write(entry);
unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
unsigned a_bits = get_alignment_bits(get_memop(oi));
uintptr_t haddr;
@@ -374,14 +356,12 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
}
/* If the TLB entry is for a different page, reload and try again. */
if (!tlb_hit(tlb_addr, addr)) {
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
if (!VICTIM_TLB_HIT(addr_write, addr)) {
tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
mmu_idx, retaddr);
index = tlb_index(env, mmu_idx, addr);
entry = tlb_entry(env, mmu_idx, addr);
tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
}
tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
tlb_addr = env->tlb_table[mmu_idx][index].addr_write & ~TLB_INVALID_MASK;
}
/* Handle an IO access. */
@@ -393,8 +373,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
/* ??? Note that the io helpers always read data in the target
byte ordering. We should push the LE/BE request down into io. */
val = TGT_BE(val);
glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr,
tlb_addr & TLB_RECHECK);
glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr);
return;
}
@@ -402,18 +381,18 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
if (DATA_SIZE > 1
&& unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
>= TARGET_PAGE_SIZE)) {
int i;
target_ulong page2;
CPUTLBEntry *entry2;
int i, index2;
target_ulong page2, tlb_addr2;
do_unaligned_access:
/* Ensure the second page is in the TLB. Note that the first page
is already guaranteed to be filled, and that the second page
cannot evict the first. */
page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
entry2 = tlb_entry(env, mmu_idx, page2);
if (!tlb_hit_page(tlb_addr_write(entry2), page2)
index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
&& !VICTIM_TLB_HIT(addr_write, page2)) {
tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
mmu_idx, retaddr);
}
@@ -429,7 +408,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
return;
}
haddr = addr + entry->addend;
haddr = addr + env->tlb_table[mmu_idx][index].addend;
glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */
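
Both the old and the new load/store helpers above rely on the same direct-mapped software TLB lookup: a few bits of the virtual address index a small per-MMU-mode table, and a hit means the entry tags the same page with no invalid bit set. A compact stand-alone sketch of the index and hit test (page size, table size and the invalid bit are illustrative stand-ins):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_BITS   12                              /* stand-in for TARGET_PAGE_BITS */
#define TLB_SIZE    256                             /* stand-in for CPU_TLB_SIZE */
#define PAGE_MASK   (~(((uint64_t)1 << PAGE_BITS) - 1))
#define INVALID_BIT ((uint64_t)1)                   /* stand-in for TLB_INVALID_MASK */

static inline size_t tlb_index(uint64_t addr)
{
    return (addr >> PAGE_BITS) & (TLB_SIZE - 1);
}

static inline bool tlb_hit(uint64_t tlb_addr, uint64_t addr)
{
    /* Hit iff the entry tags the same page and is not marked invalid. */
    return (addr & PAGE_MASK) == (tlb_addr & (PAGE_MASK | INVALID_BIT));
}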


@@ -51,7 +51,7 @@ static void tcg_handle_interrupt(CPUState *cpu, int mask)
if (!qemu_cpu_is_self(cpu)) {
qemu_cpu_kick(cpu);
} else {
atomic_set(&cpu->icount_decr.u16.high, -1);
cpu->icount_decr.u16.high = -1;
if (use_icount &&
!cpu->can_do_io
&& (mask & ~old_mask) != 0) {

File diff suppressed because it is too large


@@ -156,9 +156,8 @@ void *HELPER(lookup_tb_ptr)(CPUArchState *env)
return tcg_ctx->code_gen_epilogue;
}
qemu_log_mask_and_addr(CPU_LOG_EXEC, pc,
"Chain %d: %p ["
TARGET_FMT_lx "/" TARGET_FMT_lx "/%#x] %s\n",
cpu->cpu_index, tb->tc.ptr, cs_base, pc, flags,
"Chain %p [%d: " TARGET_FMT_lx "] %s\n",
tb->tc.ptr, cpu->cpu_index, pc,
lookup_symbol(pc));
return tb->tc.ptr;
}


@@ -125,161 +125,12 @@ GEN_ATOMIC_HELPERS(fetch_add)
GEN_ATOMIC_HELPERS(fetch_and)
GEN_ATOMIC_HELPERS(fetch_or)
GEN_ATOMIC_HELPERS(fetch_xor)
GEN_ATOMIC_HELPERS(fetch_smin)
GEN_ATOMIC_HELPERS(fetch_umin)
GEN_ATOMIC_HELPERS(fetch_smax)
GEN_ATOMIC_HELPERS(fetch_umax)
GEN_ATOMIC_HELPERS(add_fetch)
GEN_ATOMIC_HELPERS(and_fetch)
GEN_ATOMIC_HELPERS(or_fetch)
GEN_ATOMIC_HELPERS(xor_fetch)
GEN_ATOMIC_HELPERS(smin_fetch)
GEN_ATOMIC_HELPERS(umin_fetch)
GEN_ATOMIC_HELPERS(smax_fetch)
GEN_ATOMIC_HELPERS(umax_fetch)
GEN_ATOMIC_HELPERS(xchg)
#undef GEN_ATOMIC_HELPERS
DEF_HELPER_FLAGS_3(gvec_mov, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_dup8, TCG_CALL_NO_RWG, void, ptr, i32, i32)
DEF_HELPER_FLAGS_3(gvec_dup16, TCG_CALL_NO_RWG, void, ptr, i32, i32)
DEF_HELPER_FLAGS_3(gvec_dup32, TCG_CALL_NO_RWG, void, ptr, i32, i32)
DEF_HELPER_FLAGS_3(gvec_dup64, TCG_CALL_NO_RWG, void, ptr, i32, i64)
DEF_HELPER_FLAGS_4(gvec_add8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_add16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_add32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_add64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_adds8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_adds16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_adds32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_adds64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_sub8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_sub16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_sub32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_sub64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_subs8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_subs16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_subs32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_subs64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_mul8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_mul16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_mul32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_mul64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_muls8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_muls16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_muls32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_muls64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_ssadd8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ssadd16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ssadd32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ssadd64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_sssub8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_sssub16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_sssub32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_sssub64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_usadd8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_usadd16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_usadd32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_usadd64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ussub8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ussub16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ussub32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ussub64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_smin8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_smin16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_smin32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_smin64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_smax8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_smax16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_smax32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_smax64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_umin8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_umin16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_umin32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_umin64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_umax8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_umax16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_umax32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_umax64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_neg8, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_neg16, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_neg32, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_neg64, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_not, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_and, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_or, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_xor, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_andc, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_orc, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_nand, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_nor, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_eqv, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ands, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_xors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_ors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_3(gvec_shl8i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_shl16i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_shl32i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_shl64i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_shr8i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_shr16i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_shr32i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_shr64i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_sar8i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_sar16i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_sar32i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_sar64i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_eq8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_eq16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_eq32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_eq64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ne8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ne16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ne32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ne64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_lt8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_lt16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_lt32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_lt64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_le8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_le16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_le32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_le64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ltu8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ltu16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ltu32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ltu64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_leu8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_leu16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_leu32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_leu64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
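
The DEF_HELPER_FLAGS_* lines above are QEMU's X-macro helper declarations: the same list is expanded more than once by the helper headers (into C prototypes of the form helper_<name>(), into TCG call metadata, and so on). A toy sketch of that X-macro technique, entirely outside QEMU and with illustrative names:

#include <stdint.h>
#include <stdio.h>

/* One list, expanded once per use below. */
#define HELPER_LIST(DEF) \
    DEF(add32, uint32_t, uint32_t, uint32_t) \
    DEF(sub32, uint32_t, uint32_t, uint32_t)

/* Expansion #1: prototypes. */
#define DECL(name, ret, t1, t2) ret helper_##name(t1 a, t2 b);
HELPER_LIST(DECL)
#undef DECL

uint32_t helper_add32(uint32_t a, uint32_t b) { return a + b; }
uint32_t helper_sub32(uint32_t a, uint32_t b) { return a - b; }

int main(void)
{
    printf("%u %u\n", helper_add32(2, 3), helper_sub32(5, 1));
    return 0;
}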


@@ -1,4 +1,4 @@
# See docs/devel/tracing.txt for syntax documentation.
# Trace events for debugging and performance instrumentation
# TCG related tracing (mostly disabled by default)
# cpu-exec.c

File diff suppressed because it is too large


@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -23,13 +23,10 @@
/* translate-all.c */
struct page_collection *page_collection_lock(tb_page_addr_t start,
tb_page_addr_t end);
void page_collection_unlock(struct page_collection *set);
void tb_invalidate_phys_page_fast(struct page_collection *pages,
tb_page_addr_t start, int len);
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len);
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
int is_cpu_write_access);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end);
void tb_check_watchpoint(CPUState *cpu);
#ifdef CONFIG_USER_ONLY


@@ -34,7 +34,7 @@ void translator_loop_temp_check(DisasContextBase *db)
void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
CPUState *cpu, TranslationBlock *tb)
{
int bp_insn = 0;
int max_insns;
/* Initialize DisasContext */
db->tb = tb;
@@ -45,18 +45,18 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
db->singlestep_enabled = cpu->singlestep_enabled;
/* Instruction counting */
db->max_insns = tb_cflags(db->tb) & CF_COUNT_MASK;
if (db->max_insns == 0) {
db->max_insns = CF_COUNT_MASK;
max_insns = tb_cflags(db->tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
if (db->max_insns > TCG_MAX_INSNS) {
db->max_insns = TCG_MAX_INSNS;
if (max_insns > TCG_MAX_INSNS) {
max_insns = TCG_MAX_INSNS;
}
if (db->singlestep_enabled || singlestep) {
db->max_insns = 1;
max_insns = 1;
}
ops->init_disas_context(db, cpu);
max_insns = ops->init_disas_context(db, cpu, max_insns);
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
/* Reset the temp count so that we can identify leaks */
@@ -73,13 +73,11 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
/* Pass breakpoint hits to target for further processing */
if (!db->singlestep_enabled
&& unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
CPUBreakpoint *bp;
QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
if (bp->pc == db->pc_next) {
if (ops->breakpoint_check(db, cpu, bp)) {
bp_insn = 1;
break;
}
}
@@ -97,8 +95,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
update db->pc_next and db->is_jmp to indicate what should be
done next -- either exiting this loop or locate the start of
the next instruction. */
if (db->num_insns == db->max_insns
&& (tb_cflags(db->tb) & CF_LAST_IO)) {
if (db->num_insns == max_insns && (tb_cflags(db->tb) & CF_LAST_IO)) {
/* Accept I/O on the last instruction. */
gen_io_start();
ops->translate_insn(db, cpu);
@@ -114,7 +111,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
/* Stop translation if the output buffer is full,
or we have executed all of the allowed instructions. */
if (tcg_op_buf_full() || db->num_insns >= db->max_insns) {
if (tcg_op_buf_full() || db->num_insns >= max_insns) {
db->is_jmp = DISAS_TOO_MANY;
break;
}
@@ -122,7 +119,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
/* Emit code to exit the TB, as indicated by db->is_jmp. */
ops->tb_stop(db, cpu);
gen_tb_end(db->tb, db->num_insns - bp_insn);
gen_tb_end(db->tb, db->num_insns);
/* The disas_log hook may use these values rather than recompute. */
db->tb->size = db->pc_next - db->pc_first;


@@ -2,9 +2,6 @@
#include "qemu-common.h"
#include "qom/cpu.h"
#include "sysemu/replay.h"
#include "sysemu/sysemu.h"
bool enable_cpu_pm = false;
void cpu_resume(CPUState *cpu)
{


@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -25,7 +25,6 @@
#include "exec/cpu_ldst.h"
#include "translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#undef EAX
#undef ECX
@@ -58,13 +57,12 @@ static void cpu_exit_tb_from_sighandler(CPUState *cpu, sigset_t *old_set)
the effective address of the memory exception. 'is_write' is 1 if a
write caused the exception and otherwise 0'. 'old_set' is the
signal set which should be restored */
static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
static inline int handle_cpu_signal(uintptr_t pc, unsigned long address,
int is_write, sigset_t *old_set)
{
CPUState *cpu = current_cpu;
CPUClass *cc;
int ret;
unsigned long address = (unsigned long)info->si_addr;
/* We must handle PC addresses from two different sources:
* a call return address and a signal frame address.
@@ -105,18 +103,7 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
pc, address, is_write, *(unsigned long *)old_set);
#endif
/* XXX: locking issue */
/* Note that it is important that we don't call page_unprotect() unless
* this is really a "write to nonwriteable page" fault, because
* page_unprotect() assumes that if it is called for an access to
* a page that's writeable this means we had two threads racing and
* another thread got there first and already made the page writeable;
* so we will retry the access. If we were to call page_unprotect()
* for some other kind of fault that should really be passed to the
* guest, we'd end up in an infinite loop of retrying the faulting
* access.
*/
if (is_write && info->si_signo == SIGSEGV && info->si_code == SEGV_ACCERR &&
h2g_valid(address)) {
if (is_write && h2g_valid(address)) {
switch (page_unprotect(h2g(address), pc)) {
case 0:
/* Fault not caused by a page marked unwritable to protect
@@ -150,7 +137,7 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
cc = CPU_GET_CLASS(cpu);
/* see if it is an MMU fault */
g_assert(cc->handle_mmu_fault);
ret = cc->handle_mmu_fault(cpu, address, 0, is_write, MMU_USER_IDX);
ret = cc->handle_mmu_fault(cpu, address, is_write, MMU_USER_IDX);
if (ret == 0) {
/* The MMU fault was handled without causing real CPU fault.
@@ -169,7 +156,7 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
}
/* Now we have a real cpu fault. */
cpu_restore_state(cpu, pc, true);
cpu_restore_state(cpu, pc);
sigprocmask(SIG_SETMASK, old_set, NULL);
cpu_loop_exit(cpu);
@@ -228,8 +215,9 @@ int cpu_signal_handler(int host_signum, void *pinfo,
#endif
pc = EIP_sig(uc);
trapno = TRAP_sig(uc);
return handle_cpu_signal(pc, info,
trapno == 0xe ? (ERROR_sig(uc) >> 1) & 1 : 0,
return handle_cpu_signal(pc, (unsigned long)info->si_addr,
trapno == 0xe ?
(ERROR_sig(uc) >> 1) & 1 : 0,
&MASK_sig(uc));
}
@@ -273,8 +261,9 @@ int cpu_signal_handler(int host_signum, void *pinfo,
#endif
pc = PC_sig(uc);
return handle_cpu_signal(pc, info,
TRAP_sig(uc) == 0xe ? (ERROR_sig(uc) >> 1) & 1 : 0,
return handle_cpu_signal(pc, (unsigned long)info->si_addr,
TRAP_sig(uc) == 0xe ?
(ERROR_sig(uc) >> 1) & 1 : 0,
&MASK_sig(uc));
}
@@ -352,7 +341,8 @@ int cpu_signal_handler(int host_signum, void *pinfo,
is_write = 1;
}
#endif
return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
return handle_cpu_signal(pc, (unsigned long)info->si_addr,
is_write, &uc->uc_sigmask);
}
#elif defined(__alpha__)
@@ -382,7 +372,8 @@ int cpu_signal_handler(int host_signum, void *pinfo,
is_write = 1;
}
return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
return handle_cpu_signal(pc, (unsigned long)info->si_addr,
is_write, &uc->uc_sigmask);
}
#elif defined(__sparc__)
@@ -441,7 +432,8 @@ int cpu_signal_handler(int host_signum, void *pinfo,
break;
}
}
return handle_cpu_signal(pc, info, is_write, sigmask);
return handle_cpu_signal(pc, (unsigned long)info->si_addr,
is_write, sigmask);
}
#elif defined(__arm__)
@@ -474,72 +466,72 @@ int cpu_signal_handler(int host_signum, void *pinfo,
* later processor; on v5 we will always report this as a read).
*/
is_write = extract32(uc->uc_mcontext.error_code, 11, 1);
return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
return handle_cpu_signal(pc, (unsigned long)info->si_addr,
is_write,
&uc->uc_sigmask);
}
#elif defined(__aarch64__)
#ifndef ESR_MAGIC
/* Pre-3.16 kernel headers don't have these, so provide fallback definitions */
#define ESR_MAGIC 0x45535201
struct esr_context {
struct _aarch64_ctx head;
uint64_t esr;
};
#endif
static inline struct _aarch64_ctx *first_ctx(ucontext_t *uc)
{
return (struct _aarch64_ctx *)&uc->uc_mcontext.__reserved;
}
static inline struct _aarch64_ctx *next_ctx(struct _aarch64_ctx *hdr)
{
return (struct _aarch64_ctx *)((char *)hdr + hdr->size);
}
int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
siginfo_t *info = pinfo;
ucontext_t *uc = puc;
uintptr_t pc = uc->uc_mcontext.pc;
uint32_t insn = *(uint32_t *)pc;
bool is_write;
struct _aarch64_ctx *hdr;
struct esr_context const *esrctx = NULL;
/* Find the esr_context, which has the WnR bit in it */
for (hdr = first_ctx(uc); hdr->magic; hdr = next_ctx(hdr)) {
if (hdr->magic == ESR_MAGIC) {
esrctx = (struct esr_context const *)hdr;
break;
/* XXX: need kernel patch to get write flag faster. */
is_write = ( (insn & 0xbfff0000) == 0x0c000000 /* C3.3.1 */
|| (insn & 0xbfe00000) == 0x0c800000 /* C3.3.2 */
|| (insn & 0xbfdf0000) == 0x0d000000 /* C3.3.3 */
|| (insn & 0xbfc00000) == 0x0d800000 /* C3.3.4 */
|| (insn & 0x3f400000) == 0x08000000 /* C3.3.6 */
|| (insn & 0x3bc00000) == 0x39000000 /* C3.3.13 */
|| (insn & 0x3fc00000) == 0x3d800000 /* ... 128bit */
/* Ingore bits 10, 11 & 21, controlling indexing. */
|| (insn & 0x3bc00000) == 0x38000000 /* C3.3.8-12 */
|| (insn & 0x3fe00000) == 0x3c800000 /* ... 128bit */
/* Ignore bits 23 & 24, controlling indexing. */
|| (insn & 0x3a400000) == 0x28000000); /* C3.3.7,14-16 */
return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
is_write, &uc->uc_sigmask);
}
#elif defined(__ia64)
#ifndef __ISR_VALID
/* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif
int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
siginfo_t *info = pinfo;
ucontext_t *uc = puc;
unsigned long ip;
int is_write = 0;
ip = uc->uc_mcontext.sc_ip;
switch (host_signum) {
case SIGILL:
case SIGFPE:
case SIGSEGV:
case SIGBUS:
case SIGTRAP:
if (info->si_code && (info->si_segvflags & __ISR_VALID)) {
/* ISR.W (write-access) is bit 33: */
is_write = (info->si_isr >> 33) & 1;
}
}
break;
if (esrctx) {
/* For data aborts ESR.EC is 0b10010x: then bit 6 is the WnR bit */
uint64_t esr = esrctx->esr;
is_write = extract32(esr, 27, 5) == 0x12 && extract32(esr, 6, 1) == 1;
} else {
/*
* Fall back to parsing instructions; will only be needed
* for really ancient (pre-3.16) kernels.
*/
uint32_t insn = *(uint32_t *)pc;
is_write = ((insn & 0xbfff0000) == 0x0c000000 /* C3.3.1 */
|| (insn & 0xbfe00000) == 0x0c800000 /* C3.3.2 */
|| (insn & 0xbfdf0000) == 0x0d000000 /* C3.3.3 */
|| (insn & 0xbfc00000) == 0x0d800000 /* C3.3.4 */
|| (insn & 0x3f400000) == 0x08000000 /* C3.3.6 */
|| (insn & 0x3bc00000) == 0x39000000 /* C3.3.13 */
|| (insn & 0x3fc00000) == 0x3d800000 /* ... 128bit */
/* Ignore bits 10, 11 & 21, controlling indexing. */
|| (insn & 0x3bc00000) == 0x38000000 /* C3.3.8-12 */
|| (insn & 0x3fe00000) == 0x3c800000 /* ... 128bit */
/* Ignore bits 23 & 24, controlling indexing. */
|| (insn & 0x3a400000) == 0x28000000); /* C3.3.7,14-16 */
default:
break;
}
return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
return handle_cpu_signal(ip, (unsigned long)info->si_addr,
is_write,
(sigset_t *)&uc->uc_sigmask);
}
#elif defined(__s390__)
@@ -591,7 +583,8 @@ int cpu_signal_handler(int host_signum, void *pinfo,
}
break;
}
return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
return handle_cpu_signal(pc, (unsigned long)info->si_addr,
is_write, &uc->uc_sigmask);
}
#elif defined(__mips__)
@@ -606,82 +599,8 @@ int cpu_signal_handler(int host_signum, void *pinfo,
/* XXX: compute is_write */
is_write = 0;
return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}
#elif defined(__riscv)
int cpu_signal_handler(int host_signum, void *pinfo,
void *puc)
{
siginfo_t *info = pinfo;
ucontext_t *uc = puc;
greg_t pc = uc->uc_mcontext.__gregs[REG_PC];
uint32_t insn = *(uint32_t *)pc;
int is_write = 0;
/* Detect a store by reading the instruction at the program
counter. Note: we currently only generate 32-bit
instructions, so we only detect 32-bit stores */
switch (((insn >> 0) & 0b11)) {
case 3:
switch (((insn >> 2) & 0b11111)) {
case 8:
switch (((insn >> 12) & 0b111)) {
case 0: /* sb */
case 1: /* sh */
case 2: /* sw */
case 3: /* sd */
case 4: /* sq */
is_write = 1;
break;
default:
break;
}
break;
case 9:
switch (((insn >> 12) & 0b111)) {
case 2: /* fsw */
case 3: /* fsd */
case 4: /* fsq */
is_write = 1;
break;
default:
break;
}
break;
default:
break;
}
}
/* Check for compressed instructions */
switch (((insn >> 13) & 0b111)) {
case 7:
switch (insn & 0b11) {
case 0: /*c.sd */
case 2: /* c.sdsp */
is_write = 1;
break;
default:
break;
}
break;
case 6:
switch (insn & 0b11) {
case 0: /* c.sw */
case 3: /* c.swsp */
is_write = 1;
break;
default:
break;
}
break;
default:
break;
}
return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
return handle_cpu_signal(pc, (unsigned long)info->si_addr,
is_write, &uc->uc_sigmask);
}
#else
@@ -729,7 +648,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
/* The following is only callable from other helpers, and matches up
with the softmmu version. */
#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#ifdef CONFIG_ATOMIC128
#undef EXTRA_ARGS
#undef ATOMIC_NAME
@@ -742,4 +661,4 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
#endif /* CONFIG_ATOMIC128 */
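
The common thread in the user-exec.c hunks above is the handle_cpu_signal() interface: the two variants differ in whether the per-architecture handlers forward the whole siginfo_t pointer or pass the faulting address, (unsigned long)info->si_addr, directly, together with the guessed is_write flag and the signal mask. A minimal, self-contained sketch of how such a handler recovers that information (assuming x86-64 Linux and SA_SIGINFO; illustrative, not QEMU code):

    #define _GNU_SOURCE
    #include <signal.h>
    #include <stdio.h>
    #include <ucontext.h>
    #include <unistd.h>

    static void segv_handler(int sig, siginfo_t *info, void *puc)
    {
        ucontext_t *uc = puc;
        /* faulting data address and program counter: the two values the
           signal-based MMU emulation needs */
        unsigned long addr = (unsigned long)info->si_addr;
        unsigned long pc = uc->uc_mcontext.gregs[REG_RIP];

        (void)sig;
        fprintf(stderr, "SIGSEGV: address 0x%lx, pc 0x%lx\n", addr, pc);
        _exit(1);
    }

    int main(void)
    {
        struct sigaction sa = { .sa_sigaction = segv_handler,
                                .sa_flags = SA_SIGINFO };
        sigemptyset(&sa.sa_mask);
        sigaction(SIGSEGV, &sa, NULL);

        *(volatile int *)0 = 0;   /* provoke the fault */
        return 0;
    }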

View File

@@ -28,10 +28,9 @@
#include "sysemu/arch_init.h"
#include "hw/pci/pci.h"
#include "hw/audio/soundhw.h"
#include "qapi/qapi-commands-misc.h"
#include "qapi/error.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qmp-commands.h"
#include "hw/acpi/acpi.h"
#include "qemu/help_option.h"
@@ -52,14 +51,12 @@ int graphic_depth = 32;
#define QEMU_ARCH QEMU_ARCH_ARM
#elif defined(TARGET_CRIS)
#define QEMU_ARCH QEMU_ARCH_CRIS
#elif defined(TARGET_HPPA)
#define QEMU_ARCH QEMU_ARCH_HPPA
#elif defined(TARGET_I386)
#define QEMU_ARCH QEMU_ARCH_I386
#elif defined(TARGET_LM32)
#define QEMU_ARCH QEMU_ARCH_LM32
#elif defined(TARGET_M68K)
#define QEMU_ARCH QEMU_ARCH_M68K
#elif defined(TARGET_LM32)
#define QEMU_ARCH QEMU_ARCH_LM32
#elif defined(TARGET_MICROBLAZE)
#define QEMU_ARCH QEMU_ARCH_MICROBLAZE
#elif defined(TARGET_MIPS)
@@ -72,20 +69,18 @@ int graphic_depth = 32;
#define QEMU_ARCH QEMU_ARCH_OPENRISC
#elif defined(TARGET_PPC)
#define QEMU_ARCH QEMU_ARCH_PPC
#elif defined(TARGET_RISCV)
#define QEMU_ARCH QEMU_ARCH_RISCV
#elif defined(TARGET_S390X)
#define QEMU_ARCH QEMU_ARCH_S390X
#elif defined(TARGET_SH4)
#define QEMU_ARCH QEMU_ARCH_SH4
#elif defined(TARGET_SPARC)
#define QEMU_ARCH QEMU_ARCH_SPARC
#elif defined(TARGET_TRICORE)
#define QEMU_ARCH QEMU_ARCH_TRICORE
#elif defined(TARGET_UNICORE32)
#define QEMU_ARCH QEMU_ARCH_UNICORE32
#elif defined(TARGET_XTENSA)
#define QEMU_ARCH QEMU_ARCH_XTENSA
#elif defined(TARGET_UNICORE32)
#define QEMU_ARCH QEMU_ARCH_UNICORE32
#elif defined(TARGET_TRICORE)
#define QEMU_ARCH QEMU_ARCH_TRICORE
#endif
const uint32_t arch_type = QEMU_ARCH;
@@ -113,8 +108,7 @@ TargetInfo *qmp_query_target(Error **errp)
{
TargetInfo *info = g_malloc0(sizeof(*info));
info->arch = qapi_enum_parse(&SysEmuTarget_lookup, TARGET_NAME, -1,
&error_abort);
info->arch = g_strdup(TARGET_NAME);
return info;
}
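
The qmp_query_target() hunk differs in how the target name is reported: one variant returns g_strdup(TARGET_NAME), the other parses it into the SysEmuTarget enum with qapi_enum_parse() against a generated lookup table. A rough illustration of that lookup pattern (simplified; the real QAPI helper is generated code and also takes an Error **):

    #include <stdio.h>
    #include <string.h>

    typedef struct {
        const char *const *array;
        int size;
    } EnumLookup;

    /* return the index of name in the table, or def if it is not present */
    static int enum_parse(const EnumLookup *lookup, const char *name, int def)
    {
        for (int i = 0; i < lookup->size; i++) {
            if (strcmp(lookup->array[i], name) == 0) {
                return i;
            }
        }
        return def;
    }

    int main(void)
    {
        static const char *const targets[] = { "i386", "x86_64", "arm", "riscv64" };
        const EnumLookup lookup = { targets, 4 };

        printf("%d\n", enum_parse(&lookup, "arm", -1));  /* 2 */
        printf("%d\n", enum_parse(&lookup, "sh4", -1));  /* -1 */
        return 0;
    }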

View File

@@ -1,31 +1,19 @@
common-obj-y = audio.o audio_legacy.o noaudio.o wavaudio.o mixeng.o
common-obj-y = audio.o noaudio.o wavaudio.o mixeng.o
common-obj-$(CONFIG_SDL) += sdlaudio.o
common-obj-$(CONFIG_OSS) += ossaudio.o
common-obj-$(CONFIG_SPICE) += spiceaudio.o
common-obj-$(CONFIG_AUDIO_COREAUDIO) += coreaudio.o
common-obj-$(CONFIG_AUDIO_DSOUND) += dsoundaudio.o
common-obj-$(CONFIG_COREAUDIO) += coreaudio.o
common-obj-$(CONFIG_ALSA) += alsaaudio.o
common-obj-$(CONFIG_DSOUND) += dsoundaudio.o
common-obj-$(CONFIG_PA) += paaudio.o
common-obj-$(CONFIG_AUDIO_PT_INT) += audio_pt_int.o
common-obj-$(CONFIG_AUDIO_WIN_INT) += audio_win_int.o
common-obj-y += wavcapture.o
sdlaudio.o-cflags := $(SDL_CFLAGS)
sdlaudio.o-libs := $(SDL_LIBS)
alsaaudio.o-libs := $(ALSA_LIBS)
paaudio.o-libs := $(PULSE_LIBS)
coreaudio.o-libs := $(COREAUDIO_LIBS)
dsoundaudio.o-libs := $(DSOUND_LIBS)
# alsa module
common-obj-$(CONFIG_AUDIO_ALSA) += alsa.mo
alsa.mo-objs = alsaaudio.o
alsa.mo-libs := $(ALSA_LIBS)
# oss module
common-obj-$(CONFIG_AUDIO_OSS) += oss.mo
oss.mo-objs = ossaudio.o
oss.mo-libs := $(OSS_LIBS)
# pulseaudio module
common-obj-$(CONFIG_AUDIO_PA) += pa.mo
pa.mo-objs = paaudio.o
pa.mo-libs := $(PULSE_LIBS)
# sdl module
common-obj-$(CONFIG_AUDIO_SDL) += sdl.mo
sdl.mo-objs = sdlaudio.o
sdl.mo-cflags := $(SDL_CFLAGS)
sdl.mo-libs := $(SDL_LIBS)
ossaudio.o-libs := $(OSS_LIBS)

View File

@@ -28,14 +28,35 @@
#include "audio.h"
#include "trace.h"
#if QEMU_GNUC_PREREQ(4, 3)
#pragma GCC diagnostic ignored "-Waddress"
#endif
#define AUDIO_CAP "alsa"
#include "audio_int.h"
typedef struct ALSAConf {
int size_in_usec_in;
int size_in_usec_out;
const char *pcm_name_in;
const char *pcm_name_out;
unsigned int buffer_size_in;
unsigned int period_size_in;
unsigned int buffer_size_out;
unsigned int period_size_out;
unsigned int threshold;
int buffer_size_in_overridden;
int period_size_in_overridden;
int buffer_size_out_overridden;
int period_size_out_overridden;
} ALSAConf;
struct pollhlp {
snd_pcm_t *handle;
struct pollfd *pfds;
ALSAConf *conf;
int count;
int mask;
};
@@ -47,7 +68,6 @@ typedef struct ALSAVoiceOut {
void *pcm_buf;
snd_pcm_t *handle;
struct pollhlp pollhlp;
Audiodev *dev;
} ALSAVoiceOut;
typedef struct ALSAVoiceIn {
@@ -55,18 +75,21 @@ typedef struct ALSAVoiceIn {
snd_pcm_t *handle;
void *pcm_buf;
struct pollhlp pollhlp;
Audiodev *dev;
} ALSAVoiceIn;
struct alsa_params_req {
int freq;
snd_pcm_format_t fmt;
int nchannels;
int size_in_usec;
int override_mask;
unsigned int buffer_size;
unsigned int period_size;
};
struct alsa_params_obt {
int freq;
AudioFormat fmt;
audfmt_e fmt;
int endianness;
int nchannels;
snd_pcm_uframes_t samples;
@@ -273,16 +296,16 @@ static int alsa_write (SWVoiceOut *sw, void *buf, int len)
return audio_pcm_sw_write (sw, buf, len);
}
static snd_pcm_format_t aud_to_alsafmt (AudioFormat fmt, int endianness)
static snd_pcm_format_t aud_to_alsafmt (audfmt_e fmt, int endianness)
{
switch (fmt) {
case AUDIO_FORMAT_S8:
case AUD_FMT_S8:
return SND_PCM_FORMAT_S8;
case AUDIO_FORMAT_U8:
case AUD_FMT_U8:
return SND_PCM_FORMAT_U8;
case AUDIO_FORMAT_S16:
case AUD_FMT_S16:
if (endianness) {
return SND_PCM_FORMAT_S16_BE;
}
@@ -290,7 +313,7 @@ static snd_pcm_format_t aud_to_alsafmt (AudioFormat fmt, int endianness)
return SND_PCM_FORMAT_S16_LE;
}
case AUDIO_FORMAT_U16:
case AUD_FMT_U16:
if (endianness) {
return SND_PCM_FORMAT_U16_BE;
}
@@ -298,7 +321,7 @@ static snd_pcm_format_t aud_to_alsafmt (AudioFormat fmt, int endianness)
return SND_PCM_FORMAT_U16_LE;
}
case AUDIO_FORMAT_S32:
case AUD_FMT_S32:
if (endianness) {
return SND_PCM_FORMAT_S32_BE;
}
@@ -306,7 +329,7 @@ static snd_pcm_format_t aud_to_alsafmt (AudioFormat fmt, int endianness)
return SND_PCM_FORMAT_S32_LE;
}
case AUDIO_FORMAT_U32:
case AUD_FMT_U32:
if (endianness) {
return SND_PCM_FORMAT_U32_BE;
}
@@ -323,58 +346,58 @@ static snd_pcm_format_t aud_to_alsafmt (AudioFormat fmt, int endianness)
}
}
static int alsa_to_audfmt (snd_pcm_format_t alsafmt, AudioFormat *fmt,
static int alsa_to_audfmt (snd_pcm_format_t alsafmt, audfmt_e *fmt,
int *endianness)
{
switch (alsafmt) {
case SND_PCM_FORMAT_S8:
*endianness = 0;
*fmt = AUDIO_FORMAT_S8;
*fmt = AUD_FMT_S8;
break;
case SND_PCM_FORMAT_U8:
*endianness = 0;
*fmt = AUDIO_FORMAT_U8;
*fmt = AUD_FMT_U8;
break;
case SND_PCM_FORMAT_S16_LE:
*endianness = 0;
*fmt = AUDIO_FORMAT_S16;
*fmt = AUD_FMT_S16;
break;
case SND_PCM_FORMAT_U16_LE:
*endianness = 0;
*fmt = AUDIO_FORMAT_U16;
*fmt = AUD_FMT_U16;
break;
case SND_PCM_FORMAT_S16_BE:
*endianness = 1;
*fmt = AUDIO_FORMAT_S16;
*fmt = AUD_FMT_S16;
break;
case SND_PCM_FORMAT_U16_BE:
*endianness = 1;
*fmt = AUDIO_FORMAT_U16;
*fmt = AUD_FMT_U16;
break;
case SND_PCM_FORMAT_S32_LE:
*endianness = 0;
*fmt = AUDIO_FORMAT_S32;
*fmt = AUD_FMT_S32;
break;
case SND_PCM_FORMAT_U32_LE:
*endianness = 0;
*fmt = AUDIO_FORMAT_U32;
*fmt = AUD_FMT_U32;
break;
case SND_PCM_FORMAT_S32_BE:
*endianness = 1;
*fmt = AUDIO_FORMAT_S32;
*fmt = AUD_FMT_S32;
break;
case SND_PCM_FORMAT_U32_BE:
*endianness = 1;
*fmt = AUDIO_FORMAT_U32;
*fmt = AUD_FMT_U32;
break;
default:
@@ -387,18 +410,17 @@ static int alsa_to_audfmt (snd_pcm_format_t alsafmt, AudioFormat *fmt,
static void alsa_dump_info (struct alsa_params_req *req,
struct alsa_params_obt *obt,
snd_pcm_format_t obtfmt,
AudiodevAlsaPerDirectionOptions *apdo)
snd_pcm_format_t obtfmt)
{
dolog("parameter | requested value | obtained value\n");
dolog("format | %10d | %10d\n", req->fmt, obtfmt);
dolog("channels | %10d | %10d\n",
req->nchannels, obt->nchannels);
dolog("frequency | %10d | %10d\n", req->freq, obt->freq);
dolog("============================================\n");
dolog("requested: buffer len %" PRId32 " period len %" PRId32 "\n",
apdo->buffer_length, apdo->period_length);
dolog("obtained: samples %ld\n", obt->samples);
dolog ("parameter | requested value | obtained value\n");
dolog ("format | %10d | %10d\n", req->fmt, obtfmt);
dolog ("channels | %10d | %10d\n",
req->nchannels, obt->nchannels);
dolog ("frequency | %10d | %10d\n", req->freq, obt->freq);
dolog ("============================================\n");
dolog ("requested: buffer size %d period size %d\n",
req->buffer_size, req->period_size);
dolog ("obtained: samples %ld\n", obt->samples);
}
static void alsa_set_threshold (snd_pcm_t *handle, snd_pcm_uframes_t threshold)
@@ -431,23 +453,23 @@ static void alsa_set_threshold (snd_pcm_t *handle, snd_pcm_uframes_t threshold)
}
}
static int alsa_open(bool in, struct alsa_params_req *req,
struct alsa_params_obt *obt, snd_pcm_t **handlep,
Audiodev *dev)
static int alsa_open (int in, struct alsa_params_req *req,
struct alsa_params_obt *obt, snd_pcm_t **handlep,
ALSAConf *conf)
{
AudiodevAlsaOptions *aopts = &dev->u.alsa;
AudiodevAlsaPerDirectionOptions *apdo = in ? aopts->in : aopts->out;
snd_pcm_t *handle;
snd_pcm_hw_params_t *hw_params;
int err;
int size_in_usec;
unsigned int freq, nchannels;
const char *pcm_name = apdo->has_dev ? apdo->dev : "default";
const char *pcm_name = in ? conf->pcm_name_in : conf->pcm_name_out;
snd_pcm_uframes_t obt_buffer_size;
const char *typ = in ? "ADC" : "DAC";
snd_pcm_format_t obtfmt;
freq = req->freq;
nchannels = req->nchannels;
size_in_usec = req->size_in_usec;
snd_pcm_hw_params_alloca (&hw_params);
@@ -507,42 +529,79 @@ static int alsa_open(bool in, struct alsa_params_req *req,
goto err;
}
if (apdo->buffer_length) {
int dir = 0;
unsigned int btime = apdo->buffer_length;
if (req->buffer_size) {
unsigned long obt;
err = snd_pcm_hw_params_set_buffer_time_near(
handle, hw_params, &btime, &dir);
if (size_in_usec) {
int dir = 0;
unsigned int btime = req->buffer_size;
err = snd_pcm_hw_params_set_buffer_time_near (
handle,
hw_params,
&btime,
&dir
);
obt = btime;
}
else {
snd_pcm_uframes_t bsize = req->buffer_size;
err = snd_pcm_hw_params_set_buffer_size_near (
handle,
hw_params,
&bsize
);
obt = bsize;
}
if (err < 0) {
alsa_logerr2(err, typ, "Failed to set buffer time to %" PRId32 "\n",
apdo->buffer_length);
alsa_logerr2 (err, typ, "Failed to set buffer %s to %d\n",
size_in_usec ? "time" : "size", req->buffer_size);
goto err;
}
if (apdo->has_buffer_length && btime != apdo->buffer_length) {
dolog("Requested buffer time %" PRId32
" was rejected, using %u\n", apdo->buffer_length, btime);
}
if ((req->override_mask & 2) && (obt - req->buffer_size))
dolog ("Requested buffer %s %u was rejected, using %lu\n",
size_in_usec ? "time" : "size", req->buffer_size, obt);
}
if (apdo->period_length) {
int dir = 0;
unsigned int ptime = apdo->period_length;
if (req->period_size) {
unsigned long obt;
err = snd_pcm_hw_params_set_period_time_near(handle, hw_params, &ptime,
&dir);
if (size_in_usec) {
int dir = 0;
unsigned int ptime = req->period_size;
err = snd_pcm_hw_params_set_period_time_near (
handle,
hw_params,
&ptime,
&dir
);
obt = ptime;
}
else {
int dir = 0;
snd_pcm_uframes_t psize = req->period_size;
err = snd_pcm_hw_params_set_period_size_near (
handle,
hw_params,
&psize,
&dir
);
obt = psize;
}
if (err < 0) {
alsa_logerr2(err, typ, "Failed to set period time to %" PRId32 "\n",
apdo->period_length);
alsa_logerr2 (err, typ, "Failed to set period %s to %d\n",
size_in_usec ? "time" : "size", req->period_size);
goto err;
}
if (apdo->has_period_length && ptime != apdo->period_length) {
dolog("Requested period time %" PRId32 " was rejected, using %d\n",
apdo->period_length, ptime);
}
if (((req->override_mask & 1) && (obt - req->period_size)))
dolog ("Requested period %s %u was rejected, using %lu\n",
size_in_usec ? "time" : "size", req->period_size, obt);
}
err = snd_pcm_hw_params (handle, hw_params);
@@ -574,12 +633,30 @@ static int alsa_open(bool in, struct alsa_params_req *req,
goto err;
}
if (!in && aopts->has_threshold && aopts->threshold) {
struct audsettings as = { .freq = freq };
alsa_set_threshold(
handle,
audio_buffer_frames(qapi_AudiodevAlsaPerDirectionOptions_base(apdo),
&as, aopts->threshold));
if (!in && conf->threshold) {
snd_pcm_uframes_t threshold;
int bytes_per_sec;
bytes_per_sec = freq << (nchannels == 2);
switch (obt->fmt) {
case AUD_FMT_S8:
case AUD_FMT_U8:
break;
case AUD_FMT_S16:
case AUD_FMT_U16:
bytes_per_sec <<= 1;
break;
case AUD_FMT_S32:
case AUD_FMT_U32:
bytes_per_sec <<= 2;
break;
}
threshold = (conf->threshold * bytes_per_sec) / 1000;
alsa_set_threshold (handle, threshold);
}
obt->nchannels = nchannels;
@@ -592,11 +669,11 @@ static int alsa_open(bool in, struct alsa_params_req *req,
obt->nchannels != req->nchannels ||
obt->freq != req->freq) {
dolog ("Audio parameters for %s\n", typ);
alsa_dump_info(req, obt, obtfmt, apdo);
alsa_dump_info (req, obt, obtfmt);
}
#ifdef DEBUG
alsa_dump_info(req, obt, obtfmt, pdo);
alsa_dump_info (req, obt, obtfmt);
#endif
return 0;
@@ -722,13 +799,19 @@ static int alsa_init_out(HWVoiceOut *hw, struct audsettings *as,
struct alsa_params_obt obt;
snd_pcm_t *handle;
struct audsettings obt_as;
Audiodev *dev = drv_opaque;
ALSAConf *conf = drv_opaque;
req.fmt = aud_to_alsafmt (as->fmt, as->endianness);
req.freq = as->freq;
req.nchannels = as->nchannels;
req.period_size = conf->period_size_out;
req.buffer_size = conf->buffer_size_out;
req.size_in_usec = conf->size_in_usec_out;
req.override_mask =
(conf->period_size_out_overridden ? 1 : 0) |
(conf->buffer_size_out_overridden ? 2 : 0);
if (alsa_open(0, &req, &obt, &handle, dev)) {
if (alsa_open (0, &req, &obt, &handle, conf)) {
return -1;
}
@@ -740,7 +823,7 @@ static int alsa_init_out(HWVoiceOut *hw, struct audsettings *as,
audio_pcm_init_info (&hw->info, &obt_as);
hw->samples = obt.samples;
alsa->pcm_buf = audio_calloc(__func__, obt.samples, 1 << hw->info.shift);
alsa->pcm_buf = audio_calloc (AUDIO_FUNC, obt.samples, 1 << hw->info.shift);
if (!alsa->pcm_buf) {
dolog ("Could not allocate DAC buffer (%d samples, each %d bytes)\n",
hw->samples, 1 << hw->info.shift);
@@ -749,7 +832,7 @@ static int alsa_init_out(HWVoiceOut *hw, struct audsettings *as,
}
alsa->handle = handle;
alsa->dev = dev;
alsa->pollhlp.conf = conf;
return 0;
}
@@ -789,12 +872,16 @@ static int alsa_voice_ctl (snd_pcm_t *handle, const char *typ, int ctl)
static int alsa_ctl_out (HWVoiceOut *hw, int cmd, ...)
{
ALSAVoiceOut *alsa = (ALSAVoiceOut *) hw;
AudiodevAlsaPerDirectionOptions *apdo = alsa->dev->u.alsa.out;
switch (cmd) {
case VOICE_ENABLE:
{
bool poll_mode = apdo->try_poll;
va_list ap;
int poll_mode;
va_start (ap, cmd);
poll_mode = va_arg (ap, int);
va_end (ap);
ldebug ("enabling voice\n");
if (poll_mode && alsa_poll_out (hw)) {
@@ -823,13 +910,19 @@ static int alsa_init_in(HWVoiceIn *hw, struct audsettings *as, void *drv_opaque)
struct alsa_params_obt obt;
snd_pcm_t *handle;
struct audsettings obt_as;
Audiodev *dev = drv_opaque;
ALSAConf *conf = drv_opaque;
req.fmt = aud_to_alsafmt (as->fmt, as->endianness);
req.freq = as->freq;
req.nchannels = as->nchannels;
req.period_size = conf->period_size_in;
req.buffer_size = conf->buffer_size_in;
req.size_in_usec = conf->size_in_usec_in;
req.override_mask =
(conf->period_size_in_overridden ? 1 : 0) |
(conf->buffer_size_in_overridden ? 2 : 0);
if (alsa_open(1, &req, &obt, &handle, dev)) {
if (alsa_open (1, &req, &obt, &handle, conf)) {
return -1;
}
@@ -841,7 +934,7 @@ static int alsa_init_in(HWVoiceIn *hw, struct audsettings *as, void *drv_opaque)
audio_pcm_init_info (&hw->info, &obt_as);
hw->samples = obt.samples;
alsa->pcm_buf = audio_calloc(__func__, hw->samples, 1 << hw->info.shift);
alsa->pcm_buf = audio_calloc (AUDIO_FUNC, hw->samples, 1 << hw->info.shift);
if (!alsa->pcm_buf) {
dolog ("Could not allocate ADC buffer (%d samples, each %d bytes)\n",
hw->samples, 1 << hw->info.shift);
@@ -850,7 +943,7 @@ static int alsa_init_in(HWVoiceIn *hw, struct audsettings *as, void *drv_opaque)
}
alsa->handle = handle;
alsa->dev = dev;
alsa->pollhlp.conf = conf;
return 0;
}
@@ -992,12 +1085,16 @@ static int alsa_read (SWVoiceIn *sw, void *buf, int size)
static int alsa_ctl_in (HWVoiceIn *hw, int cmd, ...)
{
ALSAVoiceIn *alsa = (ALSAVoiceIn *) hw;
AudiodevAlsaPerDirectionOptions *apdo = alsa->dev->u.alsa.in;
switch (cmd) {
case VOICE_ENABLE:
{
bool poll_mode = apdo->try_poll;
va_list ap;
int poll_mode;
va_start (ap, cmd);
poll_mode = va_arg (ap, int);
va_end (ap);
ldebug ("enabling voice\n");
if (poll_mode && alsa_poll_in (hw)) {
@@ -1020,54 +1117,88 @@ static int alsa_ctl_in (HWVoiceIn *hw, int cmd, ...)
return -1;
}
static void alsa_init_per_direction(AudiodevAlsaPerDirectionOptions *apdo)
static ALSAConf glob_conf = {
.buffer_size_out = 4096,
.period_size_out = 1024,
.pcm_name_out = "default",
.pcm_name_in = "default",
};
static void *alsa_audio_init (void)
{
if (!apdo->has_try_poll) {
apdo->try_poll = true;
apdo->has_try_poll = true;
}
}
static void *alsa_audio_init(Audiodev *dev)
{
AudiodevAlsaOptions *aopts;
assert(dev->driver == AUDIODEV_DRIVER_ALSA);
aopts = &dev->u.alsa;
alsa_init_per_direction(aopts->in);
alsa_init_per_direction(aopts->out);
/*
* need to define them, as otherwise alsa produces no sound
* doesn't set has_* so alsa_open can identify it wasn't set by the user
*/
if (!dev->u.alsa.out->has_period_length) {
/* 1024 frames assuming 44100Hz */
dev->u.alsa.out->period_length = 1024 * 1000000 / 44100;
}
if (!dev->u.alsa.out->has_buffer_length) {
/* 4096 frames assuming 44100Hz */
dev->u.alsa.out->buffer_length = 4096ll * 1000000 / 44100;
}
/*
* OptsVisitor sets unspecified optional fields to zero, but do not depend
* on it...
*/
if (!dev->u.alsa.in->has_period_length) {
dev->u.alsa.in->period_length = 0;
}
if (!dev->u.alsa.in->has_buffer_length) {
dev->u.alsa.in->buffer_length = 0;
}
return dev;
ALSAConf *conf = g_malloc(sizeof(ALSAConf));
*conf = glob_conf;
return conf;
}
static void alsa_audio_fini (void *opaque)
{
g_free(opaque);
}
static struct audio_option alsa_options[] = {
{
.name = "DAC_SIZE_IN_USEC",
.tag = AUD_OPT_BOOL,
.valp = &glob_conf.size_in_usec_out,
.descr = "DAC period/buffer size in microseconds (otherwise in frames)"
},
{
.name = "DAC_PERIOD_SIZE",
.tag = AUD_OPT_INT,
.valp = &glob_conf.period_size_out,
.descr = "DAC period size (0 to go with system default)",
.overriddenp = &glob_conf.period_size_out_overridden
},
{
.name = "DAC_BUFFER_SIZE",
.tag = AUD_OPT_INT,
.valp = &glob_conf.buffer_size_out,
.descr = "DAC buffer size (0 to go with system default)",
.overriddenp = &glob_conf.buffer_size_out_overridden
},
{
.name = "ADC_SIZE_IN_USEC",
.tag = AUD_OPT_BOOL,
.valp = &glob_conf.size_in_usec_in,
.descr =
"ADC period/buffer size in microseconds (otherwise in frames)"
},
{
.name = "ADC_PERIOD_SIZE",
.tag = AUD_OPT_INT,
.valp = &glob_conf.period_size_in,
.descr = "ADC period size (0 to go with system default)",
.overriddenp = &glob_conf.period_size_in_overridden
},
{
.name = "ADC_BUFFER_SIZE",
.tag = AUD_OPT_INT,
.valp = &glob_conf.buffer_size_in,
.descr = "ADC buffer size (0 to go with system default)",
.overriddenp = &glob_conf.buffer_size_in_overridden
},
{
.name = "THRESHOLD",
.tag = AUD_OPT_INT,
.valp = &glob_conf.threshold,
.descr = "(undocumented)"
},
{
.name = "DAC_DEV",
.tag = AUD_OPT_STR,
.valp = &glob_conf.pcm_name_out,
.descr = "DAC device name (for instance dmix)"
},
{
.name = "ADC_DEV",
.tag = AUD_OPT_STR,
.valp = &glob_conf.pcm_name_in,
.descr = "ADC device name"
},
{ /* End of list */ }
};
static struct audio_pcm_ops alsa_pcm_ops = {
.init_out = alsa_init_out,
.fini_out = alsa_fini_out,
@@ -1082,9 +1213,10 @@ static struct audio_pcm_ops alsa_pcm_ops = {
.ctl_in = alsa_ctl_in,
};
static struct audio_driver alsa_audio_driver = {
struct audio_driver alsa_audio_driver = {
.name = "alsa",
.descr = "ALSA http://www.alsa-project.org",
.options = alsa_options,
.init = alsa_audio_init,
.fini = alsa_audio_fini,
.pcm_ops = &alsa_pcm_ops,
@@ -1094,9 +1226,3 @@ static struct audio_driver alsa_audio_driver = {
.voice_size_out = sizeof (ALSAVoiceOut),
.voice_size_in = sizeof (ALSAVoiceIn)
};
static void register_audio_alsa(void)
{
audio_driver_register(&alsa_audio_driver);
}
type_init(register_audio_alsa);
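
Two recurring differences in the alsaaudio.c hunks are the format enum naming (AudioFormat/AUDIO_FORMAT_* on one side, audfmt_e/AUD_FMT_* on the other) and the configuration source (per-device Audiodev options versus the global ALSAConf plus audio_option table). The variant with the millisecond THRESHOLD option converts it into a byte count from the obtained frequency, channel count, and sample size before calling alsa_set_threshold(). A small worked sketch of that byte-rate arithmetic (values are illustrative, not driver defaults):

    #include <stdio.h>

    /* bytes per second = sample rate * channels * bytes per sample */
    static unsigned bytes_per_second(unsigned freq, unsigned nchannels,
                                     unsigned bytes_per_sample)
    {
        return freq * nchannels * bytes_per_sample;
    }

    int main(void)
    {
        unsigned bps = bytes_per_second(44100, 2, 2);          /* 176400 */
        unsigned threshold_ms = 40;                            /* hypothetical setting */
        unsigned threshold_bytes = threshold_ms * bps / 1000;  /* 7056 */

        printf("%u bytes/sec -> threshold %u bytes\n", bps, threshold_bytes);
        return 0;
    }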

File diff suppressed because it is too large

View File

@@ -26,31 +26,30 @@
#define QEMU_AUDIO_H
#include "qemu/queue.h"
#include "qapi/qapi-types-audio.h"
typedef void (*audio_callback_fn) (void *opaque, int avail);
typedef enum {
AUD_FMT_U8,
AUD_FMT_S8,
AUD_FMT_U16,
AUD_FMT_S16,
AUD_FMT_U32,
AUD_FMT_S32
} audfmt_e;
#ifdef HOST_WORDS_BIGENDIAN
#define AUDIO_HOST_ENDIANNESS 1
#else
#define AUDIO_HOST_ENDIANNESS 0
#endif
typedef struct audsettings {
struct audsettings {
int freq;
int nchannels;
AudioFormat fmt;
audfmt_e fmt;
int endianness;
} audsettings;
audsettings audiodev_to_audsettings(AudiodevPerDirectionOptions *pdo);
int audioformat_bytes_per_sample(AudioFormat fmt);
int audio_buffer_frames(AudiodevPerDirectionOptions *pdo,
audsettings *as, int def_usecs);
int audio_buffer_samples(AudiodevPerDirectionOptions *pdo,
audsettings *as, int def_usecs);
int audio_buffer_bytes(AudiodevPerDirectionOptions *pdo,
audsettings *as, int def_usecs);
};
typedef enum {
AUD_CNOTIFY_ENABLE,
@@ -90,6 +89,7 @@ typedef struct QEMUAudioTimeStamp {
void AUD_vlog (const char *cap, const char *fmt, va_list ap) GCC_FMT_ATTR(2, 0);
void AUD_log (const char *cap, const char *fmt, ...) GCC_FMT_ATTR(2, 3);
void AUD_help (void);
void AUD_register_card (const char *name, QEMUSoundCard *card);
void AUD_remove_card (QEMUSoundCard *card);
CaptureVoiceOut *AUD_add_capture (
@@ -171,8 +171,4 @@ void audio_sample_to_uint64(void *samples, int pos,
void audio_sample_from_uint64(void *samples, int pos,
uint64_t left, uint64_t right);
void audio_parse_option(const char *opt);
void audio_init_audiodevs(void);
void audio_legacy_help(void);
#endif /* QEMU_AUDIO_H */
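
The audio.h hunks show the same split at the API level: one side declares the hand-rolled audfmt_e enum, the other relies on the QAPI-generated AudioFormat and adds helpers such as audio_buffer_frames(), which turn a buffer length given in microseconds into a frame count for the negotiated settings. A minimal sketch of that kind of conversion (assumed rounding; not the helper's exact implementation):

    #include <stdint.h>
    #include <stdio.h>

    /* convert a duration in microseconds into audio frames at a given rate */
    static uint32_t usecs_to_frames(uint32_t usecs, uint32_t freq)
    {
        return (uint64_t)usecs * freq / 1000000;
    }

    int main(void)
    {
        /* ~23.2 ms at 44100 Hz corresponds to the 1024-frame ALSA default */
        printf("%u frames\n", usecs_to_frames(23220, 44100));  /* 1024 */
        return 0;
    }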

View File

@@ -25,7 +25,7 @@
#ifndef QEMU_AUDIO_INT_H
#define QEMU_AUDIO_INT_H
#ifdef CONFIG_AUDIO_COREAUDIO
#ifdef CONFIG_COREAUDIO
#define FLOAT_MIXENG
/* #define RECIPROCAL */
#endif
@@ -33,6 +33,22 @@
struct audio_pcm_ops;
typedef enum {
AUD_OPT_INT,
AUD_OPT_FMT,
AUD_OPT_STR,
AUD_OPT_BOOL
} audio_option_tag_e;
struct audio_option {
const char *name;
audio_option_tag_e tag;
void *valp;
const char *descr;
int *overriddenp;
int overridden;
};
struct audio_callback {
void *opaque;
audio_callback_fn fn;
@@ -125,11 +141,11 @@ struct SWVoiceIn {
QLIST_ENTRY (SWVoiceIn) entries;
};
typedef struct audio_driver audio_driver;
struct audio_driver {
const char *name;
const char *descr;
void *(*init) (Audiodev *);
struct audio_option *options;
void *(*init) (void);
void (*fini) (void *);
struct audio_pcm_ops *pcm_ops;
int can_be_default;
@@ -138,7 +154,6 @@ struct audio_driver {
int voice_size_out;
int voice_size_in;
int ctl_caps;
QLIST_ENTRY(audio_driver) next;
};
struct audio_pcm_ops {
@@ -174,9 +189,8 @@ struct SWVoiceCap {
QLIST_ENTRY (SWVoiceCap) entries;
};
typedef struct AudioState {
struct AudioState {
struct audio_driver *drv;
Audiodev *dev;
void *drv_opaque;
QEMUTimer *ts;
@@ -187,16 +201,19 @@ typedef struct AudioState {
int nb_hw_voices_out;
int nb_hw_voices_in;
int vm_running;
int64_t period_ticks;
} AudioState;
};
extern struct audio_driver no_audio_driver;
extern struct audio_driver oss_audio_driver;
extern struct audio_driver sdl_audio_driver;
extern struct audio_driver wav_audio_driver;
extern struct audio_driver alsa_audio_driver;
extern struct audio_driver coreaudio_audio_driver;
extern struct audio_driver dsound_audio_driver;
extern struct audio_driver pa_audio_driver;
extern struct audio_driver spice_audio_driver;
extern const struct mixeng_volume nominal_volume;
extern const char *audio_prio_list[];
void audio_driver_register(audio_driver *drv);
audio_driver *audio_driver_lookup(const char *name);
void audio_pcm_init_info (struct audio_pcm_info *info, struct audsettings *as);
void audio_pcm_info_clear_buf (struct audio_pcm_info *info, void *buf, int len);
@@ -235,18 +252,10 @@ static inline int audio_ring_dist (int dst, int src, int len)
#define AUDIO_STRINGIFY_(n) #n
#define AUDIO_STRINGIFY(n) AUDIO_STRINGIFY_(n)
typedef struct AudiodevListEntry {
Audiodev *dev;
QSIMPLEQ_ENTRY(AudiodevListEntry) next;
} AudiodevListEntry;
typedef QSIMPLEQ_HEAD(, AudiodevListEntry) AudiodevListHead;
AudiodevListHead audio_handle_legacy_opts(void);
void audio_free_audiodev_list(AudiodevListHead *head);
void audio_create_pdos(Audiodev *dev);
AudiodevPerDirectionOptions *audio_get_pdo_in(Audiodev *dev);
AudiodevPerDirectionOptions *audio_get_pdo_out(Audiodev *dev);
#if defined _MSC_VER || defined __GNUC__
#define AUDIO_FUNC __FUNCTION__
#else
#define AUDIO_FUNC __FILE__ ":" AUDIO_STRINGIFY (__LINE__)
#endif
#endif /* QEMU_AUDIO_INT_H */
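
audio_int.h also shows two driver-discovery models: one side declares every backend as an extern struct audio_driver, the other has each backend register itself and lets the core resolve it by name through audio_driver_register()/audio_driver_lookup(). A self-contained sketch of that registration pattern (a plain linked list and a constructor standing in for QEMU's QLIST and type_init(); not the actual implementation):

    #include <stdio.h>
    #include <string.h>

    typedef struct audio_driver {
        const char *name;
        struct audio_driver *next;
    } audio_driver;

    static audio_driver *drivers;

    static void audio_driver_register(audio_driver *drv)
    {
        drv->next = drivers;
        drivers = drv;
    }

    static audio_driver *audio_driver_lookup(const char *name)
    {
        for (audio_driver *d = drivers; d; d = d->next) {
            if (strcmp(d->name, name) == 0) {
                return d;
            }
        }
        return NULL;
    }

    static audio_driver alsa_driver = { .name = "alsa" };

    /* stands in for type_init(): runs before main() */
    static void __attribute__((constructor)) register_alsa(void)
    {
        audio_driver_register(&alsa_driver);
    }

    int main(void)
    {
        printf("alsa driver %s\n",
               audio_driver_lookup("alsa") ? "found" : "missing");
        return 0;
    }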

View File

@@ -1,550 +0,0 @@
/*
* QEMU Audio subsystem: legacy configuration handling
*
* Copyright (c) 2015-2019 Zoltán Kővágó <DirtY.iCE.hu@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "qemu/osdep.h"
#include "audio.h"
#include "audio_int.h"
#include "qemu-common.h"
#include "qemu/cutils.h"
#include "qemu/timer.h"
#include "qapi/error.h"
#include "qapi/qapi-visit-audio.h"
#include "qapi/visitor-impl.h"
#define AUDIO_CAP "audio-legacy"
#include "audio_int.h"
static uint32_t toui32(const char *str)
{
unsigned long long ret;
if (parse_uint_full(str, &ret, 10) || ret > UINT32_MAX) {
dolog("Invalid integer value `%s'\n", str);
exit(1);
}
return ret;
}
/* helper functions to convert env variables */
static void get_bool(const char *env, bool *dst, bool *has_dst)
{
const char *val = getenv(env);
if (val) {
*dst = toui32(val) != 0;
*has_dst = true;
}
}
static void get_int(const char *env, uint32_t *dst, bool *has_dst)
{
const char *val = getenv(env);
if (val) {
*dst = toui32(val);
*has_dst = true;
}
}
static void get_str(const char *env, char **dst, bool *has_dst)
{
const char *val = getenv(env);
if (val) {
if (*has_dst) {
g_free(*dst);
}
*dst = g_strdup(val);
*has_dst = true;
}
}
static void get_fmt(const char *env, AudioFormat *dst, bool *has_dst)
{
const char *val = getenv(env);
if (val) {
size_t i;
for (i = 0; i < AudioFormat_lookup.size; ++i) {
if (strcasecmp(val, AudioFormat_lookup.array[i]) == 0) {
*dst = i;
*has_dst = true;
return;
}
}
dolog("Invalid audio format `%s'\n", val);
exit(1);
}
}
static void get_millis_to_usecs(const char *env, uint32_t *dst, bool *has_dst)
{
const char *val = getenv(env);
if (val) {
*dst = toui32(val) * 1000;
*has_dst = true;
}
}
static uint32_t frames_to_usecs(uint32_t frames,
AudiodevPerDirectionOptions *pdo)
{
uint32_t freq = pdo->has_frequency ? pdo->frequency : 44100;
return (frames * 1000000 + freq / 2) / freq;
}
static void get_frames_to_usecs(const char *env, uint32_t *dst, bool *has_dst,
AudiodevPerDirectionOptions *pdo)
{
const char *val = getenv(env);
if (val) {
*dst = frames_to_usecs(toui32(val), pdo);
*has_dst = true;
}
}
static uint32_t samples_to_usecs(uint32_t samples,
AudiodevPerDirectionOptions *pdo)
{
uint32_t channels = pdo->has_channels ? pdo->channels : 2;
return frames_to_usecs(samples / channels, pdo);
}
static void get_samples_to_usecs(const char *env, uint32_t *dst, bool *has_dst,
AudiodevPerDirectionOptions *pdo)
{
const char *val = getenv(env);
if (val) {
*dst = samples_to_usecs(toui32(val), pdo);
*has_dst = true;
}
}
static uint32_t bytes_to_usecs(uint32_t bytes, AudiodevPerDirectionOptions *pdo)
{
AudioFormat fmt = pdo->has_format ? pdo->format : AUDIO_FORMAT_S16;
uint32_t bytes_per_sample = audioformat_bytes_per_sample(fmt);
return samples_to_usecs(bytes / bytes_per_sample, pdo);
}
static void get_bytes_to_usecs(const char *env, uint32_t *dst, bool *has_dst,
AudiodevPerDirectionOptions *pdo)
{
const char *val = getenv(env);
if (val) {
*dst = bytes_to_usecs(toui32(val), pdo);
*has_dst = true;
}
}
/* backend specific functions */
/* ALSA */
static void handle_alsa_per_direction(
AudiodevAlsaPerDirectionOptions *apdo, const char *prefix)
{
char buf[64];
size_t len = strlen(prefix);
bool size_in_usecs = false;
bool dummy;
memcpy(buf, prefix, len);
strcpy(buf + len, "TRY_POLL");
get_bool(buf, &apdo->try_poll, &apdo->has_try_poll);
strcpy(buf + len, "DEV");
get_str(buf, &apdo->dev, &apdo->has_dev);
strcpy(buf + len, "SIZE_IN_USEC");
get_bool(buf, &size_in_usecs, &dummy);
strcpy(buf + len, "PERIOD_SIZE");
get_int(buf, &apdo->period_length, &apdo->has_period_length);
if (apdo->has_period_length && !size_in_usecs) {
apdo->period_length = frames_to_usecs(
apdo->period_length,
qapi_AudiodevAlsaPerDirectionOptions_base(apdo));
}
strcpy(buf + len, "BUFFER_SIZE");
get_int(buf, &apdo->buffer_length, &apdo->has_buffer_length);
if (apdo->has_buffer_length && !size_in_usecs) {
apdo->buffer_length = frames_to_usecs(
apdo->buffer_length,
qapi_AudiodevAlsaPerDirectionOptions_base(apdo));
}
}
static void handle_alsa(Audiodev *dev)
{
AudiodevAlsaOptions *aopt = &dev->u.alsa;
handle_alsa_per_direction(aopt->in, "QEMU_ALSA_ADC_");
handle_alsa_per_direction(aopt->out, "QEMU_ALSA_DAC_");
get_millis_to_usecs("QEMU_ALSA_THRESHOLD",
&aopt->threshold, &aopt->has_threshold);
}
/* coreaudio */
static void handle_coreaudio(Audiodev *dev)
{
get_frames_to_usecs(
"QEMU_COREAUDIO_BUFFER_SIZE",
&dev->u.coreaudio.out->buffer_length,
&dev->u.coreaudio.out->has_buffer_length,
qapi_AudiodevCoreaudioPerDirectionOptions_base(dev->u.coreaudio.out));
get_int("QEMU_COREAUDIO_BUFFER_COUNT",
&dev->u.coreaudio.out->buffer_count,
&dev->u.coreaudio.out->has_buffer_count);
}
/* dsound */
static void handle_dsound(Audiodev *dev)
{
get_millis_to_usecs("QEMU_DSOUND_LATENCY_MILLIS",
&dev->u.dsound.latency, &dev->u.dsound.has_latency);
get_bytes_to_usecs("QEMU_DSOUND_BUFSIZE_OUT",
&dev->u.dsound.out->buffer_length,
&dev->u.dsound.out->has_buffer_length,
dev->u.dsound.out);
get_bytes_to_usecs("QEMU_DSOUND_BUFSIZE_IN",
&dev->u.dsound.in->buffer_length,
&dev->u.dsound.in->has_buffer_length,
dev->u.dsound.in);
}
/* OSS */
static void handle_oss_per_direction(
AudiodevOssPerDirectionOptions *opdo, const char *try_poll_env,
const char *dev_env)
{
get_bool(try_poll_env, &opdo->try_poll, &opdo->has_try_poll);
get_str(dev_env, &opdo->dev, &opdo->has_dev);
get_bytes_to_usecs("QEMU_OSS_FRAGSIZE",
&opdo->buffer_length, &opdo->has_buffer_length,
qapi_AudiodevOssPerDirectionOptions_base(opdo));
get_int("QEMU_OSS_NFRAGS", &opdo->buffer_count,
&opdo->has_buffer_count);
}
static void handle_oss(Audiodev *dev)
{
AudiodevOssOptions *oopt = &dev->u.oss;
handle_oss_per_direction(oopt->in, "QEMU_AUDIO_ADC_TRY_POLL",
"QEMU_OSS_ADC_DEV");
handle_oss_per_direction(oopt->out, "QEMU_AUDIO_DAC_TRY_POLL",
"QEMU_OSS_DAC_DEV");
get_bool("QEMU_OSS_MMAP", &oopt->try_mmap, &oopt->has_try_mmap);
get_bool("QEMU_OSS_EXCLUSIVE", &oopt->exclusive, &oopt->has_exclusive);
get_int("QEMU_OSS_POLICY", &oopt->dsp_policy, &oopt->has_dsp_policy);
}
/* pulseaudio */
static void handle_pa_per_direction(
AudiodevPaPerDirectionOptions *ppdo, const char *env)
{
get_str(env, &ppdo->name, &ppdo->has_name);
}
static void handle_pa(Audiodev *dev)
{
handle_pa_per_direction(dev->u.pa.in, "QEMU_PA_SOURCE");
handle_pa_per_direction(dev->u.pa.out, "QEMU_PA_SINK");
get_samples_to_usecs(
"QEMU_PA_SAMPLES", &dev->u.pa.in->buffer_length,
&dev->u.pa.in->has_buffer_length,
qapi_AudiodevPaPerDirectionOptions_base(dev->u.pa.in));
get_samples_to_usecs(
"QEMU_PA_SAMPLES", &dev->u.pa.out->buffer_length,
&dev->u.pa.out->has_buffer_length,
qapi_AudiodevPaPerDirectionOptions_base(dev->u.pa.out));
get_str("QEMU_PA_SERVER", &dev->u.pa.server, &dev->u.pa.has_server);
}
/* SDL */
static void handle_sdl(Audiodev *dev)
{
/* SDL is output only */
get_samples_to_usecs("QEMU_SDL_SAMPLES", &dev->u.sdl.out->buffer_length,
&dev->u.sdl.out->has_buffer_length, dev->u.sdl.out);
}
/* wav */
static void handle_wav(Audiodev *dev)
{
get_int("QEMU_WAV_FREQUENCY",
&dev->u.wav.out->frequency, &dev->u.wav.out->has_frequency);
get_fmt("QEMU_WAV_FORMAT", &dev->u.wav.out->format,
&dev->u.wav.out->has_format);
get_int("QEMU_WAV_DAC_FIXED_CHANNELS",
&dev->u.wav.out->channels, &dev->u.wav.out->has_channels);
get_str("QEMU_WAV_PATH", &dev->u.wav.path, &dev->u.wav.has_path);
}
/* general */
static void handle_per_direction(
AudiodevPerDirectionOptions *pdo, const char *prefix)
{
char buf[64];
size_t len = strlen(prefix);
memcpy(buf, prefix, len);
strcpy(buf + len, "FIXED_SETTINGS");
get_bool(buf, &pdo->fixed_settings, &pdo->has_fixed_settings);
strcpy(buf + len, "FIXED_FREQ");
get_int(buf, &pdo->frequency, &pdo->has_frequency);
strcpy(buf + len, "FIXED_FMT");
get_fmt(buf, &pdo->format, &pdo->has_format);
strcpy(buf + len, "FIXED_CHANNELS");
get_int(buf, &pdo->channels, &pdo->has_channels);
strcpy(buf + len, "VOICES");
get_int(buf, &pdo->voices, &pdo->has_voices);
}
static AudiodevListEntry *legacy_opt(const char *drvname)
{
AudiodevListEntry *e = g_malloc0(sizeof(AudiodevListEntry));
e->dev = g_malloc0(sizeof(Audiodev));
e->dev->id = g_strdup(drvname);
e->dev->driver = qapi_enum_parse(
&AudiodevDriver_lookup, drvname, -1, &error_abort);
audio_create_pdos(e->dev);
handle_per_direction(audio_get_pdo_in(e->dev), "QEMU_AUDIO_ADC_");
handle_per_direction(audio_get_pdo_out(e->dev), "QEMU_AUDIO_DAC_");
/* Original description: Timer period in HZ (0 - use lowest possible) */
get_int("QEMU_AUDIO_TIMER_PERIOD",
&e->dev->timer_period, &e->dev->has_timer_period);
if (e->dev->has_timer_period && e->dev->timer_period) {
e->dev->timer_period = NANOSECONDS_PER_SECOND / 1000 /
e->dev->timer_period;
}
switch (e->dev->driver) {
case AUDIODEV_DRIVER_ALSA:
handle_alsa(e->dev);
break;
case AUDIODEV_DRIVER_COREAUDIO:
handle_coreaudio(e->dev);
break;
case AUDIODEV_DRIVER_DSOUND:
handle_dsound(e->dev);
break;
case AUDIODEV_DRIVER_OSS:
handle_oss(e->dev);
break;
case AUDIODEV_DRIVER_PA:
handle_pa(e->dev);
break;
case AUDIODEV_DRIVER_SDL:
handle_sdl(e->dev);
break;
case AUDIODEV_DRIVER_WAV:
handle_wav(e->dev);
break;
default:
break;
}
return e;
}
AudiodevListHead audio_handle_legacy_opts(void)
{
const char *drvname = getenv("QEMU_AUDIO_DRV");
AudiodevListHead head = QSIMPLEQ_HEAD_INITIALIZER(head);
if (drvname) {
AudiodevListEntry *e;
audio_driver *driver = audio_driver_lookup(drvname);
if (!driver) {
dolog("Unknown audio driver `%s'\n", drvname);
exit(1);
}
e = legacy_opt(drvname);
QSIMPLEQ_INSERT_TAIL(&head, e, next);
} else {
for (int i = 0; audio_prio_list[i]; i++) {
audio_driver *driver = audio_driver_lookup(audio_prio_list[i]);
if (driver && driver->can_be_default) {
AudiodevListEntry *e = legacy_opt(driver->name);
QSIMPLEQ_INSERT_TAIL(&head, e, next);
}
}
if (QSIMPLEQ_EMPTY(&head)) {
dolog("Internal error: no default audio driver available\n");
exit(1);
}
}
return head;
}
/* visitor to print -audiodev option */
typedef struct {
Visitor visitor;
bool comma;
GList *path;
} LegacyPrintVisitor;
static void lv_start_struct(Visitor *v, const char *name, void **obj,
size_t size, Error **errp)
{
LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v;
lv->path = g_list_append(lv->path, g_strdup(name));
}
static void lv_end_struct(Visitor *v, void **obj)
{
LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v;
lv->path = g_list_delete_link(lv->path, g_list_last(lv->path));
}
static void lv_print_key(Visitor *v, const char *name)
{
GList *e;
LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v;
if (lv->comma) {
putchar(',');
} else {
lv->comma = true;
}
for (e = lv->path; e; e = e->next) {
if (e->data) {
printf("%s.", (const char *) e->data);
}
}
printf("%s=", name);
}
static void lv_type_int64(Visitor *v, const char *name, int64_t *obj,
Error **errp)
{
lv_print_key(v, name);
printf("%" PRIi64, *obj);
}
static void lv_type_uint64(Visitor *v, const char *name, uint64_t *obj,
Error **errp)
{
lv_print_key(v, name);
printf("%" PRIu64, *obj);
}
static void lv_type_bool(Visitor *v, const char *name, bool *obj, Error **errp)
{
lv_print_key(v, name);
printf("%s", *obj ? "on" : "off");
}
static void lv_type_str(Visitor *v, const char *name, char **obj, Error **errp)
{
const char *str = *obj;
lv_print_key(v, name);
while (*str) {
if (*str == ',') {
putchar(',');
}
putchar(*str++);
}
}
static void lv_complete(Visitor *v, void *opaque)
{
LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v;
assert(lv->path == NULL);
}
static void lv_free(Visitor *v)
{
LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v;
g_list_free_full(lv->path, g_free);
g_free(lv);
}
static Visitor *legacy_visitor_new(void)
{
LegacyPrintVisitor *lv = g_malloc0(sizeof(LegacyPrintVisitor));
lv->visitor.start_struct = lv_start_struct;
lv->visitor.end_struct = lv_end_struct;
/* lists not supported */
lv->visitor.type_int64 = lv_type_int64;
lv->visitor.type_uint64 = lv_type_uint64;
lv->visitor.type_bool = lv_type_bool;
lv->visitor.type_str = lv_type_str;
lv->visitor.type = VISITOR_OUTPUT;
lv->visitor.complete = lv_complete;
lv->visitor.free = lv_free;
return &lv->visitor;
}
void audio_legacy_help(void)
{
AudiodevListHead head;
AudiodevListEntry *e;
printf("Environment variable based configuration deprecated.\n");
printf("Please use the new -audiodev option.\n");
head = audio_handle_legacy_opts();
printf("\nEquivalent -audiodev to your current environment variables:\n");
if (!getenv("QEMU_AUDIO_DRV")) {
printf("(Since you didn't specify QEMU_AUDIO_DRV, I'll list all "
"possibilities)\n");
}
QSIMPLEQ_FOREACH(e, &head, next) {
Visitor *v;
Audiodev *dev = e->dev;
printf("-audiodev ");
v = legacy_visitor_new();
visit_type_Audiodev(v, NULL, &dev, &error_abort);
visit_free(v);
printf("\n");
}
audio_free_audiodev_list(&head);
}
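
audio_legacy.c, present on only one side of this comparison, exists to translate the old QEMU_AUDIO_* / QEMU_ALSA_* environment variables into -audiodev options: each setting is read with a getenv() helper, converted into the new unit (usually microseconds), and paired with a has_* flag recording whether the user actually set it. A tiny sketch of that optional-setting pattern (names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* read an unsigned integer from the environment, remembering whether it
       was actually present */
    static void get_uint(const char *env, unsigned *dst, bool *has_dst)
    {
        const char *val = getenv(env);
        if (val) {
            *dst = (unsigned)strtoul(val, NULL, 10);
            *has_dst = true;
        }
    }

    int main(void)
    {
        unsigned voices = 1;      /* default used when the variable is unset */
        bool has_voices = false;

        get_uint("QEMU_AUDIO_DAC_VOICES", &voices, &has_voices);
        printf("voices=%u (user-set: %s)\n", voices, has_voices ? "yes" : "no");
        return 0;
    }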

View File

@@ -31,7 +31,7 @@ int audio_pt_init (struct audio_pt *p, void *(*func) (void *),
err = sigfillset (&set);
if (err) {
logerr(p, errno, "%s(%s): sigfillset failed", cap, __func__);
logerr (p, errno, "%s(%s): sigfillset failed", cap, AUDIO_FUNC);
return -1;
}
@@ -57,8 +57,8 @@ int audio_pt_init (struct audio_pt *p, void *(*func) (void *),
err2 = pthread_sigmask (SIG_SETMASK, &old_set, NULL);
if (err2) {
logerr(p, err2, "%s(%s): pthread_sigmask (restore) failed",
cap, __func__);
logerr (p, err2, "%s(%s): pthread_sigmask (restore) failed",
cap, AUDIO_FUNC);
/* We have failed to restore original signal mask, all bets are off,
so terminate the process */
exit (EXIT_FAILURE);
@@ -74,17 +74,17 @@ int audio_pt_init (struct audio_pt *p, void *(*func) (void *),
err2:
err2 = pthread_cond_destroy (&p->cond);
if (err2) {
logerr(p, err2, "%s(%s): pthread_cond_destroy failed", cap, __func__);
logerr (p, err2, "%s(%s): pthread_cond_destroy failed", cap, AUDIO_FUNC);
}
err1:
err2 = pthread_mutex_destroy (&p->mutex);
if (err2) {
logerr(p, err2, "%s(%s): pthread_mutex_destroy failed", cap, __func__);
logerr (p, err2, "%s(%s): pthread_mutex_destroy failed", cap, AUDIO_FUNC);
}
err0:
logerr(p, err, "%s(%s): %s failed", cap, __func__, efunc);
logerr (p, err, "%s(%s): %s failed", cap, AUDIO_FUNC, efunc);
return -1;
}
@@ -94,13 +94,13 @@ int audio_pt_fini (struct audio_pt *p, const char *cap)
err = pthread_cond_destroy (&p->cond);
if (err) {
logerr(p, err, "%s(%s): pthread_cond_destroy failed", cap, __func__);
logerr (p, err, "%s(%s): pthread_cond_destroy failed", cap, AUDIO_FUNC);
ret = -1;
}
err = pthread_mutex_destroy (&p->mutex);
if (err) {
logerr(p, err, "%s(%s): pthread_mutex_destroy failed", cap, __func__);
logerr (p, err, "%s(%s): pthread_mutex_destroy failed", cap, AUDIO_FUNC);
ret = -1;
}
return ret;
@@ -112,7 +112,7 @@ int audio_pt_lock (struct audio_pt *p, const char *cap)
err = pthread_mutex_lock (&p->mutex);
if (err) {
logerr(p, err, "%s(%s): pthread_mutex_lock failed", cap, __func__);
logerr (p, err, "%s(%s): pthread_mutex_lock failed", cap, AUDIO_FUNC);
return -1;
}
return 0;
@@ -124,7 +124,7 @@ int audio_pt_unlock (struct audio_pt *p, const char *cap)
err = pthread_mutex_unlock (&p->mutex);
if (err) {
logerr(p, err, "%s(%s): pthread_mutex_unlock failed", cap, __func__);
logerr (p, err, "%s(%s): pthread_mutex_unlock failed", cap, AUDIO_FUNC);
return -1;
}
return 0;
@@ -136,7 +136,7 @@ int audio_pt_wait (struct audio_pt *p, const char *cap)
err = pthread_cond_wait (&p->cond, &p->mutex);
if (err) {
logerr(p, err, "%s(%s): pthread_cond_wait failed", cap, __func__);
logerr (p, err, "%s(%s): pthread_cond_wait failed", cap, AUDIO_FUNC);
return -1;
}
return 0;
@@ -148,12 +148,12 @@ int audio_pt_unlock_and_signal (struct audio_pt *p, const char *cap)
err = pthread_mutex_unlock (&p->mutex);
if (err) {
logerr(p, err, "%s(%s): pthread_mutex_unlock failed", cap, __func__);
logerr (p, err, "%s(%s): pthread_mutex_unlock failed", cap, AUDIO_FUNC);
return -1;
}
err = pthread_cond_signal (&p->cond);
if (err) {
logerr(p, err, "%s(%s): pthread_cond_signal failed", cap, __func__);
logerr (p, err, "%s(%s): pthread_cond_signal failed", cap, AUDIO_FUNC);
return -1;
}
return 0;
@@ -166,7 +166,7 @@ int audio_pt_join (struct audio_pt *p, void **arg, const char *cap)
err = pthread_join (p->thread, &ret);
if (err) {
logerr(p, err, "%s(%s): pthread_join failed", cap, __func__);
logerr (p, err, "%s(%s): pthread_join failed", cap, AUDIO_FUNC);
return -1;
}
*arg = ret;
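
These audio_pt_int.c hunks only change the logging identifier: one side uses the standard __func__, the other the AUDIO_FUNC macro from audio_int.h, which expands to __FUNCTION__ where available and to a "file:line" string otherwise. A two-line illustration of the difference (not QEMU code):

    #include <stdio.h>

    #define AUDIO_STRINGIFY_(n) #n
    #define AUDIO_STRINGIFY(n) AUDIO_STRINGIFY_(n)
    #define FILE_LINE __FILE__ ":" AUDIO_STRINGIFY(__LINE__)

    int main(void)
    {
        printf("%s vs %s\n", __func__, FILE_LINE);  /* e.g. "main vs demo.c:10" */
        return 0;
    }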

View File

@@ -57,13 +57,13 @@ static void glue (audio_init_nb_voices_, TYPE) (struct audio_driver *drv)
glue (s->nb_hw_voices_, TYPE) = max_voices;
}
if (audio_bug(__func__, !voice_size && max_voices)) {
if (audio_bug (AUDIO_FUNC, !voice_size && max_voices)) {
dolog ("drv=`%s' voice_size=0 max_voices=%d\n",
drv->name, max_voices);
glue (s->nb_hw_voices_, TYPE) = 0;
}
if (audio_bug(__func__, voice_size && !max_voices)) {
if (audio_bug (AUDIO_FUNC, voice_size && !max_voices)) {
dolog ("drv=`%s' voice_size=%d max_voices=0\n",
drv->name, voice_size);
}
@@ -77,7 +77,7 @@ static void glue (audio_pcm_hw_free_resources_, TYPE) (HW *hw)
static int glue (audio_pcm_hw_alloc_resources_, TYPE) (HW *hw)
{
HWBUF = audio_calloc(__func__, hw->samples, sizeof(struct st_sample));
HWBUF = audio_calloc (AUDIO_FUNC, hw->samples, sizeof (struct st_sample));
if (!HWBUF) {
dolog ("Could not allocate " NAME " buffer (%d samples)\n",
hw->samples);
@@ -105,7 +105,7 @@ static int glue (audio_pcm_sw_alloc_resources_, TYPE) (SW *sw)
samples = ((int64_t) sw->hw->samples << 32) / sw->ratio;
sw->buf = audio_calloc(__func__, samples, sizeof(struct st_sample));
sw->buf = audio_calloc (AUDIO_FUNC, samples, sizeof (struct st_sample));
if (!sw->buf) {
dolog ("Could not allocate buffer for `%s' (%d samples)\n",
SW_NAME (sw), samples);
@@ -238,17 +238,17 @@ static HW *glue (audio_pcm_hw_add_new_, TYPE) (struct audsettings *as)
return NULL;
}
if (audio_bug(__func__, !drv)) {
if (audio_bug (AUDIO_FUNC, !drv)) {
dolog ("No host audio driver\n");
return NULL;
}
if (audio_bug(__func__, !drv->pcm_ops)) {
if (audio_bug (AUDIO_FUNC, !drv->pcm_ops)) {
dolog ("Host audio driver without pcm_ops\n");
return NULL;
}
hw = audio_calloc(__func__, 1, glue(drv->voice_size_, TYPE));
hw = audio_calloc (AUDIO_FUNC, 1, glue (drv->voice_size_, TYPE));
if (!hw) {
dolog ("Can not allocate voice `%s' size %d\n",
drv->name, glue (drv->voice_size_, TYPE));
@@ -266,7 +266,7 @@ static HW *glue (audio_pcm_hw_add_new_, TYPE) (struct audsettings *as)
goto err0;
}
if (audio_bug(__func__, hw->samples <= 0)) {
if (audio_bug (AUDIO_FUNC, hw->samples <= 0)) {
dolog ("hw->samples=%d\n", hw->samples);
goto err1;
}
@@ -299,42 +299,11 @@ static HW *glue (audio_pcm_hw_add_new_, TYPE) (struct audsettings *as)
return NULL;
}
AudiodevPerDirectionOptions *glue(audio_get_pdo_, TYPE)(Audiodev *dev)
{
switch (dev->driver) {
case AUDIODEV_DRIVER_NONE:
return dev->u.none.TYPE;
case AUDIODEV_DRIVER_ALSA:
return qapi_AudiodevAlsaPerDirectionOptions_base(dev->u.alsa.TYPE);
case AUDIODEV_DRIVER_COREAUDIO:
return qapi_AudiodevCoreaudioPerDirectionOptions_base(
dev->u.coreaudio.TYPE);
case AUDIODEV_DRIVER_DSOUND:
return dev->u.dsound.TYPE;
case AUDIODEV_DRIVER_OSS:
return qapi_AudiodevOssPerDirectionOptions_base(dev->u.oss.TYPE);
case AUDIODEV_DRIVER_PA:
return qapi_AudiodevPaPerDirectionOptions_base(dev->u.pa.TYPE);
case AUDIODEV_DRIVER_SDL:
return dev->u.sdl.TYPE;
case AUDIODEV_DRIVER_SPICE:
return dev->u.spice.TYPE;
case AUDIODEV_DRIVER_WAV:
return dev->u.wav.TYPE;
case AUDIODEV_DRIVER__MAX:
break;
}
abort();
}
static HW *glue (audio_pcm_hw_add_, TYPE) (struct audsettings *as)
{
HW *hw;
AudioState *s = &glob_audio_state;
AudiodevPerDirectionOptions *pdo = glue(audio_get_pdo_, TYPE)(s->dev);
if (pdo->fixed_settings) {
if (glue (conf.fixed_, TYPE).enabled && glue (conf.fixed_, TYPE).greedy) {
hw = glue (audio_pcm_hw_add_new_, TYPE) (as);
if (hw) {
return hw;
@@ -362,17 +331,15 @@ static SW *glue (audio_pcm_create_voice_pair_, TYPE) (
SW *sw;
HW *hw;
struct audsettings hw_as;
AudioState *s = &glob_audio_state;
AudiodevPerDirectionOptions *pdo = glue(audio_get_pdo_, TYPE)(s->dev);
if (pdo->fixed_settings) {
hw_as = audiodev_to_audsettings(pdo);
if (glue (conf.fixed_, TYPE).enabled) {
hw_as = glue (conf.fixed_, TYPE).settings;
}
else {
hw_as = *as;
}
sw = audio_calloc(__func__, 1, sizeof(*sw));
sw = audio_calloc (AUDIO_FUNC, 1, sizeof (*sw));
if (!sw) {
dolog ("Could not allocate soft voice `%s' (%zu bytes)\n",
sw_name ? sw_name : "unknown", sizeof (*sw));
@@ -412,7 +379,7 @@ static void glue (audio_close_, TYPE) (SW *sw)
void glue (AUD_close_, TYPE) (QEMUSoundCard *card, SW *sw)
{
if (sw) {
if (audio_bug(__func__, !card)) {
if (audio_bug (AUDIO_FUNC, !card)) {
dolog ("card=%p\n", card);
return;
}
@@ -431,9 +398,8 @@ SW *glue (AUD_open_, TYPE) (
)
{
AudioState *s = &glob_audio_state;
AudiodevPerDirectionOptions *pdo = glue(audio_get_pdo_, TYPE)(s->dev);
if (audio_bug(__func__, !card || !name || !callback_fn || !as)) {
if (audio_bug (AUDIO_FUNC, !card || !name || !callback_fn || !as)) {
dolog ("card=%p name=%p callback_fn=%p as=%p\n",
card, name, callback_fn, as);
goto fail;
@@ -442,12 +408,12 @@ SW *glue (AUD_open_, TYPE) (
ldebug ("open %s, freq %d, nchannels %d, fmt %d\n",
name, as->freq, as->nchannels, as->fmt);
if (audio_bug(__func__, audio_validate_settings(as))) {
if (audio_bug (AUDIO_FUNC, audio_validate_settings (as))) {
audio_print_settings (as);
goto fail;
}
if (audio_bug(__func__, !s->drv)) {
if (audio_bug (AUDIO_FUNC, !s->drv)) {
dolog ("Can not open `%s' (no host audio driver)\n", name);
goto fail;
}
@@ -456,7 +422,7 @@ SW *glue (AUD_open_, TYPE) (
return sw;
}
if (!pdo->fixed_settings && sw) {
if (!glue (conf.fixed_, TYPE).enabled && sw) {
glue (AUD_close_, TYPE) (card, sw);
sw = NULL;
}

View File

@@ -24,20 +24,20 @@ int waveformat_from_audio_settings (WAVEFORMATEX *wfx,
wfx->cbSize = 0;
switch (as->fmt) {
case AUDIO_FORMAT_S8:
case AUDIO_FORMAT_U8:
case AUD_FMT_S8:
case AUD_FMT_U8:
wfx->wBitsPerSample = 8;
break;
case AUDIO_FORMAT_S16:
case AUDIO_FORMAT_U16:
case AUD_FMT_S16:
case AUD_FMT_U16:
wfx->wBitsPerSample = 16;
wfx->nAvgBytesPerSec <<= 1;
wfx->nBlockAlign <<= 1;
break;
case AUDIO_FORMAT_S32:
case AUDIO_FORMAT_U32:
case AUD_FMT_S32:
case AUD_FMT_U32:
wfx->wBitsPerSample = 32;
wfx->nAvgBytesPerSec <<= 2;
wfx->nBlockAlign <<= 2;
@@ -85,15 +85,15 @@ int waveformat_to_audio_settings (WAVEFORMATEX *wfx,
switch (wfx->wBitsPerSample) {
case 8:
as->fmt = AUDIO_FORMAT_U8;
as->fmt = AUD_FMT_U8;
break;
case 16:
as->fmt = AUDIO_FORMAT_S16;
as->fmt = AUD_FMT_S16;
break;
case 32:
as->fmt = AUDIO_FORMAT_S32;
as->fmt = AUD_FMT_S32;
break;
default:

View File

@@ -36,6 +36,11 @@
#define MAC_OS_X_VERSION_10_6 1060
#endif
typedef struct {
int buffer_frames;
int nbuffers;
} CoreaudioConf;
typedef struct coreaudioVoiceOut {
HWVoiceOut hw;
pthread_mutex_t mutex;
@@ -502,9 +507,7 @@ static int coreaudio_init_out(HWVoiceOut *hw, struct audsettings *as,
int err;
const char *typ = "playback";
AudioValueRange frameRange;
Audiodev *dev = drv_opaque;
AudiodevCoreaudioPerDirectionOptions *cpdo = dev->u.coreaudio.out;
int frames;
CoreaudioConf *conf = drv_opaque;
/* create mutex */
err = pthread_mutex_init(&core->mutex, NULL);
@@ -535,17 +538,16 @@ static int coreaudio_init_out(HWVoiceOut *hw, struct audsettings *as,
return -1;
}
frames = audio_buffer_frames(
qapi_AudiodevCoreaudioPerDirectionOptions_base(cpdo), as, 11610);
if (frameRange.mMinimum > frames) {
if (frameRange.mMinimum > conf->buffer_frames) {
core->audioDevicePropertyBufferFrameSize = (UInt32) frameRange.mMinimum;
dolog ("warning: Upsizing Buffer Frames to %f\n", frameRange.mMinimum);
} else if (frameRange.mMaximum < frames) {
}
else if (frameRange.mMaximum < conf->buffer_frames) {
core->audioDevicePropertyBufferFrameSize = (UInt32) frameRange.mMaximum;
dolog ("warning: Downsizing Buffer Frames to %f\n", frameRange.mMaximum);
}
else {
core->audioDevicePropertyBufferFrameSize = frames;
core->audioDevicePropertyBufferFrameSize = conf->buffer_frames;
}
/* set Buffer Frame Size */
@@ -566,8 +568,7 @@ static int coreaudio_init_out(HWVoiceOut *hw, struct audsettings *as,
"Could not get device buffer frame size\n");
return -1;
}
hw->samples = (cpdo->has_buffer_count ? cpdo->buffer_count : 4) *
core->audioDevicePropertyBufferFrameSize;
hw->samples = conf->nbuffers * core->audioDevicePropertyBufferFrameSize;
/* get StreamFormat */
status = coreaudio_get_streamformat(core->outputDeviceID,
@@ -679,15 +680,40 @@ static int coreaudio_ctl_out (HWVoiceOut *hw, int cmd, ...)
return 0;
}
static void *coreaudio_audio_init(Audiodev *dev)
static CoreaudioConf glob_conf = {
.buffer_frames = 512,
.nbuffers = 4,
};
static void *coreaudio_audio_init (void)
{
return dev;
CoreaudioConf *conf = g_malloc(sizeof(CoreaudioConf));
*conf = glob_conf;
return conf;
}
static void coreaudio_audio_fini (void *opaque)
{
g_free(opaque);
}
static struct audio_option coreaudio_options[] = {
{
.name = "BUFFER_SIZE",
.tag = AUD_OPT_INT,
.valp = &glob_conf.buffer_frames,
.descr = "Size of the buffer in frames"
},
{
.name = "BUFFER_COUNT",
.tag = AUD_OPT_INT,
.valp = &glob_conf.nbuffers,
.descr = "Number of buffers"
},
{ /* End of list */ }
};
static struct audio_pcm_ops coreaudio_pcm_ops = {
.init_out = coreaudio_init_out,
.fini_out = coreaudio_fini_out,
@@ -696,9 +722,10 @@ static struct audio_pcm_ops coreaudio_pcm_ops = {
.ctl_out = coreaudio_ctl_out
};
static struct audio_driver coreaudio_audio_driver = {
struct audio_driver coreaudio_audio_driver = {
.name = "coreaudio",
.descr = "CoreAudio http://developer.apple.com/audio/coreaudio.html",
.options = coreaudio_options,
.init = coreaudio_audio_init,
.fini = coreaudio_audio_fini,
.pcm_ops = &coreaudio_pcm_ops,
@@ -708,9 +735,3 @@ static struct audio_driver coreaudio_audio_driver = {
.voice_size_out = sizeof (coreaudioVoiceOut),
.voice_size_in = 0
};
static void register_audio_coreaudio(void)
{
audio_driver_register(&coreaudio_audio_driver);
}
type_init(register_audio_coreaudio);
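
In coreaudio.c the buffer sizing comes either from the global CoreaudioConf (BUFFER_SIZE/BUFFER_COUNT options) or from the per-direction Audiodev fields, but both versions clamp the requested frame count into the device-reported frame-size range (frameRange in the hunk) before applying it. A simplified sketch of that clamping step (illustrative only):

    #include <stdio.h>

    /* force a requested frame count into the [min, max] range reported by the
       device, logging when it has to be adjusted */
    static unsigned clamp_buffer_frames(unsigned requested, double min, double max)
    {
        if (min > requested) {
            printf("warning: upsizing buffer frames to %f\n", min);
            return (unsigned)min;
        }
        if (max < requested) {
            printf("warning: downsizing buffer frames to %f\n", max);
            return (unsigned)max;
        }
        return requested;
    }

    int main(void)
    {
        printf("%u\n", clamp_buffer_frames(512, 64.0, 4096.0));    /* 512 */
        printf("%u\n", clamp_buffer_frames(512, 1024.0, 4096.0));  /* 1024 */
        return 0;
    }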

View File

@@ -167,18 +167,17 @@ static int dsound_init_out(HWVoiceOut *hw, struct audsettings *as,
dsound *s = drv_opaque;
WAVEFORMATEX wfx;
struct audsettings obt_as;
DSoundConf *conf = &s->conf;
#ifdef DSBTYPE_IN
const char *typ = "ADC";
DSoundVoiceIn *ds = (DSoundVoiceIn *) hw;
DSCBUFFERDESC bd;
DSCBCAPS bc;
AudiodevPerDirectionOptions *pdo = s->dev->u.dsound.in;
#else
const char *typ = "DAC";
DSoundVoiceOut *ds = (DSoundVoiceOut *) hw;
DSBUFFERDESC bd;
DSBCAPS bc;
AudiodevPerDirectionOptions *pdo = s->dev->u.dsound.out;
#endif
if (!s->FIELD2) {
@@ -194,8 +193,8 @@ static int dsound_init_out(HWVoiceOut *hw, struct audsettings *as,
memset (&bd, 0, sizeof (bd));
bd.dwSize = sizeof (bd);
bd.lpwfxFormat = &wfx;
bd.dwBufferBytes = audio_buffer_bytes(pdo, as, 92880);
#ifdef DSBTYPE_IN
bd.dwBufferBytes = conf->bufsize_in;
hr = IDirectSoundCapture_CreateCaptureBuffer (
s->dsound_capture,
&bd,
@@ -204,6 +203,7 @@ static int dsound_init_out(HWVoiceOut *hw, struct audsettings *as,
);
#else
bd.dwFlags = DSBCAPS_STICKYFOCUS | DSBCAPS_GETCURRENTPOSITION2;
bd.dwBufferBytes = conf->bufsize_out;
hr = IDirectSound_CreateSoundBuffer (
s->dsound,
&bd,

View File

@@ -32,7 +32,6 @@
#define AUDIO_CAP "dsound"
#include "audio_int.h"
#include "qemu/host-utils.h"
#include <windows.h>
#include <mmsystem.h>
@@ -43,11 +42,17 @@
/* #define DEBUG_DSOUND */
typedef struct {
int bufsize_in;
int bufsize_out;
int latency_millis;
} DSoundConf;
typedef struct {
LPDIRECTSOUND dsound;
LPDIRECTSOUNDCAPTURE dsound_capture;
struct audsettings settings;
Audiodev *dev;
DSoundConf conf;
} dsound;
typedef struct {
@@ -243,9 +248,9 @@ static void GCC_FMT_ATTR (3, 4) dsound_logerr2 (
dsound_log_hresult (hr);
}
static uint64_t usecs_to_bytes(struct audio_pcm_info *info, uint32_t usecs)
static DWORD millis_to_bytes (struct audio_pcm_info *info, DWORD millis)
{
return muldiv64(usecs, info->bytes_per_second, 1000000);
return (millis * info->bytes_per_second) / 1000;
}
#ifdef DEBUG_DSOUND
@@ -473,7 +478,7 @@ static int dsound_run_out (HWVoiceOut *hw, int live)
LPVOID p1, p2;
int bufsize;
dsound *s = ds->s;
AudiodevDsoundOptions *dso = &s->dev->u.dsound;
DSoundConf *conf = &s->conf;
if (!dsb) {
dolog ("Attempt to run empty with playback buffer\n");
@@ -496,14 +501,14 @@ static int dsound_run_out (HWVoiceOut *hw, int live)
len = live << hwshift;
if (ds->first_time) {
if (dso->latency) {
if (conf->latency_millis) {
DWORD cur_blat;
cur_blat = audio_ring_dist (wpos, ppos, bufsize);
ds->first_time = 0;
old_pos = wpos;
old_pos +=
usecs_to_bytes(&hw->info, dso->latency) - cur_blat;
millis_to_bytes (&hw->info, conf->latency_millis) - cur_blat;
old_pos %= bufsize;
old_pos &= ~hw->info.align;
}
@@ -538,7 +543,7 @@ static int dsound_run_out (HWVoiceOut *hw, int live)
}
}
if (audio_bug(__func__, len < 0 || len > bufsize)) {
if (audio_bug (AUDIO_FUNC, len < 0 || len > bufsize)) {
dolog ("len=%d bufsize=%d old_pos=%ld ppos=%ld\n",
len, bufsize, old_pos, ppos);
return 0;
@@ -742,6 +747,12 @@ static int dsound_run_in (HWVoiceIn *hw)
return decr;
}
static DSoundConf glob_conf = {
.bufsize_in = 16384,
.bufsize_out = 16384,
.latency_millis = 10
};
static void dsound_audio_fini (void *opaque)
{
HRESULT hr;
@@ -772,22 +783,13 @@ static void dsound_audio_fini (void *opaque)
g_free(s);
}
static void *dsound_audio_init(Audiodev *dev)
static void *dsound_audio_init (void)
{
int err;
HRESULT hr;
dsound *s = g_malloc0(sizeof(dsound));
AudiodevDsoundOptions *dso;
assert(dev->driver == AUDIODEV_DRIVER_DSOUND);
s->dev = dev;
dso = &dev->u.dsound;
if (!dso->has_latency) {
dso->has_latency = true;
dso->latency = 10000; /* 10 ms */
}
s->conf = glob_conf;
hr = CoInitialize (NULL);
if (FAILED (hr)) {
dsound_logerr (hr, "Could not initialize COM\n");
@@ -852,6 +854,28 @@ static void *dsound_audio_init(Audiodev *dev)
return s;
}
static struct audio_option dsound_options[] = {
{
.name = "LATENCY_MILLIS",
.tag = AUD_OPT_INT,
.valp = &glob_conf.latency_millis,
.descr = "(undocumented)"
},
{
.name = "BUFSIZE_OUT",
.tag = AUD_OPT_INT,
.valp = &glob_conf.bufsize_out,
.descr = "(undocumented)"
},
{
.name = "BUFSIZE_IN",
.tag = AUD_OPT_INT,
.valp = &glob_conf.bufsize_in,
.descr = "(undocumented)"
},
{ /* End of list */ }
};
static struct audio_pcm_ops dsound_pcm_ops = {
.init_out = dsound_init_out,
.fini_out = dsound_fini_out,
@@ -866,9 +890,10 @@ static struct audio_pcm_ops dsound_pcm_ops = {
.ctl_in = dsound_ctl_in
};
static struct audio_driver dsound_audio_driver = {
struct audio_driver dsound_audio_driver = {
.name = "dsound",
.descr = "DirectSound http://wikipedia.org/wiki/DirectSound",
.options = dsound_options,
.init = dsound_audio_init,
.fini = dsound_audio_fini,
.pcm_ops = &dsound_pcm_ops,
@@ -878,9 +903,3 @@ static struct audio_driver dsound_audio_driver = {
.voice_size_out = sizeof (DSoundVoiceOut),
.voice_size_in = sizeof (DSoundVoiceIn)
};
static void register_audio_dsound(void)
{
audio_driver_register(&dsound_audio_driver);
}
type_init(register_audio_dsound);
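For reference, the two byte-conversion helpers in the DirectSound hunk above (usecs_to_bytes vs. the restored millis_to_bytes) compute the same quantity at different unit granularity. A minimal standalone sketch, assuming a 44.1 kHz stereo 16-bit stream; the struct below is a stand-in for audio_pcm_info, not the real QEMU type:

    #include <stdint.h>
    #include <stdio.h>

    /* stand-in for the only field both helpers use */
    struct pcm_info { uint64_t bytes_per_second; };

    /* same math as muldiv64(usecs, bytes_per_second, 1000000) */
    static uint64_t usecs_to_bytes(const struct pcm_info *info, uint32_t usecs)
    {
        return (uint64_t)usecs * info->bytes_per_second / 1000000;
    }

    static uint64_t millis_to_bytes(const struct pcm_info *info, uint32_t millis)
    {
        return (uint64_t)millis * info->bytes_per_second / 1000;
    }

    int main(void)
    {
        /* 44100 Hz * 2 channels * 2 bytes per sample = 176400 bytes/second */
        struct pcm_info info = { .bytes_per_second = 44100 * 2 * 2 };
        /* 10 ms of audio either way: both print 1764 */
        printf("%llu %llu\n",
               (unsigned long long)usecs_to_bytes(&info, 10000),
               (unsigned long long)millis_to_bytes(&info, 10));
        return 0;
    }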


@@ -344,7 +344,7 @@ struct rate {
*/
void *st_rate_start (int inrate, int outrate)
{
struct rate *rate = audio_calloc(__func__, 1, sizeof(*rate));
struct rate *rate = audio_calloc (AUDIO_FUNC, 1, sizeof (*rate));
if (!rate) {
dolog ("Could not allocate resampler (%zu bytes)\n", sizeof (*rate));


@@ -136,7 +136,7 @@ static int no_ctl_in (HWVoiceIn *hw, int cmd, ...)
return 0;
}
static void *no_audio_init(Audiodev *dev)
static void *no_audio_init (void)
{
return &no_audio_init;
}
@@ -160,9 +160,10 @@ static struct audio_pcm_ops no_pcm_ops = {
.ctl_in = no_ctl_in
};
static struct audio_driver no_audio_driver = {
struct audio_driver no_audio_driver = {
.name = "none",
.descr = "Timer based audio emulation",
.options = NULL,
.init = no_audio_init,
.fini = no_audio_fini,
.pcm_ops = &no_pcm_ops,
@@ -172,9 +173,3 @@ static struct audio_driver no_audio_driver = {
.voice_size_out = sizeof (NoVoiceOut),
.voice_size_in = sizeof (NoVoiceIn)
};
static void register_audio_none(void)
{
audio_driver_register(&no_audio_driver);
}
type_init(register_audio_none);


@@ -37,6 +37,16 @@
#define USE_DSP_POLICY
#endif
typedef struct OSSConf {
int try_mmap;
int nfrags;
int fragsize;
const char *devpath_out;
const char *devpath_in;
int exclusive;
int policy;
} OSSConf;
typedef struct OSSVoiceOut {
HWVoiceOut hw;
void *pcm_buf;
@@ -46,7 +56,7 @@ typedef struct OSSVoiceOut {
int fragsize;
int mmapped;
int pending;
Audiodev *dev;
OSSConf *conf;
} OSSVoiceOut;
typedef struct OSSVoiceIn {
@@ -55,12 +65,12 @@ typedef struct OSSVoiceIn {
int fd;
int nfrags;
int fragsize;
Audiodev *dev;
OSSConf *conf;
} OSSVoiceIn;
struct oss_params {
int freq;
int fmt;
audfmt_e fmt;
int nchannels;
int nfrags;
int fragsize;
@@ -138,16 +148,16 @@ static int oss_write (SWVoiceOut *sw, void *buf, int len)
return audio_pcm_sw_write (sw, buf, len);
}
static int aud_to_ossfmt (AudioFormat fmt, int endianness)
static int aud_to_ossfmt (audfmt_e fmt, int endianness)
{
switch (fmt) {
case AUDIO_FORMAT_S8:
case AUD_FMT_S8:
return AFMT_S8;
case AUDIO_FORMAT_U8:
case AUD_FMT_U8:
return AFMT_U8;
case AUDIO_FORMAT_S16:
case AUD_FMT_S16:
if (endianness) {
return AFMT_S16_BE;
}
@@ -155,7 +165,7 @@ static int aud_to_ossfmt (AudioFormat fmt, int endianness)
return AFMT_S16_LE;
}
case AUDIO_FORMAT_U16:
case AUD_FMT_U16:
if (endianness) {
return AFMT_U16_BE;
}
@@ -172,37 +182,37 @@ static int aud_to_ossfmt (AudioFormat fmt, int endianness)
}
}
static int oss_to_audfmt (int ossfmt, AudioFormat *fmt, int *endianness)
static int oss_to_audfmt (int ossfmt, audfmt_e *fmt, int *endianness)
{
switch (ossfmt) {
case AFMT_S8:
*endianness = 0;
*fmt = AUDIO_FORMAT_S8;
*fmt = AUD_FMT_S8;
break;
case AFMT_U8:
*endianness = 0;
*fmt = AUDIO_FORMAT_U8;
*fmt = AUD_FMT_U8;
break;
case AFMT_S16_LE:
*endianness = 0;
*fmt = AUDIO_FORMAT_S16;
*fmt = AUD_FMT_S16;
break;
case AFMT_U16_LE:
*endianness = 0;
*fmt = AUDIO_FORMAT_U16;
*fmt = AUD_FMT_U16;
break;
case AFMT_S16_BE:
*endianness = 1;
*fmt = AUDIO_FORMAT_S16;
*fmt = AUD_FMT_S16;
break;
case AFMT_U16_BE:
*endianness = 1;
*fmt = AUDIO_FORMAT_U16;
*fmt = AUD_FMT_U16;
break;
default:
@@ -252,25 +262,19 @@ static int oss_get_version (int fd, int *version, const char *typ)
}
#endif
static int oss_open(int in, struct oss_params *req, audsettings *as,
struct oss_params *obt, int *pfd, Audiodev *dev)
static int oss_open (int in, struct oss_params *req,
struct oss_params *obt, int *pfd, OSSConf* conf)
{
AudiodevOssOptions *oopts = &dev->u.oss;
AudiodevOssPerDirectionOptions *opdo = in ? oopts->in : oopts->out;
int fd;
int oflags = (oopts->has_exclusive && oopts->exclusive) ? O_EXCL : 0;
int oflags = conf->exclusive ? O_EXCL : 0;
audio_buf_info abinfo;
int fmt, freq, nchannels;
int setfragment = 1;
const char *dspname = opdo->has_dev ? opdo->dev : "/dev/dsp";
const char *dspname = in ? conf->devpath_in : conf->devpath_out;
const char *typ = in ? "ADC" : "DAC";
#ifdef USE_DSP_POLICY
int policy = oopts->has_dsp_policy ? oopts->dsp_policy : 5;
#endif
/* Kludge needed to have working mmap on Linux */
oflags |= (oopts->has_try_mmap && oopts->try_mmap) ?
O_RDWR : (in ? O_RDONLY : O_WRONLY);
oflags |= conf->try_mmap ? O_RDWR : (in ? O_RDONLY : O_WRONLY);
fd = open (dspname, oflags | O_NONBLOCK);
if (-1 == fd) {
@@ -281,9 +285,6 @@ static int oss_open(int in, struct oss_params *req, audsettings *as,
freq = req->freq;
nchannels = req->nchannels;
fmt = req->fmt;
req->nfrags = opdo->has_buffer_count ? opdo->buffer_count : 4;
req->fragsize = audio_buffer_bytes(
qapi_AudiodevOssPerDirectionOptions_base(opdo), as, 23220);
if (ioctl (fd, SNDCTL_DSP_SAMPLESIZE, &fmt)) {
oss_logerr2 (errno, typ, "Failed to set sample size %d\n", req->fmt);
@@ -307,18 +308,18 @@ static int oss_open(int in, struct oss_params *req, audsettings *as,
}
#ifdef USE_DSP_POLICY
if (policy >= 0) {
if (conf->policy >= 0) {
int version;
if (!oss_get_version (fd, &version, typ)) {
trace_oss_version(version);
if (version >= 0x040000) {
int policy2 = policy;
if (ioctl(fd, SNDCTL_DSP_POLICY, &policy2)) {
int policy = conf->policy;
if (ioctl (fd, SNDCTL_DSP_POLICY, &policy)) {
oss_logerr2 (errno, typ,
"Failed to set timing policy to %d\n",
policy);
conf->policy);
goto err;
}
setfragment = 0;
@@ -499,18 +500,19 @@ static int oss_init_out(HWVoiceOut *hw, struct audsettings *as,
int endianness;
int err;
int fd;
AudioFormat effective_fmt;
audfmt_e effective_fmt;
struct audsettings obt_as;
Audiodev *dev = drv_opaque;
AudiodevOssOptions *oopts = &dev->u.oss;
OSSConf *conf = drv_opaque;
oss->fd = -1;
req.fmt = aud_to_ossfmt (as->fmt, as->endianness);
req.freq = as->freq;
req.nchannels = as->nchannels;
req.fragsize = conf->fragsize;
req.nfrags = conf->nfrags;
if (oss_open(0, &req, as, &obt, &fd, dev)) {
if (oss_open (0, &req, &obt, &fd, conf)) {
return -1;
}
@@ -537,7 +539,7 @@ static int oss_init_out(HWVoiceOut *hw, struct audsettings *as,
hw->samples = (obt.nfrags * obt.fragsize) >> hw->info.shift;
oss->mmapped = 0;
if (oopts->has_try_mmap && oopts->try_mmap) {
if (conf->try_mmap) {
oss->pcm_buf = mmap (
NULL,
hw->samples << hw->info.shift,
@@ -580,9 +582,11 @@ static int oss_init_out(HWVoiceOut *hw, struct audsettings *as,
}
if (!oss->mmapped) {
oss->pcm_buf = audio_calloc(__func__,
hw->samples,
1 << hw->info.shift);
oss->pcm_buf = audio_calloc (
AUDIO_FUNC,
hw->samples,
1 << hw->info.shift
);
if (!oss->pcm_buf) {
dolog (
"Could not allocate DAC buffer (%d samples, each %d bytes)\n",
@@ -595,7 +599,7 @@ static int oss_init_out(HWVoiceOut *hw, struct audsettings *as,
}
oss->fd = fd;
oss->dev = dev;
oss->conf = conf;
return 0;
}
@@ -603,12 +607,16 @@ static int oss_ctl_out (HWVoiceOut *hw, int cmd, ...)
{
int trig;
OSSVoiceOut *oss = (OSSVoiceOut *) hw;
AudiodevOssPerDirectionOptions *opdo = oss->dev->u.oss.out;
switch (cmd) {
case VOICE_ENABLE:
{
bool poll_mode = opdo->try_poll;
va_list ap;
int poll_mode;
va_start (ap, cmd);
poll_mode = va_arg (ap, int);
va_end (ap);
ldebug ("enabling voice\n");
if (poll_mode) {
@@ -661,16 +669,18 @@ static int oss_init_in(HWVoiceIn *hw, struct audsettings *as, void *drv_opaque)
int endianness;
int err;
int fd;
AudioFormat effective_fmt;
audfmt_e effective_fmt;
struct audsettings obt_as;
Audiodev *dev = drv_opaque;
OSSConf *conf = drv_opaque;
oss->fd = -1;
req.fmt = aud_to_ossfmt (as->fmt, as->endianness);
req.freq = as->freq;
req.nchannels = as->nchannels;
if (oss_open(1, &req, as, &obt, &fd, dev)) {
req.fragsize = conf->fragsize;
req.nfrags = conf->nfrags;
if (oss_open (1, &req, &obt, &fd, conf)) {
return -1;
}
@@ -695,7 +705,7 @@ static int oss_init_in(HWVoiceIn *hw, struct audsettings *as, void *drv_opaque)
}
hw->samples = (obt.nfrags * obt.fragsize) >> hw->info.shift;
oss->pcm_buf = audio_calloc(__func__, hw->samples, 1 << hw->info.shift);
oss->pcm_buf = audio_calloc (AUDIO_FUNC, hw->samples, 1 << hw->info.shift);
if (!oss->pcm_buf) {
dolog ("Could not allocate ADC buffer (%d samples, each %d bytes)\n",
hw->samples, 1 << hw->info.shift);
@@ -704,7 +714,7 @@ static int oss_init_in(HWVoiceIn *hw, struct audsettings *as, void *drv_opaque)
}
oss->fd = fd;
oss->dev = dev;
oss->conf = conf;
return 0;
}
@@ -795,12 +805,16 @@ static int oss_read (SWVoiceIn *sw, void *buf, int size)
static int oss_ctl_in (HWVoiceIn *hw, int cmd, ...)
{
OSSVoiceIn *oss = (OSSVoiceIn *) hw;
AudiodevOssPerDirectionOptions *opdo = oss->dev->u.oss.out;
switch (cmd) {
case VOICE_ENABLE:
{
bool poll_mode = opdo->try_poll;
va_list ap;
int poll_mode;
va_start (ap, cmd);
poll_mode = va_arg (ap, int);
va_end (ap);
if (poll_mode) {
oss_poll_in (hw);
@@ -820,36 +834,82 @@ static int oss_ctl_in (HWVoiceIn *hw, int cmd, ...)
return 0;
}
static void oss_init_per_direction(AudiodevOssPerDirectionOptions *opdo)
static OSSConf glob_conf = {
.try_mmap = 0,
.nfrags = 4,
.fragsize = 4096,
.devpath_out = "/dev/dsp",
.devpath_in = "/dev/dsp",
.exclusive = 0,
.policy = 5
};
static void *oss_audio_init (void)
{
if (!opdo->has_try_poll) {
opdo->try_poll = true;
opdo->has_try_poll = true;
}
}
OSSConf *conf = g_malloc(sizeof(OSSConf));
*conf = glob_conf;
static void *oss_audio_init(Audiodev *dev)
{
AudiodevOssOptions *oopts;
assert(dev->driver == AUDIODEV_DRIVER_OSS);
oopts = &dev->u.oss;
oss_init_per_direction(oopts->in);
oss_init_per_direction(oopts->out);
if (access(oopts->in->has_dev ? oopts->in->dev : "/dev/dsp",
R_OK | W_OK) < 0 ||
access(oopts->out->has_dev ? oopts->out->dev : "/dev/dsp",
R_OK | W_OK) < 0) {
if (access(conf->devpath_in, R_OK | W_OK) < 0 ||
access(conf->devpath_out, R_OK | W_OK) < 0) {
g_free(conf);
return NULL;
}
return dev;
return conf;
}
static void oss_audio_fini (void *opaque)
{
g_free(opaque);
}
static struct audio_option oss_options[] = {
{
.name = "FRAGSIZE",
.tag = AUD_OPT_INT,
.valp = &glob_conf.fragsize,
.descr = "Fragment size in bytes"
},
{
.name = "NFRAGS",
.tag = AUD_OPT_INT,
.valp = &glob_conf.nfrags,
.descr = "Number of fragments"
},
{
.name = "MMAP",
.tag = AUD_OPT_BOOL,
.valp = &glob_conf.try_mmap,
.descr = "Try using memory mapped access"
},
{
.name = "DAC_DEV",
.tag = AUD_OPT_STR,
.valp = &glob_conf.devpath_out,
.descr = "Path to DAC device"
},
{
.name = "ADC_DEV",
.tag = AUD_OPT_STR,
.valp = &glob_conf.devpath_in,
.descr = "Path to ADC device"
},
{
.name = "EXCLUSIVE",
.tag = AUD_OPT_BOOL,
.valp = &glob_conf.exclusive,
.descr = "Open device in exclusive mode (vmix won't work)"
},
#ifdef USE_DSP_POLICY
{
.name = "POLICY",
.tag = AUD_OPT_INT,
.valp = &glob_conf.policy,
.descr = "Set the timing policy of the device, -1 to use fragment mode",
},
#endif
{ /* End of list */ }
};
static struct audio_pcm_ops oss_pcm_ops = {
.init_out = oss_init_out,
.fini_out = oss_fini_out,
@@ -864,9 +924,10 @@ static struct audio_pcm_ops oss_pcm_ops = {
.ctl_in = oss_ctl_in
};
static struct audio_driver oss_audio_driver = {
struct audio_driver oss_audio_driver = {
.name = "oss",
.descr = "OSS http://www.opensound.com",
.options = oss_options,
.init = oss_audio_init,
.fini = oss_audio_fini,
.pcm_ops = &oss_pcm_ops,
@@ -876,9 +937,3 @@ static struct audio_driver oss_audio_driver = {
.voice_size_out = sizeof (OSSVoiceOut),
.voice_size_in = sizeof (OSSVoiceIn)
};
static void register_audio_oss(void)
{
audio_driver_register(&oss_audio_driver);
}
type_init(register_audio_oss);


@@ -2,7 +2,6 @@
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "audio.h"
#include "qapi/opts-visitor.h"
#include <pulse/pulseaudio.h>
@@ -11,7 +10,14 @@
#include "audio_pt_int.h"
typedef struct {
Audiodev *dev;
int samples;
char *server;
char *sink;
char *source;
} PAConf;
typedef struct {
PAConf conf;
pa_threaded_mainloop *mainloop;
pa_context *context;
} paaudio;
@@ -26,7 +32,6 @@ typedef struct {
void *pcm_buf;
struct audio_pt pt;
paaudio *g;
int samples;
} PAVoiceOut;
typedef struct {
@@ -41,7 +46,6 @@ typedef struct {
const void *read_data;
size_t read_index, read_length;
paaudio *g;
int samples;
} PAVoiceIn;
static void qpa_audio_fini(void *opaque);
@@ -85,7 +89,7 @@ static inline int PA_STREAM_IS_GOOD(pa_stream_state_t x)
} \
goto label; \
} \
} while (0)
} while (0);
#define CHECK_DEAD_GOTO(c, stream, rerror, label) \
do { \
@@ -103,7 +107,7 @@ static inline int PA_STREAM_IS_GOOD(pa_stream_state_t x)
} \
goto label; \
} \
} while (0)
} while (0);
static int qpa_simple_read (PAVoiceIn *p, void *data, size_t length, int *rerror)
{
@@ -202,7 +206,7 @@ static void *qpa_thread_out (void *arg)
PAVoiceOut *pa = arg;
HWVoiceOut *hw = &pa->hw;
if (audio_pt_lock(&pa->pt, __func__)) {
if (audio_pt_lock (&pa->pt, AUDIO_FUNC)) {
return NULL;
}
@@ -218,15 +222,15 @@ static void *qpa_thread_out (void *arg)
break;
}
if (audio_pt_wait(&pa->pt, __func__)) {
if (audio_pt_wait (&pa->pt, AUDIO_FUNC)) {
goto exit;
}
}
decr = to_mix = audio_MIN(pa->live, pa->samples >> 5);
decr = to_mix = audio_MIN (pa->live, pa->g->conf.samples >> 2);
rpos = pa->rpos;
if (audio_pt_unlock(&pa->pt, __func__)) {
if (audio_pt_unlock (&pa->pt, AUDIO_FUNC)) {
return NULL;
}
@@ -247,7 +251,7 @@ static void *qpa_thread_out (void *arg)
to_mix -= chunk;
}
if (audio_pt_lock(&pa->pt, __func__)) {
if (audio_pt_lock (&pa->pt, AUDIO_FUNC)) {
return NULL;
}
@@ -257,7 +261,7 @@ static void *qpa_thread_out (void *arg)
}
exit:
audio_pt_unlock(&pa->pt, __func__);
audio_pt_unlock (&pa->pt, AUDIO_FUNC);
return NULL;
}
@@ -266,7 +270,7 @@ static int qpa_run_out (HWVoiceOut *hw, int live)
int decr;
PAVoiceOut *pa = (PAVoiceOut *) hw;
if (audio_pt_lock(&pa->pt, __func__)) {
if (audio_pt_lock (&pa->pt, AUDIO_FUNC)) {
return 0;
}
@@ -275,10 +279,10 @@ static int qpa_run_out (HWVoiceOut *hw, int live)
pa->live = live - decr;
hw->rpos = pa->rpos;
if (pa->live > 0) {
audio_pt_unlock_and_signal(&pa->pt, __func__);
audio_pt_unlock_and_signal (&pa->pt, AUDIO_FUNC);
}
else {
audio_pt_unlock(&pa->pt, __func__);
audio_pt_unlock (&pa->pt, AUDIO_FUNC);
}
return decr;
}
@@ -294,7 +298,7 @@ static void *qpa_thread_in (void *arg)
PAVoiceIn *pa = arg;
HWVoiceIn *hw = &pa->hw;
if (audio_pt_lock(&pa->pt, __func__)) {
if (audio_pt_lock (&pa->pt, AUDIO_FUNC)) {
return NULL;
}
@@ -310,15 +314,15 @@ static void *qpa_thread_in (void *arg)
break;
}
if (audio_pt_wait(&pa->pt, __func__)) {
if (audio_pt_wait (&pa->pt, AUDIO_FUNC)) {
goto exit;
}
}
incr = to_grab = audio_MIN(pa->dead, pa->samples >> 5);
incr = to_grab = audio_MIN (pa->dead, pa->g->conf.samples >> 2);
wpos = pa->wpos;
if (audio_pt_unlock(&pa->pt, __func__)) {
if (audio_pt_unlock (&pa->pt, AUDIO_FUNC)) {
return NULL;
}
@@ -338,7 +342,7 @@ static void *qpa_thread_in (void *arg)
to_grab -= chunk;
}
if (audio_pt_lock(&pa->pt, __func__)) {
if (audio_pt_lock (&pa->pt, AUDIO_FUNC)) {
return NULL;
}
@@ -348,7 +352,7 @@ static void *qpa_thread_in (void *arg)
}
exit:
audio_pt_unlock(&pa->pt, __func__);
audio_pt_unlock (&pa->pt, AUDIO_FUNC);
return NULL;
}
@@ -357,7 +361,7 @@ static int qpa_run_in (HWVoiceIn *hw)
int live, incr, dead;
PAVoiceIn *pa = (PAVoiceIn *) hw;
if (audio_pt_lock(&pa->pt, __func__)) {
if (audio_pt_lock (&pa->pt, AUDIO_FUNC)) {
return 0;
}
@@ -368,10 +372,10 @@ static int qpa_run_in (HWVoiceIn *hw)
pa->dead = dead - incr;
hw->wpos = pa->wpos;
if (pa->dead > 0) {
audio_pt_unlock_and_signal(&pa->pt, __func__);
audio_pt_unlock_and_signal (&pa->pt, AUDIO_FUNC);
}
else {
audio_pt_unlock(&pa->pt, __func__);
audio_pt_unlock (&pa->pt, AUDIO_FUNC);
}
return incr;
}
@@ -381,21 +385,21 @@ static int qpa_read (SWVoiceIn *sw, void *buf, int len)
return audio_pcm_sw_read (sw, buf, len);
}
static pa_sample_format_t audfmt_to_pa (AudioFormat afmt, int endianness)
static pa_sample_format_t audfmt_to_pa (audfmt_e afmt, int endianness)
{
int format;
switch (afmt) {
case AUDIO_FORMAT_S8:
case AUDIO_FORMAT_U8:
case AUD_FMT_S8:
case AUD_FMT_U8:
format = PA_SAMPLE_U8;
break;
case AUDIO_FORMAT_S16:
case AUDIO_FORMAT_U16:
case AUD_FMT_S16:
case AUD_FMT_U16:
format = endianness ? PA_SAMPLE_S16BE : PA_SAMPLE_S16LE;
break;
case AUDIO_FORMAT_S32:
case AUDIO_FORMAT_U32:
case AUD_FMT_S32:
case AUD_FMT_U32:
format = endianness ? PA_SAMPLE_S32BE : PA_SAMPLE_S32LE;
break;
default:
@@ -406,26 +410,26 @@ static pa_sample_format_t audfmt_to_pa (AudioFormat afmt, int endianness)
return format;
}
static AudioFormat pa_to_audfmt (pa_sample_format_t fmt, int *endianness)
static audfmt_e pa_to_audfmt (pa_sample_format_t fmt, int *endianness)
{
switch (fmt) {
case PA_SAMPLE_U8:
return AUDIO_FORMAT_U8;
return AUD_FMT_U8;
case PA_SAMPLE_S16BE:
*endianness = 1;
return AUDIO_FORMAT_S16;
return AUD_FMT_S16;
case PA_SAMPLE_S16LE:
*endianness = 0;
return AUDIO_FORMAT_S16;
return AUD_FMT_S16;
case PA_SAMPLE_S32BE:
*endianness = 1;
return AUDIO_FORMAT_S32;
return AUD_FMT_S32;
case PA_SAMPLE_S32LE:
*endianness = 0;
return AUDIO_FORMAT_S32;
return AUD_FMT_S32;
default:
dolog ("Internal logic error: Bad pa_sample_format %d\n", fmt);
return AUDIO_FORMAT_U8;
return AUD_FMT_U8;
}
}
@@ -542,15 +546,17 @@ static int qpa_init_out(HWVoiceOut *hw, struct audsettings *as,
struct audsettings obt_as = *as;
PAVoiceOut *pa = (PAVoiceOut *) hw;
paaudio *g = pa->g = drv_opaque;
AudiodevPaOptions *popts = &g->dev->u.pa;
AudiodevPaPerDirectionOptions *ppdo = popts->out;
ss.format = audfmt_to_pa (as->fmt, as->endianness);
ss.channels = as->nchannels;
ss.rate = as->freq;
ba.tlength = pa_usec_to_bytes(ppdo->latency, &ss);
ba.minreq = -1;
/*
* qemu audio tick runs at 100 Hz (by default), so processing
* data chunks worth 10 ms of sound should be a good fit.
*/
ba.tlength = pa_usec_to_bytes (10 * 1000, &ss);
ba.minreq = pa_usec_to_bytes (5 * 1000, &ss);
ba.maxlength = -1;
ba.prebuf = -1;
@@ -560,7 +566,7 @@ static int qpa_init_out(HWVoiceOut *hw, struct audsettings *as,
g,
"qemu",
PA_STREAM_PLAYBACK,
ppdo->has_name ? ppdo->name : NULL,
g->conf.sink,
&ss,
NULL, /* channel map */
&ba, /* buffering attributes */
@@ -572,10 +578,8 @@ static int qpa_init_out(HWVoiceOut *hw, struct audsettings *as,
}
audio_pcm_init_info (&hw->info, &obt_as);
hw->samples = pa->samples = audio_buffer_samples(
qapi_AudiodevPaPerDirectionOptions_base(ppdo),
&obt_as, ppdo->buffer_length);
pa->pcm_buf = audio_calloc(__func__, hw->samples, 1 << hw->info.shift);
hw->samples = g->conf.samples;
pa->pcm_buf = audio_calloc (AUDIO_FUNC, hw->samples, 1 << hw->info.shift);
pa->rpos = hw->rpos;
if (!pa->pcm_buf) {
dolog ("Could not allocate buffer (%d bytes)\n",
@@ -583,7 +587,7 @@ static int qpa_init_out(HWVoiceOut *hw, struct audsettings *as,
goto fail2;
}
if (audio_pt_init(&pa->pt, qpa_thread_out, hw, AUDIO_CAP, __func__)) {
if (audio_pt_init (&pa->pt, qpa_thread_out, hw, AUDIO_CAP, AUDIO_FUNC)) {
goto fail3;
}
@@ -605,32 +609,24 @@ static int qpa_init_in(HWVoiceIn *hw, struct audsettings *as, void *drv_opaque)
{
int error;
pa_sample_spec ss;
pa_buffer_attr ba;
struct audsettings obt_as = *as;
PAVoiceIn *pa = (PAVoiceIn *) hw;
paaudio *g = pa->g = drv_opaque;
AudiodevPaOptions *popts = &g->dev->u.pa;
AudiodevPaPerDirectionOptions *ppdo = popts->in;
ss.format = audfmt_to_pa (as->fmt, as->endianness);
ss.channels = as->nchannels;
ss.rate = as->freq;
ba.fragsize = pa_usec_to_bytes(ppdo->latency, &ss);
ba.maxlength = -1;
ba.minreq = -1;
ba.prebuf = -1;
obt_as.fmt = pa_to_audfmt (ss.format, &obt_as.endianness);
pa->stream = qpa_simple_new (
g,
"qemu",
PA_STREAM_RECORD,
ppdo->has_name ? ppdo->name : NULL,
g->conf.source,
&ss,
NULL, /* channel map */
&ba, /* buffering attributes */
NULL, /* buffering attributes */
&error
);
if (!pa->stream) {
@@ -639,10 +635,8 @@ static int qpa_init_in(HWVoiceIn *hw, struct audsettings *as, void *drv_opaque)
}
audio_pcm_init_info (&hw->info, &obt_as);
hw->samples = pa->samples = audio_buffer_samples(
qapi_AudiodevPaPerDirectionOptions_base(ppdo),
&obt_as, ppdo->buffer_length);
pa->pcm_buf = audio_calloc(__func__, hw->samples, 1 << hw->info.shift);
hw->samples = g->conf.samples;
pa->pcm_buf = audio_calloc (AUDIO_FUNC, hw->samples, 1 << hw->info.shift);
pa->wpos = hw->wpos;
if (!pa->pcm_buf) {
dolog ("Could not allocate buffer (%d bytes)\n",
@@ -650,7 +644,7 @@ static int qpa_init_in(HWVoiceIn *hw, struct audsettings *as, void *drv_opaque)
goto fail2;
}
if (audio_pt_init(&pa->pt, qpa_thread_in, hw, AUDIO_CAP, __func__)) {
if (audio_pt_init (&pa->pt, qpa_thread_in, hw, AUDIO_CAP, AUDIO_FUNC)) {
goto fail3;
}
@@ -673,17 +667,17 @@ static void qpa_fini_out (HWVoiceOut *hw)
void *ret;
PAVoiceOut *pa = (PAVoiceOut *) hw;
audio_pt_lock(&pa->pt, __func__);
audio_pt_lock (&pa->pt, AUDIO_FUNC);
pa->done = 1;
audio_pt_unlock_and_signal(&pa->pt, __func__);
audio_pt_join(&pa->pt, &ret, __func__);
audio_pt_unlock_and_signal (&pa->pt, AUDIO_FUNC);
audio_pt_join (&pa->pt, &ret, AUDIO_FUNC);
if (pa->stream) {
pa_stream_unref (pa->stream);
pa->stream = NULL;
}
audio_pt_fini(&pa->pt, __func__);
audio_pt_fini (&pa->pt, AUDIO_FUNC);
g_free (pa->pcm_buf);
pa->pcm_buf = NULL;
}
@@ -693,17 +687,17 @@ static void qpa_fini_in (HWVoiceIn *hw)
void *ret;
PAVoiceIn *pa = (PAVoiceIn *) hw;
audio_pt_lock(&pa->pt, __func__);
audio_pt_lock (&pa->pt, AUDIO_FUNC);
pa->done = 1;
audio_pt_unlock_and_signal(&pa->pt, __func__);
audio_pt_join(&pa->pt, &ret, __func__);
audio_pt_unlock_and_signal (&pa->pt, AUDIO_FUNC);
audio_pt_join (&pa->pt, &ret, AUDIO_FUNC);
if (pa->stream) {
pa_stream_unref (pa->stream);
pa->stream = NULL;
}
audio_pt_fini(&pa->pt, __func__);
audio_pt_fini (&pa->pt, AUDIO_FUNC);
g_free (pa->pcm_buf);
pa->pcm_buf = NULL;
}
@@ -813,54 +807,15 @@ static int qpa_ctl_in (HWVoiceIn *hw, int cmd, ...)
return 0;
}
static int qpa_validate_per_direction_opts(Audiodev *dev,
AudiodevPaPerDirectionOptions *pdo)
/* common */
static PAConf glob_conf = {
.samples = 4096,
};
static void *qpa_audio_init (void)
{
if (!pdo->has_buffer_length) {
pdo->has_buffer_length = true;
pdo->buffer_length = 46440;
}
if (!pdo->has_latency) {
pdo->has_latency = true;
pdo->latency = 15000;
}
return 1;
}
static void *qpa_audio_init(Audiodev *dev)
{
paaudio *g;
AudiodevPaOptions *popts = &dev->u.pa;
const char *server;
if (!popts->has_server) {
char pidfile[64];
char *runtime;
struct stat st;
runtime = getenv("XDG_RUNTIME_DIR");
if (!runtime) {
return NULL;
}
snprintf(pidfile, sizeof(pidfile), "%s/pulse/pid", runtime);
if (stat(pidfile, &st) != 0) {
return NULL;
}
}
assert(dev->driver == AUDIODEV_DRIVER_PA);
g = g_malloc(sizeof(paaudio));
server = popts->has_server ? popts->server : NULL;
if (!qpa_validate_per_direction_opts(dev, popts->in)) {
goto fail;
}
if (!qpa_validate_per_direction_opts(dev, popts->out)) {
goto fail;
}
g->dev = dev;
paaudio *g = g_malloc(sizeof(paaudio));
g->conf = glob_conf;
g->mainloop = NULL;
g->context = NULL;
@@ -870,14 +825,14 @@ static void *qpa_audio_init(Audiodev *dev)
}
g->context = pa_context_new (pa_threaded_mainloop_get_api (g->mainloop),
server);
g->conf.server);
if (!g->context) {
goto fail;
}
pa_context_set_state_callback (g->context, context_state_cb, g);
if (pa_context_connect(g->context, server, 0, NULL) < 0) {
if (pa_context_connect (g->context, g->conf.server, 0, NULL) < 0) {
qpa_logerr (pa_context_errno (g->context),
"pa_context_connect() failed\n");
goto fail;
@@ -940,6 +895,34 @@ static void qpa_audio_fini (void *opaque)
g_free(g);
}
struct audio_option qpa_options[] = {
{
.name = "SAMPLES",
.tag = AUD_OPT_INT,
.valp = &glob_conf.samples,
.descr = "buffer size in samples"
},
{
.name = "SERVER",
.tag = AUD_OPT_STR,
.valp = &glob_conf.server,
.descr = "server address"
},
{
.name = "SINK",
.tag = AUD_OPT_STR,
.valp = &glob_conf.sink,
.descr = "sink device name"
},
{
.name = "SOURCE",
.tag = AUD_OPT_STR,
.valp = &glob_conf.source,
.descr = "source device name"
},
{ /* End of list */ }
};
static struct audio_pcm_ops qpa_pcm_ops = {
.init_out = qpa_init_out,
.fini_out = qpa_fini_out,
@@ -954,9 +937,10 @@ static struct audio_pcm_ops qpa_pcm_ops = {
.ctl_in = qpa_ctl_in
};
static struct audio_driver pa_audio_driver = {
struct audio_driver pa_audio_driver = {
.name = "pa",
.descr = "http://www.pulseaudio.org/",
.options = qpa_options,
.init = qpa_audio_init,
.fini = qpa_audio_fini,
.pcm_ops = &qpa_pcm_ops,
@@ -967,9 +951,3 @@ static struct audio_driver pa_audio_driver = {
.voice_size_in = sizeof (PAVoiceIn),
.ctl_caps = VOICE_VOLUME_CAP
};
static void register_audio_pa(void)
{
audio_driver_register(&pa_audio_driver);
}
type_init(register_audio_pa);
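The buffering comment in the PulseAudio hunk above sizes tlength to one 10 ms audio-timer tick and minreq to half of that. A rough standalone sketch of the byte math, assuming an S16LE stereo 44.1 kHz sample spec; the exact frame rounding is left to libpulse's pa_usec_to_bytes():

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* assumed sample spec: S16LE, 2 channels, 44100 Hz -> 4-byte frames */
        const uint64_t rate = 44100, frame_size = 2 * 2;

        /* usec -> whole frames -> bytes, approximating pa_usec_to_bytes() */
        uint64_t tlength = (10 * 1000 * rate / 1000000) * frame_size; /* ~10 ms -> 1764 bytes */
        uint64_t minreq  = (5 * 1000 * rate / 1000000) * frame_size;  /*  ~5 ms ->  880 bytes */

        printf("tlength=%llu minreq=%llu\n",
               (unsigned long long)tlength, (unsigned long long)minreq);
        return 0;
    }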


@@ -38,17 +38,31 @@
#define AUDIO_CAP "sdl"
#include "audio_int.h"
#define USE_SEMAPHORE (SDL_MAJOR_VERSION < 2)
typedef struct SDLVoiceOut {
HWVoiceOut hw;
int live;
#if USE_SEMAPHORE
int rpos;
#endif
int decr;
} SDLVoiceOut;
static struct {
int nb_samples;
} conf = {
.nb_samples = 1024
};
static struct SDLAudioState {
int exit;
#if USE_SEMAPHORE
SDL_mutex *mutex;
SDL_sem *sem;
#endif
int initialized;
bool driver_created;
Audiodev *dev;
} glob_sdl;
typedef struct SDLAudioState SDLAudioState;
@@ -63,19 +77,79 @@ static void GCC_FMT_ATTR (1, 2) sdl_logerr (const char *fmt, ...)
AUD_log (AUDIO_CAP, "Reason: %s\n", SDL_GetError ());
}
static int aud_to_sdlfmt (AudioFormat fmt)
static int sdl_lock (SDLAudioState *s, const char *forfn)
{
#if USE_SEMAPHORE
if (SDL_LockMutex (s->mutex)) {
sdl_logerr ("SDL_LockMutex for %s failed\n", forfn);
return -1;
}
#else
SDL_LockAudio();
#endif
return 0;
}
static int sdl_unlock (SDLAudioState *s, const char *forfn)
{
#if USE_SEMAPHORE
if (SDL_UnlockMutex (s->mutex)) {
sdl_logerr ("SDL_UnlockMutex for %s failed\n", forfn);
return -1;
}
#else
SDL_UnlockAudio();
#endif
return 0;
}
static int sdl_post (SDLAudioState *s, const char *forfn)
{
#if USE_SEMAPHORE
if (SDL_SemPost (s->sem)) {
sdl_logerr ("SDL_SemPost for %s failed\n", forfn);
return -1;
}
#endif
return 0;
}
#if USE_SEMAPHORE
static int sdl_wait (SDLAudioState *s, const char *forfn)
{
if (SDL_SemWait (s->sem)) {
sdl_logerr ("SDL_SemWait for %s failed\n", forfn);
return -1;
}
return 0;
}
#endif
static int sdl_unlock_and_post (SDLAudioState *s, const char *forfn)
{
if (sdl_unlock (s, forfn)) {
return -1;
}
return sdl_post (s, forfn);
}
static int aud_to_sdlfmt (audfmt_e fmt)
{
switch (fmt) {
case AUDIO_FORMAT_S8:
case AUD_FMT_S8:
return AUDIO_S8;
case AUDIO_FORMAT_U8:
case AUD_FMT_U8:
return AUDIO_U8;
case AUDIO_FORMAT_S16:
case AUD_FMT_S16:
return AUDIO_S16LSB;
case AUDIO_FORMAT_U16:
case AUD_FMT_U16:
return AUDIO_U16LSB;
default:
@@ -87,37 +161,37 @@ static int aud_to_sdlfmt (AudioFormat fmt)
}
}
static int sdl_to_audfmt(int sdlfmt, AudioFormat *fmt, int *endianness)
static int sdl_to_audfmt(int sdlfmt, audfmt_e *fmt, int *endianness)
{
switch (sdlfmt) {
case AUDIO_S8:
*endianness = 0;
*fmt = AUDIO_FORMAT_S8;
*fmt = AUD_FMT_S8;
break;
case AUDIO_U8:
*endianness = 0;
*fmt = AUDIO_FORMAT_U8;
*fmt = AUD_FMT_U8;
break;
case AUDIO_S16LSB:
*endianness = 0;
*fmt = AUDIO_FORMAT_S16;
*fmt = AUD_FMT_S16;
break;
case AUDIO_U16LSB:
*endianness = 0;
*fmt = AUDIO_FORMAT_U16;
*fmt = AUD_FMT_U16;
break;
case AUDIO_S16MSB:
*endianness = 1;
*fmt = AUDIO_FORMAT_S16;
*fmt = AUD_FMT_S16;
break;
case AUDIO_U16MSB:
*endianness = 1;
*fmt = AUDIO_FORMAT_U16;
*fmt = AUD_FMT_U16;
break;
default:
@@ -169,9 +243,9 @@ static int sdl_open (SDL_AudioSpec *req, SDL_AudioSpec *obt)
static void sdl_close (SDLAudioState *s)
{
if (s->initialized) {
SDL_LockAudio();
sdl_lock (s, "sdl_close");
s->exit = 1;
SDL_UnlockAudio();
sdl_unlock_and_post (s, "sdl_close");
SDL_PauseAudio (1);
SDL_CloseAudio ();
s->initialized = 0;
@@ -184,36 +258,76 @@ static void sdl_callback (void *opaque, Uint8 *buf, int len)
SDLAudioState *s = &glob_sdl;
HWVoiceOut *hw = &sdl->hw;
int samples = len >> hw->info.shift;
int to_mix, decr;
if (s->exit || !sdl->live) {
if (s->exit) {
return;
}
/* dolog ("in callback samples=%d live=%d\n", samples, sdl->live); */
while (samples) {
int to_mix, decr;
to_mix = audio_MIN(samples, sdl->live);
decr = to_mix;
while (to_mix) {
int chunk = audio_MIN(to_mix, hw->samples - hw->rpos);
struct st_sample *src = hw->mix_buf + hw->rpos;
/* dolog ("in callback samples=%d\n", samples); */
#if USE_SEMAPHORE
sdl_wait (s, "sdl_callback");
if (s->exit) {
return;
}
/* dolog ("in callback to_mix %d, chunk %d\n", to_mix, chunk); */
hw->clip(buf, src, chunk);
hw->rpos = (hw->rpos + chunk) % hw->samples;
to_mix -= chunk;
buf += chunk << hw->info.shift;
if (sdl_lock (s, "sdl_callback")) {
return;
}
if (audio_bug (AUDIO_FUNC, sdl->live < 0 || sdl->live > hw->samples)) {
dolog ("sdl->live=%d hw->samples=%d\n",
sdl->live, hw->samples);
return;
}
if (!sdl->live) {
goto again;
}
#else
if (s->exit || !sdl->live) {
break;
}
#endif
/* dolog ("in callback live=%d\n", live); */
to_mix = audio_MIN (samples, sdl->live);
decr = to_mix;
while (to_mix) {
int chunk = audio_MIN (to_mix, hw->samples - hw->rpos);
struct st_sample *src = hw->mix_buf + hw->rpos;
/* dolog ("in callback to_mix %d, chunk %d\n", to_mix, chunk); */
hw->clip (buf, src, chunk);
#if USE_SEMAPHORE
sdl->rpos = (sdl->rpos + chunk) % hw->samples;
#else
hw->rpos = (hw->rpos + chunk) % hw->samples;
#endif
to_mix -= chunk;
buf += chunk << hw->info.shift;
}
samples -= decr;
sdl->live -= decr;
sdl->decr += decr;
#if USE_SEMAPHORE
again:
if (sdl_unlock (s, "sdl_callback")) {
return;
}
#endif
}
samples -= decr;
sdl->live -= decr;
sdl->decr += decr;
/* dolog ("done len=%d\n", len); */
#if (SDL_MAJOR_VERSION >= 2)
/* SDL2 does not clear the remaining buffer for us, so do it on our own */
if (samples) {
memset(buf, 0, samples << hw->info.shift);
}
#endif
}
static int sdl_write_out (SWVoiceOut *sw, void *buf, int len)
@@ -225,8 +339,11 @@ static int sdl_run_out (HWVoiceOut *hw, int live)
{
int decr;
SDLVoiceOut *sdl = (SDLVoiceOut *) hw;
SDLAudioState *s = &glob_sdl;
SDL_LockAudio();
if (sdl_lock (s, "sdl_run_out")) {
return 0;
}
if (sdl->decr > live) {
ldebug ("sdl->decr %d live %d sdl->live %d\n",
@@ -238,10 +355,19 @@ static int sdl_run_out (HWVoiceOut *hw, int live)
decr = audio_MIN (sdl->decr, live);
sdl->decr -= decr;
#if USE_SEMAPHORE
sdl->live = live - decr;
hw->rpos = sdl->rpos;
#else
sdl->live = live;
#endif
SDL_UnlockAudio();
if (sdl->live > 0) {
sdl_unlock_and_post (s, "sdl_run_out");
}
else {
sdl_unlock (s, "sdl_run_out");
}
return decr;
}
@@ -260,13 +386,13 @@ static int sdl_init_out(HWVoiceOut *hw, struct audsettings *as,
SDL_AudioSpec req, obt;
int endianness;
int err;
AudioFormat effective_fmt;
audfmt_e effective_fmt;
struct audsettings obt_as;
req.freq = as->freq;
req.format = aud_to_sdlfmt (as->fmt);
req.channels = as->nchannels;
req.samples = audio_buffer_samples(s->dev->u.sdl.out, as, 11610);
req.samples = conf.nb_samples;
req.callback = sdl_callback;
req.userdata = sdl;
@@ -310,7 +436,7 @@ static int sdl_ctl_out (HWVoiceOut *hw, int cmd, ...)
return 0;
}
static void *sdl_audio_init(Audiodev *dev)
static void *sdl_audio_init (void)
{
SDLAudioState *s = &glob_sdl;
if (s->driver_created) {
@@ -323,8 +449,24 @@ static void *sdl_audio_init(Audiodev *dev)
return NULL;
}
#if USE_SEMAPHORE
s->mutex = SDL_CreateMutex ();
if (!s->mutex) {
sdl_logerr ("Failed to create SDL mutex\n");
SDL_QuitSubSystem (SDL_INIT_AUDIO);
return NULL;
}
s->sem = SDL_CreateSemaphore (0);
if (!s->sem) {
sdl_logerr ("Failed to create SDL semaphore\n");
SDL_DestroyMutex (s->mutex);
SDL_QuitSubSystem (SDL_INIT_AUDIO);
return NULL;
}
#endif
s->driver_created = true;
s->dev = dev;
return s;
}
@@ -332,11 +474,24 @@ static void sdl_audio_fini (void *opaque)
{
SDLAudioState *s = opaque;
sdl_close (s);
#if USE_SEMAPHORE
SDL_DestroySemaphore (s->sem);
SDL_DestroyMutex (s->mutex);
#endif
SDL_QuitSubSystem (SDL_INIT_AUDIO);
s->driver_created = false;
s->dev = NULL;
}
static struct audio_option sdl_options[] = {
{
.name = "SAMPLES",
.tag = AUD_OPT_INT,
.valp = &conf.nb_samples,
.descr = "Size of SDL buffer in samples"
},
{ /* End of list */ }
};
static struct audio_pcm_ops sdl_pcm_ops = {
.init_out = sdl_init_out,
.fini_out = sdl_fini_out,
@@ -345,9 +500,10 @@ static struct audio_pcm_ops sdl_pcm_ops = {
.ctl_out = sdl_ctl_out,
};
static struct audio_driver sdl_audio_driver = {
struct audio_driver sdl_audio_driver = {
.name = "sdl",
.descr = "SDL http://www.libsdl.org",
.options = sdl_options,
.init = sdl_audio_init,
.fini = sdl_audio_fini,
.pcm_ops = &sdl_pcm_ops,
@@ -357,9 +513,3 @@ static struct audio_driver sdl_audio_driver = {
.voice_size_out = sizeof (SDLVoiceOut),
.voice_size_in = 0
};
static void register_audio_sdl(void)
{
audio_driver_register(&sdl_audio_driver);
}
type_init(register_audio_sdl);


@@ -77,7 +77,7 @@ static const SpiceRecordInterface record_sif = {
.base.minor_version = SPICE_INTERFACE_RECORD_MINOR,
};
static void *spice_audio_init(Audiodev *dev)
static void *spice_audio_init (void)
{
if (!using_spice) {
return NULL;
@@ -130,7 +130,7 @@ static int line_out_init(HWVoiceOut *hw, struct audsettings *as,
settings.freq = SPICE_INTERFACE_PLAYBACK_FREQ;
#endif
settings.nchannels = SPICE_INTERFACE_PLAYBACK_CHAN;
settings.fmt = AUDIO_FORMAT_S16;
settings.fmt = AUD_FMT_S16;
settings.endianness = AUDIO_HOST_ENDIANNESS;
audio_pcm_init_info (&hw->info, &settings);
@@ -258,7 +258,7 @@ static int line_in_init(HWVoiceIn *hw, struct audsettings *as, void *drv_opaque)
settings.freq = SPICE_INTERFACE_RECORD_FREQ;
#endif
settings.nchannels = SPICE_INTERFACE_RECORD_CHAN;
settings.fmt = AUDIO_FORMAT_S16;
settings.fmt = AUD_FMT_S16;
settings.endianness = AUDIO_HOST_ENDIANNESS;
audio_pcm_init_info (&hw->info, &settings);
@@ -373,6 +373,10 @@ static int line_in_ctl (HWVoiceIn *hw, int cmd, ...)
return 0;
}
static struct audio_option audio_options[] = {
{ /* end of list */ },
};
static struct audio_pcm_ops audio_callbacks = {
.init_out = line_out_init,
.fini_out = line_out_fini,
@@ -387,9 +391,10 @@ static struct audio_pcm_ops audio_callbacks = {
.ctl_in = line_in_ctl,
};
static struct audio_driver spice_audio_driver = {
struct audio_driver spice_audio_driver = {
.name = "spice",
.descr = "spice audio driver",
.options = audio_options,
.init = spice_audio_init,
.fini = spice_audio_fini,
.pcm_ops = &audio_callbacks,
@@ -406,9 +411,3 @@ void qemu_spice_audio_init (void)
{
spice_audio_driver.can_be_default = 1;
}
static void register_audio_spice(void)
{
audio_driver_register(&spice_audio_driver);
}
type_init(register_audio_spice);


@@ -1,6 +1,6 @@
# See docs/devel/tracing.txt for syntax documentation.
# alsaaudio.c
# audio/alsaaudio.c
alsa_revents(int revents) "revents = %d"
alsa_pollout(int i, int fd) "i = %d fd = %d"
alsa_set_handler(int events, int index, int fd, int err) "events=0x%x index=%d fd=%d err=%d"
@@ -12,11 +12,6 @@ alsa_resume_out(void) "Resuming suspended output stream"
alsa_resume_in(void) "Resuming suspended input stream"
alsa_no_frames(int state) "No frames available and ALSA state is %d"
# ossaudio.c
# audio/ossaudio.c
oss_version(int version) "OSS version = 0x%x"
oss_invalid_available_size(int size, int bufsize) "Invalid available size, size=%d bufsize=%d"
# audio.c
audio_timer_start(int interval) "interval %d ms"
audio_timer_stop(void) ""
audio_timer_delayed(int interval) "interval %d ms"


@@ -24,7 +24,6 @@
#include "qemu/osdep.h"
#include "qemu/host-utils.h"
#include "qemu/timer.h"
#include "qapi/opts-visitor.h"
#include "audio.h"
#define AUDIO_CAP "wav"
@@ -38,6 +37,11 @@ typedef struct WAVVoiceOut {
int total_samples;
} WAVVoiceOut;
typedef struct {
struct audsettings settings;
const char *wav_path;
} WAVConf;
static int wav_run_out (HWVoiceOut *hw, int live)
{
WAVVoiceOut *wav = (WAVVoiceOut *) hw;
@@ -108,30 +112,25 @@ static int wav_init_out(HWVoiceOut *hw, struct audsettings *as,
0x02, 0x00, 0x44, 0xac, 0x00, 0x00, 0x10, 0xb1, 0x02, 0x00, 0x04,
0x00, 0x10, 0x00, 0x64, 0x61, 0x74, 0x61, 0x00, 0x00, 0x00, 0x00
};
Audiodev *dev = drv_opaque;
AudiodevWavOptions *wopts = &dev->u.wav;
struct audsettings wav_as = audiodev_to_audsettings(dev->u.wav.out);
const char *wav_path = wopts->has_path ? wopts->path : "qemu.wav";
WAVConf *conf = drv_opaque;
struct audsettings wav_as = conf->settings;
stereo = wav_as.nchannels == 2;
switch (wav_as.fmt) {
case AUDIO_FORMAT_S8:
case AUDIO_FORMAT_U8:
case AUD_FMT_S8:
case AUD_FMT_U8:
bits16 = 0;
break;
case AUDIO_FORMAT_S16:
case AUDIO_FORMAT_U16:
case AUD_FMT_S16:
case AUD_FMT_U16:
bits16 = 1;
break;
case AUDIO_FORMAT_S32:
case AUDIO_FORMAT_U32:
case AUD_FMT_S32:
case AUD_FMT_U32:
dolog ("WAVE files can not handle 32bit formats\n");
return -1;
default:
abort();
}
hdr[34] = bits16 ? 0x10 : 0x08;
@@ -140,7 +139,7 @@ static int wav_init_out(HWVoiceOut *hw, struct audsettings *as,
audio_pcm_init_info (&hw->info, &wav_as);
hw->samples = 1024;
wav->pcm_buf = audio_calloc(__func__, hw->samples, 1 << hw->info.shift);
wav->pcm_buf = audio_calloc (AUDIO_FUNC, hw->samples, 1 << hw->info.shift);
if (!wav->pcm_buf) {
dolog ("Could not allocate buffer (%d bytes)\n",
hw->samples << hw->info.shift);
@@ -152,10 +151,10 @@ static int wav_init_out(HWVoiceOut *hw, struct audsettings *as,
le_store (hdr + 28, hw->info.freq << (bits16 + stereo), 4);
le_store (hdr + 32, 1 << (bits16 + stereo), 2);
wav->f = fopen(wav_path, "wb");
wav->f = fopen (conf->wav_path, "wb");
if (!wav->f) {
dolog ("Failed to open wave file `%s'\nReason: %s\n",
wav_path, strerror(errno));
conf->wav_path, strerror (errno));
g_free (wav->pcm_buf);
wav->pcm_buf = NULL;
return -1;
@@ -223,17 +222,54 @@ static int wav_ctl_out (HWVoiceOut *hw, int cmd, ...)
return 0;
}
static void *wav_audio_init(Audiodev *dev)
static WAVConf glob_conf = {
.settings.freq = 44100,
.settings.nchannels = 2,
.settings.fmt = AUD_FMT_S16,
.wav_path = "qemu.wav"
};
static void *wav_audio_init (void)
{
assert(dev->driver == AUDIODEV_DRIVER_WAV);
return dev;
WAVConf *conf = g_malloc(sizeof(WAVConf));
*conf = glob_conf;
return conf;
}
static void wav_audio_fini (void *opaque)
{
ldebug ("wav_fini");
g_free(opaque);
}
static struct audio_option wav_options[] = {
{
.name = "FREQUENCY",
.tag = AUD_OPT_INT,
.valp = &glob_conf.settings.freq,
.descr = "Frequency"
},
{
.name = "FORMAT",
.tag = AUD_OPT_FMT,
.valp = &glob_conf.settings.fmt,
.descr = "Format"
},
{
.name = "DAC_FIXED_CHANNELS",
.tag = AUD_OPT_INT,
.valp = &glob_conf.settings.nchannels,
.descr = "Number of channels (1 - mono, 2 - stereo)"
},
{
.name = "PATH",
.tag = AUD_OPT_STR,
.valp = &glob_conf.wav_path,
.descr = "Path to wave file"
},
{ /* End of list */ }
};
static struct audio_pcm_ops wav_pcm_ops = {
.init_out = wav_init_out,
.fini_out = wav_fini_out,
@@ -242,9 +278,10 @@ static struct audio_pcm_ops wav_pcm_ops = {
.ctl_out = wav_ctl_out,
};
static struct audio_driver wav_audio_driver = {
struct audio_driver wav_audio_driver = {
.name = "wav",
.descr = "WAV renderer http://wikipedia.org/wiki/WAV",
.options = wav_options,
.init = wav_audio_init,
.fini = wav_audio_fini,
.pcm_ops = &wav_pcm_ops,
@@ -254,9 +291,3 @@ static struct audio_driver wav_audio_driver = {
.voice_size_out = sizeof (WAVVoiceOut),
.voice_size_in = 0
};
static void register_audio_wav(void)
{
audio_driver_register(&wav_audio_driver);
}
type_init(register_audio_wav);
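The shifts in the WAV header setup above pack the byte-rate and block-align fields. A small worked sketch under the glob_conf defaults shown above (44100 Hz, 2 channels, 16-bit samples, so stereo = 1 and bits16 = 1):

    #include <stdio.h>

    int main(void)
    {
        int freq = 44100, stereo = 1, bits16 = 1; /* defaults from glob_conf */

        int block_align = 1 << (bits16 + stereo);    /* bytes per frame: 4     */
        int byte_rate   = freq << (bits16 + stereo); /* 44100 * 4 = 176400 B/s */
        int bits        = bits16 ? 16 : 8;           /* hdr[34] = 0x10 or 0x08 */

        printf("block_align=%d byte_rate=%d bits=%d\n",
               block_align, byte_rate, bits);
        return 0;
    }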


@@ -1,7 +1,6 @@
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "monitor/monitor.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "audio.h"
@@ -38,29 +37,30 @@ static void wav_destroy (void *opaque)
uint8_t dlen[4];
uint32_t datalen = wav->bytes;
uint32_t rifflen = datalen + 36;
Monitor *mon = cur_mon;
if (wav->f) {
le_store (rlen, rifflen, 4);
le_store (dlen, datalen, 4);
if (fseek (wav->f, 4, SEEK_SET)) {
error_report("wav_destroy: rlen fseek failed: %s",
strerror(errno));
monitor_printf (mon, "wav_destroy: rlen fseek failed\nReason: %s\n",
strerror (errno));
goto doclose;
}
if (fwrite (rlen, 4, 1, wav->f) != 1) {
error_report("wav_destroy: rlen fwrite failed: %s",
strerror(errno));
monitor_printf (mon, "wav_destroy: rlen fwrite failed\nReason %s\n",
strerror (errno));
goto doclose;
}
if (fseek (wav->f, 32, SEEK_CUR)) {
error_report("wav_destroy: dlen fseek failed: %s",
strerror(errno));
monitor_printf (mon, "wav_destroy: dlen fseek failed\nReason %s\n",
strerror (errno));
goto doclose;
}
if (fwrite (dlen, 1, 4, wav->f) != 4) {
error_report("wav_destroy: dlen fwrite failed: %s",
strerror(errno));
monitor_printf (mon, "wav_destroy: dlen fwrite failed\nReason %s\n",
strerror (errno));
goto doclose;
}
doclose:
@@ -77,7 +77,8 @@ static void wav_capture (void *opaque, void *buf, int size)
WAVState *wav = opaque;
if (fwrite (buf, size, 1, wav->f) != 1) {
error_report("wav_capture: fwrite error: %s", strerror(errno));
monitor_printf (cur_mon, "wav_capture: fwrite error\nReason: %s",
strerror (errno));
}
wav->bytes += size;
}
@@ -108,6 +109,7 @@ static struct capture_ops wav_capture_ops = {
int wav_start_capture (CaptureState *s, const char *path, int freq,
int bits, int nchannels)
{
Monitor *mon = cur_mon;
WAVState *wav;
uint8_t hdr[] = {
0x52, 0x49, 0x46, 0x46, 0x00, 0x00, 0x00, 0x00, 0x57, 0x41, 0x56,
@@ -121,13 +123,13 @@ int wav_start_capture (CaptureState *s, const char *path, int freq,
CaptureVoiceOut *cap;
if (bits != 8 && bits != 16) {
error_report("incorrect bit count %d, must be 8 or 16", bits);
monitor_printf (mon, "incorrect bit count %d, must be 8 or 16\n", bits);
return -1;
}
if (nchannels != 1 && nchannels != 2) {
error_report("incorrect channel count %d, must be 1 or 2",
nchannels);
monitor_printf (mon, "incorrect channel count %d, must be 1 or 2\n",
nchannels);
return -1;
}
@@ -136,7 +138,7 @@ int wav_start_capture (CaptureState *s, const char *path, int freq,
as.freq = freq;
as.nchannels = 1 << stereo;
as.fmt = bits16 ? AUDIO_FORMAT_S16 : AUDIO_FORMAT_U8;
as.fmt = bits16 ? AUD_FMT_S16 : AUD_FMT_U8;
as.endianness = 0;
ops.notify = wav_notify;
@@ -155,8 +157,8 @@ int wav_start_capture (CaptureState *s, const char *path, int freq,
wav->f = fopen (path, "wb");
if (!wav->f) {
error_report("Failed to open wave file `%s': %s",
path, strerror(errno));
monitor_printf (mon, "Failed to open wave file `%s'\nReason: %s\n",
path, strerror (errno));
g_free (wav);
return -1;
}
@@ -167,13 +169,14 @@ int wav_start_capture (CaptureState *s, const char *path, int freq,
wav->freq = freq;
if (fwrite (hdr, sizeof (hdr), 1, wav->f) != 1) {
error_report("Failed to write header: %s", strerror(errno));
monitor_printf (mon, "Failed to write header\nReason: %s\n",
strerror (errno));
goto error_free;
}
cap = AUD_add_capture (&as, &ops, wav);
if (!cap) {
error_report("Failed to add audio capture");
monitor_printf (mon, "Failed to add audio capture\n");
goto error_free;
}
@@ -185,7 +188,8 @@ int wav_start_capture (CaptureState *s, const char *path, int freq,
error_free:
g_free (wav->path);
if (fclose (wav->f)) {
error_report("Failed to close wave file: %s", strerror(errno));
monitor_printf (mon, "Failed to close wave file\nReason: %s\n",
strerror (errno));
}
g_free (wav);
return -1;


@@ -1,7 +0,0 @@
authz-obj-y += base.o
authz-obj-y += simple.o
authz-obj-y += list.o
authz-obj-y += listfile.o
authz-obj-$(CONFIG_AUTH_PAM) += pamacct.o
pamacct.o-libs = -lpam


@@ -1,82 +0,0 @@
/*
* QEMU authorization framework base class
*
* Copyright (c) 2018 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*
*/
#include "qemu/osdep.h"
#include "authz/base.h"
#include "authz/trace.h"
bool qauthz_is_allowed(QAuthZ *authz,
const char *identity,
Error **errp)
{
QAuthZClass *cls = QAUTHZ_GET_CLASS(authz);
bool allowed;
allowed = cls->is_allowed(authz, identity, errp);
trace_qauthz_is_allowed(authz, identity, allowed);
return allowed;
}
bool qauthz_is_allowed_by_id(const char *authzid,
const char *identity,
Error **errp)
{
QAuthZ *authz;
Object *obj;
Object *container;
container = object_get_objects_root();
obj = object_resolve_path_component(container,
authzid);
if (!obj) {
error_setg(errp, "Cannot find QAuthZ object ID %s",
authzid);
return false;
}
if (!object_dynamic_cast(obj, TYPE_QAUTHZ)) {
error_setg(errp, "Object '%s' is not a QAuthZ subclass",
authzid);
return false;
}
authz = QAUTHZ(obj);
return qauthz_is_allowed(authz, identity, errp);
}
static const TypeInfo authz_info = {
.parent = TYPE_OBJECT,
.name = TYPE_QAUTHZ,
.instance_size = sizeof(QAuthZ),
.class_size = sizeof(QAuthZClass),
.abstract = true,
};
static void qauthz_register_types(void)
{
type_register_static(&authz_info);
}
type_init(qauthz_register_types)


@@ -1,271 +0,0 @@
/*
* QEMU access control list authorization driver
*
* Copyright (c) 2018 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*
*/
#include "qemu/osdep.h"
#include "authz/list.h"
#include "authz/trace.h"
#include "qom/object_interfaces.h"
#include "qapi/qapi-visit-authz.h"
static bool qauthz_list_is_allowed(QAuthZ *authz,
const char *identity,
Error **errp)
{
QAuthZList *lauthz = QAUTHZ_LIST(authz);
QAuthZListRuleList *rules = lauthz->rules;
while (rules) {
QAuthZListRule *rule = rules->value;
QAuthZListFormat format = rule->has_format ? rule->format :
QAUTHZ_LIST_FORMAT_EXACT;
trace_qauthz_list_check_rule(authz, rule->match, identity,
format, rule->policy);
switch (format) {
case QAUTHZ_LIST_FORMAT_EXACT:
if (g_str_equal(rule->match, identity)) {
return rule->policy == QAUTHZ_LIST_POLICY_ALLOW;
}
break;
case QAUTHZ_LIST_FORMAT_GLOB:
if (g_pattern_match_simple(rule->match, identity)) {
return rule->policy == QAUTHZ_LIST_POLICY_ALLOW;
}
break;
default:
g_warn_if_reached();
return false;
}
rules = rules->next;
}
trace_qauthz_list_default_policy(authz, identity, lauthz->policy);
return lauthz->policy == QAUTHZ_LIST_POLICY_ALLOW;
}
static void
qauthz_list_prop_set_policy(Object *obj,
int value,
Error **errp G_GNUC_UNUSED)
{
QAuthZList *lauthz = QAUTHZ_LIST(obj);
lauthz->policy = value;
}
static int
qauthz_list_prop_get_policy(Object *obj,
Error **errp G_GNUC_UNUSED)
{
QAuthZList *lauthz = QAUTHZ_LIST(obj);
return lauthz->policy;
}
static void
qauthz_list_prop_get_rules(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
QAuthZList *lauthz = QAUTHZ_LIST(obj);
visit_type_QAuthZListRuleList(v, name, &lauthz->rules, errp);
}
static void
qauthz_list_prop_set_rules(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
QAuthZList *lauthz = QAUTHZ_LIST(obj);
QAuthZListRuleList *oldrules;
oldrules = lauthz->rules;
visit_type_QAuthZListRuleList(v, name, &lauthz->rules, errp);
qapi_free_QAuthZListRuleList(oldrules);
}
static void
qauthz_list_finalize(Object *obj)
{
QAuthZList *lauthz = QAUTHZ_LIST(obj);
qapi_free_QAuthZListRuleList(lauthz->rules);
}
static void
qauthz_list_class_init(ObjectClass *oc, void *data)
{
QAuthZClass *authz = QAUTHZ_CLASS(oc);
object_class_property_add_enum(oc, "policy",
"QAuthZListPolicy",
&QAuthZListPolicy_lookup,
qauthz_list_prop_get_policy,
qauthz_list_prop_set_policy,
NULL);
object_class_property_add(oc, "rules", "QAuthZListRule",
qauthz_list_prop_get_rules,
qauthz_list_prop_set_rules,
NULL, NULL, NULL);
authz->is_allowed = qauthz_list_is_allowed;
}
QAuthZList *qauthz_list_new(const char *id,
QAuthZListPolicy policy,
Error **errp)
{
return QAUTHZ_LIST(
object_new_with_props(TYPE_QAUTHZ_LIST,
object_get_objects_root(),
id, errp,
"policy", QAuthZListPolicy_str(policy),
NULL));
}
ssize_t qauthz_list_append_rule(QAuthZList *auth,
const char *match,
QAuthZListPolicy policy,
QAuthZListFormat format,
Error **errp)
{
QAuthZListRule *rule;
QAuthZListRuleList *rules, *tmp;
size_t i = 0;
rule = g_new0(QAuthZListRule, 1);
rule->policy = policy;
rule->match = g_strdup(match);
rule->format = format;
rule->has_format = true;
tmp = g_new0(QAuthZListRuleList, 1);
tmp->value = rule;
rules = auth->rules;
if (rules) {
while (rules->next) {
i++;
rules = rules->next;
}
rules->next = tmp;
return i + 1;
} else {
auth->rules = tmp;
return 0;
}
}
ssize_t qauthz_list_insert_rule(QAuthZList *auth,
const char *match,
QAuthZListPolicy policy,
QAuthZListFormat format,
size_t index,
Error **errp)
{
QAuthZListRule *rule;
QAuthZListRuleList *rules, *tmp;
size_t i = 0;
rule = g_new0(QAuthZListRule, 1);
rule->policy = policy;
rule->match = g_strdup(match);
rule->format = format;
rule->has_format = true;
tmp = g_new0(QAuthZListRuleList, 1);
tmp->value = rule;
rules = auth->rules;
if (rules && index > 0) {
while (rules->next && i < (index - 1)) {
i++;
rules = rules->next;
}
tmp->next = rules->next;
rules->next = tmp;
return i + 1;
} else {
tmp->next = auth->rules;
auth->rules = tmp;
return 0;
}
}
ssize_t qauthz_list_delete_rule(QAuthZList *auth, const char *match)
{
QAuthZListRule *rule;
QAuthZListRuleList *rules, *prev;
size_t i = 0;
prev = NULL;
rules = auth->rules;
while (rules) {
rule = rules->value;
if (g_str_equal(rule->match, match)) {
if (prev) {
prev->next = rules->next;
} else {
auth->rules = rules->next;
}
rules->next = NULL;
qapi_free_QAuthZListRuleList(rules);
return i;
}
prev = rules;
rules = rules->next;
i++;
}
return -1;
}
static const TypeInfo qauthz_list_info = {
.parent = TYPE_QAUTHZ,
.name = TYPE_QAUTHZ_LIST,
.instance_size = sizeof(QAuthZList),
.instance_finalize = qauthz_list_finalize,
.class_size = sizeof(QAuthZListClass),
.class_init = qauthz_list_class_init,
.interfaces = (InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
};
static void
qauthz_list_register_types(void)
{
type_register_static(&qauthz_list_info);
}
type_init(qauthz_list_register_types);


@@ -1,283 +0,0 @@
/*
* QEMU access control list file authorization driver
*
* Copyright (c) 2018 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*
*/
#include "qemu/osdep.h"
#include "authz/listfile.h"
#include "authz/trace.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/sockets.h"
#include "qemu/filemonitor.h"
#include "qom/object_interfaces.h"
#include "qapi/qapi-visit-authz.h"
#include "qapi/qmp/qjson.h"
#include "qapi/qmp/qobject.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qobject-input-visitor.h"
static bool
qauthz_list_file_is_allowed(QAuthZ *authz,
const char *identity,
Error **errp)
{
QAuthZListFile *fauthz = QAUTHZ_LIST_FILE(authz);
if (fauthz->list) {
return qauthz_is_allowed(fauthz->list, identity, errp);
}
return false;
}
static QAuthZ *
qauthz_list_file_load(QAuthZListFile *fauthz, Error **errp)
{
GError *err = NULL;
gchar *content = NULL;
gsize len;
QObject *obj = NULL;
QDict *pdict;
Visitor *v = NULL;
QAuthZ *ret = NULL;
trace_qauthz_list_file_load(fauthz, fauthz->filename);
if (!g_file_get_contents(fauthz->filename, &content, &len, &err)) {
error_setg(errp, "Unable to read '%s': %s",
fauthz->filename, err->message);
goto cleanup;
}
obj = qobject_from_json(content, errp);
if (!obj) {
goto cleanup;
}
pdict = qobject_to(QDict, obj);
if (!pdict) {
error_setg(errp, QERR_INVALID_PARAMETER_TYPE, "obj", "dict");
goto cleanup;
}
v = qobject_input_visitor_new(obj);
ret = (QAuthZ *)user_creatable_add_type(TYPE_QAUTHZ_LIST,
NULL, pdict, v, errp);
cleanup:
visit_free(v);
qobject_unref(obj);
if (err) {
g_error_free(err);
}
g_free(content);
return ret;
}
static void
qauthz_list_file_event(int64_t wd G_GNUC_UNUSED,
QFileMonitorEvent ev G_GNUC_UNUSED,
const char *name G_GNUC_UNUSED,
void *opaque)
{
QAuthZListFile *fauthz = opaque;
Error *err = NULL;
if (ev != QFILE_MONITOR_EVENT_MODIFIED &&
ev != QFILE_MONITOR_EVENT_CREATED) {
return;
}
object_unref(OBJECT(fauthz->list));
fauthz->list = qauthz_list_file_load(fauthz, &err);
trace_qauthz_list_file_refresh(fauthz,
fauthz->filename, fauthz->list ? 1 : 0);
if (!fauthz->list) {
error_report_err(err);
}
}
static void
qauthz_list_file_complete(UserCreatable *uc, Error **errp)
{
QAuthZListFile *fauthz = QAUTHZ_LIST_FILE(uc);
gchar *dir = NULL, *file = NULL;
fauthz->list = qauthz_list_file_load(fauthz, errp);
if (!fauthz->refresh) {
return;
}
fauthz->file_monitor = qemu_file_monitor_new(errp);
if (!fauthz->file_monitor) {
return;
}
dir = g_path_get_dirname(fauthz->filename);
if (g_str_equal(dir, ".")) {
error_setg(errp, "Filename must be an absolute path");
goto cleanup;
}
file = g_path_get_basename(fauthz->filename);
if (g_str_equal(file, ".")) {
error_setg(errp, "Path has no trailing filename component");
goto cleanup;
}
fauthz->file_watch = qemu_file_monitor_add_watch(
fauthz->file_monitor, dir, file,
qauthz_list_file_event, fauthz, errp);
if (fauthz->file_watch < 0) {
goto cleanup;
}
cleanup:
g_free(file);
g_free(dir);
}
static void
qauthz_list_file_prop_set_filename(Object *obj,
const char *value,
Error **errp G_GNUC_UNUSED)
{
QAuthZListFile *fauthz = QAUTHZ_LIST_FILE(obj);
g_free(fauthz->filename);
fauthz->filename = g_strdup(value);
}
static char *
qauthz_list_file_prop_get_filename(Object *obj,
Error **errp G_GNUC_UNUSED)
{
QAuthZListFile *fauthz = QAUTHZ_LIST_FILE(obj);
return g_strdup(fauthz->filename);
}
static void
qauthz_list_file_prop_set_refresh(Object *obj,
bool value,
Error **errp G_GNUC_UNUSED)
{
QAuthZListFile *fauthz = QAUTHZ_LIST_FILE(obj);
fauthz->refresh = value;
}
static bool
qauthz_list_file_prop_get_refresh(Object *obj,
Error **errp G_GNUC_UNUSED)
{
QAuthZListFile *fauthz = QAUTHZ_LIST_FILE(obj);
return fauthz->refresh;
}
static void
qauthz_list_file_finalize(Object *obj)
{
QAuthZListFile *fauthz = QAUTHZ_LIST_FILE(obj);
object_unref(OBJECT(fauthz->list));
g_free(fauthz->filename);
qemu_file_monitor_free(fauthz->file_monitor);
}
static void
qauthz_list_file_class_init(ObjectClass *oc, void *data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
QAuthZClass *authz = QAUTHZ_CLASS(oc);
ucc->complete = qauthz_list_file_complete;
object_class_property_add_str(oc, "filename",
qauthz_list_file_prop_get_filename,
qauthz_list_file_prop_set_filename,
NULL);
object_class_property_add_bool(oc, "refresh",
qauthz_list_file_prop_get_refresh,
qauthz_list_file_prop_set_refresh,
NULL);
authz->is_allowed = qauthz_list_file_is_allowed;
}
static void
qauthz_list_file_init(Object *obj)
{
QAuthZListFile *authz = QAUTHZ_LIST_FILE(obj);
authz->file_watch = -1;
#ifdef CONFIG_INOTIFY1
authz->refresh = TRUE;
#endif
}
QAuthZListFile *qauthz_list_file_new(const char *id,
const char *filename,
bool refresh,
Error **errp)
{
return QAUTHZ_LIST_FILE(
object_new_with_props(TYPE_QAUTHZ_LIST_FILE,
object_get_objects_root(),
id, errp,
"filename", filename,
"refresh", refresh ? "yes" : "no",
NULL));
}
static const TypeInfo qauthz_list_file_info = {
.parent = TYPE_QAUTHZ,
.name = TYPE_QAUTHZ_LIST_FILE,
.instance_init = qauthz_list_file_init,
.instance_size = sizeof(QAuthZListFile),
.instance_finalize = qauthz_list_file_finalize,
.class_size = sizeof(QAuthZListFileClass),
.class_init = qauthz_list_file_class_init,
.interfaces = (InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
};
static void
qauthz_list_file_register_types(void)
{
type_register_static(&qauthz_list_file_info);
}
type_init(qauthz_list_file_register_types);
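
As a rough illustration of the input this driver consumes: qauthz_list_file_load() parses the file as JSON and hands the resulting dict to user_creatable_add_type(TYPE_QAUTHZ_LIST, ...), so the file is expected to hold the properties of an authz-list object. A minimal sketch, assuming the usual "policy"/"rules" members of that QAPI type (the member names are not shown in this listing, so treat them as illustrative):

    {
      "policy": "deny",
      "rules": [
        { "match": "fred", "policy": "allow", "format": "exact" }
      ]
    }

With refresh enabled, the file-monitor callback above reloads this document whenever it is modified or recreated.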


@@ -1,148 +0,0 @@
/*
* QEMU PAM authorization driver
*
* Copyright (c) 2018 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*
*/
#include "qemu/osdep.h"
#include "authz/pamacct.h"
#include "authz/trace.h"
#include "qom/object_interfaces.h"
#include <security/pam_appl.h>
static bool qauthz_pam_is_allowed(QAuthZ *authz,
const char *identity,
Error **errp)
{
QAuthZPAM *pauthz = QAUTHZ_PAM(authz);
const struct pam_conv pam_conversation = { 0 };
pam_handle_t *pamh = NULL;
int ret;
trace_qauthz_pam_check(authz, identity, pauthz->service);
ret = pam_start(pauthz->service,
identity,
&pam_conversation,
&pamh);
if (ret != PAM_SUCCESS) {
error_setg(errp, "Unable to start PAM transaction: %s",
pam_strerror(NULL, ret));
return false;
}
ret = pam_acct_mgmt(pamh, PAM_SILENT);
pam_end(pamh, ret);
if (ret != PAM_SUCCESS) {
error_setg(errp, "Unable to authorize user '%s': %s",
identity, pam_strerror(pamh, ret));
return false;
}
return true;
}
static void
qauthz_pam_prop_set_service(Object *obj,
const char *service,
Error **errp G_GNUC_UNUSED)
{
QAuthZPAM *pauthz = QAUTHZ_PAM(obj);
g_free(pauthz->service);
pauthz->service = g_strdup(service);
}
static char *
qauthz_pam_prop_get_service(Object *obj,
Error **errp G_GNUC_UNUSED)
{
QAuthZPAM *pauthz = QAUTHZ_PAM(obj);
return g_strdup(pauthz->service);
}
static void
qauthz_pam_complete(UserCreatable *uc, Error **errp)
{
}
static void
qauthz_pam_finalize(Object *obj)
{
QAuthZPAM *pauthz = QAUTHZ_PAM(obj);
g_free(pauthz->service);
}
static void
qauthz_pam_class_init(ObjectClass *oc, void *data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
QAuthZClass *authz = QAUTHZ_CLASS(oc);
ucc->complete = qauthz_pam_complete;
authz->is_allowed = qauthz_pam_is_allowed;
object_class_property_add_str(oc, "service",
qauthz_pam_prop_get_service,
qauthz_pam_prop_set_service,
NULL);
}
QAuthZPAM *qauthz_pam_new(const char *id,
const char *service,
Error **errp)
{
return QAUTHZ_PAM(
object_new_with_props(TYPE_QAUTHZ_PAM,
object_get_objects_root(),
id, errp,
"service", service,
NULL));
}
static const TypeInfo qauthz_pam_info = {
.parent = TYPE_QAUTHZ,
.name = TYPE_QAUTHZ_PAM,
.instance_size = sizeof(QAuthZPAM),
.instance_finalize = qauthz_pam_finalize,
.class_size = sizeof(QAuthZPAMClass),
.class_init = qauthz_pam_class_init,
.interfaces = (InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
};
static void
qauthz_pam_register_types(void)
{
type_register_static(&qauthz_pam_info);
}
type_init(qauthz_pam_register_types);
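
For context, this driver delegates the decision entirely to pam_acct_mgmt(), so the policy lives in the host's PAM configuration rather than in QEMU. A hypothetical setup (object id, service name and PAM module choice are illustrative, not taken from this listing):

    qemu-system-x86_64 ... -object authz-pam,id=authz0,service=qemu-vnc

    # /etc/pam.d/qemu-vnc
    account requisite pam_listfile.so item=user sense=allow file=/etc/qemu/vnc.allow

The "service" property selects which file under /etc/pam.d/ is consulted when is_allowed() runs.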


@@ -1,115 +0,0 @@
/*
* QEMU simple authorization driver
*
* Copyright (c) 2018 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*
*/
#include "qemu/osdep.h"
#include "authz/simple.h"
#include "authz/trace.h"
#include "qom/object_interfaces.h"
static bool qauthz_simple_is_allowed(QAuthZ *authz,
const char *identity,
Error **errp)
{
QAuthZSimple *sauthz = QAUTHZ_SIMPLE(authz);
trace_qauthz_simple_is_allowed(authz, sauthz->identity, identity);
return g_str_equal(identity, sauthz->identity);
}
static void
qauthz_simple_prop_set_identity(Object *obj,
const char *value,
Error **errp G_GNUC_UNUSED)
{
QAuthZSimple *sauthz = QAUTHZ_SIMPLE(obj);
g_free(sauthz->identity);
sauthz->identity = g_strdup(value);
}
static char *
qauthz_simple_prop_get_identity(Object *obj,
Error **errp G_GNUC_UNUSED)
{
QAuthZSimple *sauthz = QAUTHZ_SIMPLE(obj);
return g_strdup(sauthz->identity);
}
static void
qauthz_simple_finalize(Object *obj)
{
QAuthZSimple *sauthz = QAUTHZ_SIMPLE(obj);
g_free(sauthz->identity);
}
static void
qauthz_simple_class_init(ObjectClass *oc, void *data)
{
QAuthZClass *authz = QAUTHZ_CLASS(oc);
authz->is_allowed = qauthz_simple_is_allowed;
object_class_property_add_str(oc, "identity",
qauthz_simple_prop_get_identity,
qauthz_simple_prop_set_identity,
NULL);
}
QAuthZSimple *qauthz_simple_new(const char *id,
const char *identity,
Error **errp)
{
return QAUTHZ_SIMPLE(
object_new_with_props(TYPE_QAUTHZ_SIMPLE,
object_get_objects_root(),
id, errp,
"identity", identity,
NULL));
}
static const TypeInfo qauthz_simple_info = {
.parent = TYPE_QAUTHZ,
.name = TYPE_QAUTHZ_SIMPLE,
.instance_size = sizeof(QAuthZSimple),
.instance_finalize = qauthz_simple_finalize,
.class_size = sizeof(QAuthZSimpleClass),
.class_init = qauthz_simple_class_init,
.interfaces = (InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
};
static void
qauthz_simple_register_types(void)
{
type_register_static(&qauthz_simple_info);
}
type_init(qauthz_simple_register_types);
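
A minimal usage sketch for this driver (object id and identity value are hypothetical, not from this listing):

    -object authz-simple,id=authz0,identity=fred

This permits exactly the client whose authenticated identity string-compares equal to "fred" in qauthz_simple_is_allowed(), and rejects everyone else.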


@@ -1,18 +0,0 @@
# See docs/devel/tracing.txt for syntax documentation.
# base.c
qauthz_is_allowed(void *authz, const char *identity, bool allowed) "AuthZ %p check identity=%s allowed=%d"
# simple.c
qauthz_simple_is_allowed(void *authz, const char *wantidentity, const char *gotidentity) "AuthZ simple %p check want identity=%s got identity=%s"
# list.c
qauthz_list_check_rule(void *authz, const char *identity, const char *rule, int format, int policy) "AuthZ list %p check rule=%s identity=%s format=%d policy=%d"
qauthz_list_default_policy(void *authz, const char *identity, int policy) "AuthZ list %p default identity=%s policy=%d"
# listfile.c
qauthz_list_file_load(void *authz, const char *filename) "AuthZ file %p load filename=%s"
qauthz_list_file_refresh(void *authz, const char *filename, int success) "AuthZ file %p load filename=%s success=%d"
# pamacct.c
qauthz_pam_check(void *authz, const char *identity, const char *service) "AuthZ PAM %p identity=%s service=%s"


@@ -4,14 +4,7 @@ common-obj-$(CONFIG_POSIX) += rng-random.o
common-obj-$(CONFIG_TPM) += tpm.o
common-obj-y += hostmem.o hostmem-ram.o
common-obj-$(CONFIG_POSIX) += hostmem-file.o
common-obj-$(CONFIG_LINUX) += hostmem-file.o
common-obj-y += cryptodev.o
common-obj-y += cryptodev-builtin.o
ifeq ($(CONFIG_VIRTIO_CRYPTO),y)
common-obj-y += cryptodev-vhost.o
common-obj-$(CONFIG_VHOST_CRYPTO) += cryptodev-vhost-user.o
endif
common-obj-$(CONFIG_LINUX) += hostmem-memfd.o


@@ -78,7 +78,6 @@ static void cryptodev_builtin_init(
"cryptodev-builtin", NULL);
cc->info_str = g_strdup_printf("cryptodev-builtin0");
cc->queue_index = 0;
cc->type = CRYPTODEV_BACKEND_TYPE_BUILTIN;
backend->conf.peers.ccs[0] = cc;
backend->conf.crypto_services =


@@ -1,380 +0,0 @@
/*
* QEMU Cryptodev backend for QEMU cipher APIs
*
* Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
*
* Authors:
* Gonglei <arei.gonglei@huawei.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*
*/
#include "qemu/osdep.h"
#include "hw/boards.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "hw/virtio/vhost-user.h"
#include "standard-headers/linux/virtio_crypto.h"
#include "sysemu/cryptodev-vhost.h"
#include "chardev/char-fe.h"
#include "sysemu/cryptodev-vhost-user.h"
/**
* @TYPE_CRYPTODEV_BACKEND_VHOST_USER:
* name of backend that uses vhost user server
*/
#define TYPE_CRYPTODEV_BACKEND_VHOST_USER "cryptodev-vhost-user"
#define CRYPTODEV_BACKEND_VHOST_USER(obj) \
OBJECT_CHECK(CryptoDevBackendVhostUser, \
(obj), TYPE_CRYPTODEV_BACKEND_VHOST_USER)
typedef struct CryptoDevBackendVhostUser {
CryptoDevBackend parent_obj;
VhostUserState vhost_user;
CharBackend chr;
char *chr_name;
bool opened;
CryptoDevBackendVhost *vhost_crypto[MAX_CRYPTO_QUEUE_NUM];
} CryptoDevBackendVhostUser;
static int
cryptodev_vhost_user_running(
CryptoDevBackendVhost *crypto)
{
return crypto ? 1 : 0;
}
CryptoDevBackendVhost *
cryptodev_vhost_user_get_vhost(
CryptoDevBackendClient *cc,
CryptoDevBackend *b,
uint16_t queue)
{
CryptoDevBackendVhostUser *s =
CRYPTODEV_BACKEND_VHOST_USER(b);
assert(cc->type == CRYPTODEV_BACKEND_TYPE_VHOST_USER);
assert(queue < MAX_CRYPTO_QUEUE_NUM);
return s->vhost_crypto[queue];
}
static void cryptodev_vhost_user_stop(int queues,
CryptoDevBackendVhostUser *s)
{
size_t i;
for (i = 0; i < queues; i++) {
if (!cryptodev_vhost_user_running(s->vhost_crypto[i])) {
continue;
}
cryptodev_vhost_cleanup(s->vhost_crypto[i]);
s->vhost_crypto[i] = NULL;
}
}
static int
cryptodev_vhost_user_start(int queues,
CryptoDevBackendVhostUser *s)
{
CryptoDevBackendVhostOptions options;
CryptoDevBackend *b = CRYPTODEV_BACKEND(s);
int max_queues;
size_t i;
for (i = 0; i < queues; i++) {
if (cryptodev_vhost_user_running(s->vhost_crypto[i])) {
continue;
}
options.opaque = &s->vhost_user;
options.backend_type = VHOST_BACKEND_TYPE_USER;
options.cc = b->conf.peers.ccs[i];
s->vhost_crypto[i] = cryptodev_vhost_init(&options);
if (!s->vhost_crypto[i]) {
error_report("failed to init vhost_crypto for queue %zu", i);
goto err;
}
if (i == 0) {
max_queues =
cryptodev_vhost_get_max_queues(s->vhost_crypto[i]);
if (queues > max_queues) {
error_report("you are asking more queues than supported: %d",
max_queues);
goto err;
}
}
}
return 0;
err:
cryptodev_vhost_user_stop(i + 1, s);
return -1;
}
static Chardev *
cryptodev_vhost_claim_chardev(CryptoDevBackendVhostUser *s,
Error **errp)
{
Chardev *chr;
if (s->chr_name == NULL) {
error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
"chardev", "a valid character device");
return NULL;
}
chr = qemu_chr_find(s->chr_name);
if (chr == NULL) {
error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
"Device '%s' not found", s->chr_name);
return NULL;
}
return chr;
}
static void cryptodev_vhost_user_event(void *opaque, int event)
{
CryptoDevBackendVhostUser *s = opaque;
CryptoDevBackend *b = CRYPTODEV_BACKEND(s);
int queues = b->conf.peers.queues;
assert(queues < MAX_CRYPTO_QUEUE_NUM);
switch (event) {
case CHR_EVENT_OPENED:
if (cryptodev_vhost_user_start(queues, s) < 0) {
exit(1);
}
b->ready = true;
break;
case CHR_EVENT_CLOSED:
b->ready = false;
cryptodev_vhost_user_stop(queues, s);
break;
}
}
static void cryptodev_vhost_user_init(
CryptoDevBackend *backend, Error **errp)
{
int queues = backend->conf.peers.queues;
size_t i;
Error *local_err = NULL;
Chardev *chr;
CryptoDevBackendClient *cc;
CryptoDevBackendVhostUser *s =
CRYPTODEV_BACKEND_VHOST_USER(backend);
chr = cryptodev_vhost_claim_chardev(s, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
s->opened = true;
for (i = 0; i < queues; i++) {
cc = cryptodev_backend_new_client(
"cryptodev-vhost-user", NULL);
cc->info_str = g_strdup_printf("cryptodev-vhost-user%zu to %s ",
i, chr->label);
cc->queue_index = i;
cc->type = CRYPTODEV_BACKEND_TYPE_VHOST_USER;
backend->conf.peers.ccs[i] = cc;
if (i == 0) {
if (!qemu_chr_fe_init(&s->chr, chr, &local_err)) {
error_propagate(errp, local_err);
return;
}
}
}
if (!vhost_user_init(&s->vhost_user, &s->chr, errp)) {
return;
}
qemu_chr_fe_set_handlers(&s->chr, NULL, NULL,
cryptodev_vhost_user_event, NULL, s, NULL, true);
backend->conf.crypto_services =
1u << VIRTIO_CRYPTO_SERVICE_CIPHER |
1u << VIRTIO_CRYPTO_SERVICE_HASH |
1u << VIRTIO_CRYPTO_SERVICE_MAC;
backend->conf.cipher_algo_l = 1u << VIRTIO_CRYPTO_CIPHER_AES_CBC;
backend->conf.hash_algo = 1u << VIRTIO_CRYPTO_HASH_SHA1;
backend->conf.max_size = UINT64_MAX;
backend->conf.max_cipher_key_len = VHOST_USER_MAX_CIPHER_KEY_LEN;
backend->conf.max_auth_key_len = VHOST_USER_MAX_AUTH_KEY_LEN;
}
static int64_t cryptodev_vhost_user_sym_create_session(
CryptoDevBackend *backend,
CryptoDevBackendSymSessionInfo *sess_info,
uint32_t queue_index, Error **errp)
{
CryptoDevBackendClient *cc =
backend->conf.peers.ccs[queue_index];
CryptoDevBackendVhost *vhost_crypto;
uint64_t session_id = 0;
int ret;
vhost_crypto = cryptodev_vhost_user_get_vhost(cc, backend, queue_index);
if (vhost_crypto) {
struct vhost_dev *dev = &(vhost_crypto->dev);
ret = dev->vhost_ops->vhost_crypto_create_session(dev,
sess_info,
&session_id);
if (ret < 0) {
return -1;
} else {
return session_id;
}
}
return -1;
}
static int cryptodev_vhost_user_sym_close_session(
CryptoDevBackend *backend,
uint64_t session_id,
uint32_t queue_index, Error **errp)
{
CryptoDevBackendClient *cc =
backend->conf.peers.ccs[queue_index];
CryptoDevBackendVhost *vhost_crypto;
int ret;
vhost_crypto = cryptodev_vhost_user_get_vhost(cc, backend, queue_index);
if (vhost_crypto) {
struct vhost_dev *dev = &(vhost_crypto->dev);
ret = dev->vhost_ops->vhost_crypto_close_session(dev,
session_id);
if (ret < 0) {
return -1;
} else {
return 0;
}
}
return -1;
}
static void cryptodev_vhost_user_cleanup(
CryptoDevBackend *backend,
Error **errp)
{
CryptoDevBackendVhostUser *s =
CRYPTODEV_BACKEND_VHOST_USER(backend);
size_t i;
int queues = backend->conf.peers.queues;
CryptoDevBackendClient *cc;
cryptodev_vhost_user_stop(queues, s);
for (i = 0; i < queues; i++) {
cc = backend->conf.peers.ccs[i];
if (cc) {
cryptodev_backend_free_client(cc);
backend->conf.peers.ccs[i] = NULL;
}
}
vhost_user_cleanup(&s->vhost_user);
}
static void cryptodev_vhost_user_set_chardev(Object *obj,
const char *value, Error **errp)
{
CryptoDevBackendVhostUser *s =
CRYPTODEV_BACKEND_VHOST_USER(obj);
if (s->opened) {
error_setg(errp, QERR_PERMISSION_DENIED);
} else {
g_free(s->chr_name);
s->chr_name = g_strdup(value);
}
}
static char *
cryptodev_vhost_user_get_chardev(Object *obj, Error **errp)
{
CryptoDevBackendVhostUser *s =
CRYPTODEV_BACKEND_VHOST_USER(obj);
Chardev *chr = qemu_chr_fe_get_driver(&s->chr);
if (chr && chr->label) {
return g_strdup(chr->label);
}
return NULL;
}
static void cryptodev_vhost_user_instance_int(Object *obj)
{
object_property_add_str(obj, "chardev",
cryptodev_vhost_user_get_chardev,
cryptodev_vhost_user_set_chardev,
NULL);
}
static void cryptodev_vhost_user_finalize(Object *obj)
{
CryptoDevBackendVhostUser *s =
CRYPTODEV_BACKEND_VHOST_USER(obj);
qemu_chr_fe_deinit(&s->chr, false);
g_free(s->chr_name);
}
static void
cryptodev_vhost_user_class_init(ObjectClass *oc, void *data)
{
CryptoDevBackendClass *bc = CRYPTODEV_BACKEND_CLASS(oc);
bc->init = cryptodev_vhost_user_init;
bc->cleanup = cryptodev_vhost_user_cleanup;
bc->create_session = cryptodev_vhost_user_sym_create_session;
bc->close_session = cryptodev_vhost_user_sym_close_session;
bc->do_sym_op = NULL;
}
static const TypeInfo cryptodev_vhost_user_info = {
.name = TYPE_CRYPTODEV_BACKEND_VHOST_USER,
.parent = TYPE_CRYPTODEV_BACKEND,
.class_init = cryptodev_vhost_user_class_init,
.instance_init = cryptodev_vhost_user_instance_int,
.instance_finalize = cryptodev_vhost_user_finalize,
.instance_size = sizeof(CryptoDevBackendVhostUser),
};
static void
cryptodev_vhost_user_register_types(void)
{
type_register_static(&cryptodev_vhost_user_info);
}
type_init(cryptodev_vhost_user_register_types);
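
For orientation, this backend only attaches to an existing vhost-user chardev; a hypothetical invocation (socket path and ids are illustrative, not from this listing) would look like:

    qemu-system-x86_64 ... \
        -chardev socket,id=chr0,path=/tmp/crypto.sock \
        -object cryptodev-vhost-user,id=crypto0,chardev=chr0 \
        -device virtio-crypto-pci,cryptodev=crypto0

The CHR_EVENT_OPENED/CLOSED handler above then starts or stops one CryptoDevBackendVhost per queue as the external vhost-user crypto daemon connects and disconnects.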


@@ -1,347 +0,0 @@
/*
* QEMU Cryptodev backend for QEMU cipher APIs
*
* Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
*
* Authors:
* Gonglei <arei.gonglei@huawei.com>
* Jay Zhou <jianjay.zhou@huawei.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*
*/
#include "qemu/osdep.h"
#include "hw/virtio/virtio-bus.h"
#include "sysemu/cryptodev-vhost.h"
#ifdef CONFIG_VHOST_CRYPTO
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio-crypto.h"
#include "sysemu/cryptodev-vhost-user.h"
uint64_t
cryptodev_vhost_get_max_queues(
CryptoDevBackendVhost *crypto)
{
return crypto->dev.max_queues;
}
void cryptodev_vhost_cleanup(CryptoDevBackendVhost *crypto)
{
vhost_dev_cleanup(&crypto->dev);
g_free(crypto);
}
struct CryptoDevBackendVhost *
cryptodev_vhost_init(
CryptoDevBackendVhostOptions *options)
{
int r;
CryptoDevBackendVhost *crypto;
crypto = g_new(CryptoDevBackendVhost, 1);
crypto->dev.max_queues = 1;
crypto->dev.nvqs = 1;
crypto->dev.vqs = crypto->vqs;
crypto->cc = options->cc;
crypto->dev.protocol_features = 0;
crypto->backend = -1;
/* vhost-user needs vq_index to initiate a specific queue pair */
crypto->dev.vq_index = crypto->cc->queue_index * crypto->dev.nvqs;
r = vhost_dev_init(&crypto->dev, options->opaque, options->backend_type, 0);
if (r < 0) {
goto fail;
}
return crypto;
fail:
g_free(crypto);
return NULL;
}
static int
cryptodev_vhost_start_one(CryptoDevBackendVhost *crypto,
VirtIODevice *dev)
{
int r;
crypto->dev.nvqs = 1;
crypto->dev.vqs = crypto->vqs;
r = vhost_dev_enable_notifiers(&crypto->dev, dev);
if (r < 0) {
goto fail_notifiers;
}
r = vhost_dev_start(&crypto->dev, dev);
if (r < 0) {
goto fail_start;
}
return 0;
fail_start:
vhost_dev_disable_notifiers(&crypto->dev, dev);
fail_notifiers:
return r;
}
static void
cryptodev_vhost_stop_one(CryptoDevBackendVhost *crypto,
VirtIODevice *dev)
{
vhost_dev_stop(&crypto->dev, dev);
vhost_dev_disable_notifiers(&crypto->dev, dev);
}
CryptoDevBackendVhost *
cryptodev_get_vhost(CryptoDevBackendClient *cc,
CryptoDevBackend *b,
uint16_t queue)
{
CryptoDevBackendVhost *vhost_crypto = NULL;
if (!cc) {
return NULL;
}
switch (cc->type) {
#if defined(CONFIG_VHOST_USER) && defined(CONFIG_LINUX)
case CRYPTODEV_BACKEND_TYPE_VHOST_USER:
vhost_crypto = cryptodev_vhost_user_get_vhost(cc, b, queue);
break;
#endif
default:
break;
}
return vhost_crypto;
}
static void
cryptodev_vhost_set_vq_index(CryptoDevBackendVhost *crypto,
int vq_index)
{
crypto->dev.vq_index = vq_index;
}
static int
vhost_set_vring_enable(CryptoDevBackendClient *cc,
CryptoDevBackend *b,
uint16_t queue, int enable)
{
CryptoDevBackendVhost *crypto =
cryptodev_get_vhost(cc, b, queue);
const VhostOps *vhost_ops;
cc->vring_enable = enable;
if (!crypto) {
return 0;
}
vhost_ops = crypto->dev.vhost_ops;
if (vhost_ops->vhost_set_vring_enable) {
return vhost_ops->vhost_set_vring_enable(&crypto->dev, enable);
}
return 0;
}
int cryptodev_vhost_start(VirtIODevice *dev, int total_queues)
{
VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
VirtioBusState *vbus = VIRTIO_BUS(qbus);
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
int r, e;
int i;
CryptoDevBackend *b = vcrypto->cryptodev;
CryptoDevBackendVhost *vhost_crypto;
CryptoDevBackendClient *cc;
if (!k->set_guest_notifiers) {
error_report("binding does not support guest notifiers");
return -ENOSYS;
}
for (i = 0; i < total_queues; i++) {
cc = b->conf.peers.ccs[i];
vhost_crypto = cryptodev_get_vhost(cc, b, i);
cryptodev_vhost_set_vq_index(vhost_crypto, i);
/* Suppress the masking guest notifiers on vhost user
* because vhost user doesn't interrupt masking/unmasking
* properly.
*/
if (cc->type == CRYPTODEV_BACKEND_TYPE_VHOST_USER) {
dev->use_guest_notifier_mask = false;
}
}
r = k->set_guest_notifiers(qbus->parent, total_queues, true);
if (r < 0) {
error_report("error binding guest notifier: %d", -r);
goto err;
}
for (i = 0; i < total_queues; i++) {
cc = b->conf.peers.ccs[i];
vhost_crypto = cryptodev_get_vhost(cc, b, i);
r = cryptodev_vhost_start_one(vhost_crypto, dev);
if (r < 0) {
goto err_start;
}
if (cc->vring_enable) {
/* restore vring enable state */
r = vhost_set_vring_enable(cc, b, i, cc->vring_enable);
if (r < 0) {
goto err_start;
}
}
}
return 0;
err_start:
while (--i >= 0) {
cc = b->conf.peers.ccs[i];
vhost_crypto = cryptodev_get_vhost(cc, b, i);
cryptodev_vhost_stop_one(vhost_crypto, dev);
}
e = k->set_guest_notifiers(qbus->parent, total_queues, false);
if (e < 0) {
error_report("vhost guest notifier cleanup failed: %d", e);
}
err:
return r;
}
void cryptodev_vhost_stop(VirtIODevice *dev, int total_queues)
{
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
VirtioBusState *vbus = VIRTIO_BUS(qbus);
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
CryptoDevBackend *b = vcrypto->cryptodev;
CryptoDevBackendVhost *vhost_crypto;
CryptoDevBackendClient *cc;
size_t i;
int r;
for (i = 0; i < total_queues; i++) {
cc = b->conf.peers.ccs[i];
vhost_crypto = cryptodev_get_vhost(cc, b, i);
cryptodev_vhost_stop_one(vhost_crypto, dev);
}
r = k->set_guest_notifiers(qbus->parent, total_queues, false);
if (r < 0) {
error_report("vhost guest notifier cleanup failed: %d", r);
}
assert(r >= 0);
}
void cryptodev_vhost_virtqueue_mask(VirtIODevice *dev,
int queue,
int idx, bool mask)
{
VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
CryptoDevBackend *b = vcrypto->cryptodev;
CryptoDevBackendVhost *vhost_crypto;
CryptoDevBackendClient *cc;
assert(queue < MAX_CRYPTO_QUEUE_NUM);
cc = b->conf.peers.ccs[queue];
vhost_crypto = cryptodev_get_vhost(cc, b, queue);
vhost_virtqueue_mask(&vhost_crypto->dev, dev, idx, mask);
}
bool cryptodev_vhost_virtqueue_pending(VirtIODevice *dev,
int queue, int idx)
{
VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
CryptoDevBackend *b = vcrypto->cryptodev;
CryptoDevBackendVhost *vhost_crypto;
CryptoDevBackendClient *cc;
assert(queue < MAX_CRYPTO_QUEUE_NUM);
cc = b->conf.peers.ccs[queue];
vhost_crypto = cryptodev_get_vhost(cc, b, queue);
return vhost_virtqueue_pending(&vhost_crypto->dev, idx);
}
#else
uint64_t
cryptodev_vhost_get_max_queues(CryptoDevBackendVhost *crypto)
{
return 0;
}
void cryptodev_vhost_cleanup(CryptoDevBackendVhost *crypto)
{
}
struct CryptoDevBackendVhost *
cryptodev_vhost_init(CryptoDevBackendVhostOptions *options)
{
return NULL;
}
CryptoDevBackendVhost *
cryptodev_get_vhost(CryptoDevBackendClient *cc,
CryptoDevBackend *b,
uint16_t queue)
{
return NULL;
}
int cryptodev_vhost_start(VirtIODevice *dev, int total_queues)
{
return -1;
}
void cryptodev_vhost_stop(VirtIODevice *dev, int total_queues)
{
}
void cryptodev_vhost_virtqueue_mask(VirtIODevice *dev,
int queue,
int idx, bool mask)
{
}
bool cryptodev_vhost_virtqueue_pending(VirtIODevice *dev,
int queue, int idx)
{
return false;
}
#endif


@@ -26,6 +26,8 @@
#include "hw/boards.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qapi-types.h"
#include "qapi-visit.h"
#include "qemu/config-file.h"
#include "qom/object_interfaces.h"
#include "hw/virtio/virtio-crypto.h"


@@ -12,7 +12,6 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "sysemu/hostmem.h"
#include "sysemu/sysemu.h"
#include "qom/object_interfaces.h"
@@ -32,21 +31,15 @@ typedef struct HostMemoryBackendFile HostMemoryBackendFile;
struct HostMemoryBackendFile {
HostMemoryBackend parent_obj;
char *mem_path;
uint64_t align;
bool share;
bool discard_data;
bool is_pmem;
char *mem_path;
};
static void
file_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
{
#ifndef CONFIG_POSIX
error_setg(errp, "backend '%s' not supported on this host",
object_get_typename(OBJECT(backend)));
#else
HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(backend);
gchar *name;
if (!backend->size) {
error_setg(errp, "can't create backend with size 0");
@@ -56,38 +49,19 @@ file_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
error_setg(errp, "mem-path property not set");
return;
}
/*
* Verify pmem file size since starting a guest with an incorrect size
* leads to confusing failures inside the guest.
*/
if (fb->is_pmem) {
Error *local_err = NULL;
uint64_t size;
size = qemu_get_pmem_size(fb->mem_path, &local_err);
if (!size) {
error_propagate(errp, local_err);
return;
}
if (backend->size > size) {
error_setg(errp, "size property %" PRIu64 " is larger than "
"pmem file \"%s\" size %" PRIu64, backend->size,
fb->mem_path, size);
return;
}
#ifndef CONFIG_LINUX
error_setg(errp, "-mem-path not supported on this host");
#else
if (!host_memory_backend_mr_inited(backend)) {
gchar *path;
backend->force_prealloc = mem_prealloc;
path = object_get_canonical_path(OBJECT(backend));
memory_region_init_ram_from_file(&backend->mr, OBJECT(backend),
path,
backend->size, fb->share,
fb->mem_path, errp);
g_free(path);
}
backend->force_prealloc = mem_prealloc;
name = host_memory_backend_get_name(backend);
memory_region_init_ram_from_file(&backend->mr, OBJECT(backend),
name,
backend->size, fb->align,
(backend->share ? RAM_SHARED : 0) |
(fb->is_pmem ? RAM_PMEM : 0),
fb->mem_path, errp);
g_free(name);
#endif
}
@@ -104,14 +78,32 @@ static void set_mem_path(Object *o, const char *str, Error **errp)
HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(o);
if (host_memory_backend_mr_inited(backend)) {
error_setg(errp, "cannot change property 'mem-path' of %s",
object_get_typename(o));
error_setg(errp, "cannot change property value");
return;
}
g_free(fb->mem_path);
fb->mem_path = g_strdup(str);
}
static bool file_memory_backend_get_share(Object *o, Error **errp)
{
HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(o);
return fb->share;
}
static void file_memory_backend_set_share(Object *o, bool value, Error **errp)
{
HostMemoryBackend *backend = MEMORY_BACKEND(o);
HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(o);
if (host_memory_backend_mr_inited(backend)) {
error_setg(errp, "cannot change property value");
return;
}
fb->share = value;
}
static bool file_memory_backend_get_discard_data(Object *o, Error **errp)
{
return MEMORY_BACKEND_FILE(o)->discard_data;
@@ -123,74 +115,6 @@ static void file_memory_backend_set_discard_data(Object *o, bool value,
MEMORY_BACKEND_FILE(o)->discard_data = value;
}
static void file_memory_backend_get_align(Object *o, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(o);
uint64_t val = fb->align;
visit_type_size(v, name, &val, errp);
}
static void file_memory_backend_set_align(Object *o, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
HostMemoryBackend *backend = MEMORY_BACKEND(o);
HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(o);
Error *local_err = NULL;
uint64_t val;
if (host_memory_backend_mr_inited(backend)) {
error_setg(&local_err, "cannot change property '%s' of %s",
name, object_get_typename(o));
goto out;
}
visit_type_size(v, name, &val, &local_err);
if (local_err) {
goto out;
}
fb->align = val;
out:
error_propagate(errp, local_err);
}
static bool file_memory_backend_get_pmem(Object *o, Error **errp)
{
return MEMORY_BACKEND_FILE(o)->is_pmem;
}
static void file_memory_backend_set_pmem(Object *o, bool value, Error **errp)
{
HostMemoryBackend *backend = MEMORY_BACKEND(o);
HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(o);
if (host_memory_backend_mr_inited(backend)) {
error_setg(errp, "cannot change property 'pmem' of %s.",
object_get_typename(o));
return;
}
#ifndef CONFIG_LIBPMEM
if (value) {
Error *local_err = NULL;
error_setg(&local_err,
"Lack of libpmem support while setting the 'pmem=on'"
" of %s. We can't ensure data persistence.",
object_get_typename(o));
error_propagate(errp, local_err);
return;
}
#endif
fb->is_pmem = value;
}
static void file_backend_unparent(Object *obj)
{
HostMemoryBackend *backend = MEMORY_BACKEND(obj);
@@ -212,19 +136,15 @@ file_backend_class_init(ObjectClass *oc, void *data)
bc->alloc = file_backend_memory_alloc;
oc->unparent = file_backend_unparent;
object_class_property_add_bool(oc, "share",
file_memory_backend_get_share, file_memory_backend_set_share,
&error_abort);
object_class_property_add_bool(oc, "discard-data",
file_memory_backend_get_discard_data, file_memory_backend_set_discard_data,
&error_abort);
object_class_property_add_str(oc, "mem-path",
get_mem_path, set_mem_path,
&error_abort);
object_class_property_add(oc, "align", "int",
file_memory_backend_get_align,
file_memory_backend_set_align,
NULL, NULL, &error_abort);
object_class_property_add_bool(oc, "pmem",
file_memory_backend_get_pmem, file_memory_backend_set_pmem,
&error_abort);
}
static void file_backend_instance_finalize(Object *o)


@@ -1,181 +0,0 @@
/*
* QEMU host memfd memory backend
*
* Copyright (C) 2018 Red Hat Inc
*
* Authors:
* Marc-André Lureau <marcandre.lureau@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "sysemu/hostmem.h"
#include "sysemu/sysemu.h"
#include "qom/object_interfaces.h"
#include "qemu/memfd.h"
#include "qapi/error.h"
#define TYPE_MEMORY_BACKEND_MEMFD "memory-backend-memfd"
#define MEMORY_BACKEND_MEMFD(obj) \
OBJECT_CHECK(HostMemoryBackendMemfd, (obj), TYPE_MEMORY_BACKEND_MEMFD)
typedef struct HostMemoryBackendMemfd HostMemoryBackendMemfd;
struct HostMemoryBackendMemfd {
HostMemoryBackend parent_obj;
bool hugetlb;
uint64_t hugetlbsize;
bool seal;
};
static void
memfd_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
{
HostMemoryBackendMemfd *m = MEMORY_BACKEND_MEMFD(backend);
char *name;
int fd;
if (!backend->size) {
error_setg(errp, "can't create backend with size 0");
return;
}
backend->force_prealloc = mem_prealloc;
fd = qemu_memfd_create(TYPE_MEMORY_BACKEND_MEMFD, backend->size,
m->hugetlb, m->hugetlbsize, m->seal ?
F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL : 0,
errp);
if (fd == -1) {
return;
}
name = host_memory_backend_get_name(backend);
memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend),
name, backend->size,
backend->share, fd, errp);
g_free(name);
}
static bool
memfd_backend_get_hugetlb(Object *o, Error **errp)
{
return MEMORY_BACKEND_MEMFD(o)->hugetlb;
}
static void
memfd_backend_set_hugetlb(Object *o, bool value, Error **errp)
{
MEMORY_BACKEND_MEMFD(o)->hugetlb = value;
}
static void
memfd_backend_set_hugetlbsize(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
HostMemoryBackendMemfd *m = MEMORY_BACKEND_MEMFD(obj);
Error *local_err = NULL;
uint64_t value;
if (host_memory_backend_mr_inited(MEMORY_BACKEND(obj))) {
error_setg(&local_err, "cannot change property value");
goto out;
}
visit_type_size(v, name, &value, &local_err);
if (local_err) {
goto out;
}
if (!value) {
error_setg(&local_err, "Property '%s.%s' doesn't take value '%"
PRIu64 "'", object_get_typename(obj), name, value);
goto out;
}
m->hugetlbsize = value;
out:
error_propagate(errp, local_err);
}
static void
memfd_backend_get_hugetlbsize(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
HostMemoryBackendMemfd *m = MEMORY_BACKEND_MEMFD(obj);
uint64_t value = m->hugetlbsize;
visit_type_size(v, name, &value, errp);
}
static bool
memfd_backend_get_seal(Object *o, Error **errp)
{
return MEMORY_BACKEND_MEMFD(o)->seal;
}
static void
memfd_backend_set_seal(Object *o, bool value, Error **errp)
{
MEMORY_BACKEND_MEMFD(o)->seal = value;
}
static void
memfd_backend_instance_init(Object *obj)
{
HostMemoryBackendMemfd *m = MEMORY_BACKEND_MEMFD(obj);
/* default to sealed file */
m->seal = true;
MEMORY_BACKEND(m)->share = true;
}
static void
memfd_backend_class_init(ObjectClass *oc, void *data)
{
HostMemoryBackendClass *bc = MEMORY_BACKEND_CLASS(oc);
bc->alloc = memfd_backend_memory_alloc;
if (qemu_memfd_check(MFD_HUGETLB)) {
object_class_property_add_bool(oc, "hugetlb",
memfd_backend_get_hugetlb,
memfd_backend_set_hugetlb,
&error_abort);
object_class_property_set_description(oc, "hugetlb",
"Use huge pages",
&error_abort);
object_class_property_add(oc, "hugetlbsize", "int",
memfd_backend_get_hugetlbsize,
memfd_backend_set_hugetlbsize,
NULL, NULL, &error_abort);
object_class_property_set_description(oc, "hugetlbsize",
"Huge pages size (ex: 2M, 1G)",
&error_abort);
}
object_class_property_add_bool(oc, "seal",
memfd_backend_get_seal,
memfd_backend_set_seal,
&error_abort);
object_class_property_set_description(oc, "seal",
"Seal growing & shrinking",
&error_abort);
}
static const TypeInfo memfd_backend_info = {
.name = TYPE_MEMORY_BACKEND_MEMFD,
.parent = TYPE_MEMORY_BACKEND,
.instance_init = memfd_backend_instance_init,
.class_init = memfd_backend_class_init,
.instance_size = sizeof(HostMemoryBackendMemfd),
};
static void register_types(void)
{
if (qemu_memfd_check(MFD_ALLOW_SEALING)) {
type_register_static(&memfd_backend_info);
}
}
type_init(register_types);
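
To tie the properties above together, a command line such as the following would exercise this backend (sizes and the guest wiring via -numa are illustrative only):

    qemu-system-x86_64 ... \
        -object memory-backend-memfd,id=mem0,size=2G,hugetlb=on,hugetlbsize=2M,seal=on \
        -numa node,memdev=mem0

Note that memfd_backend_instance_init() forces share=true, and the "hugetlb"/"hugetlbsize" properties are only registered when qemu_memfd_check(MFD_HUGETLB) succeeds on the host.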


@@ -16,20 +16,21 @@
#define TYPE_MEMORY_BACKEND_RAM "memory-backend-ram"
static void
ram_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
{
char *name;
char *path;
if (!backend->size) {
error_setg(errp, "can't create backend with size 0");
return;
}
name = host_memory_backend_get_name(backend);
memory_region_init_ram_shared_nomigrate(&backend->mr, OBJECT(backend), name,
backend->size, backend->share, errp);
g_free(name);
path = object_get_canonical_path_component(OBJECT(backend));
memory_region_init_ram_nomigrate(&backend->mr, OBJECT(backend), path,
backend->size, errp);
g_free(path);
}
static void


@@ -9,16 +9,15 @@
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "sysemu/hostmem.h"
#include "hw/boards.h"
#include "qapi/error.h"
#include "qapi/qapi-builtin-visit.h"
#include "qapi/visitor.h"
#include "qapi-types.h"
#include "qapi-visit.h"
#include "qemu/config-file.h"
#include "qom/object_interfaces.h"
#include "qemu/mmap-alloc.h"
#ifdef CONFIG_NUMA
#include <numaif.h>
@@ -28,16 +27,6 @@ QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_BIND != MPOL_BIND);
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_INTERLEAVE != MPOL_INTERLEAVE);
#endif
char *
host_memory_backend_get_name(HostMemoryBackend *backend)
{
if (!backend->use_canonical_path) {
return object_get_canonical_path_component(OBJECT(backend));
}
return object_get_canonical_path(OBJECT(backend));
}
static void
host_memory_backend_get_size(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
@@ -57,8 +46,7 @@ host_memory_backend_set_size(Object *obj, Visitor *v, const char *name,
uint64_t value;
if (host_memory_backend_mr_inited(backend)) {
error_setg(&local_err, "cannot change property %s of %s ",
name, object_get_typename(obj));
error_setg(&local_err, "cannot change property value");
goto out;
}
@@ -67,9 +55,8 @@ host_memory_backend_set_size(Object *obj, Visitor *v, const char *name,
goto out;
}
if (!value) {
error_setg(&local_err,
"property '%s' of %s doesn't take value '%" PRIu64 "'",
name, object_get_typename(obj), value);
error_setg(&local_err, "Property '%s.%s' doesn't take value '%"
PRIu64 "'", object_get_typename(obj), name, value);
goto out;
}
backend->size = value;
@@ -88,7 +75,7 @@ host_memory_backend_get_host_nodes(Object *obj, Visitor *v, const char *name,
value = find_first_bit(backend->host_nodes, MAX_NODES);
if (value == MAX_NODES) {
goto ret;
return;
}
*node = g_malloc0(sizeof(**node));
@@ -106,7 +93,6 @@ host_memory_backend_get_host_nodes(Object *obj, Visitor *v, const char *name,
node = &(*node)->next;
} while (true);
ret:
visit_type_uint16List(v, name, &host_nodes, errp);
}
@@ -116,23 +102,14 @@ host_memory_backend_set_host_nodes(Object *obj, Visitor *v, const char *name,
{
#ifdef CONFIG_NUMA
HostMemoryBackend *backend = MEMORY_BACKEND(obj);
uint16List *l, *host_nodes = NULL;
uint16List *l = NULL;
visit_type_uint16List(v, name, &host_nodes, errp);
visit_type_uint16List(v, name, &l, errp);
for (l = host_nodes; l; l = l->next) {
if (l->value >= MAX_NODES) {
error_setg(errp, "Invalid host-nodes value: %d", l->value);
goto out;
}
}
for (l = host_nodes; l; l = l->next) {
while (l) {
bitmap_set(backend->host_nodes, l->value, 1);
l = l->next;
}
out:
qapi_free_uint16List(host_nodes);
#else
error_setg(errp, "NUMA node binding are not supported by this QEMU");
#endif
@@ -260,11 +237,6 @@ static void host_memory_backend_init(Object *obj)
backend->prealloc = mem_prealloc;
}
static void host_memory_backend_post_init(Object *obj)
{
object_apply_compat_props(obj);
}
bool host_memory_backend_mr_inited(HostMemoryBackend *backend)
{
/*
@@ -274,7 +246,8 @@ bool host_memory_backend_mr_inited(HostMemoryBackend *backend)
return memory_region_size(&backend->mr) != 0;
}
MemoryRegion *host_memory_backend_get_memory(HostMemoryBackend *backend)
MemoryRegion *
host_memory_backend_get_memory(HostMemoryBackend *backend, Error **errp)
{
return host_memory_backend_mr_inited(backend) ? &backend->mr : NULL;
}
@@ -289,23 +262,6 @@ bool host_memory_backend_is_mapped(HostMemoryBackend *backend)
return backend->is_mapped;
}
#ifdef __linux__
size_t host_memory_backend_pagesize(HostMemoryBackend *memdev)
{
Object *obj = OBJECT(memdev);
char *path = object_property_get_str(obj, "mem-path", NULL);
size_t pagesize = qemu_mempath_getpagesize(path);
g_free(path);
return pagesize;
}
#else
size_t host_memory_backend_pagesize(HostMemoryBackend *memdev)
{
return getpagesize();
}
#endif
static void
host_memory_backend_memory_complete(UserCreatable *uc, Error **errp)
{
@@ -395,39 +351,22 @@ host_memory_backend_can_be_deleted(UserCreatable *uc)
}
}
static bool host_memory_backend_get_share(Object *o, Error **errp)
static char *get_id(Object *o, Error **errp)
{
HostMemoryBackend *backend = MEMORY_BACKEND(o);
return backend->share;
return g_strdup(backend->id);
}
static void host_memory_backend_set_share(Object *o, bool value, Error **errp)
static void set_id(Object *o, const char *str, Error **errp)
{
HostMemoryBackend *backend = MEMORY_BACKEND(o);
if (host_memory_backend_mr_inited(backend)) {
if (backend->id) {
error_setg(errp, "cannot change property value");
return;
}
backend->share = value;
}
static bool
host_memory_backend_get_use_canonical_path(Object *obj, Error **errp)
{
HostMemoryBackend *backend = MEMORY_BACKEND(obj);
return backend->use_canonical_path;
}
static void
host_memory_backend_set_use_canonical_path(Object *obj, bool value,
Error **errp)
{
HostMemoryBackend *backend = MEMORY_BACKEND(obj);
backend->use_canonical_path = value;
backend->id = g_strdup(str);
}
static void
@@ -441,44 +380,31 @@ host_memory_backend_class_init(ObjectClass *oc, void *data)
object_class_property_add_bool(oc, "merge",
host_memory_backend_get_merge,
host_memory_backend_set_merge, &error_abort);
object_class_property_set_description(oc, "merge",
"Mark memory as mergeable", &error_abort);
object_class_property_add_bool(oc, "dump",
host_memory_backend_get_dump,
host_memory_backend_set_dump, &error_abort);
object_class_property_set_description(oc, "dump",
"Set to 'off' to exclude from core dump", &error_abort);
object_class_property_add_bool(oc, "prealloc",
host_memory_backend_get_prealloc,
host_memory_backend_set_prealloc, &error_abort);
object_class_property_set_description(oc, "prealloc",
"Preallocate memory", &error_abort);
object_class_property_add(oc, "size", "int",
host_memory_backend_get_size,
host_memory_backend_set_size,
NULL, NULL, &error_abort);
object_class_property_set_description(oc, "size",
"Size of the memory region (ex: 500M)", &error_abort);
object_class_property_add(oc, "host-nodes", "int",
host_memory_backend_get_host_nodes,
host_memory_backend_set_host_nodes,
NULL, NULL, &error_abort);
object_class_property_set_description(oc, "host-nodes",
"Binds memory to the list of NUMA host nodes", &error_abort);
object_class_property_add_enum(oc, "policy", "HostMemPolicy",
&HostMemPolicy_lookup,
host_memory_backend_get_policy,
host_memory_backend_set_policy, &error_abort);
object_class_property_set_description(oc, "policy",
"Set the NUMA policy", &error_abort);
object_class_property_add_bool(oc, "share",
host_memory_backend_get_share, host_memory_backend_set_share,
&error_abort);
object_class_property_set_description(oc, "share",
"Mark the memory as private to QEMU or shared", &error_abort);
object_class_property_add_bool(oc, "x-use-canonical-path-for-ramblock-id",
host_memory_backend_get_use_canonical_path,
host_memory_backend_set_use_canonical_path, &error_abort);
object_class_property_add_str(oc, "id", get_id, set_id, &error_abort);
}
static void host_memory_backend_finalize(Object *o)
{
HostMemoryBackend *backend = MEMORY_BACKEND(o);
g_free(backend->id);
}
static const TypeInfo host_memory_backend_info = {
@@ -489,7 +415,7 @@ static const TypeInfo host_memory_backend_info = {
.class_init = host_memory_backend_class_init,
.instance_size = sizeof(HostMemoryBackend),
.instance_init = host_memory_backend_init,
.instance_post_init = host_memory_backend_post_init,
.instance_finalize = host_memory_backend_finalize,
.interfaces = (InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }


@@ -15,43 +15,25 @@
#include "qemu/osdep.h"
#include "sysemu/tpm_backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "sysemu/tpm.h"
#include "hw/tpm/tpm_int.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
#include "block/thread-pool.h"
#include "qemu/error-report.h"
static void tpm_backend_request_completed(void *opaque, int ret)
static void tpm_backend_worker_thread(gpointer data, gpointer user_data)
{
TPMBackend *s = TPM_BACKEND(opaque);
TPMIfClass *tic = TPM_IF_GET_CLASS(s->tpmif);
TPMBackend *s = TPM_BACKEND(user_data);
TPMBackendClass *k = TPM_BACKEND_GET_CLASS(s);
tic->request_completed(s->tpmif, ret);
/* no need for atomic, as long the BQL is taken */
s->cmd = NULL;
object_unref(OBJECT(s));
assert(k->handle_request != NULL);
k->handle_request(s, (TPMBackendCmd *)data);
}
static int tpm_backend_worker_thread(gpointer data)
static void tpm_backend_thread_end(TPMBackend *s)
{
TPMBackend *s = TPM_BACKEND(data);
TPMBackendClass *k = TPM_BACKEND_GET_CLASS(s);
Error *err = NULL;
k->handle_request(s, s->cmd, &err);
if (err) {
error_report_err(err);
return -1;
}
return 0;
}
void tpm_backend_finish_sync(TPMBackend *s)
{
while (s->cmd) {
aio_poll(qemu_get_aio_context(), true);
if (s->thread_pool) {
g_thread_pool_free(s->thread_pool, FALSE, TRUE);
s->thread_pool = NULL;
}
}
@@ -62,30 +44,26 @@ enum TpmType tpm_backend_get_type(TPMBackend *s)
return k->type;
}
int tpm_backend_init(TPMBackend *s, TPMIf *tpmif, Error **errp)
int tpm_backend_init(TPMBackend *s, TPMState *state)
{
if (s->tpmif) {
error_setg(errp, "TPM backend '%s' is already initialized", s->id);
return -1;
}
s->tpmif = tpmif;
object_ref(OBJECT(tpmif));
s->tpm_state = state;
s->had_startup_error = false;
return 0;
}
int tpm_backend_startup_tpm(TPMBackend *s, size_t buffersize)
int tpm_backend_startup_tpm(TPMBackend *s)
{
int res = 0;
TPMBackendClass *k = TPM_BACKEND_GET_CLASS(s);
/* terminate a running TPM */
tpm_backend_finish_sync(s);
tpm_backend_thread_end(s);
res = k->startup_tpm ? k->startup_tpm(s, buffersize) : 0;
s->thread_pool = g_thread_pool_new(tpm_backend_worker_thread, s, 1, TRUE,
NULL);
res = k->startup_tpm ? k->startup_tpm(s) : 0;
s->had_startup_error = (res != 0);
@@ -99,17 +77,7 @@ bool tpm_backend_had_startup_error(TPMBackend *s)
void tpm_backend_deliver_request(TPMBackend *s, TPMBackendCmd *cmd)
{
ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context());
if (s->cmd != NULL) {
error_report("There is a TPM request pending");
return;
}
s->cmd = cmd;
object_ref(OBJECT(s));
thread_pool_submit_aio(pool, tpm_backend_worker_thread, s,
tpm_backend_request_completed, s);
g_thread_pool_push(s->thread_pool, cmd, NULL);
}
void tpm_backend_reset(TPMBackend *s)
@@ -120,7 +88,7 @@ void tpm_backend_reset(TPMBackend *s)
k->reset(s);
}
tpm_backend_finish_sync(s);
tpm_backend_thread_end(s);
s->had_startup_error = false;
}
@@ -129,6 +97,8 @@ void tpm_backend_cancel_cmd(TPMBackend *s)
{
TPMBackendClass *k = TPM_BACKEND_GET_CLASS(s);
assert(k->cancel_cmd);
k->cancel_cmd(s);
}
@@ -152,41 +122,87 @@ TPMVersion tpm_backend_get_tpm_version(TPMBackend *s)
{
TPMBackendClass *k = TPM_BACKEND_GET_CLASS(s);
assert(k->get_tpm_version);
return k->get_tpm_version(s);
}
size_t tpm_backend_get_buffer_size(TPMBackend *s)
{
TPMBackendClass *k = TPM_BACKEND_GET_CLASS(s);
return k->get_buffer_size(s);
}
TPMInfo *tpm_backend_query_tpm(TPMBackend *s)
{
TPMInfo *info = g_new0(TPMInfo, 1);
TPMBackendClass *k = TPM_BACKEND_GET_CLASS(s);
TPMIfClass *tic = TPM_IF_GET_CLASS(s->tpmif);
info->id = g_strdup(s->id);
info->model = tic->model;
info->options = k->get_tpm_options(s);
info->model = s->fe_model;
if (k->get_tpm_options) {
info->options = k->get_tpm_options(s);
}
return info;
}
static bool tpm_backend_prop_get_opened(Object *obj, Error **errp)
{
TPMBackend *s = TPM_BACKEND(obj);
return s->opened;
}
void tpm_backend_open(TPMBackend *s, Error **errp)
{
object_property_set_bool(OBJECT(s), true, "opened", errp);
}
static void tpm_backend_prop_set_opened(Object *obj, bool value, Error **errp)
{
TPMBackend *s = TPM_BACKEND(obj);
TPMBackendClass *k = TPM_BACKEND_GET_CLASS(s);
Error *local_err = NULL;
if (value == s->opened) {
return;
}
if (!value && s->opened) {
error_setg(errp, QERR_PERMISSION_DENIED);
return;
}
if (k->opened) {
k->opened(s, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
}
s->opened = true;
}
static void tpm_backend_instance_init(Object *obj)
{
TPMBackend *s = TPM_BACKEND(obj);
object_property_add_bool(obj, "opened",
tpm_backend_prop_get_opened,
tpm_backend_prop_set_opened,
NULL);
s->fe_model = -1;
}
static void tpm_backend_instance_finalize(Object *obj)
{
TPMBackend *s = TPM_BACKEND(obj);
object_unref(OBJECT(s->tpmif));
g_free(s->id);
tpm_backend_thread_end(s);
}
static const TypeInfo tpm_backend_info = {
.name = TYPE_TPM_BACKEND,
.parent = TYPE_OBJECT,
.instance_size = sizeof(TPMBackend),
.instance_init = tpm_backend_instance_init,
.instance_finalize = tpm_backend_instance_finalize,
.class_size = sizeof(TPMBackendClass),
.abstract = true,


@@ -26,34 +26,27 @@
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/atomic.h"
#include "exec/cpu-common.h"
#include "sysemu/kvm.h"
#include "sysemu/balloon.h"
#include "trace-root.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc.h"
#include "qmp-commands.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp/qjson.h"
static QEMUBalloonEvent *balloon_event_fn;
static QEMUBalloonStatus *balloon_stat_fn;
static void *balloon_opaque;
static int balloon_inhibit_count;
static bool balloon_inhibited;
bool qemu_balloon_is_inhibited(void)
{
return atomic_read(&balloon_inhibit_count) > 0;
return balloon_inhibited;
}
void qemu_balloon_inhibit(bool state)
{
if (state) {
atomic_inc(&balloon_inhibit_count);
} else {
atomic_dec(&balloon_inhibit_count);
}
assert(atomic_read(&balloon_inhibit_count) >= 0);
balloon_inhibited = state;
}
static bool have_balloon(Error **errp)

block.c (2190 changed lines)

File diff suppressed because it is too large


@@ -1,29 +1,18 @@
block-obj-y += raw-format.o vmdk.o vpc.o
block-obj-$(CONFIG_QCOW1) += qcow.o
block-obj-$(CONFIG_VDI) += vdi.o
block-obj-$(CONFIG_CLOOP) += cloop.o
block-obj-$(CONFIG_BOCHS) += bochs.o
block-obj-$(CONFIG_VVFAT) += vvfat.o
block-obj-$(CONFIG_DMG) += dmg.o
block-obj-y += raw-format.o qcow.o vdi.o vmdk.o cloop.o bochs.o vpc.o vvfat.o dmg.o
block-obj-y += qcow2.o qcow2-refcount.o qcow2-cluster.o qcow2-snapshot.o qcow2-cache.o qcow2-bitmap.o
block-obj-$(CONFIG_QED) += qed.o qed-l2-cache.o qed-table.o qed-cluster.o
block-obj-$(CONFIG_QED) += qed-check.o
block-obj-y += qed.o qed-l2-cache.o qed-table.o qed-cluster.o
block-obj-y += qed-check.o
block-obj-y += vhdx.o vhdx-endian.o vhdx-log.o
block-obj-y += quorum.o
block-obj-y += blkdebug.o blkverify.o blkreplay.o
block-obj-$(CONFIG_PARALLELS) += parallels.o
block-obj-y += blklogwrites.o
block-obj-y += parallels.o blkdebug.o blkverify.o blkreplay.o
block-obj-y += block-backend.o snapshot.o qapi.o
block-obj-$(CONFIG_WIN32) += file-win32.o win32-aio.o
block-obj-$(CONFIG_POSIX) += file-posix.o
block-obj-$(CONFIG_LINUX_AIO) += linux-aio.o
block-obj-y += null.o mirror.o commit.o io.o create.o
block-obj-y += null.o mirror.o commit.o io.o
block-obj-y += throttle-groups.o
block-obj-$(CONFIG_LINUX) += nvme.o
block-obj-y += nbd.o nbd-client.o
block-obj-$(CONFIG_SHEEPDOG) += sheepdog.o
block-obj-y += nbd.o nbd-client.o sheepdog.o
block-obj-$(CONFIG_LIBISCSI) += iscsi.o
block-obj-$(if $(CONFIG_LIBISCSI),y,n) += iscsi-opts.o
block-obj-$(CONFIG_LIBNFS) += nfs.o
@@ -36,7 +25,7 @@ block-obj-y += accounting.o dirty-bitmap.o
block-obj-y += write-threshold.o
block-obj-y += backup.o
block-obj-$(CONFIG_REPLICATION) += replication.o
block-obj-y += throttle.o copy-on-read.o
block-obj-y += throttle.o
block-obj-y += crypto.o
@@ -54,12 +43,7 @@ gluster.o-libs := $(GLUSTERFS_LIBS)
vxhs.o-libs := $(VXHS_LIBS)
ssh.o-cflags := $(LIBSSH2_CFLAGS)
ssh.o-libs := $(LIBSSH2_LIBS)
block-obj-dmg-bz2-$(CONFIG_BZIP2) += dmg-bz2.o
block-obj-$(if $(CONFIG_DMG),m,n) += $(block-obj-dmg-bz2-y)
block-obj-$(if $(CONFIG_BZIP2),m,n) += dmg-bz2.o
dmg-bz2.o-libs := $(BZIP2_LIBS)
block-obj-$(if $(CONFIG_LZFSE),m,n) += dmg-lzfse.o
dmg-lzfse.o-libs := $(LZFSE_LIBS)
qcow.o-libs := -lz
linux-aio.o-libs := -laio
parallels.o-cflags := $(LIBXML2_CFLAGS)
parallels.o-libs := $(LIBXML2_LIBS)


@@ -94,94 +94,6 @@ void block_acct_start(BlockAcctStats *stats, BlockAcctCookie *cookie,
cookie->type = type;
}
/* block_latency_histogram_compare_func:
* Compare @key with interval [@it[0], @it[1]).
* Return: -1 if @key < @it[0]
* 0 if @key in [@it[0], @it[1])
* +1 if @key >= @it[1]
*/
static int block_latency_histogram_compare_func(const void *key, const void *it)
{
uint64_t k = *(uint64_t *)key;
uint64_t a = ((uint64_t *)it)[0];
uint64_t b = ((uint64_t *)it)[1];
return k < a ? -1 : (k < b ? 0 : 1);
}
static void block_latency_histogram_account(BlockLatencyHistogram *hist,
int64_t latency_ns)
{
uint64_t *pos;
if (hist->bins == NULL) {
/* histogram disabled */
return;
}
if (latency_ns < hist->boundaries[0]) {
hist->bins[0]++;
return;
}
if (latency_ns >= hist->boundaries[hist->nbins - 2]) {
hist->bins[hist->nbins - 1]++;
return;
}
pos = bsearch(&latency_ns, hist->boundaries, hist->nbins - 2,
sizeof(hist->boundaries[0]),
block_latency_histogram_compare_func);
assert(pos != NULL);
hist->bins[pos - hist->boundaries + 1]++;
}
int block_latency_histogram_set(BlockAcctStats *stats, enum BlockAcctType type,
uint64List *boundaries)
{
BlockLatencyHistogram *hist = &stats->latency_histogram[type];
uint64List *entry;
uint64_t *ptr;
uint64_t prev = 0;
int new_nbins = 1;
for (entry = boundaries; entry; entry = entry->next) {
if (entry->value <= prev) {
return -EINVAL;
}
new_nbins++;
prev = entry->value;
}
hist->nbins = new_nbins;
g_free(hist->boundaries);
hist->boundaries = g_new(uint64_t, hist->nbins - 1);
for (entry = boundaries, ptr = hist->boundaries; entry;
entry = entry->next, ptr++)
{
*ptr = entry->value;
}
g_free(hist->bins);
hist->bins = g_new0(uint64_t, hist->nbins);
return 0;
}
void block_latency_histograms_clear(BlockAcctStats *stats)
{
int i;
for (i = 0; i < BLOCK_MAX_IOTYPE; i++) {
BlockLatencyHistogram *hist = &stats->latency_histogram[i];
g_free(hist->bins);
g_free(hist->boundaries);
memset(hist, 0, sizeof(*hist));
}
}
static void block_account_one_io(BlockAcctStats *stats, BlockAcctCookie *cookie,
bool failed)
{
@@ -204,9 +116,6 @@ static void block_account_one_io(BlockAcctStats *stats, BlockAcctCookie *cookie,
stats->nr_ops[cookie->type]++;
}
block_latency_histogram_account(&stats->latency_histogram[cookie->type],
latency_ns);
if (!failed || stats->account_failed) {
stats->total_time_ns[cookie->type] += latency_ns;
stats->last_access_time_ns = time_ns;


@@ -27,13 +27,7 @@
#include "qemu/error-report.h"
#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)
typedef struct CowRequest {
int64_t start_byte;
int64_t end_byte;
QLIST_ENTRY(CowRequest) list;
CoQueue wait_queue; /* coroutines blocked on this request */
} CowRequest;
#define SLICE_TIME 100000000ULL /* ns */
typedef struct BackupBlockJob {
BlockJob common;
@@ -41,25 +35,18 @@ typedef struct BackupBlockJob {
/* bitmap for sync=incremental */
BdrvDirtyBitmap *sync_bitmap;
MirrorSyncMode sync_mode;
RateLimit limit;
BlockdevOnError on_source_error;
BlockdevOnError on_target_error;
CoRwlock flush_rwlock;
uint64_t len;
uint64_t bytes_read;
unsigned long *done_bitmap;
int64_t cluster_size;
bool compress;
NotifierWithReturn before_write;
QLIST_HEAD(, CowRequest) inflight_reqs;
HBitmap *copy_bitmap;
bool use_copy_range;
int64_t copy_range_size;
bool serialize_target_writes;
} BackupBlockJob;
static const BlockJobDriver backup_job_driver;
/* See if in-flight requests overlap and wait for them to complete */
static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
int64_t start,
@@ -97,101 +84,19 @@ static void cow_request_end(CowRequest *req)
qemu_co_queue_restart_all(&req->wait_queue);
}
/* Copy range to target with a bounce buffer and return the bytes copied. If
* error occurred, return a negative error number */
static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
int64_t start,
int64_t end,
bool is_write_notifier,
bool *error_is_read,
void **bounce_buffer)
{
int ret;
QEMUIOVector qiov;
BlockBackend *blk = job->common.blk;
int nbytes;
int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
int write_flags = job->serialize_target_writes ? BDRV_REQ_SERIALISING : 0;
assert(QEMU_IS_ALIGNED(start, job->cluster_size));
hbitmap_reset(job->copy_bitmap, start, job->cluster_size);
nbytes = MIN(job->cluster_size, job->len - start);
if (!*bounce_buffer) {
*bounce_buffer = blk_blockalign(blk, job->cluster_size);
}
qemu_iovec_init_buf(&qiov, *bounce_buffer, nbytes);
ret = blk_co_preadv(blk, start, qiov.size, &qiov, read_flags);
if (ret < 0) {
trace_backup_do_cow_read_fail(job, start, ret);
if (error_is_read) {
*error_is_read = true;
}
goto fail;
}
if (qemu_iovec_is_zero(&qiov)) {
ret = blk_co_pwrite_zeroes(job->target, start,
qiov.size, write_flags | BDRV_REQ_MAY_UNMAP);
} else {
ret = blk_co_pwritev(job->target, start,
qiov.size, &qiov, write_flags |
(job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0));
}
if (ret < 0) {
trace_backup_do_cow_write_fail(job, start, ret);
if (error_is_read) {
*error_is_read = false;
}
goto fail;
}
return nbytes;
fail:
hbitmap_set(job->copy_bitmap, start, job->cluster_size);
return ret;
}
/* Copy range to target and return the bytes copied. If error occurred, return a
* negative error number. */
static int coroutine_fn backup_cow_with_offload(BackupBlockJob *job,
int64_t start,
int64_t end,
bool is_write_notifier)
{
int ret;
int nr_clusters;
BlockBackend *blk = job->common.blk;
int nbytes;
int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
int write_flags = job->serialize_target_writes ? BDRV_REQ_SERIALISING : 0;
assert(QEMU_IS_ALIGNED(job->copy_range_size, job->cluster_size));
assert(QEMU_IS_ALIGNED(start, job->cluster_size));
nbytes = MIN(job->copy_range_size, end - start);
nr_clusters = DIV_ROUND_UP(nbytes, job->cluster_size);
hbitmap_reset(job->copy_bitmap, start, job->cluster_size * nr_clusters);
ret = blk_co_copy_range(blk, start, job->target, start, nbytes,
read_flags, write_flags);
if (ret < 0) {
trace_backup_do_cow_copy_range_fail(job, start, ret);
hbitmap_set(job->copy_bitmap, start, job->cluster_size * nr_clusters);
return ret;
}
return nbytes;
}
static int coroutine_fn backup_do_cow(BackupBlockJob *job,
int64_t offset, uint64_t bytes,
bool *error_is_read,
bool is_write_notifier)
{
BlockBackend *blk = job->common.blk;
CowRequest cow_request;
struct iovec iov;
QEMUIOVector bounce_qiov;
void *bounce_buffer = NULL;
int ret = 0;
int64_t start, end; /* bytes */
void *bounce_buffer = NULL;
int n; /* bytes */
qemu_co_rwlock_rdlock(&job->flush_rwlock);
@@ -203,47 +108,59 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
wait_for_overlapping_requests(job, start, end);
cow_request_begin(&cow_request, job, start, end);
while (start < end) {
int64_t dirty_end;
if (!hbitmap_get(job->copy_bitmap, start)) {
for (; start < end; start += job->cluster_size) {
if (test_bit(start / job->cluster_size, job->done_bitmap)) {
trace_backup_do_cow_skip(job, start);
start += job->cluster_size;
continue; /* already copied */
}
dirty_end = hbitmap_next_zero(job->copy_bitmap, start, (end - start));
if (dirty_end < 0) {
dirty_end = end;
}
trace_backup_do_cow_process(job, start);
if (job->use_copy_range) {
ret = backup_cow_with_offload(job, start, dirty_end,
is_write_notifier);
if (ret < 0) {
job->use_copy_range = false;
}
n = MIN(job->cluster_size, job->common.len - start);
if (!bounce_buffer) {
bounce_buffer = blk_blockalign(blk, job->cluster_size);
}
if (!job->use_copy_range) {
ret = backup_cow_with_bounce_buffer(job, start, dirty_end,
is_write_notifier,
error_is_read, &bounce_buffer);
iov.iov_base = bounce_buffer;
iov.iov_len = n;
qemu_iovec_init_external(&bounce_qiov, &iov, 1);
ret = blk_co_preadv(blk, start, bounce_qiov.size, &bounce_qiov,
is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0);
if (ret < 0) {
trace_backup_do_cow_read_fail(job, start, ret);
if (error_is_read) {
*error_is_read = true;
}
goto out;
}
if (buffer_is_zero(iov.iov_base, iov.iov_len)) {
ret = blk_co_pwrite_zeroes(job->target, start,
bounce_qiov.size, BDRV_REQ_MAY_UNMAP);
} else {
ret = blk_co_pwritev(job->target, start,
bounce_qiov.size, &bounce_qiov,
job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
}
if (ret < 0) {
break;
trace_backup_do_cow_write_fail(job, start, ret);
if (error_is_read) {
*error_is_read = false;
}
goto out;
}
set_bit(start / job->cluster_size, job->done_bitmap);
/* Publish progress, guest I/O counts as progress too. Note that the
* offset field is an opaque progress value, it is not a disk offset.
*/
start += ret;
job->bytes_read += ret;
job_progress_update(&job->common.job, ret);
ret = 0;
job->bytes_read += n;
job->common.offset += n;
}
out:
if (bounce_buffer) {
qemu_vfree(bounce_buffer);
}
@@ -271,12 +188,23 @@ static int coroutine_fn backup_before_write_notify(
return backup_do_cow(job, req->offset, req->bytes, NULL, true);
}
static void backup_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
BackupBlockJob *s = container_of(job, BackupBlockJob, common);
if (speed < 0) {
error_setg(errp, QERR_INVALID_PARAMETER, "speed");
return;
}
ratelimit_set_speed(&s->limit, speed, SLICE_TIME);
}
static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
{
BdrvDirtyBitmap *bm;
BlockDriverState *bs = blk_bs(job->common.blk);
if (ret < 0) {
if (ret < 0 || block_job_is_cancelled(&job->common)) {
/* Merge the successor back into the parent, delete nothing. */
bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
assert(bm);
@@ -287,33 +215,28 @@ static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
}
}
static void backup_commit(Job *job)
static void backup_commit(BlockJob *job)
{
BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
BackupBlockJob *s = container_of(job, BackupBlockJob, common);
if (s->sync_bitmap) {
backup_cleanup_sync_bitmap(s, 0);
}
}
static void backup_abort(Job *job)
static void backup_abort(BlockJob *job)
{
BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
BackupBlockJob *s = container_of(job, BackupBlockJob, common);
if (s->sync_bitmap) {
backup_cleanup_sync_bitmap(s, -1);
}
}
static void backup_clean(Job *job)
static void backup_clean(BlockJob *job)
{
BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
BackupBlockJob *s = container_of(job, BackupBlockJob, common);
assert(s->target);
blk_unref(s->target);
s->target = NULL;
if (s->copy_bitmap) {
hbitmap_free(s->copy_bitmap);
s->copy_bitmap = NULL;
}
}
static void backup_attached_aio_context(BlockJob *job, AioContext *aio_context)
@@ -326,8 +249,9 @@ static void backup_attached_aio_context(BlockJob *job, AioContext *aio_context)
void backup_do_checkpoint(BlockJob *job, Error **errp)
{
BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
int64_t len;
assert(block_job_driver(job) == &backup_job_driver);
assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);
if (backup_job->sync_mode != MIRROR_SYNC_MODE_NONE) {
error_setg(errp, "The backup job only supports block checkpoint in"
@@ -335,7 +259,39 @@ void backup_do_checkpoint(BlockJob *job, Error **errp)
return;
}
hbitmap_set(backup_job->copy_bitmap, 0, backup_job->len);
len = DIV_ROUND_UP(backup_job->common.len, backup_job->cluster_size);
bitmap_zero(backup_job->done_bitmap, len);
}
void backup_wait_for_overlapping_requests(BlockJob *job, int64_t offset,
uint64_t bytes)
{
BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
int64_t start, end;
assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);
start = QEMU_ALIGN_DOWN(offset, backup_job->cluster_size);
end = QEMU_ALIGN_UP(offset + bytes, backup_job->cluster_size);
wait_for_overlapping_requests(backup_job, start, end);
}
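Both wrappers above widen the requested byte range to whole clusters before delegating, so overlap tracking always operates on cluster-aligned extents. A minimal sketch of that rounding, assuming a power-of-two cluster size as QEMU_ALIGN_DOWN/QEMU_ALIGN_UP do (the function name is illustrative only):
#include <stdint.h>
#include <assert.h>
static void clusterize(int64_t offset, int64_t bytes, int64_t cluster,
                       int64_t *start, int64_t *end)
{
    assert(cluster > 0 && (cluster & (cluster - 1)) == 0);   /* power of two */
    *start = offset & ~(cluster - 1);                        /* round down   */
    *end = (offset + bytes + cluster - 1) & ~(cluster - 1);  /* round up     */
}
For example, clusterize(70000, 1000, 65536, &s, &e) yields s = 65536 and e = 131072: the 1000-byte request is tracked as one full 64 KiB cluster.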
void backup_cow_request_begin(CowRequest *req, BlockJob *job,
int64_t offset, uint64_t bytes)
{
BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
int64_t start, end;
assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);
start = QEMU_ALIGN_DOWN(offset, backup_job->cluster_size);
end = QEMU_ALIGN_UP(offset + bytes, backup_job->cluster_size);
cow_request_begin(req, backup_job, start, end);
}
void backup_cow_request_end(CowRequest *req)
{
cow_request_end(req);
}
static void backup_drain(BlockJob *job)
@@ -365,193 +321,216 @@ static BlockErrorAction backup_error_action(BackupBlockJob *job,
}
}
typedef struct {
int ret;
} BackupCompleteData;
static void backup_complete(BlockJob *job, void *opaque)
{
BackupCompleteData *data = opaque;
block_job_completed(job, data->ret);
g_free(data);
}
static bool coroutine_fn yield_and_check(BackupBlockJob *job)
{
uint64_t delay_ns;
if (job_is_cancelled(&job->common.job)) {
if (block_job_is_cancelled(&job->common)) {
return true;
}
/* We need to yield even for delay_ns = 0 so that bdrv_drain_all() can
* return. Without a yield, the VM would not reboot. */
delay_ns = block_job_ratelimit_get_delay(&job->common, job->bytes_read);
job->bytes_read = 0;
job_sleep_ns(&job->common.job, delay_ns);
/* we need to yield so that bdrv_drain_all() returns.
* (without, VM does not reboot)
*/
if (job->common.speed) {
uint64_t delay_ns = ratelimit_calculate_delay(&job->limit,
job->bytes_read);
job->bytes_read = 0;
block_job_sleep_ns(&job->common, delay_ns);
} else {
block_job_sleep_ns(&job->common, 0);
}
if (job_is_cancelled(&job->common.job)) {
if (block_job_is_cancelled(&job->common)) {
return true;
}
return false;
}
static bool bdrv_is_unallocated_range(BlockDriverState *bs,
int64_t offset, int64_t bytes)
static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
{
int64_t end = offset + bytes;
while (offset < end && !bdrv_is_allocated(bs, offset, bytes, &bytes)) {
if (bytes == 0) {
return true;
}
offset += bytes;
bytes = end - offset;
}
return offset >= end;
}
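bdrv_is_unallocated_range() has to loop because bdrv_is_allocated() only reports the length of the first run of bytes that share one allocation state. The same pattern with a caller-supplied predicate, as a self-contained sketch (the callback type and names are illustrative, not QEMU APIs):
#include <stdint.h>
#include <stdbool.h>
typedef bool (*is_allocated_fn)(void *opaque, int64_t offset, int64_t bytes,
                                int64_t *pnum);
static bool range_is_unallocated(void *opaque, is_allocated_fn is_allocated,
                                 int64_t offset, int64_t bytes)
{
    int64_t end = offset + bytes;
    int64_t pnum;
    while (offset < end && !is_allocated(opaque, offset, end - offset, &pnum)) {
        if (pnum == 0) {
            return true;      /* mirrors the original: no progress ends the scan */
        }
        offset += pnum;       /* skip the unallocated run just reported */
    }
    return offset >= end;     /* true iff no allocated byte was encountered */
}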
static int coroutine_fn backup_loop(BackupBlockJob *job)
{
int ret;
bool error_is_read;
int64_t offset;
HBitmapIter hbi;
BlockDriverState *bs = blk_bs(job->common.blk);
hbitmap_iter_init(&hbi, job->copy_bitmap, 0);
while ((offset = hbitmap_iter_next(&hbi)) != -1) {
if (job->sync_mode == MIRROR_SYNC_MODE_TOP &&
bdrv_is_unallocated_range(bs, offset, job->cluster_size))
{
hbitmap_reset(job->copy_bitmap, offset, job->cluster_size);
continue;
}
do {
if (yield_and_check(job)) {
return 0;
}
ret = backup_do_cow(job, offset,
job->cluster_size, &error_is_read, false);
if (ret < 0 && backup_error_action(job, error_is_read, -ret) ==
BLOCK_ERROR_ACTION_REPORT)
{
return ret;
}
} while (ret < 0);
}
return 0;
}
/* init copy_bitmap from sync_bitmap */
static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
{
uint64_t offset = 0;
uint64_t bytes = job->len;
while (bdrv_dirty_bitmap_next_dirty_area(job->sync_bitmap,
&offset, &bytes))
{
hbitmap_set(job->copy_bitmap, offset, bytes);
offset += bytes;
if (offset >= job->len) {
break;
}
bytes = job->len - offset;
}
/* TODO job_progress_set_remaining() would make more sense */
job_progress_update(&job->common.job,
job->len - hbitmap_count(job->copy_bitmap));
}
static int coroutine_fn backup_run(Job *job, Error **errp)
{
BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
BlockDriverState *bs = blk_bs(s->common.blk);
int ret = 0;
int clusters_per_iter;
uint32_t granularity;
int64_t offset;
int64_t cluster;
int64_t end;
int64_t last_cluster = -1;
BdrvDirtyBitmapIter *dbi;
QLIST_INIT(&s->inflight_reqs);
qemu_co_rwlock_init(&s->flush_rwlock);
granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap);
clusters_per_iter = MAX((granularity / job->cluster_size), 1);
dbi = bdrv_dirty_iter_new(job->sync_bitmap);
job_progress_set_remaining(job, s->len);
/* Find the next dirty sector(s) */
while ((offset = bdrv_dirty_iter_next(dbi)) >= 0) {
cluster = offset / job->cluster_size;
if (s->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
backup_incremental_init_copy_bitmap(s);
} else {
hbitmap_set(s->copy_bitmap, 0, s->len);
}
s->before_write.notify = backup_before_write_notify;
bdrv_add_before_write_notifier(bs, &s->before_write);
if (s->sync_mode == MIRROR_SYNC_MODE_NONE) {
/* All bits are set in copy_bitmap to allow any cluster to be copied.
* This does not actually require them to be copied. */
while (!job_is_cancelled(job)) {
/* Yield until the job is cancelled. We just let our before_write
* notify callback service CoW requests. */
job_yield(job);
/* Fake progress updates for any clusters we skipped */
if (cluster != last_cluster + 1) {
job->common.offset += ((cluster - last_cluster - 1) *
job->cluster_size);
}
} else {
ret = backup_loop(s);
for (end = cluster + clusters_per_iter; cluster < end; cluster++) {
do {
if (yield_and_check(job)) {
goto out;
}
ret = backup_do_cow(job, cluster * job->cluster_size,
job->cluster_size, &error_is_read,
false);
if ((ret < 0) &&
backup_error_action(job, error_is_read, -ret) ==
BLOCK_ERROR_ACTION_REPORT) {
goto out;
}
} while (ret < 0);
}
/* If the bitmap granularity is smaller than the backup granularity,
* we need to advance the iterator pointer to the next cluster. */
if (granularity < job->cluster_size) {
bdrv_set_dirty_iter(dbi, cluster * job->cluster_size);
}
last_cluster = cluster - 1;
}
notifier_with_return_remove(&s->before_write);
/* wait until pending backup_do_cow() calls have completed */
qemu_co_rwlock_wrlock(&s->flush_rwlock);
qemu_co_rwlock_unlock(&s->flush_rwlock);
/* Play some final catchup with the progress meter */
end = DIV_ROUND_UP(job->common.len, job->cluster_size);
if (last_cluster + 1 < end) {
job->common.offset += ((end - last_cluster - 1) * job->cluster_size);
}
out:
bdrv_dirty_iter_free(dbi);
return ret;
}
static void coroutine_fn backup_run(void *opaque)
{
BackupBlockJob *job = opaque;
BackupCompleteData *data;
BlockDriverState *bs = blk_bs(job->common.blk);
int64_t offset;
int ret = 0;
QLIST_INIT(&job->inflight_reqs);
qemu_co_rwlock_init(&job->flush_rwlock);
job->done_bitmap = bitmap_new(DIV_ROUND_UP(job->common.len,
job->cluster_size));
job->before_write.notify = backup_before_write_notify;
bdrv_add_before_write_notifier(bs, &job->before_write);
if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
while (!block_job_is_cancelled(&job->common)) {
/* Yield until the job is cancelled. We just let our before_write
* notify callback service CoW requests. */
block_job_yield(&job->common);
}
} else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
ret = backup_run_incremental(job);
} else {
/* Both FULL and TOP SYNC_MODEs require copying. */
for (offset = 0; offset < job->common.len;
offset += job->cluster_size) {
bool error_is_read;
int alloced = 0;
if (yield_and_check(job)) {
break;
}
if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
int i;
int64_t n;
/* Check to see if these blocks are already in the
* backing file. */
for (i = 0; i < job->cluster_size;) {
/* bdrv_is_allocated() only returns true/false based
* on the first set of sectors it comes across that
* are all in the same state.
* For that reason we must verify each sector in the
* backup cluster length. We end up copying more than
* needed but at some point that is always the case. */
alloced =
bdrv_is_allocated(bs, offset + i,
job->cluster_size - i, &n);
i += n;
if (alloced || n == 0) {
break;
}
}
/* If the above loop never found any sectors that are in
* the topmost image, skip this backup. */
if (alloced == 0) {
continue;
}
}
/* FULL sync mode we copy the whole drive. */
if (alloced < 0) {
ret = alloced;
} else {
ret = backup_do_cow(job, offset, job->cluster_size,
&error_is_read, false);
}
if (ret < 0) {
/* Depending on error action, fail now or retry cluster */
BlockErrorAction action =
backup_error_action(job, error_is_read, -ret);
if (action == BLOCK_ERROR_ACTION_REPORT) {
break;
} else {
offset -= job->cluster_size;
continue;
}
}
}
}
notifier_with_return_remove(&job->before_write);
/* wait until pending backup_do_cow() calls have completed */
qemu_co_rwlock_wrlock(&job->flush_rwlock);
qemu_co_rwlock_unlock(&job->flush_rwlock);
g_free(job->done_bitmap);
data = g_malloc(sizeof(*data));
data->ret = ret;
block_job_defer_to_main_loop(&job->common, backup_complete, data);
}
static const BlockJobDriver backup_job_driver = {
.job_driver = {
.instance_size = sizeof(BackupBlockJob),
.job_type = JOB_TYPE_BACKUP,
.free = block_job_free,
.user_resume = block_job_user_resume,
.drain = block_job_drain,
.run = backup_run,
.commit = backup_commit,
.abort = backup_abort,
.clean = backup_clean,
},
.instance_size = sizeof(BackupBlockJob),
.job_type = BLOCK_JOB_TYPE_BACKUP,
.start = backup_run,
.set_speed = backup_set_speed,
.commit = backup_commit,
.abort = backup_abort,
.clean = backup_clean,
.attached_aio_context = backup_attached_aio_context,
.drain = backup_drain,
};
static int64_t backup_calculate_cluster_size(BlockDriverState *target,
Error **errp)
{
int ret;
BlockDriverInfo bdi;
/*
* If there is no backing file on the target, we cannot rely on COW if our
* backup cluster size is smaller than the target cluster size. Even for
* targets with a backing file, try to avoid COW if possible.
*/
ret = bdrv_get_info(target, &bdi);
if (ret == -ENOTSUP && !target->backing) {
/* Cluster size is not defined */
warn_report("The target block device doesn't provide "
"information about the block size and it doesn't have a "
"backing file. The default block size of %u bytes is "
"used. If the actual block size of the target exceeds "
"this default, the backup may be unusable",
BACKUP_CLUSTER_SIZE_DEFAULT);
return BACKUP_CLUSTER_SIZE_DEFAULT;
} else if (ret < 0 && !target->backing) {
error_setg_errno(errp, -ret,
"Couldn't determine the cluster size of the target image, "
"which has no backing file");
error_append_hint(errp,
"Aborting, since this may create an unusable destination image\n");
return ret;
} else if (ret < 0 && target->backing) {
/* Not fatal; just trudge on ahead. */
return BACKUP_CLUSTER_SIZE_DEFAULT;
}
return MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
}
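In short, the policy above is: warn and use the 64 KiB default when the target cannot report a cluster size and has no backing file, fail when querying a backing-file-less target errors out, and otherwise take the larger of the default and the target's cluster size. A trivial restatement of the final choice (illustrative name, not the driver's code):
static int64_t choose_cluster_size(int64_t target_cluster_size)
{
    /* never smaller than BACKUP_CLUSTER_SIZE_DEFAULT (64 KiB); grow to the
     * target's cluster size when the target reports one */
    return target_cluster_size > BACKUP_CLUSTER_SIZE_DEFAULT
           ? target_cluster_size
           : BACKUP_CLUSTER_SIZE_DEFAULT;
}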
BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
BlockDriverState *target, int64_t speed,
MirrorSyncMode sync_mode, BdrvDirtyBitmap *sync_bitmap,
@@ -560,13 +539,12 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
BlockdevOnError on_target_error,
int creation_flags,
BlockCompletionFunc *cb, void *opaque,
JobTxn *txn, Error **errp)
BlockJobTxn *txn, Error **errp)
{
int64_t len;
BlockDriverInfo bdi;
BackupBlockJob *job = NULL;
int ret;
int64_t cluster_size;
HBitmap *copy_bitmap = NULL;
assert(bs);
assert(target);
@@ -628,15 +606,8 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
goto error;
}
cluster_size = backup_calculate_cluster_size(target, errp);
if (cluster_size < 0) {
goto error;
}
copy_bitmap = hbitmap_alloc(len, ctz32(cluster_size));
/* job->len is fixed, so we can't allow resize */
job = block_job_create(job_id, &backup_job_driver, txn, bs,
/* job->common.len is fixed, so we can't allow resize */
job = block_job_create(job_id, &backup_job_driver, bs,
BLK_PERM_CONSISTENT_READ,
BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD,
@@ -661,36 +632,48 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
sync_bitmap : NULL;
job->compress = compress;
/* Detect image-fleecing (and similar) schemes */
job->serialize_target_writes = bdrv_chain_contains(target, bs);
job->cluster_size = cluster_size;
job->copy_bitmap = copy_bitmap;
copy_bitmap = NULL;
job->use_copy_range = !compress; /* compression isn't supported for it */
job->copy_range_size = MIN_NON_ZERO(blk_get_max_transfer(job->common.blk),
blk_get_max_transfer(job->target));
job->copy_range_size = MAX(job->cluster_size,
QEMU_ALIGN_UP(job->copy_range_size,
job->cluster_size));
/* If there is no backing file on the target, we cannot rely on COW if our
* backup cluster size is smaller than the target cluster size. Even for
* targets with a backing file, try to avoid COW if possible. */
ret = bdrv_get_info(target, &bdi);
if (ret == -ENOTSUP && !target->backing) {
/* Cluster size is not defined */
warn_report("The target block device doesn't provide "
"information about the block size and it doesn't have a "
"backing file. The default block size of %u bytes is "
"used. If the actual block size of the target exceeds "
"this default, the backup may be unusable",
BACKUP_CLUSTER_SIZE_DEFAULT);
job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
} else if (ret < 0 && !target->backing) {
error_setg_errno(errp, -ret,
"Couldn't determine the cluster size of the target image, "
"which has no backing file");
error_append_hint(errp,
"Aborting, since this may create an unusable destination image\n");
goto error;
} else if (ret < 0 && target->backing) {
/* Not fatal; just trudge on ahead. */
job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
} else {
job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
}
/* Required permissions are already taken with target's blk_new() */
block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
&error_abort);
job->len = len;
job->common.len = len;
block_job_txn_add_job(txn, &job->common);
return &job->common;
error:
if (copy_bitmap) {
assert(!job || !job->copy_bitmap);
hbitmap_free(copy_bitmap);
}
if (sync_bitmap) {
bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
}
if (job) {
backup_clean(&job->common.job);
job_early_fail(&job->common.job);
backup_clean(&job->common);
block_job_early_fail(&job->common);
}
return NULL;


@@ -29,7 +29,7 @@
#include "qemu/config-file.h"
#include "block/block_int.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qapi/qmp/qbool.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "sysemu/qtest.h"
@@ -305,7 +305,7 @@ static void blkdebug_parse_filename(const char *filename, QDict *options,
if (c != filename) {
QString *config_path;
config_path = qstring_from_substr(filename, 0, c - filename);
config_path = qstring_from_substr(filename, 0, c - filename - 1);
qdict_put(options, "config", config_path);
}
@@ -398,11 +398,10 @@ static int blkdebug_open(BlockDriverState *bs, QDict *options, int flags,
goto out;
}
bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED |
(BDRV_REQ_FUA & bs->file->bs->supported_write_flags);
bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
((BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK) &
bs->file->bs->supported_zero_flags);
bs->supported_write_flags = BDRV_REQ_FUA &
bs->file->bs->supported_write_flags;
bs->supported_zero_flags = (BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP) &
bs->file->bs->supported_zero_flags;
ret = -EINVAL;
/* Set alignment overrides */
@@ -625,20 +624,18 @@ static int coroutine_fn blkdebug_co_pdiscard(BlockDriverState *bs,
return err;
}
return bdrv_co_pdiscard(bs->file, offset, bytes);
return bdrv_co_pdiscard(bs->file->bs, offset, bytes);
}
static int coroutine_fn blkdebug_co_block_status(BlockDriverState *bs,
bool want_zero,
int64_t offset,
int64_t bytes,
int64_t *pnum,
int64_t *map,
BlockDriverState **file)
static int64_t coroutine_fn blkdebug_co_get_block_status(
BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum,
BlockDriverState **file)
{
assert(QEMU_IS_ALIGNED(offset | bytes, bs->bl.request_alignment));
return bdrv_co_block_status_from_file(bs, want_zero, offset, bytes,
pnum, map, file);
assert(QEMU_IS_ALIGNED(sector_num | nb_sectors,
DIV_ROUND_UP(bs->bl.request_alignment,
BDRV_SECTOR_SIZE)));
return bdrv_co_get_block_status_from_file(bs, sector_num, nb_sectors,
pnum, file);
}
static void blkdebug_close(BlockDriverState *bs)
@@ -811,37 +808,52 @@ static int64_t blkdebug_getlength(BlockDriverState *bs)
return bdrv_getlength(bs->file->bs);
}
static void blkdebug_refresh_filename(BlockDriverState *bs)
static void blkdebug_refresh_filename(BlockDriverState *bs, QDict *options)
{
BDRVBlkdebugState *s = bs->opaque;
QDict *opts;
const QDictEntry *e;
int ret;
bool force_json = false;
if (!bs->file->bs->exact_filename[0]) {
return;
}
for (e = qdict_first(bs->full_open_options); e;
e = qdict_next(bs->full_open_options, e))
{
/* Real child options are under "image", but "x-image" may
* contain a filename */
for (e = qdict_first(options); e; e = qdict_next(options, e)) {
if (strcmp(qdict_entry_key(e), "config") &&
strcmp(qdict_entry_key(e), "image") &&
strcmp(qdict_entry_key(e), "x-image") &&
strcmp(qdict_entry_key(e), "driver"))
strcmp(qdict_entry_key(e), "x-image"))
{
return;
force_json = true;
break;
}
}
ret = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
"blkdebug:%s:%s",
s->config_file ?: "", bs->file->bs->exact_filename);
if (ret >= sizeof(bs->exact_filename)) {
/* An overflow makes the filename unusable, so do not report any */
bs->exact_filename[0] = 0;
if (force_json && !bs->file->bs->full_open_options) {
/* The config file cannot be recreated, so creating a plain filename
* is impossible */
return;
}
if (!force_json && bs->file->bs->exact_filename[0]) {
int ret = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
"blkdebug:%s:%s", s->config_file ?: "",
bs->file->bs->exact_filename);
if (ret >= sizeof(bs->exact_filename)) {
/* An overflow makes the filename unusable, so do not report any */
bs->exact_filename[0] = 0;
}
}
opts = qdict_new();
qdict_put_str(opts, "driver", "blkdebug");
QINCREF(bs->file->bs->full_open_options);
qdict_put(opts, "image", bs->file->bs->full_open_options);
for (e = qdict_first(options); e; e = qdict_next(options, e)) {
if (strcmp(qdict_entry_key(e), "x-image")) {
qobject_incref(qdict_entry_value(e));
qdict_put_obj(opts, qdict_entry_key(e), qdict_entry_value(e));
}
}
bs->full_open_options = opts;
}
static void blkdebug_refresh_limits(BlockDriverState *bs, Error **errp)
@@ -874,20 +886,6 @@ static int blkdebug_reopen_prepare(BDRVReopenState *reopen_state,
return 0;
}
static const char *const blkdebug_strong_runtime_opts[] = {
"config",
"inject-error.",
"set-state.",
"align",
"max-transfer",
"opt-write-zero",
"max-write-zero",
"opt-discard",
"max-discard",
NULL
};
static BlockDriver bdrv_blkdebug = {
.format_name = "blkdebug",
.protocol_name = "blkdebug",
@@ -909,7 +907,7 @@ static BlockDriver bdrv_blkdebug = {
.bdrv_co_flush_to_disk = blkdebug_co_flush,
.bdrv_co_pwrite_zeroes = blkdebug_co_pwrite_zeroes,
.bdrv_co_pdiscard = blkdebug_co_pdiscard,
.bdrv_co_block_status = blkdebug_co_block_status,
.bdrv_co_get_block_status = blkdebug_co_get_block_status,
.bdrv_debug_event = blkdebug_debug_event,
.bdrv_debug_breakpoint = blkdebug_debug_breakpoint,
@@ -917,8 +915,6 @@ static BlockDriver bdrv_blkdebug = {
= blkdebug_debug_remove_breakpoint,
.bdrv_debug_resume = blkdebug_debug_resume,
.bdrv_debug_is_suspended = blkdebug_debug_is_suspended,
.strong_runtime_opts = blkdebug_strong_runtime_opts,
};
static void bdrv_blkdebug_init(void)


@@ -1,532 +0,0 @@
/*
* Write logging blk driver based on blkverify and blkdebug.
*
* Copyright (c) 2017 Tuomas Tynkkynen <tuomas@tuxera.com>
* Copyright (c) 2018 Aapo Vienamo <aapo@tuxera.com>
* Copyright (c) 2018 Ari Sundholm <ari@tuxera.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/sockets.h" /* for EINPROGRESS on Windows */
#include "block/block_int.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/cutils.h"
#include "qemu/option.h"
/* Disk format stuff - taken from Linux drivers/md/dm-log-writes.c */
#define LOG_FLUSH_FLAG (1 << 0)
#define LOG_FUA_FLAG (1 << 1)
#define LOG_DISCARD_FLAG (1 << 2)
#define LOG_MARK_FLAG (1 << 3)
#define LOG_FLAG_MASK (LOG_FLUSH_FLAG \
| LOG_FUA_FLAG \
| LOG_DISCARD_FLAG \
| LOG_MARK_FLAG)
#define WRITE_LOG_VERSION 1ULL
#define WRITE_LOG_MAGIC 0x6a736677736872ULL
/* All fields are little-endian. */
struct log_write_super {
uint64_t magic;
uint64_t version;
uint64_t nr_entries;
uint32_t sectorsize;
} QEMU_PACKED;
struct log_write_entry {
uint64_t sector;
uint64_t nr_sectors;
uint64_t flags;
uint64_t data_len;
} QEMU_PACKED;
/* End of disk format structures. */
typedef struct {
BdrvChild *log_file;
uint32_t sectorsize;
uint32_t sectorbits;
uint64_t cur_log_sector;
uint64_t nr_entries;
uint64_t update_interval;
} BDRVBlkLogWritesState;
static QemuOptsList runtime_opts = {
.name = "blklogwrites",
.head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
.desc = {
{
.name = "log-append",
.type = QEMU_OPT_BOOL,
.help = "Append to an existing log",
},
{
.name = "log-sector-size",
.type = QEMU_OPT_SIZE,
.help = "Log sector size",
},
{
.name = "log-super-update-interval",
.type = QEMU_OPT_NUMBER,
.help = "Log superblock update interval (# of write requests)",
},
{ /* end of list */ }
},
};
static inline uint32_t blk_log_writes_log2(uint32_t value)
{
assert(value > 0);
return 31 - clz32(value);
}
static inline bool blk_log_writes_sector_size_valid(uint32_t sector_size)
{
return is_power_of_2(sector_size) &&
sector_size >= sizeof(struct log_write_super) &&
sector_size >= sizeof(struct log_write_entry) &&
sector_size < (1ull << 24);
}
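blk_log_writes_log2() assumes its argument is a nonzero power of two, which blk_log_writes_sector_size_valid() guarantees along with lower and upper bounds. A standalone sketch of both checks, using __builtin_clz in place of QEMU's clz32() and 32 bytes as the size of the larger packed header above (illustrative, not the driver's code):
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>
static inline uint32_t log2_u32(uint32_t value)
{
    assert(value > 0);
    return 31 - __builtin_clz(value);              /* stand-in for clz32() */
}
static inline bool sector_size_valid(uint32_t sector_size)
{
    return (sector_size & (sector_size - 1)) == 0  /* power of two */
           && sector_size >= 32                    /* fits both packed headers */
           && sector_size < (1u << 24);            /* same upper bound as above */
}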
static uint64_t blk_log_writes_find_cur_log_sector(BdrvChild *log,
uint32_t sector_size,
uint64_t nr_entries,
Error **errp)
{
uint64_t cur_sector = 1;
uint64_t cur_idx = 0;
uint32_t sector_bits = blk_log_writes_log2(sector_size);
struct log_write_entry cur_entry;
while (cur_idx < nr_entries) {
int read_ret = bdrv_pread(log, cur_sector << sector_bits, &cur_entry,
sizeof(cur_entry));
if (read_ret < 0) {
error_setg_errno(errp, -read_ret,
"Failed to read log entry %"PRIu64, cur_idx);
return (uint64_t)-1ull;
}
if (cur_entry.flags & ~cpu_to_le64(LOG_FLAG_MASK)) {
error_setg(errp, "Invalid flags 0x%"PRIx64" in log entry %"PRIu64,
le64_to_cpu(cur_entry.flags), cur_idx);
return (uint64_t)-1ull;
}
/* Account for the sector of the entry itself */
++cur_sector;
/*
* Account for the data of the write.
* For discards, this data is not present.
*/
if (!(cur_entry.flags & cpu_to_le64(LOG_DISCARD_FLAG))) {
cur_sector += le64_to_cpu(cur_entry.nr_sectors);
}
++cur_idx;
}
return cur_sector;
}
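The walk above advances one sector per entry header plus the entry's payload sectors (discards carry no payload), so the returned value is the first free log sector. A self-contained model of that layout, with illustrative names:
#include <stdint.h>
#include <stdbool.h>
struct entry_desc { uint64_t nr_sectors; bool is_discard; };
static uint64_t next_free_sector(const struct entry_desc *e, uint64_t nr_entries)
{
    uint64_t sector = 1;                    /* sector 0 holds the superblock */
    for (uint64_t i = 0; i < nr_entries; i++) {
        sector += 1;                        /* the entry header itself */
        if (!e[i].is_discard) {
            sector += e[i].nr_sectors;      /* payload follows the header */
        }
    }
    return sector;
}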
static int blk_log_writes_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
BDRVBlkLogWritesState *s = bs->opaque;
QemuOpts *opts;
Error *local_err = NULL;
int ret;
uint64_t log_sector_size;
bool log_append;
opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
qemu_opts_absorb_qdict(opts, options, &local_err);
if (local_err) {
ret = -EINVAL;
error_propagate(errp, local_err);
goto fail;
}
/* Open the file */
bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file, false,
&local_err);
if (local_err) {
ret = -EINVAL;
error_propagate(errp, local_err);
goto fail;
}
/* Open the log file */
s->log_file = bdrv_open_child(NULL, options, "log", bs, &child_file, false,
&local_err);
if (local_err) {
ret = -EINVAL;
error_propagate(errp, local_err);
goto fail;
}
log_append = qemu_opt_get_bool(opts, "log-append", false);
if (log_append) {
struct log_write_super log_sb = { 0, 0, 0, 0 };
if (qemu_opt_find(opts, "log-sector-size")) {
ret = -EINVAL;
error_setg(errp, "log-append and log-sector-size are mutually "
"exclusive");
goto fail_log;
}
/* Read log superblock or fake one for an empty log */
if (!bdrv_getlength(s->log_file->bs)) {
log_sb.magic = cpu_to_le64(WRITE_LOG_MAGIC);
log_sb.version = cpu_to_le64(WRITE_LOG_VERSION);
log_sb.nr_entries = cpu_to_le64(0);
log_sb.sectorsize = cpu_to_le32(BDRV_SECTOR_SIZE);
} else {
ret = bdrv_pread(s->log_file, 0, &log_sb, sizeof(log_sb));
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read log superblock");
goto fail_log;
}
}
if (log_sb.magic != cpu_to_le64(WRITE_LOG_MAGIC)) {
ret = -EINVAL;
error_setg(errp, "Invalid log superblock magic");
goto fail_log;
}
if (log_sb.version != cpu_to_le64(WRITE_LOG_VERSION)) {
ret = -EINVAL;
error_setg(errp, "Unsupported log version %"PRIu64,
le64_to_cpu(log_sb.version));
goto fail_log;
}
log_sector_size = le32_to_cpu(log_sb.sectorsize);
s->cur_log_sector = 1;
s->nr_entries = 0;
if (blk_log_writes_sector_size_valid(log_sector_size)) {
s->cur_log_sector =
blk_log_writes_find_cur_log_sector(s->log_file, log_sector_size,
le64_to_cpu(log_sb.nr_entries), &local_err);
if (local_err) {
ret = -EINVAL;
error_propagate(errp, local_err);
goto fail_log;
}
s->nr_entries = le64_to_cpu(log_sb.nr_entries);
}
} else {
log_sector_size = qemu_opt_get_size(opts, "log-sector-size",
BDRV_SECTOR_SIZE);
s->cur_log_sector = 1;
s->nr_entries = 0;
}
if (!blk_log_writes_sector_size_valid(log_sector_size)) {
ret = -EINVAL;
error_setg(errp, "Invalid log sector size %"PRIu64, log_sector_size);
goto fail_log;
}
s->sectorsize = log_sector_size;
s->sectorbits = blk_log_writes_log2(log_sector_size);
s->update_interval = qemu_opt_get_number(opts, "log-super-update-interval",
4096);
if (!s->update_interval) {
ret = -EINVAL;
error_setg(errp, "Invalid log superblock update interval %"PRIu64,
s->update_interval);
goto fail_log;
}
ret = 0;
fail_log:
if (ret < 0) {
bdrv_unref_child(bs, s->log_file);
s->log_file = NULL;
}
fail:
if (ret < 0) {
bdrv_unref_child(bs, bs->file);
bs->file = NULL;
}
qemu_opts_del(opts);
return ret;
}
static void blk_log_writes_close(BlockDriverState *bs)
{
BDRVBlkLogWritesState *s = bs->opaque;
bdrv_unref_child(bs, s->log_file);
s->log_file = NULL;
}
static int64_t blk_log_writes_getlength(BlockDriverState *bs)
{
return bdrv_getlength(bs->file->bs);
}
static void blk_log_writes_child_perm(BlockDriverState *bs, BdrvChild *c,
const BdrvChildRole *role,
BlockReopenQueue *ro_q,
uint64_t perm, uint64_t shrd,
uint64_t *nperm, uint64_t *nshrd)
{
if (!c) {
*nperm = perm & DEFAULT_PERM_PASSTHROUGH;
*nshrd = (shrd & DEFAULT_PERM_PASSTHROUGH) | DEFAULT_PERM_UNCHANGED;
return;
}
if (!strcmp(c->name, "log")) {
bdrv_format_default_perms(bs, c, role, ro_q, perm, shrd, nperm, nshrd);
} else {
bdrv_filter_default_perms(bs, c, role, ro_q, perm, shrd, nperm, nshrd);
}
}
static void blk_log_writes_refresh_limits(BlockDriverState *bs, Error **errp)
{
BDRVBlkLogWritesState *s = bs->opaque;
bs->bl.request_alignment = s->sectorsize;
}
static int coroutine_fn
blk_log_writes_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
{
return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags);
}
typedef struct BlkLogWritesFileReq {
BlockDriverState *bs;
uint64_t offset;
uint64_t bytes;
int file_flags;
QEMUIOVector *qiov;
int (*func)(struct BlkLogWritesFileReq *r);
int file_ret;
} BlkLogWritesFileReq;
typedef struct {
BlockDriverState *bs;
QEMUIOVector *qiov;
struct log_write_entry entry;
uint64_t zero_size;
int log_ret;
} BlkLogWritesLogReq;
static void coroutine_fn blk_log_writes_co_do_log(BlkLogWritesLogReq *lr)
{
BDRVBlkLogWritesState *s = lr->bs->opaque;
uint64_t cur_log_offset = s->cur_log_sector << s->sectorbits;
s->nr_entries++;
s->cur_log_sector +=
ROUND_UP(lr->qiov->size, s->sectorsize) >> s->sectorbits;
lr->log_ret = bdrv_co_pwritev(s->log_file, cur_log_offset, lr->qiov->size,
lr->qiov, 0);
/* Logging for the "write zeroes" operation */
if (lr->log_ret == 0 && lr->zero_size) {
cur_log_offset = s->cur_log_sector << s->sectorbits;
s->cur_log_sector +=
ROUND_UP(lr->zero_size, s->sectorsize) >> s->sectorbits;
lr->log_ret = bdrv_co_pwrite_zeroes(s->log_file, cur_log_offset,
lr->zero_size, 0);
}
/* Update super block on flush or every update interval */
if (lr->log_ret == 0 && ((lr->entry.flags & LOG_FLUSH_FLAG)
|| (s->nr_entries % s->update_interval == 0)))
{
struct log_write_super super = {
.magic = cpu_to_le64(WRITE_LOG_MAGIC),
.version = cpu_to_le64(WRITE_LOG_VERSION),
.nr_entries = cpu_to_le64(s->nr_entries),
.sectorsize = cpu_to_le32(s->sectorsize),
};
void *zeroes = g_malloc0(s->sectorsize - sizeof(super));
QEMUIOVector qiov;
qemu_iovec_init(&qiov, 2);
qemu_iovec_add(&qiov, &super, sizeof(super));
qemu_iovec_add(&qiov, zeroes, s->sectorsize - sizeof(super));
lr->log_ret =
bdrv_co_pwritev(s->log_file, 0, s->sectorsize, &qiov, 0);
if (lr->log_ret == 0) {
lr->log_ret = bdrv_co_flush(s->log_file->bs);
}
qemu_iovec_destroy(&qiov);
g_free(zeroes);
}
}
static void coroutine_fn blk_log_writes_co_do_file(BlkLogWritesFileReq *fr)
{
fr->file_ret = fr->func(fr);
}
static int coroutine_fn
blk_log_writes_co_log(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags,
int (*file_func)(BlkLogWritesFileReq *r),
uint64_t entry_flags, bool is_zero_write)
{
QEMUIOVector log_qiov;
size_t niov = qiov ? qiov->niov : 0;
BDRVBlkLogWritesState *s = bs->opaque;
BlkLogWritesFileReq fr = {
.bs = bs,
.offset = offset,
.bytes = bytes,
.file_flags = flags,
.qiov = qiov,
.func = file_func,
};
BlkLogWritesLogReq lr = {
.bs = bs,
.qiov = &log_qiov,
.entry = {
.sector = cpu_to_le64(offset >> s->sectorbits),
.nr_sectors = cpu_to_le64(bytes >> s->sectorbits),
.flags = cpu_to_le64(entry_flags),
.data_len = 0,
},
.zero_size = is_zero_write ? bytes : 0,
};
void *zeroes = g_malloc0(s->sectorsize - sizeof(lr.entry));
assert((1 << s->sectorbits) == s->sectorsize);
assert(bs->bl.request_alignment == s->sectorsize);
assert(QEMU_IS_ALIGNED(offset, bs->bl.request_alignment));
assert(QEMU_IS_ALIGNED(bytes, bs->bl.request_alignment));
qemu_iovec_init(&log_qiov, niov + 2);
qemu_iovec_add(&log_qiov, &lr.entry, sizeof(lr.entry));
qemu_iovec_add(&log_qiov, zeroes, s->sectorsize - sizeof(lr.entry));
if (qiov) {
qemu_iovec_concat(&log_qiov, qiov, 0, qiov->size);
}
blk_log_writes_co_do_file(&fr);
blk_log_writes_co_do_log(&lr);
qemu_iovec_destroy(&log_qiov);
g_free(zeroes);
if (lr.log_ret < 0) {
return lr.log_ret;
}
return fr.file_ret;
}
static int coroutine_fn
blk_log_writes_co_do_file_pwritev(BlkLogWritesFileReq *fr)
{
return bdrv_co_pwritev(fr->bs->file, fr->offset, fr->bytes,
fr->qiov, fr->file_flags);
}
static int coroutine_fn
blk_log_writes_co_do_file_pwrite_zeroes(BlkLogWritesFileReq *fr)
{
return bdrv_co_pwrite_zeroes(fr->bs->file, fr->offset, fr->bytes,
fr->file_flags);
}
static int coroutine_fn blk_log_writes_co_do_file_flush(BlkLogWritesFileReq *fr)
{
return bdrv_co_flush(fr->bs->file->bs);
}
static int coroutine_fn
blk_log_writes_co_do_file_pdiscard(BlkLogWritesFileReq *fr)
{
return bdrv_co_pdiscard(fr->bs->file, fr->offset, fr->bytes);
}
static int coroutine_fn
blk_log_writes_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
{
return blk_log_writes_co_log(bs, offset, bytes, qiov, flags,
blk_log_writes_co_do_file_pwritev, 0, false);
}
static int coroutine_fn
blk_log_writes_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int bytes,
BdrvRequestFlags flags)
{
return blk_log_writes_co_log(bs, offset, bytes, NULL, flags,
blk_log_writes_co_do_file_pwrite_zeroes, 0,
true);
}
static int coroutine_fn blk_log_writes_co_flush_to_disk(BlockDriverState *bs)
{
return blk_log_writes_co_log(bs, 0, 0, NULL, 0,
blk_log_writes_co_do_file_flush,
LOG_FLUSH_FLAG, false);
}
static int coroutine_fn
blk_log_writes_co_pdiscard(BlockDriverState *bs, int64_t offset, int count)
{
return blk_log_writes_co_log(bs, offset, count, NULL, 0,
blk_log_writes_co_do_file_pdiscard,
LOG_DISCARD_FLAG, false);
}
static const char *const blk_log_writes_strong_runtime_opts[] = {
"log-append",
"log-sector-size",
NULL
};
static BlockDriver bdrv_blk_log_writes = {
.format_name = "blklogwrites",
.instance_size = sizeof(BDRVBlkLogWritesState),
.bdrv_open = blk_log_writes_open,
.bdrv_close = blk_log_writes_close,
.bdrv_getlength = blk_log_writes_getlength,
.bdrv_child_perm = blk_log_writes_child_perm,
.bdrv_refresh_limits = blk_log_writes_refresh_limits,
.bdrv_co_preadv = blk_log_writes_co_preadv,
.bdrv_co_pwritev = blk_log_writes_co_pwritev,
.bdrv_co_pwrite_zeroes = blk_log_writes_co_pwrite_zeroes,
.bdrv_co_flush_to_disk = blk_log_writes_co_flush_to_disk,
.bdrv_co_pdiscard = blk_log_writes_co_pdiscard,
.bdrv_co_block_status = bdrv_co_block_status_from_file,
.is_filter = true,
.strong_runtime_opts = blk_log_writes_strong_runtime_opts,
};
static void bdrv_blk_log_writes_init(void)
{
bdrv_register(&bdrv_blk_log_writes);
}
block_init(bdrv_blk_log_writes_init);

block/blkreplay.c Normal file → Executable file

@@ -35,14 +35,15 @@ static int blkreplay_open(BlockDriverState *bs, QDict *options, int flags,
goto fail;
}
bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED;
ret = 0;
fail:
return ret;
}
static void blkreplay_close(BlockDriverState *bs)
{
}
static int64_t blkreplay_getlength(BlockDriverState *bs)
{
return bdrv_getlength(bs->file->bs);
@@ -109,7 +110,7 @@ static int coroutine_fn blkreplay_co_pdiscard(BlockDriverState *bs,
int64_t offset, int bytes)
{
uint64_t reqid = blkreplay_next_id();
int ret = bdrv_co_pdiscard(bs->file, offset, bytes);
int ret = bdrv_co_pdiscard(bs->file->bs, offset, bytes);
block_request_create(reqid, bs, qemu_coroutine_self());
qemu_coroutine_yield();
@@ -128,9 +129,11 @@ static int coroutine_fn blkreplay_co_flush(BlockDriverState *bs)
static BlockDriver bdrv_blkreplay = {
.format_name = "blkreplay",
.protocol_name = "blkreplay",
.instance_size = 0,
.bdrv_open = blkreplay_open,
.bdrv_file_open = blkreplay_open,
.bdrv_close = blkreplay_close,
.bdrv_child_perm = bdrv_filter_default_perms,
.bdrv_getlength = blkreplay_getlength,


@@ -14,7 +14,6 @@
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/cutils.h"
#include "qemu/option.h"
typedef struct {
BdrvChild *test_file;
@@ -80,7 +79,7 @@ static void blkverify_parse_filename(const char *filename, QDict *options,
}
/* TODO Implement option pass-through and set raw.filename here */
raw_path = qstring_from_substr(filename, 0, c - filename);
raw_path = qstring_from_substr(filename, 0, c - filename - 1);
qdict_put(options, "x-raw", raw_path);
/* TODO Allow multi-level nesting and set file.filename here */
@@ -141,9 +140,6 @@ static int blkverify_open(BlockDriverState *bs, QDict *options, int flags,
goto fail;
}
bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED;
ret = 0;
fail:
qemu_opts_del(opts);
@@ -281,10 +277,27 @@ static bool blkverify_recurse_is_first_non_filter(BlockDriverState *bs,
return bdrv_recurse_is_first_non_filter(s->test_file->bs, candidate);
}
static void blkverify_refresh_filename(BlockDriverState *bs)
static void blkverify_refresh_filename(BlockDriverState *bs, QDict *options)
{
BDRVBlkverifyState *s = bs->opaque;
/* bs->file->bs has already been refreshed */
bdrv_refresh_filename(s->test_file->bs);
if (bs->file->bs->full_open_options
&& s->test_file->bs->full_open_options)
{
QDict *opts = qdict_new();
qdict_put_str(opts, "driver", "blkverify");
QINCREF(bs->file->bs->full_open_options);
qdict_put(opts, "raw", bs->file->bs->full_open_options);
QINCREF(s->test_file->bs->full_open_options);
qdict_put(opts, "test", s->test_file->bs->full_open_options);
bs->full_open_options = opts;
}
if (bs->file->bs->exact_filename[0]
&& s->test_file->bs->exact_filename[0])
{
@@ -299,15 +312,6 @@ static void blkverify_refresh_filename(BlockDriverState *bs)
}
}
static char *blkverify_dirname(BlockDriverState *bs, Error **errp)
{
/* In general, there are two BDSs with different dirnames below this one;
* so there is no unique dirname we could return (unless both are equal by
* chance). Therefore, to be consistent, just always return NULL. */
error_setg(errp, "Cannot generate a base directory for blkverify nodes");
return NULL;
}
static BlockDriver bdrv_blkverify = {
.format_name = "blkverify",
.protocol_name = "blkverify",
@@ -319,7 +323,6 @@ static BlockDriver bdrv_blkverify = {
.bdrv_child_perm = bdrv_filter_default_perms,
.bdrv_getlength = blkverify_getlength,
.bdrv_refresh_filename = blkverify_refresh_filename,
.bdrv_dirname = blkverify_dirname,
.bdrv_co_preadv = blkverify_co_preadv,
.bdrv_co_pwritev = blkverify_co_pwritev,


@@ -17,10 +17,8 @@
#include "block/throttle-groups.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block.h"
#include "qapi-event.h"
#include "qemu/id.h"
#include "qemu/option.h"
#include "trace.h"
#include "migration/misc.h"
@@ -31,13 +29,6 @@
static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);
typedef struct BlockBackendAioNotifier {
void (*attached_aio_context)(AioContext *new_context, void *opaque);
void (*detach_aio_context)(void *opaque);
void *opaque;
QLIST_ENTRY(BlockBackendAioNotifier) list;
} BlockBackendAioNotifier;
struct BlockBackend {
char *name;
int refcnt;
@@ -47,7 +38,9 @@ struct BlockBackend {
QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */
BlockBackendPublic public;
DeviceState *dev; /* attached device model, if any */
void *dev; /* attached device model, if any */
bool legacy_dev; /* true if dev is not a DeviceState */
/* TODO change to DeviceState when all users are qdevified */
const BlockDevOps *dev_ops;
void *dev_opaque;
@@ -74,18 +67,10 @@ struct BlockBackend {
bool allow_write_beyond_eof;
NotifierList remove_bs_notifiers, insert_bs_notifiers;
QLIST_HEAD(, BlockBackendAioNotifier) aio_notifiers;
int quiesce_counter;
VMChangeStateEntry *vmsh;
bool force_allow_inactivate;
/* Number of in-flight aio requests. BlockDriverState also counts
* in-flight requests but aio requests can exist even when blk->root is
* NULL, so we cannot rely on its counter for that case.
* Accessed with atomic ops.
*/
unsigned int in_flight;
};
typedef struct BlockBackendAIOCB {
@@ -118,7 +103,6 @@ static void blk_root_inherit_options(int *child_flags, QDict *child_options,
abort();
}
static void blk_root_drained_begin(BdrvChild *child);
static bool blk_root_drained_poll(BdrvChild *child);
static void blk_root_drained_end(BdrvChild *child);
static void blk_root_change_media(BdrvChild *child, bool load);
@@ -253,36 +237,6 @@ static int blk_root_inactivate(BdrvChild *child)
return 0;
}
static void blk_root_attach(BdrvChild *child)
{
BlockBackend *blk = child->opaque;
BlockBackendAioNotifier *notifier;
trace_blk_root_attach(child, blk, child->bs);
QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
bdrv_add_aio_context_notifier(child->bs,
notifier->attached_aio_context,
notifier->detach_aio_context,
notifier->opaque);
}
}
static void blk_root_detach(BdrvChild *child)
{
BlockBackend *blk = child->opaque;
BlockBackendAioNotifier *notifier;
trace_blk_root_detach(child, blk, child->bs);
QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
bdrv_remove_aio_context_notifier(child->bs,
notifier->attached_aio_context,
notifier->detach_aio_context,
notifier->opaque);
}
}
static const BdrvChildRole child_root = {
.inherit_options = blk_root_inherit_options,
@@ -292,14 +246,10 @@ static const BdrvChildRole child_root = {
.get_parent_desc = blk_root_get_parent_desc,
.drained_begin = blk_root_drained_begin,
.drained_poll = blk_root_drained_poll,
.drained_end = blk_root_drained_end,
.activate = blk_root_activate,
.inactivate = blk_root_inactivate,
.attach = blk_root_attach,
.detach = blk_root_detach,
};
/*
@@ -323,14 +273,10 @@ BlockBackend *blk_new(uint64_t perm, uint64_t shared_perm)
blk->shared_perm = shared_perm;
blk_set_enable_write_cache(blk, true);
blk->on_read_error = BLOCKDEV_ON_ERROR_REPORT;
blk->on_write_error = BLOCKDEV_ON_ERROR_ENOSPC;
block_acct_init(&blk->stats);
notifier_list_init(&blk->remove_bs_notifiers);
notifier_list_init(&blk->insert_bs_notifiers);
QLIST_INIT(&blk->aio_notifiers);
QTAILQ_INSERT_TAIL(&block_backends, blk, link);
return blk;
@@ -408,7 +354,6 @@ static void blk_delete(BlockBackend *blk)
}
assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers));
assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers));
assert(QLIST_EMPTY(&blk->aio_notifiers));
QTAILQ_REMOVE(&block_backends, blk, link);
drive_info_del(blk->legacy_dinfo);
block_acct_cleanup(&blk->stats);
@@ -421,6 +366,7 @@ static void drive_info_del(DriveInfo *dinfo)
return;
}
qemu_opts_del(dinfo->opts);
g_free(dinfo->serial);
g_free(dinfo);
}
@@ -435,7 +381,6 @@ int blk_get_refcnt(BlockBackend *blk)
*/
void blk_ref(BlockBackend *blk)
{
assert(blk->refcnt > 0);
blk->refcnt++;
}
@@ -448,13 +393,7 @@ void blk_unref(BlockBackend *blk)
{
if (blk) {
assert(blk->refcnt > 0);
if (blk->refcnt > 1) {
blk->refcnt--;
} else {
blk_drain(blk);
/* blk_drain() cannot resurrect blk, nobody held a reference */
assert(blk->refcnt == 1);
blk->refcnt = 0;
if (!--blk->refcnt) {
blk_delete(blk);
}
}
@@ -776,11 +715,6 @@ void blk_remove_bs(BlockBackend *blk)
blk_update_root_state(blk);
/* bdrv_root_unref_child() will cause blk->root to become stale and may
* switch to a completion coroutine later on. Let's drain all I/O here
* to avoid that and a potential QEMU crash.
*/
blk_drain(blk);
bdrv_root_unref_child(blk->root);
blk->root = NULL;
}
@@ -834,11 +768,7 @@ void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm)
*shared_perm = blk->shared_perm;
}
/*
* Attach device model @dev to @blk.
* Return 0 on success, -EBUSY when a device model is attached already.
*/
int blk_attach_dev(BlockBackend *blk, DeviceState *dev)
static int blk_do_attach_dev(BlockBackend *blk, void *dev)
{
if (blk->dev) {
return -EBUSY;
@@ -853,16 +783,40 @@ int blk_attach_dev(BlockBackend *blk, DeviceState *dev)
blk_ref(blk);
blk->dev = dev;
blk->legacy_dev = false;
blk_iostatus_reset(blk);
return 0;
}
/*
* Attach device model @dev to @blk.
* Return 0 on success, -EBUSY when a device model is attached already.
*/
int blk_attach_dev(BlockBackend *blk, DeviceState *dev)
{
return blk_do_attach_dev(blk, dev);
}
/*
* Attach device model @dev to @blk.
* @blk must not have a device model attached already.
* TODO qdevified devices don't use this, remove when devices are qdevified
*/
void blk_attach_dev_legacy(BlockBackend *blk, void *dev)
{
if (blk_do_attach_dev(blk, dev) < 0) {
abort();
}
blk->legacy_dev = true;
}
/*
* Detach device model @dev from @blk.
* @dev must be currently attached to @blk.
*/
void blk_detach_dev(BlockBackend *blk, DeviceState *dev)
void blk_detach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
assert(blk->dev == dev);
blk->dev = NULL;
@@ -876,7 +830,8 @@ void blk_detach_dev(BlockBackend *blk, DeviceState *dev)
/*
* Return the device model attached to @blk if any, else null.
*/
DeviceState *blk_get_attached_dev(BlockBackend *blk)
void *blk_get_attached_dev(BlockBackend *blk)
/* TODO change to return DeviceState * when all users are qdevified */
{
return blk->dev;
}
@@ -885,15 +840,17 @@ DeviceState *blk_get_attached_dev(BlockBackend *blk)
* device attached to the BlockBackend. */
char *blk_get_attached_dev_id(BlockBackend *blk)
{
DeviceState *dev = blk->dev;
DeviceState *dev;
assert(!blk->legacy_dev);
dev = blk->dev;
if (!dev) {
return g_strdup("");
} else if (dev->id) {
return g_strdup(dev->id);
}
return object_get_canonical_path(OBJECT(dev)) ?: g_strdup("");
return object_get_canonical_path(OBJECT(dev));
}
/*
@@ -923,6 +880,11 @@ BlockBackend *blk_by_dev(void *dev)
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
void *opaque)
{
/* All drivers that use blk_set_dev_ops() are qdevified and we want to keep
* it that way, so we can assume blk->dev, if present, is a DeviceState if
* blk->dev_ops is set. Non-device users may use dev_ops without device. */
assert(!blk->legacy_dev);
blk->dev_ops = ops;
blk->dev_opaque = opaque;
@@ -948,6 +910,8 @@ void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp)
bool tray_was_open, tray_is_open;
Error *local_err = NULL;
assert(!blk->legacy_dev);
tray_was_open = blk_dev_is_tray_open(blk);
blk->dev_ops->change_media_cb(blk->dev_opaque, load, &local_err);
if (local_err) {
@@ -959,7 +923,8 @@ void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp)
if (tray_was_open != tray_is_open) {
char *id = blk_get_attached_dev_id(blk);
qapi_event_send_device_tray_moved(blk_name(blk), id, tray_is_open);
qapi_event_send_device_tray_moved(blk_name(blk), id, tray_is_open,
&error_abort);
g_free(id);
}
}
@@ -1175,7 +1140,7 @@ int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
typedef struct BlkRwCo {
BlockBackend *blk;
int64_t offset;
void *iobuf;
QEMUIOVector *qiov;
int ret;
BdrvRequestFlags flags;
} BlkRwCo;
@@ -1183,32 +1148,37 @@ typedef struct BlkRwCo {
static void blk_read_entry(void *opaque)
{
BlkRwCo *rwco = opaque;
QEMUIOVector *qiov = rwco->iobuf;
rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, qiov->size,
qiov, rwco->flags);
aio_wait_kick();
rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, rwco->qiov->size,
rwco->qiov, rwco->flags);
}
static void blk_write_entry(void *opaque)
{
BlkRwCo *rwco = opaque;
QEMUIOVector *qiov = rwco->iobuf;
rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, qiov->size,
qiov, rwco->flags);
aio_wait_kick();
rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, rwco->qiov->size,
rwco->qiov, rwco->flags);
}
static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
int64_t bytes, CoroutineEntry co_entry,
BdrvRequestFlags flags)
{
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
BlkRwCo rwco = {
QEMUIOVector qiov;
struct iovec iov;
BlkRwCo rwco;
iov = (struct iovec) {
.iov_base = buf,
.iov_len = bytes,
};
qemu_iovec_init_external(&qiov, &iov, 1);
rwco = (BlkRwCo) {
.blk = blk,
.offset = offset,
.iobuf = &qiov,
.qiov = &qiov,
.flags = flags,
.ret = NOT_DONE,
};
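One variant of blk_prw() shown here builds a one-element I/O vector around the flat buffer by hand, while the other uses the QEMU_IOVEC_INIT_BUF() shorthand for the same thing. The wrapping idea with plain POSIX types, as an illustrative standalone sketch (not QEMU code):
#include <sys/types.h>
#include <sys/uio.h>
static ssize_t write_flat(int fd, void *buf, size_t bytes)
{
    /* one flat buffer expressed as a single-element vector */
    struct iovec iov = { .iov_base = buf, .iov_len = bytes };
    return writev(fd, &iov, 1);
}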
@@ -1253,22 +1223,11 @@ int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
return bdrv_make_zero(blk->root, flags);
}
void blk_inc_in_flight(BlockBackend *blk)
{
atomic_inc(&blk->in_flight);
}
void blk_dec_in_flight(BlockBackend *blk)
{
atomic_dec(&blk->in_flight);
aio_wait_kick();
}
static void error_callback_bh(void *opaque)
{
struct BlockBackendAIOCB *acb = opaque;
blk_dec_in_flight(acb->blk);
bdrv_dec_in_flight(acb->common.bs);
acb->common.cb(acb->common.opaque, acb->ret);
qemu_aio_unref(acb);
}
@@ -1279,7 +1238,7 @@ BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
{
struct BlockBackendAIOCB *acb;
blk_inc_in_flight(blk);
bdrv_inc_in_flight(blk_bs(blk));
acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
acb->blk = blk;
acb->ret = ret;
@@ -1302,8 +1261,8 @@ static const AIOCBInfo blk_aio_em_aiocb_info = {
static void blk_aio_complete(BlkAioEmAIOCB *acb)
{
if (acb->has_returned) {
bdrv_dec_in_flight(acb->common.bs);
acb->common.cb(acb->common.opaque, acb->rwco.ret);
blk_dec_in_flight(acb->rwco.blk);
qemu_aio_unref(acb);
}
}
@@ -1316,19 +1275,19 @@ static void blk_aio_complete_bh(void *opaque)
}
static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
void *iobuf, CoroutineEntry co_entry,
QEMUIOVector *qiov, CoroutineEntry co_entry,
BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque)
{
BlkAioEmAIOCB *acb;
Coroutine *co;
blk_inc_in_flight(blk);
bdrv_inc_in_flight(blk_bs(blk));
acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
acb->rwco = (BlkRwCo) {
.blk = blk,
.offset = offset,
.iobuf = iobuf,
.qiov = qiov,
.flags = flags,
.ret = NOT_DONE,
};
@@ -1351,11 +1310,10 @@ static void blk_aio_read_entry(void *opaque)
{
BlkAioEmAIOCB *acb = opaque;
BlkRwCo *rwco = &acb->rwco;
QEMUIOVector *qiov = rwco->iobuf;
assert(qiov->size == acb->bytes);
assert(rwco->qiov->size == acb->bytes);
rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, acb->bytes,
qiov, rwco->flags);
rwco->qiov, rwco->flags);
blk_aio_complete(acb);
}
@@ -1363,11 +1321,10 @@ static void blk_aio_write_entry(void *opaque)
{
BlkAioEmAIOCB *acb = opaque;
BlkRwCo *rwco = &acb->rwco;
QEMUIOVector *qiov = rwco->iobuf;
assert(!qiov || qiov->size == acb->bytes);
assert(!rwco->qiov || rwco->qiov->size == acb->bytes);
rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, acb->bytes,
qiov, rwco->flags);
rwco->qiov, rwco->flags);
blk_aio_complete(acb);
}
@@ -1496,11 +1453,8 @@ int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
static void blk_ioctl_entry(void *opaque)
{
BlkRwCo *rwco = opaque;
QEMUIOVector *qiov = rwco->iobuf;
rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset,
qiov->iov[0].iov_base);
aio_wait_kick();
rwco->qiov->iov[0].iov_base);
}
int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
@@ -1513,15 +1467,24 @@ static void blk_aio_ioctl_entry(void *opaque)
BlkAioEmAIOCB *acb = opaque;
BlkRwCo *rwco = &acb->rwco;
rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset, rwco->iobuf);
rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset,
rwco->qiov->iov[0].iov_base);
blk_aio_complete(acb);
}
BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
BlockCompletionFunc *cb, void *opaque)
{
return blk_aio_prwv(blk, req, 0, buf, blk_aio_ioctl_entry, 0, cb, opaque);
QEMUIOVector qiov;
struct iovec iov;
iov = (struct iovec) {
.iov_base = buf,
.iov_len = 0,
};
qemu_iovec_init_external(&qiov, &iov, 1);
return blk_aio_prwv(blk, req, 0, &qiov, blk_aio_ioctl_entry, 0, cb, opaque);
}
int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
@@ -1531,7 +1494,7 @@ int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
return ret;
}
return bdrv_co_pdiscard(blk->root, offset, bytes);
return bdrv_co_pdiscard(blk_bs(blk), offset, bytes);
}
int blk_co_flush(BlockBackend *blk)
@@ -1547,7 +1510,6 @@ static void blk_flush_entry(void *opaque)
{
BlkRwCo *rwco = opaque;
rwco->ret = blk_co_flush(rwco->blk);
aio_wait_kick();
}
int blk_flush(BlockBackend *blk)
@@ -1557,39 +1519,14 @@ int blk_flush(BlockBackend *blk)
void blk_drain(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
if (bs) {
bdrv_drained_begin(bs);
}
/* We may have -ENOMEDIUM completions in flight */
AIO_WAIT_WHILE(blk_get_aio_context(blk),
atomic_mb_read(&blk->in_flight) > 0);
if (bs) {
bdrv_drained_end(bs);
if (blk_bs(blk)) {
bdrv_drain(blk_bs(blk));
}
}
void blk_drain_all(void)
{
BlockBackend *blk = NULL;
bdrv_drain_all_begin();
while ((blk = blk_all_next(blk)) != NULL) {
AioContext *ctx = blk_get_aio_context(blk);
aio_context_acquire(ctx);
/* We may have -ENOMEDIUM completions in flight */
AIO_WAIT_WHILE(ctx, atomic_mb_read(&blk->in_flight) > 0);
aio_context_release(ctx);
}
bdrv_drain_all_end();
bdrv_drain_all();
}
void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
@@ -1630,13 +1567,13 @@ static void send_qmp_error_event(BlockBackend *blk,
bool is_read, int error)
{
IoOperationType optype;
BlockDriverState *bs = blk_bs(blk);
optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
qapi_event_send_block_io_error(blk_name(blk), !!bs,
bs ? bdrv_get_node_name(bs) : NULL, optype,
qapi_event_send_block_io_error(blk_name(blk),
bdrv_get_node_name(blk_bs(blk)), optype,
action, blk_iostatus_is_enabled(blk),
error == ENOSPC, strerror(error));
error == ENOSPC, strerror(error),
&error_abort);
}
/* This is done by device models because, while the block layer knows
@@ -1671,7 +1608,7 @@ void blk_error_action(BlockBackend *blk, BlockErrorAction action,
}
}
bool blk_is_read_only(BlockBackend *blk)
int blk_is_read_only(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
@@ -1682,18 +1619,18 @@ bool blk_is_read_only(BlockBackend *blk)
}
}
bool blk_is_sg(BlockBackend *blk)
int blk_is_sg(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
if (!bs) {
return false;
return 0;
}
return bdrv_is_sg(bs);
}
bool blk_enable_write_cache(BlockBackend *blk)
int blk_enable_write_cache(BlockBackend *blk)
{
return blk->enable_write_cache;
}
@@ -1741,6 +1678,9 @@ void blk_eject(BlockBackend *blk, bool eject_flag)
BlockDriverState *bs = blk_bs(blk);
char *id;
/* blk_eject is only called by qdevified devices */
assert(!blk->legacy_dev);
if (bs) {
bdrv_eject(bs, eject_flag);
}
@@ -1749,7 +1689,7 @@ void blk_eject(BlockBackend *blk, bool eject_flag)
* the frontend experienced a tray event. */
id = blk_get_attached_dev_id(blk);
qapi_event_send_device_tray_moved(blk_name(blk), id,
eject_flag);
eject_flag, &error_abort);
g_free(id);
}
@@ -1764,13 +1704,6 @@ int blk_get_flags(BlockBackend *blk)
}
}
/* Returns the minimum request alignment, in bytes; guaranteed nonzero */
uint32_t blk_get_request_alignment(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
return bs ? bs->bl.request_alignment : BDRV_SECTOR_SIZE;
}
/* Returns the maximum transfer length, in bytes; guaranteed nonzero */
uint32_t blk_get_max_transfer(BlockBackend *blk)
{
@@ -1843,7 +1776,13 @@ void blk_op_unblock_all(BlockBackend *blk, Error *reason)
AioContext *blk_get_aio_context(BlockBackend *blk)
{
return bdrv_get_aio_context(blk_bs(blk));
BlockDriverState *bs = blk_bs(blk);
if (bs) {
return bdrv_get_aio_context(bs);
} else {
return qemu_get_aio_context();
}
}
static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
@@ -1872,15 +1811,8 @@ void blk_add_aio_context_notifier(BlockBackend *blk,
void (*attached_aio_context)(AioContext *new_context, void *opaque),
void (*detach_aio_context)(void *opaque), void *opaque)
{
BlockBackendAioNotifier *notifier;
BlockDriverState *bs = blk_bs(blk);
notifier = g_new(BlockBackendAioNotifier, 1);
notifier->attached_aio_context = attached_aio_context;
notifier->detach_aio_context = detach_aio_context;
notifier->opaque = opaque;
QLIST_INSERT_HEAD(&blk->aio_notifiers, notifier, list);
if (bs) {
bdrv_add_aio_context_notifier(bs, attached_aio_context,
detach_aio_context, opaque);
@@ -1893,25 +1825,12 @@ void blk_remove_aio_context_notifier(BlockBackend *blk,
void (*detach_aio_context)(void *),
void *opaque)
{
BlockBackendAioNotifier *notifier;
BlockDriverState *bs = blk_bs(blk);
if (bs) {
bdrv_remove_aio_context_notifier(bs, attached_aio_context,
detach_aio_context, opaque);
}
QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
if (notifier->attached_aio_context == attached_aio_context &&
notifier->detach_aio_context == detach_aio_context &&
notifier->opaque == opaque) {
QLIST_REMOVE(notifier, list);
g_free(notifier);
return;
}
}
abort();
}
void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify)
@@ -1981,10 +1900,7 @@ int blk_truncate(BlockBackend *blk, int64_t offset, PreallocMode prealloc,
static void blk_pdiscard_entry(void *opaque)
{
BlkRwCo *rwco = opaque;
QEMUIOVector *qiov = rwco->iobuf;
rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, qiov->size);
aio_wait_kick();
rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, rwco->qiov->size);
}
int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
@@ -2166,13 +2082,6 @@ static void blk_root_drained_begin(BdrvChild *child)
}
}
static bool blk_root_drained_poll(BdrvChild *child)
{
BlockBackend *blk = child->opaque;
assert(blk->quiesce_counter);
return !!blk->in_flight;
}
static void blk_root_drained_end(BdrvChild *child)
{
BlockBackend *blk = child->opaque;
@@ -2187,37 +2096,3 @@ static void blk_root_drained_end(BdrvChild *child)
}
}
}
void blk_register_buf(BlockBackend *blk, void *host, size_t size)
{
bdrv_register_buf(blk_bs(blk), host, size);
}
void blk_unregister_buf(BlockBackend *blk, void *host)
{
bdrv_unregister_buf(blk_bs(blk), host);
}
int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in,
BlockBackend *blk_out, int64_t off_out,
int bytes, BdrvRequestFlags read_flags,
BdrvRequestFlags write_flags)
{
int r;
r = blk_check_byte_request(blk_in, off_in, bytes);
if (r) {
return r;
}
r = blk_check_byte_request(blk_out, off_out, bytes);
if (r) {
return r;
}
return bdrv_co_copy_range(blk_in->root, off_in,
blk_out->root, off_out,
bytes, read_flags, write_flags);
}
const BdrvChild *blk_root(BlockBackend *blk)
{
return blk->root;
}

block/bochs.c

@@ -85,14 +85,14 @@ static int bochs_probe(const uint8_t *buf, int buf_size, const char *filename)
const struct bochs_header *bochs = (const void *)buf;
if (buf_size < HEADER_SIZE)
return 0;
return 0;
if (!strcmp(bochs->magic, HEADER_MAGIC) &&
!strcmp(bochs->type, REDOLOG_TYPE) &&
!strcmp(bochs->subtype, GROWING_TYPE) &&
((le32_to_cpu(bochs->version) == HEADER_VERSION) ||
(le32_to_cpu(bochs->version) == HEADER_V1)))
return 100;
!strcmp(bochs->type, REDOLOG_TYPE) &&
!strcmp(bochs->subtype, GROWING_TYPE) &&
((le32_to_cpu(bochs->version) == HEADER_VERSION) ||
(le32_to_cpu(bochs->version) == HEADER_V1)))
return 100;
return 0;
}
@@ -105,18 +105,23 @@ static int bochs_open(BlockDriverState *bs, QDict *options, int flags,
struct bochs_header bochs;
int ret;
/* No write support yet */
ret = bdrv_apply_auto_read_only(bs, NULL, errp);
if (ret < 0) {
return ret;
}
bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file,
false, errp);
if (!bs->file) {
return -EINVAL;
}
if (!bdrv_is_read_only(bs)) {
error_report("Opening bochs images without an explicit read-only=on "
"option is deprecated. Future versions will refuse to "
"open the image instead of automatically marking the "
"image read-only.");
ret = bdrv_set_read_only(bs, true, errp); /* no write support yet */
if (ret < 0) {
return ret;
}
}
ret = bdrv_pread(bs->file, 0, &bochs, sizeof(bochs));
if (ret < 0) {
return ret;
@@ -125,8 +130,8 @@ static int bochs_open(BlockDriverState *bs, QDict *options, int flags,
if (strcmp(bochs.magic, HEADER_MAGIC) ||
strcmp(bochs.type, REDOLOG_TYPE) ||
strcmp(bochs.subtype, GROWING_TYPE) ||
((le32_to_cpu(bochs.version) != HEADER_VERSION) &&
(le32_to_cpu(bochs.version) != HEADER_V1))) {
((le32_to_cpu(bochs.version) != HEADER_VERSION) &&
(le32_to_cpu(bochs.version) != HEADER_V1))) {
error_setg(errp, "Image not in Bochs format");
return -EINVAL;
}
@@ -158,7 +163,7 @@ static int bochs_open(BlockDriverState *bs, QDict *options, int flags,
}
for (i = 0; i < s->catalog_size; i++)
le32_to_cpus(&s->catalog_bitmap[i]);
le32_to_cpus(&s->catalog_bitmap[i]);
s->data_offset = le32_to_cpu(bochs.header) + (s->catalog_size * 4);
@@ -217,7 +222,7 @@ static int64_t seek_to_sector(BlockDriverState *bs, int64_t sector_num)
extent_offset = (offset % s->extent_size) / 512;
if (s->catalog_bitmap[extent_index] == 0xffffffff) {
return 0; /* not allocated */
return 0; /* not allocated */
}
bitmap_offset = s->data_offset +
@@ -232,7 +237,7 @@ static int64_t seek_to_sector(BlockDriverState *bs, int64_t sector_num)
}
if (!((bitmap_entry >> (extent_offset % 8)) & 1)) {
return 0; /* not allocated */
return 0; /* not allocated */
}
return bitmap_offset + (512 * (s->bitmap_blocks + extent_offset));

block/cloop.c

@@ -67,17 +67,23 @@ static int cloop_open(BlockDriverState *bs, QDict *options, int flags,
uint32_t offsets_size, max_compressed_block_size = 1, i;
int ret;
ret = bdrv_apply_auto_read_only(bs, NULL, errp);
if (ret < 0) {
return ret;
}
bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file,
false, errp);
if (!bs->file) {
return -EINVAL;
}
if (!bdrv_is_read_only(bs)) {
error_report("Opening cloop images without an explicit read-only=on "
"option is deprecated. Future versions will refuse to "
"open the image instead of automatically marking the "
"image read-only.");
ret = bdrv_set_read_only(bs, true, errp);
if (ret < 0) {
return ret;
}
}
/* read header */
ret = bdrv_pread(bs->file, 128, &s->block_size, 4);
if (ret < 0) {

block/commit.c

@@ -31,15 +31,16 @@ enum {
COMMIT_BUFFER_SIZE = 512 * 1024, /* in bytes */
};
#define SLICE_TIME 100000000ULL /* ns */
typedef struct CommitBlockJob {
BlockJob common;
RateLimit limit;
BlockDriverState *commit_top_bs;
BlockBackend *top;
BlockBackend *base;
BlockDriverState *base_bs;
BlockdevOnError on_error;
bool base_read_only;
bool chain_frozen;
int base_flags;
char *backing_file_str;
} CommitBlockJob;
@@ -48,9 +49,14 @@ static int coroutine_fn commit_populate(BlockBackend *bs, BlockBackend *base,
void *buf)
{
int ret = 0;
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
QEMUIOVector qiov;
struct iovec iov = {
.iov_base = buf,
.iov_len = bytes,
};
assert(bytes < SIZE_MAX);
qemu_iovec_init_external(&qiov, &iov, 1);
ret = blk_co_preadv(bs, offset, qiov.size, &qiov, 0);
if (ret < 0) {
@@ -65,100 +71,96 @@ static int coroutine_fn commit_populate(BlockBackend *bs, BlockBackend *base,
return 0;
}
static int commit_prepare(Job *job)
{
CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
typedef struct {
int ret;
} CommitCompleteData;
bdrv_unfreeze_backing_chain(s->commit_top_bs, s->base_bs);
s->chain_frozen = false;
static void commit_complete(BlockJob *job, void *opaque)
{
CommitBlockJob *s = container_of(job, CommitBlockJob, common);
CommitCompleteData *data = opaque;
BlockDriverState *top = blk_bs(s->top);
BlockDriverState *base = blk_bs(s->base);
BlockDriverState *commit_top_bs = s->commit_top_bs;
int ret = data->ret;
bool remove_commit_top_bs = false;
/* Make sure commit_top_bs and top stay around until bdrv_replace_node() */
bdrv_ref(top);
bdrv_ref(commit_top_bs);
/* Remove base node parent that still uses BLK_PERM_WRITE/RESIZE before
* the normal backing chain can be restored. */
blk_unref(s->base);
s->base = NULL;
/* FIXME: bdrv_drop_intermediate treats total failures and partial failures
* identically. Further work is needed to disambiguate these cases. */
return bdrv_drop_intermediate(s->commit_top_bs, s->base_bs,
s->backing_file_str);
}
static void commit_abort(Job *job)
{
CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
BlockDriverState *top_bs = blk_bs(s->top);
if (s->chain_frozen) {
bdrv_unfreeze_backing_chain(s->commit_top_bs, s->base_bs);
if (!block_job_is_cancelled(&s->common) && ret == 0) {
/* success */
ret = bdrv_drop_intermediate(s->commit_top_bs, base,
s->backing_file_str);
} else {
/* XXX Can (or should) we somehow keep 'consistent read' blocked even
* after the failed/cancelled commit job is gone? If we already wrote
* something to base, the intermediate images aren't valid any more. */
remove_commit_top_bs = true;
}
/* Make sure commit_top_bs and top stay around until bdrv_replace_node() */
bdrv_ref(top_bs);
bdrv_ref(s->commit_top_bs);
if (s->base) {
blk_unref(s->base);
}
/* free the blockers on the intermediate nodes so that bdrv_replace_nodes
* can succeed */
block_job_remove_all_bdrv(&s->common);
/* If bdrv_drop_intermediate() failed (or was not invoked), remove the
* commit filter driver from the backing chain now. Do this as the final
* step so that the 'consistent read' permission can be granted.
*
* XXX Can (or should) we somehow keep 'consistent read' blocked even
* after the failed/cancelled commit job is gone? If we already wrote
* something to base, the intermediate images aren't valid any more. */
bdrv_child_try_set_perm(s->commit_top_bs->backing, 0, BLK_PERM_ALL,
&error_abort);
bdrv_replace_node(s->commit_top_bs, backing_bs(s->commit_top_bs),
&error_abort);
bdrv_unref(s->commit_top_bs);
bdrv_unref(top_bs);
}
static void commit_clean(Job *job)
{
CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
/* restore base open flags here if appropriate (e.g., change the base back
* to r/o). These reopens do not need to be atomic, since we won't abort
* even on failure here */
if (s->base_read_only) {
bdrv_reopen_set_read_only(s->base_bs, true, NULL);
if (s->base_flags != bdrv_get_flags(base)) {
bdrv_reopen(base, s->base_flags, NULL);
}
g_free(s->backing_file_str);
blk_unref(s->top);
/* If there is more than one reference to the job (e.g. if called from
* block_job_finish_sync()), block_job_completed() won't free it and
* therefore the blockers on the intermediate nodes remain. This would
* cause bdrv_set_backing_hd() to fail. */
block_job_remove_all_bdrv(job);
block_job_completed(&s->common, ret);
g_free(data);
/* If bdrv_drop_intermediate() didn't already do that, remove the commit
* filter driver from the backing chain. Do this as the final step so that
* the 'consistent read' permission can be granted. */
if (remove_commit_top_bs) {
bdrv_child_try_set_perm(commit_top_bs->backing, 0, BLK_PERM_ALL,
&error_abort);
bdrv_replace_node(commit_top_bs, backing_bs(commit_top_bs),
&error_abort);
}
bdrv_unref(commit_top_bs);
bdrv_unref(top);
}
static int coroutine_fn commit_run(Job *job, Error **errp)
static void coroutine_fn commit_run(void *opaque)
{
CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
CommitBlockJob *s = opaque;
CommitCompleteData *data;
int64_t offset;
uint64_t delay_ns = 0;
int ret = 0;
int64_t n = 0; /* bytes */
void *buf = NULL;
int bytes_written = 0;
int64_t len, base_len;
int64_t base_len;
ret = len = blk_getlength(s->top);
if (len < 0) {
ret = s->common.len = blk_getlength(s->top);
if (s->common.len < 0) {
goto out;
}
job_progress_set_remaining(&s->common.job, len);
ret = base_len = blk_getlength(s->base);
if (base_len < 0) {
goto out;
}
if (base_len < len) {
ret = blk_truncate(s->base, len, PREALLOC_MODE_OFF, NULL);
if (base_len < s->common.len) {
ret = blk_truncate(s->base, s->common.len, PREALLOC_MODE_OFF, NULL);
if (ret) {
goto out;
}
@@ -166,14 +168,14 @@ static int coroutine_fn commit_run(Job *job, Error **errp)
buf = blk_blockalign(s->top, COMMIT_BUFFER_SIZE);
for (offset = 0; offset < len; offset += n) {
for (offset = 0; offset < s->common.len; offset += n) {
bool copy;
/* Note that even when no rate limit is applied we need to yield
* with no pending I/O here so that bdrv_drain_all() returns.
*/
job_sleep_ns(&s->common.job, delay_ns);
if (job_is_cancelled(&s->common.job)) {
block_job_sleep_ns(&s->common, delay_ns);
if (block_job_is_cancelled(&s->common)) {
break;
}
/* Copy if allocated above the base */
@@ -196,12 +198,10 @@ static int coroutine_fn commit_run(Job *job, Error **errp)
}
}
/* Publish progress */
job_progress_update(&s->common.job, n);
s->common.offset += n;
if (copy) {
delay_ns = block_job_ratelimit_get_delay(&s->common, n);
} else {
delay_ns = 0;
if (copy && s->common.speed) {
delay_ns = ratelimit_calculate_delay(&s->limit, n);
}
}
@@ -210,21 +210,27 @@ static int coroutine_fn commit_run(Job *job, Error **errp)
out:
qemu_vfree(buf);
return ret;
data = g_malloc(sizeof(*data));
data->ret = ret;
block_job_defer_to_main_loop(&s->common, commit_complete, data);
}
static void commit_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
CommitBlockJob *s = container_of(job, CommitBlockJob, common);
if (speed < 0) {
error_setg(errp, QERR_INVALID_PARAMETER, "speed");
return;
}
ratelimit_set_speed(&s->limit, speed, SLICE_TIME);
}
static const BlockJobDriver commit_job_driver = {
.job_driver = {
.instance_size = sizeof(CommitBlockJob),
.job_type = JOB_TYPE_COMMIT,
.free = block_job_free,
.user_resume = block_job_user_resume,
.drain = block_job_drain,
.run = commit_run,
.prepare = commit_prepare,
.abort = commit_abort,
.clean = commit_clean
},
.instance_size = sizeof(CommitBlockJob),
.job_type = BLOCK_JOB_TYPE_COMMIT,
.set_speed = commit_set_speed,
.start = commit_run,
};
static int coroutine_fn bdrv_commit_top_preadv(BlockDriverState *bs,
@@ -233,12 +239,17 @@ static int coroutine_fn bdrv_commit_top_preadv(BlockDriverState *bs,
return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}
static void bdrv_commit_top_refresh_filename(BlockDriverState *bs)
static void bdrv_commit_top_refresh_filename(BlockDriverState *bs, QDict *opts)
{
bdrv_refresh_filename(bs->backing->bs);
pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
bs->backing->bs->filename);
}
static void bdrv_commit_top_close(BlockDriverState *bs)
{
}
static void bdrv_commit_top_child_perm(BlockDriverState *bs, BdrvChild *c,
const BdrvChildRole *role,
BlockReopenQueue *reopen_queue,
@@ -254,18 +265,20 @@ static void bdrv_commit_top_child_perm(BlockDriverState *bs, BdrvChild *c,
static BlockDriver bdrv_commit_top = {
.format_name = "commit_top",
.bdrv_co_preadv = bdrv_commit_top_preadv,
.bdrv_co_block_status = bdrv_co_block_status_from_backing,
.bdrv_co_get_block_status = bdrv_co_get_block_status_from_backing,
.bdrv_refresh_filename = bdrv_commit_top_refresh_filename,
.bdrv_close = bdrv_commit_top_close,
.bdrv_child_perm = bdrv_commit_top_child_perm,
};
void commit_start(const char *job_id, BlockDriverState *bs,
BlockDriverState *base, BlockDriverState *top,
int creation_flags, int64_t speed,
BlockDriverState *base, BlockDriverState *top, int64_t speed,
BlockdevOnError on_error, const char *backing_file_str,
const char *filter_node_name, Error **errp)
{
CommitBlockJob *s;
BlockReopenQueue *reopen_queue = NULL;
int orig_base_flags;
BlockDriverState *iter;
BlockDriverState *commit_top_bs = NULL;
Error *local_err = NULL;
@@ -277,16 +290,23 @@ void commit_start(const char *job_id, BlockDriverState *bs,
return;
}
s = block_job_create(job_id, &commit_job_driver, NULL, bs, 0, BLK_PERM_ALL,
speed, creation_flags, NULL, NULL, errp);
s = block_job_create(job_id, &commit_job_driver, bs, 0, BLK_PERM_ALL,
speed, BLOCK_JOB_DEFAULT, NULL, NULL, errp);
if (!s) {
return;
}
/* convert base to r/w, if necessary */
s->base_read_only = bdrv_is_read_only(base);
if (s->base_read_only) {
if (bdrv_reopen_set_read_only(base, false, errp) != 0) {
orig_base_flags = bdrv_get_flags(base);
if (!(orig_base_flags & BDRV_O_RDWR)) {
reopen_queue = bdrv_reopen_queue(reopen_queue, base, NULL,
orig_base_flags | BDRV_O_RDWR);
}
if (reopen_queue) {
bdrv_reopen_multiple(bdrv_get_aio_context(bs), reopen_queue, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
goto fail;
}
}
@@ -338,11 +358,6 @@ void commit_start(const char *job_id, BlockDriverState *bs,
}
}
if (bdrv_freeze_backing_chain(commit_top_bs, base, errp) < 0) {
goto fail;
}
s->chain_frozen = true;
ret = block_job_add_bdrv(&s->common, "base", base, 0, BLK_PERM_ALL, errp);
if (ret < 0) {
goto fail;
@@ -358,7 +373,6 @@ void commit_start(const char *job_id, BlockDriverState *bs,
if (ret < 0) {
goto fail;
}
s->base_bs = base;
/* Required permissions are already taken with block_job_add_bdrv() */
s->top = blk_new(0, BLK_PERM_ALL);
@@ -367,29 +381,25 @@ void commit_start(const char *job_id, BlockDriverState *bs,
goto fail;
}
s->base_flags = orig_base_flags;
s->backing_file_str = g_strdup(backing_file_str);
s->on_error = on_error;
trace_commit_start(bs, base, top, s);
job_start(&s->common.job);
block_job_start(&s->common);
return;
fail:
if (s->chain_frozen) {
bdrv_unfreeze_backing_chain(commit_top_bs, base);
}
if (s->base) {
blk_unref(s->base);
}
if (s->top) {
blk_unref(s->top);
}
job_early_fail(&s->common.job);
/* commit_top_bs has to be replaced after deleting the block job,
* otherwise this would fail because of lack of permissions. */
if (commit_top_bs) {
bdrv_replace_node(commit_top_bs, top, &error_abort);
}
block_job_early_fail(&s->common);
}
@@ -403,7 +413,7 @@ int bdrv_commit(BlockDriverState *bs)
BlockDriverState *commit_top_bs = NULL;
BlockDriver *drv = bs->drv;
int64_t offset, length, backing_length;
int ro;
int ro, open_flags;
int64_t n;
int ret = 0;
uint8_t *buf = NULL;
@@ -422,9 +432,10 @@ int bdrv_commit(BlockDriverState *bs)
}
ro = bs->backing->bs->read_only;
open_flags = bs->backing->bs->open_flags;
if (ro) {
if (bdrv_reopen_set_read_only(bs->backing->bs, false, NULL)) {
if (bdrv_reopen(bs->backing->bs, open_flags | BDRV_O_RDWR, NULL)) {
return -EACCES;
}
}
@@ -534,7 +545,7 @@ ro_cleanup:
if (ro) {
/* ignoring error return here */
bdrv_reopen_set_read_only(bs->backing->bs, true, NULL);
bdrv_reopen(bs->backing->bs, open_flags & ~BDRV_O_RDWR, NULL);
}
return ret;

block/copy-on-read.c

@@ -1,166 +0,0 @@
/*
* Copy-on-read filter block driver
*
* Copyright (c) 2018 Red Hat, Inc.
*
* Author:
* Max Reitz <mreitz@redhat.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 or
* (at your option) version 3 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "block/block_int.h"
static int cor_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file, false,
errp);
if (!bs->file) {
return -EINVAL;
}
bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED |
(BDRV_REQ_FUA & bs->file->bs->supported_write_flags);
bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
((BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK) &
bs->file->bs->supported_zero_flags);
return 0;
}
#define PERM_PASSTHROUGH (BLK_PERM_CONSISTENT_READ \
| BLK_PERM_WRITE \
| BLK_PERM_RESIZE)
#define PERM_UNCHANGED (BLK_PERM_ALL & ~PERM_PASSTHROUGH)
static void cor_child_perm(BlockDriverState *bs, BdrvChild *c,
const BdrvChildRole *role,
BlockReopenQueue *reopen_queue,
uint64_t perm, uint64_t shared,
uint64_t *nperm, uint64_t *nshared)
{
if (c == NULL) {
*nperm = (perm & PERM_PASSTHROUGH) | BLK_PERM_WRITE_UNCHANGED;
*nshared = (shared & PERM_PASSTHROUGH) | PERM_UNCHANGED;
return;
}
*nperm = (perm & PERM_PASSTHROUGH) |
(c->perm & PERM_UNCHANGED);
*nshared = (shared & PERM_PASSTHROUGH) |
(c->shared_perm & PERM_UNCHANGED);
}
static int64_t cor_getlength(BlockDriverState *bs)
{
return bdrv_getlength(bs->file->bs);
}
static int coroutine_fn cor_co_truncate(BlockDriverState *bs, int64_t offset,
PreallocMode prealloc, Error **errp)
{
return bdrv_co_truncate(bs->file, offset, prealloc, errp);
}
static int coroutine_fn cor_co_preadv(BlockDriverState *bs,
uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
{
return bdrv_co_preadv(bs->file, offset, bytes, qiov,
flags | BDRV_REQ_COPY_ON_READ);
}
static int coroutine_fn cor_co_pwritev(BlockDriverState *bs,
uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
{
return bdrv_co_pwritev(bs->file, offset, bytes, qiov, flags);
}
static int coroutine_fn cor_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int bytes,
BdrvRequestFlags flags)
{
return bdrv_co_pwrite_zeroes(bs->file, offset, bytes, flags);
}
static int coroutine_fn cor_co_pdiscard(BlockDriverState *bs,
int64_t offset, int bytes)
{
return bdrv_co_pdiscard(bs->file, offset, bytes);
}
static void cor_eject(BlockDriverState *bs, bool eject_flag)
{
bdrv_eject(bs->file->bs, eject_flag);
}
static void cor_lock_medium(BlockDriverState *bs, bool locked)
{
bdrv_lock_medium(bs->file->bs, locked);
}
static bool cor_recurse_is_first_non_filter(BlockDriverState *bs,
BlockDriverState *candidate)
{
return bdrv_recurse_is_first_non_filter(bs->file->bs, candidate);
}
static BlockDriver bdrv_copy_on_read = {
.format_name = "copy-on-read",
.bdrv_open = cor_open,
.bdrv_child_perm = cor_child_perm,
.bdrv_getlength = cor_getlength,
.bdrv_co_truncate = cor_co_truncate,
.bdrv_co_preadv = cor_co_preadv,
.bdrv_co_pwritev = cor_co_pwritev,
.bdrv_co_pwrite_zeroes = cor_co_pwrite_zeroes,
.bdrv_co_pdiscard = cor_co_pdiscard,
.bdrv_eject = cor_eject,
.bdrv_lock_medium = cor_lock_medium,
.bdrv_co_block_status = bdrv_co_block_status_from_file,
.bdrv_recurse_is_first_non_filter = cor_recurse_is_first_non_filter,
.has_variable_length = true,
.is_filter = true,
};
static void bdrv_copy_on_read_init(void)
{
bdrv_register(&bdrv_copy_on_read);
}
block_init(bdrv_copy_on_read_init);

Some files were not shown because too many files have changed in this diff.