Backport CVE fixes #66
@@ -985,8 +985,7 @@ static void virtser_port_device_realize(DeviceState *dev, Error **errp)
         return;
     }

-    port->bh = qemu_bh_new_guarded(flush_queued_data_bh, port,
-                                   &dev->mem_reentrancy_guard);
+    port->bh = virtio_bh_new_guarded(dev, flush_queued_data_bh, port);
     port->elem = NULL;
 }

@@ -1463,10 +1463,8 @@ void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)

     g->ctrl_vq = virtio_get_queue(vdev, 0);
     g->cursor_vq = virtio_get_queue(vdev, 1);
-    g->ctrl_bh = qemu_bh_new_guarded(virtio_gpu_ctrl_bh, g,
-                                     &qdev->mem_reentrancy_guard);
-    g->cursor_bh = qemu_bh_new_guarded(virtio_gpu_cursor_bh, g,
-                                       &qdev->mem_reentrancy_guard);
+    g->ctrl_bh = virtio_bh_new_guarded(qdev, virtio_gpu_ctrl_bh, g);
+    g->cursor_bh = virtio_bh_new_guarded(qdev, virtio_gpu_cursor_bh, g);
     g->reset_bh = qemu_bh_new(virtio_gpu_reset_bh, g);
     qemu_cond_init(&g->reset_cond);
     QTAILQ_INIT(&g->reslist);

@@ -141,6 +141,10 @@ bool net_tx_pkt_update_sctp_checksum(struct NetTxPkt *pkt)
     uint32_t csum = 0;
     struct iovec *pl_start_frag = pkt->vec + NET_TX_PKT_PL_START_FRAG;

+    if (iov_size(pl_start_frag, pkt->payload_frags) < 8 + sizeof(csum)) {
+        return false;
+    }
+
     if (iov_from_buf(pl_start_frag, pkt->payload_frags, 8, &csum, sizeof(csum)) < sizeof(csum)) {
         return false;
     }

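Reviewer note: the added iov_size() guard just refuses to touch the SCTP checksum field (4 bytes at offset 8) when the scattered payload is too short to contain it. A minimal standalone sketch of the same check, using a simplified stand-in for QEMU's iov_size() helper (nothing below is part of the patch):

#include <stdio.h>
#include <stdint.h>
#include <sys/uio.h>

/* Simplified stand-in for QEMU's iov_size(): total bytes in the vector. */
static size_t total_iov_size(const struct iovec *iov, unsigned int cnt)
{
    size_t len = 0;
    for (unsigned int i = 0; i < cnt; i++) {
        len += iov[i].iov_len;
    }
    return len;
}

int main(void)
{
    uint32_t csum = 0;
    char payload[6] = "short";                      /* shorter than 8 + 4 bytes */
    struct iovec vec[1] = { { payload, sizeof(payload) } };

    /* Same guard as the backport: bail out before writing the checksum
     * field at offset 8 if the payload cannot even contain it. */
    if (total_iov_size(vec, 1) < 8 + sizeof(csum)) {
        puts("SCTP payload too short, checksum left untouched");
        return 0;
    }
    /* ...would zero the field at offset 8 and recompute the checksum... */
    return 0;
}
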
@@ -8466,36 +8466,26 @@ static void nvme_pci_reset(DeviceState *qdev)
     nvme_ctrl_reset(n, NVME_RESET_FUNCTION);
 }

-static void nvme_sriov_pre_write_ctrl(PCIDevice *dev, uint32_t address,
-                                      uint32_t val, int len)
+static void nvme_sriov_post_write_config(PCIDevice *dev, uint16_t old_num_vfs)
 {
     NvmeCtrl *n = NVME(dev);
     NvmeSecCtrlEntry *sctrl;
-    uint16_t sriov_cap = dev->exp.sriov_cap;
-    uint32_t off = address - sriov_cap;
-    int i, num_vfs;
+    int i;

-    if (!sriov_cap) {
-        return;
-    }
-
-    if (range_covers_byte(off, len, PCI_SRIOV_CTRL)) {
-        if (!(val & PCI_SRIOV_CTRL_VFE)) {
-            num_vfs = pci_get_word(dev->config + sriov_cap + PCI_SRIOV_NUM_VF);
-            for (i = 0; i < num_vfs; i++) {
-                sctrl = &n->sec_ctrl_list.sec[i];
-                nvme_virt_set_state(n, le16_to_cpu(sctrl->scid), false);
-            }
-        }
+    for (i = pcie_sriov_num_vfs(dev); i < old_num_vfs; i++) {
+        sctrl = &n->sec_ctrl_list.sec[i];
+        nvme_virt_set_state(n, le16_to_cpu(sctrl->scid), false);
     }
 }

 static void nvme_pci_write_config(PCIDevice *dev, uint32_t address,
                                   uint32_t val, int len)
 {
-    nvme_sriov_pre_write_ctrl(dev, address, val, len);
+    uint16_t old_num_vfs = pcie_sriov_num_vfs(dev);
+
     pci_default_write_config(dev, address, val, len);
     pcie_cap_flr_write_config(dev, address, val, len);
+    nvme_sriov_post_write_config(dev, old_num_vfs);
 }

 static const VMStateDescription nvme_vmstate = {

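Reviewer note: a rough, self-contained illustration of the reworked flow (the helper names below are made up; only the before/after VF-count comparison mirrors the patch). The VF count is sampled before the config-space write, the PCI core applies the write, and afterwards exactly the controllers whose index fell out of the new count are put offline:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins; in QEMU these would be pcie_sriov_num_vfs()
 * and nvme_virt_set_state(). */
static uint16_t fake_num_vfs = 4;

static void fake_set_offline(int index)
{
    printf("secondary controller %d -> offline\n", index);
}

int main(void)
{
    /* Sample the VF count before the guest's config-space write. */
    uint16_t old_num_vfs = fake_num_vfs;

    /* Guest clears VF Enable; the PCI core drops the VF count. */
    fake_num_vfs = 0;

    /* After the write, offline only the VFs that just disappeared,
     * as nvme_sriov_post_write_config() does in this backport. */
    for (int i = fake_num_vfs; i < old_num_vfs; i++) {
        fake_set_offline(i);
    }
    return 0;
}
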
@@ -473,6 +473,7 @@ static uint32_t sdhci_read_dataport(SDHCIState *s, unsigned size)
     }

     for (i = 0; i < size; i++) {
+        assert(s->data_count < s->buf_maxsz);
         value |= s->fifo_buffer[s->data_count] << i * 8;
         s->data_count++;
         /* check if we've read all valid data (blksize bytes) from buffer */
@@ -561,6 +562,7 @@ static void sdhci_write_dataport(SDHCIState *s, uint32_t value, unsigned size)
     }

     for (i = 0; i < size; i++) {
+        assert(s->data_count < s->buf_maxsz);
         s->fifo_buffer[s->data_count] = value & 0xFF;
         s->data_count++;
         value >>= 8;
@@ -1208,6 +1210,12 @@ sdhci_write(void *opaque, hwaddr offset, uint64_t val, unsigned size)
         if (!(s->capareg & R_SDHC_CAPAB_SDMA_MASK)) {
             value &= ~SDHC_TRNS_DMA;
         }
+
+        /* TRNMOD writes are inhibited while Command Inhibit (DAT) is true */
+        if (s->prnsts & SDHC_DATA_INHIBIT) {
+            mask |= 0xffff;
+        }
+
         MASKED_WRITE(s->trnmod, mask, value & SDHC_TRNMOD_MASK);
         MASKED_WRITE(s->cmdreg, mask >> 16, value >> 16);

@@ -1080,8 +1080,8 @@ static void virtio_crypto_device_realize(DeviceState *dev, Error **errp)
         vcrypto->vqs[i].dataq =
             virtio_add_queue(vdev, 1024, virtio_crypto_handle_dataq_bh);
         vcrypto->vqs[i].dataq_bh =
-            qemu_bh_new_guarded(virtio_crypto_dataq_bh, &vcrypto->vqs[i],
-                                &dev->mem_reentrancy_guard);
+            virtio_bh_new_guarded(dev, virtio_crypto_dataq_bh,
+                                  &vcrypto->vqs[i]);
         vcrypto->vqs[i].vcrypto = vcrypto;
     }

@@ -4137,3 +4137,13 @@ static void virtio_register_types(void)
 }

 type_init(virtio_register_types)
+
+QEMUBH *virtio_bh_new_guarded_full(DeviceState *dev,
+                                   QEMUBHFunc *cb, void *opaque,
+                                   const char *name)
+{
+    DeviceState *transport = qdev_get_parent_bus(dev)->parent;
+
+    return qemu_bh_new_full(cb, opaque, name,
+                            &transport->mem_reentrancy_guard);
+}

@@ -22,6 +22,7 @@
 #include "standard-headers/linux/virtio_config.h"
 #include "standard-headers/linux/virtio_ring.h"
 #include "qom/object.h"
+#include "block/aio.h"

 /*
  * A guest should never accept this. It implies negotiation is broken
@@ -508,4 +509,10 @@ static inline bool virtio_device_disabled(VirtIODevice *vdev)
 bool virtio_legacy_allowed(VirtIODevice *vdev);
 bool virtio_legacy_check_disabled(VirtIODevice *vdev);

+QEMUBH *virtio_bh_new_guarded_full(DeviceState *dev,
+                                   QEMUBHFunc *cb, void *opaque,
+                                   const char *name);
+#define virtio_bh_new_guarded(dev, cb, opaque) \
+        virtio_bh_new_guarded_full((dev), (cb), (opaque), (stringify(cb)))
+
 #endif
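Reviewer note: a sketch of how a virtio device's realize function is expected to call the new helper, following the call sites converted above. MyDevState and my_dev_bh are placeholder names, not part of the patch; the intent, as reflected by the helper body, is that the bottom half is guarded by the transport (parent bus) device's mem_reentrancy_guard rather than the virtio device's own.

/* Placeholder device state and BH callback, for illustration only. */
typedef struct MyDevState {
    VirtIODevice parent_obj;
    QEMUBH *bh;
} MyDevState;

static void my_dev_bh(void *opaque)
{
    /* ...process queued work for the device... */
}

static void my_dev_realize(DeviceState *dev, Error **errp)
{
    MyDevState *s = (MyDevState *)dev;

    /* Instead of qemu_bh_new_guarded(cb, s, &dev->mem_reentrancy_guard),
     * the helper resolves the transport device and uses its reentrancy
     * guard, matching the conversions in this backport. */
    s->bh = virtio_bh_new_guarded(dev, my_dev_bh, s);
}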