diff --git a/bundles.tar.xz b/bundles.tar.xz index 3fb6132e..8046a1e7 100644 --- a/bundles.tar.xz +++ b/bundles.tar.xz @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:97271682301b58874f99fa7afda93d80e26e22b879380fa67535e221e1d3a63e -size 35280 +oid sha256:8c033ad0591f42ffbdd2f4f151592dc7aa0f14f98b991623667620b6fcff7382 +size 41756 diff --git a/libvhost-user-handle-endianness-as-manda.patch b/libvhost-user-handle-endianness-as-manda.patch new file mode 100644 index 00000000..c2c6cd00 --- /dev/null +++ b/libvhost-user-handle-endianness-as-manda.patch @@ -0,0 +1,275 @@ +From: Marc Hartmayer +Date: Tue, 1 Sep 2020 17:00:19 +0200 +Subject: libvhost-user: handle endianness as mandated by the spec + +Git-commit: 2ffc54708087c6e524297957be2fc5d543abb767 +References: jsc#sle-14618 + +Since virtio existed even before it got standardized, the virtio +standard defines the following types of virtio devices: + + + legacy device (pre-virtio 1.0) + + non-legacy or VIRTIO 1.0 device + + transitional device (which can act both as legacy and non-legacy) + +Virtio 1.0 defines the fields of the virtqueues as little endian, +while legacy uses guest's native endian [1]. Currently libvhost-user +does not handle virtio endianness at all, i.e. it works only if the +native endianness matches with whatever is actually needed. That means +things break spectacularly on big-endian targets. Let us handle virtio +endianness for non-legacy as required by the virtio specification [1] +and fence legacy virtio, as there is no safe way to figure out the +needed endianness conversions for all cases. The fencing of legacy +virtio devices is done in `vu_set_features_exec`. + +[1] https://docs.oasis-open.org/virtio/virtio/v1.1/cs01/virtio-v1.1-cs01.html#x1-210003 + +Reviewed-by: Michael S. 
Tsirkin +Signed-off-by: Marc Hartmayer +Message-id: 20200901150019.29229-3-mhartmay@linux.ibm.com +Signed-off-by: Stefan Hajnoczi +Signed-off-by: Liang Yan +--- + contrib/libvhost-user/libvhost-user.c | 77 +++++++++++++++------------ + 1 file changed, 43 insertions(+), 34 deletions(-) + +diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c +index 53f16bdf082c758e795859b71d22..e2238a0400c9630be1cdab30788c 100644 +--- a/contrib/libvhost-user/libvhost-user.c ++++ b/contrib/libvhost-user/libvhost-user.c +@@ -42,6 +42,7 @@ + + #include "qemu/atomic.h" + #include "qemu/osdep.h" ++#include "qemu/bswap.h" + #include "qemu/memfd.h" + + #include "libvhost-user.h" +@@ -539,6 +540,14 @@ vu_set_features_exec(VuDev *dev, VhostUserMsg *vmsg) + DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64); + + dev->features = vmsg->payload.u64; ++ if (!vu_has_feature(dev, VIRTIO_F_VERSION_1)) { ++ /* ++ * We only support devices conforming to VIRTIO 1.0 or ++ * later ++ */ ++ vu_panic(dev, "virtio legacy devices aren't supported by libvhost-user"); ++ return false; ++ } + + if (!(dev->features & VHOST_USER_F_PROTOCOL_FEATURES)) { + vu_set_enable_all_rings(dev, true); +@@ -1074,7 +1083,7 @@ vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg) + return false; + } + +- vq->used_idx = vq->vring.used->idx; ++ vq->used_idx = lduw_le_p(&vq->vring.used->idx); + + if (vq->last_avail_idx != vq->used_idx) { + bool resume = dev->iface->queue_is_processed_in_order && +@@ -1191,7 +1200,7 @@ vu_check_queue_inflights(VuDev *dev, VuVirtq *vq) + return 0; + } + +- vq->used_idx = vq->vring.used->idx; ++ vq->used_idx = lduw_le_p(&vq->vring.used->idx); + vq->resubmit_num = 0; + vq->resubmit_list = NULL; + vq->counter = 0; +@@ -2021,13 +2030,13 @@ vu_queue_started(const VuDev *dev, const VuVirtq *vq) + static inline uint16_t + vring_avail_flags(VuVirtq *vq) + { +- return vq->vring.avail->flags; ++ return lduw_le_p(&vq->vring.avail->flags); + } + + static inline uint16_t + vring_avail_idx(VuVirtq *vq) + { +- vq->shadow_avail_idx = vq->vring.avail->idx; ++ vq->shadow_avail_idx = lduw_le_p(&vq->vring.avail->idx); + + return vq->shadow_avail_idx; + } +@@ -2035,7 +2044,7 @@ vring_avail_idx(VuVirtq *vq) + static inline uint16_t + vring_avail_ring(VuVirtq *vq, int i) + { +- return vq->vring.avail->ring[i]; ++ return lduw_le_p(&vq->vring.avail->ring[i]); + } + + static inline uint16_t +@@ -2123,12 +2132,12 @@ virtqueue_read_next_desc(VuDev *dev, struct vring_desc *desc, + int i, unsigned int max, unsigned int *next) + { + /* If this descriptor says it doesn't chain, we're done. */ +- if (!(desc[i].flags & VRING_DESC_F_NEXT)) { ++ if (!(lduw_le_p(&desc[i].flags) & VRING_DESC_F_NEXT)) { + return VIRTQUEUE_READ_DESC_DONE; + } + + /* Check they're not leading us off end of descriptors. */ +- *next = desc[i].next; ++ *next = lduw_le_p(&desc[i].next); + /* Make sure compiler knows to grab that: we don't want it changing! 
*/ + smp_wmb(); + +@@ -2171,8 +2180,8 @@ vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes, + } + desc = vq->vring.desc; + +- if (desc[i].flags & VRING_DESC_F_INDIRECT) { +- if (desc[i].len % sizeof(struct vring_desc)) { ++ if (lduw_le_p(&desc[i].flags) & VRING_DESC_F_INDIRECT) { ++ if (ldl_le_p(&desc[i].len) % sizeof(struct vring_desc)) { + vu_panic(dev, "Invalid size for indirect buffer table"); + goto err; + } +@@ -2185,8 +2194,8 @@ vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes, + + /* loop over the indirect descriptor table */ + indirect = 1; +- desc_addr = desc[i].addr; +- desc_len = desc[i].len; ++ desc_addr = ldq_le_p(&desc[i].addr); ++ desc_len = ldl_le_p(&desc[i].len); + max = desc_len / sizeof(struct vring_desc); + read_len = desc_len; + desc = vu_gpa_to_va(dev, &read_len, desc_addr); +@@ -2213,10 +2222,10 @@ vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes, + goto err; + } + +- if (desc[i].flags & VRING_DESC_F_WRITE) { +- in_total += desc[i].len; ++ if (lduw_le_p(&desc[i].flags) & VRING_DESC_F_WRITE) { ++ in_total += ldl_le_p(&desc[i].len); + } else { +- out_total += desc[i].len; ++ out_total += ldl_le_p(&desc[i].len); + } + if (in_total >= max_in_bytes && out_total >= max_out_bytes) { + goto done; +@@ -2367,7 +2376,7 @@ vring_used_flags_set_bit(VuVirtq *vq, int mask) + + flags = (uint16_t *)((char*)vq->vring.used + + offsetof(struct vring_used, flags)); +- *flags |= mask; ++ stw_le_p(flags, lduw_le_p(flags) | mask); + } + + static inline void +@@ -2377,7 +2386,7 @@ vring_used_flags_unset_bit(VuVirtq *vq, int mask) + + flags = (uint16_t *)((char*)vq->vring.used + + offsetof(struct vring_used, flags)); +- *flags &= ~mask; ++ stw_le_p(flags, lduw_le_p(flags) & ~mask); + } + + static inline void +@@ -2387,7 +2396,7 @@ vring_set_avail_event(VuVirtq *vq, uint16_t val) + return; + } + +- *((uint16_t *) &vq->vring.used->ring[vq->vring.num]) = val; ++ stw_le_p(&vq->vring.used->ring[vq->vring.num], val); + } + + void +@@ -2476,14 +2485,14 @@ vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz) + struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE]; + int rc; + +- if (desc[i].flags & VRING_DESC_F_INDIRECT) { +- if (desc[i].len % sizeof(struct vring_desc)) { ++ if (lduw_le_p(&desc[i].flags) & VRING_DESC_F_INDIRECT) { ++ if (ldl_le_p(&desc[i].len) % sizeof(struct vring_desc)) { + vu_panic(dev, "Invalid size for indirect buffer table"); + } + + /* loop over the indirect descriptor table */ +- desc_addr = desc[i].addr; +- desc_len = desc[i].len; ++ desc_addr = ldq_le_p(&desc[i].addr); ++ desc_len = ldl_le_p(&desc[i].len); + max = desc_len / sizeof(struct vring_desc); + read_len = desc_len; + desc = vu_gpa_to_va(dev, &read_len, desc_addr); +@@ -2505,10 +2514,10 @@ vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz) + + /* Collect all the descriptors */ + do { +- if (desc[i].flags & VRING_DESC_F_WRITE) { ++ if (lduw_le_p(&desc[i].flags) & VRING_DESC_F_WRITE) { + virtqueue_map_desc(dev, &in_num, iov + out_num, + VIRTQUEUE_MAX_SIZE - out_num, true, +- desc[i].addr, desc[i].len); ++ ldq_le_p(&desc[i].addr), ldl_le_p(&desc[i].len)); + } else { + if (in_num) { + vu_panic(dev, "Incorrect order for descriptors"); +@@ -2516,7 +2525,7 @@ vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz) + } + virtqueue_map_desc(dev, &out_num, iov, + VIRTQUEUE_MAX_SIZE, false, +- desc[i].addr, desc[i].len); ++ ldq_le_p(&desc[i].addr), ldl_le_p(&desc[i].len)); + } + + /* If we've got too many, 
that implies a descriptor loop. */ +@@ -2712,14 +2721,14 @@ vu_log_queue_fill(VuDev *dev, VuVirtq *vq, + max = vq->vring.num; + i = elem->index; + +- if (desc[i].flags & VRING_DESC_F_INDIRECT) { +- if (desc[i].len % sizeof(struct vring_desc)) { ++ if (lduw_le_p(&desc[i].flags) & VRING_DESC_F_INDIRECT) { ++ if (ldl_le_p(&desc[i].len) % sizeof(struct vring_desc)) { + vu_panic(dev, "Invalid size for indirect buffer table"); + } + + /* loop over the indirect descriptor table */ +- desc_addr = desc[i].addr; +- desc_len = desc[i].len; ++ desc_addr = ldq_le_p(&desc[i].addr); ++ desc_len = ldl_le_p(&desc[i].len); + max = desc_len / sizeof(struct vring_desc); + read_len = desc_len; + desc = vu_gpa_to_va(dev, &read_len, desc_addr); +@@ -2745,9 +2754,9 @@ vu_log_queue_fill(VuDev *dev, VuVirtq *vq, + return; + } + +- if (desc[i].flags & VRING_DESC_F_WRITE) { +- min = MIN(desc[i].len, len); +- vu_log_write(dev, desc[i].addr, min); ++ if (lduw_le_p(&desc[i].flags) & VRING_DESC_F_WRITE) { ++ min = MIN(ldl_le_p(&desc[i].len), len); ++ vu_log_write(dev, ldq_le_p(&desc[i].addr), min); + len -= min; + } + +@@ -2772,15 +2781,15 @@ vu_queue_fill(VuDev *dev, VuVirtq *vq, + + idx = (idx + vq->used_idx) % vq->vring.num; + +- uelem.id = elem->index; +- uelem.len = len; ++ stl_le_p(&uelem.id, elem->index); ++ stl_le_p(&uelem.len, len); + vring_used_write(dev, vq, &uelem, idx); + } + + static inline + void vring_used_idx_set(VuDev *dev, VuVirtq *vq, uint16_t val) + { +- vq->vring.used->idx = val; ++ stw_le_p(&vq->vring.used->idx, val); + vu_log_write(dev, + vq->vring.log_guest_addr + offsetof(struct vring_used, idx), + sizeof(vq->vring.used->idx)); diff --git a/qemu.changes b/qemu.changes index cc5b68e9..84b5db6e 100644 --- a/qemu.changes +++ b/qemu.changes @@ -1,3 +1,10 @@ +------------------------------------------------------------------- +Wed Nov 4 16:40:36 UTC 2020 - Liang Yan + +- Add virtio-fs support for s390x (jsc#SLE-14618) + libvhost-user-handle-endianness-as-manda.patch + virtio-add-vhost-user-fs-ccw-device.patch + ------------------------------------------------------------------- Wed Oct 14 13:05:43 UTC 2020 - Bruce Rogers diff --git a/qemu.spec b/qemu.spec index f17cd360..d8900ec8 100644 --- a/qemu.spec +++ b/qemu.spec @@ -179,6 +179,8 @@ Patch00043: docs-add-SUSE-support-statements-to-html.patch Patch00044: s390x-Fix-stringop-truncation-issue-repo.patch Patch00045: Revert-qht-constify-qht_statistics_init.patch Patch00046: qht-Revert-some-constification-in-qht.c.patch +Patch00047: libvhost-user-handle-endianness-as-manda.patch +Patch00048: virtio-add-vhost-user-fs-ccw-device.patch # Patches applied in roms/seabios/: Patch01000: seabios-use-python2-explicitly-as-needed.patch Patch01001: seabios-switch-to-python3-as-needed.patch @@ -949,6 +951,8 @@ This package provides a service file for starting and stopping KSM. %patch00044 -p1 %patch00045 -p1 %patch00046 -p1 +%patch00047 -p1 +%patch00048 -p1 %patch01000 -p1 %patch01001 -p1 %patch01002 -p1 diff --git a/virtio-add-vhost-user-fs-ccw-device.patch b/virtio-add-vhost-user-fs-ccw-device.patch new file mode 100644 index 00000000..65da1ed8 --- /dev/null +++ b/virtio-add-vhost-user-fs-ccw-device.patch @@ -0,0 +1,99 @@ +From: Halil Pasic +Date: Tue, 1 Sep 2020 17:00:18 +0200 +Subject: virtio: add vhost-user-fs-ccw device + +Git-commit: bd0bbb9aba2afbc2ea24b0475be04f795468b381 +References: jsc#sle-14618 + +Wire up the CCW device for vhost-user-fs. 
+ +Reviewed-by: Cornelia Huck +Signed-off-by: Halil Pasic +Message-id: 20200901150019.29229-2-mhartmay@linux.ibm.com +Signed-off-by: Stefan Hajnoczi +Signed-off-by: Liang Yan +--- + hw/s390x/vhost-user-fs-ccw.c | 75 ++++++++++++++++++++++++++++++++++++ + 1 file changed, 75 insertions(+) + +diff --git a/hw/s390x/vhost-user-fs-ccw.c b/hw/s390x/vhost-user-fs-ccw.c +new file mode 100644 +index 0000000000000000000000000000000000000000..6c6f2692930110aebf8038061b259407238942bf +--- /dev/null ++++ b/hw/s390x/vhost-user-fs-ccw.c +@@ -0,0 +1,75 @@ ++/* ++ * virtio ccw vhost-user-fs implementation ++ * ++ * Copyright 2020 IBM Corp. ++ * ++ * This work is licensed under the terms of the GNU GPL, version 2 or (at ++ * your option) any later version. See the COPYING file in the top-level ++ * directory. ++ */ ++#include "qemu/osdep.h" ++#include "hw/qdev-properties.h" ++#include "qapi/error.h" ++#include "hw/virtio/vhost-user-fs.h" ++#include "virtio-ccw.h" ++ ++typedef struct VHostUserFSCcw { ++ VirtioCcwDevice parent_obj; ++ VHostUserFS vdev; ++} VHostUserFSCcw; ++ ++#define TYPE_VHOST_USER_FS_CCW "vhost-user-fs-ccw" ++#define VHOST_USER_FS_CCW(obj) \ ++ OBJECT_CHECK(VHostUserFSCcw, (obj), TYPE_VHOST_USER_FS_CCW) ++ ++ ++static Property vhost_user_fs_ccw_properties[] = { ++ DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, ++ VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), ++ DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, ++ VIRTIO_CCW_MAX_REV), ++ DEFINE_PROP_END_OF_LIST(), ++}; ++ ++static void vhost_user_fs_ccw_realize(VirtioCcwDevice *ccw_dev, Error **errp) ++{ ++ VHostUserFSCcw *dev = VHOST_USER_FS_CCW(ccw_dev); ++ DeviceState *vdev = DEVICE(&dev->vdev); ++ ++ qdev_realize(vdev, BUS(&ccw_dev->bus), errp); ++} ++ ++static void vhost_user_fs_ccw_instance_init(Object *obj) ++{ ++ VHostUserFSCcw *dev = VHOST_USER_FS_CCW(obj); ++ VirtioCcwDevice *ccw_dev = VIRTIO_CCW_DEVICE(obj); ++ ++ ccw_dev->force_revision_1 = true; ++ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), ++ TYPE_VHOST_USER_FS); ++} ++ ++static void vhost_user_fs_ccw_class_init(ObjectClass *klass, void *data) ++{ ++ DeviceClass *dc = DEVICE_CLASS(klass); ++ VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); ++ ++ k->realize = vhost_user_fs_ccw_realize; ++ device_class_set_props(dc, vhost_user_fs_ccw_properties); ++ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); ++} ++ ++static const TypeInfo vhost_user_fs_ccw = { ++ .name = TYPE_VHOST_USER_FS_CCW, ++ .parent = TYPE_VIRTIO_CCW_DEVICE, ++ .instance_size = sizeof(VHostUserFSCcw), ++ .instance_init = vhost_user_fs_ccw_instance_init, ++ .class_init = vhost_user_fs_ccw_class_init, ++}; ++ ++static void vhost_user_fs_ccw_register(void) ++{ ++ type_register_static(&vhost_user_fs_ccw); ++} ++ ++type_init(vhost_user_fs_ccw_register)
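
A note on the helpers the endianness patch pulls in from "qemu/bswap.h": lduw_le_p, ldl_le_p, and ldq_le_p load a 16-, 32-, or 64-bit little-endian value through a (possibly unaligned) pointer, and stw_le_p/stl_le_p are the matching stores, so every vring access in the patch becomes independent of host byte order. A minimal sketch of the 16-bit pair under that assumption (the sketch_* names are illustrative, not QEMU's implementation):

    #include <stdint.h>
    #include <string.h>

    /* Load a little-endian u16 regardless of host byte order. */
    static inline uint16_t sketch_lduw_le_p(const void *ptr)
    {
        uint8_t b[2];
        memcpy(b, ptr, sizeof(b));             /* tolerates unaligned ptr */
        return (uint16_t)(b[0] | ((uint16_t)b[1] << 8));
    }

    /* Store a u16 as little-endian regardless of host byte order. */
    static inline void sketch_stw_le_p(void *ptr, uint16_t v)
    {
        uint8_t b[2] = { (uint8_t)(v & 0xff), (uint8_t)(v >> 8) };
        memcpy(ptr, b, sizeof(b));
    }

On a little-endian host this reduces to a plain load or store; on a big-endian host (the s390x case this series targets) it performs exactly the byte swap the virtio 1.0 spec requires.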
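
Which helper each hunk uses follows directly from the split-virtqueue descriptor layout in the virtio 1.1 spec, where every field is little-endian on the wire. A sketch of the layout, annotated with the matching load helper (field widths per the spec; the sketch_ name is illustrative):

    #include <stdint.h>

    struct sketch_vring_desc {
        uint64_t addr;   /* guest-physical buffer address -> ldq_le_p  */
        uint32_t len;    /* buffer length in bytes        -> ldl_le_p  */
        uint16_t flags;  /* VRING_DESC_F_*                -> lduw_le_p */
        uint16_t next;   /* index of the chained desc     -> lduw_le_p */
    };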
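
The fence added in vu_set_features_exec hinges on VIRTIO_F_VERSION_1, which the spec assigns to feature bit 32: a device whose negotiated features lack that bit is by definition a legacy device with native-endian vrings, and libvhost-user now panics instead of guessing the needed conversions. A sketch of the underlying test (illustrative names, mirroring what vu_has_feature checks):

    #include <stdbool.h>
    #include <stdint.h>

    enum { SKETCH_VIRTIO_F_VERSION_1 = 32 };   /* feature bit 32 per the spec */

    /* True iff feature bit fbit was negotiated. */
    static bool sketch_has_feature(uint64_t features, unsigned int fbit)
    {
        return (features & (1ULL << fbit)) != 0;
    }

    /*
     * Legacy fence, as vu_set_features_exec now applies it:
     *
     *     if (!sketch_has_feature(dev_features, SKETCH_VIRTIO_F_VERSION_1)) {
     *         ... refuse the device ...
     *     }
     */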
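
On the ccw side, note that vhost_user_fs_ccw_instance_init sets force_revision_1, which pins the virtio-ccw transport to revision 1 and therefore to the virtio-1, little-endian path the first patch guarantees; the two patches only make sense together, which is why both land in this change. Assuming the usual vhost-user-fs device options (chardev, tag) and a virtiofsd daemon serving a unix socket, attaching a share to an s390x guest should mirror the documented vhost-user-fs-pci invocation with only the device name swapped, along the lines of "-chardev socket,id=char0,path=/tmp/vhostqemu -device vhost-user-fs-ccw,chardev=char0,tag=myfs", together with a shared memory backend (share=on), which vhost-user requires so the daemon can map guest RAM. The socket path, chardev id, and tag here are placeholders, not values mandated by the patches.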