diff --git a/_constraints b/_constraints index aa386187..5172f149 100644 --- a/_constraints +++ b/_constraints @@ -3,8 +3,8 @@ qemu - qemu-linux-user - qemu-testsuite + qemu:linux-user + qemu:testsuite @@ -30,7 +30,7 @@ i586 x86_64 ppc64le - qemu-testsuite + qemu:testsuite @@ -42,7 +42,7 @@ ppc64 - qemu-testsuite + qemu:testsuite diff --git a/block-Add-bdrv_co_get_self_request.patch b/block-Add-bdrv_co_get_self_request.patch new file mode 100644 index 00000000..a547488c --- /dev/null +++ b/block-Add-bdrv_co_get_self_request.patch @@ -0,0 +1,57 @@ +From: Max Reitz +Date: Fri, 1 Nov 2019 16:25:09 +0100 +Subject: block: Add bdrv_co_get_self_request() + +Git-commit: c28107e9e55b11cd35cf3dc2505e3e69d10dcf13 + +Cc: qemu-stable@nongnu.org +Signed-off-by: Max Reitz +Message-id: 20191101152510.11719-3-mreitz@redhat.com +Signed-off-by: Max Reitz +Signed-off-by: Bruce Rogers +--- + block/io.c | 18 ++++++++++++++++++ + include/block/block_int.h | 1 + + 2 files changed, 19 insertions(+) + +diff --git a/block/io.c b/block/io.c +index d9f632f450b744515d3f91d2aa26..0366daf27f4a2133148716135d63 100644 +--- a/block/io.c ++++ b/block/io.c +@@ -721,6 +721,24 @@ static bool is_request_serialising_and_aligned(BdrvTrackedRequest *req) + (req->bytes == req->overlap_bytes); + } + ++/** ++ * Return the tracked request on @bs for the current coroutine, or ++ * NULL if there is none. ++ */ ++BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs) ++{ ++ BdrvTrackedRequest *req; ++ Coroutine *self = qemu_coroutine_self(); ++ ++ QLIST_FOREACH(req, &bs->tracked_requests, list) { ++ if (req->co == self) { ++ return req; ++ } ++ } ++ ++ return NULL; ++} ++ + /** + * Round a region to cluster boundaries + */ +diff --git a/include/block/block_int.h b/include/block/block_int.h +index 4465b022424c23aea82942547cc3..05ee6b4866f84a9ab9ba0dcda5da 100644 +--- a/include/block/block_int.h ++++ b/include/block/block_int.h +@@ -964,6 +964,7 @@ void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent); + + bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self); + void bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align); ++BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs); + + int get_tmp_filename(char *filename, int size); + BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size, diff --git a/block-Make-wait-mark-serialising-request.patch b/block-Make-wait-mark-serialising-request.patch new file mode 100644 index 00000000..56b8f680 --- /dev/null +++ b/block-Make-wait-mark-serialising-request.patch @@ -0,0 +1,126 @@ +From: Max Reitz +Date: Fri, 1 Nov 2019 16:25:08 +0100 +Subject: block: Make wait/mark serialising requests public + +Git-commit: 304d9d7f034ff7f5e1e66a65b7f720f63a72c57e + +Make both bdrv_mark_request_serialising() and +bdrv_wait_serialising_requests() public so they can be used from block +drivers. 
+ +Cc: qemu-stable@nongnu.org +Signed-off-by: Max Reitz +Message-id: 20191101152510.11719-2-mreitz@redhat.com +Signed-off-by: Max Reitz +Signed-off-by: Bruce Rogers +--- + block/io.c | 24 ++++++++++++------------ + include/block/block_int.h | 3 +++ + 2 files changed, 15 insertions(+), 12 deletions(-) + +diff --git a/block/io.c b/block/io.c +index bfb2653d8ee853e99bd4d55a1a87..d9f632f450b744515d3f91d2aa26 100644 +--- a/block/io.c ++++ b/block/io.c +@@ -694,7 +694,7 @@ static void tracked_request_begin(BdrvTrackedRequest *req, + qemu_co_mutex_unlock(&bs->reqs_lock); + } + +-static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align) ++void bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align) + { + int64_t overlap_offset = req->offset & ~(align - 1); + uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align) +@@ -784,7 +784,7 @@ void bdrv_dec_in_flight(BlockDriverState *bs) + bdrv_wakeup(bs); + } + +-static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self) ++bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self) + { + BlockDriverState *bs = self->bs; + BdrvTrackedRequest *req; +@@ -1340,14 +1340,14 @@ static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child, + * with each other for the same cluster. For example, in copy-on-read + * it ensures that the CoR read and write operations are atomic and + * guest writes cannot interleave between them. */ +- mark_request_serialising(req, bdrv_get_cluster_size(bs)); ++ bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs)); + } + + /* BDRV_REQ_SERIALISING is only for write operation */ + assert(!(flags & BDRV_REQ_SERIALISING)); + + if (!(flags & BDRV_REQ_NO_SERIALISING)) { +- wait_serialising_requests(req); ++ bdrv_wait_serialising_requests(req); + } + + if (flags & BDRV_REQ_COPY_ON_READ) { +@@ -1736,10 +1736,10 @@ bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes, + assert(!(flags & ~BDRV_REQ_MASK)); + + if (flags & BDRV_REQ_SERIALISING) { +- mark_request_serialising(req, bdrv_get_cluster_size(bs)); ++ bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs)); + } + +- waited = wait_serialising_requests(req); ++ waited = bdrv_wait_serialising_requests(req); + + assert(!waited || !req->serialising || + is_request_serialising_and_aligned(req)); +@@ -1905,8 +1905,8 @@ static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child, + + padding = bdrv_init_padding(bs, offset, bytes, &pad); + if (padding) { +- mark_request_serialising(req, align); +- wait_serialising_requests(req); ++ bdrv_mark_request_serialising(req, align); ++ bdrv_wait_serialising_requests(req); + + bdrv_padding_rmw_read(child, req, &pad, true); + +@@ -1993,8 +1993,8 @@ int coroutine_fn bdrv_co_pwritev(BdrvChild *child, + } + + if (bdrv_pad_request(bs, &qiov, &offset, &bytes, &pad)) { +- mark_request_serialising(&req, align); +- wait_serialising_requests(&req); ++ bdrv_mark_request_serialising(&req, align); ++ bdrv_wait_serialising_requests(&req); + bdrv_padding_rmw_read(child, &req, &pad, false); + } + +@@ -3078,7 +3078,7 @@ static int coroutine_fn bdrv_co_copy_range_internal( + /* BDRV_REQ_SERIALISING is only for write operation */ + assert(!(read_flags & BDRV_REQ_SERIALISING)); + if (!(read_flags & BDRV_REQ_NO_SERIALISING)) { +- wait_serialising_requests(&req); ++ bdrv_wait_serialising_requests(&req); + } + + ret = src->bs->drv->bdrv_co_copy_range_from(src->bs, +@@ -3205,7 +3205,7 @@ int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, + * new 
area, we need to make sure that no write requests are made to it + * concurrently or they might be overwritten by preallocation. */ + if (new_bytes) { +- mark_request_serialising(&req, 1); ++ bdrv_mark_request_serialising(&req, 1); + } + if (bs->read_only) { + error_setg(errp, "Image is read-only"); +diff --git a/include/block/block_int.h b/include/block/block_int.h +index 3aa1e832a8fdf32bf3f33d1e1508..4465b022424c23aea82942547cc3 100644 +--- a/include/block/block_int.h ++++ b/include/block/block_int.h +@@ -962,6 +962,9 @@ extern unsigned int bdrv_drain_all_count; + void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent); + void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent); + ++bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self); ++void bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align); ++ + int get_tmp_filename(char *filename, int size); + BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size, + const char *filename); diff --git a/block-file-posix-Let-post-EOF-fallocate-.patch b/block-file-posix-Let-post-EOF-fallocate-.patch new file mode 100644 index 00000000..34509dbc --- /dev/null +++ b/block-file-posix-Let-post-EOF-fallocate-.patch @@ -0,0 +1,67 @@ +From: Max Reitz +Date: Fri, 1 Nov 2019 16:25:10 +0100 +Subject: block/file-posix: Let post-EOF fallocate serialize + +Git-commit: 292d06b925b2787ee6f2430996b95651cae42fce + +The XFS kernel driver has a bug that may cause data corruption for qcow2 +images as of qemu commit c8bb23cbdbe32f. We can work around it by +treating post-EOF fallocates as serializing up until infinity (INT64_MAX +in practice). + +Cc: qemu-stable@nongnu.org +Signed-off-by: Max Reitz +Message-id: 20191101152510.11719-4-mreitz@redhat.com +Signed-off-by: Max Reitz +Signed-off-by: Bruce Rogers +--- + block/file-posix.c | 36 ++++++++++++++++++++++++++++++++++++ + 1 file changed, 36 insertions(+) + +diff --git a/block/file-posix.c b/block/file-posix.c +index 992eb4a798b99fe02e93103028c6..c5df61b47735ee7e5201cebec46c 100644 +--- a/block/file-posix.c ++++ b/block/file-posix.c +@@ -2623,6 +2623,42 @@ raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int bytes, + RawPosixAIOData acb; + ThreadPoolFunc *handler; + ++#ifdef CONFIG_FALLOCATE ++ if (offset + bytes > bs->total_sectors * BDRV_SECTOR_SIZE) { ++ BdrvTrackedRequest *req; ++ uint64_t end; ++ ++ /* ++ * This is a workaround for a bug in the Linux XFS driver, ++ * where writes submitted through the AIO interface will be ++ * discarded if they happen beyond a concurrently running ++ * fallocate() that increases the file length (i.e., both the ++ * write and the fallocate() happen beyond the EOF). ++ * ++ * To work around it, we extend the tracked request for this ++ * zero write until INT64_MAX (effectively infinity), and mark ++ * it as serializing. ++ * ++ * We have to enable this workaround for all filesystems and ++ * AIO modes (not just XFS with aio=native), because for ++ * remote filesystems we do not know the host configuration. 
++ */ ++ ++ req = bdrv_co_get_self_request(bs); ++ assert(req); ++ assert(req->type == BDRV_TRACKED_WRITE); ++ assert(req->offset <= offset); ++ assert(req->offset + req->bytes >= offset + bytes); ++ ++ end = INT64_MAX & -(uint64_t)bs->bl.request_alignment; ++ req->bytes = end - req->offset; ++ req->overlap_bytes = req->bytes; ++ ++ bdrv_mark_request_serialising(req, bs->bl.request_alignment); ++ bdrv_wait_serialising_requests(req); ++ } ++#endif ++ + acb = (RawPosixAIOData) { + .bs = bs, + .aio_fildes = s->fd, diff --git a/block-io-refactor-padding.patch b/block-io-refactor-padding.patch new file mode 100644 index 00000000..288b3870 --- /dev/null +++ b/block-io-refactor-padding.patch @@ -0,0 +1,478 @@ +From: Vladimir Sementsov-Ogievskiy +Date: Tue, 4 Jun 2019 19:15:05 +0300 +Subject: block/io: refactor padding + +Git-commit: 7a3f542fbdfd799be4fa6f8b96dc8c1e6933fce4 + +We have similar padding code in bdrv_co_pwritev, +bdrv_co_do_pwrite_zeroes and bdrv_co_preadv. Let's combine and unify +it. + +[Squashed in Vladimir's qemu-iotests 077 fix +--Stefan] + +Signed-off-by: Vladimir Sementsov-Ogievskiy +Acked-by: Stefan Hajnoczi +Message-id: 20190604161514.262241-4-vsementsov@virtuozzo.com +Message-Id: <20190604161514.262241-4-vsementsov@virtuozzo.com> +Signed-off-by: Stefan Hajnoczi +Signed-off-by: Bruce Rogers +--- + block/io.c | 365 +++++++++++++++++++++++++++++------------------------ + 1 file changed, 200 insertions(+), 165 deletions(-) + +diff --git a/block/io.c b/block/io.c +index 06305c6ea62efabf1efb43933bf6..bfb2653d8ee853e99bd4d55a1a87 100644 +--- a/block/io.c ++++ b/block/io.c +@@ -1408,28 +1408,177 @@ out: + } + + /* +- * Handle a read request in coroutine context ++ * Request padding ++ * ++ * |<---- align ----->| |<----- align ---->| ++ * |<- head ->|<------------- bytes ------------->|<-- tail -->| ++ * | | | | | | ++ * -*----------$-------*-------- ... --------*-----$------------*--- ++ * | | | | | | ++ * | offset | | end | ++ * ALIGN_DOWN(offset) ALIGN_UP(offset) ALIGN_DOWN(end) ALIGN_UP(end) ++ * [buf ... ) [tail_buf ) ++ * ++ * @buf is an aligned allocation needed to store @head and @tail paddings. @head ++ * is placed at the beginning of @buf and @tail at the @end. ++ * ++ * @tail_buf is a pointer to sub-buffer, corresponding to align-sized chunk ++ * around tail, if tail exists. ++ * ++ * @merge_reads is true for small requests, ++ * if @buf_len == @head + bytes + @tail. In this case it is possible that both ++ * head and tail exist but @buf_len == align and @tail_buf == @buf. ++ */ ++typedef struct BdrvRequestPadding { ++ uint8_t *buf; ++ size_t buf_len; ++ uint8_t *tail_buf; ++ size_t head; ++ size_t tail; ++ bool merge_reads; ++ QEMUIOVector local_qiov; ++} BdrvRequestPadding; ++ ++static bool bdrv_init_padding(BlockDriverState *bs, ++ int64_t offset, int64_t bytes, ++ BdrvRequestPadding *pad) ++{ ++ uint64_t align = bs->bl.request_alignment; ++ size_t sum; ++ ++ memset(pad, 0, sizeof(*pad)); ++ ++ pad->head = offset & (align - 1); ++ pad->tail = ((offset + bytes) & (align - 1)); ++ if (pad->tail) { ++ pad->tail = align - pad->tail; ++ } ++ ++ if ((!pad->head && !pad->tail) || !bytes) { ++ return false; ++ } ++ ++ sum = pad->head + bytes + pad->tail; ++ pad->buf_len = (sum > align && pad->head && pad->tail) ? 
2 * align : align; ++ pad->buf = qemu_blockalign(bs, pad->buf_len); ++ pad->merge_reads = sum == pad->buf_len; ++ if (pad->tail) { ++ pad->tail_buf = pad->buf + pad->buf_len - align; ++ } ++ ++ return true; ++} ++ ++static int bdrv_padding_rmw_read(BdrvChild *child, ++ BdrvTrackedRequest *req, ++ BdrvRequestPadding *pad, ++ bool zero_middle) ++{ ++ QEMUIOVector local_qiov; ++ BlockDriverState *bs = child->bs; ++ uint64_t align = bs->bl.request_alignment; ++ int ret; ++ ++ assert(req->serialising && pad->buf); ++ ++ if (pad->head || pad->merge_reads) { ++ uint64_t bytes = pad->merge_reads ? pad->buf_len : align; ++ ++ qemu_iovec_init_buf(&local_qiov, pad->buf, bytes); ++ ++ if (pad->head) { ++ bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD); ++ } ++ if (pad->merge_reads && pad->tail) { ++ bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); ++ } ++ ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes, ++ align, &local_qiov, 0); ++ if (ret < 0) { ++ return ret; ++ } ++ if (pad->head) { ++ bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); ++ } ++ if (pad->merge_reads && pad->tail) { ++ bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); ++ } ++ ++ if (pad->merge_reads) { ++ goto zero_mem; ++ } ++ } ++ ++ if (pad->tail) { ++ qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align); ++ ++ bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); ++ ret = bdrv_aligned_preadv( ++ child, req, ++ req->overlap_offset + req->overlap_bytes - align, ++ align, align, &local_qiov, 0); ++ if (ret < 0) { ++ return ret; ++ } ++ bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); ++ } ++ ++zero_mem: ++ if (zero_middle) { ++ memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail); ++ } ++ ++ return 0; ++} ++ ++static void bdrv_padding_destroy(BdrvRequestPadding *pad) ++{ ++ if (pad->buf) { ++ qemu_vfree(pad->buf); ++ qemu_iovec_destroy(&pad->local_qiov); ++ } ++} ++ ++/* ++ * bdrv_pad_request ++ * ++ * Exchange request parameters with padded request if needed. Don't include RMW ++ * read of padding, bdrv_padding_rmw_read() should be called separately if ++ * needed. ++ * ++ * All parameters except @bs are in-out: they represent original request at ++ * function call and padded (if padding needed) at function finish. ++ * ++ * Function always succeeds. 
+ */ ++static bool bdrv_pad_request(BlockDriverState *bs, QEMUIOVector **qiov, ++ int64_t *offset, unsigned int *bytes, ++ BdrvRequestPadding *pad) ++{ ++ if (!bdrv_init_padding(bs, *offset, *bytes, pad)) { ++ return false; ++ } ++ ++ qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head, ++ *qiov, 0, *bytes, ++ pad->buf + pad->buf_len - pad->tail, pad->tail); ++ *bytes += pad->head + pad->tail; ++ *offset -= pad->head; ++ *qiov = &pad->local_qiov; ++ ++ return true; ++} ++ + int coroutine_fn bdrv_co_preadv(BdrvChild *child, + int64_t offset, unsigned int bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags) + { + BlockDriverState *bs = child->bs; +- BlockDriver *drv = bs->drv; + BdrvTrackedRequest req; +- +- uint64_t align = bs->bl.request_alignment; +- uint8_t *head_buf = NULL; +- uint8_t *tail_buf = NULL; +- QEMUIOVector local_qiov; +- bool use_local_qiov = false; ++ BdrvRequestPadding pad; + int ret; + +- trace_bdrv_co_preadv(child->bs, offset, bytes, flags); +- +- if (!drv) { +- return -ENOMEDIUM; +- } ++ trace_bdrv_co_preadv(bs, offset, bytes, flags); + + ret = bdrv_check_byte_request(bs, offset, bytes); + if (ret < 0) { +@@ -1443,43 +1592,16 @@ int coroutine_fn bdrv_co_preadv(BdrvChild *child, + flags |= BDRV_REQ_COPY_ON_READ; + } + +- /* Align read if necessary by padding qiov */ +- if (offset & (align - 1)) { +- head_buf = qemu_blockalign(bs, align); +- qemu_iovec_init(&local_qiov, qiov->niov + 2); +- qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1)); +- qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); +- use_local_qiov = true; +- +- bytes += offset & (align - 1); +- offset = offset & ~(align - 1); +- } +- +- if ((offset + bytes) & (align - 1)) { +- if (!use_local_qiov) { +- qemu_iovec_init(&local_qiov, qiov->niov + 1); +- qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); +- use_local_qiov = true; +- } +- tail_buf = qemu_blockalign(bs, align); +- qemu_iovec_add(&local_qiov, tail_buf, +- align - ((offset + bytes) & (align - 1))); +- +- bytes = ROUND_UP(bytes, align); +- } ++ bdrv_pad_request(bs, &qiov, &offset, &bytes, &pad); + + tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ); +- ret = bdrv_aligned_preadv(child, &req, offset, bytes, align, +- use_local_qiov ? &local_qiov : qiov, +- flags); ++ ret = bdrv_aligned_preadv(child, &req, offset, bytes, ++ bs->bl.request_alignment, ++ qiov, flags); + tracked_request_end(&req); + bdrv_dec_in_flight(bs); + +- if (use_local_qiov) { +- qemu_iovec_destroy(&local_qiov); +- qemu_vfree(head_buf); +- qemu_vfree(tail_buf); +- } ++ bdrv_padding_destroy(&pad); + + return ret; + } +@@ -1775,44 +1897,34 @@ static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child, + BdrvTrackedRequest *req) + { + BlockDriverState *bs = child->bs; +- uint8_t *buf = NULL; + QEMUIOVector local_qiov; + uint64_t align = bs->bl.request_alignment; +- unsigned int head_padding_bytes, tail_padding_bytes; + int ret = 0; ++ bool padding; ++ BdrvRequestPadding pad; + +- head_padding_bytes = offset & (align - 1); +- tail_padding_bytes = (align - (offset + bytes)) & (align - 1); +- +- +- assert(flags & BDRV_REQ_ZERO_WRITE); +- if (head_padding_bytes || tail_padding_bytes) { +- buf = qemu_blockalign(bs, align); +- qemu_iovec_init_buf(&local_qiov, buf, align); +- } +- if (head_padding_bytes) { +- uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes); +- +- /* RMW the unaligned part before head. 
*/ ++ padding = bdrv_init_padding(bs, offset, bytes, &pad); ++ if (padding) { + mark_request_serialising(req, align); + wait_serialising_requests(req); +- bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD); +- ret = bdrv_aligned_preadv(child, req, offset & ~(align - 1), align, +- align, &local_qiov, 0); +- if (ret < 0) { +- goto fail; +- } +- bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); + +- memset(buf + head_padding_bytes, 0, zero_bytes); +- ret = bdrv_aligned_pwritev(child, req, offset & ~(align - 1), align, +- align, &local_qiov, +- flags & ~BDRV_REQ_ZERO_WRITE); +- if (ret < 0) { +- goto fail; ++ bdrv_padding_rmw_read(child, req, &pad, true); ++ ++ if (pad.head || pad.merge_reads) { ++ int64_t aligned_offset = offset & ~(align - 1); ++ int64_t write_bytes = pad.merge_reads ? pad.buf_len : align; ++ ++ qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes); ++ ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes, ++ align, &local_qiov, ++ flags & ~BDRV_REQ_ZERO_WRITE); ++ if (ret < 0 || pad.merge_reads) { ++ /* Error or all work is done */ ++ goto out; ++ } ++ offset += write_bytes - pad.head; ++ bytes -= write_bytes - pad.head; + } +- offset += zero_bytes; +- bytes -= zero_bytes; + } + + assert(!bytes || (offset & (align - 1)) == 0); +@@ -1822,7 +1934,7 @@ static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child, + ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align, + NULL, flags); + if (ret < 0) { +- goto fail; ++ goto out; + } + bytes -= aligned_bytes; + offset += aligned_bytes; +@@ -1830,26 +1942,17 @@ static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child, + + assert(!bytes || (offset & (align - 1)) == 0); + if (bytes) { +- assert(align == tail_padding_bytes + bytes); +- /* RMW the unaligned part after tail. 
*/ +- mark_request_serialising(req, align); +- wait_serialising_requests(req); +- bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); +- ret = bdrv_aligned_preadv(child, req, offset, align, +- align, &local_qiov, 0); +- if (ret < 0) { +- goto fail; +- } +- bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); ++ assert(align == pad.tail + bytes); + +- memset(buf, 0, bytes); ++ qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align); + ret = bdrv_aligned_pwritev(child, req, offset, align, align, + &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE); + } +-fail: +- qemu_vfree(buf); +- return ret; + ++out: ++ bdrv_padding_destroy(&pad); ++ ++ return ret; + } + + /* +@@ -1862,10 +1965,7 @@ int coroutine_fn bdrv_co_pwritev(BdrvChild *child, + BlockDriverState *bs = child->bs; + BdrvTrackedRequest req; + uint64_t align = bs->bl.request_alignment; +- uint8_t *head_buf = NULL; +- uint8_t *tail_buf = NULL; +- QEMUIOVector local_qiov; +- bool use_local_qiov = false; ++ BdrvRequestPadding pad; + int ret; + + trace_bdrv_co_pwritev(child->bs, offset, bytes, flags); +@@ -1892,86 +1992,21 @@ int coroutine_fn bdrv_co_pwritev(BdrvChild *child, + goto out; + } + +- if (offset & (align - 1)) { +- QEMUIOVector head_qiov; +- ++ if (bdrv_pad_request(bs, &qiov, &offset, &bytes, &pad)) { + mark_request_serialising(&req, align); + wait_serialising_requests(&req); +- +- head_buf = qemu_blockalign(bs, align); +- qemu_iovec_init_buf(&head_qiov, head_buf, align); +- +- bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD); +- ret = bdrv_aligned_preadv(child, &req, offset & ~(align - 1), align, +- align, &head_qiov, 0); +- if (ret < 0) { +- goto fail; +- } +- bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); +- +- qemu_iovec_init(&local_qiov, qiov->niov + 2); +- qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1)); +- qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); +- use_local_qiov = true; +- +- bytes += offset & (align - 1); +- offset = offset & ~(align - 1); +- +- /* We have read the tail already if the request is smaller +- * than one aligned block. +- */ +- if (bytes < align) { +- qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes); +- bytes = align; +- } +- } +- +- if ((offset + bytes) & (align - 1)) { +- QEMUIOVector tail_qiov; +- size_t tail_bytes; +- bool waited; +- +- mark_request_serialising(&req, align); +- waited = wait_serialising_requests(&req); +- assert(!waited || !use_local_qiov); +- +- tail_buf = qemu_blockalign(bs, align); +- qemu_iovec_init_buf(&tail_qiov, tail_buf, align); +- +- bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); +- ret = bdrv_aligned_preadv(child, &req, (offset + bytes) & ~(align - 1), +- align, align, &tail_qiov, 0); +- if (ret < 0) { +- goto fail; +- } +- bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); +- +- if (!use_local_qiov) { +- qemu_iovec_init(&local_qiov, qiov->niov + 1); +- qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); +- use_local_qiov = true; +- } +- +- tail_bytes = (offset + bytes) & (align - 1); +- qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes); +- +- bytes = ROUND_UP(bytes, align); ++ bdrv_padding_rmw_read(child, &req, &pad, false); + } + + ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align, +- use_local_qiov ? 
&local_qiov : qiov, +- flags); ++ qiov, flags); + +-fail: ++ bdrv_padding_destroy(&pad); + +- if (use_local_qiov) { +- qemu_iovec_destroy(&local_qiov); +- } +- qemu_vfree(head_buf); +- qemu_vfree(tail_buf); + out: + tracked_request_end(&req); + bdrv_dec_in_flight(bs); ++ + return ret; + } + diff --git a/bundles.tar.xz b/bundles.tar.xz index 30a06382..575cbd3b 100644 --- a/bundles.tar.xz +++ b/bundles.tar.xz @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:86afea4d0fcdfa578dd3b660ccbadd49e996057de08ca8078020cb5c10611f91 -size 56424 +oid sha256:4c674d3cdda7189a79c9f7babf6858dddc3bf621d19f43e80cacf8fb6c85cc12 +size 60840 diff --git a/config.sh b/config.sh index fe40c7c1..6cbf2991 100644 --- a/config.sh +++ b/config.sh @@ -2,16 +2,12 @@ # config.sh: -# The next few VARIABLES may be edited (or uncommented) as required: +# The next few VARIABLES are to be edited as required: # The following specifies the upstream tag or commit upon which our patchqueue # gets rebased. The special value LATEST may be used to "automatically" track # the upstream development tree in the master branch GIT_UPSTREAM_COMMIT_ISH=v4.1.0 -if [[ "$GIT_UPSTREAM_COMMIT_ISH" != "LATEST" ]]; then - # This is the git branch used (otherwise it is computed) - GIT_BRANCH=opensuse-4.1 -fi # WARNING: If transitioning from using LATEST to not, MANUALLY re-set the # tarball present. If transitioning TO LATEST, make sure that # NEXT_RELEASE_IS_MAJOR is set correctly diff --git a/qcow2-Fix-QCOW2_COMPRESSED_SECTOR_MASK.patch b/qcow2-Fix-QCOW2_COMPRESSED_SECTOR_MASK.patch new file mode 100644 index 00000000..04e89ec7 --- /dev/null +++ b/qcow2-Fix-QCOW2_COMPRESSED_SECTOR_MASK.patch @@ -0,0 +1,33 @@ +From: Max Reitz +Date: Mon, 28 Oct 2019 17:18:40 +0100 +Subject: qcow2: Fix QCOW2_COMPRESSED_SECTOR_MASK + +Git-commit: 24552feb6ae2f615b76c2b95394af43901f75046 + +Masks for L2 table entries should have 64 bit. + +Fixes: b6c246942b14d3e0dec46a6c5868ed84e7dbea19 +Buglink: https://bugs.launchpad.net/qemu/+bug/1850000 +Cc: qemu-stable@nongnu.org +Signed-off-by: Max Reitz +Message-id: 20191028161841.1198-2-mreitz@redhat.com +Reviewed-by: Alberto Garcia +Signed-off-by: Max Reitz +Signed-off-by: Bruce Rogers +--- + block/qcow2.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/block/qcow2.h b/block/qcow2.h +index fc1b0d3c1e1e3110e86f36f0dcd9..359197f89fb9547d861d852cbf68 100644 +--- a/block/qcow2.h ++++ b/block/qcow2.h +@@ -77,7 +77,7 @@ + + /* Defined in the qcow2 spec (compressed cluster descriptor) */ + #define QCOW2_COMPRESSED_SECTOR_SIZE 512U +-#define QCOW2_COMPRESSED_SECTOR_MASK (~(QCOW2_COMPRESSED_SECTOR_SIZE - 1)) ++#define QCOW2_COMPRESSED_SECTOR_MASK (~(QCOW2_COMPRESSED_SECTOR_SIZE - 1ULL)) + + /* Must be at least 2 to cover COW */ + #define MIN_L2_CACHE_SIZE 2 /* cache entries */ diff --git a/qcow2-bitmap-Fix-uint64_t-left-shift-ove.patch b/qcow2-bitmap-Fix-uint64_t-left-shift-ove.patch new file mode 100644 index 00000000..a891a027 --- /dev/null +++ b/qcow2-bitmap-Fix-uint64_t-left-shift-ove.patch @@ -0,0 +1,66 @@ +From: Tuguoyi +Date: Fri, 1 Nov 2019 07:37:35 +0000 +Subject: qcow2-bitmap: Fix uint64_t left-shift overflow + +Git-commit: 570542ecb11e04b61ef4b3f4d0965a6915232a88 + +There are two issues in In check_constraints_on_bitmap(), +1) The sanity check on the granularity will cause uint64_t +integer left-shift overflow when cluster_size is 2M and the +granularity is BIGGER than 32K. +2) The way to calculate image size that the maximum bitmap +supported can map to is a bit incorrect. 
+This patch fix it by add a helper function to calculate the +number of bytes needed by a normal bitmap in image and compare +it to the maximum bitmap bytes supported by qemu. + +Fixes: 5f72826e7fc62167cf3a +Signed-off-by: Guoyi Tu +Message-id: 4ba40cd1e7ee4a708b40899952e49f22@h3c.com +Reviewed-by: Vladimir Sementsov-Ogievskiy +Cc: qemu-stable@nongnu.org +Signed-off-by: Max Reitz +Signed-off-by: Bruce Rogers +--- + block/qcow2-bitmap.c | 14 +++++++++++--- + 1 file changed, 11 insertions(+), 3 deletions(-) + +diff --git a/block/qcow2-bitmap.c b/block/qcow2-bitmap.c +index b2487101edec0bdde613c561da89..65034da1c0766603d273c200797d 100644 +--- a/block/qcow2-bitmap.c ++++ b/block/qcow2-bitmap.c +@@ -142,6 +142,13 @@ static int check_table_entry(uint64_t entry, int cluster_size) + return 0; + } + ++static int64_t get_bitmap_bytes_needed(int64_t len, uint32_t granularity) ++{ ++ int64_t num_bits = DIV_ROUND_UP(len, granularity); ++ ++ return DIV_ROUND_UP(num_bits, 8); ++} ++ + static int check_constraints_on_bitmap(BlockDriverState *bs, + const char *name, + uint32_t granularity, +@@ -150,6 +157,7 @@ static int check_constraints_on_bitmap(BlockDriverState *bs, + BDRVQcow2State *s = bs->opaque; + int granularity_bits = ctz32(granularity); + int64_t len = bdrv_getlength(bs); ++ int64_t bitmap_bytes; + + assert(granularity > 0); + assert((granularity & (granularity - 1)) == 0); +@@ -171,9 +179,9 @@ static int check_constraints_on_bitmap(BlockDriverState *bs, + return -EINVAL; + } + +- if ((len > (uint64_t)BME_MAX_PHYS_SIZE << granularity_bits) || +- (len > (uint64_t)BME_MAX_TABLE_SIZE * s->cluster_size << +- granularity_bits)) ++ bitmap_bytes = get_bitmap_bytes_needed(len, granularity); ++ if ((bitmap_bytes > (uint64_t)BME_MAX_PHYS_SIZE) || ++ (bitmap_bytes > (uint64_t)BME_MAX_TABLE_SIZE * s->cluster_size)) + { + error_setg(errp, "Too much space will be occupied by the bitmap. " + "Use larger granularity"); diff --git a/qemu.changes b/qemu.changes index 1661f17b..59bf50c5 100644 --- a/qemu.changes +++ b/qemu.changes @@ -1,3 +1,28 @@ +------------------------------------------------------------------- +Thu Nov 7 19:02:39 UTC 2019 - Bruce Rogers + +- Fix two issues with qcow2 image processing which could affect + disk integrity + qcow2-Fix-QCOW2_COMPRESSED_SECTOR_MASK.patch + qcow2-bitmap-Fix-uint64_t-left-shift-ove.patch + +------------------------------------------------------------------- +Wed Nov 6 20:43:48 UTC 2019 - Bruce Rogers + +- Work around a host kernel xfs bug which can result in qcow2 image + corruption + block-io-refactor-padding.patch + util-iov-introduce-qemu_iovec_init_exten.patch + block-Make-wait-mark-serialising-request.patch + block-Add-bdrv_co_get_self_request.patch + block-file-posix-Let-post-EOF-fallocate-.patch +- Patch queue updated from git://github.com/openSUSE/qemu.git opensuse-4.1 + +------------------------------------------------------------------- +Mon Nov 4 13:47:02 UTC 2019 - Stefan BrĂ¼ns + +- Correct package names in _constraints after switch to multibuild. 
+ ------------------------------------------------------------------- Sat Oct 26 03:07:00 UTC 2019 - Bruce Rogers diff --git a/qemu.spec b/qemu.spec index 97c45192..ce55ae2a 100644 --- a/qemu.spec +++ b/qemu.spec @@ -149,46 +149,53 @@ Patch00023: make-release-pull-in-edk2-submodules-so-.patch Patch00024: roms-Makefile.edk2-don-t-pull-in-submodu.patch Patch00025: coroutine-Add-qemu_co_mutex_assert_locke.patch Patch00026: qcow2-Fix-corruption-bug-in-qcow2_detect.patch -Patch00027: XXX-dont-dump-core-on-sigabort.patch -Patch00028: qemu-binfmt-conf-Modify-default-path.patch -Patch00029: qemu-cvs-gettimeofday.patch -Patch00030: qemu-cvs-ioctl_debug.patch -Patch00031: qemu-cvs-ioctl_nodirection.patch -Patch00032: linux-user-add-binfmt-wrapper-for-argv-0.patch -Patch00033: PPC-KVM-Disable-mmu-notifier-check.patch -Patch00034: linux-user-binfmt-support-host-binaries.patch -Patch00035: linux-user-Fake-proc-cpuinfo.patch -Patch00036: linux-user-use-target_ulong.patch -Patch00037: Make-char-muxer-more-robust-wrt-small-FI.patch -Patch00038: linux-user-lseek-explicitly-cast-non-set.patch -Patch00039: AIO-Reduce-number-of-threads-for-32bit-h.patch -Patch00040: xen_disk-Add-suse-specific-flush-disable.patch -Patch00041: qemu-bridge-helper-reduce-security-profi.patch -Patch00042: qemu-binfmt-conf-use-qemu-ARCH-binfmt.patch -Patch00043: linux-user-properly-test-for-infinite-ti.patch -Patch00044: roms-Makefile-pass-a-packaging-timestamp.patch -Patch00045: Raise-soft-address-space-limit-to-hard-l.patch -Patch00046: increase-x86_64-physical-bits-to-42.patch -Patch00047: vga-Raise-VRAM-to-16-MiB-for-pc-0.15-and.patch -Patch00048: i8254-Fix-migration-from-SLE11-SP2.patch -Patch00049: acpi_piix4-Fix-migration-from-SLE11-SP2.patch -Patch00050: Switch-order-of-libraries-for-mpath-supp.patch -Patch00051: Make-installed-scripts-explicitly-python.patch -Patch00052: hw-smbios-handle-both-file-formats-regar.patch -Patch00053: xen-add-block-resize-support-for-xen-dis.patch -Patch00054: tests-qemu-iotests-Triple-timeout-of-i-o.patch -Patch00055: tests-Fix-block-tests-to-be-compatible-w.patch -Patch00056: xen-ignore-live-parameter-from-xen-save-.patch -Patch00057: Conditionalize-ui-bitmap-installation-be.patch -Patch00058: tests-change-error-message-in-test-162.patch -Patch00059: hw-usb-hcd-xhci-Fix-GCC-9-build-warning.patch -Patch00060: hw-usb-dev-mtp-Fix-GCC-9-build-warning.patch -Patch00061: hw-intc-exynos4210_gic-provide-more-room.patch -Patch00062: configure-only-populate-roms-if-softmmu.patch -Patch00063: pc-bios-s390-ccw-net-avoid-warning-about.patch -Patch00064: roms-change-cross-compiler-naming-to-be-.patch -Patch00065: tests-Disable-some-block-tests-for-now.patch -Patch00066: test-add-mapping-from-arch-of-i686-to-qe.patch +Patch00027: block-io-refactor-padding.patch +Patch00028: util-iov-introduce-qemu_iovec_init_exten.patch +Patch00029: block-Make-wait-mark-serialising-request.patch +Patch00030: block-Add-bdrv_co_get_self_request.patch +Patch00031: block-file-posix-Let-post-EOF-fallocate-.patch +Patch00032: qcow2-bitmap-Fix-uint64_t-left-shift-ove.patch +Patch00033: qcow2-Fix-QCOW2_COMPRESSED_SECTOR_MASK.patch +Patch00034: XXX-dont-dump-core-on-sigabort.patch +Patch00035: qemu-binfmt-conf-Modify-default-path.patch +Patch00036: qemu-cvs-gettimeofday.patch +Patch00037: qemu-cvs-ioctl_debug.patch +Patch00038: qemu-cvs-ioctl_nodirection.patch +Patch00039: linux-user-add-binfmt-wrapper-for-argv-0.patch +Patch00040: PPC-KVM-Disable-mmu-notifier-check.patch +Patch00041: linux-user-binfmt-support-host-binaries.patch 
+Patch00042: linux-user-Fake-proc-cpuinfo.patch +Patch00043: linux-user-use-target_ulong.patch +Patch00044: Make-char-muxer-more-robust-wrt-small-FI.patch +Patch00045: linux-user-lseek-explicitly-cast-non-set.patch +Patch00046: AIO-Reduce-number-of-threads-for-32bit-h.patch +Patch00047: xen_disk-Add-suse-specific-flush-disable.patch +Patch00048: qemu-bridge-helper-reduce-security-profi.patch +Patch00049: qemu-binfmt-conf-use-qemu-ARCH-binfmt.patch +Patch00050: linux-user-properly-test-for-infinite-ti.patch +Patch00051: roms-Makefile-pass-a-packaging-timestamp.patch +Patch00052: Raise-soft-address-space-limit-to-hard-l.patch +Patch00053: increase-x86_64-physical-bits-to-42.patch +Patch00054: vga-Raise-VRAM-to-16-MiB-for-pc-0.15-and.patch +Patch00055: i8254-Fix-migration-from-SLE11-SP2.patch +Patch00056: acpi_piix4-Fix-migration-from-SLE11-SP2.patch +Patch00057: Switch-order-of-libraries-for-mpath-supp.patch +Patch00058: Make-installed-scripts-explicitly-python.patch +Patch00059: hw-smbios-handle-both-file-formats-regar.patch +Patch00060: xen-add-block-resize-support-for-xen-dis.patch +Patch00061: tests-qemu-iotests-Triple-timeout-of-i-o.patch +Patch00062: tests-Fix-block-tests-to-be-compatible-w.patch +Patch00063: xen-ignore-live-parameter-from-xen-save-.patch +Patch00064: Conditionalize-ui-bitmap-installation-be.patch +Patch00065: tests-change-error-message-in-test-162.patch +Patch00066: hw-usb-hcd-xhci-Fix-GCC-9-build-warning.patch +Patch00067: hw-usb-dev-mtp-Fix-GCC-9-build-warning.patch +Patch00068: hw-intc-exynos4210_gic-provide-more-room.patch +Patch00069: configure-only-populate-roms-if-softmmu.patch +Patch00070: pc-bios-s390-ccw-net-avoid-warning-about.patch +Patch00071: roms-change-cross-compiler-naming-to-be-.patch +Patch00072: tests-Disable-some-block-tests-for-now.patch +Patch00073: test-add-mapping-from-arch-of-i686-to-qe.patch # Patches applied in roms/seabios/: Patch01000: seabios-use-python2-explicitly-as-needed.patch Patch01001: seabios-switch-to-python3-as-needed.patch @@ -945,6 +952,13 @@ This package provides a service file for starting and stopping KSM. %patch00064 -p1 %patch00065 -p1 %patch00066 -p1 +%patch00067 -p1 +%patch00068 -p1 +%patch00069 -p1 +%patch00070 -p1 +%patch00071 -p1 +%patch00072 -p1 +%patch00073 -p1 %patch01000 -p1 %patch01001 -p1 %patch01002 -p1 diff --git a/update_git.sh b/update_git.sh index adacb8e9..21802bf6 100644 --- a/update_git.sh +++ b/update_git.sh @@ -14,6 +14,35 @@ source ./config.sh declare -A COMMIT_IDS_BY_SUBMODULE_PATH +# Get version info from the packages' tarball - decode and do some checks +BASE_RE="qemu-[[:digit:]]+(\.[[:digit:]]+){2}(-rc[[:digit:]])?" +EXTRA_RE="\+git\.[[:digit:]]+\.([[:xdigit:]]+)" +SUFFIX_RE="\.tar\.xz" +SIG_SUFFIX_RE="\.tar\.xz\.sig" +QEMU_TARBALL=($(find -maxdepth 1 -type f -regextype posix-extended -regex \ + "\./$BASE_RE($EXTRA_RE)?$SUFFIX_RE" -printf "%f ")) +QEMU_TARBALL_SIG=($(find -maxdepth 1 -type f -regextype posix-extended -regex \ + "\./$BASE_RE($EXTRA_RE)?$SIG_SUFFIX_RE" -printf "%f ")) + +if [ ${#QEMU_TARBALL[@]} -gt 1 ]; then + echo "Multiple qemu tarballs detected. Please clean up" + exit +fi +if [ ${#QEMU_TARBALL_SIG[@]} -gt 1 ]; then + echo "Multiple qemu tarballs signature files detected. Please clean up" + exit +fi +OLD_SOURCE_VERSION_AND_EXTRA=$(echo $QEMU_TARBALL 2>/dev/null | head --bytes=-8\ + | cut --bytes=6-) +VERSION_EXTRA=$(echo $OLD_SOURCE_VERSION_AND_EXTRA|awk -F+ '{if ($2) print \ + "+"$2}') +if [ "$OLD_SOURCE_VERSION_AND_EXTRA" = "" ]; then + echo "ERROR: No tarball found!" 
+ exit +fi + +#============================================================================== + TEMP_CHECK() { # TEMPORARY! FOR NOW WE REQUIRE THESE LOCALLY TO DO WORK ON PACKAGE REQUIRED_LOCAL_REPO_MAP=( @@ -38,6 +67,7 @@ for entry in ${REQUIRED_LOCAL_REPO_MAP[@]}; do else echo "ERROR! For now, you need to have these local git repos available:" echo ${REQUIRED_LOCAL_REPO_MAP[@]} + exit fi done } @@ -65,13 +95,13 @@ rm -rf $BUNDLE_DIR mkdir -p $BUNDLE_DIR for (( i=0; i <$SUBMODULE_COUNT; i++ )); do mkdir -p $BUNDLE_DIR/${SUBMODULE_DIRS[$i]} - # what should this file be? for now use an extension of id +# what should this file be? for now use an extension of id touch $BUNDLE_DIR/${SUBMODULE_DIRS[$i]}/${SUBMODULE_COMMIT_IDS[$i]}.id done if [ "$GIT_UPSTREAM_COMMIT_ISH" = "LATEST" ]; then - GIT_UPSTREAM_COMMIT=$(cd ${LOCAL_REPO_MAP[0]} && git rev-parse upstream/master) + GIT_UPSTREAM_COMMIT=$NEW_COMMIT_ISH_FULL else -# (I need to make this smarter, or change something - works for tag, but not normal commit?): +# TODO: make this smarter, or change something - works for tag, but not normal commit? GIT_UPSTREAM_COMMIT=$(git -C ${LOCAL_REPO_MAP[0]} show-ref -d $GIT_UPSTREAM_COMMIT_ISH|grep -F "^{}"|awk '{print $1}') fi touch $BUNDLE_DIR/$GIT_UPSTREAM_COMMIT.id @@ -93,13 +123,15 @@ for (( i=0; i <$REPO_COUNT; i++ )); do if [[ $(git -C $GIT_DIR/$SUBDIR ls-remote --heads origin $GIT_BRANCH) ]]; then git -C $GIT_DIR/$SUBDIR fetch origin $GIT_BRANCH if [[ $(git -C $GIT_DIR/$SUBDIR rev-list $GITREPO_COMMIT_ISH..FETCH_HEAD) ]]; then - git -C $GIT_DIR/$SUBDIR bundle create $BUNDLE_DIR/$SUBDIR$GITREPO_COMMIT_ISH.bundle $GITREPO_COMMIT_ISH..FETCH_HEAD || true + git -C $GIT_DIR/$SUBDIR bundle create $BUNDLE_DIR/$SUBDIR$GITREPO_COMMIT_ISH.bundle $GITREPO_COMMIT_ISH..FETCH_HEAD fi fi fi fi done -tar cJvf bundles.tar.xz -C $BUNDLE_DIR . +# keep diffs to a minimum - touch bundle files to "something common" TODO: decide if there's something better +find $BUNDLE_DIR -exec touch -r qemu-$SOURCE_VERSION$VERSION_EXTRA.tar.xz {} \; +tar --format gnu --xz -cf bundles.tar.xz -C $BUNDLE_DIR . rm -rf $BUNDLE_DIR rm -rf $GIT_DIR } @@ -129,14 +161,11 @@ for entry in ${BUNDLE_FILES[@]}; do LOCAL_REPO=$(readlink -f ${LOCAL_REPO_MAP[$PATCH_RANGE_INDEX]}) if [ -e $LOCAL_REPO ]; then -# TODO: Detect if it's there before trying to remove! git -C $LOCAL_REPO remote remove bundlerepo || true -# git won't let you delete this branch if it's the current branch (returns 1) HOW TO HANDLE? -# detect this case, and ask user to switch to another branch? or do it for them - switch to master killing any "state" for this branch + # git won't let you delete a branch we're on - so get onto master temporarily (TODO: is there a better approach?) git -C $LOCAL_REPO checkout master -f git -C $LOCAL_REPO branch -D frombundle || true git -C $LOCAL_REPO remote add bundlerepo $BUNDLE_DIR/$entry -# in next, the head may be FETCH_HEAD or HEAD depending on how we created: git -C $LOCAL_REPO fetch bundlerepo FETCH_HEAD git -C $LOCAL_REPO branch frombundle FETCH_HEAD git -C $LOCAL_REPO remote remove bundlerepo @@ -150,232 +179,122 @@ rm -rf $BUNDLE_DIR #============================================================================== -bundle2spec() { +redo_tarball_and_rebase_patches() { rm -rf $GIT_DIR -rm -rf $CMP_DIR -rm -rf $BUNDLE_DIR -rm -f checkpatch.log -rm -f checkthese -# there's probably a better place for the next: (only needed due to development failures?) 
-rm -rf checkdir -if [ "$GIT_UPSTREAM_COMMIT_ISH" = "LATEST" ]; then - for (( i=0; i <$REPO_COUNT; i++ )); do - if [[ -e $(readlink -f ${LOCAL_REPO_MAP[$i]}) ]]; then - git -C ${LOCAL_REPO_MAP[$i]} remote update upstream &> /dev/null - fi - done -#TODO: do we really want to checkout here? the code which gets the latest submodule commits doesnt rely on this !!! IN FACT master here isn't for latest upstream - that is the upstream branch! -# git -C ${LOCAL_REPO_MAP[0]} checkout master --recurse-submodules -f -# TODO: THE FOLLOWING NEEDS HELP - QEMU_VERSION=$(git -C ${LOCAL_REPO_MAP[0]} show origin:VERSION) - MAJOR_VERSION=$(echo $QEMU_VERSION|awk -F. '{print $1}') - MINOR_VERSION=$(echo $QEMU_VERSION|awk -F. '{print $2}') - if [ "$NEXT_RELEASE_IS_MAJOR" = "0" ]; then - GIT_BRANCH=opensuse-$MAJOR_VERSION.$[$MINOR_VERSION+1] - else - GIT_BRANCH=opensuse-$[$MAJOR_VERSION+1].0 - fi -fi - -BASE_RE="qemu-[[:digit:]]+(\.[[:digit:]]+){2}(-rc[[:digit:]])?" -EXTRA_RE="\+git\.[[:digit:]]+\.([[:xdigit:]]+)" -SUFFIX_RE="\.tar\.xz" -SIG_SUFFIX_RE="\.tar\.xz\.sig" -QEMU_TARBALL=($(find -maxdepth 1 -type f -regextype posix-extended -regex \ - "\./$BASE_RE($EXTRA_RE)?$SUFFIX_RE" -printf "%f ")) -QEMU_TARBALL_SIG=($(find -maxdepth 1 -type f -regextype posix-extended -regex \ - "\./$BASE_RE($EXTRA_RE)?$SIG_SUFFIX_RE" -printf "%f ")) - -if [ ${#QEMU_TARBALL[@]} -gt 1 ]; then - echo "Multiple qemu tarballs detected. Please clean up" - exit -fi -if [ ${#QEMU_TARBALL_SIG[@]} -gt 1 ]; then - echo "Multiple qemu tarballs signature files detected. Please clean up" - exit -fi -# It's ok for either of these to be empty when using "LATEST" -OLD_SOURCE_VERSION_AND_EXTRA=$(echo $QEMU_TARBALL 2>/dev/null | head --bytes=-8\ - | cut --bytes=6-) -VERSION_EXTRA=$(echo $OLD_SOURCE_VERSION_AND_EXTRA|awk -F+ '{if ($2) print \ - "+"$2}') -if [ "$OLD_SOURCE_VERSION_AND_EXTRA" = "" ]; then - echo "Warning: No tarball found" -fi - -# TODO: (repo file not yet done) -if [ "$GIT_UPSTREAM_COMMIT_ISH" = "LATEST" ]; then #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -# DO TARBALL, GETTING ALL FROM UPSTREAM DIRECTLY +# CREATE TARBALL, USING FRESH REPO - WE COULD RELY MORE ON LOCAL IF WE WERE MORE CAREFUL #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - if [[ $QEMU_TARBALL =~ $BASE_RE$EXTRA_RE$SUFFIX_RE ]]; then - OLD_COMMIT_ISH=${BASH_REMATCH[3]} - else - #Assume release (or release candidate) tarball with equivalent tag: - OLD_COMMIT_ISH=$(cd ${LOCAL_REPO_MAP[0]} && git rev-list --abbrev-commit \ - --abbrev=9 -1 v$OLD_SOURCE_VERSION_AND_EXTRA) - fi - if [ ${#QEMU_TARBALL_SIG[@]} -ne 0 ]; then - echo "INFO: Ignoring signature file: $QEMU_TARBALL_SIG" - QEMU_TARBALL_SIG= - fi -# TODO: HERE WE REFERENCE MASTER - NEEDS FIXING - NEW_COMMIT_ISH_FULL=$(cd ${LOCAL_REPO_MAP[0]} && git rev-parse upstream/master) - NEW_COMMIT_ISH=$(cd ${LOCAL_REPO_MAP[0]} && git rev-parse --short=9 \ - upstream/master) - NOW_SECONDS=$(date +%s) -# TODO: HERE WE REFERENCE MASTER - NEEDS FIXING - git clone -ls ${LOCAL_REPO_MAP[0]} $GIT_DIR -b master --single-branch &>/dev/null - if [ "$OLD_COMMIT_ISH" != "$NEW_COMMIT_ISH" ]; then - echo "Please wait..." - (cd $GIT_DIR && git remote add upstream \ - git://git.qemu-project.org/qemu.git &>/dev/null) - (cd $GIT_DIR && git remote update upstream &>/dev/null) - (cd $GIT_DIR && git checkout $NEW_COMMIT_ISH &>/dev/null) +# TODO: WHAT IS THIS NEXT LINE EVEN DOING FOR US?? (OK, it's initing a repo, what do we rely on there?) +git clone -ls ${LOCAL_REPO_MAP[0]} $GIT_DIR -b master --single-branch &>/dev/null +echo "Please wait..." 
+(cd $GIT_DIR && git remote add upstream \ +git://git.qemu-project.org/qemu.git &>/dev/null) +(cd $GIT_DIR && git remote update upstream &>/dev/null) +(cd $GIT_DIR && git checkout $NEW_COMMIT_ISH &>/dev/null) # As an alternative, we could add a --recurse-submodules to the checkout instead here as well, right? #UPSTREAM DOESNT DO THIS (time takes 17 minutes!): # (cd $GIT_DIR && git submodule update --init --recursive &>/dev/null) #INSTEAD THESE NEXT TWO LINES ARE WHAT IS DONE (these take 9 minutes and 3 minutes respectively): - (cd $GIT_DIR && git submodule update --init &>/dev/null) - (cd $GIT_DIR/roms/edk2 && git submodule update --init &>/dev/null) - VERSION_EXTRA=+git.$NOW_SECONDS.$NEW_COMMIT_ISH - fi - QEMU_VERSION=$(cat $GIT_DIR/VERSION) - MAJOR_VERSION=$(echo $QEMU_VERSION|awk -F. '{print $1}') - MINOR_VERSION=$(echo $QEMU_VERSION|awk -F. '{print $2}') - X=$(echo $QEMU_VERSION|awk -F. '{print $3}') - # 0 = release, 50 = development cycle, 90..99 equate to release candidates - if [ "$X" != "0" -a "$X" != "50" ]; then - if [ "$NEXT_RELEASE_IS_MAJOR" = "0" ]; then - SOURCE_VERSION=$MAJOR_VERSION.$[$MINOR_VERSION+1].0-rc$[X-90] - else - SOURCE_VERSION=$[$MAJOR_VERSION+1].0.0-rc$[X-90] - fi +(cd $GIT_DIR && git submodule update --init &>/dev/null) +(cd $GIT_DIR/roms/edk2 && git submodule update --init &>/dev/null) +VERSION_EXTRA=+git.$NOW_SECONDS.$NEW_COMMIT_ISH +if (cd ${LOCAL_REPO_MAP[0]} && git describe --exact-match $NEW_COMMIT_ISH \ + &>/dev/null); then + if [ "$X" = "50" ]; then + echo "Ignoring non-standard tag" else - SOURCE_VERSION=$MAJOR_VERSION.$MINOR_VERSION.$X +# there is no VERSION_EXTRA + VERSION_EXTRA= fi - if [ "$OLD_COMMIT_ISH" != "$NEW_COMMIT_ISH" ]; then - if (cd ${LOCAL_REPO_MAP[0]} && git describe --exact-match $NEW_COMMIT_ISH \ - &>/dev/null); then - if [ "$X" = "50" ]; then - echo "Ignoring non-standard tag" - else - # there is no VERSION_EXTRA - VERSION_EXTRA= - fi - fi - (cd $GIT_DIR/roms/seabios && git describe --tags --long --dirty > \ - .version) - (cd $GIT_DIR/roms/skiboot && ./make_version.sh > .version) - echo "Almost there..." - tar --exclude=.git --transform "s,$GIT_DIR,qemu-$SOURCE_VERSION," \ - -Pcf qemu-$SOURCE_VERSION$VERSION_EXTRA.tar $GIT_DIR - osc rm --force qemu-$OLD_SOURCE_VERSION_AND_EXTRA.tar.xz &>/dev/null ||\ - true - osc rm --force qemu-$OLD_SOURCE_VERSION_AND_EXTRA.tar.xz.sig \ - &>/dev/null || true - unset QEMU_TARBALL_SIG - xz -T 0 qemu-$SOURCE_VERSION$VERSION_EXTRA.tar - osc add qemu-$SOURCE_VERSION$VERSION_EXTRA.tar.xz +fi +(cd $GIT_DIR/roms/seabios && git describe --tags --long --dirty > \ + .version) +(cd $GIT_DIR/roms/skiboot && ./make_version.sh > .version) +echo "Almost there..." +tar --exclude=.git --transform "s,$GIT_DIR,qemu-$SOURCE_VERSION," \ + -Pcf qemu-$SOURCE_VERSION$VERSION_EXTRA.tar $GIT_DIR +osc rm --force qemu-$OLD_SOURCE_VERSION_AND_EXTRA.tar.xz &>/dev/null ||\ + true +osc rm --force qemu-$OLD_SOURCE_VERSION_AND_EXTRA.tar.xz.sig \ + &>/dev/null || true +unset QEMU_TARBALL_SIG +xz -T 0 qemu-$SOURCE_VERSION$VERSION_EXTRA.tar +osc add qemu-$SOURCE_VERSION$VERSION_EXTRA.tar.xz + #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -# OK GET THE SUBMODULE COMMIT ID'S FROM THIS NEWLY MINTED QEMU CHECKOUT! WE'LL USE THAT WHEN WE REBASE OUR PATCHES +# GET THE SUBMODULE COMMIT ID'S FROM THIS NEWLY MINTED QEMU CHECKOUT. WE'LL USE THAT WHEN WE REBASE OUR PATCHES #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # !! 
We (perhaps temporarily) do MORE recursive submodules, since we are tracking ALL in these scripts, while upstream doesn't include all in tarball currently - (cd $GIT_DIR && git submodule update --init --recursive &>/dev/null) - SUBMODULE_COMMIT_IDS=($(git -C $GIT_DIR submodule status --recursive|awk '{print $1}')) - SUBMODULE_DIRS=($(git -C $GIT_DIR submodule status --recursive|awk '{print $2}')) - SUBMODULE_COUNT=${#SUBMODULE_COMMIT_IDS[@]} +(cd $GIT_DIR && git submodule update --init --recursive &>/dev/null) +SUBMODULE_COMMIT_IDS=($(git -C $GIT_DIR submodule status --recursive|awk '{print $1}')) +SUBMODULE_DIRS=($(git -C $GIT_DIR submodule status --recursive|awk '{print $2}')) +SUBMODULE_COUNT=${#SUBMODULE_COMMIT_IDS[@]} # TODO: do this with simply math - ie: use (( ... )) - if [[ "$REPO_COUNT" != "$(expr $SUBMODULE_COUNT + 1)" ]]; then - echo "ERROR: submodule count doesn't match the REPO_COUNT variable in config.sh file!" - exit - fi +if [[ "$REPO_COUNT" != "$(expr $SUBMODULE_COUNT + 1)" ]]; then + echo "ERROR: submodule count doesn't match the REPO_COUNT variable in config.sh file!" + exit +fi # We have the submodule commits, but not in the PATCH ORDER which our config.sh has (see $PATCH_PATH_MAP) - for (( i=0; i <$REPO_COUNT-1; i++ )); do - COMMIT_IDS_BY_SUBMODULE_PATH[${SUBMODULE_DIRS[$i]}/]=${SUBMODULE_COMMIT_IDS[$i]} - done - COMMIT_IDS_BY_SUBMODULE_PATH[SUPERPROJECT]=$NEW_COMMIT_ISH_FULL +for (( i=0; i <$REPO_COUNT-1; i++ )); do + COMMIT_IDS_BY_SUBMODULE_PATH[${SUBMODULE_DIRS[$i]}/]=${SUBMODULE_COMMIT_IDS[$i]} +done +COMMIT_IDS_BY_SUBMODULE_PATH[SUPERPROJECT]=$NEW_COMMIT_ISH_FULL + #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # MOVE BUNDLE COMMITS OVER TO LOCAL frombundle BRANCH #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - bundle2local - mkdir -p $BUNDLE_DIR - tar xJf bundles.tar.xz -C $BUNDLE_DIR + +bundle2local + #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # REBASE frombundle patches USING COMMIT_IDS_BY_SUBMODULE, ALSO USING OLD ID'S STORED IN OLD BUNDLE #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +mkdir -p $BUNDLE_DIR +tar xJf bundles.tar.xz -C $BUNDLE_DIR # Now go through all the submodule local repos that are present and create a bundle file for the patches found there - for (( i=0; i <$REPO_COUNT; i++ )); do - if [[ -e $(readlink -f ${LOCAL_REPO_MAP[$i]}) ]]; then - if $(git -C ${LOCAL_REPO_MAP[$i]} branch | grep -F "frombundle" >/dev/null); then - SUBDIR=${PATCH_PATH_MAP[$i]} - GITREPO_COMMIT_ISH=($BUNDLE_DIR/$SUBDIR*.id) - if [[ $GITREPO_COMMIT_ISH =~ .*(.{40})[.]id ]]; then - GITREPO_COMMIT_ISH=${BASH_REMATCH[1]} - fi - git -C ${LOCAL_REPO_MAP[$i]} checkout frombundle -f - git -C ${LOCAL_REPO_MAP[$i]} branch -D $GIT_BRANCH - git -C ${LOCAL_REPO_MAP[$i]} checkout -b $GIT_BRANCH - if [[ "$SUBDIR" = "" ]]; then - SUBDIR=SUPERPROJECT - fi - if ! $(git -C ${LOCAL_REPO_MAP[$i]} rebase --onto ${COMMIT_IDS_BY_SUBMODULE_PATH[$SUBDIR]} $GITREPO_COMMIT_ISH >/dev/null); then -# TODO: record that this one needs manual help! 
- echo "Rebase of ${LOCAL_REPO_MAP[$i]}, branch $GIT_BRANCH needs manual help" - fi - fi +for (( i=0; i <$REPO_COUNT; i++ )); do + if [[ -e $(readlink -f ${LOCAL_REPO_MAP[$i]}) ]]; then + if $(git -C ${LOCAL_REPO_MAP[$i]} branch | grep -F "frombundle" >/dev/null); then + SUBDIR=${PATCH_PATH_MAP[$i]} + GITREPO_COMMIT_ISH=($BUNDLE_DIR/$SUBDIR*.id) + if [[ $GITREPO_COMMIT_ISH =~ .*(.{40})[.]id ]]; then + GITREPO_COMMIT_ISH=${BASH_REMATCH[1]} + fi + git -C ${LOCAL_REPO_MAP[$i]} checkout frombundle -f + git -C ${LOCAL_REPO_MAP[$i]} branch -D $GIT_BRANCH + git -C ${LOCAL_REPO_MAP[$i]} checkout -b $GIT_BRANCH + if [[ "$SUBDIR" = "" ]]; then + SUBDIR=SUPERPROJECT + fi + if ! $(git -C ${LOCAL_REPO_MAP[$i]} rebase --onto ${COMMIT_IDS_BY_SUBMODULE_PATH[$SUBDIR]} $GITREPO_COMMIT_ISH >/dev/null); then +# TODO: record that this one needs manual help! + echo "Rebase of ${LOCAL_REPO_MAP[$i]}, branch $GIT_BRANCH needs manual help" + REBASE_FAILS="${LOCAL_REPO_MAP[$i]} $REBASE_FAILS" fi - done -#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -# CREATE BUNDLE FROM $GIT_BRANCH branch -#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - initbundle -#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -# GET BUNDLE PATCHES FROM BUNDLE_DIR -#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - fi - rm -rf $GIT_DIR - # We're done with GIT_UPSTREAM_COMMIT_ISH carrying the special value LATEST - GIT_UPSTREAM_COMMIT_ISH=$NEW_COMMIT_ISH - WRITE_LOG=0 -#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -# DONE WITH LATEST WORK -#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -else # not based on LATEST upstream master, rather any upstream commitish - if [ "$OLD_SOURCE_VERSION_AND_EXTRA" = "" ]; then - echo "Failure: tarball required which corresponds to commitish:" \ - "$GIT_UPSTREAM_COMMITISH" - exit - fi - if [ -d "${LOCAL_REPO_MAP[0]}" ]; then - echo "Processing local git tree branch: master, using commitish:"\ - "$GIT_UPSTREAM_COMMIT_ISH" - if ! (cd ${LOCAL_REPO_MAP[0]} && git show-branch master &>/dev/null) - then - echo "Error: Branch master not found - please create a remote"\ - "tracking branch of origin/master" - exit fi -# ( THIS ISNT WORKING - IS OLD HISTORY:) - else - echo "Processing $GIT_BRANCH branch of remote git tree, using"\ - "commitish: $GIT_UPSTREAM_COMMIT_ISH" - echo "(For fast processing, consider establishing a local git tree"\ - "at ${LOCAL_REPO_MAP[0]})" fi - SOURCE_VERSION=$OLD_SOURCE_VERSION_AND_EXTRA - QEMU_VERSION=$(tar JxfO qemu-$SOURCE_VERSION$VERSION_EXTRA.tar.xz qemu-$SOURCE_VERSION/VERSION) - NEW_COMMIT_ISH= - WRITE_LOG=1 -fi +done +} + +#============================================================================== + +bundle2spec() { +rm -f checkpatch.log +rm -f checkthese +rm -rf checkdir +rm -rf $GIT_DIR +rm -rf $CMP_DIR +rm -rf $BUNDLE_DIR +mkdir -p $BUNDLE_DIR + #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # NOW PROCESS BUNDLES INTO COMMITS AND FILL SPEC FILE #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
-mkdir -p $BUNDLE_DIR + tar xJf bundles.tar.xz -C $BUNDLE_DIR BUNDLE_FILES=$(find $BUNDLE_DIR -printf "%P\n"|grep "bundle$") @@ -422,11 +341,13 @@ rm -rf $BUNDLE_DIR shopt -s nullglob for i in $CMP_DIR/*; do - # index line isn't consistent, so cut full index to normal line length +# index line isn't consistent, so cut full index to normal line length sed -E -i 's/(^index [a-f0-9]{28})[a-f0-9]{12}([.][.][a-f0-9]{28})[a-f0-9]{12}( [0-9]{6}$)/\1\2\3/' $i BASENAME=$(basename $i) if [ "$FIVE_DIGIT_POTENTIAL" = "1" ]; then - if [[ $BASENAME =~ [[:digit:]]{4}-.* ]]; then + if [[ $BASENAME =~ [[:digit:]]{5}.* ]]; then + : + else BASENAME=0$BASENAME fi fi @@ -442,7 +363,7 @@ rm -rf $BUNDLE_DIR for i in [0-9]*.patch; do osc rm --force $i done - # we need to make sure that w/out the numbered prefixes, the patchnames are all unique +# make sure that w/out the numbered prefixes, the patchnames are all unique mkdir checkdir for i in $CMP_DIR/*; do BASENAME=$(basename $i) @@ -530,7 +451,7 @@ rm -rf $BUNDLE_DIR rm -f checkpatch.log if [ "$TOTAL_COUNT" != "0" -a "$VERSION_EXTRA" != "" -a "$OLD_COMMIT_ISH" =\ "$NEW_COMMIT_ISH" ]; then - # Patches changed, so update the version using current time +# Only patches changed: update the version using current timestamp VERSION_EXTRA=+git.$NOW_SECONDS.$OLD_COMMIT_ISH osc mv qemu-$OLD_SOURCE_VERSION_AND_EXTRA.tar.xz \ qemu-$SOURCE_VERSION$VERSION_EXTRA.tar.xz @@ -593,7 +514,7 @@ rm -rf $BUNDLE_DIR elif [[ "$line" =~ ^Source: ]]; then echo "$line" if [ ${#QEMU_TARBALL_SIG[@]} -eq 1 ]; then - # We assume the signature file corresponds - just add .sig +# We assume the signature file corresponds - just add .sig echo "$line.sig"|sed 's/^Source: /Source99:/' fi elif [ "$line" = "SEABIOS_VERSION" ]; then @@ -611,7 +532,7 @@ rm -rf $BUNDLE_DIR fi if [ "$WRITE_LOG" = "1" ]; then - # Factory requires all deleted and added patches to be mentioned +# Factory requires all deleted and added patches to be mentioned if [ -e qemu.changes.deleted ] || [ -e qemu.changes.added ]; then echo "Patch queue updated from ${GIT_TREE} ${GIT_BRANCH}" > \ $package.changes.proposed @@ -639,6 +560,9 @@ rm -rf $BUNDLE_DIR if [ -e qemu.changes.added ]; then rm -f qemu.changes.added fi + if [[ "0" = "$(expr $CHANGED_COUNT + $DELETED_COUNT + $ADDED_COUNT)" ]]; then + osc revert bundles.tar.xz + fi echo "git patch summary" echo " unchanged: $UNCHANGED_COUNT" echo " changed: $CHANGED_COUNT" @@ -655,67 +579,190 @@ osc service localrun format_spec_file #============================================================================== usage() { - echo "Usage:" - echo "bash ./git_update.sh - echo description: package maintenance using a git-based workflow. Commands:" - echo " git2pkg (update package spec file and patches from git. Is default)" - echo " pkg2git (update git (frombundle branch) from the package "bundleofbundles")" - echo " refresh (refresh spec file from spec file template and "bundlofbundles")" +echo "Usage:" +echo "bash ./git_update.sh " +echo "description: package maintenance using a git-based workflow. Commands:" +echo " git2pkg (update package spec file and patches from git. 
Is default)" +echo " pkg2git (update git (frombundle branch) from the package "bundleofbundles")" +echo " refresh (refresh spec file from spec file template and "bundlofbundles")" +echo "(See script for details on doing 'LATEST' workflow)" } #============================================================================== -# LATEST processing currently doesn't expect cmdline params, so do it here, up front +echo "WARNING: Script using local git repos. Some operations may be time consuming..." +#TODO: Most of these checks are not necessary +for (( i=0; i <$REPO_COUNT; i++ )); do + if [[ -e $(readlink -f ${LOCAL_REPO_MAP[$i]}) ]]; then + if [[ -d ${LOCAL_REPO_MAP[$i]}/.git/rebase-merge || \ + -d ${LOCAL_REPO_MAP[$i]}/.git/rebase-apply ]]; then + echo "ERROR! Rebase appears to be in progress in ${LOCAL_REPO_MAP[$i]}. Please resolve" + exit + fi + if ! git -C ${LOCAL_REPO_MAP[$i]} submodule update --init --recursive &> /dev/null; then + echo "Please clean up state of local repo ${LOCAL_REPO_MAP[$i]} before using script" + echo "(ensure git submodule update --init --recursive is successful)" + exit + fi + if [ "$(git -C ${LOCAL_REPO_MAP[$i]} status --porcelain)" ]; then + echo "Please clean up state of local repo ${LOCAL_REPO_MAP[$i]} before using script" + echo "(ensure git status --porcelain produces no output)" + exit + fi + if ! git -C ${LOCAL_REPO_MAP[$i]} checkout master --recurse-submodules -f &> /dev/null; then + echo "Please clean up state of local repo ${LOCAL_REPO_MAP[$i]} before using script" + echo "(cannot check out master, incl. it's submodules)" + exit + fi + if ! git -C ${LOCAL_REPO_MAP[$i]} submodule update --init --recursive &> /dev/null; then + echo "Please clean up state of local repo ${LOCAL_REPO_MAP[$i]} before using script" + echo "(cannot init and update master submodules)" + exit + fi + if [ "$(git -C ${LOCAL_REPO_MAP[$i]} status --porcelain)" ]; then + echo "Please clean up state of local repo ${LOCAL_REPO_MAP[$i]} before using script" + echo "(ensure git status --porcelain produces no output)" + exit + fi + fi +done if [ "$GIT_UPSTREAM_COMMIT_ISH" = "LATEST" ]; then - echo "Processing latest upstream changes" + if [ "$1" = "continue" ]; then + CONTINUE_AFTER_REBASE=1 + else + if [ "$1" = "pause" ]; then + PAUSE_BEFORE_BUNDLE_CREATION=1 + else + if [ "$1" ]; then + echo "ERROR: unrecognized option '$1'. Script in LATEST mode only recognizes 'pause' and 'continue' options" + exit + fi + fi + fi + for (( i=0; i <$REPO_COUNT; i++ )); do + if [[ -e $(readlink -f ${LOCAL_REPO_MAP[$i]}) ]]; then + git -C ${LOCAL_REPO_MAP[$i]} remote update upstream &> /dev/null + fi + done + NEW_COMMIT_ISH_FULL=$(cd ${LOCAL_REPO_MAP[0]} && git rev-parse upstream/master) + NEW_COMMIT_ISH=${NEW_COMMIT_ISH_FULL:0:8} + git -C ${LOCAL_REPO_MAP[0]} checkout $NEW_COMMIT_ISH_FULL --recurse-submodules -f &> /dev/null + QEMU_VERSION=$(git -C ${LOCAL_REPO_MAP[0]} show upstream/master:VERSION) + MAJOR_VERSION=$(echo $QEMU_VERSION|awk -F. '{print $1}') + MINOR_VERSION=$(echo $QEMU_VERSION|awk -F. '{print $2}') + X=$(echo $QEMU_VERSION|awk -F. 
'{print $3}') +# 0 = release, 50 = development cycle, 90..99 equate to release candidates + if [ "$X" != "0" -a "$X" != "50" ]; then + if [ "$NEXT_RELEASE_IS_MAJOR" = "0" ]; then + SOURCE_VERSION=$MAJOR_VERSION.$[$MINOR_VERSION+1].0-rc$[X-90] + GIT_BRANCH=opensuse-$MAJOR_VERSION.$[$MINOR_VERSION+1] + else + SOURCE_VERSION=$[$MAJOR_VERSION+1].0.0-rc$[X-90] + GIT_BRANCH=opensuse-$[$MAJOR_VERSION+1].0 + fi + else + SOURCE_VERSION=$MAJOR_VERSION.$MINOR_VERSION.$X + GIT_BRANCH=opensuse-$MAJOR_VERSION.$[$MINOR_VERSION+1] + fi + WRITE_LOG=0 + echo "Processing LATEST upstream changes" echo "(If SUCCESS is not printed upon completion, see /tmp/latest.log for issues)" TEMP_CHECK - bundle2spec &> /tmp/latest.log + if [[ $QEMU_TARBALL =~ $BASE_RE$EXTRA_RE$SUFFIX_RE ]]; then + OLD_COMMIT_ISH=${BASH_REMATCH[3]} + else +#Assume release (or release candidate) tarball with equivalent tag: + OLD_COMMIT_ISH=$(cd ${LOCAL_REPO_MAP[0]} && git rev-list --abbrev-commit \ + --abbrev=9 -1 v$OLD_SOURCE_VERSION_AND_EXTRA) + fi + if [ ${#QEMU_TARBALL_SIG[@]} -ne 0 ]; then + echo "INFO: Ignoring signature file: $QEMU_TARBALL_SIG" + QEMU_TARBALL_SIG= + fi + NOW_SECONDS=$(date +%s) + if [ "$OLD_COMMIT_ISH" != "$NEW_COMMIT_ISH" ]; then + if [ "$CONTINUE_AFTER_REBASE" = "1" ]; then + echo "continue after rebase selected but tarball is out of date. Continuing not possible." + echo "If desired, save your rebase work (eg, branch $GIT_BRANCH), because otherwise it will" + echo "be lost. Then run script again without the continue option" + exit + fi + redo_tarball_and_rebase_patches &> /tmp/latest.log + if [[ "$REBASE_FAILS" ]]; then + echo "ERROR! Rebase of the $GIT_BRANCH branch failed in the following local git repos:" + echo $REBASE_FAILS + echo "Manually resolve all these rebases, then finish the workflow by passing 'continue' to script" + if [[ "$PAUSE_BEFORE_BUNDLE_CREATION" = "1" ]]; then + echo "Feel free to also do the work now occasioned by the selected 'pause' option" + fi + exit + fi + CONTINUE_AFTER_REBASE=1 + fi + if [[ "$PAUSE_BEFORE_BUNDLE_CREATION" = "1" ]]; then + echo "As requested, pausing before re-creating bundle of bundles for additional patch or specfile work" + echo "(using current 'ready to go' $GIT_BRANCH branch of local repos to produce patches.)" + echo "When changes are complete, finish the workflow by passing 'continue' to script" + exit + fi + if [ "$CONTINUE_AFTER_REBASE" = "1" ]; then + initbundle &>> /tmp/latest.log + fi + bundle2spec &>> /tmp/latest.log echo "SUCCESS" tail -9 /tmp/latest.log - exit +else # not LATEST + git -C ${LOCAL_REPO_MAP[0]} checkout $GIT_UPSTREAM_COMMIT_ISH --recurse-submodules -f &> /dev/null + NEW_COMMIT_ISH= + SOURCE_VERSION=$OLD_SOURCE_VERSION_AND_EXTRA + QEMU_VERSION=$(tar JxfO qemu-$SOURCE_VERSION$VERSION_EXTRA.tar.xz qemu-$SOURCE_VERSION/VERSION) + MAJOR_VERSION=$(echo $QEMU_VERSION|awk -F. '{print $1}') + MINOR_VERSION=$(echo $QEMU_VERSION|awk -F. '{print $2}') + GIT_BRANCH=opensuse-$MAJOR_VERSION.$MINOR_VERSION + WRITE_LOG=1 + if [ "$1" = "" ]; then + set -- git2pkg + fi + case $1 in + initbundle ) + initbundle + ;; + git2pkg ) + echo "Updating the package using the $GIT_BRANCH branch of the local repos." + echo "(If SUCCESS is not printed upon completion, see /tmp/git2pkg.log for issues)" + TEMP_CHECK + initbundle &> /tmp/git2pkg.log + bundle2spec &>> /tmp/git2pkg.log + echo "SUCCESS" + tail -9 /tmp/git2pkg.log + ;; + pkg2git ) + echo "Exporting the package's git bundles to the local repo's frombundle branches..." 
+ echo "(If SUCCESS is not printed upon completion, see /tmp/pkg2git.log for issues)" + TEMP_CHECK + bundle2local &> /tmp/pkg2git.log + echo "SUCCESS" + echo "To modify package patches, use the frombundle branch as the basis for updating" + echo "the $GIT_BRANCH branch with the new patch queue." + echo "Then export the changes back to the package using update_git.sh git2pkg" + ;; + refresh ) + echo "Updating the spec file and patches from the spec file template and the bundle" + echo "of bundles (bundles.tar.xz)" + echo "(If SUCCESS is not printed upon completion, see /tmp/refresh.log for issues)" + TEMP_CHECK + bundle2spec &> /tmp/refresh.log + echo "SUCCESS" + tail -9 /tmp/refresh.log + ;; + * ) + echo "Unknown command" + usage + ;; + help ) + usage + ;; + esac fi +exit -if [ "$1" = "" ]; then - set -- git2pkg -fi -case $1 in - initbundle ) - initbundle - ;; - git2pkg ) - echo "Updating the package from the $GIT_BRANCH branch of the local repos." - echo "(If SUCCESS is not printed upon completion, see /tmp/git2pkg.log for issues)" - TEMP_CHECK - initbundle &> /tmp/git2pkg.log - bundle2spec &>> /tmp/git2pkg.log - echo "SUCCESS" - tail -9 /tmp/git2pkg.log - ;; - pkg2git ) - echo "Exporting the package's git bundles to the local repo's frombundle branches..." - echo "(If SUCCESS is not printed upon completion, see /tmp/pkg2git.log for issues)" - TEMP_CHECK - bundle2local &> /tmp/pkg2git.log - echo "SUCCESS" - echo "To modify package patches, use the frombundle branch as the basis for updating" - echo "the $GIT_BRANCH branch with the new patch queue." - echo "Then export the changes back to the package using update_git.sh git2pkg" - ;; - refresh ) - echo "Updating the spec file and patches from the spec file template and the bundle" - echo "of bundles (bundles.tar.xz)" - echo "(If SUCCESS is not printed upon completion, see /tmp/refresh.log for issues)" - TEMP_CHECK - bundle2spec &> /tmp/refresh.log - echo "SUCCESS" - tail -9 /tmp/refresh.log - ;; - * ) - echo "Unknown command" - usage - ;; - help ) - usage - ;; -esac diff --git a/util-iov-introduce-qemu_iovec_init_exten.patch b/util-iov-introduce-qemu_iovec_init_exten.patch new file mode 100644 index 00000000..9aca4cc7 --- /dev/null +++ b/util-iov-introduce-qemu_iovec_init_exten.patch @@ -0,0 +1,174 @@ +From: Vladimir Sementsov-Ogievskiy +Date: Tue, 4 Jun 2019 19:15:03 +0300 +Subject: util/iov: introduce qemu_iovec_init_extended + +Git-commit: d953169d4840f312d3b9a54952f4a7ccfcb3b311 + +Introduce new initialization API, to create requests with padding. Will +be used in the following patch. New API uses qemu_iovec_init_buf if +resulting io vector has only one element, to avoid extra allocations. +So, we need to update qemu_iovec_destroy to support destroying such +QIOVs. 
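+
+A minimal sketch of the call pattern this enables (added as an annotation for
+this packaging update, not part of the upstream commit message; "guest_qiov"
+and the head/tail buffers are hypothetical, only the qemu_iovec_* calls are
+the APIs touched by this series):
+
+    QEMUIOVector padded;
+    char head[512], tail[512];   /* hypothetical alignment padding */
+
+    /* Combine head padding, the caller's vector and tail padding into one
+     * request vector without copying the payload itself. */
+    qemu_iovec_init_extended(&padded, head, sizeof(head),
+                             guest_qiov, 0, guest_qiov->size,
+                             tail, sizeof(tail));
+
+    /* ... submit the padded request ... */
+
+    /* With the qemu_iovec_destroy() change below this is safe even when the
+     * result collapsed into the embedded single-element vector. */
+    qemu_iovec_destroy(&padded);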
+ +Signed-off-by: Vladimir Sementsov-Ogievskiy +Acked-by: Stefan Hajnoczi +Message-id: 20190604161514.262241-2-vsementsov@virtuozzo.com +Message-Id: <20190604161514.262241-2-vsementsov@virtuozzo.com> +Signed-off-by: Stefan Hajnoczi +Signed-off-by: Bruce Rogers +--- + include/qemu/iov.h | 7 +++ + util/iov.c | 112 +++++++++++++++++++++++++++++++++++++++++++-- + 2 files changed, 114 insertions(+), 5 deletions(-) + +diff --git a/include/qemu/iov.h b/include/qemu/iov.h +index 48b45987b70ea28879af7989c31f..f3787a0cf768bd0ea1031913a038 100644 +--- a/include/qemu/iov.h ++++ b/include/qemu/iov.h +@@ -199,6 +199,13 @@ static inline void *qemu_iovec_buf(QEMUIOVector *qiov) + + void qemu_iovec_init(QEMUIOVector *qiov, int alloc_hint); + void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov); ++void qemu_iovec_init_extended( ++ QEMUIOVector *qiov, ++ void *head_buf, size_t head_len, ++ QEMUIOVector *mid_qiov, size_t mid_offset, size_t mid_len, ++ void *tail_buf, size_t tail_len); ++void qemu_iovec_init_slice(QEMUIOVector *qiov, QEMUIOVector *source, ++ size_t offset, size_t len); + void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len); + void qemu_iovec_concat(QEMUIOVector *dst, + QEMUIOVector *src, size_t soffset, size_t sbytes); +diff --git a/util/iov.c b/util/iov.c +index 74e6ca8ed7298c833e52257923c1..366ff9cdd1dee06c5d62712cb5ef 100644 +--- a/util/iov.c ++++ b/util/iov.c +@@ -353,6 +353,103 @@ void qemu_iovec_concat(QEMUIOVector *dst, + qemu_iovec_concat_iov(dst, src->iov, src->niov, soffset, sbytes); + } + ++/* ++ * qiov_find_iov ++ * ++ * Return pointer to iovec structure, where byte at @offset in original vector ++ * @iov exactly is. ++ * Set @remaining_offset to be offset inside that iovec to the same byte. ++ */ ++static struct iovec *iov_skip_offset(struct iovec *iov, size_t offset, ++ size_t *remaining_offset) ++{ ++ while (offset > 0 && offset >= iov->iov_len) { ++ offset -= iov->iov_len; ++ iov++; ++ } ++ *remaining_offset = offset; ++ ++ return iov; ++} ++ ++/* ++ * qiov_slice ++ * ++ * Find subarray of iovec's, containing requested range. @head would ++ * be offset in first iov (returned by the function), @tail would be ++ * count of extra bytes in last iovec (returned iov + @niov - 1). ++ */ ++static struct iovec *qiov_slice(QEMUIOVector *qiov, ++ size_t offset, size_t len, ++ size_t *head, size_t *tail, int *niov) ++{ ++ struct iovec *iov, *end_iov; ++ ++ assert(offset + len <= qiov->size); ++ ++ iov = iov_skip_offset(qiov->iov, offset, head); ++ end_iov = iov_skip_offset(iov, *head + len, tail); ++ ++ if (*tail > 0) { ++ assert(*tail < end_iov->iov_len); ++ *tail = end_iov->iov_len - *tail; ++ end_iov++; ++ } ++ ++ *niov = end_iov - iov; ++ ++ return iov; ++} ++ ++/* ++ * Compile new iovec, combining @head_buf buffer, sub-qiov of @mid_qiov, ++ * and @tail_buf buffer into new qiov. 
++ */ ++void qemu_iovec_init_extended( ++ QEMUIOVector *qiov, ++ void *head_buf, size_t head_len, ++ QEMUIOVector *mid_qiov, size_t mid_offset, size_t mid_len, ++ void *tail_buf, size_t tail_len) ++{ ++ size_t mid_head, mid_tail; ++ int total_niov, mid_niov = 0; ++ struct iovec *p, *mid_iov; ++ ++ if (mid_len) { ++ mid_iov = qiov_slice(mid_qiov, mid_offset, mid_len, ++ &mid_head, &mid_tail, &mid_niov); ++ } ++ ++ total_niov = !!head_len + mid_niov + !!tail_len; ++ if (total_niov == 1) { ++ qemu_iovec_init_buf(qiov, NULL, 0); ++ p = &qiov->local_iov; ++ } else { ++ qiov->niov = qiov->nalloc = total_niov; ++ qiov->size = head_len + mid_len + tail_len; ++ p = qiov->iov = g_new(struct iovec, qiov->niov); ++ } ++ ++ if (head_len) { ++ p->iov_base = head_buf; ++ p->iov_len = head_len; ++ p++; ++ } ++ ++ if (mid_len) { ++ memcpy(p, mid_iov, mid_niov * sizeof(*p)); ++ p[0].iov_base = (uint8_t *)p[0].iov_base + mid_head; ++ p[0].iov_len -= mid_head; ++ p[mid_niov - 1].iov_len -= mid_tail; ++ p += mid_niov; ++ } ++ ++ if (tail_len) { ++ p->iov_base = tail_buf; ++ p->iov_len = tail_len; ++ } ++} ++ + /* + * Check if the contents of the iovecs are all zero + */ +@@ -374,14 +471,19 @@ bool qemu_iovec_is_zero(QEMUIOVector *qiov) + return true; + } + ++void qemu_iovec_init_slice(QEMUIOVector *qiov, QEMUIOVector *source, ++ size_t offset, size_t len) ++{ ++ qemu_iovec_init_extended(qiov, NULL, 0, source, offset, len, NULL, 0); ++} ++ + void qemu_iovec_destroy(QEMUIOVector *qiov) + { +- assert(qiov->nalloc != -1); ++ if (qiov->nalloc != -1) { ++ g_free(qiov->iov); ++ } + +- qemu_iovec_reset(qiov); +- g_free(qiov->iov); +- qiov->nalloc = 0; +- qiov->iov = NULL; ++ memset(qiov, 0, sizeof(*qiov)); + } + + void qemu_iovec_reset(QEMUIOVector *qiov)
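
A short sketch of how the new slice helper pairs with the relaxed
qemu_iovec_destroy() above (illustrative annotation only; "src" and the
offsets are made up, the qemu_iovec_* calls are the ones added by this patch):

    QEMUIOVector sub;

    /* Zero-copy view of bytes [4096, 4096 + 65536) of an existing vector;
     * internally this is qemu_iovec_init_extended() with no head/tail. */
    qemu_iovec_init_slice(&sub, src, 4096, 65536);

    /* ... use "sub" for a child request ... */

    /* Frees the iovec array only if one was allocated; a slice that fit in
     * the embedded local_iov (nalloc == -1) is simply zeroed. */
    qemu_iovec_destroy(&sub);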