From: Olaf Hering <olaf@aepfle.de>
Date: Tue, 27 Oct 2020 19:21:50 +0100
Subject: libxc sr restore handle_buffered_page_data

tools: restore: split handle_page_data

handle_page_data must be able to read directly into mapped guest memory.
This will avoid unnecessary memcpy calls for data that can be consumed
verbatim.
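
As an illustration of the intended effect (a sketch only; read_data() is a
placeholder for the stream reading helper, not the actual API):

    /* Today: page data is read into a record buffer, then copied again. */
    read_data(fd, rec_buf, PAGE_SIZE);       /* stream -> record buffer */
    memcpy(guest_page, rec_buf, PAGE_SIZE);  /* record buffer -> mapped guest page */

    /* Goal: consume verbatim data directly in the mapped guest page. */
    read_data(fd, guest_page, PAGE_SIZE);    /* stream -> mapped guest page */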

Split the various steps of record processing (the resulting flow is
outlined below the list):
- move processing to handle_buffered_page_data
- adjust xenforeignmemory_map to set errno in case of failure
- adjust verify mode to set errno in case of failure
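
In outline, the resulting flow is (error paths elided; the full version is
in the diff below):

    handle_static_data_end_v2(ctx);                /* v2 compat: infer STATIC_DATA_END */
    verify_rec_page_hdr(ctx, rec->length, pages);  /* validate record length and count */
    verify_rec_page_pfns(ctx, rec->length, pages); /* validate the pfn/type array */
    map_guest_pages(ctx, pages);                   /* populate, map, fill guest_data[] */
    /* per pfn with stream data: localise_page(), then either memcmp() in */
    /* verify mode or memcpy() into the guest; finally unmap guest_mapping */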

This change is preparation for future changes in handle_page_data;
no change in behavior is intended.

Signed-off-by: Olaf Hering <olaf@aepfle.de>
---
 tools/libs/guest/xg_sr_common.h  |   4 +
 tools/libs/guest/xg_sr_restore.c | 320 ++++++++++++++++++++-----------
 2 files changed, 207 insertions(+), 117 deletions(-)

--- a/tools/libs/guest/xg_sr_common.h
+++ b/tools/libs/guest/xg_sr_common.h
@@ -262,6 +262,10 @@ struct xc_sr_context
         int *map_errs;
         xen_pfn_t *pp_pfns;
         xen_pfn_t *pp_mfns;
+        void **guest_data;
+
+        void *guest_mapping;
+        uint32_t nr_mapped_pages;

         int send_back_fd;
         unsigned long p2m_size;
--- a/tools/libs/guest/xg_sr_restore.c
+++ b/tools/libs/guest/xg_sr_restore.c
@@ -183,121 +183,18 @@ int populate_pfns(struct xc_sr_context *
     return rc;
 }

-/*
- * Given a list of pfns, their types, and a block of page data from the
- * stream, populate and record their types, map the relevant subset and copy
- * the data into the guest.
- */
-static int process_page_data(struct xc_sr_context *ctx, unsigned int count,
-                             xen_pfn_t *pfns, uint32_t *types, void *page_data)
+static int handle_static_data_end_v2(struct xc_sr_context *ctx)
 {
-    xc_interface *xch = ctx->xch;
-    int rc;
-    void *mapping = NULL, *guest_page = NULL;
-    unsigned int i, /* i indexes the pfns from the record. */
-        j,          /* j indexes the subset of pfns we decide to map. */
-        nr_pages = 0;
-
-    rc = populate_pfns(ctx, count, pfns, types);
-    if ( rc )
-    {
-        ERROR("Failed to populate pfns for batch of %u pages", count);
-        goto err;
-    }
-
-    for ( i = 0; i < count; ++i )
-    {
-        ctx->restore.ops.set_page_type(ctx, pfns[i], types[i]);
-
-        if ( page_type_has_stream_data(types[i]) )
-            ctx->restore.mfns[nr_pages++] = ctx->restore.ops.pfn_to_gfn(ctx, pfns[i]);
-    }
-
-    /* Nothing to do? */
-    if ( nr_pages == 0 )
-        goto done;
-
-    mapping = guest_page = xenforeignmemory_map(
-        xch->fmem, ctx->domid, PROT_READ | PROT_WRITE,
-        nr_pages, ctx->restore.mfns, ctx->restore.map_errs);
-    if ( !mapping )
-    {
-        rc = -1;
-        PERROR("Unable to map %u mfns for %u pages of data",
-               nr_pages, count);
-        goto err;
-    }
-
-    for ( i = 0, j = 0; i < count; ++i )
-    {
-        if ( !page_type_has_stream_data(types[i]) )
-            continue;
-
-        if ( ctx->restore.map_errs[j] )
-        {
-            rc = -1;
-            ERROR("Mapping pfn %#"PRIpfn" (mfn %#"PRIpfn", type %#"PRIx32") failed with %d",
-                  pfns[i], ctx->restore.mfns[j], types[i], ctx->restore.map_errs[j]);
-            goto err;
-        }
-
-        /* Undo page normalisation done by the saver. */
-        rc = ctx->restore.ops.localise_page(ctx, types[i], page_data);
-        if ( rc )
-        {
-            ERROR("Failed to localise pfn %#"PRIpfn" (type %#"PRIx32")",
-                  pfns[i], types[i] >> XEN_DOMCTL_PFINFO_LTAB_SHIFT);
-            goto err;
-        }
-
-        if ( ctx->restore.verify )
-        {
-            /* Verify mode - compare incoming data to what we already have. */
-            if ( memcmp(guest_page, page_data, PAGE_SIZE) )
-                ERROR("verify pfn %#"PRIpfn" failed (type %#"PRIx32")",
-                      pfns[i], types[i] >> XEN_DOMCTL_PFINFO_LTAB_SHIFT);
-        }
-        else
-        {
-            /* Regular mode - copy incoming data into place. */
-            memcpy(guest_page, page_data, PAGE_SIZE);
-        }
-
-        ++j;
-        guest_page += PAGE_SIZE;
-        page_data += PAGE_SIZE;
-    }
-
- done:
-    rc = 0;
-
- err:
-    if ( mapping )
-        xenforeignmemory_unmap(xch->fmem, mapping, nr_pages);
-
-    return rc;
-}
+    int rc = 0;

-/*
- * Validate a PAGE_DATA record from the stream, and pass the results to
- * process_page_data() to actually perform the legwork.
- */
-static int handle_page_data(struct xc_sr_context *ctx, struct xc_sr_record *rec)
-{
+#if defined(__i386__) || defined(__x86_64__)
     xc_interface *xch = ctx->xch;
-    struct xc_sr_rec_page_data_header *pages = rec->data;
-    unsigned int i, pages_of_data = 0;
-    int rc = -1;
-
-    xen_pfn_t pfn;
-    uint32_t type;
-
     /*
      * v2 compatibility only exists for x86 streams. This is a bit of a
      * bodge, but it is less bad than duplicating handle_page_data() between
      * different architectures.
      */
-#if defined(__i386__) || defined(__x86_64__)
+
     /* v2 compat. Infer the position of STATIC_DATA_END. */
     if ( ctx->restore.format_version < 3 && !ctx->restore.seen_static_data_end )
     {
@@ -315,12 +212,26 @@ static int handle_page_data(struct xc_sr
         ERROR("No STATIC_DATA_END seen");
         goto err;
     }
+
+    rc = 0;
+err:
 #endif

-    if ( rec->length < sizeof(*pages) )
+    return rc;
+}
+
+static bool verify_rec_page_hdr(struct xc_sr_context *ctx, uint32_t rec_length,
+                                struct xc_sr_rec_page_data_header *pages)
+{
+    xc_interface *xch = ctx->xch;
+    bool ret = false;
+
+    errno = EINVAL;
+
+    if ( rec_length < sizeof(*pages) )
     {
         ERROR("PAGE_DATA record truncated: length %u, min %zu",
-              rec->length, sizeof(*pages));
+              rec_length, sizeof(*pages));
         goto err;
     }

@@ -330,13 +241,28 @@ static int handle_page_data(struct xc_sr
         goto err;
     }

-    if ( rec->length < sizeof(*pages) + (pages->count * sizeof(uint64_t)) )
+    if ( rec_length < sizeof(*pages) + (pages->count * sizeof(uint64_t)) )
     {
         ERROR("PAGE_DATA record (length %u) too short to contain %u"
-              " pfns worth of information", rec->length, pages->count);
+              " pfns worth of information", rec_length, pages->count);
         goto err;
     }

+    ret = true;
+
+err:
+    return ret;
+}
+
+static bool verify_rec_page_pfns(struct xc_sr_context *ctx, uint32_t rec_length,
+                                 struct xc_sr_rec_page_data_header *pages)
+{
+    xc_interface *xch = ctx->xch;
+    uint32_t i, pages_of_data = 0;
+    xen_pfn_t pfn;
+    uint32_t type;
+    bool ret = false;
+
     for ( i = 0; i < pages->count; ++i )
     {
         pfn = pages->pfn[i] & PAGE_DATA_PFN_MASK;
@@ -363,19 +289,177 @@ static int handle_page_data(struct xc_sr
         ctx->restore.types[i] = type;
     }

-    if ( rec->length != (sizeof(*pages) +
+    if ( rec_length != (sizeof(*pages) +
                         (sizeof(uint64_t) * pages->count) +
                         (PAGE_SIZE * pages_of_data)) )
     {
         ERROR("PAGE_DATA record wrong size: length %u, expected "
-              "%zu + %zu + %lu", rec->length, sizeof(*pages),
+              "%zu + %zu + %lu", rec_length, sizeof(*pages),
               (sizeof(uint64_t) * pages->count), (PAGE_SIZE * pages_of_data));
         goto err;
     }

-    rc = process_page_data(ctx, pages->count, ctx->restore.pfns,
-                           ctx->restore.types, &pages->pfn[pages->count]);
+    ret = true;
+
+err:
+    return ret;
+}
+
+/*
+ * Populate pfns, if required
+ * Fill guest_data with either mapped address or NULL
+ * The caller must unmap guest_mapping
+ */
+static int map_guest_pages(struct xc_sr_context *ctx,
+                           struct xc_sr_rec_page_data_header *pages)
+{
+    xc_interface *xch = ctx->xch;
+    uint32_t i, p;
+    int rc;
+
+    rc = populate_pfns(ctx, pages->count, ctx->restore.pfns, ctx->restore.types);
+    if ( rc )
+    {
+        ERROR("Failed to populate pfns for batch of %u pages", pages->count);
+        goto err;
+    }
+
+    ctx->restore.nr_mapped_pages = 0;
+
+    for ( i = 0; i < pages->count; i++ )
+    {
+        ctx->restore.ops.set_page_type(ctx, ctx->restore.pfns[i], ctx->restore.types[i]);
+
+        if ( page_type_has_stream_data(ctx->restore.types[i]) == false )
+        {
+            ctx->restore.guest_data[i] = NULL;
+            continue;
+        }
+
+        ctx->restore.mfns[ctx->restore.nr_mapped_pages++] = ctx->restore.ops.pfn_to_gfn(ctx, ctx->restore.pfns[i]);
+    }
+
+    /* Nothing to do? */
+    if ( ctx->restore.nr_mapped_pages == 0 )
+        goto done;
+
+    ctx->restore.guest_mapping = xenforeignmemory_map(xch->fmem, ctx->domid,
+                       PROT_READ | PROT_WRITE, ctx->restore.nr_mapped_pages,
+                       ctx->restore.mfns, ctx->restore.map_errs);
+    if ( !ctx->restore.guest_mapping )
+    {
+        rc = -1;
+        PERROR("Unable to map %u mfns for %u pages of data",
+               ctx->restore.nr_mapped_pages, pages->count);
+        goto err;
+    }
+
+    /* Verify mapping, and assign address to pfn data */
+    for ( i = 0, p = 0; i < pages->count; i++ )
+    {
+        if ( !page_type_has_stream_data(ctx->restore.types[i]) )
+            continue;
+
+        if ( ctx->restore.map_errs[p] == 0 )
+        {
+            ctx->restore.guest_data[i] = ctx->restore.guest_mapping + (p * PAGE_SIZE);
+            p++;
+            continue;
+        }
+
+        errno = ctx->restore.map_errs[p];
+        rc = -1;
+        PERROR("Mapping pfn %#"PRIpfn" (mfn %#"PRIpfn", type %#"PRIx32") failed",
+               ctx->restore.pfns[i], ctx->restore.mfns[p], ctx->restore.types[i]);
+        goto err;
+    }
+
+done:
+    rc = 0;
+
+err:
+    return rc;
+}
+
+/*
+ * Handle PAGE_DATA record from an existing buffer
+ * Given a list of pfns, their types, and a block of page data from the
+ * stream, populate and record their types, map the relevant subset and copy
+ * the data into the guest.
+ */
+static int handle_buffered_page_data(struct xc_sr_context *ctx,
+                                     struct xc_sr_record *rec)
+{
+    xc_interface *xch = ctx->xch;
+    struct xc_sr_rec_page_data_header *pages = rec->data;
+    void *p;
+    uint32_t i;
+    int rc = -1, idx;
+
+    rc = handle_static_data_end_v2(ctx);
+    if ( rc )
+        goto err;
+
+    /* First read and verify the header */
+    if ( !verify_rec_page_hdr(ctx, rec->length, pages) )
+    {
+        rc = -1;
+        goto err;
+    }
+
+    /* Then read and verify the pfn numbers */
+    if ( !verify_rec_page_pfns(ctx, rec->length, pages) )
+    {
+        rc = -1;
+        goto err;
+    }
+
+    /* Map the target pfn */
+    rc = map_guest_pages(ctx, pages);
+    if ( rc )
+        goto err;
+
+    for ( i = 0, idx = 0; i < pages->count; i++ )
+    {
+        if ( !ctx->restore.guest_data[i] )
+            continue;
+
+        p = &pages->pfn[pages->count] + (idx * PAGE_SIZE);
+        rc = ctx->restore.ops.localise_page(ctx, ctx->restore.types[i], p);
+        if ( rc )
+        {
+            ERROR("Failed to localise pfn %#"PRIpfn" (type %#"PRIx32")",
+                  ctx->restore.pfns[i], ctx->restore.types[i] >> XEN_DOMCTL_PFINFO_LTAB_SHIFT);
+            goto err;
+
+        }
+
+        if ( ctx->restore.verify )
+        {
+            if ( memcmp(ctx->restore.guest_data[i], p, PAGE_SIZE) )
+            {
+                errno = EIO;
+                ERROR("verify pfn %#"PRIpfn" failed (type %#"PRIx32")",
+                      ctx->restore.pfns[i], ctx->restore.types[i] >> XEN_DOMCTL_PFINFO_LTAB_SHIFT);
+                goto err;
+            }
+        }
+        else
+        {
+            memcpy(ctx->restore.guest_data[i], p, PAGE_SIZE);
+        }
+
+        idx++;
+    }
+
+    rc = 0;

 err:
+    if ( ctx->restore.guest_mapping )
+    {
+        xenforeignmemory_unmap(xch->fmem, ctx->restore.guest_mapping, ctx->restore.nr_mapped_pages);
+        ctx->restore.guest_mapping = NULL;
+    }
     return rc;
 }

@@ -623,7 +707,7 @@ static int process_buffered_record(struc
         break;

     case REC_TYPE_PAGE_DATA:
-        rc = handle_page_data(ctx, rec);
+        rc = handle_buffered_page_data(ctx, rec);
         break;

     case REC_TYPE_VERIFY:
@@ -703,9 +787,10 @@ static int setup(struct xc_sr_context *c
     ctx->restore.map_errs = malloc(MAX_BATCH_SIZE * sizeof(*ctx->restore.map_errs));
     ctx->restore.pp_pfns = malloc(MAX_BATCH_SIZE * sizeof(*ctx->restore.pp_pfns));
     ctx->restore.pp_mfns = malloc(MAX_BATCH_SIZE * sizeof(*ctx->restore.pp_mfns));
+    ctx->restore.guest_data = malloc(MAX_BATCH_SIZE * sizeof(*ctx->restore.guest_data));
     if ( !ctx->restore.pfns || !ctx->restore.types || !ctx->restore.mfns ||
          !ctx->restore.map_errs || !ctx->restore.pp_pfns ||
-         !ctx->restore.pp_mfns )
+         !ctx->restore.pp_mfns || !ctx->restore.guest_data )
     {
         ERROR("Unable to allocate memory");
         rc = -1;
@@ -742,6 +827,7 @@ static void cleanup(struct xc_sr_context

     free(ctx->restore.buffered_records);
     free(ctx->restore.populated_pfns);
+    free(ctx->restore.guest_data);
     free(ctx->restore.pp_mfns);
     free(ctx->restore.pp_pfns);
     free(ctx->restore.map_errs);