migration/ram: cancel ram saving when migration is cancelled

When the live migration of a confidential guest is cancelled, ram save
needs to be cancelled with some vendor specific work required (e.g.
restore some private pages' state). Here are 3 cases:
If the migration cancelling request is received before ram saving
starts (i.e. cgs_epochs is 0), nothing needs to be done;
If the migration cancelling request is received during the first round
of ram saving, provide the vendor specific handling via the cgs_mig API
with the gfn of the last page that has been saved; and
If the migration cancelling request is received after the first round of
ram saving, it is likely that all the pages have been saved. The gfn used
for cancelling is set to that of the last guest RAM page.

Because clearing KVM's bitmap write-protects the private pages in chunks
(e.g. 256K pages by default), the end_gfn needs to be aligned to the
chunk boundary.

Signed-off-by: Wei Wang <wei.w.wang@intel.com>
This commit is contained in:
Wei Wang
2022-11-09 16:44:48 +08:00
committed by Nikolay Borisov
parent 74f6267c0b
commit fc85e57efd
3 changed files with 38 additions and 0 deletions

View File

@@ -3816,6 +3816,7 @@ static void migration_iteration_finish(MigrationState *s)
case MIGRATION_STATUS_FAILED:
case MIGRATION_STATUS_CANCELLED:
case MIGRATION_STATUS_CANCELLING:
ram_save_cancel();
if (s->vm_was_running) {
if (!runstate_check(RUN_STATE_SHUTDOWN)) {
vm_start();

View File

@@ -2533,6 +2533,42 @@ static int ram_save_cgs_start_epoch(RAMState *rs)
return 0;
}
void ram_save_cancel(void)
{
RAMBlock *block = ram_state->last_seen_block;
unsigned long page = ram_state->last_page;
ram_addr_t offset = ((ram_addr_t)page) << TARGET_PAGE_BITS;
uint64_t align_size = 1UL << block->clear_bmap_shift;
hwaddr gpa, gfn, gfn_aligned;
int ret;
if (!ram_counters.cgs_epochs) {
return;
} else if (ram_counters.cgs_epochs == 1) {
ret = kvm_physical_memory_addr_from_host(kvm_state,
block->host + offset, &gpa);
assert(ret == 1);
} else {
/*
* All the pages have likely been saved in the first round. Just
* provide the end of the guest-physical page to cancel from.
*/
gpa = ram_bytes_total() - TARGET_PAGE_SIZE;
}
gfn = gpa >> TARGET_PAGE_BITS;
/*
* Pages that have been cleared in the clear log are write-protected, and
* need to be restored. So make it aligned on the clear_bmap boundary.
*/
gfn_aligned = QEMU_ALIGN_UP(gfn, align_size);
ret = cgs_mig_savevm_state_ram_cancel(ram_state->f, gfn_aligned);
if (ret) {
error_report("%s failed: %s", __func__, strerror(ret));
}
}
/**
* ram_save_host_page: save a whole host page
*

View File

@@ -79,6 +79,7 @@ void *postcopy_preempt_thread(void *opaque);
void ram_save_cgs_epoch_header(QEMUFile *f);
size_t ram_save_cgs_ram_header(QEMUFile *f, RAMBlock *block,
ram_addr_t offset);
void ram_save_cancel(void);
/* ram cache */
int colo_init_ram_cache(void);