xen/26673-Avoid-stale-pointer-when-moving-domain-to-another-cpupool.patch
Charles Arnold 9621add6e3
- Load blktap module in xencommons init script. blktap2 doesn't
  support qcow2, so blktap is needed to support domains with
  'tap:qcow2' disk configurations.
  modified tmp-initscript-modprobe.patch

- bnc#809203 - xen.efi isn't signed with SUSE Secure Boot key
  xen.spec 

- Fix adding managed PCI device to an inactive domain
  modified xen-managed-pci-device.patch

- bnc#805094 - xen hot plug attach/detach fails
  modified blktap-pv-cdrom.patch

- bnc#802690 - domain locking can prevent a live migration from
  completing
  modified xend-domain-lock.patch

- bnc#797014 - no way to control live migrations
  26675-tools-xentoollog_update_tty_detection_in_stdiostream_progress.patch
  xen.migrate.tools-xc_print_messages_from_xc_save_with_xc_report.patch
  xen.migrate.tools-xc_document_printf_calls_in_xc_restore.patch
  xen.migrate.tools-xc_rework_xc_save.cswitch_qemu_logdirty.patch
  xen.migrate.tools_set_migration_constraints_from_cmdline.patch
  xen.migrate.tools_add_xm_migrate_--log_progress_option.patch

- Upstream patches from Jan
  26585-x86-mm-Take-the-p2m-lock-even-in-shadow-mode.patch
  26595-x86-nhvm-properly-clean-up-after-failure-to-set-up-all-vCPU-s.patch
  26601-honor-ACPI-v4-FADT-flags.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=232
2013-03-21 22:43:53 +00:00

# Commit 482300def7d08e773ccd2a0d978bcb9469fdd810
# Date 2013-02-28 14:56:45 +0000
# Author Juergen Gross <juergen.gross@ts.fujitsu.com>
# Committer Keir Fraser <keir@xen.org>
Avoid stale pointer when moving domain to another cpupool

When a domain is moved to another cpupool, the scheduler private data pointers
in the vcpu and domain structures must never point to an already freed memory
area.

While at it, simplify sched_init_vcpu() by using DOM2OP instead of VCPU2OP.

Signed-off-by: Juergen Gross <juergen.gross@ts.fujitsu.com>

--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -220,7 +220,7 @@ int sched_init_vcpu(struct vcpu *v, unsi
     if ( v->sched_priv == NULL )
         return 1;
 
-    SCHED_OP(VCPU2OP(v), insert_vcpu, v);
+    SCHED_OP(DOM2OP(d), insert_vcpu, v);
 
     return 0;
 }
@@ -231,6 +231,9 @@ int sched_move_domain(struct domain *d,
     unsigned int new_p;
     void **vcpu_priv;
     void *domdata;
+    void *vcpudata;
+    struct scheduler *old_ops;
+    void *old_domdata;
 
     domdata = SCHED_OP(c->sched, alloc_domdata, d);
     if ( domdata == NULL )
@@ -261,21 +264,22 @@ int sched_move_domain(struct domain *d,
 
     domain_pause(d);
 
+    old_ops = DOM2OP(d);
+    old_domdata = d->sched_priv;
+
     for_each_vcpu ( d, v )
     {
-        SCHED_OP(VCPU2OP(v), remove_vcpu, v);
-        SCHED_OP(VCPU2OP(v), free_vdata, v->sched_priv);
-        v->sched_priv = NULL;
+        SCHED_OP(old_ops, remove_vcpu, v);
     }
 
-    SCHED_OP(DOM2OP(d), free_domdata, d->sched_priv);
-
     d->cpupool = c;
     d->sched_priv = domdata;
 
     new_p = cpumask_first(c->cpu_valid);
     for_each_vcpu ( d, v )
     {
+        vcpudata = v->sched_priv;
+
         migrate_timer(&v->periodic_timer, new_p);
         migrate_timer(&v->singleshot_timer, new_p);
         migrate_timer(&v->poll_timer, new_p);
@@ -288,12 +292,16 @@ int sched_move_domain(struct domain *d,
         new_p = cpumask_cycle(new_p, c->cpu_valid);
 
         SCHED_OP(c->sched, insert_vcpu, v);
+
+        SCHED_OP(old_ops, free_vdata, vcpudata);
     }
 
     domain_update_node_affinity(d);
 
     domain_unpause(d);
 
+    SCHED_OP(old_ops, free_domdata, old_domdata);
+
    xfree(vcpu_priv);
 
     return 0;