xen/26891-x86-S3-Fix-cpu-pool-scheduling-after-suspend-resume.patch
Olaf Hering (b9d38dfc8d)
- add xorg-x11-util-devel to BuildRequires to get lndir(1)
- remove xen.migrate.tools_notify_restore_to_hangup_during_migration_--abort_if_busy.patch
  It changed the migration protocol and upstream wants a different solution

- bnc#802221 - fix xenpaging
  re-add xenpaging.qemu.flush-cache.patch

- Upstream patches from Jan
  26891-x86-S3-Fix-cpu-pool-scheduling-after-suspend-resume.patch
  26930-x86-EFI-fix-runtime-call-status-for-compat-mode-Dom0.patch
- Additional fix for bnc#816159
  CVE-2013-1918-xsa45-followup.patch

- bnc#817068 - Xen guest with >1 sr-iov vf won't start
  xen-managed-pci-device.patch

- Update to Xen 4.2.2 c/s 26064
  The following recent security patches are included in the tarball
  CVE-2013-0151-xsa34.patch (bnc#797285)
  CVE-2012-6075-xsa41.patch (bnc#797523)
  CVE-2013-1917-xsa44.patch (bnc#813673)
  CVE-2013-1919-xsa46.patch (bnc#813675)

- Upstream patch from Jan
  26902-x86-EFI-pass-boot-services-variable-info-to-runtime-code.patch 

- bnc#816159 - VUL-0: xen: CVE-2013-1918: XSA-45: Several long
  latency operations are not preemptible
  CVE-2013-1918-xsa45-1-vcpu-destroy-pagetables-preemptible.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=237
2013-05-07 14:35:00 +00:00

# Commit 9aa356bc9f7533c3cb7f02c823f532532876d444
# Date 2013-04-19 12:29:01 +0200
# Author Ben Guthro <benjamin.guthro@citrix.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/S3: Fix cpu pool scheduling after suspend/resume
This addresses another S3 scheduler problem involving the system_state
variable introduced with the following changeset:
http://xenbits.xen.org/gitweb/?p=xen.git;a=commit;h=269f543ea750ed567d18f2e819e5d5ce58eda5c5
Specifically, it affects the cpu_callback function that takes the CPU down
during suspend and brings it back up during resume. We were seeing situations where,
after S3, only CPU0 was in cpupool0. Guest performance suffered
greatly, since all vcpus were only on a single pcpu. Guests under high
CPU load showed the problem much more quickly than an idle guest.
Removing this if condition forces the CPUs to go through the expected
online/offline states and be properly scheduled after S3.
This also includes a necessary partial change proposed earlier by
Tomasz Wroblewski here:
http://lists.xen.org/archives/html/xen-devel/2013-01/msg02206.html
It should also resolve the issues discussed in this thread:
http://lists.xen.org/archives/html/xen-devel/2012-11/msg01801.html
Signed-off-by: Ben Guthro <benjamin.guthro@citrix.com>
Acked-by: Juergen Gross <juergen.gross@ts.fujitsu.com>
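
The shape of the fix is easy to see in the diff below: when the system
suspends, each CPU's pool membership is recorded in a new cpu_suspended
mask, and when the CPU comes back online during resume it is returned to
that pool instead of unconditionally landing in cpupool0. The following
standalone sketch models only that bookkeeping; the struct pool,
cpu_suspend() and cpu_resume() names and the plain bitmasks are
illustrative stand-ins, not the hypervisor's cpumask_var_t,
for_each_cpupool() or cpupool_assign_cpu_locked() interfaces.

/* Minimal model of the cpu_suspended bookkeeping introduced by this patch.
 * Pools and CPU masks are plain bitmasks here; none of these names exist
 * in Xen itself. */
#include <stdio.h>
#include <stdint.h>

#define NR_POOLS 2
#define NR_CPUS  8

struct pool {
    uint32_t cpu_valid;      /* CPUs currently assigned to this pool */
    uint32_t cpu_suspended;  /* CPUs that should return here after S3 */
};

static struct pool pools[NR_POOLS];

/* At suspend: remember which pool the CPU belongs to before it goes down. */
static void cpu_suspend(unsigned int cpu)
{
    for (int p = 0; p < NR_POOLS; p++)
        if (pools[p].cpu_valid & (1u << cpu))
            pools[p].cpu_suspended |= 1u << cpu;
}

/* At resume: put the CPU back into its recorded pool; fall back to pool 0
 * (cpupool0 in Xen) only if no record exists, e.g. a newly hotplugged CPU. */
static void cpu_resume(unsigned int cpu)
{
    for (int p = 0; p < NR_POOLS; p++) {
        if (pools[p].cpu_suspended & (1u << cpu)) {
            pools[p].cpu_valid     |= 1u << cpu;
            pools[p].cpu_suspended &= ~(1u << cpu);
            return;
        }
    }
    pools[0].cpu_valid |= 1u << cpu;   /* default pool, as before the patch */
}

int main(void)
{
    /* Example layout: CPUs 0-3 in pool 0, CPUs 4-7 in pool 1. */
    pools[0].cpu_valid = 0x0f;
    pools[1].cpu_valid = 0xf0;

    for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++) {
        cpu_suspend(cpu);                      /* record membership */
        pools[0].cpu_valid &= ~(1u << cpu);    /* CPU goes offline */
        pools[1].cpu_valid &= ~(1u << cpu);
    }
    for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
        cpu_resume(cpu);                       /* membership restored */

    /* Prints valid=0x0f/0xf0 again; without the suspended record every CPU
     * would have ended up in pool 0, which is the behaviour being fixed. */
    printf("pool0 valid=0x%02x pool1 valid=0x%02x\n",
           (unsigned)pools[0].cpu_valid, (unsigned)pools[1].cpu_valid);
    return 0;
}

The fallback to pool 0 mirrors the unchanged hotplug path in
cpupool_cpu_add(), which still assigns a brand-new CPU to cpupool0.
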
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -41,16 +41,28 @@ static struct cpupool *alloc_cpupool_str
 {
     struct cpupool *c = xzalloc(struct cpupool);

-    if ( c && zalloc_cpumask_var(&c->cpu_valid) )
-        return c;
-    xfree(c);
-    return NULL;
+    if ( !c || !zalloc_cpumask_var(&c->cpu_valid) )
+    {
+        xfree(c);
+        c = NULL;
+    }
+    else if ( !zalloc_cpumask_var(&c->cpu_suspended) )
+    {
+        free_cpumask_var(c->cpu_valid);
+        xfree(c);
+        c = NULL;
+    }
+
+    return c;
 }

 static void free_cpupool_struct(struct cpupool *c)
 {
     if ( c )
+    {
+        free_cpumask_var(c->cpu_suspended);
         free_cpumask_var(c->cpu_valid);
+    }
     xfree(c);
 }

@@ -417,14 +429,32 @@ void cpupool_rm_domain(struct domain *d)

 /*
  * called to add a new cpu to pool admin
- * we add a hotplugged cpu to the cpupool0 to be able to add it to dom0
+ * we add a hotplugged cpu to the cpupool0 to be able to add it to dom0,
+ * unless we are resuming from S3, in which case we put the cpu back
+ * in the cpupool it was in prior to suspend.
  */
 static void cpupool_cpu_add(unsigned int cpu)
 {
     spin_lock(&cpupool_lock);
     cpumask_clear_cpu(cpu, &cpupool_locked_cpus);
     cpumask_set_cpu(cpu, &cpupool_free_cpus);
-    cpupool_assign_cpu_locked(cpupool0, cpu);
+
+    if ( system_state == SYS_STATE_resume )
+    {
+        struct cpupool **c;
+
+        for_each_cpupool(c)
+        {
+            if ( cpumask_test_cpu(cpu, (*c)->cpu_suspended ) )
+            {
+                cpupool_assign_cpu_locked(*c, cpu);
+                cpumask_clear_cpu(cpu, (*c)->cpu_suspended);
+            }
+        }
+    }
+
+    if ( cpumask_test_cpu(cpu, &cpupool_free_cpus) )
+        cpupool_assign_cpu_locked(cpupool0, cpu);
     spin_unlock(&cpupool_lock);
 }

@@ -436,7 +466,7 @@ static void cpupool_cpu_add(unsigned int
 static int cpupool_cpu_remove(unsigned int cpu)
 {
     int ret = 0;
-
+
     spin_lock(&cpupool_lock);
     if ( !cpumask_test_cpu(cpu, cpupool0->cpu_valid))
         ret = -EBUSY;
@@ -633,9 +663,14 @@ static int cpu_callback(
     unsigned int cpu = (unsigned long)hcpu;
     int rc = 0;

-    if ( (system_state == SYS_STATE_suspend) ||
-         (system_state == SYS_STATE_resume) )
-        goto out;
+    if ( system_state == SYS_STATE_suspend )
+    {
+        struct cpupool **c;
+
+        for_each_cpupool(c)
+            if ( cpumask_test_cpu(cpu, (*c)->cpu_valid ) )
+                cpumask_set_cpu(cpu, (*c)->cpu_suspended);
+    }

     switch ( action )
     {
@@ -650,7 +685,6 @@ static int cpu_callback(
         break;
     }

-out:
     return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
 }

--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -199,6 +199,7 @@ struct cpupool
 {
     int cpupool_id;
     cpumask_var_t cpu_valid;      /* all cpus assigned to pool */
+    cpumask_var_t cpu_suspended;  /* cpus in S3 that should be in this pool */
     struct cpupool *next;
     unsigned int n_dom;
     struct scheduler *sched;