# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1270624140 -3600
# Node ID 7794f61c61f3b0c90e367a87b287850b31645742
# Parent adce8bc43fcccf8730b1da962be60c457c51fa1b
x86, cpu hotplug: Synchronise vcpu state earlier during cpu offline.

This needs to happen before a non-idle VCPU is fully descheduled after the
CPU has been removed from cpu_online_map; otherwise sync_vcpu_execstate()
does not work properly.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
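
The ordering matters because the state sync only reaches CPUs that are still
in the online map. Below is a minimal, self-contained C sketch (an editorial
illustration with hypothetical names throughout, not Xen code) showing that a
mask-gated flush misses state held by a CPU that has already cleared its
online bit, but picks it up if the flush runs first.

/*
 * Illustrative sketch only: models why lazily-held state must be flushed
 * *before* a CPU clears itself from the online mask. cpu_online_mask,
 * dirty_mask, lazy_state[] and flush_dirty_state() are hypothetical
 * stand-ins for the real Xen structures.
 */
#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 4

static unsigned int cpu_online_mask = 0xf;      /* all four CPUs online   */
static unsigned int dirty_mask      = 1u << 2;  /* CPU2 holds lazy state  */
static int lazy_state[NR_CPUS]      = { 0, 0, 42, 0 };
static int synced_state;

/* The sync only consults CPUs that are both dirty and online (like an IPI). */
static void flush_dirty_state(void)
{
    for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
        if ((dirty_mask & cpu_online_mask) & (1u << cpu))
        {
            synced_state = lazy_state[cpu];
            dirty_mask &= ~(1u << cpu);
        }
}

static void offline_cpu(unsigned int cpu, bool sync_first)
{
    if (sync_first)
        flush_dirty_state();             /* the patched ordering           */
    cpu_online_mask &= ~(1u << cpu);     /* publish the CPU as offline     */
    if (!sync_first)
        flush_dirty_state();             /* too late: CPU2 is now skipped  */
}

int main(void)
{
    offline_cpu(2, false);
    printf("sync after offline : synced_state=%d (state lost)\n", synced_state);

    cpu_online_mask = 0xf; dirty_mask = 1u << 2; synced_state = 0;
    offline_cpu(2, true);
    printf("sync before offline: synced_state=%d\n", synced_state);
    return 0;
}
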
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1270737112 -3600
# Node ID c9e8369e49be7396eca444cfce27e6782e4aa248
# Parent a33909be109cefb0aef251c7c7e48168ed05512a
Fix two issues for CPU online/offline.

Firstly, we should return if we fail to get the spin lock in cpu_down.
Secondly, in the credit scheduler, the idlers need to be limited to the
online map.

Signed-off-by: Jiang, Yunhong <yunhong.jiang@intel.com>
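
On the first point, the commit indicates that the correct action on a failed
trylock is to return immediately rather than fall through to the common error
path while not holding the lock. A minimal sketch of that pattern, using
pthreads as a stand-in for the Xen spinlock (all names hypothetical, an
editorial illustration rather than the actual cpu_down code):

/*
 * Illustrative sketch only: bail out straight away when the trylock fails,
 * and only unlock locks we actually took.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t add_remove_lock = PTHREAD_MUTEX_INITIALIZER;

static int bring_cpu_down(void)
{
    int err = 0;

    /* trylock, like spin_trylock() in the patch: bail out on contention */
    if (pthread_mutex_trylock(&add_remove_lock) != 0)
        return -EBUSY;                 /* do NOT fall through to an unlock */

    /* ... the actual offlining work would go here ... */

    pthread_mutex_unlock(&add_remove_lock);  /* only unlock what we locked */
    return err;
}

int main(void)
{
    printf("bring_cpu_down() -> %d\n", bring_cpu_down());
    return 0;
}

The second point, restricting the credit scheduler's idler mask to
cpu_online_map, is visible in the sched_credit.c hunk below.
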
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1271090027 -3600
# Node ID 5d4038d41fd894f9fc71e64043d559d7c465bd15
# Parent 1d3bec66528900b1b442b6d36f24cd35ca076506
cpufreq: fix racing issue for cpu hotplug

To eliminate the race between the dbs timer handler and cpufreq_del_cpu,
use kill_timer instead of stop_timer so that the timer handler has finished
executing before the rest of cpufreq_del_cpu runs.

Also fix one spot that did not follow the cpufreq_statistic_lock taking
sequence.

Signed-off-by: Wei Gang <gang.wei@intel.com>
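
The description implies that stop_timer only deactivates the timer, while
kill_timer additionally waits for an in-flight handler to finish, so the
per-CPU governor data cannot be torn down underneath it. A rough pthread
analogy (an editorial sketch, not the Xen timer API; all names hypothetical):

/*
 * Illustrative sketch only: stopping a worker without waiting for it is like
 * stop_timer(); also waiting (pthread_join here) before freeing the shared
 * state is what kill_timer() provides in the patch below.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct dbs_data { int enabled; int sample; };

static void *timer_handler(void *arg)
{
    struct dbs_data *d = arg;
    if (d->enabled)
        d->sample++;          /* touches state that del_cpu() will free */
    return NULL;
}

static void del_cpu(pthread_t handler, struct dbs_data *d, int wait_for_handler)
{
    d->enabled = 0;                   /* roughly what stop_timer achieves   */
    if (wait_for_handler)
        pthread_join(handler, NULL);  /* kill_timer-style: handler is done  */
    free(d);                          /* safe only if the handler finished  */
}

int main(void)
{
    struct dbs_data *d = calloc(1, sizeof(*d));
    pthread_t t;

    d->enabled = 1;
    pthread_create(&t, NULL, timer_handler, d);
    del_cpu(t, d, 1);   /* with 0 instead of 1, the handler may race free() */
    printf("teardown complete\n");
    return 0;
}

The pmstat.c hunk below is the lock-ordering part: the per-CPU statistic
pointer is now read only after cpufreq_statistic_lock has been taken.
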
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1442,7 +1442,8 @@ void context_switch(struct vcpu *prev, s
 
     set_current(next);
 
-    if ( (per_cpu(curr_vcpu, cpu) == next) || is_idle_vcpu(next) )
+    if ( (per_cpu(curr_vcpu, cpu) == next) ||
+         (is_idle_vcpu(next) && cpu_online(cpu)) )
     {
         local_irq_enable();
     }
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -997,17 +997,13 @@ static int __devinit do_boot_cpu(int api
     return boot_error;
 }
 
-static void idle_task_exit(void)
-{
-    /* Give up lazy state borrowed by this idle vcpu */
-    __sync_lazy_execstate();
-}
-
 void cpu_exit_clear(void)
 {
     int cpu = raw_smp_processor_id();
 
-    idle_task_exit();
+    /* Previous non-idle state should be synchronised already. */
+    if (__sync_lazy_execstate())
+        BUG();
 
     cpucount --;
     cpu_uninit();
@@ -1302,6 +1298,13 @@ int __cpu_disable(void)
 
     remove_siblinginfo(cpu);
 
+    /*
+     * If we are running the idle vcpu, sync last non-idle vcpu's state
+     * before changing cpu_online_map. If we are running non-idle vcpu,
+     * we will synchronously sync the state in context_switch() later.
+     */
+    __sync_lazy_execstate();
+
     /* It's now safe to remove this processor from the online map */
     cpu_clear(cpu, cpu_online_map);
     fixup_irqs();
@@ -1340,10 +1343,8 @@ int cpu_down(unsigned int cpu)
     int err = 0;
 
     /* spin_trylock() avoids deadlock with stop_machine_run(). */
-    if (!spin_trylock(&cpu_add_remove_lock)) {
-        err = -EBUSY;
-        goto out;
-    }
+    if (!spin_trylock(&cpu_add_remove_lock))
+        return -EBUSY;
 
     if (num_online_cpus() == 1) {
         err = -EBUSY;
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -410,7 +410,7 @@ _csched_cpu_pick(struct vcpu *vc, bool_t
      * like run two VCPUs on co-hyperthreads while there are idle cores
      * or sockets.
      */
-    idlers = csched_priv.idlers;
+    cpus_and(idlers, cpu_online_map, csched_priv.idlers);
     cpu_set(cpu, idlers);
     cpus_and(cpus, cpus, idlers);
     cpu_clear(cpu, cpus);
--- a/xen/drivers/acpi/pmstat.c
+++ b/xen/drivers/acpi/pmstat.c
@@ -86,12 +86,13 @@ int do_get_pm_info(struct xen_sysctl_get
     case PMSTAT_get_pxstat:
     {
         uint32_t ct;
-        struct pm_px *pxpt = cpufreq_statistic_data[op->cpuid];
+        struct pm_px *pxpt;
         spinlock_t *cpufreq_statistic_lock =
                    &per_cpu(cpufreq_statistic_lock, op->cpuid);
 
         spin_lock(cpufreq_statistic_lock);
 
+        pxpt = cpufreq_statistic_data[op->cpuid];
         if ( !pxpt || !pxpt->u.pt || !pxpt->u.trans_pt )
         {
             spin_unlock(cpufreq_statistic_lock);
--- a/xen/drivers/cpufreq/cpufreq_ondemand.c
+++ b/xen/drivers/cpufreq/cpufreq_ondemand.c
@@ -196,9 +196,8 @@ static void dbs_timer_init(struct cpu_db
 {
     dbs_info->enable = 1;
 
-    if ( !dbs_timer[dbs_info->cpu].function )
-        init_timer(&dbs_timer[dbs_info->cpu], do_dbs_timer,
-            (void *)dbs_info, dbs_info->cpu);
+    init_timer(&dbs_timer[dbs_info->cpu], do_dbs_timer,
+        (void *)dbs_info, dbs_info->cpu);
 
     set_timer(&dbs_timer[dbs_info->cpu], NOW()+dbs_tuners_ins.sampling_rate);
 
@@ -213,7 +212,7 @@ static void dbs_timer_exit(struct cpu_db
 {
     dbs_info->enable = 0;
     dbs_info->stoppable = 0;
-    stop_timer(&dbs_timer[dbs_info->cpu]);
+    kill_timer(&dbs_timer[dbs_info->cpu]);
 }
 
 int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)