4c73609012
22998-x86-get_page_from_l1e-retcode.patch 22999-x86-mod_l1_entry-retcode.patch 23000-x86-mod_l2_entry-retcode.patch 23096-x86-hpet-no-cpumask_lock.patch 23099-x86-rwlock-scalability.patch 23103-x86-pirq-guest-eoi-check.patch 23127-vtd-bios-settings.patch 23153-x86-amd-clear-DramModEn.patch 23154-x86-amd-iorr-no-rdwr.patch 23199-amd-iommu-unmapped-intr-fault.patch 23200-amd-iommu-intremap-sync.patch 23228-x86-conditional-write_tsc.patch - update xenalyze to revision 98 * Unify setting of vcpu data type * Unify record size checks * Fix cr3_switch not to access hvm struct before it's initialized - add xenalyze.gcc46.patch to fix unused-but-set-variable errors - bnc#688473 - VUL-0: potential buffer overflow in tools cve-2011-1583-4.0.patch - hotplug.losetup.patch correct dev:inode detection and use variable expansion OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=115
221 lines
6.2 KiB
Diff
References: bnc#623680

# HG changeset patch
# User Keir Fraser <keir@xen.org>
# Date 1302853928 -3600
# Node ID 1329d99b4f161b7617a667f601077cc92559f248
# Parent  b5165fb66b56d9438d77b475eaa9db67318d1ea1
x86: don't write_tsc() non-zero values on CPUs updating only the lower 32 bits

This means suppressing the uses in time_calibration_tsc_rendezvous(),
cstate_restore_tsc(), and synchronize_tsc_slave(), and fixes a boot
hang of Linux Dom0 when loading processor.ko on such systems that
have support for C states above C1.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Keir Fraser <keir@xen.org>

--- a/xen/arch/x86/acpi/cpu_idle.c
+++ b/xen/arch/x86/acpi/cpu_idle.c
@@ -1099,3 +1099,7 @@ void cpuidle_disable_deep_cstate(void)
     hpet_disable_legacy_broadcast();
 }
 
+bool_t cpuidle_using_deep_cstate(void)
+{
+    return xen_cpuidle && max_cstate > (local_apic_timer_c2_ok ? 2 : 1);
+}
--- a/xen/arch/x86/hpet.c
+++ b/xen/arch/x86/hpet.c
@@ -634,6 +634,9 @@ void hpet_disable_legacy_broadcast(void)
     u32 cfg;
     unsigned long flags;
 
+    if ( !legacy_hpet_event.shift )
+        return;
+
     spin_lock_irqsave(&legacy_hpet_event.lock, flags);
 
     legacy_hpet_event.flags |= HPET_EVT_DISABLE;
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -41,6 +41,7 @@
 #include <asm/flushtlb.h>
 #include <asm/msr.h>
 #include <asm/mtrr.h>
+#include <asm/time.h>
 #include <mach_apic.h>
 #include <mach_wakecpu.h>
 #include <smpboot_hooks.h>
@@ -134,6 +135,12 @@ static void smp_store_cpu_info(int id)
         ;
 }
 
+/*
+ * TSC's upper 32 bits can't be written in earlier CPUs (before
+ * Prescott), there is no way to resync one AP against BP.
+ */
+bool_t disable_tsc_sync;
+
 static atomic_t tsc_count;
 static uint64_t tsc_value;
 static cpumask_t tsc_sync_cpu_mask;
@@ -142,6 +149,9 @@ static void synchronize_tsc_master(unsig
 {
     unsigned int i;
 
+    if ( disable_tsc_sync )
+        return;
+
     if ( boot_cpu_has(X86_FEATURE_TSC_RELIABLE) &&
          !cpu_isset(slave, tsc_sync_cpu_mask) )
         return;
@@ -163,6 +173,9 @@ static void synchronize_tsc_slave(unsign
 {
     unsigned int i;
 
+    if ( disable_tsc_sync )
+        return;
+
     if ( boot_cpu_has(X86_FEATURE_TSC_RELIABLE) &&
          !cpu_isset(slave, tsc_sync_cpu_mask) )
         return;
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -21,6 +21,7 @@
 #include <xen/smp.h>
 #include <xen/irq.h>
 #include <xen/softirq.h>
+#include <xen/cpuidle.h>
 #include <xen/keyhandler.h>
 #include <xen/guest_access.h>
 #include <asm/io.h>
@@ -682,6 +683,8 @@ void cstate_restore_tsc(void)
     if ( boot_cpu_has(X86_FEATURE_NONSTOP_TSC) )
         return;
 
+    ASSERT(boot_cpu_has(X86_FEATURE_TSC_RELIABLE));
+
     write_tsc(stime2tsc(read_platform_stime()));
 }
 
@@ -1384,6 +1387,66 @@ void init_percpu_time(void)
     }
 }
 
+/*
+ * On certain older Intel CPUs writing the TSC MSR clears the upper 32 bits.
+ * Obviously we must not use write_tsc() on such CPUs.
+ *
+ * Additionally, AMD specifies that being able to write the TSC MSR is not an
+ * architectural feature (but, other than their manual says, also cannot be
+ * determined from CPUID bits).
+ */
+static void __init tsc_check_writability(void)
+{
+    const char *what = NULL;
+    uint64_t tsc;
+
+    /*
+     * If all CPUs are reported as synchronised and in sync, we never write
+     * the TSCs (except unavoidably, when a CPU is physically hot-plugged).
+     * Hence testing for writability is pointless and even harmful.
+     */
+    if ( boot_cpu_has(X86_FEATURE_TSC_RELIABLE) )
+        return;
+
+    rdtscll(tsc);
+    if ( wrmsr_safe(MSR_IA32_TSC, 0) == 0 )
+    {
+        uint64_t tmp, tmp2;
+        rdtscll(tmp2);
+        write_tsc(tsc | (1ULL << 32));
+        rdtscll(tmp);
+        if ( ABS((s64)tmp - (s64)tmp2) < (1LL << 31) )
+            what = "only partially";
+    }
+    else
+    {
+        what = "not";
+    }
+
+    /* Nothing to do if the TSC is fully writable. */
+    if ( !what )
+    {
+        /*
+         * Paranoia - write back original TSC value. However, APs get synced
+         * with BSP as they are brought up, so this doesn't much matter.
+         */
+        write_tsc(tsc);
+        return;
+    }
+
+    printk(XENLOG_WARNING "TSC %s writable\n", what);
+
+    /* time_calibration_tsc_rendezvous() must not be used */
+    setup_clear_cpu_cap(X86_FEATURE_CONSTANT_TSC);
+
+    /* cstate_restore_tsc() must not be used (or do nothing) */
+    if ( !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) )
+        cpuidle_disable_deep_cstate();
+
+    /* synchronize_tsc_slave() must do nothing */
+    disable_tsc_sync = 1;
+}
+
 /* Late init function (after all CPUs are booted). */
 int __init init_xen_time(void)
 {
@@ -1400,6 +1463,8 @@ int __init init_xen_time(void)
         setup_clear_cpu_cap(X86_FEATURE_TSC_RELIABLE);
     }
 
+    tsc_check_writability();
+
     /* If we have constant-rate TSCs then scale factor can be shared. */
     if ( boot_cpu_has(X86_FEATURE_CONSTANT_TSC) )
     {
@@ -1451,7 +1516,7 @@ static int disable_pit_irq(void)
      * XXX dom0 may rely on RTC interrupt delivery, so only enable
      * hpet_broadcast if FSB mode available or if force_hpet_broadcast.
      */
-    if ( xen_cpuidle && !boot_cpu_has(X86_FEATURE_ARAT) )
+    if ( cpuidle_using_deep_cstate() && !boot_cpu_has(X86_FEATURE_ARAT) )
     {
         hpet_broadcast_init();
         if ( !hpet_broadcast_is_available() )
--- a/xen/include/asm-x86/setup.h
+++ b/xen/include/asm-x86/setup.h
@@ -4,7 +4,6 @@
 #include <xen/multiboot.h>
 
 extern bool_t early_boot;
-extern s8 xen_cpuidle;
 extern unsigned long xenheap_initial_phys_start;
 
 void init_done(void);
--- a/xen/include/asm-x86/time.h
+++ b/xen/include/asm-x86/time.h
@@ -24,6 +24,8 @@
 
 typedef u64 cycles_t;
 
+extern bool_t disable_tsc_sync;
+
 static inline cycles_t get_cycles(void)
 {
     cycles_t c;
--- a/xen/include/xen/cpuidle.h
+++ b/xen/include/xen/cpuidle.h
@@ -85,7 +85,10 @@ struct cpuidle_governor
     void (*reflect) (struct acpi_processor_power *dev);
 };
 
+extern s8 xen_cpuidle;
 extern struct cpuidle_governor *cpuidle_current_governor;
+
+bool_t cpuidle_using_deep_cstate(void);
 void cpuidle_disable_deep_cstate(void);
 
 extern void cpuidle_wakeup_mwait(cpumask_t *mask);