# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1284537635 -3600
# Node ID 1087f9a03ab61d3a8bb0a1c65e5b09f82f3a4277
# Parent 62edd2611cbbe4c50574b6f6f73dda2ae1136dde
C6 state with EOI issue fix for some Intel processors

There is an erratum in some Intel processors:

AAJ72. EOI Transaction May Not be Sent if Software Enters Core C6
During an Interrupt Service Routine

If core C6 is entered after the start of an interrupt service routine
but before a write to the APIC EOI register, the core may not send an
EOI transaction (if needed) and further interrupts from the same
priority level or lower may be blocked.

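Schematically, the window the erratum describes looks like this (an
illustrative-only sketch; every function below is a stub invented for
this example, not code from this patch):

    /* Stubs standing in for the real hardware steps. */
    static void service_device(void) { /* ISR body */ }
    static void enter_core_c6(void)  { /* e.g. MWAIT into core C6 */ }
    static void apic_write_eoi(void) { /* write the APIC EOI register */ }

    void isr_with_c6_window(void)
    {
        service_device();
        enter_core_c6();  /* C6 entered after ISR start but before the  */
        apic_write_eoi(); /* EOI write: the EOI transaction may be lost, */
                          /* blocking same/lower priority interrupts     */
    }
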
This patch fixes the issue by checking whether an APIC EOI is still
pending before entering a deep Cx state. If one is pending, it enters
power->safe_state instead of the deep Cx state, so the situation above
cannot arise.

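A minimal compilable sketch of that selection logic, assuming mocked
types and a mocked pending-EOI flag in place of Xen's (the real patched
code appears in the diff below):

    #include <stdbool.h>
    #include <stdio.h>

    enum cx_type { CX_C1 = 1, CX_C2, CX_C3 };  /* ACPI_STATE_Cn stand-ins */
    struct cx_state { enum cx_type type; const char *name; };

    static bool pending_eoi = true;  /* mock of cpu_has_pending_apic_eoi() */

    /* Demote a deep (C3-class) state to the safe state while an EOI
     * is still outstanding, mirroring the check the patch adds. */
    static const struct cx_state *pick_cx(const struct cx_state *deep,
                                          const struct cx_state *safe)
    {
        if ( deep->type == CX_C3 && pending_eoi )
            return safe;
        return deep;
    }

    int main(void)
    {
        struct cx_state deep = { CX_C3, "C3/C6" }, safe = { CX_C1, "C1" };
        printf("entering %s\n", pick_cx(&deep, &safe)->name);
        return 0;
    }
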
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>

--- a/xen/arch/x86/acpi/cpu_idle.c
+++ b/xen/arch/x86/acpi/cpu_idle.c
@@ -226,6 +226,31 @@ static int sched_has_urgent_vcpu(void)
     return atomic_read(&this_cpu(schedule_data).urgent_count);
 }
 
+/*
+ * "AAJ72. EOI Transaction May Not be Sent if Software Enters Core C6 During
+ * an Interrupt Service Routine"
+ *
+ * There was an errata with some Core i7 processors that an EOI transaction
+ * may not be sent if software enters core C6 during an interrupt service
+ * routine. So we don't enter deep Cx state if there is an EOI pending.
+ */
+bool_t errata_c6_eoi_workaround(void)
+{
+    static bool_t fix_needed = -1;
+
+    if ( unlikely(fix_needed == -1) )
+    {
+        int model = boot_cpu_data.x86_model;
+        fix_needed = (cpu_has_apic && !directed_eoi_enabled &&
+                      (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
+                      (boot_cpu_data.x86 == 6) &&
+                      ((model == 0x1a) || (model == 0x1e) || (model == 0x1f) ||
+                       (model == 0x25) || (model == 0x2c) || (model == 0x2f)));
+    }
+
+    return (fix_needed && cpu_has_pending_apic_eoi());
+}
+
 static void acpi_processor_idle(void)
 {
     struct acpi_processor_power *power = processor_powers[smp_processor_id()];
@@ -277,6 +302,9 @@ static void acpi_processor_idle(void)
         return;
     }
 
+    if ( (cx->type == ACPI_STATE_C3) && errata_c6_eoi_workaround() )
+        cx = power->safe_state;
+
     power->last_state = cx;
 
     /*
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -752,6 +752,11 @@ struct pending_eoi {
 static DEFINE_PER_CPU(struct pending_eoi, pending_eoi[NR_DYNAMIC_VECTORS]);
 #define pending_eoi_sp(p) ((p)[NR_DYNAMIC_VECTORS-1].vector)
 
+bool_t cpu_has_pending_apic_eoi(void)
+{
+    return (pending_eoi_sp(this_cpu(pending_eoi)) != 0);
+}
+
 static inline void set_pirq_eoi(struct domain *d, unsigned int irq)
 {
     if ( d->arch.pirq_eoi_map )
--- a/xen/include/asm-x86/irq.h
+++ b/xen/include/asm-x86/irq.h
@@ -150,4 +150,6 @@ void irq_set_affinity(int irq, cpumask_t
 #define domain_pirq_to_irq(d, pirq) ((d)->arch.pirq_irq[pirq])
 #define domain_irq_to_pirq(d, irq) ((d)->arch.irq_pirq[irq])
 
+bool_t cpu_has_pending_apic_eoi(void);
+
 #endif /* _ASM_HW_IRQ_H */