# HG changeset patch
# User Jan Beulich <jbeulich@suse.com>
# Date 1337697011 -7200
# Node ID 6dc80df50fa8a01da0494a6413d70573aeeef7a2
# Parent d718706de1f0667c4a4ee137b9ba18e5c7d9817c
x86: don't hold off NMI delivery when MCE is masked

Likely through copy'n'paste, all three instances of guest MCE
processing jumped to the wrong place (where NMI processing code
correctly jumps to) when MCEs are temporarily masked (due to one
currently being processed by the guest). A nested, unmasked NMI should
get delivered immediately, however.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>

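Not part of the patch itself: the control-flow change is easier to follow as a
C sketch of the exception-delivery checks done by test_all_events/process_mce.
Every identifier below (vcpu_sketch, deliver_mce, deliver_nmi,
SKETCH_TRAP_MCE_BIT) is an illustrative stand-in rather than one of Xen's real
names, and the corresponding masking check on the NMI side is omitted for
brevity.

/*
 * Minimal sketch, assuming simplified stand-in types and helpers;
 * it mirrors the intended (post-fix) control flow only.
 */
#include <stdbool.h>

struct vcpu_sketch {
    bool mce_pending;                  /* cf. VCPU_mce_pending */
    bool nmi_pending;                  /* cf. VCPU_nmi_pending */
    unsigned int async_exception_mask; /* cf. VCPU_async_exception_mask */
};

#define SKETCH_TRAP_MCE_BIT 0x2u       /* stand-in for 1 << VCPU_TRAP_MCE */

static void deliver_mce(struct vcpu_sketch *v) { v->mce_pending = false; }
static void deliver_nmi(struct vcpu_sketch *v) { v->nmi_pending = false; }
static void test_guest_events(struct vcpu_sketch *v) { (void)v; }

void test_all_events_sketch(struct vcpu_sketch *v)
{
    if ( v->mce_pending )
    {
        if ( !(v->async_exception_mask & SKETCH_TRAP_MCE_BIT) )
        {
            deliver_mce(v);            /* MCE unmasked: bounce it to the guest */
            return;
        }
        /*
         * MCE delivery is masked.  Before this patch the code jumped
         * straight to test_guest_events() here, skipping the NMI check;
         * with the fix it falls through to the NMI test below, so a
         * nested, unmasked NMI gets delivered immediately.
         */
    }

    if ( v->nmi_pending )
    {
        deliver_nmi(v);                /* the .Ltest_guest_nmi / process_nmi path */
        return;
    }

    test_guest_events(v);
}

With the old jump target, the masked-MCE path bypassed the nmi_pending test
entirely; that is what all three hunks below correct.
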
--- a/xen/arch/x86/x86_32/entry.S
+++ b/xen/arch/x86/x86_32/entry.S
@@ -215,6 +215,7 @@ test_all_events:
         jnz process_softirqs
         testb $1,VCPU_mce_pending(%ebx)
         jnz process_mce
+.Ltest_guest_nmi:
         testb $1,VCPU_nmi_pending(%ebx)
         jnz process_nmi
 test_guest_events:
@@ -244,7 +245,7 @@ process_softirqs:
 /* %ebx: struct vcpu */
 process_mce:
         testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%ebx)
-        jnz test_guest_events
+        jnz .Ltest_guest_nmi
         sti
         movb $0,VCPU_mce_pending(%ebx)
         call set_guest_machinecheck_trapbounce
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -104,6 +104,7 @@ ENTRY(compat_test_all_events)
         jnz compat_process_softirqs
         testb $1,VCPU_mce_pending(%rbx)
         jnz compat_process_mce
+.Lcompat_test_guest_nmi:
         testb $1,VCPU_nmi_pending(%rbx)
         jnz compat_process_nmi
 compat_test_guest_events:
@@ -134,7 +135,7 @@ compat_process_softirqs:
 /* %rbx: struct vcpu */
 compat_process_mce:
         testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
-        jnz compat_test_guest_events
+        jnz .Lcompat_test_guest_nmi
         sti
         movb $0,VCPU_mce_pending(%rbx)
         call set_guest_machinecheck_trapbounce
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -204,6 +204,7 @@ test_all_events:
         jnz process_softirqs
         testb $1,VCPU_mce_pending(%rbx)
         jnz process_mce
+.Ltest_guest_nmi:
         testb $1,VCPU_nmi_pending(%rbx)
         jnz process_nmi
 test_guest_events:
@@ -232,7 +233,7 @@ process_softirqs:
 /* %rbx: struct vcpu */
 process_mce:
         testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
-        jnz test_guest_events
+        jnz .Ltest_guest_nmi
         sti
         movb $0,VCPU_mce_pending(%rbx)
         call set_guest_machinecheck_trapbounce