58 lines
2.4 KiB
Diff
# HG changeset patch
# User Keir Fraser <keir@xen.org>
# Date 1291041272 0
# Node ID 5cd9612db2bba51f63ff7897aca4c72cc4b8e8c2
# Parent  aba70e59a90dcb0cacc2c6834a6ce19d03d88601
x86-64: don't crash Xen upon direct pv guest access to GDT/LDT mapping area
handle_gdt_ldt_mapping_fault() is intended to deal with indirect
accesses (i.e. those caused by descriptor loads) to the GDT/LDT
mapping area only. While for 32-bit segment limits indeed prevent the
function being entered for direct accesses (i.e. a #GP fault will be
raised even before the address translation gets done), on 64-bit even
user mode accesses would lead to control reaching the BUG_ON() at the
beginning of that function.

Fortunately the fix is simple: Since the guest kernel runs in ring 3,
any guest direct access will have the "user mode" bit set, whereas
descriptor loads always do the translations to access the actual
descriptors as kernel mode ones.

Signed-off-by: Jan Beulich <jbeulich@novell.com>

Further, relax the BUG_ON() in handle_gdt_ldt_mapping_fault() to a
check-and-bail. This avoids any problems in future, if we don't
execute x86_64 guest kernels in ring 3 (e.g., because we use a
lightweight HVM container).

Signed-off-by: Keir Fraser <keir@xen.org>
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1051,8 +1051,14 @@ static int handle_gdt_ldt_mapping_fault(
     unsigned int is_ldt_area = (offset >> (GDT_LDT_VCPU_VA_SHIFT-1)) & 1;
     unsigned int vcpu_area   = (offset >> GDT_LDT_VCPU_VA_SHIFT);
 
-    /* Should never fault in another vcpu's area. */
-    BUG_ON(vcpu_area != curr->vcpu_id);
+    /*
+     * If the fault is in another vcpu's area, it cannot be due to
+     * a GDT/LDT descriptor load. Thus we can reasonably exit immediately, and
+     * indeed we have to since map_ldt_shadow_page() works correctly only on
+     * accesses to a vcpu's own area.
+     */
+    if ( vcpu_area != curr->vcpu_id )
+        return 0;
 
     /* Byte offset within the gdt/ldt sub-area. */
     offset &= (1UL << (GDT_LDT_VCPU_VA_SHIFT-1)) - 1UL;
@@ -1223,7 +1229,7 @@ static int fixup_page_fault(unsigned lon
 
     if ( unlikely(IN_HYPERVISOR_RANGE(addr)) )
     {
-        if ( !(regs->error_code & PFEC_reserved_bit) &&
+        if ( !(regs->error_code & (PFEC_user_mode | PFEC_reserved_bit)) &&
             (addr >= GDT_LDT_VIRT_START) && (addr < GDT_LDT_VIRT_END) )
             return handle_gdt_ldt_mapping_fault(
                 addr - GDT_LDT_VIRT_START, regs);