sles10. Patch pygrub to get the kernel and initrd from the image. pygrub-boot-legacy-sles.patch - bnc#842515 - VUL-0: CVE-2013-4375: XSA-71: xen: qemu disk backend (qdisk) resource leak CVE-2013-4375-xsa71.patch - Upstream patches from Jan 52496bea-x86-properly-handle-hvm_copy_from_guest_-phys-virt-errors.patch (Replaces CVE-2013-4355-xsa63.patch) 52496c11-x86-mm-shadow-Fix-initialization-of-PV-shadow-L4-tables.patch (Replaces CVE-2013-4356-xsa64.patch) 52496c32-x86-properly-set-up-fbld-emulation-operand-address.patch (Replaces CVE-2013-4361-xsa66.patch) 52497c6c-x86-don-t-blindly-create-L3-tables-for-the-direct-map.patch 524e971b-x86-idle-Fix-get_cpu_idle_time-s-interaction-with-offline-pcpus.patch 524e9762-x86-percpu-Force-INVALID_PERCPU_AREA-to-non-canonical.patch 524e983e-Nested-VMX-check-VMX-capability-before-read-VMX-related-MSRs.patch 524e98b1-Nested-VMX-fix-IA32_VMX_CR4_FIXED1-msr-emulation.patch 524e9dc0-xsm-forbid-PV-guest-console-reads.patch 5256a979-x86-check-segment-descriptor-read-result-in-64-bit-OUTS-emulation.patch 5256be57-libxl-fix-vif-rate-parsing.patch 5256be84-tools-ocaml-fix-erroneous-free-of-cpumap-in-stub_xc_vcpu_getaffinity.patch 5256be92-libxl-fix-out-of-memory-error-handling-in-libxl_list_cpupool.patch 5257a89a-x86-correct-LDT-checks.patch 5257a8e7-x86-add-address-validity-check-to-guest_map_l1e.patch 5257a944-x86-check-for-canonical-address-before-doing-page-walks.patch 525b95f4-scheduler-adjust-internal-locking-interface.patch 525b9617-sched-fix-race-between-sched_move_domain-and-vcpu_wake.patch 525e69e8-credit-unpause-parked-vcpu-before-destroying-it.patch 525faf5e-x86-print-relevant-tail-part-of-filename-for-warnings-and-crashes.patch - bnc#840196 - L3: MTU size on Dom0 gets reset when booting DomU OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=276
177 lines
6.6 KiB
Diff
# Commit 40d66baa46ca8a9ffa6df3e063a967d08ec92bcf
# Date 2013-10-11 09:28:26 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86: correct LDT checks

- MMUEXT_SET_LDT should behave as similarly to the LLDT instruction as
  possible: fail only if the base address is non-canonical
- instead LDT descriptor accesses should fault if the descriptor
  address ends up being non-canonical (by ensuring this we at once
  avoid reading an entry from the mach-to-phys table and consider it a
  page table entry)
- fault propagation on using LDT selectors must distinguish #PF and #GP
  (the latter must be raised for a non-canonical descriptor address,
  which also applies to several other uses of propagate_page_fault(),
  and hence the problem is being fixed there)
- map_ldt_shadow_page() should properly wrap addresses for 32-bit VMs

At once remove the odd invokation of map_ldt_shadow_page() from the
MMUEXT_SET_LDT handler: There's nothing really telling us that the
first LDT page is going to be preferred over others.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Keir Fraser <keir@xen.org>

--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -674,12 +674,7 @@ int arch_set_info_guest(
                 fixup_guest_code_selector(d, c.nat->trap_ctxt[i].cs);
         }
 
-        /* LDT safety checks. */
-        if ( ((c.nat->ldt_base & (PAGE_SIZE-1)) != 0) ||
-             (c.nat->ldt_ents > 8192) ||
-             !array_access_ok(c.nat->ldt_base,
-                              c.nat->ldt_ents,
-                              LDT_ENTRY_SIZE) )
+        if ( !__addr_ok(c.nat->ldt_base) )
             return -EINVAL;
     }
     else
@@ -692,15 +687,12 @@ int arch_set_info_guest(
 
             for ( i = 0; i < ARRAY_SIZE(c.cmp->trap_ctxt); i++ )
                 fixup_guest_code_selector(d, c.cmp->trap_ctxt[i].cs);
-
-            /* LDT safety checks. */
-            if ( ((c.cmp->ldt_base & (PAGE_SIZE-1)) != 0) ||
-                 (c.cmp->ldt_ents > 8192) ||
-                 !compat_array_access_ok(c.cmp->ldt_base,
-                                         c.cmp->ldt_ents,
-                                         LDT_ENTRY_SIZE) )
-                return -EINVAL;
         }
+
+        /* LDT safety checks. */
+        if ( ((c(ldt_base) & (PAGE_SIZE - 1)) != 0) ||
+             (c(ldt_ents) > 8192) )
+            return -EINVAL;
     }
 
     v->fpu_initialised = !!(flags & VGCF_I387_VALID);
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -582,6 +582,8 @@ int map_ldt_shadow_page(unsigned int off
 
     BUG_ON(unlikely(in_irq()));
 
+    if ( is_pv_32bit_domain(d) )
+        gva = (u32)gva;
     guest_get_eff_kern_l1e(v, gva, &l1e);
     if ( unlikely(!(l1e_get_flags(l1e) & _PAGE_PRESENT)) )
         return 0;
@@ -3229,9 +3231,8 @@ long do_mmuext_op(
                 MEM_LOG("ignoring SET_LDT hypercall from external domain");
                 okay = 0;
             }
-            else if ( ((ptr & (PAGE_SIZE-1)) != 0) ||
-                      (ents > 8192) ||
-                      !array_access_ok(ptr, ents, LDT_ENTRY_SIZE) )
+            else if ( ((ptr & (PAGE_SIZE - 1)) != 0) || !__addr_ok(ptr) ||
+                      (ents > 8192) )
             {
                 okay = 0;
                 MEM_LOG("Bad args to SET_LDT: ptr=%lx, ents=%lx", ptr, ents);
@@ -3244,8 +3245,6 @@ long do_mmuext_op(
                 curr->arch.pv_vcpu.ldt_base = ptr;
                 curr->arch.pv_vcpu.ldt_ents = ents;
                 load_LDT(curr);
-                if ( ents != 0 )
-                    (void)map_ldt_shadow_page(0);
             }
             break;
         }
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1070,12 +1070,24 @@ static void reserved_bit_page_fault(
     show_execution_state(regs);
 }
 
-void propagate_page_fault(unsigned long addr, u16 error_code)
+struct trap_bounce *propagate_page_fault(unsigned long addr, u16 error_code)
 {
     struct trap_info *ti;
     struct vcpu *v = current;
     struct trap_bounce *tb = &v->arch.pv_vcpu.trap_bounce;
 
+    if ( unlikely(!is_canonical_address(addr)) )
+    {
+        ti = &v->arch.pv_vcpu.trap_ctxt[TRAP_gp_fault];
+        tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE;
+        tb->error_code = 0;
+        tb->cs = ti->cs;
+        tb->eip = ti->address;
+        if ( TI_GET_IF(ti) )
+            tb->flags |= TBF_INTERRUPT;
+        return tb;
+    }
+
     v->arch.pv_vcpu.ctrlreg[2] = addr;
     arch_set_cr2(v, addr);
 
@@ -1102,6 +1114,8 @@ void propagate_page_fault(unsigned long
 
     if ( unlikely(error_code & PFEC_reserved_bit) )
         reserved_bit_page_fault(addr, guest_cpu_user_regs());
+
+    return NULL;
 }
 
 static int handle_gdt_ldt_mapping_fault(
@@ -1135,13 +1149,16 @@ static int handle_gdt_ldt_mapping_fault(
         }
         else
         {
+            struct trap_bounce *tb;
+
            /* In hypervisor mode? Leave it to the #PF handler to fix up. */
             if ( !guest_mode(regs) )
                 return 0;
-            /* In guest mode? Propagate #PF to guest, with adjusted %cr2. */
-            propagate_page_fault(
-                curr->arch.pv_vcpu.ldt_base + offset,
-                regs->error_code);
+            /* In guest mode? Propagate fault to guest, with adjusted %cr2. */
+            tb = propagate_page_fault(curr->arch.pv_vcpu.ldt_base + offset,
+                                      regs->error_code);
+            if ( tb )
+                tb->error_code = ((u16)offset & ~3) | 4;
         }
     }
     else
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -555,7 +555,7 @@ int new_guest_cr3(unsigned long pfn);
 void make_cr3(struct vcpu *v, unsigned long mfn);
 void update_cr3(struct vcpu *v);
 int vcpu_destroy_pagetables(struct vcpu *);
-void propagate_page_fault(unsigned long addr, u16 error_code);
+struct trap_bounce *propagate_page_fault(unsigned long addr, u16 error_code);
 void *do_page_walk(struct vcpu *v, unsigned long addr);
 
 int __sync_local_execstate(void);
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -386,7 +386,8 @@ guest_get_eff_l1e(struct vcpu *v, unsign
     if ( likely(!paging_mode_translate(v->domain)) )
     {
         ASSERT(!paging_mode_external(v->domain));
-        if ( __copy_from_user(eff_l1e,
+        if ( !__addr_ok(addr) ||
+             __copy_from_user(eff_l1e,
                               &__linear_l1_table[l1_linear_offset(addr)],
                               sizeof(l1_pgentry_t)) != 0 )
             *(l1_pgentry_t *)eff_l1e = l1e_empty();