References: CVE-2012-5511 XSA-27 bnc#789944

hvm: Limit the size of large HVM op batches

Doing large p2m updates for HVMOP_track_dirty_vram without preemption
ties up the physical processor. Integrating preemption into the p2m
updates is hard, so simply limit the batch to 1GB, which is sufficient
for a 15000 * 15000 * 32bpp framebuffer.

For HVMOP_modified_memory and HVMOP_set_mem_type, add the necessary
machinery to handle preemption.

This is CVE-2012-5511 / XSA-27.

Signed-off-by: Tim Deegan
Signed-off-by: Ian Campbell
Acked-by: Ian Jackson

v2: Provide definition of GB to fix x86-32 compile.

Signed-off-by: Jan Beulich
Acked-by: Ian Jackson

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4033,6 +4033,9 @@ long do_hvm_op(unsigned long op, XEN_GUE
         if ( !is_hvm_domain(d) )
             goto param_fail2;
 
+        if ( a.nr > GB(1) >> PAGE_SHIFT )
+            goto param_fail2;
+
         rc = xsm_hvm_param(d, op);
         if ( rc )
             goto param_fail2;
@@ -4059,7 +4062,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
     {
         struct xen_hvm_modified_memory a;
         struct domain *d;
-        unsigned long pfn;
 
         if ( copy_from_guest(&a, arg, 1) )
             return -EFAULT;
@@ -4086,9 +4088,11 @@ long do_hvm_op(unsigned long op, XEN_GUE
         if ( !paging_mode_log_dirty(d) )
             goto param_fail3;
 
-        for ( pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++ )
+        while ( a.nr > 0 )
         {
+            unsigned long pfn = a.first_pfn;
             struct page_info *page;
+
             page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
             if ( page )
             {
@@ -4098,6 +4102,19 @@ long do_hvm_op(unsigned long op, XEN_GUE
                 sh_remove_shadows(d->vcpu[0], _mfn(page_to_mfn(page)), 1, 0);
                 put_page(page);
             }
+
+            a.first_pfn++;
+            a.nr--;
+
+            /* Check for continuation if it's not the last iteration */
+            if ( a.nr > 0 && hypercall_preempt_check() )
+            {
+                if ( copy_to_guest(arg, &a, 1) )
+                    rc = -EFAULT;
+                else
+                    rc = -EAGAIN;
+                break;
+            }
         }
 
     param_fail3:
@@ -4153,7 +4170,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
     {
         struct xen_hvm_set_mem_type a;
         struct domain *d;
-        unsigned long pfn;
 
         /* Interface types to internal p2m types */
         p2m_type_t memtype[] = {
@@ -4186,8 +4202,9 @@ long do_hvm_op(unsigned long op, XEN_GUE
         if ( a.hvmmem_type >= ARRAY_SIZE(memtype) )
             goto param_fail4;
 
-        for ( pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++ )
+        while ( a.nr )
         {
+            unsigned long pfn = a.first_pfn;
             p2m_type_t t;
             p2m_type_t nt;
             mfn_t mfn;
@@ -4227,6 +4244,19 @@ long do_hvm_op(unsigned long op, XEN_GUE
                 }
             }
             put_gfn(d, pfn);
+
+            a.first_pfn++;
+            a.nr--;
+
+            /* Check for continuation if it's not the last iteration */
+            if ( a.nr > 0 && hypercall_preempt_check() )
+            {
+                if ( copy_to_guest(arg, &a, 1) )
+                    rc = -EFAULT;
+                else
+                    rc = -EAGAIN;
+                goto param_fail4;
+            }
         }
 
         rc = 0;
--- a/xen/include/asm-x86/config.h
+++ b/xen/include/asm-x86/config.h
@@ -119,6 +119,9 @@ extern char wakeup_start[];
 extern unsigned int video_mode, video_flags;
 extern unsigned short boot_edid_caps;
 extern unsigned char boot_edid_info[128];
+
+#define GB(_gb) (_gb ## UL << 30)
+
 #endif
 
 #define asmlinkage
@@ -134,7 +137,6 @@ extern unsigned char boot_edid_info[128]
 #define PML4_ADDR(_slot) \
     ((((_slot ## UL) >> 8) * 0xffff000000000000UL) | \
       (_slot ## UL << PML4_ENTRY_BITS))
-#define GB(_gb) (_gb ## UL << 30)
 #else
 #define PML4_ENTRY_BYTES (1 << PML4_ENTRY_BITS)
 #define PML4_ADDR(_slot) \
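Note on the approach: the continuation machinery added above follows the usual Xen hypercall pattern of doing one unit of work per loop iteration, recording progress back into the guest-visible argument structure, and returning -EAGAIN when preempted so the caller re-issues the (now smaller) batch. The standalone sketch below illustrates that pattern only; it is not Xen code, and process_one_pfn() and preempt_check() are hypothetical stand-ins for the real per-page update and hypercall_preempt_check().

/*
 * Standalone illustration of the hypercall-continuation pattern used in the
 * patch above; this is NOT Xen code.  process_one_pfn() and preempt_check()
 * are hypothetical stand-ins for the real per-page update and
 * hypercall_preempt_check().
 */
#include <errno.h>
#include <stdio.h>

struct op_args {
    unsigned long first_pfn;   /* next pfn to process */
    unsigned long nr;          /* pages still to process */
};

static void process_one_pfn(unsigned long pfn)
{
    printf("updated pfn %lu\n", pfn);   /* placeholder for the real work */
}

static int preempt_check(void)
{
    static int count;
    return (++count % 3) == 0;          /* pretend we must yield every 3 pages */
}

/*
 * Returns 0 when the whole batch is done, or -EAGAIN when preempted;
 * in the latter case progress has been recorded in *a, so the caller can
 * simply re-issue the call with the same (updated) arguments.
 */
static int do_batched_op(struct op_args *a)
{
    while ( a->nr > 0 )
    {
        process_one_pfn(a->first_pfn);

        a->first_pfn++;
        a->nr--;

        /* Only look for a continuation if work remains. */
        if ( a->nr > 0 && preempt_check() )
            return -EAGAIN;
    }
    return 0;
}

int main(void)
{
    struct op_args a = { .first_pfn = 100, .nr = 7 };

    /* The "guest" side just retries until the batch completes. */
    while ( do_batched_op(&a) == -EAGAIN )
        printf("preempted; resuming at pfn %lu (%lu left)\n",
               a.first_pfn, a.nr);

    return 0;
}

As for the HVMOP_track_dirty_vram cap, GB(1) >> PAGE_SHIFT is 262144 4KiB pages (1 GiB), comfortably above the roughly 900 MB (15000 * 15000 * 4 bytes) needed for the framebuffer size cited in the changelog.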