--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2431,6 +2431,29 @@ static inline cpumask_t vcpumask_to_pcpu
     return pmask;
 }
 
+#ifdef __i386__
+static inline void *fixmap_domain_page(unsigned long mfn)
+{
+    unsigned int cpu = smp_processor_id();
+    void *ptr = (void *)fix_to_virt(FIX_PAE_HIGHMEM_0 + cpu);
+
+    l1e_write(fix_pae_highmem_pl1e - cpu,
+              l1e_from_pfn(mfn, __PAGE_HYPERVISOR));
+    flush_tlb_one_local(ptr);
+    return ptr;
+}
+static inline void fixunmap_domain_page(const void *ptr)
+{
+    unsigned int cpu = virt_to_fix((unsigned long)ptr) - FIX_PAE_HIGHMEM_0;
+
+    l1e_write(fix_pae_highmem_pl1e - cpu, l1e_empty());
+    this_cpu(make_cr3_timestamp) = this_cpu(tlbflush_time);
+}
+#else
+#define fixmap_domain_page(mfn) mfn_to_virt(mfn)
+#define fixunmap_domain_page(ptr) ((void)(ptr))
+#endif
+
 int do_mmuext_op(
     XEN_GUEST_HANDLE(mmuext_op_t) uops,
     unsigned int count,
@@ -2700,6 +2723,66 @@ int do_mmuext_op(
             break;
         }
 
+        case MMUEXT_CLEAR_PAGE:
+        {
+            unsigned char *ptr;
+
+            okay = !get_page_and_type_from_pagenr(mfn, PGT_writable_page,
+                                                  FOREIGNDOM, 0);
+            if ( unlikely(!okay) )
+            {
+                MEM_LOG("Error while clearing mfn %lx", mfn);
+                break;
+            }
+
+            /* A page is dirtied when it's being cleared. */
+            paging_mark_dirty(d, mfn);
+
+            ptr = fixmap_domain_page(mfn);
+            clear_page(ptr);
+            fixunmap_domain_page(ptr);
+
+            put_page_and_type(page);
+            break;
+        }
+
+        case MMUEXT_COPY_PAGE:
+        {
+            const unsigned char *src;
+            unsigned char *dst;
+            unsigned long src_mfn;
+
+            src_mfn = gmfn_to_mfn(FOREIGNDOM, op.arg2.src_mfn);
+            okay = get_page_from_pagenr(src_mfn, FOREIGNDOM);
+            if ( unlikely(!okay) )
+            {
+                MEM_LOG("Error while copying from mfn %lx", src_mfn);
+                break;
+            }
+
+            okay = !get_page_and_type_from_pagenr(mfn, PGT_writable_page,
+                                                  FOREIGNDOM, 0);
+            if ( unlikely(!okay) )
+            {
+                put_page(mfn_to_page(src_mfn));
+                MEM_LOG("Error while copying to mfn %lx", mfn);
+                break;
+            }
+
+            /* A page is dirtied when it's being copied to. */
+            paging_mark_dirty(d, mfn);
+
+            src = map_domain_page(src_mfn);
+            dst = fixmap_domain_page(mfn);
+            copy_page(dst, src);
+            fixunmap_domain_page(dst);
+            unmap_domain_page(src);
+
+            put_page_and_type(page);
+            put_page(mfn_to_page(src_mfn));
+            break;
+        }
+
         default:
             MEM_LOG("Invalid extended pt command 0x%x", op.cmd);
             rc = -ENOSYS;
--- a/xen/arch/x86/x86_32/domain_page.c
+++ b/xen/arch/x86/x86_32/domain_page.c
@@ -114,7 +114,7 @@ void *map_domain_page(unsigned long mfn)
     return (void *)va;
 }
 
-void unmap_domain_page(void *va)
+void unmap_domain_page(const void *va)
 {
     unsigned int idx;
     struct vcpu *v;
@@ -241,7 +241,7 @@ void *map_domain_page_global(unsigned lo
     return (void *)va;
 }
 
-void unmap_domain_page_global(void *va)
+void unmap_domain_page_global(const void *va)
 {
     unsigned long __va = (unsigned long)va;
     l2_pgentry_t *pl2e;
--- a/xen/arch/x86/x86_64/compat/mm.c
+++ b/xen/arch/x86/x86_64/compat/mm.c
@@ -217,6 +217,8 @@ int compat_mmuext_op(XEN_GUEST_HANDLE(mm
         case MMUEXT_PIN_L4_TABLE:
         case MMUEXT_UNPIN_TABLE:
         case MMUEXT_NEW_BASEPTR:
+        case MMUEXT_CLEAR_PAGE:
+        case MMUEXT_COPY_PAGE:
             arg1 = XLAT_mmuext_op_arg1_mfn;
             break;
         default:
@@ -244,6 +246,9 @@ int compat_mmuext_op(XEN_GUEST_HANDLE(mm
         case MMUEXT_INVLPG_MULTI:
            arg2 = XLAT_mmuext_op_arg2_vcpumask;
            break;
+        case MMUEXT_COPY_PAGE:
+            arg2 = XLAT_mmuext_op_arg2_src_mfn;
+            break;
         default:
             arg2 = -1;
             break;
--- a/xen/include/asm-x86/fixmap.h
+++ b/xen/include/asm-x86/fixmap.h
@@ -29,6 +29,7 @@
  * from the end of virtual memory backwards.
  */
 enum fixed_addresses {
+    FIX_HOLE,
 #ifdef __i386__
     FIX_PAE_HIGHMEM_0,
     FIX_PAE_HIGHMEM_END = FIX_PAE_HIGHMEM_0 + NR_CPUS-1,
--- a/xen/include/public/xen.h
+++ b/xen/include/public/xen.h
@@ -231,6 +231,13 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
  * cmd: MMUEXT_SET_LDT
  * linear_addr: Linear address of LDT base (NB. must be page-aligned).
  * nr_ents: Number of entries in LDT.
+ *
+ * cmd: MMUEXT_CLEAR_PAGE
+ * mfn: Machine frame number to be cleared.
+ *
+ * cmd: MMUEXT_COPY_PAGE
+ * mfn: Machine frame number of the destination page.
+ * src_mfn: Machine frame number of the source page.
  */
 #define MMUEXT_PIN_L1_TABLE      0
 #define MMUEXT_PIN_L2_TABLE      1
@@ -247,12 +254,15 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
 #define MMUEXT_FLUSH_CACHE      12
 #define MMUEXT_SET_LDT          13
 #define MMUEXT_NEW_USER_BASEPTR 15
+#define MMUEXT_CLEAR_PAGE       16
+#define MMUEXT_COPY_PAGE        17
 
 #ifndef __ASSEMBLY__
 struct mmuext_op {
     unsigned int cmd;
     union {
-        /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */
+        /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR
+         * CLEAR_PAGE, COPY_PAGE */
         xen_pfn_t     mfn;
         /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
         unsigned long linear_addr;
@@ -266,6 +276,8 @@ struct mmuext_op {
 #else
         void *vcpumask;
 #endif
+        /* COPY_PAGE */
+        xen_pfn_t src_mfn;
     } arg2;
 };
 typedef struct mmuext_op mmuext_op_t;
--- a/xen/include/xen/domain_page.h
+++ b/xen/include/xen/domain_page.h
@@ -24,7 +24,7 @@ void *map_domain_page(unsigned long mfn)
  * Pass a VA within a page previously mapped in the context of the
  * currently-executing VCPU via a call to map_domain_page().
  */
-void unmap_domain_page(void *va);
+void unmap_domain_page(const void *va);
 
 /*
  * Similar to the above calls, except the mapping is accessible in all
@@ -32,7 +32,7 @@ void unmap_domain_page(void *va);
  * mappings can also be unmapped from any context.
  */
 void *map_domain_page_global(unsigned long mfn);
-void unmap_domain_page_global(void *va);
+void unmap_domain_page_global(const void *va);
 
 #define DMCACHE_ENTRY_VALID 1U
 #define DMCACHE_ENTRY_HELD  2U
@@ -75,7 +75,7 @@ map_domain_page_with_cache(unsigned long
 }
 
 static inline void
-unmap_domain_page_with_cache(void *va, struct domain_mmap_cache *cache)
+unmap_domain_page_with_cache(const void *va, struct domain_mmap_cache *cache)
 {
     ASSERT(cache != NULL);
     cache->flags &= ~DMCACHE_ENTRY_HELD;
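
For illustration, a guest could drive the two new commands roughly as
below. This is only a sketch, not part of the patch: it assumes a
Linux-style PV guest with the usual HYPERVISOR_mmuext_op() wrapper and
the header paths shown, and the helper names (clear_mfn_via_xen,
copy_mfn_via_xen) are made up for the example.

    #include <xen/interface/xen.h>    /* struct mmuext_op, MMUEXT_* */
    #include <asm/xen/hypercall.h>    /* HYPERVISOR_mmuext_op() */

    /* Ask Xen to zero one machine frame belonging to @domid
     * (DOMID_SELF for the caller's own pages). */
    static int clear_mfn_via_xen(xen_pfn_t mfn, domid_t domid)
    {
        struct mmuext_op op = {
            .cmd      = MMUEXT_CLEAR_PAGE,
            .arg1.mfn = mfn,
        };

        /* One op, no success counter needed. */
        return HYPERVISOR_mmuext_op(&op, 1, NULL, domid);
    }

    /* Ask Xen to copy one machine frame onto another, both in @domid. */
    static int copy_mfn_via_xen(xen_pfn_t dst_mfn, xen_pfn_t src_mfn,
                                domid_t domid)
    {
        struct mmuext_op op = {
            .cmd          = MMUEXT_COPY_PAGE,
            .arg1.mfn     = dst_mfn,
            .arg2.src_mfn = src_mfn,
        };

        return HYPERVISOR_mmuext_op(&op, 1, NULL, domid);
    }

Per the do_mmuext_op() hunk above, the destination frame must be
grabbable as a PGT_writable_page for FOREIGNDOM and is marked dirty via
paging_mark_dirty(), so log-dirty tracking (e.g. during live migration)
stays correct; if either page reference cannot be taken, the operation
fails and is logged with MEM_LOG.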