diff --git a/23506-x86_Disable_set_gpfn_from_mfn_until_m2p_table_is_allocated..patch b/23506-x86_Disable_set_gpfn_from_mfn_until_m2p_table_is_allocated..patch new file mode 100644 index 0000000..0609398 --- /dev/null +++ b/23506-x86_Disable_set_gpfn_from_mfn_until_m2p_table_is_allocated..patch @@ -0,0 +1,80 @@ +changeset: 23506:d1309a79bde8 +user: Keir Fraser +date: Fri Jun 10 08:18:33 2011 +0100 +files: xen/arch/x86/x86_64/mm.c xen/include/asm-x86/mm.h +description: +x86: Disable set_gpfn_from_mfn until m2p table is allocated. + +This is a prerequisite for calling set_gpfn_from_mfn() unconditionally +from free_heap_pages(). + +Signed-off-by: Keir Fraser + + +--- + xen/arch/x86/x86_64/mm.c | 4 ++++ + xen/include/asm-x86/mm.h | 15 +++++++++++++-- + 2 files changed, 17 insertions(+), 2 deletions(-) + +Index: xen-4.1.2-testing/xen/arch/x86/x86_64/mm.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/x86_64/mm.c ++++ xen-4.1.2-testing/xen/arch/x86/x86_64/mm.c +@@ -47,6 +47,8 @@ unsigned int __read_mostly pfn_pdx_hole_ + + unsigned int __read_mostly m2p_compat_vstart = __HYPERVISOR_COMPAT_VIRT_START; + ++bool_t __read_mostly machine_to_phys_mapping_valid = 0; ++ + /* Top-level master (and idle-domain) page directory. */ + l4_pgentry_t __attribute__ ((__section__ (".bss.page_aligned"))) + idle_pg_table[L4_PAGETABLE_ENTRIES]; +@@ -800,6 +802,8 @@ void __init paging_init(void) + #undef CNT + #undef MFN + ++ machine_to_phys_mapping_valid = 1; ++ + /* Set up linear page table mapping. */ + l4e_write(&idle_pg_table[l4_table_offset(LINEAR_PT_VIRT_START)], + l4e_from_paddr(__pa(idle_pg_table), __PAGE_HYPERVISOR)); +Index: xen-4.1.2-testing/xen/include/asm-x86/mm.h +=================================================================== +--- xen-4.1.2-testing.orig/xen/include/asm-x86/mm.h ++++ xen-4.1.2-testing/xen/include/asm-x86/mm.h +@@ -469,7 +469,7 @@ TYPE_SAFE(unsigned long,mfn); + + #ifdef CONFIG_COMPAT + #define compat_machine_to_phys_mapping ((unsigned int *)RDWR_COMPAT_MPT_VIRT_START) +-#define set_gpfn_from_mfn(mfn, pfn) ({ \ ++#define _set_gpfn_from_mfn(mfn, pfn) ({ \ + struct domain *d = page_get_owner(__mfn_to_page(mfn)); \ + unsigned long entry = (d && (d == dom_cow)) ? \ + SHARED_M2P_ENTRY : (pfn); \ +@@ -478,7 +478,7 @@ TYPE_SAFE(unsigned long,mfn); + machine_to_phys_mapping[(mfn)] = (entry)); \ + }) + #else +-#define set_gpfn_from_mfn(mfn, pfn) ({ \ ++#define _set_gpfn_from_mfn(mfn, pfn) ({ \ + struct domain *d = page_get_owner(__mfn_to_page(mfn)); \ + if(d && (d == dom_cow)) \ + machine_to_phys_mapping[(mfn)] = SHARED_M2P_ENTRY; \ +@@ -486,6 +486,17 @@ TYPE_SAFE(unsigned long,mfn); + machine_to_phys_mapping[(mfn)] = (pfn); \ + }) + #endif ++ ++/* ++ * Disable some users of set_gpfn_from_mfn() (e.g., free_heap_pages()) until ++ * the machine_to_phys_mapping is actually set up. 
++ */ ++extern bool_t machine_to_phys_mapping_valid; ++#define set_gpfn_from_mfn(mfn, pfn) do { \ ++ if ( machine_to_phys_mapping_valid ) \ ++ _set_gpfn_from_mfn(mfn, pfn); \ ++} while (0) ++ + #define get_gpfn_from_mfn(mfn) (machine_to_phys_mapping[(mfn)]) + + #define mfn_to_gmfn(_d, mfn) \ diff --git a/23507-xenpaging_update_machine_to_phys_mapping_during_page_deallocation.patch b/23507-xenpaging_update_machine_to_phys_mapping_during_page_deallocation.patch new file mode 100644 index 0000000..99f2a7f --- /dev/null +++ b/23507-xenpaging_update_machine_to_phys_mapping_during_page_deallocation.patch @@ -0,0 +1,62 @@ +changeset: 23507:0a29c8c3ddf7 +user: Keir Fraser +date: Fri Jun 10 08:19:07 2011 +0100 +files: xen/common/page_alloc.c +description: +xenpaging: update machine_to_phys_mapping[] during page deallocation + +The machine_to_phys_mapping[] array needs updating during page +deallocation. If that page is allocated again, a call to +get_gpfn_from_mfn() will still return an old gfn from another guest. +This will cause trouble because this gfn number has no or different +meaning in the context of the current guest. + +This happens when the entire guest ram is paged-out before +xen_vga_populate_vram() runs. Then XENMEM_populate_physmap is called +with gfn 0xff000. A new page is allocated with alloc_domheap_pages. +This new page does not have a gfn yet. However, in +guest_physmap_add_entry() the passed mfn maps still to an old gfn +(perhaps from another old guest). This old gfn is in paged-out state +in this guests context and has no mfn anymore. As a result, the +ASSERT() triggers because p2m_is_ram() is true for p2m_ram_paging* +types. If the machine_to_phys_mapping[] array is updated properly, +both loops in guest_physmap_add_entry() turn into no-ops for the new +page and the mfn/gfn mapping will be done at the end of the function. + +If XENMEM_add_to_physmap is used with XENMAPSPACE_gmfn, +get_gpfn_from_mfn() will return an appearently valid gfn. As a +result, guest_physmap_remove_page() is called. The ASSERT in +p2m_remove_page triggers because the passed mfn does not match the old +mfn for the passed gfn. + +Signed-off-by: Olaf Hering + + +--- + xen/common/page_alloc.c | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +Index: xen-4.1.2-testing/xen/common/page_alloc.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/common/page_alloc.c ++++ xen-4.1.2-testing/xen/common/page_alloc.c +@@ -528,7 +528,7 @@ static int reserve_offlined_page(struct + static void free_heap_pages( + struct page_info *pg, unsigned int order) + { +- unsigned long mask; ++ unsigned long mask, mfn = page_to_mfn(pg); + unsigned int i, node = phys_to_nid(page_to_maddr(pg)), tainted = 0; + unsigned int zone = page_to_zone(pg); + +@@ -539,6 +539,10 @@ static void free_heap_pages( + + for ( i = 0; i < (1 << order); i++ ) + { ++ /* This page is not a guest frame any more. 
*/ ++ page_set_owner(&pg[i], NULL); /* set_gpfn_from_mfn snoops pg owner */ ++ set_gpfn_from_mfn(mfn + i, INVALID_M2P_ENTRY); ++ + /* + * Cannot assume that count_info == 0, as there are some corner cases + * where it isn't the case and yet it isn't a bug: diff --git a/23509-x86_32_Fix_build_Define_machine_to_phys_mapping_valid.patch b/23509-x86_32_Fix_build_Define_machine_to_phys_mapping_valid.patch new file mode 100644 index 0000000..b5c718a --- /dev/null +++ b/23509-x86_32_Fix_build_Define_machine_to_phys_mapping_valid.patch @@ -0,0 +1,36 @@ +changeset: 23509:782bc7b2661a +user: Keir Fraser +date: Fri Jun 10 13:51:39 2011 +0100 +files: xen/arch/x86/x86_32/mm.c +description: +x86_32: Fix build: Define machine_to_phys_mapping_valid + +Signed-off-by: Keir Fraser + + +--- + xen/arch/x86/x86_32/mm.c | 4 ++++ + 1 file changed, 4 insertions(+) + +Index: xen-4.1.2-testing/xen/arch/x86/x86_32/mm.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/x86_32/mm.c ++++ xen-4.1.2-testing/xen/arch/x86/x86_32/mm.c +@@ -39,6 +39,8 @@ extern l1_pgentry_t l1_identmap[L1_PAGET + unsigned int __read_mostly PAGE_HYPERVISOR = __PAGE_HYPERVISOR; + unsigned int __read_mostly PAGE_HYPERVISOR_NOCACHE = __PAGE_HYPERVISOR_NOCACHE; + ++bool_t __read_mostly machine_to_phys_mapping_valid = 0; ++ + static unsigned long __read_mostly mpt_size; + + void *alloc_xen_pagetable(void) +@@ -123,6 +125,8 @@ void __init paging_init(void) + #undef CNT + #undef MFN + ++ machine_to_phys_mapping_valid = 1; ++ + /* Create page tables for ioremap()/map_domain_page_global(). */ + for ( i = 0; i < (IOREMAP_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ ) + { diff --git a/23562-xenpaging_remove_unused_spinlock_in_pager.patch b/23562-xenpaging_remove_unused_spinlock_in_pager.patch new file mode 100644 index 0000000..430f3d6 --- /dev/null +++ b/23562-xenpaging_remove_unused_spinlock_in_pager.patch @@ -0,0 +1,199 @@ +changeset: 23562:8a7f52c59d64 +user: Olaf Hering +date: Fri Jun 10 10:47:02 2011 +0200 +files: tools/xenpaging/mem_event.h tools/xenpaging/spinlock.h tools/xenpaging/xenpaging.c tools/xenpaging/xenpaging.h +description: +xenpaging: remove unused spinlock in pager + +The spinlock code in the pager is a no-op because xenpaging is a single +threaded application. There is no locking when put_response() places a +response into the ringbuffer. +The only locking is inside the hypervisor, where mem_event_put_request() and +mem_event_get_response() lock the ringbuffer to protect multiple vcpus from +each other. 
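As an aside to the description above, here is a minimal standalone sketch of what the removed pager spinlock boiled down to. It is illustrative only and not code from the patch: it swaps the pager's private test_and_set_bit() (from the now-removed bitops.h) for a GCC builtin. In a single-threaded process the test-and-set can never observe the lock held by anyone else, so taking and dropping it is pure overhead, which is why the lock can be deleted without changing behaviour:

    /* Illustrative sketch, not part of the patch. */
    #include <stdio.h>

    typedef int spinlock_t;
    #define SPIN_LOCK_UNLOCKED 0

    static inline void spin_lock(spinlock_t *lock)
    {
        /* GCC builtin used here for portability; the original pager used a
         * private test_and_set_bit() helper instead. */
        while (__sync_lock_test_and_set(lock, 1))
            ;
    }

    static inline void spin_unlock(spinlock_t *lock)
    {
        __sync_lock_release(lock);   /* stores 0 with release semantics */
    }

    int main(void)
    {
        spinlock_t ring_lock = SPIN_LOCK_UNLOCKED;

        /* Single thread: the loop body in spin_lock() never spins. */
        spin_lock(&ring_lock);
        /* ... put_response() would touch the ring buffer here ... */
        spin_unlock(&ring_lock);

        printf("lock value after unlock: %d\n", ring_lock);
        return 0;
    }
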
+ +Signed-off-by: Olaf Hering +Committed-by: Ian Jackson + + +--- + tools/xenpaging/mem_event.h | 5 --- + tools/xenpaging/spinlock.h | 69 -------------------------------------------- + tools/xenpaging/xenpaging.c | 12 ------- + tools/xenpaging/xenpaging.h | 1 + 4 files changed, 87 deletions(-) + +Index: xen-4.1.2-testing/tools/xenpaging/mem_event.h +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/mem_event.h ++++ xen-4.1.2-testing/tools/xenpaging/mem_event.h +@@ -25,7 +25,6 @@ + #define __XEN_MEM_EVENT_H__ + + +-#include "spinlock.h" + #include "xc.h" + #include + +@@ -33,9 +32,6 @@ + #include + + +-#define mem_event_ring_lock_init(_m) spin_lock_init(&(_m)->ring_lock) +-#define mem_event_ring_lock(_m) spin_lock(&(_m)->ring_lock) +-#define mem_event_ring_unlock(_m) spin_unlock(&(_m)->ring_lock) + + + typedef struct mem_event { +@@ -45,7 +41,6 @@ typedef struct mem_event { + mem_event_back_ring_t back_ring; + mem_event_shared_page_t *shared_page; + void *ring_page; +- spinlock_t ring_lock; + } mem_event_t; + + +Index: xen-4.1.2-testing/tools/xenpaging/spinlock.h +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/spinlock.h ++++ /dev/null +@@ -1,69 +0,0 @@ +-/****************************************************************************** +- * tools/xenpaging/spinlock.h +- * +- * Spinlock implementation. +- * +- * Copyright (c) 2009 Citrix Systems, Inc. (Patrick Colp) +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. +- * +- * This program is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +- * GNU General Public License for more details. 
+- * +- * You should have received a copy of the GNU General Public License +- * along with this program; if not, write to the Free Software +- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +- */ +- +- +-#ifndef __SPINLOCK_H__ +-#define __SPINLOCK_H__ +- +- +-#include "bitops.h" +- +- +-#define SPIN_LOCK_UNLOCKED 0 +- +- +-typedef int spinlock_t; +- +- +-static inline void spin_lock(spinlock_t *lock) +-{ +- while ( test_and_set_bit(1, lock) ); +-} +- +-static inline void spin_lock_init(spinlock_t *lock) +-{ +- *lock = SPIN_LOCK_UNLOCKED; +-} +- +-static inline void spin_unlock(spinlock_t *lock) +-{ +- *lock = SPIN_LOCK_UNLOCKED; +-} +- +-static inline int spin_trylock(spinlock_t *lock) +-{ +- return !test_and_set_bit(1, lock); +-} +- +- +-#endif // __SPINLOCK_H__ +- +- +-/* +- * Local variables: +- * mode: C +- * c-set-style: "BSD" +- * c-basic-offset: 4 +- * tab-width: 4 +- * indent-tabs-mode: nil +- * End: +- */ +Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c ++++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c +@@ -32,7 +32,6 @@ + #include + + #include "bitops.h" +-#include "spinlock.h" + #include "file_ops.h" + #include "xc.h" + +@@ -127,9 +126,6 @@ static xenpaging_t *xenpaging_init(domid + BACK_RING_INIT(&paging->mem_event.back_ring, + (mem_event_sring_t *)paging->mem_event.ring_page, + PAGE_SIZE); +- +- /* Initialise lock */ +- mem_event_ring_lock_init(&paging->mem_event); + + /* Initialise Xen */ + rc = xc_mem_event_enable(xch, paging->mem_event.domain_id, +@@ -302,8 +298,6 @@ static int get_request(mem_event_t *mem_ + mem_event_back_ring_t *back_ring; + RING_IDX req_cons; + +- mem_event_ring_lock(mem_event); +- + back_ring = &mem_event->back_ring; + req_cons = back_ring->req_cons; + +@@ -315,8 +309,6 @@ static int get_request(mem_event_t *mem_ + back_ring->req_cons = req_cons; + back_ring->sring->req_event = req_cons + 1; + +- mem_event_ring_unlock(mem_event); +- + return 0; + } + +@@ -325,8 +317,6 @@ static int put_response(mem_event_t *mem + mem_event_back_ring_t *back_ring; + RING_IDX rsp_prod; + +- mem_event_ring_lock(mem_event); +- + back_ring = &mem_event->back_ring; + rsp_prod = back_ring->rsp_prod_pvt; + +@@ -338,8 +328,6 @@ static int put_response(mem_event_t *mem + back_ring->rsp_prod_pvt = rsp_prod; + RING_PUSH_RESPONSES(back_ring); + +- mem_event_ring_unlock(mem_event); +- + return 0; + } + +Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.h +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.h ++++ xen-4.1.2-testing/tools/xenpaging/xenpaging.h +@@ -25,7 +25,6 @@ + #define __XEN_PAGING2_H__ + + +-#include "spinlock.h" + #include "xc.h" + #include + diff --git a/23576-x86_show_page_walk_also_for_early_page_faults.patch b/23576-x86_show_page_walk_also_for_early_page_faults.patch new file mode 100644 index 0000000..d5c2466 --- /dev/null +++ b/23576-x86_show_page_walk_also_for_early_page_faults.patch @@ -0,0 +1,151 @@ +changeset: 23576:e2235fe267eb +user: Jan Beulich +date: Thu Jun 23 11:35:55 2011 +0100 +files: xen/arch/x86/mm.c xen/arch/x86/traps.c xen/arch/x86/x86_32/mm.c xen/arch/x86/x86_32/traps.c xen/arch/x86/x86_64/mm.c xen/arch/x86/x86_64/traps.c +description: +x86: show page walk also for early page faults + +At once, move the common (between 32- and 64-bit) definition of +machine_to_phys_mapping_valid to a common location. 
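The guarded-lookup pattern this series relies on can be shown in isolation. The sketch below is illustrative only and not code from the patch; table_valid and init_table() merely stand in for machine_to_phys_mapping_valid and paging_init(). A flag stays clear until the table has been populated, and until then lookups return a sentinel instead of dereferencing the not-yet-mapped array:

    /* Illustrative sketch of the validity-flag guard, not part of the patch. */
    #include <stdio.h>

    #define INVALID_ENTRY (~0UL)
    #define TABLE_SIZE    16

    static unsigned long table[TABLE_SIZE];
    static int table_valid;        /* stands in for machine_to_phys_mapping_valid */

    static unsigned long lookup(unsigned long idx)
    {
        if (idx >= TABLE_SIZE || !table_valid)
            return INVALID_ENTRY;  /* caller gets a sentinel, no stray access */
        return table[idx];
    }

    static void init_table(void)   /* stands in for paging_init() */
    {
        unsigned long i;
        for (i = 0; i < TABLE_SIZE; i++)
            table[i] = i;
        table_valid = 1;
    }

    int main(void)
    {
        printf("before init: %lx\n", lookup(3));   /* INVALID_ENTRY */
        init_table();
        printf("after init:  %lx\n", lookup(3));   /* 3 */
        return 0;
    }
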
+ +Signed-off-by: Jan Beulich + + +--- + xen/arch/x86/mm.c | 2 ++ + xen/arch/x86/traps.c | 1 + + xen/arch/x86/x86_32/mm.c | 2 -- + xen/arch/x86/x86_32/traps.c | 9 ++++++--- + xen/arch/x86/x86_64/mm.c | 2 -- + xen/arch/x86/x86_64/traps.c | 12 ++++++++---- + 6 files changed, 17 insertions(+), 11 deletions(-) + +Index: xen-4.1.2-testing/xen/arch/x86/mm.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/mm.c ++++ xen-4.1.2-testing/xen/arch/x86/mm.c +@@ -151,6 +151,8 @@ unsigned long __read_mostly pdx_group_va + (FRAMETABLE_SIZE / sizeof(*frame_table) + PDX_GROUP_COUNT - 1) + / PDX_GROUP_COUNT)] = { [0] = 1 }; + ++bool_t __read_mostly machine_to_phys_mapping_valid = 0; ++ + #define PAGE_CACHE_ATTRS (_PAGE_PAT|_PAGE_PCD|_PAGE_PWT) + + bool_t __read_mostly opt_allow_superpage; +Index: xen-4.1.2-testing/xen/arch/x86/traps.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/traps.c ++++ xen-4.1.2-testing/xen/arch/x86/traps.c +@@ -1428,6 +1428,7 @@ asmlinkage void __init do_early_page_fau + unsigned long *stk = (unsigned long *)regs; + printk("Early fatal page fault at %04x:%p (cr2=%p, ec=%04x)\n", + regs->cs, _p(regs->eip), _p(cr2), regs->error_code); ++ show_page_walk(cr2); + printk("Stack dump: "); + while ( ((long)stk & ((PAGE_SIZE - 1) & ~(BYTES_PER_LONG - 1))) != 0 ) + printk("%p ", _p(*stk++)); +Index: xen-4.1.2-testing/xen/arch/x86/x86_32/mm.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/x86_32/mm.c ++++ xen-4.1.2-testing/xen/arch/x86/x86_32/mm.c +@@ -39,8 +39,6 @@ extern l1_pgentry_t l1_identmap[L1_PAGET + unsigned int __read_mostly PAGE_HYPERVISOR = __PAGE_HYPERVISOR; + unsigned int __read_mostly PAGE_HYPERVISOR_NOCACHE = __PAGE_HYPERVISOR_NOCACHE; + +-bool_t __read_mostly machine_to_phys_mapping_valid = 0; +- + static unsigned long __read_mostly mpt_size; + + void *alloc_xen_pagetable(void) +Index: xen-4.1.2-testing/xen/arch/x86/x86_32/traps.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/x86_32/traps.c ++++ xen-4.1.2-testing/xen/arch/x86/x86_32/traps.c +@@ -164,7 +164,8 @@ void show_page_walk(unsigned long addr) + l3t += (cr3 & 0xFE0UL) >> 3; + l3e = l3t[l3_table_offset(addr)]; + mfn = l3e_get_pfn(l3e); +- pfn = mfn_valid(mfn) ? get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; ++ pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ? ++ get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; + printk(" L3[0x%03lx] = %"PRIpte" %08lx\n", + l3_table_offset(addr), l3e_get_intpte(l3e), pfn); + unmap_domain_page(l3t); +@@ -175,7 +176,8 @@ void show_page_walk(unsigned long addr) + l2t = map_domain_page(mfn); + l2e = l2t[l2_table_offset(addr)]; + mfn = l2e_get_pfn(l2e); +- pfn = mfn_valid(mfn) ? get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; ++ pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ? ++ get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; + printk(" L2[0x%03lx] = %"PRIpte" %08lx %s\n", + l2_table_offset(addr), l2e_get_intpte(l2e), pfn, + (l2e_get_flags(l2e) & _PAGE_PSE) ? "(PSE)" : ""); +@@ -188,7 +190,8 @@ void show_page_walk(unsigned long addr) + l1t = map_domain_page(mfn); + l1e = l1t[l1_table_offset(addr)]; + mfn = l1e_get_pfn(l1e); +- pfn = mfn_valid(mfn) ? get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; ++ pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ? 
++ get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; + printk(" L1[0x%03lx] = %"PRIpte" %08lx\n", + l1_table_offset(addr), l1e_get_intpte(l1e), pfn); + unmap_domain_page(l1t); +Index: xen-4.1.2-testing/xen/arch/x86/x86_64/mm.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/x86_64/mm.c ++++ xen-4.1.2-testing/xen/arch/x86/x86_64/mm.c +@@ -47,8 +47,6 @@ unsigned int __read_mostly pfn_pdx_hole_ + + unsigned int __read_mostly m2p_compat_vstart = __HYPERVISOR_COMPAT_VIRT_START; + +-bool_t __read_mostly machine_to_phys_mapping_valid = 0; +- + /* Top-level master (and idle-domain) page directory. */ + l4_pgentry_t __attribute__ ((__section__ (".bss.page_aligned"))) + idle_pg_table[L4_PAGETABLE_ENTRIES]; +Index: xen-4.1.2-testing/xen/arch/x86/x86_64/traps.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/x86_64/traps.c ++++ xen-4.1.2-testing/xen/arch/x86/x86_64/traps.c +@@ -176,7 +176,8 @@ void show_page_walk(unsigned long addr) + l4t = mfn_to_virt(mfn); + l4e = l4t[l4_table_offset(addr)]; + mfn = l4e_get_pfn(l4e); +- pfn = mfn_valid(mfn) ? get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; ++ pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ? ++ get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; + printk(" L4[0x%03lx] = %"PRIpte" %016lx\n", + l4_table_offset(addr), l4e_get_intpte(l4e), pfn); + if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) || +@@ -186,7 +187,8 @@ void show_page_walk(unsigned long addr) + l3t = mfn_to_virt(mfn); + l3e = l3t[l3_table_offset(addr)]; + mfn = l3e_get_pfn(l3e); +- pfn = mfn_valid(mfn) ? get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; ++ pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ? ++ get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; + printk(" L3[0x%03lx] = %"PRIpte" %016lx%s\n", + l3_table_offset(addr), l3e_get_intpte(l3e), pfn, + (l3e_get_flags(l3e) & _PAGE_PSE) ? " (PSE)" : ""); +@@ -198,7 +200,8 @@ void show_page_walk(unsigned long addr) + l2t = mfn_to_virt(mfn); + l2e = l2t[l2_table_offset(addr)]; + mfn = l2e_get_pfn(l2e); +- pfn = mfn_valid(mfn) ? get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; ++ pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ? ++ get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; + printk(" L2[0x%03lx] = %"PRIpte" %016lx %s\n", + l2_table_offset(addr), l2e_get_intpte(l2e), pfn, + (l2e_get_flags(l2e) & _PAGE_PSE) ? "(PSE)" : ""); +@@ -210,7 +213,8 @@ void show_page_walk(unsigned long addr) + l1t = mfn_to_virt(mfn); + l1e = l1t[l1_table_offset(addr)]; + mfn = l1e_get_pfn(l1e); +- pfn = mfn_valid(mfn) ? get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; ++ pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ? 
++ get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; + printk(" L1[0x%03lx] = %"PRIpte" %016lx\n", + l1_table_offset(addr), l1e_get_intpte(l1e), pfn); + } diff --git a/23577-tools_merge_several_bitop_functions_into_xc_bitops.h.patch b/23577-tools_merge_several_bitop_functions_into_xc_bitops.h.patch new file mode 100644 index 0000000..8863e7b --- /dev/null +++ b/23577-tools_merge_several_bitop_functions_into_xc_bitops.h.patch @@ -0,0 +1,1023 @@ +changeset: 23577:607474aeefe1 +parent: 23570:065ca14be963 +user: Olaf Hering +date: Fri Jun 10 10:47:03 2011 +0200 +files: tools/blktap2/drivers/block-log.c tools/libxc/ia64/xc_ia64_linux_restore.c tools/libxc/ia64/xc_ia64_linux_save.c tools/libxc/ia64/xc_ia64_save_restore.h tools/libxc/xc_bitops.h tools/libxc/xc_domain_save.c tools/xenpaging/bitops.h tools/xenpaging/policy_default.c tools/xenpaging/xc.c tools/xenpaging/xc.h tools/xenpaging/xenpaging.c tools/xenpaging/xenpaging.h +description: +tools: merge several bitop functions into xc_bitops.h + +Bitmaps are used in save/restore, xenpaging and blktap2. Merge the code into a +private xc_bitops.h file. All users are single threaded, so locking is not an +issue. The array of bits is handled as volatile because the x86 save/restore +code passes the bitmap to the hypervisor which in turn modifies the bitmap. + +blktap2 uses a private bitmap. There was a possible overflow in the +bitmap_size() function, the remainder was not considered. + +ia64 save/restore uses a bitmap to send the number of vcpus to the host. + +x86 save/restore uses a bitmap to track dirty pages. This bitmap is shared with +the hypervisor. An unused function count_bits() was removed and a new +bitmap_size() function is now used. + +xenpaging uses 3 private bitmaps to track the gfns which are in paged-out +state. It had a copy of some Linux bitops.h, which is now obsolete. Also the +BITS_PER_LONG macro was hardcoded to 64 which made it impossible to run 32bit +tools on a 64bit host. Wether this works at all has to be tested, yet. 
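The remainder bug mentioned above is easy to demonstrate in isolation. The sketch below is illustrative only and not part of the patch: old_bitmap_size() mimics blktap2's former "sz >> 3", which silently drops up to seven trailing bits, while new_bitmap_size() uses the round-up-to-whole-longs formula that the new xc_bitops.h introduces:

    /* Illustrative sketch of the bitmap sizing fix, not part of the patch. */
    #include <stdio.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * 8)
    #define ORDER_LONG    (sizeof(unsigned long) == 4 ? 5 : 6)

    static int old_bitmap_size(int nr_bits)   /* truncates, as old blktap2 did */
    {
        return nr_bits >> 3;
    }

    static int new_bitmap_size(int nr_bits)   /* rounds up, as in xc_bitops.h */
    {
        int nr_long = (nr_bits + BITS_PER_LONG - 1) >> ORDER_LONG;
        return nr_long * sizeof(unsigned long);
    }

    int main(void)
    {
        int nr_bits = 1000003;   /* arbitrary count, not a multiple of 64 */

        /* old: 125000 bytes, so the last 3 bits fall past the allocation;
         * new: 125008 bytes, covering every bit. */
        printf("%d bits: old -> %d bytes, new -> %d bytes\n",
               nr_bits, old_bitmap_size(nr_bits), new_bitmap_size(nr_bits));
        return 0;
    }

Compiled with any C compiler, this prints 125000 versus 125008 bytes for the example count, which is the kind of short allocation the changelog describes.
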
+ +Signed-off-by: Olaf Hering +Committed-by: Ian Jackson + + +--- + tools/blktap2/drivers/block-log.c | 29 -- + tools/libxc/ia64/xc_ia64_linux_restore.c | 14 + tools/libxc/ia64/xc_ia64_linux_save.c | 29 -- + tools/libxc/ia64/xc_ia64_save_restore.h | 20 - + tools/libxc/xc_bitops.h | 57 +++ + tools/libxc/xc_domain_save.c | 66 ---- + tools/xenpaging/bitops.h | 448 ------------------------------- + tools/xenpaging/policy_default.c | 20 - + tools/xenpaging/xc.c | 14 + tools/xenpaging/xc.h | 2 + tools/xenpaging/xenpaging.c | 9 + tools/xenpaging/xenpaging.h | 1 + 12 files changed, 91 insertions(+), 618 deletions(-) + +Index: xen-4.1.2-testing/tools/blktap2/drivers/block-log.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/blktap2/drivers/block-log.c ++++ xen-4.1.2-testing/tools/blktap2/drivers/block-log.c +@@ -47,6 +47,7 @@ + #include + #include + ++#include "xc_bitops.h" + #include "log.h" + #include "tapdisk.h" + #include "tapdisk-server.h" +@@ -89,31 +90,6 @@ static void ctl_request(event_id_t, char + + /* large flat bitmaps don't scale particularly well either in size or scan + * time, but they'll do for now */ +-#define BITS_PER_LONG (sizeof(unsigned long) * 8) +-#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG) +- +-#define BITMAP_ENTRY(_nr, _bmap) ((unsigned long*)(_bmap))[(_nr)/BITS_PER_LONG] +-#define BITMAP_SHIFT(_nr) ((_nr) % BITS_PER_LONG) +- +-static inline int test_bit(int nr, void* bmap) +-{ +- return (BITMAP_ENTRY(nr, bmap) >> BITMAP_SHIFT(nr)) & 1; +-} +- +-static inline void clear_bit(int nr, void* bmap) +-{ +- BITMAP_ENTRY(nr, bmap) &= ~(1UL << BITMAP_SHIFT(nr)); +-} +- +-static inline void set_bit(int nr, void* bmap) +-{ +- BITMAP_ENTRY(nr, bmap) |= (1UL << BITMAP_SHIFT(nr)); +-} +- +-static inline int bitmap_size(uint64_t sz) +-{ +- return sz >> 3; +-} + + static int writelog_create(struct tdlog_state *s) + { +@@ -123,7 +99,8 @@ static int writelog_create(struct tdlog_ + + BDPRINTF("allocating %"PRIu64" bytes for dirty bitmap", bmsize); + +- if (!(s->writelog = calloc(bmsize, 1))) { ++ s->writelog = bitmap_alloc(s->size); ++ if (!s->writelog) { + BWPRINTF("could not allocate dirty bitmap of size %"PRIu64, bmsize); + return -1; + } +Index: xen-4.1.2-testing/tools/libxc/ia64/xc_ia64_linux_restore.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/libxc/ia64/xc_ia64_linux_restore.c ++++ xen-4.1.2-testing/tools/libxc/ia64/xc_ia64_linux_restore.c +@@ -218,14 +218,12 @@ xc_ia64_recv_vcpumap(xc_interface *xch, + max_virt_cpus, info->max_vcpu_id); + return -1; + } +- vcpumap_size = (max_virt_cpus + 1 + sizeof(vcpumap[0]) - 1) / +- sizeof(vcpumap[0]); +- vcpumap = malloc(vcpumap_size); +- if (vcpumap == NULL) { ++ vcpumap_size = bitmap_size(max_virt_cpus); ++ rc = bitmap_alloc(&vcpumap, max_virt_cpus); ++ if (rc < 0) { + ERROR("memory alloc for vcpumap"); +- return -1; ++ return rc; + } +- memset(vcpumap, 0, vcpumap_size); + if (read_exact(io_fd, vcpumap, vcpumap_size)) { + ERROR("read vcpumap"); + free(vcpumap); +@@ -353,7 +351,7 @@ xc_ia64_pv_recv_context_ver_three(xc_int + + /* vcpu context */ + for (i = 0; i <= info.max_vcpu_id; i++) { +- if (!__test_bit(i, vcpumap)) ++ if (!test_bit(i, vcpumap)) + continue; + + rc = xc_ia64_pv_recv_vcpu_context(xch, io_fd, dom, i); +@@ -454,7 +452,7 @@ xc_ia64_hvm_recv_context(xc_interface *x + /* A copy of the CPU context of the guest. 
*/ + vcpu_guest_context_any_t ctxt_any; + +- if (!__test_bit(i, vcpumap)) ++ if (!test_bit(i, vcpumap)) + continue; + + if (xc_ia64_recv_vcpu_context(xch, io_fd, dom, i, &ctxt_any)) +Index: xen-4.1.2-testing/tools/libxc/ia64/xc_ia64_linux_save.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/libxc/ia64/xc_ia64_linux_save.c ++++ xen-4.1.2-testing/tools/libxc/ia64/xc_ia64_linux_save.c +@@ -32,6 +32,7 @@ + #include + + #include "xg_private.h" ++#include "xc_bitops.h" + #include "xc_ia64.h" + #include "xc_ia64_save_restore.h" + #include "xc_efi.h" +@@ -51,20 +52,6 @@ + ** During (live) save/migrate, we maintain a number of bitmaps to track + ** which pages we have to send, and to skip. + */ +-static inline int test_bit(int nr, volatile void * addr) +-{ +- return (BITMAP_ENTRY(nr, addr) >> BITMAP_SHIFT(nr)) & 1; +-} +- +-static inline void clear_bit(int nr, volatile void * addr) +-{ +- BITMAP_ENTRY(nr, addr) &= ~(1UL << BITMAP_SHIFT(nr)); +-} +- +-static inline void set_bit(int nr, volatile void * addr) +-{ +- BITMAP_ENTRY(nr, addr) |= (1UL << BITMAP_SHIFT(nr)); +-} + + static int + suspend_and_state(int (*suspend)(void*), void* data, +@@ -207,19 +194,17 @@ xc_ia64_send_vcpumap(xc_interface *xch, + unsigned long vcpumap_size; + uint64_t *vcpumap = NULL; + +- vcpumap_size = (max_virt_cpus + 1 + sizeof(vcpumap[0]) - 1) / +- sizeof(vcpumap[0]); +- vcpumap = malloc(vcpumap_size); +- if (vcpumap == NULL) { ++ vcpumap_size = bitmap_size(max_virt_cpus); ++ rc = bitmap_alloc(&vcpumap, max_virt_cpus); ++ if (rc < 0) { + ERROR("memory alloc for vcpumap"); + goto out; + } +- memset(vcpumap, 0, vcpumap_size); + + for (i = 0; i <= info->max_vcpu_id; i++) { + xc_vcpuinfo_t vinfo; + if ((xc_vcpu_getinfo(xch, dom, i, &vinfo) == 0) && vinfo.online) +- __set_bit(i, vcpumap); ++ set_bit(i, vcpumap); + } + + if (write_exact(io_fd, &max_virt_cpus, sizeof(max_virt_cpus))) { +@@ -265,7 +250,7 @@ xc_ia64_pv_send_context(xc_interface *xc + + char *mem; + +- if (!__test_bit(i, vcpumap)) ++ if (!test_bit(i, vcpumap)) + continue; + + if (xc_ia64_send_vcpu_context(xch, io_fd, dom, i, &ctxt_any)) +@@ -332,7 +317,7 @@ xc_ia64_hvm_send_context(xc_interface *x + /* A copy of the CPU context of the guest. */ + vcpu_guest_context_any_t ctxt_any; + +- if (!__test_bit(i, vcpumap)) ++ if (!test_bit(i, vcpumap)) + continue; + + if (xc_ia64_send_vcpu_context(xch, io_fd, dom, i, &ctxt_any)) +Index: xen-4.1.2-testing/tools/libxc/ia64/xc_ia64_save_restore.h +=================================================================== +--- xen-4.1.2-testing.orig/tools/libxc/ia64/xc_ia64_save_restore.h ++++ xen-4.1.2-testing/tools/libxc/ia64/xc_ia64_save_restore.h +@@ -33,26 +33,6 @@ + + #define XC_IA64_SR_FORMAT_VER_CURRENT XC_IA64_SR_FORMAT_VER_THREE + +-/* +-** During (live) save/migrate, we maintain a number of bitmaps to track +-** which pages we have to send, and to skip. 
+-*/ +-#define BITS_PER_LONG (sizeof(unsigned long) * 8) +- +-#define BITMAP_ENTRY(_nr,_bmap) \ +- ((unsigned long *)(_bmap))[(_nr)/BITS_PER_LONG] +- +-#define BITMAP_SHIFT(_nr) ((_nr) % BITS_PER_LONG) +- +-static inline int __test_bit(int nr, void * addr) +-{ +- return (BITMAP_ENTRY(nr, addr) >> BITMAP_SHIFT(nr)) & 1; +-} +- +-static inline void __set_bit(int nr, void * addr) +-{ +- BITMAP_ENTRY(nr, addr) |= (1UL << BITMAP_SHIFT(nr)); +-} + + #endif /* XC_IA64_SAVE_RESTORE_H */ + +Index: xen-4.1.2-testing/tools/libxc/xc_bitops.h +=================================================================== +--- /dev/null ++++ xen-4.1.2-testing/tools/libxc/xc_bitops.h +@@ -0,0 +1,57 @@ ++#ifndef XC_BITOPS_H ++#define XC_BITOPS_H 1 ++ ++/* bitmap operations for single threaded access */ ++ ++#include ++ ++#define BITS_PER_LONG (sizeof(unsigned long) * 8) ++#define ORDER_LONG (sizeof(unsigned long) == 4 ? 5 : 6) ++ ++#define BITMAP_ENTRY(_nr,_bmap) ((_bmap))[(_nr)/BITS_PER_LONG] ++#define BITMAP_SHIFT(_nr) ((_nr) % BITS_PER_LONG) ++ ++/* calculate required space for number of longs needed to hold nr_bits */ ++static inline int bitmap_size(int nr_bits) ++{ ++ int nr_long, nr_bytes; ++ nr_long = (nr_bits + BITS_PER_LONG - 1) >> ORDER_LONG; ++ nr_bytes = nr_long * sizeof(unsigned long); ++ return nr_bytes; ++} ++ ++static inline unsigned long *bitmap_alloc(int nr_bits) ++{ ++ return calloc(1, bitmap_size(nr_bits)); ++} ++ ++static inline int test_bit(int nr, volatile unsigned long *addr) ++{ ++ return (BITMAP_ENTRY(nr, addr) >> BITMAP_SHIFT(nr)) & 1; ++} ++ ++static inline void clear_bit(int nr, volatile unsigned long *addr) ++{ ++ BITMAP_ENTRY(nr, addr) &= ~(1UL << BITMAP_SHIFT(nr)); ++} ++ ++static inline void set_bit(int nr, volatile unsigned long *addr) ++{ ++ BITMAP_ENTRY(nr, addr) |= (1UL << BITMAP_SHIFT(nr)); ++} ++ ++static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) ++{ ++ int oldbit = test_bit(nr, addr); ++ clear_bit(nr, addr); ++ return oldbit; ++} ++ ++static inline int test_and_set_bit(int nr, volatile unsigned long *addr) ++{ ++ int oldbit = test_bit(nr, addr); ++ set_bit(nr, addr); ++ return oldbit; ++} ++ ++#endif /* XC_BITOPS_H */ +Index: xen-4.1.2-testing/tools/libxc/xc_domain_save.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/libxc/xc_domain_save.c ++++ xen-4.1.2-testing/tools/libxc/xc_domain_save.c +@@ -27,6 +27,7 @@ + #include + + #include "xc_private.h" ++#include "xc_bitops.h" + #include "xc_dom.h" + #include "xg_private.h" + #include "xg_save_restore.h" +@@ -82,57 +83,6 @@ struct outbuf { + ((mfn_to_pfn(_mfn) < (dinfo->p2m_size)) && \ + (pfn_to_mfn(mfn_to_pfn(_mfn)) == (_mfn)))) + +-/* +-** During (live) save/migrate, we maintain a number of bitmaps to track +-** which pages we have to send, to fixup, and to skip. +-*/ +- +-#define BITS_PER_LONG (sizeof(unsigned long) * 8) +-#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG) +-#define BITMAP_SIZE (BITS_TO_LONGS(dinfo->p2m_size) * sizeof(unsigned long)) +- +-#define BITMAP_ENTRY(_nr,_bmap) \ +- ((volatile unsigned long *)(_bmap))[(_nr)/BITS_PER_LONG] +- +-#define BITMAP_SHIFT(_nr) ((_nr) % BITS_PER_LONG) +- +-#define ORDER_LONG (sizeof(unsigned long) == 4 ? 
5 : 6) +- +-static inline int test_bit (int nr, volatile void * addr) +-{ +- return (BITMAP_ENTRY(nr, addr) >> BITMAP_SHIFT(nr)) & 1; +-} +- +-static inline void clear_bit (int nr, volatile void * addr) +-{ +- BITMAP_ENTRY(nr, addr) &= ~(1UL << BITMAP_SHIFT(nr)); +-} +- +-static inline void set_bit ( int nr, volatile void * addr) +-{ +- BITMAP_ENTRY(nr, addr) |= (1UL << BITMAP_SHIFT(nr)); +-} +- +-/* Returns the hamming weight (i.e. the number of bits set) in a N-bit word */ +-static inline unsigned int hweight32(unsigned int w) +-{ +- unsigned int res = (w & 0x55555555) + ((w >> 1) & 0x55555555); +- res = (res & 0x33333333) + ((res >> 2) & 0x33333333); +- res = (res & 0x0F0F0F0F) + ((res >> 4) & 0x0F0F0F0F); +- res = (res & 0x00FF00FF) + ((res >> 8) & 0x00FF00FF); +- return (res & 0x0000FFFF) + ((res >> 16) & 0x0000FFFF); +-} +- +-static inline int count_bits ( int nr, volatile void *addr) +-{ +- int i, count = 0; +- volatile unsigned long *p = (volatile unsigned long *)addr; +- /* We know that the array is padded to unsigned long. */ +- for ( i = 0; i < (nr / (sizeof(unsigned long)*8)); i++, p++ ) +- count += hweight32(*p); +- return count; +-} +- + static uint64_t tv_to_us(struct timeval *new) + { + return (new->tv_sec * 1000000) + new->tv_usec; +@@ -1067,9 +1017,9 @@ int xc_domain_save(xc_interface *xch, in + sent_last_iter = dinfo->p2m_size; + + /* Setup to_send / to_fix and to_skip bitmaps */ +- to_send = xc_hypercall_buffer_alloc_pages(xch, to_send, NRPAGES(BITMAP_SIZE)); +- to_skip = xc_hypercall_buffer_alloc_pages(xch, to_skip, NRPAGES(BITMAP_SIZE)); +- to_fix = calloc(1, BITMAP_SIZE); ++ to_send = xc_hypercall_buffer_alloc_pages(xch, to_send, NRPAGES(bitmap_size(dinfo->p2m_size))); ++ to_skip = xc_hypercall_buffer_alloc_pages(xch, to_skip, NRPAGES(bitmap_size(dinfo->p2m_size))); ++ to_fix = calloc(1, bitmap_size(dinfo->p2m_size)); + + if ( !to_send || !to_fix || !to_skip ) + { +@@ -1077,7 +1027,7 @@ int xc_domain_save(xc_interface *xch, in + goto out; + } + +- memset(to_send, 0xff, BITMAP_SIZE); ++ memset(to_send, 0xff, bitmap_size(dinfo->p2m_size)); + + if ( hvm ) + { +@@ -1490,7 +1440,7 @@ int xc_domain_save(xc_interface *xch, in + if ( last_iter && debug ) + { + int id = XC_SAVE_ID_ENABLE_VERIFY_MODE; +- memset(to_send, 0xff, BITMAP_SIZE); ++ memset(to_send, 0xff, bitmap_size(dinfo->p2m_size)); + debug = 0; + DPRINTF("Entering debug resend-all mode\n"); + +@@ -1959,8 +1909,8 @@ int xc_domain_save(xc_interface *xch, in + if ( ctx->live_m2p ) + munmap(ctx->live_m2p, M2P_SIZE(ctx->max_mfn)); + +- xc_hypercall_buffer_free_pages(xch, to_send, NRPAGES(BITMAP_SIZE)); +- xc_hypercall_buffer_free_pages(xch, to_skip, NRPAGES(BITMAP_SIZE)); ++ xc_hypercall_buffer_free_pages(xch, to_send, NRPAGES(bitmap_size(dinfo->p2m_size))); ++ xc_hypercall_buffer_free_pages(xch, to_skip, NRPAGES(bitmap_size(dinfo->p2m_size))); + + free(pfn_type); + free(pfn_batch); +Index: xen-4.1.2-testing/tools/xenpaging/bitops.h +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/bitops.h ++++ /dev/null +@@ -1,448 +0,0 @@ +-#ifndef _X86_BITOPS_H +-#define _X86_BITOPS_H +- +-/* +- * Copyright 1992, Linus Torvalds. +- */ +- +-//#include +- +-#ifdef CONFIG_SMP +-#define LOCK_PREFIX "lock ; " +-#else +-#define LOCK_PREFIX "" +-#endif +- +-/* +- * We specify the memory operand as both input and output because the memory +- * operand is both read from and written to. 
Since the operand is in fact a +- * word array, we also specify "memory" in the clobbers list to indicate that +- * words other than the one directly addressed by the memory operand may be +- * modified. We don't use "+m" because the gcc manual says that it should be +- * used only when the constraint allows the operand to reside in a register. +- */ +- +-#define ADDR (*(volatile long *) addr) +-#define CONST_ADDR (*(const volatile long *) addr) +- +-extern void __bitop_bad_size(void); +-#define bitop_bad_size(addr) (sizeof(*(addr)) < 4) +- +-/** +- * set_bit - Atomically set a bit in memory +- * @nr: the bit to set +- * @addr: the address to start counting from +- * +- * This function is atomic and may not be reordered. See __set_bit() +- * if you do not require the atomic guarantees. +- * Note that @nr may be almost arbitrarily large; this function is not +- * restricted to acting on a single-word quantity. +- */ +-static inline void set_bit(int nr, volatile void *addr) +-{ +- asm volatile ( +- LOCK_PREFIX +- "btsl %1,%0" +- : "=m" (ADDR) +- : "Ir" (nr), "m" (ADDR) : "memory"); +-} +-#define set_bit(nr, addr) ({ \ +- if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ +- set_bit(nr, addr); \ +-}) +- +-/** +- * __set_bit - Set a bit in memory +- * @nr: the bit to set +- * @addr: the address to start counting from +- * +- * Unlike set_bit(), this function is non-atomic and may be reordered. +- * If it's called on the same region of memory simultaneously, the effect +- * may be that only one operation succeeds. +- */ +-static inline void __set_bit(int nr, volatile void *addr) +-{ +- asm volatile ( +- "btsl %1,%0" +- : "=m" (ADDR) +- : "Ir" (nr), "m" (ADDR) : "memory"); +-} +-#define __set_bit(nr, addr) ({ \ +- if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ +- __set_bit(nr, addr); \ +-}) +- +-/** +- * clear_bit - Clears a bit in memory +- * @nr: Bit to clear +- * @addr: Address to start counting from +- * +- * clear_bit() is atomic and may not be reordered. However, it does +- * not contain a memory barrier, so if it is used for locking purposes, +- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() +- * in order to ensure changes are visible on other processors. +- */ +-static inline void clear_bit(int nr, volatile void *addr) +-{ +- asm volatile ( +- LOCK_PREFIX +- "btrl %1,%0" +- : "=m" (ADDR) +- : "Ir" (nr), "m" (ADDR) : "memory"); +-} +-#define clear_bit(nr, addr) ({ \ +- if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ +- clear_bit(nr, addr); \ +-}) +- +-/** +- * __clear_bit - Clears a bit in memory +- * @nr: Bit to clear +- * @addr: Address to start counting from +- * +- * Unlike clear_bit(), this function is non-atomic and may be reordered. +- * If it's called on the same region of memory simultaneously, the effect +- * may be that only one operation succeeds. +- */ +-static inline void __clear_bit(int nr, volatile void *addr) +-{ +- asm volatile ( +- "btrl %1,%0" +- : "=m" (ADDR) +- : "Ir" (nr), "m" (ADDR) : "memory"); +-} +-#define __clear_bit(nr, addr) ({ \ +- if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ +- __clear_bit(nr, addr); \ +-}) +- +-#define smp_mb__before_clear_bit() ((void)0) +-#define smp_mb__after_clear_bit() ((void)0) +- +-/** +- * __change_bit - Toggle a bit in memory +- * @nr: the bit to set +- * @addr: the address to start counting from +- * +- * Unlike change_bit(), this function is non-atomic and may be reordered. 
+- * If it's called on the same region of memory simultaneously, the effect +- * may be that only one operation succeeds. +- */ +-static inline void __change_bit(int nr, volatile void *addr) +-{ +- asm volatile ( +- "btcl %1,%0" +- : "=m" (ADDR) +- : "Ir" (nr), "m" (ADDR) : "memory"); +-} +-#define __change_bit(nr, addr) ({ \ +- if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ +- __change_bit(nr, addr); \ +-}) +- +-/** +- * change_bit - Toggle a bit in memory +- * @nr: Bit to clear +- * @addr: Address to start counting from +- * +- * change_bit() is atomic and may not be reordered. +- * Note that @nr may be almost arbitrarily large; this function is not +- * restricted to acting on a single-word quantity. +- */ +-static inline void change_bit(int nr, volatile void *addr) +-{ +- asm volatile ( +- LOCK_PREFIX +- "btcl %1,%0" +- : "=m" (ADDR) +- : "Ir" (nr), "m" (ADDR) : "memory"); +-} +-#define change_bit(nr, addr) ({ \ +- if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ +- change_bit(nr, addr); \ +-}) +- +-/** +- * test_and_set_bit - Set a bit and return its old value +- * @nr: Bit to set +- * @addr: Address to count from +- * +- * This operation is atomic and cannot be reordered. +- * It also implies a memory barrier. +- */ +-static inline int test_and_set_bit(int nr, volatile void *addr) +-{ +- int oldbit; +- +- asm volatile ( +- LOCK_PREFIX +- "btsl %2,%1\n\tsbbl %0,%0" +- : "=r" (oldbit), "=m" (ADDR) +- : "Ir" (nr), "m" (ADDR) : "memory"); +- return oldbit; +-} +-#define test_and_set_bit(nr, addr) ({ \ +- if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ +- test_and_set_bit(nr, addr); \ +-}) +- +-/** +- * __test_and_set_bit - Set a bit and return its old value +- * @nr: Bit to set +- * @addr: Address to count from +- * +- * This operation is non-atomic and can be reordered. +- * If two examples of this operation race, one can appear to succeed +- * but actually fail. You must protect multiple accesses with a lock. +- */ +-static inline int __test_and_set_bit(int nr, volatile void *addr) +-{ +- int oldbit; +- +- asm volatile ( +- "btsl %2,%1\n\tsbbl %0,%0" +- : "=r" (oldbit), "=m" (ADDR) +- : "Ir" (nr), "m" (ADDR) : "memory"); +- return oldbit; +-} +-#define __test_and_set_bit(nr, addr) ({ \ +- if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ +- __test_and_set_bit(nr, addr); \ +-}) +- +-/** +- * test_and_clear_bit - Clear a bit and return its old value +- * @nr: Bit to set +- * @addr: Address to count from +- * +- * This operation is atomic and cannot be reordered. +- * It also implies a memory barrier. +- */ +-static inline int test_and_clear_bit(int nr, volatile void *addr) +-{ +- int oldbit; +- +- asm volatile ( +- LOCK_PREFIX +- "btrl %2,%1\n\tsbbl %0,%0" +- : "=r" (oldbit), "=m" (ADDR) +- : "Ir" (nr), "m" (ADDR) : "memory"); +- return oldbit; +-} +-#define test_and_clear_bit(nr, addr) ({ \ +- if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ +- test_and_clear_bit(nr, addr); \ +-}) +- +-/** +- * __test_and_clear_bit - Clear a bit and return its old value +- * @nr: Bit to set +- * @addr: Address to count from +- * +- * This operation is non-atomic and can be reordered. +- * If two examples of this operation race, one can appear to succeed +- * but actually fail. You must protect multiple accesses with a lock. 
+- */ +-static inline int __test_and_clear_bit(int nr, volatile void *addr) +-{ +- int oldbit; +- +- asm volatile ( +- "btrl %2,%1\n\tsbbl %0,%0" +- : "=r" (oldbit), "=m" (ADDR) +- : "Ir" (nr), "m" (ADDR) : "memory"); +- return oldbit; +-} +-#define __test_and_clear_bit(nr, addr) ({ \ +- if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ +- __test_and_clear_bit(nr, addr); \ +-}) +- +-/* WARNING: non atomic and it can be reordered! */ +-static inline int __test_and_change_bit(int nr, volatile void *addr) +-{ +- int oldbit; +- +- asm volatile ( +- "btcl %2,%1\n\tsbbl %0,%0" +- : "=r" (oldbit), "=m" (ADDR) +- : "Ir" (nr), "m" (ADDR) : "memory"); +- return oldbit; +-} +-#define __test_and_change_bit(nr, addr) ({ \ +- if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ +- __test_and_change_bit(nr, addr); \ +-}) +- +-/** +- * test_and_change_bit - Change a bit and return its new value +- * @nr: Bit to set +- * @addr: Address to count from +- * +- * This operation is atomic and cannot be reordered. +- * It also implies a memory barrier. +- */ +-static inline int test_and_change_bit(int nr, volatile void *addr) +-{ +- int oldbit; +- +- asm volatile ( +- LOCK_PREFIX +- "btcl %2,%1\n\tsbbl %0,%0" +- : "=r" (oldbit), "=m" (ADDR) +- : "Ir" (nr), "m" (ADDR) : "memory"); +- return oldbit; +-} +-#define test_and_change_bit(nr, addr) ({ \ +- if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ +- test_and_change_bit(nr, addr); \ +-}) +- +-static inline int constant_test_bit(int nr, const volatile void *addr) +-{ +- return ((1U << (nr & 31)) & +- (((const volatile unsigned int *)addr)[nr >> 5])) != 0; +-} +- +-static inline int variable_test_bit(int nr, const volatile void *addr) +-{ +- int oldbit; +- +- asm volatile ( +- "btl %2,%1\n\tsbbl %0,%0" +- : "=r" (oldbit) +- : "m" (CONST_ADDR), "Ir" (nr) : "memory" ); +- return oldbit; +-} +- +-#define test_bit(nr, addr) ({ \ +- if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ +- (__builtin_constant_p(nr) ? \ +- constant_test_bit((nr),(addr)) : \ +- variable_test_bit((nr),(addr))); \ +-}) +- +-extern unsigned int __find_first_bit( +- const unsigned long *addr, unsigned int size); +-extern unsigned int __find_next_bit( +- const unsigned long *addr, unsigned int size, unsigned int offset); +-extern unsigned int __find_first_zero_bit( +- const unsigned long *addr, unsigned int size); +-extern unsigned int __find_next_zero_bit( +- const unsigned long *addr, unsigned int size, unsigned int offset); +- +-static inline unsigned int __scanbit(unsigned long val, unsigned long max) +-{ +- asm ( "bsf %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max) ); +- return (unsigned int)val; +-} +- +-/** +- * find_first_bit - find the first set bit in a memory region +- * @addr: The address to start the search at +- * @size: The maximum size to search +- * +- * Returns the bit-number of the first set bit, not the number of the byte +- * containing a bit. +- */ +-#define find_first_bit(addr,size) \ +-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ +- (__scanbit(*(const unsigned long *)addr, size)) : \ +- __find_first_bit(addr,size))) +- +-/** +- * find_next_bit - find the first set bit in a memory region +- * @addr: The address to base the search on +- * @offset: The bitnumber to start searching at +- * @size: The maximum size to search +- */ +-#define find_next_bit(addr,size,off) \ +-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? 
\ +- ((off) + (__scanbit((*(const unsigned long *)addr) >> (off), size))) : \ +- __find_next_bit(addr,size,off))) +- +-/** +- * find_first_zero_bit - find the first zero bit in a memory region +- * @addr: The address to start the search at +- * @size: The maximum size to search +- * +- * Returns the bit-number of the first zero bit, not the number of the byte +- * containing a bit. +- */ +-#define find_first_zero_bit(addr,size) \ +-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ +- (__scanbit(~*(const unsigned long *)addr, size)) : \ +- __find_first_zero_bit(addr,size))) +- +-/** +- * find_next_zero_bit - find the first zero bit in a memory region +- * @addr: The address to base the search on +- * @offset: The bitnumber to start searching at +- * @size: The maximum size to search +- */ +-#define find_next_zero_bit(addr,size,off) \ +-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ +- ((off)+(__scanbit(~(((*(const unsigned long *)addr)) >> (off)), size))) : \ +- __find_next_zero_bit(addr,size,off))) +- +- +-/** +- * find_first_set_bit - find the first set bit in @word +- * @word: the word to search +- * +- * Returns the bit-number of the first set bit. The input must *not* be zero. +- */ +-static inline unsigned int find_first_set_bit(unsigned long word) +-{ +- asm ( "bsf %1,%0" : "=r" (word) : "r" (word) ); +- return (unsigned int)word; +-} +- +-/** +- * ffs - find first bit set +- * @x: the word to search +- * +- * This is defined the same way as the libc and compiler builtin ffs routines. +- */ +-#if 0 +-static inline int ffs(unsigned long x) +-{ +- long r; +- +- asm ( "bsf %1,%0\n\t" +- "jnz 1f\n\t" +- "mov $-1,%0\n" +- "1:" : "=r" (r) : "rm" (x)); +- return (int)r+1; +-} +-#endif +- +-/** +- * fls - find last bit set +- * @x: the word to search +- * +- * This is defined the same way as ffs. +- */ +-static inline int fls(unsigned long x) +-{ +- long r; +- +- asm ( "bsr %1,%0\n\t" +- "jnz 1f\n\t" +- "mov $-1,%0\n" +- "1:" : "=r" (r) : "rm" (x)); +- return (int)r+1; +-} +- +-/** +- * hweightN - returns the hamming weight of a N-bit word +- * @x: the word to weigh +- * +- * The Hamming Weight of a number is the total number of bits set in it. 
+- */ +-#define hweight64(x) generic_hweight64(x) +-#define hweight32(x) generic_hweight32(x) +-#define hweight16(x) generic_hweight16(x) +-#define hweight8(x) generic_hweight8(x) +- +-#endif /* _X86_BITOPS_H */ +Index: xen-4.1.2-testing/tools/xenpaging/policy_default.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/policy_default.c ++++ xen-4.1.2-testing/tools/xenpaging/policy_default.c +@@ -21,8 +21,7 @@ + */ + + +-#include "bitops.h" +-#include "xc.h" ++#include "xc_bitops.h" + #include "policy.h" + + +@@ -35,26 +34,23 @@ static unsigned int mru_size; + static unsigned long *bitmap; + static unsigned long *unconsumed; + static unsigned long current_gfn; +-static unsigned long bitmap_size; + static unsigned long max_pages; + + + int policy_init(xenpaging_t *paging) + { + int i; +- int rc; ++ int rc = -ENOMEM; + + /* Allocate bitmap for pages not to page out */ +- rc = alloc_bitmap(&bitmap, paging->bitmap_size); +- if ( rc != 0 ) ++ bitmap = bitmap_alloc(paging->domain_info->max_pages); ++ if ( !bitmap ) + goto out; + /* Allocate bitmap to track unusable pages */ +- rc = alloc_bitmap(&unconsumed, paging->bitmap_size); +- if ( rc != 0 ) ++ unconsumed = bitmap_alloc(paging->domain_info->max_pages); ++ if ( !unconsumed ) + goto out; + +- /* record bitmap_size */ +- bitmap_size = paging->bitmap_size; + max_pages = paging->domain_info->max_pages; + + /* Initialise MRU list of paged in pages */ +@@ -65,10 +61,7 @@ int policy_init(xenpaging_t *paging) + + mru = malloc(sizeof(*mru) * mru_size); + if ( mru == NULL ) +- { +- rc = -ENOMEM; + goto out; +- } + + for ( i = 0; i < mru_size; i++ ) + mru[i] = INVALID_MFN; +@@ -76,6 +69,7 @@ int policy_init(xenpaging_t *paging) + /* Don't page out page 0 */ + set_bit(0, bitmap); + ++ rc = 0; + out: + return rc; + } +Index: xen-4.1.2-testing/tools/xenpaging/xc.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xc.c ++++ xen-4.1.2-testing/tools/xenpaging/xc.c +@@ -31,20 +31,6 @@ + #include "xc.h" + + +-int alloc_bitmap(unsigned long **bitmap, unsigned long bitmap_size) +-{ +- if ( *bitmap == NULL ) +- { +- *bitmap = calloc(bitmap_size / BITS_PER_LONG, sizeof(unsigned long)); +- +- if ( *bitmap == NULL ) +- return -ENOMEM; +- } +- +- memset(*bitmap, 0, bitmap_size / 8); +- +- return 0; +-} + + int xc_mem_paging_flush_ioemu_cache(domid_t domain_id) + { +Index: xen-4.1.2-testing/tools/xenpaging/xc.h +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xc.h ++++ xen-4.1.2-testing/tools/xenpaging/xc.h +@@ -39,7 +39,6 @@ + #endif + + +-#define BITS_PER_LONG 64 + + + typedef struct xc_platform_info { +@@ -50,7 +49,6 @@ typedef struct xc_platform_info { + } xc_platform_info_t; + + +-int alloc_bitmap(unsigned long **bitmap, unsigned long bitmap_size); + + int xc_mem_paging_flush_ioemu_cache(domid_t domain_id); + int xc_wait_for_event(xc_interface *xch, xc_evtchn *xce); +Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c ++++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c +@@ -31,7 +31,7 @@ + + #include + +-#include "bitops.h" ++#include "xc_bitops.h" + #include "file_ops.h" + #include "xc.h" + +@@ -200,11 +200,8 @@ static xenpaging_t *xenpaging_init(domid + } + + /* Allocate bitmap for tracking pages that have been paged out */ +- paging->bitmap_size = 
(paging->domain_info->max_pages + BITS_PER_LONG) & +- ~(BITS_PER_LONG - 1); +- +- rc = alloc_bitmap(&paging->bitmap, paging->bitmap_size); +- if ( rc != 0 ) ++ paging->bitmap = bitmap_alloc(paging->domain_info->max_pages); ++ if ( !paging->bitmap ) + { + ERROR("Error allocating bitmap"); + goto err; +Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.h +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.h ++++ xen-4.1.2-testing/tools/xenpaging/xenpaging.h +@@ -40,7 +40,6 @@ typedef struct xenpaging { + xc_platform_info_t *platform_info; + xc_domaininfo_t *domain_info; + +- unsigned long bitmap_size; + unsigned long *bitmap; + + mem_event_t mem_event; diff --git a/23578-xenpaging_add_xs_handle_to_struct_xenpaging.patch b/23578-xenpaging_add_xs_handle_to_struct_xenpaging.patch new file mode 100644 index 0000000..c2c9be4 --- /dev/null +++ b/23578-xenpaging_add_xs_handle_to_struct_xenpaging.patch @@ -0,0 +1,79 @@ +changeset: 23578:7299a9a44b35 +user: Olaf Hering +date: Wed Jun 22 14:47:09 2011 +0100 +files: tools/xenpaging/xenpaging.c tools/xenpaging/xenpaging.h +description: +xenpaging: add xs_handle to struct xenpaging + +A xs_handle is currently used in the xc_mem_paging_flush_ioemu_cache() +function and will be used by a subsequent patch. +Add it to struct xenpaging. + +Signed-off-by: Olaf Hering +Committed-by: Ian Jackson +Acked-by: Ian Campbell + + +--- + tools/xenpaging/xenpaging.c | 14 ++++++++++++++ + tools/xenpaging/xenpaging.h | 1 + + 2 files changed, 15 insertions(+) + +Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c ++++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c +@@ -28,6 +28,7 @@ + #include + #include + #include ++#include + + #include + +@@ -92,6 +93,14 @@ static xenpaging_t *xenpaging_init(domid + paging = malloc(sizeof(xenpaging_t)); + memset(paging, 0, sizeof(xenpaging_t)); + ++ /* Open connection to xenstore */ ++ paging->xs_handle = xs_open(0); ++ if ( paging->xs_handle == NULL ) ++ { ++ ERROR("Error initialising xenstore connection"); ++ goto err; ++ } ++ + p = getenv("XENPAGING_POLICY_MRU_SIZE"); + if ( p && *p ) + { +@@ -221,6 +230,8 @@ static xenpaging_t *xenpaging_init(domid + err: + if ( paging ) + { ++ if ( paging->xs_handle ) ++ xs_close(paging->xs_handle); + xc_interface_close(xch); + if ( paging->mem_event.shared_page ) + { +@@ -277,6 +288,9 @@ static int xenpaging_teardown(xenpaging_ + } + paging->mem_event.xce_handle = NULL; + ++ /* Close connection to xenstore */ ++ xs_close(paging->xs_handle); ++ + /* Close connection to Xen */ + rc = xc_interface_close(xch); + if ( rc != 0 ) +Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.h +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.h ++++ xen-4.1.2-testing/tools/xenpaging/xenpaging.h +@@ -36,6 +36,7 @@ + + typedef struct xenpaging { + xc_interface *xc_handle; ++ struct xs_handle *xs_handle; + + xc_platform_info_t *platform_info; + xc_domaininfo_t *domain_info; diff --git a/23579-xenpaging_drop_xc.c_remove_ASSERT.patch b/23579-xenpaging_drop_xc.c_remove_ASSERT.patch new file mode 100644 index 0000000..bb7468a --- /dev/null +++ b/23579-xenpaging_drop_xc.c_remove_ASSERT.patch @@ -0,0 +1,48 @@ +changeset: 23579:868c8c898f73 +user: Olaf Hering +date: Fri Jun 10 10:47:06 2011 +0200 +files: tools/xenpaging/policy_default.c tools/xenpaging/xc.h 
+description: +xenpaging: drop xc.c, remove ASSERT + +The ASSERT is not needed, victim is never NULL. + +Signed-off-by: Olaf Hering +Committed-by: Ian Jackson + + +--- + tools/xenpaging/policy_default.c | 1 - + tools/xenpaging/xc.h | 7 ------- + 2 files changed, 8 deletions(-) + +Index: xen-4.1.2-testing/tools/xenpaging/policy_default.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/policy_default.c ++++ xen-4.1.2-testing/tools/xenpaging/policy_default.c +@@ -78,7 +78,6 @@ int policy_choose_victim(xenpaging_t *pa + { + xc_interface *xch = paging->xc_handle; + unsigned long wrap = current_gfn; +- ASSERT(victim != NULL); + + do + { +Index: xen-4.1.2-testing/tools/xenpaging/xc.h +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xc.h ++++ xen-4.1.2-testing/tools/xenpaging/xc.h +@@ -30,13 +30,6 @@ + #include + + +-#if 1 +-#define ASSERT(_p) \ +- if ( !(_p) ) { DPRINTF("Assertion '%s' failed, line %d, file %s", #_p , \ +- __LINE__, __FILE__); *(int*)0=0; } +-#else +-#define ASSERT(_p) ((void)0) +-#endif + + + diff --git a/23580-xenpaging_drop_xc.c_remove_xc_platform_info_t.patch b/23580-xenpaging_drop_xc.c_remove_xc_platform_info_t.patch new file mode 100644 index 0000000..2141277 --- /dev/null +++ b/23580-xenpaging_drop_xc.c_remove_xc_platform_info_t.patch @@ -0,0 +1,121 @@ +changeset: 23580:771b6984aa2a +user: Olaf Hering +date: Fri Jun 10 10:47:07 2011 +0200 +files: tools/xenpaging/xc.c tools/xenpaging/xc.h tools/xenpaging/xenpaging.c tools/xenpaging/xenpaging.h +description: +xenpaging: drop xc.c, remove xc_platform_info_t + +xc_platform_info_t is not used in xenpaging. + +Signed-off-by: Olaf Hering +Committed-by: Ian Jackson + + +--- + tools/xenpaging/xc.c | 10 ---------- + tools/xenpaging/xc.h | 8 -------- + tools/xenpaging/xenpaging.c | 17 ----------------- + tools/xenpaging/xenpaging.h | 1 - + 4 files changed, 36 deletions(-) + +Index: xen-4.1.2-testing/tools/xenpaging/xc.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xc.c ++++ xen-4.1.2-testing/tools/xenpaging/xc.c +@@ -26,7 +26,6 @@ + #include + #include + #include +-#include + #include + #include "xc.h" + +@@ -97,15 +96,6 @@ int xc_wait_for_event(xc_interface *xch, + return xc_wait_for_event_or_timeout(xch, xce, -1); + } + +-int xc_get_platform_info(xc_interface *xc_handle, domid_t domain_id, +- xc_platform_info_t *platform_info) +-{ +- return get_platform_info(xc_handle, domain_id, +- &platform_info->max_mfn, +- &platform_info->hvirt_start, +- &platform_info->pt_levels, +- &platform_info->guest_width); +-} + + + /* +Index: xen-4.1.2-testing/tools/xenpaging/xc.h +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xc.h ++++ xen-4.1.2-testing/tools/xenpaging/xc.h +@@ -34,12 +34,6 @@ + + + +-typedef struct xc_platform_info { +- unsigned long max_mfn; +- unsigned long hvirt_start; +- unsigned int pt_levels; +- unsigned int guest_width; +-} xc_platform_info_t; + + + +@@ -47,8 +41,6 @@ int xc_mem_paging_flush_ioemu_cache(domi + int xc_wait_for_event(xc_interface *xch, xc_evtchn *xce); + int xc_wait_for_event_or_timeout(xc_interface *xch, xc_evtchn *xce, unsigned long ms); + +-int xc_get_platform_info(xc_interface *xc_handle, domid_t domain_id, +- xc_platform_info_t *platform_info); + + + #endif // __XC_H__ +Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c 
+=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c ++++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c +@@ -176,22 +176,6 @@ static xenpaging_t *xenpaging_init(domid + + paging->mem_event.port = rc; + +- /* Get platform info */ +- paging->platform_info = malloc(sizeof(xc_platform_info_t)); +- if ( paging->platform_info == NULL ) +- { +- ERROR("Error allocating memory for platform info"); +- goto err; +- } +- +- rc = xc_get_platform_info(xch, paging->mem_event.domain_id, +- paging->platform_info); +- if ( rc != 1 ) +- { +- ERROR("Error getting platform info"); +- goto err; +- } +- + /* Get domaininfo */ + paging->domain_info = malloc(sizeof(xc_domaininfo_t)); + if ( paging->domain_info == NULL ) +@@ -246,7 +230,6 @@ static xenpaging_t *xenpaging_init(domid + } + + free(paging->bitmap); +- free(paging->platform_info); + free(paging->domain_info); + free(paging); + } +Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.h +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.h ++++ xen-4.1.2-testing/tools/xenpaging/xenpaging.h +@@ -38,7 +38,6 @@ typedef struct xenpaging { + xc_interface *xc_handle; + struct xs_handle *xs_handle; + +- xc_platform_info_t *platform_info; + xc_domaininfo_t *domain_info; + + unsigned long *bitmap; diff --git a/23581-xenpaging_drop_xc.c_remove_xc_wait_for_event.patch b/23581-xenpaging_drop_xc.c_remove_xc_wait_for_event.patch new file mode 100644 index 0000000..317042a --- /dev/null +++ b/23581-xenpaging_drop_xc.c_remove_xc_wait_for_event.patch @@ -0,0 +1,45 @@ +changeset: 23581:9ce56626a5ab +user: Olaf Hering +date: Fri Jun 10 10:47:08 2011 +0200 +files: tools/xenpaging/xc.c tools/xenpaging/xc.h +description: +xenpaging: drop xc.c, remove xc_wait_for_event + +xc_wait_for_event is not used in xenpaging. 
+ +Signed-off-by: Olaf Hering +Committed-by: Ian Jackson + + +--- + tools/xenpaging/xc.c | 4 ---- + tools/xenpaging/xc.h | 1 - + 2 files changed, 5 deletions(-) + +Index: xen-4.1.2-testing/tools/xenpaging/xc.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xc.c ++++ xen-4.1.2-testing/tools/xenpaging/xc.c +@@ -91,10 +91,6 @@ int xc_wait_for_event_or_timeout(xc_inte + return -errno; + } + +-int xc_wait_for_event(xc_interface *xch, xc_evtchn *xce) +-{ +- return xc_wait_for_event_or_timeout(xch, xce, -1); +-} + + + +Index: xen-4.1.2-testing/tools/xenpaging/xc.h +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xc.h ++++ xen-4.1.2-testing/tools/xenpaging/xc.h +@@ -38,7 +38,6 @@ + + + int xc_mem_paging_flush_ioemu_cache(domid_t domain_id); +-int xc_wait_for_event(xc_interface *xch, xc_evtchn *xce); + int xc_wait_for_event_or_timeout(xc_interface *xch, xc_evtchn *xce, unsigned long ms); + + diff --git a/23582-xenpaging_drop_xc.c_move_xc_mem_paging_flush_ioemu_cache.patch b/23582-xenpaging_drop_xc.c_move_xc_mem_paging_flush_ioemu_cache.patch new file mode 100644 index 0000000..8f274a6 --- /dev/null +++ b/23582-xenpaging_drop_xc.c_move_xc_mem_paging_flush_ioemu_cache.patch @@ -0,0 +1,96 @@ +changeset: 23582:480e548fe76b +user: Olaf Hering +date: Fri Jun 10 10:47:10 2011 +0200 +files: tools/xenpaging/xc.c tools/xenpaging/xc.h tools/xenpaging/xenpaging.c +description: +xenpaging: drop xc.c, move xc_mem_paging_flush_ioemu_cache + +Move xc_mem_paging_flush_ioemu_cache() into xenpaging and massage it a bit to +use the required members from xenpaging_t. +Also update type of rc to match xs_write() return value. + +Signed-off-by: Olaf Hering +Committed-by: Ian Jackson + + +--- + tools/xenpaging/xc.c | 18 ------------------ + tools/xenpaging/xc.h | 1 - + tools/xenpaging/xenpaging.c | 16 +++++++++++++++- + 3 files changed, 15 insertions(+), 20 deletions(-) + +Index: xen-4.1.2-testing/tools/xenpaging/xc.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xc.c ++++ xen-4.1.2-testing/tools/xenpaging/xc.c +@@ -31,24 +31,6 @@ + + + +-int xc_mem_paging_flush_ioemu_cache(domid_t domain_id) +-{ +- struct xs_handle *xsh = NULL; +- char path[80]; +- int rc; +- +- sprintf(path, "/local/domain/0/device-model/%u/command", domain_id); +- +- xsh = xs_daemon_open(); +- if ( xsh == NULL ) +- return -EIO; +- +- rc = xs_write(xsh, XBT_NULL, path, "flush-cache", strlen("flush-cache")); +- +- xs_daemon_close(xsh); +- +- return rc ? 
0 : -1; +-} + + int xc_wait_for_event_or_timeout(xc_interface *xch, xc_evtchn *xce, unsigned long ms) + { +Index: xen-4.1.2-testing/tools/xenpaging/xc.h +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xc.h ++++ xen-4.1.2-testing/tools/xenpaging/xc.h +@@ -37,7 +37,6 @@ + + + +-int xc_mem_paging_flush_ioemu_cache(domid_t domain_id); + int xc_wait_for_event_or_timeout(xc_interface *xch, xc_evtchn *xce, unsigned long ms); + + +Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c ++++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c +@@ -48,6 +48,20 @@ static void close_handler(int sig) + unlink(filename); + } + ++static int xenpaging_mem_paging_flush_ioemu_cache(xenpaging_t *paging) ++{ ++ struct xs_handle *xsh = paging->xs_handle; ++ domid_t domain_id = paging->mem_event.domain_id; ++ char path[80]; ++ bool rc; ++ ++ sprintf(path, "/local/domain/0/device-model/%u/command", domain_id); ++ ++ rc = xs_write(xsh, XBT_NULL, path, "flush-cache", strlen("flush-cache")); ++ ++ return rc == true ? 0 : -1; ++} ++ + static void *init_page(void) + { + void *buffer; +@@ -484,7 +498,7 @@ static int evict_victim(xenpaging_t *pag + else + { + if ( j++ % 1000 == 0 ) +- if ( xc_mem_paging_flush_ioemu_cache(paging->mem_event.domain_id) ) ++ if ( xenpaging_mem_paging_flush_ioemu_cache(paging) ) + ERROR("Error flushing ioemu cache"); + } + } diff --git a/23583-xenpaging_drop_xc.c_move_xc_wait_for_event_or_timeout.patch b/23583-xenpaging_drop_xc.c_move_xc_wait_for_event_or_timeout.patch new file mode 100644 index 0000000..b45d236 --- /dev/null +++ b/23583-xenpaging_drop_xc.c_move_xc_wait_for_event_or_timeout.patch @@ -0,0 +1,161 @@ +changeset: 23583:235d8fdcb3a9 +user: Olaf Hering +date: Fri Jun 10 10:47:11 2011 +0200 +files: tools/xenpaging/xc.c tools/xenpaging/xc.h tools/xenpaging/xenpaging.c +description: +xenpaging: drop xc.c, move xc_wait_for_event_or_timeout + +Move xc_wait_for_event_or_timeout() into xenpaging and massage it a bit for +further changes in subsequent patches. +Include poll.h instead of sys/poll.h. 
+ +Signed-off-by: Olaf Hering +Committed-by: Ian Jackson + + +--- + tools/xenpaging/xc.c | 40 ------------------------------------ + tools/xenpaging/xc.h | 1 + tools/xenpaging/xenpaging.c | 48 +++++++++++++++++++++++++++++++++++++++++--- + 3 files changed, 45 insertions(+), 44 deletions(-) + +Index: xen-4.1.2-testing/tools/xenpaging/xc.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xc.c ++++ xen-4.1.2-testing/tools/xenpaging/xc.c +@@ -32,46 +32,6 @@ + + + +-int xc_wait_for_event_or_timeout(xc_interface *xch, xc_evtchn *xce, unsigned long ms) +-{ +- struct pollfd fd = { .fd = xc_evtchn_fd(xce), .events = POLLIN | POLLERR }; +- int port; +- int rc; +- +- rc = poll(&fd, 1, ms); +- if ( rc == -1 ) +- { +- if (errno == EINTR) +- return 0; +- +- ERROR("Poll exited with an error"); +- goto err; +- } +- +- if ( rc == 1 ) +- { +- port = xc_evtchn_pending(xce); +- if ( port == -1 ) +- { +- ERROR("Failed to read port from event channel"); +- goto err; +- } +- +- rc = xc_evtchn_unmask(xce, port); +- if ( rc != 0 ) +- { +- ERROR("Failed to unmask event channel port"); +- goto err; +- } +- } +- else +- port = -1; +- +- return port; +- +- err: +- return -errno; +-} + + + +Index: xen-4.1.2-testing/tools/xenpaging/xc.h +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xc.h ++++ xen-4.1.2-testing/tools/xenpaging/xc.h +@@ -37,7 +37,6 @@ + + + +-int xc_wait_for_event_or_timeout(xc_interface *xch, xc_evtchn *xce, unsigned long ms); + + + +Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c ++++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c +@@ -27,6 +27,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -62,6 +63,47 @@ static int xenpaging_mem_paging_flush_io + return rc == true ? 
0 : -1; + } + ++static int xenpaging_wait_for_event_or_timeout(xenpaging_t *paging) ++{ ++ xc_interface *xch = paging->xc_handle; ++ xc_evtchn *xce = paging->mem_event.xce_handle; ++ struct pollfd fd[1]; ++ int port; ++ int rc; ++ ++ fd[0].fd = xc_evtchn_fd(xce); ++ fd[0].events = POLLIN | POLLERR; ++ rc = poll(fd, 1, 100); ++ if ( rc < 0 ) ++ { ++ if (errno == EINTR) ++ return 0; ++ ++ ERROR("Poll exited with an error"); ++ return -errno; ++ } ++ ++ if ( rc && fd[0].revents & POLLIN ) ++ { ++ DPRINTF("Got event from evtchn\n"); ++ port = xc_evtchn_pending(xce); ++ if ( port == -1 ) ++ { ++ ERROR("Failed to read port from event channel"); ++ rc = -1; ++ goto err; ++ } ++ ++ rc = xc_evtchn_unmask(xce, port); ++ if ( rc < 0 ) ++ { ++ ERROR("Failed to unmask event channel port"); ++ } ++ } ++err: ++ return rc; ++} ++ + static void *init_page(void) + { + void *buffer; +@@ -598,13 +640,13 @@ int main(int argc, char *argv[]) + while ( !interrupted ) + { + /* Wait for Xen to signal that a page needs paged in */ +- rc = xc_wait_for_event_or_timeout(xch, paging->mem_event.xce_handle, 100); +- if ( rc < -1 ) ++ rc = xenpaging_wait_for_event_or_timeout(paging); ++ if ( rc < 0 ) + { + ERROR("Error getting event"); + goto out; + } +- else if ( rc != -1 ) ++ else if ( rc != 0 ) + { + DPRINTF("Got event from Xen\n"); + } diff --git a/23584-xenpaging_drop_xc.c_remove_xc_files.patch b/23584-xenpaging_drop_xc.c_remove_xc_files.patch new file mode 100644 index 0000000..7023465 --- /dev/null +++ b/23584-xenpaging_drop_xc.c_remove_xc_files.patch @@ -0,0 +1,182 @@ +changeset: 23584:e30cff57b146 +user: Olaf Hering +date: Fri Jun 10 10:47:12 2011 +0200 +files: tools/xenpaging/Makefile tools/xenpaging/mem_event.h tools/xenpaging/xc.c tools/xenpaging/xc.h tools/xenpaging/xenpaging.c tools/xenpaging/xenpaging.h +description: +xenpaging: drop xc.c, remove xc files + +Finally remove xc.c/xc.h and its references since both are empty now. + +Signed-off-by: Olaf Hering +Committed-by: Ian Jackson + + +--- + tools/xenpaging/Makefile | 2 - + tools/xenpaging/mem_event.h | 1 + tools/xenpaging/xc.c | 47 -------------------------------------- + tools/xenpaging/xc.h | 54 -------------------------------------------- + tools/xenpaging/xenpaging.c | 1 + tools/xenpaging/xenpaging.h | 1 + 6 files changed, 1 insertion(+), 105 deletions(-) + +Index: xen-4.1.2-testing/tools/xenpaging/Makefile +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/Makefile ++++ xen-4.1.2-testing/tools/xenpaging/Makefile +@@ -9,7 +9,7 @@ LDLIBS += $(LDLIBS_libxenctrl) $(LDLIBS + POLICY = default + + SRC := +-SRCS += file_ops.c xc.c xenpaging.c policy_$(POLICY).c ++SRCS += file_ops.c xenpaging.c policy_$(POLICY).c + + CFLAGS += -Werror + CFLAGS += -Wno-unused +Index: xen-4.1.2-testing/tools/xenpaging/mem_event.h +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/mem_event.h ++++ xen-4.1.2-testing/tools/xenpaging/mem_event.h +@@ -25,7 +25,6 @@ + #define __XEN_MEM_EVENT_H__ + + +-#include "xc.h" + #include + + #include +Index: xen-4.1.2-testing/tools/xenpaging/xc.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xc.c ++++ /dev/null +@@ -1,47 +0,0 @@ +-/****************************************************************************** +- * tools/xenpaging/lib/xc.c +- * +- * libxc-type add-ons for paging support. +- * +- * Copyright (c) 2009 Citrix Systems, Inc. 
(Patrick Colp) +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. +- * +- * This program is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +- * GNU General Public License for more details. +- * +- * You should have received a copy of the GNU General Public License +- * along with this program; if not, write to the Free Software +- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +- */ +- +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include "xc.h" +- +- +- +- +- +- +- +- +-/* +- * Local variables: +- * mode: C +- * c-set-style: "BSD" +- * c-basic-offset: 4 +- * tab-width: 4 +- * indent-tabs-mode: nil +- * End: +- */ +Index: xen-4.1.2-testing/tools/xenpaging/xc.h +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xc.h ++++ /dev/null +@@ -1,54 +0,0 @@ +-/****************************************************************************** +- * tools/xenpaging/lib/xc.h +- * +- * libxc add-ons. +- * +- * Copyright (c) 2009 Citrix Systems, Inc. (Patrick Colp) +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. +- * +- * This program is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +- * GNU General Public License for more details. 
+- * +- * You should have received a copy of the GNU General Public License +- * along with this program; if not, write to the Free Software +- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +- */ +- +- +-#ifndef __XC_H__ +-#define __XC_H__ +- +- +-#include +-#include +-#include +- +- +- +- +- +- +- +- +- +- +- +- +-#endif // __XC_H__ +- +- +-/* +- * Local variables: +- * mode: C +- * c-set-style: "BSD" +- * c-basic-offset: 4 +- * tab-width: 4 +- * indent-tabs-mode: nil +- * End: +- */ +Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c ++++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c +@@ -35,7 +35,6 @@ + + #include "xc_bitops.h" + #include "file_ops.h" +-#include "xc.h" + + #include "policy.h" + #include "xenpaging.h" +Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.h +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.h ++++ xen-4.1.2-testing/tools/xenpaging/xenpaging.h +@@ -25,7 +25,6 @@ + #define __XEN_PAGING2_H__ + + +-#include "xc.h" + #include + + #include diff --git a/xenpaging.guest_remove_page.slow_path.patch b/23585-xenpaging_correct_dropping_of_pages_to_avoid_full_ring_buffer.patch similarity index 85% rename from xenpaging.guest_remove_page.slow_path.patch rename to 23585-xenpaging_correct_dropping_of_pages_to_avoid_full_ring_buffer.patch index 7c0afb6..8ef1d81 100644 --- a/xenpaging.guest_remove_page.slow_path.patch +++ b/23585-xenpaging_correct_dropping_of_pages_to_avoid_full_ring_buffer.patch @@ -1,4 +1,9 @@ -xenpaging: correct dropping pages to avoid full ring buffer +changeset: 23585:b4d18ac00a46 +user: Olaf Hering +date: Fri Jun 10 10:47:14 2011 +0200 +files: tools/xenpaging/xenpaging.c +description: +xenpaging: correct dropping of pages to avoid full ring buffer Doing a one-way channel from Xen to xenpaging is not possible with the current ring buffer implementation. xenpaging uses the mem_event ring @@ -10,6 +15,8 @@ p2m_mem_paging_resume() consume the response from xenpaging. This makes room for yet another request/response pair and avoids hanging guests. Signed-off-by: Olaf Hering +Committed-by: Ian Jackson + --- tools/xenpaging/xenpaging.c | 22 +++++++++++----------- @@ -19,7 +26,7 @@ Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c =================================================================== --- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c +++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c -@@ -653,19 +653,19 @@ int main(int argc, char *argv[]) +@@ -690,19 +690,19 @@ int main(int argc, char *argv[]) ERROR("Error populating page"); goto out; } diff --git a/xenpaging.mem_event-no-p2mt.patch b/23586-xenpaging_do_not_bounce_p2mt_back_to_the_hypervisor.patch similarity index 57% rename from xenpaging.mem_event-no-p2mt.patch rename to 23586-xenpaging_do_not_bounce_p2mt_back_to_the_hypervisor.patch index 07b05c9..c720335 100644 --- a/xenpaging.mem_event-no-p2mt.patch +++ b/23586-xenpaging_do_not_bounce_p2mt_back_to_the_hypervisor.patch @@ -1,22 +1,26 @@ -xenpaging: do not bounce p2mt to xenpaging +changeset: 23586:bbdd7413a50a +user: Olaf Hering +date: Wed Jun 22 14:47:13 2011 +0100 +files: tools/xenpaging/xenpaging.c +description: +xenpaging: do not bounce p2mt back to the hypervisor -Do not bounce p2mt to xenpaging because p2m_mem_paging_populate and -p2m_mem_paging_resume dont make use of p2mt. 
Only pages of type -p2m_ram_rw will be paged-out, and during page-in this type has to be -restored. +do not bounce p2mt back to the hypervisor because p2m_mem_paging_populate() +and p2m_mem_paging_resume() dont make use of p2mt. Signed-off-by: Olaf Hering +Committed-by: Ian Jackson + --- tools/xenpaging/xenpaging.c | 4 ---- - xen/arch/x86/mm/p2m.c | 1 - - 2 files changed, 5 deletions(-) + 1 file changed, 4 deletions(-) Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c =================================================================== --- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c +++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c -@@ -657,7 +657,6 @@ int main(int argc, char *argv[]) +@@ -694,7 +694,6 @@ int main(int argc, char *argv[]) /* Prepare the response */ rsp.gfn = req.gfn; @@ -24,7 +28,7 @@ Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c rsp.vcpu_id = req.vcpu_id; rsp.flags = req.flags; -@@ -674,10 +673,8 @@ int main(int argc, char *argv[]) +@@ -711,10 +710,8 @@ int main(int argc, char *argv[]) else { DPRINTF("page already populated (domain = %d; vcpu = %d;" @@ -35,7 +39,7 @@ Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c req.gfn, req.flags & MEM_EVENT_FLAG_VCPU_PAUSED); /* Tell Xen to resume the vcpu */ -@@ -686,7 +683,6 @@ int main(int argc, char *argv[]) +@@ -723,7 +720,6 @@ int main(int argc, char *argv[]) { /* Prepare the response */ rsp.gfn = req.gfn; @@ -43,15 +47,3 @@ Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c rsp.vcpu_id = req.vcpu_id; rsp.flags = req.flags; -Index: xen-4.1.2-testing/xen/arch/x86/mm/p2m.c -=================================================================== ---- xen-4.1.2-testing.orig/xen/arch/x86/mm/p2m.c -+++ xen-4.1.2-testing/xen/arch/x86/mm/p2m.c -@@ -2975,7 +2975,6 @@ void p2m_mem_paging_populate(struct p2m_ - - /* Send request to pager */ - req.gfn = gfn; -- req.p2mt = p2mt; - req.vcpu_id = v->vcpu_id; - - mem_event_put_request(d, &req); diff --git a/xenpaging.no-srand.patch b/23587-xenpaging_remove_srand_call.patch similarity index 71% rename from xenpaging.no-srand.patch rename to 23587-xenpaging_remove_srand_call.patch index 2ec3248..efdef00 100644 --- a/xenpaging.no-srand.patch +++ b/23587-xenpaging_remove_srand_call.patch @@ -1,9 +1,16 @@ +changeset: 23587:926febc8bd98 +user: Olaf Hering +date: Fri Jun 10 10:47:16 2011 +0200 +files: tools/xenpaging/xenpaging.c +description: xenpaging: remove srand call The policy uses now a linear algorithm instead of a random one. Remove the call to srand(). 
Signed-off-by: Olaf Hering +Committed-by: Ian Jackson + --- tools/xenpaging/xenpaging.c | 3 --- @@ -13,7 +20,7 @@ Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c =================================================================== --- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c +++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c -@@ -544,9 +544,6 @@ int main(int argc, char *argv[]) +@@ -581,9 +581,6 @@ int main(int argc, char *argv[]) domain_id = atoi(argv[1]); num_pages = atoi(argv[2]); diff --git a/xenpaging.return-void.patch b/23588-xenpaging_remove_return_values_from_functions_that_can_not_fail.patch similarity index 74% rename from xenpaging.return-void.patch rename to 23588-xenpaging_remove_return_values_from_functions_that_can_not_fail.patch index a73b07c..08fc6ae 100644 --- a/xenpaging.return-void.patch +++ b/23588-xenpaging_remove_return_values_from_functions_that_can_not_fail.patch @@ -1,9 +1,16 @@ +changeset: 23588:e48535e70145 +user: Olaf Hering +date: Fri Jun 10 10:47:18 2011 +0200 +files: tools/xenpaging/xenpaging.c +description: xenpaging: remove return values from functions that can not fail get_request() and put_response() can not fail, remove return value and update calling functions. Signed-off-by: Olaf Hering +Committed-by: Ian Jackson + --- tools/xenpaging/xenpaging.c | 19 ++++--------------- @@ -13,7 +20,7 @@ Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c =================================================================== --- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c +++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c -@@ -297,7 +297,7 @@ static int xenpaging_teardown(xenpaging_ +@@ -342,7 +342,7 @@ static int xenpaging_teardown(xenpaging_ return -1; } @@ -22,10 +29,10 @@ Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c { mem_event_back_ring_t *back_ring; RING_IDX req_cons; -@@ -316,11 +316,9 @@ static int get_request(mem_event_t *mem_ +@@ -357,11 +357,9 @@ static int get_request(mem_event_t *mem_ + /* Update ring */ + back_ring->req_cons = req_cons; back_ring->sring->req_event = req_cons + 1; - - mem_event_ring_unlock(mem_event); - - return 0; } @@ -35,16 +42,16 @@ Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c { mem_event_back_ring_t *back_ring; RING_IDX rsp_prod; -@@ -339,8 +337,6 @@ static int put_response(mem_event_t *mem +@@ -376,8 +374,6 @@ static int put_response(mem_event_t *mem + /* Update ring */ + back_ring->rsp_prod_pvt = rsp_prod; RING_PUSH_RESPONSES(back_ring); - - mem_event_ring_unlock(mem_event); - - return 0; } static int xenpaging_evict_page(xenpaging_t *paging, -@@ -400,9 +396,7 @@ static int xenpaging_resume_page(xenpagi +@@ -437,9 +433,7 @@ static int xenpaging_resume_page(xenpagi int ret; /* Put the page info on the ring */ @@ -55,7 +62,7 @@ Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c /* Notify policy of page being paged in */ if ( notify_policy ) -@@ -612,12 +606,7 @@ int main(int argc, char *argv[]) +@@ -649,12 +643,7 @@ int main(int argc, char *argv[]) while ( RING_HAS_UNCONSUMED_REQUESTS(&paging->mem_event.back_ring) ) { diff --git a/xenpaging.catch-xc_mem_paging_resume-error.patch b/23589-xenpaging_catch_xc_mem_paging_resume_errors.patch similarity index 77% rename from xenpaging.catch-xc_mem_paging_resume-error.patch rename to 23589-xenpaging_catch_xc_mem_paging_resume_errors.patch index 9b50296..4ad1474 100644 --- a/xenpaging.catch-xc_mem_paging_resume-error.patch +++ b/23589-xenpaging_catch_xc_mem_paging_resume_errors.patch @@ -1,9 +1,16 @@ +changeset: 23589:49cb290ede16 +user: Olaf Hering +date: 
Fri Jun 10 10:47:19 2011 +0200 +files: tools/xenpaging/xenpaging.c +description: xenpaging: catch xc_mem_paging_resume errors In the unlikely event that xc_mem_paging_resume() fails, do not overwrite the error with the return value from xc_evtchn_notify() Signed-off-by: Olaf Hering +Committed-by: Ian Jackson + --- tools/xenpaging/xenpaging.c | 5 +++-- @@ -13,7 +20,7 @@ Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c =================================================================== --- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c +++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c -@@ -405,8 +405,9 @@ static int xenpaging_resume_page(xenpagi +@@ -442,8 +442,9 @@ static int xenpaging_resume_page(xenpagi /* Tell Xen page is ready */ ret = xc_mem_paging_resume(paging->xc_handle, paging->mem_event.domain_id, rsp->gfn); diff --git a/23590-xenpaging_remove_local_domain_id_variable.patch b/23590-xenpaging_remove_local_domain_id_variable.patch new file mode 100644 index 0000000..719dc2f --- /dev/null +++ b/23590-xenpaging_remove_local_domain_id_variable.patch @@ -0,0 +1,57 @@ +changeset: 23590:d957acb8bee6 +user: Olaf Hering +date: Fri Jun 10 10:47:20 2011 +0200 +files: tools/xenpaging/xenpaging.c +description: +xenpaging: remove local domain_id variable + +Remove the local domain_id variable, it is already fetched from +paging->mem_event in other places. +Update the sprintf format string to use unsigned argument. + +Signed-off-by: Olaf Hering +Committed-by: Ian Jackson + + +--- + tools/xenpaging/xenpaging.c | 8 +++----- + 1 file changed, 3 insertions(+), 5 deletions(-) + +Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c ++++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c +@@ -552,7 +552,6 @@ static int evict_victim(xenpaging_t *pag + int main(int argc, char *argv[]) + { + struct sigaction act; +- domid_t domain_id; + int num_pages; + xenpaging_t *paging; + xenpaging_victim_t *victims; +@@ -573,11 +572,10 @@ int main(int argc, char *argv[]) + return -1; + } + +- domain_id = atoi(argv[1]); + num_pages = atoi(argv[2]); + + /* Initialise domain paging */ +- paging = xenpaging_init(domain_id); ++ paging = xenpaging_init(atoi(argv[1])); + if ( paging == NULL ) + { + fprintf(stderr, "Error initialising paging"); +@@ -585,10 +583,10 @@ int main(int argc, char *argv[]) + } + xch = paging->xc_handle; + +- DPRINTF("starting %s %u %d\n", argv[0], domain_id, num_pages); ++ DPRINTF("starting %s %u %d\n", argv[0], paging->mem_event.domain_id, num_pages); + + /* Open file */ +- sprintf(filename, "page_cache_%d", domain_id); ++ sprintf(filename, "page_cache_%u", paging->mem_event.domain_id); + fd = open(filename, open_flags, open_mode); + if ( fd < 0 ) + { diff --git a/23591-xenpaging_move_num_pages_into_xenpaging_struct.patch b/23591-xenpaging_move_num_pages_into_xenpaging_struct.patch new file mode 100644 index 0000000..8901463 --- /dev/null +++ b/23591-xenpaging_move_num_pages_into_xenpaging_struct.patch @@ -0,0 +1,129 @@ +changeset: 23591:4aaa90c1db42 +user: Olaf Hering +date: Fri Jun 10 10:47:22 2011 +0200 +files: tools/xenpaging/xenpaging.c tools/xenpaging/xenpaging.h +description: +xenpaging: move num_pages into xenpaging struct + +Move num_pages into struct xenpaging. +num_pages will be used by the policy in a subsequent patch. + +Also remove a memset, the victims array is allocated with calloc. 
+ +Signed-off-by: Olaf Hering +Committed-by: Ian Jackson + + +--- + tools/xenpaging/xenpaging.c | 30 ++++++++++++++---------------- + tools/xenpaging/xenpaging.h | 1 + + 2 files changed, 15 insertions(+), 16 deletions(-) + +Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c ++++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c +@@ -128,7 +128,7 @@ static void *init_page(void) + return NULL; + } + +-static xenpaging_t *xenpaging_init(domid_t domain_id) ++static xenpaging_t *xenpaging_init(domid_t domain_id, int num_pages) + { + xenpaging_t *paging; + xc_interface *xch; +@@ -256,6 +256,13 @@ static xenpaging_t *xenpaging_init(domid + } + DPRINTF("max_pages = %"PRIx64"\n", paging->domain_info->max_pages); + ++ if ( num_pages < 0 || num_pages > paging->domain_info->max_pages ) ++ { ++ num_pages = paging->domain_info->max_pages; ++ DPRINTF("setting num_pages to %d\n", num_pages); ++ } ++ paging->num_pages = num_pages; ++ + /* Initialise policy */ + rc = policy_init(paging); + if ( rc != 0 ) +@@ -552,7 +559,6 @@ static int evict_victim(xenpaging_t *pag + int main(int argc, char *argv[]) + { + struct sigaction act; +- int num_pages; + xenpaging_t *paging; + xenpaging_victim_t *victims; + mem_event_request_t req; +@@ -572,10 +578,8 @@ int main(int argc, char *argv[]) + return -1; + } + +- num_pages = atoi(argv[2]); +- + /* Initialise domain paging */ +- paging = xenpaging_init(atoi(argv[1])); ++ paging = xenpaging_init(atoi(argv[1]), atoi(argv[2])); + if ( paging == NULL ) + { + fprintf(stderr, "Error initialising paging"); +@@ -583,7 +587,7 @@ int main(int argc, char *argv[]) + } + xch = paging->xc_handle; + +- DPRINTF("starting %s %u %d\n", argv[0], paging->mem_event.domain_id, num_pages); ++ DPRINTF("starting %s %u %d\n", argv[0], paging->mem_event.domain_id, paging->num_pages); + + /* Open file */ + sprintf(filename, "page_cache_%u", paging->mem_event.domain_id); +@@ -594,12 +598,7 @@ int main(int argc, char *argv[]) + return 2; + } + +- if ( num_pages < 0 || num_pages > paging->domain_info->max_pages ) +- { +- num_pages = paging->domain_info->max_pages; +- DPRINTF("setting num_pages to %d\n", num_pages); +- } +- victims = calloc(num_pages, sizeof(xenpaging_victim_t)); ++ victims = calloc(paging->num_pages, sizeof(xenpaging_victim_t)); + + /* ensure that if we get a signal, we'll do cleanup, then exit */ + act.sa_handler = close_handler; +@@ -611,8 +610,7 @@ int main(int argc, char *argv[]) + sigaction(SIGALRM, &act, NULL); + + /* Evict pages */ +- memset(victims, 0, sizeof(xenpaging_victim_t) * num_pages); +- for ( i = 0; i < num_pages; i++ ) ++ for ( i = 0; i < paging->num_pages; i++ ) + { + rc = evict_victim(paging, &victims[i], fd, i); + if ( rc == -ENOSPC ) +@@ -648,13 +646,13 @@ int main(int argc, char *argv[]) + if ( test_and_clear_bit(req.gfn, paging->bitmap) ) + { + /* Find where in the paging file to read from */ +- for ( i = 0; i < num_pages; i++ ) ++ for ( i = 0; i < paging->num_pages; i++ ) + { + if ( victims[i].gfn == req.gfn ) + break; + } + +- if ( i >= num_pages ) ++ if ( i >= paging->num_pages ) + { + DPRINTF("Couldn't find page %"PRIx64"\n", req.gfn); + goto out; +Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.h +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.h ++++ xen-4.1.2-testing/tools/xenpaging/xenpaging.h +@@ -42,6 +42,7 @@ typedef struct xenpaging { + unsigned long 
*bitmap; + + mem_event_t mem_event; ++ int num_pages; + int policy_mru_size; + } xenpaging_t; + diff --git a/23592-xenpaging_start_paging_in_the_middle_of_gfn_range.patch b/23592-xenpaging_start_paging_in_the_middle_of_gfn_range.patch new file mode 100644 index 0000000..294b699 --- /dev/null +++ b/23592-xenpaging_start_paging_in_the_middle_of_gfn_range.patch @@ -0,0 +1,33 @@ +changeset: 23592:1e44e75d889c +user: Olaf Hering +date: Fri Jun 10 10:47:23 2011 +0200 +files: tools/xenpaging/policy_default.c +description: +xenpaging: start paging in the middle of gfn range + +Set the starting gfn to somewhere in the middle of the gfn range to +avoid paging during BIOS startup. This can speedup booting of a guest. + +Signed-off-by: Olaf Hering +Committed-by: Ian Jackson + + +--- + tools/xenpaging/policy_default.c | 4 ++++ + 1 file changed, 4 insertions(+) + +Index: xen-4.1.2-testing/tools/xenpaging/policy_default.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/policy_default.c ++++ xen-4.1.2-testing/tools/xenpaging/policy_default.c +@@ -69,6 +69,10 @@ int policy_init(xenpaging_t *paging) + /* Don't page out page 0 */ + set_bit(0, bitmap); + ++ /* Start in the middle to avoid paging during BIOS startup */ ++ current_gfn = max_pages / 2; ++ current_gfn -= paging->num_pages / 2; ++ + rc = 0; + out: + return rc; diff --git a/xenpaging.xenpaging_populate_page-gfn.patch b/23593-xenpaging_pass_integer_to_xenpaging_populate_page.patch similarity index 78% rename from xenpaging.xenpaging_populate_page-gfn.patch rename to 23593-xenpaging_pass_integer_to_xenpaging_populate_page.patch index bc41468..39cc5e6 100644 --- a/xenpaging.xenpaging_populate_page-gfn.patch +++ b/23593-xenpaging_pass_integer_to_xenpaging_populate_page.patch @@ -1,3 +1,8 @@ +changeset: 23593:7d72475641fa +user: Olaf Hering +date: Fri Jun 10 10:47:24 2011 +0200 +files: tools/xenpaging/xenpaging.c +description: xenpaging: pass integer to xenpaging_populate_page Pass gfn as integer to xenpaging_populate_page(). xc_map_foreign_pages() @@ -5,22 +10,23 @@ takes a pointer to a list of gfns, but its a const pointer. So writing the value back to the caller is not needed. 
Signed-off-by: Olaf Hering +Committed-by: Ian Jackson + --- - tools/xenpaging/xenpaging.c | 17 ++++++----------- - 1 file changed, 6 insertions(+), 11 deletions(-) + tools/xenpaging/xenpaging.c | 16 ++++++---------- + 1 file changed, 6 insertions(+), 10 deletions(-) Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c =================================================================== --- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c +++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c -@@ -413,28 +413,24 @@ static int xenpaging_resume_page(xenpagi - return ret; +@@ -458,27 +458,24 @@ static int xenpaging_resume_page(xenpagi } --static int xenpaging_populate_page(xenpaging_t *paging, + static int xenpaging_populate_page(xenpaging_t *paging, - uint64_t *gfn, int fd, int i) -+static int xenpaging_populate_page(xenpaging_t *paging, xen_pfn_t gfn, int fd, int i) ++ xen_pfn_t gfn, int fd, int i) { xc_interface *xch = paging->xc_handle; - unsigned long _gfn; @@ -47,7 +53,7 @@ Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c sleep(1); continue; } -@@ -447,8 +443,7 @@ static int xenpaging_populate_page(xenpa +@@ -491,8 +488,7 @@ static int xenpaging_populate_page(xenpa /* Map page */ ret = -EFAULT; page = xc_map_foreign_pages(xch, paging->mem_event.domain_id, @@ -57,7 +63,7 @@ Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c if ( page == NULL ) { ERROR("Error mapping page: page is null"); -@@ -634,7 +629,7 @@ int main(int argc, char *argv[]) +@@ -667,7 +663,7 @@ int main(int argc, char *argv[]) else { /* Populate the page */ diff --git a/23594-xenpaging_add_helper_function_for_unlinking_pagefile.patch b/23594-xenpaging_add_helper_function_for_unlinking_pagefile.patch new file mode 100644 index 0000000..94cde26 --- /dev/null +++ b/23594-xenpaging_add_helper_function_for_unlinking_pagefile.patch @@ -0,0 +1,53 @@ +changeset: 23594:2fe46305a00d +user: Olaf Hering +date: Fri Jun 10 10:47:25 2011 +0200 +files: tools/xenpaging/xenpaging.c +description: +xenpaging: add helper function for unlinking pagefile + +Unlink pagefile in the signal handler and also in the exit path. +This does not leave a stale pagefile if an error occoured. 
+ +Signed-off-by: Olaf Hering +Committed-by: Ian Jackson + + +--- + tools/xenpaging/xenpaging.c | 14 ++++++++++++-- + 1 file changed, 12 insertions(+), 2 deletions(-) + +Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c ++++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c +@@ -41,11 +41,20 @@ + + static char filename[80]; + static int interrupted; +-static void close_handler(int sig) ++ ++static void unlink_pagefile(void) + { +- interrupted = sig; + if ( filename[0] ) ++ { + unlink(filename); ++ filename[0] = '\0'; ++ } ++} ++ ++static void close_handler(int sig) ++{ ++ interrupted = sig; ++ unlink_pagefile(); + } + + static int xenpaging_mem_paging_flush_ioemu_cache(xenpaging_t *paging) +@@ -716,6 +725,7 @@ int main(int argc, char *argv[]) + + out: + close(fd); ++ unlink_pagefile(); + free(victims); + + /* Tear down domain paging */ diff --git a/23595-xenpaging_add_watch_thread_to_catch_guest_shutdown.patch b/23595-xenpaging_add_watch_thread_to_catch_guest_shutdown.patch new file mode 100644 index 0000000..3e9e5af --- /dev/null +++ b/23595-xenpaging_add_watch_thread_to_catch_guest_shutdown.patch @@ -0,0 +1,96 @@ +changeset: 23595:389c8bf31688 +user: Olaf Hering +date: Fri Jun 10 10:47:27 2011 +0200 +files: tools/xenpaging/xenpaging.c +description: +xenpaging: add watch thread to catch guest shutdown + +If xenpaging is started manually then no event is sent to xenpaging when +the guest is shutdown or rebooted. Add a watch on the @releaseDomain +node to leave the loop and gracefully shutdown the pager. + +Signed-off-by: Olaf Hering +Committed-by: Ian Jackson + + +--- + tools/xenpaging/xenpaging.c | 40 ++++++++++++++++++++++++++++++++++++++-- + 1 file changed, 38 insertions(+), 2 deletions(-) + +Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c ++++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c +@@ -39,6 +39,7 @@ + #include "policy.h" + #include "xenpaging.h" + ++static char watch_token[16]; + static char filename[80]; + static int interrupted; + +@@ -75,13 +76,19 @@ static int xenpaging_wait_for_event_or_t + { + xc_interface *xch = paging->xc_handle; + xc_evtchn *xce = paging->mem_event.xce_handle; +- struct pollfd fd[1]; ++ char **vec; ++ unsigned int num; ++ struct pollfd fd[2]; + int port; + int rc; + ++ /* Wait for event channel and xenstore */ + fd[0].fd = xc_evtchn_fd(xce); + fd[0].events = POLLIN | POLLERR; +- rc = poll(fd, 1, 100); ++ fd[1].fd = xs_fileno(paging->xs_handle); ++ fd[1].events = POLLIN | POLLERR; ++ ++ rc = poll(fd, 2, 100); + if ( rc < 0 ) + { + if (errno == EINTR) +@@ -91,6 +98,27 @@ static int xenpaging_wait_for_event_or_t + return -errno; + } + ++ /* First check for guest shutdown */ ++ if ( rc && fd[1].revents & POLLIN ) ++ { ++ DPRINTF("Got event from xenstore\n"); ++ vec = xs_read_watch(paging->xs_handle, &num); ++ if ( vec ) ++ { ++ if ( strcmp(vec[XS_WATCH_TOKEN], watch_token) == 0 ) ++ { ++ /* If our guest disappeared, set interrupt flag and fall through */ ++ if ( xs_is_domain_introduced(paging->xs_handle, paging->mem_event.domain_id) == false ) ++ { ++ xs_unwatch(paging->xs_handle, "@releaseDomain", watch_token); ++ interrupted = SIGQUIT; ++ rc = 0; ++ } ++ } ++ free(vec); ++ } ++ } ++ + if ( rc && fd[0].revents & POLLIN ) + { + DPRINTF("Got event from evtchn\n"); +@@ -165,6 +193,14 @@ static xenpaging_t 
*xenpaging_init(domid + goto err; + } + ++ /* write domain ID to watch so we can ignore other domain shutdowns */ ++ snprintf(watch_token, sizeof(watch_token), "%u", domain_id); ++ if ( xs_watch(paging->xs_handle, "@releaseDomain", watch_token) == false ) ++ { ++ ERROR("Could not bind to shutdown watch\n"); ++ goto err; ++ } ++ + p = getenv("XENPAGING_POLICY_MRU_SIZE"); + if ( p && *p ) + { diff --git a/23596-xenpaging_implement_stopping_of_pager_by_sending_SIGTERM-SIGINT.patch b/23596-xenpaging_implement_stopping_of_pager_by_sending_SIGTERM-SIGINT.patch new file mode 100644 index 0000000..bb1cef8 --- /dev/null +++ b/23596-xenpaging_implement_stopping_of_pager_by_sending_SIGTERM-SIGINT.patch @@ -0,0 +1,187 @@ +changeset: 23596:c49e22648d0e +user: Olaf Hering +date: Fri Jun 10 10:47:28 2011 +0200 +files: tools/xenpaging/Makefile tools/xenpaging/pagein.c tools/xenpaging/xenpaging.c tools/xenpaging/xenpaging.h +description: +xenpaging: implement stopping of pager by sending SIGTERM/SIGINT + +Write all paged-out pages back into the guest if the pager is +interrupted by ctrl-c or if it receives SIGTERM. + +Signed-off-by: Olaf Hering +Committed-by: Ian Jackson + + +--- + tools/xenpaging/Makefile | 1 + tools/xenpaging/pagein.c | 68 ++++++++++++++++++++++++++++++++++++++++++++ + tools/xenpaging/xenpaging.c | 35 ++++++++++++++++++++-- + tools/xenpaging/xenpaging.h | 3 + + 4 files changed, 104 insertions(+), 3 deletions(-) + +Index: xen-4.1.2-testing/tools/xenpaging/Makefile +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/Makefile ++++ xen-4.1.2-testing/tools/xenpaging/Makefile +@@ -10,6 +10,7 @@ POLICY = default + + SRC := + SRCS += file_ops.c xenpaging.c policy_$(POLICY).c ++SRCS += pagein.c + + CFLAGS += -Werror + CFLAGS += -Wno-unused +Index: xen-4.1.2-testing/tools/xenpaging/pagein.c +=================================================================== +--- /dev/null ++++ xen-4.1.2-testing/tools/xenpaging/pagein.c +@@ -0,0 +1,68 @@ ++/* Trigger a page-in in a separate thread-of-execution to avoid deadlock */ ++#include ++#include "xc_private.h" ++ ++struct page_in_args { ++ domid_t dom; ++ xc_interface *xch; ++}; ++ ++static struct page_in_args page_in_args; ++static unsigned long page_in_gfn; ++static unsigned int page_in_possible; ++ ++static pthread_t page_in_thread; ++static pthread_cond_t page_in_cond = PTHREAD_COND_INITIALIZER; ++static pthread_mutex_t page_in_mutex = PTHREAD_MUTEX_INITIALIZER; ++ ++static void *page_in(void *arg) ++{ ++ struct page_in_args *pia = arg; ++ void *page; ++ xen_pfn_t gfn; ++ ++ while (1) ++ { ++ pthread_mutex_lock(&page_in_mutex); ++ while (!page_in_gfn) ++ pthread_cond_wait(&page_in_cond, &page_in_mutex); ++ gfn = page_in_gfn; ++ page_in_gfn = 0; ++ pthread_mutex_unlock(&page_in_mutex); ++ ++ /* Ignore errors */ ++ page = xc_map_foreign_pages(pia->xch, pia->dom, PROT_READ, &gfn, 1); ++ if (page) ++ munmap(page, PAGE_SIZE); ++ } ++ page_in_possible = 0; ++ pthread_exit(NULL); ++} ++ ++void page_in_trigger(unsigned long gfn) ++{ ++ if (!page_in_possible) ++ return; ++ ++ pthread_mutex_lock(&page_in_mutex); ++ page_in_gfn = gfn; ++ pthread_mutex_unlock(&page_in_mutex); ++ pthread_cond_signal(&page_in_cond); ++} ++ ++void create_page_in_thread(domid_t domain_id, xc_interface *xch) ++{ ++ page_in_args.dom = domain_id; ++ page_in_args.xch = xch; ++ if (pthread_create(&page_in_thread, NULL, page_in, &page_in_args) == 0) ++ page_in_possible = 1; ++} ++ ++/* ++ * Local variables: ++ * mode: C ++ * 
c-set-style: "BSD" ++ * c-basic-offset: 4 ++ * indent-tabs-mode: nil ++ * End: ++ */ +Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c ++++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c +@@ -650,6 +650,9 @@ int main(int argc, char *argv[]) + sigaction(SIGINT, &act, NULL); + sigaction(SIGALRM, &act, NULL); + ++ /* listen for page-in events to stop pager */ ++ create_page_in_thread(paging->mem_event.domain_id, xch); ++ + /* Evict pages */ + for ( i = 0; i < paging->num_pages; i++ ) + { +@@ -665,7 +668,7 @@ int main(int argc, char *argv[]) + DPRINTF("%d pages evicted. Done.\n", i); + + /* Swap pages in and out */ +- while ( !interrupted ) ++ while ( 1 ) + { + /* Wait for Xen to signal that a page needs paged in */ + rc = xenpaging_wait_for_event_or_timeout(paging); +@@ -728,8 +731,12 @@ int main(int argc, char *argv[]) + goto out; + } + +- /* Evict a new page to replace the one we just paged in */ +- evict_victim(paging, &victims[i], fd, i); ++ /* Evict a new page to replace the one we just paged in, ++ * or clear this pagefile slot on exit */ ++ if ( interrupted ) ++ victims[i].gfn = INVALID_MFN; ++ else ++ evict_victim(paging, &victims[i], fd, i); + } + else + { +@@ -756,6 +763,28 @@ int main(int argc, char *argv[]) + } + } + } ++ ++ /* Write all pages back into the guest */ ++ if ( interrupted == SIGTERM || interrupted == SIGINT ) ++ { ++ for ( i = 0; i < paging->domain_info->max_pages; i++ ) ++ { ++ if ( test_bit(i, paging->bitmap) ) ++ { ++ page_in_trigger(i); ++ break; ++ } ++ } ++ /* If no more pages to process, exit loop */ ++ if ( i == paging->domain_info->max_pages ) ++ break; ++ } ++ else ++ { ++ /* Exit on any other signal */ ++ if ( interrupted ) ++ break; ++ } + } + DPRINTF("xenpaging got signal %d\n", interrupted); + +Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.h +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.h ++++ xen-4.1.2-testing/tools/xenpaging/xenpaging.h +@@ -53,6 +53,9 @@ typedef struct xenpaging_victim { + } xenpaging_victim_t; + + ++extern void create_page_in_thread(domid_t domain_id, xc_interface *xch); ++extern void page_in_trigger(unsigned long gfn); ++ + #endif // __XEN_PAGING_H__ + + diff --git a/23597-xenpaging_remove_private_mem_event.h.patch b/23597-xenpaging_remove_private_mem_event.h.patch new file mode 100644 index 0000000..4106dc9 --- /dev/null +++ b/23597-xenpaging_remove_private_mem_event.h.patch @@ -0,0 +1,123 @@ +changeset: 23597:3dcb553f3ba9 +user: Olaf Hering +date: Fri Jun 10 10:47:29 2011 +0200 +files: tools/xenpaging/mem_event.h tools/xenpaging/xenpaging.c tools/xenpaging/xenpaging.h +description: +xenpaging: remove private mem_event.h + +tools/xenpaging/mem_event.h is only included in xenpaging.h. +Add the contents into that file and remove mem_event.h. 
+ +Signed-off-by: Olaf Hering +Committed-by: Ian Jackson + + +--- + tools/xenpaging/mem_event.h | 57 -------------------------------------------- + tools/xenpaging/xenpaging.c | 3 -- + tools/xenpaging/xenpaging.h | 11 ++++++-- + 3 files changed, 8 insertions(+), 63 deletions(-) + +Index: xen-4.1.2-testing/tools/xenpaging/mem_event.h +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/mem_event.h ++++ /dev/null +@@ -1,57 +0,0 @@ +-/****************************************************************************** +- * tools/xenpaging/mem_event.h +- * +- * Memory event structures. +- * +- * Copyright (c) 2009 Citrix Systems, Inc. (Patrick Colp) +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. +- * +- * This program is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +- * GNU General Public License for more details. +- * +- * You should have received a copy of the GNU General Public License +- * along with this program; if not, write to the Free Software +- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +- */ +- +- +-#ifndef __XEN_MEM_EVENT_H__ +-#define __XEN_MEM_EVENT_H__ +- +- +-#include +- +-#include +-#include +- +- +- +- +-typedef struct mem_event { +- domid_t domain_id; +- xc_evtchn *xce_handle; +- int port; +- mem_event_back_ring_t back_ring; +- mem_event_shared_page_t *shared_page; +- void *ring_page; +-} mem_event_t; +- +- +-#endif // __XEN_MEM_EVENT_H__ +- +- +-/* +- * Local variables: +- * mode: C +- * c-set-style: "BSD" +- * c-basic-offset: 4 +- * tab-width: 4 +- * indent-tabs-mode: nil +- * End: +- */ +Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c ++++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c +@@ -31,11 +31,8 @@ + #include + #include + +-#include +- + #include "xc_bitops.h" + #include "file_ops.h" +- + #include "policy.h" + #include "xenpaging.h" + +Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.h +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.h ++++ xen-4.1.2-testing/tools/xenpaging/xenpaging.h +@@ -26,12 +26,17 @@ + + + #include +- + #include + #include + +-#include "mem_event.h" +- ++typedef struct mem_event { ++ domid_t domain_id; ++ xc_evtchn *xce_handle; ++ int port; ++ mem_event_back_ring_t back_ring; ++ mem_event_shared_page_t *shared_page; ++ void *ring_page; ++} mem_event_t; + + typedef struct xenpaging { + xc_interface *xc_handle; diff --git a/23599-tools_fix_build_after_recent_xenpaging_changes.patch b/23599-tools_fix_build_after_recent_xenpaging_changes.patch new file mode 100644 index 0000000..604e313 --- /dev/null +++ b/23599-tools_fix_build_after_recent_xenpaging_changes.patch @@ -0,0 +1,30 @@ +changeset: 23599:d3027374a8c0 +user: Tim Deegan +date: Mon Jun 27 14:48:57 2011 +0100 +files: tools/xenpaging/Makefile +description: +tools: fix build after recent xenpaging changes + +xenpaging now uses pthreads, so must link appropriately. 
+ +Signed-off-by: Tim Deegan +Committed-by: Ian Jackson + + +--- + tools/xenpaging/Makefile | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: xen-4.1.2-testing/tools/xenpaging/Makefile +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/Makefile ++++ xen-4.1.2-testing/tools/xenpaging/Makefile +@@ -4,7 +4,7 @@ include $(XEN_ROOT)/tools/Rules.mk + CFLAGS += -I $(XEN_XC) + CFLAGS += -I ./ + CFLAGS += $(CFLAGS_libxenctrl) $(CFLAGS_libxenstore) +-LDLIBS += $(LDLIBS_libxenctrl) $(LDLIBS_libxenstore) ++LDLIBS += $(LDLIBS_libxenctrl) $(LDLIBS_libxenstore) -pthread + + POLICY = default + diff --git a/23614-x86_64-EFI-boot.patch b/23614-x86_64-EFI-boot.patch index 3ac0036..43d4417 100644 --- a/23614-x86_64-EFI-boot.patch +++ b/23614-x86_64-EFI-boot.patch @@ -2143,7 +2143,7 @@ Index: xen-4.1.2-testing/xen/arch/x86/mm.c #include #include #include -@@ -353,6 +354,8 @@ void __init arch_init_memory(void) +@@ -355,6 +356,8 @@ void __init arch_init_memory(void) subarch_init_memory(); @@ -2288,7 +2288,7 @@ Index: xen-4.1.2-testing/xen/arch/x86/x86_64/mm.c #include #include #include -@@ -828,7 +829,8 @@ void __init zap_low_mappings(void) +@@ -830,7 +831,8 @@ void __init zap_low_mappings(void) /* Replace with mapping of the boot trampoline only. */ map_pages_to_xen(BOOT_TRAMPOLINE, BOOT_TRAMPOLINE >> PAGE_SHIFT, diff --git a/23676-x86_64-image-map-bounds.patch b/23676-x86_64-image-map-bounds.patch index 2cf47d4..892ca63 100644 --- a/23676-x86_64-image-map-bounds.patch +++ b/23676-x86_64-image-map-bounds.patch @@ -18,7 +18,7 @@ Index: xen-4.1.2-testing/xen/arch/x86/mm.c =================================================================== --- xen-4.1.2-testing.orig/xen/arch/x86/mm.c +++ xen-4.1.2-testing/xen/arch/x86/mm.c -@@ -776,7 +776,7 @@ static int update_xen_mappings(unsigned +@@ -778,7 +778,7 @@ static int update_xen_mappings(unsigned int err = 0; #ifdef __x86_64__ bool_t alias = mfn >= PFN_DOWN(xen_phys_start) && diff --git a/23723-x86-CMOS-lock.patch b/23723-x86-CMOS-lock.patch index 39b4ef3..bcdf55e 100644 --- a/23723-x86-CMOS-lock.patch +++ b/23723-x86-CMOS-lock.patch @@ -136,7 +136,7 @@ Index: xen-4.1.2-testing/xen/arch/x86/traps.c #include /* -@@ -1629,6 +1631,10 @@ static int admin_io_okay( +@@ -1630,6 +1632,10 @@ static int admin_io_okay( if ( (port == 0xcf8) && (bytes == 4) ) return 0; @@ -147,7 +147,7 @@ Index: xen-4.1.2-testing/xen/arch/x86/traps.c return ioports_access_permitted(v->domain, port, port + bytes - 1); } -@@ -1658,6 +1664,21 @@ static uint32_t guest_io_read( +@@ -1659,6 +1665,21 @@ static uint32_t guest_io_read( { sub_data = pv_pit_handler(port, 0, 0); } @@ -169,7 +169,7 @@ Index: xen-4.1.2-testing/xen/arch/x86/traps.c else if ( (port == 0xcf8) && (bytes == 4) ) { size = 4; -@@ -1683,8 +1704,6 @@ static uint32_t guest_io_read( +@@ -1684,8 +1705,6 @@ static uint32_t guest_io_read( return data; } @@ -178,7 +178,7 @@ Index: xen-4.1.2-testing/xen/arch/x86/traps.c static void guest_io_write( unsigned int port, unsigned int bytes, uint32_t data, struct vcpu *v, struct cpu_user_regs *regs) -@@ -1693,8 +1712,6 @@ static void guest_io_write( +@@ -1694,8 +1713,6 @@ static void guest_io_write( { switch ( bytes ) { case 1: @@ -187,7 +187,7 @@ Index: xen-4.1.2-testing/xen/arch/x86/traps.c outb((uint8_t)data, port); if ( pv_post_outb_hook ) pv_post_outb_hook(port, (uint8_t)data); -@@ -1717,6 +1734,23 @@ static void guest_io_write( +@@ -1718,6 +1735,23 @@ static void guest_io_write( { pv_pit_handler(port, (uint8_t)data, 
1); } @@ -211,7 +211,7 @@ Index: xen-4.1.2-testing/xen/arch/x86/traps.c else if ( (port == 0xcf8) && (bytes == 4) ) { size = 4; -@@ -2082,10 +2116,6 @@ static int emulate_privileged_op(struct +@@ -2083,10 +2117,6 @@ static int emulate_privileged_op(struct goto fail; if ( admin_io_okay(port, op_bytes, v, regs) ) { diff --git a/23772-x86-trampoline.patch b/23772-x86-trampoline.patch index e8eea59..6b3bc00 100644 --- a/23772-x86-trampoline.patch +++ b/23772-x86-trampoline.patch @@ -316,7 +316,7 @@ Index: xen-4.1.2-testing/xen/arch/x86/x86_32/mm.c #include #include #include -@@ -164,8 +165,9 @@ void __init zap_low_mappings(l2_pgentry_ +@@ -166,8 +167,9 @@ void __init zap_low_mappings(l2_pgentry_ flush_all(FLUSH_TLB_GLOBAL); /* Replace with mapping of the boot trampoline only. */ @@ -332,7 +332,7 @@ Index: xen-4.1.2-testing/xen/arch/x86/x86_64/mm.c =================================================================== --- xen-4.1.2-testing.orig/xen/arch/x86/x86_64/mm.c +++ xen-4.1.2-testing/xen/arch/x86/x86_64/mm.c -@@ -828,7 +828,7 @@ void __init zap_low_mappings(void) +@@ -830,7 +830,7 @@ void __init zap_low_mappings(void) flush_local(FLUSH_TLB_GLOBAL); /* Replace with mapping of the boot trampoline only. */ diff --git a/xenpaging.23817-mem_event_check_ring.patch b/23817-mem_event_add_ref_counting_for_free_requestslots.patch similarity index 95% rename from xenpaging.23817-mem_event_check_ring.patch rename to 23817-mem_event_add_ref_counting_for_free_requestslots.patch index ee25028..825624b 100644 --- a/xenpaging.23817-mem_event_check_ring.patch +++ b/23817-mem_event_add_ref_counting_for_free_requestslots.patch @@ -1,5 +1,7 @@ -xen-unstable changeset: 23817:083f10851dd8 +changeset: 23817:083f10851dd8 +user: Olaf Hering date: Mon Sep 05 15:10:09 2011 +0100 +files: xen/arch/x86/mm/mem_event.c xen/arch/x86/mm/mem_sharing.c xen/arch/x86/mm/p2m.c xen/include/asm-x86/mem_event.h xen/include/xen/sched.h description: mem_event: add ref counting for free requestslots @@ -26,6 +28,7 @@ result, incomplete requests could be consumed by the ring user. 
Signed-off-by: Olaf Hering + --- xen/arch/x86/mm/mem_event.c | 19 ++++++++++++------- xen/arch/x86/mm/mem_sharing.c | 1 - diff --git a/23818-mem_event_use_mem_event_mark_and_pause_in_mem_event_check_ring.patch b/23818-mem_event_use_mem_event_mark_and_pause_in_mem_event_check_ring.patch new file mode 100644 index 0000000..0237ec1 --- /dev/null +++ b/23818-mem_event_use_mem_event_mark_and_pause_in_mem_event_check_ring.patch @@ -0,0 +1,30 @@ +changeset: 23818:0268e7380953 +user: Olaf Hering +date: Mon Sep 05 15:10:28 2011 +0100 +files: xen/arch/x86/mm/mem_event.c +description: +mem_event: use mem_event_mark_and_pause() in mem_event_check_ring() + +Signed-off-by: Olaf Hering + + +--- + xen/arch/x86/mm/mem_event.c | 5 +---- + 1 file changed, 1 insertion(+), 4 deletions(-) + +Index: xen-4.1.2-testing/xen/arch/x86/mm/mem_event.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/mm/mem_event.c ++++ xen-4.1.2-testing/xen/arch/x86/mm/mem_event.c +@@ -178,10 +178,7 @@ int mem_event_check_ring(struct domain * + } + + if ( (curr->domain->domain_id == d->domain_id) && ring_full ) +- { +- set_bit(_VPF_mem_event, &curr->pause_flags); +- vcpu_sleep_nosync(curr); +- } ++ mem_event_mark_and_pause(curr); + + mem_event_ring_unlock(d); + diff --git a/23827-xenpaging_use_batch_of_pages_during_final_page-in.patch b/23827-xenpaging_use_batch_of_pages_during_final_page-in.patch new file mode 100644 index 0000000..0ea6592 --- /dev/null +++ b/23827-xenpaging_use_batch_of_pages_during_final_page-in.patch @@ -0,0 +1,178 @@ +changeset: 23827:d1d6abc1db20 +user: Olaf Hering +date: Tue Sep 13 10:25:32 2011 +0100 +files: tools/xenpaging/pagein.c tools/xenpaging/xenpaging.c tools/xenpaging/xenpaging.h +description: +xenpaging: use batch of pages during final page-in + +Map up to RING_SIZE pages in exit path to fill the ring instead of +populating one page at a time. 
+ +Signed-off-by: Olaf Hering + + +--- + tools/xenpaging/pagein.c | 36 ++++++++++++++++++++++++------------ + tools/xenpaging/xenpaging.c | 18 +++++++++++++----- + tools/xenpaging/xenpaging.h | 7 +++++-- + 3 files changed, 42 insertions(+), 19 deletions(-) + +Index: xen-4.1.2-testing/tools/xenpaging/pagein.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/pagein.c ++++ xen-4.1.2-testing/tools/xenpaging/pagein.c +@@ -1,14 +1,16 @@ + /* Trigger a page-in in a separate thread-of-execution to avoid deadlock */ + #include +-#include "xc_private.h" ++#include ++#include "xenpaging.h" + + struct page_in_args { + domid_t dom; ++ unsigned long *pagein_queue; + xc_interface *xch; + }; + + static struct page_in_args page_in_args; +-static unsigned long page_in_gfn; ++static unsigned long page_in_request; + static unsigned int page_in_possible; + + static pthread_t page_in_thread; +@@ -19,19 +21,28 @@ static void *page_in(void *arg) + { + struct page_in_args *pia = arg; + void *page; +- xen_pfn_t gfn; ++ int i, num; ++ xen_pfn_t gfns[XENPAGING_PAGEIN_QUEUE_SIZE]; + + while (1) + { + pthread_mutex_lock(&page_in_mutex); +- while (!page_in_gfn) ++ while (!page_in_request) + pthread_cond_wait(&page_in_cond, &page_in_mutex); +- gfn = page_in_gfn; +- page_in_gfn = 0; ++ num = 0; ++ for (i = 0; i < XENPAGING_PAGEIN_QUEUE_SIZE; i++) ++ { ++ if (!pia->pagein_queue[i]) ++ continue; ++ gfns[num] = pia->pagein_queue[i]; ++ pia->pagein_queue[i] = 0; ++ num++; ++ } ++ page_in_request = 0; + pthread_mutex_unlock(&page_in_mutex); + + /* Ignore errors */ +- page = xc_map_foreign_pages(pia->xch, pia->dom, PROT_READ, &gfn, 1); ++ page = xc_map_foreign_pages(pia->xch, pia->dom, PROT_READ, gfns, num); + if (page) + munmap(page, PAGE_SIZE); + } +@@ -39,21 +50,22 @@ static void *page_in(void *arg) + pthread_exit(NULL); + } + +-void page_in_trigger(unsigned long gfn) ++void page_in_trigger(void) + { + if (!page_in_possible) + return; + + pthread_mutex_lock(&page_in_mutex); +- page_in_gfn = gfn; ++ page_in_request = 1; + pthread_mutex_unlock(&page_in_mutex); + pthread_cond_signal(&page_in_cond); + } + +-void create_page_in_thread(domid_t domain_id, xc_interface *xch) ++void create_page_in_thread(xenpaging_t *paging) + { +- page_in_args.dom = domain_id; +- page_in_args.xch = xch; ++ page_in_args.dom = paging->mem_event.domain_id; ++ page_in_args.pagein_queue = paging->pagein_queue; ++ page_in_args.xch = paging->xc_handle; + if (pthread_create(&page_in_thread, NULL, page_in, &page_in_args) == 0) + page_in_possible = 1; + } +Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c ++++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c +@@ -648,7 +648,7 @@ int main(int argc, char *argv[]) + sigaction(SIGALRM, &act, NULL); + + /* listen for page-in events to stop pager */ +- create_page_in_thread(paging->mem_event.domain_id, xch); ++ create_page_in_thread(paging); + + /* Evict pages */ + for ( i = 0; i < paging->num_pages; i++ ) +@@ -764,16 +764,24 @@ int main(int argc, char *argv[]) + /* Write all pages back into the guest */ + if ( interrupted == SIGTERM || interrupted == SIGINT ) + { ++ int num = 0; + for ( i = 0; i < paging->domain_info->max_pages; i++ ) + { + if ( test_bit(i, paging->bitmap) ) + { +- page_in_trigger(i); +- break; ++ paging->pagein_queue[num] = i; ++ num++; ++ if ( num == XENPAGING_PAGEIN_QUEUE_SIZE ) ++ break; + } + } +- /* If no 
more pages to process, exit loop */ +- if ( i == paging->domain_info->max_pages ) ++ /* ++ * One more round if there are still pages to process. ++ * If no more pages to process, exit loop. ++ */ ++ if ( num ) ++ page_in_trigger(); ++ else if ( i == paging->domain_info->max_pages ) + break; + } + else +Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.h +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.h ++++ xen-4.1.2-testing/tools/xenpaging/xenpaging.h +@@ -29,6 +29,8 @@ + #include + #include + ++#define XENPAGING_PAGEIN_QUEUE_SIZE 64 ++ + typedef struct mem_event { + domid_t domain_id; + xc_evtchn *xce_handle; +@@ -49,6 +51,7 @@ typedef struct xenpaging { + mem_event_t mem_event; + int num_pages; + int policy_mru_size; ++ unsigned long pagein_queue[XENPAGING_PAGEIN_QUEUE_SIZE]; + } xenpaging_t; + + +@@ -58,8 +61,8 @@ typedef struct xenpaging_victim { + } xenpaging_victim_t; + + +-extern void create_page_in_thread(domid_t domain_id, xc_interface *xch); +-extern void page_in_trigger(unsigned long gfn); ++extern void create_page_in_thread(xenpaging_t *paging); ++extern void page_in_trigger(void); + + #endif // __XEN_PAGING_H__ + diff --git a/23841-mem_event_pass_mem_event_domain_pointer_to_mem_event_functions.patch b/23841-mem_event_pass_mem_event_domain_pointer_to_mem_event_functions.patch new file mode 100644 index 0000000..c50c213 --- /dev/null +++ b/23841-mem_event_pass_mem_event_domain_pointer_to_mem_event_functions.patch @@ -0,0 +1,405 @@ +changeset: 23841:ed7586b1d515 +user: Olaf Hering +date: Fri Sep 16 12:13:31 2011 +0100 +files: xen/arch/x86/hvm/hvm.c xen/arch/x86/mm/mem_event.c xen/arch/x86/mm/mem_sharing.c xen/arch/x86/mm/p2m.c xen/include/asm-x86/mem_event.h +description: +mem_event: pass mem_event_domain pointer to mem_event functions + +Pass a struct mem_event_domain pointer to the various mem_event +functions. This will be used in a subsequent patch which creates +different ring buffers for the memshare, xenpaging and memaccess +functionality. + +Remove the struct domain argument from some functions. 
+ +Signed-off-by: Olaf Hering +Acked-by: Tim Deegan +Committed-by: Tim Deegan + + +--- + xen/arch/x86/hvm/hvm.c | 4 - + xen/arch/x86/mm/mem_event.c | 95 ++++++++++++++++++++-------------------- + xen/arch/x86/mm/mem_sharing.c | 6 +- + xen/arch/x86/mm/p2m.c | 18 +++---- + xen/include/asm-x86/mem_event.h | 8 +-- + 5 files changed, 66 insertions(+), 65 deletions(-) + +Index: xen-4.1.2-testing/xen/arch/x86/hvm/hvm.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/hvm/hvm.c ++++ xen-4.1.2-testing/xen/arch/x86/hvm/hvm.c +@@ -3909,7 +3909,7 @@ static int hvm_memory_event_traps(long p + if ( (p & HVMPME_onchangeonly) && (value == old) ) + return 1; + +- rc = mem_event_check_ring(d); ++ rc = mem_event_check_ring(d, &d->mem_event); + if ( rc ) + return rc; + +@@ -3932,7 +3932,7 @@ static int hvm_memory_event_traps(long p + req.gla_valid = 1; + } + +- mem_event_put_request(d, &req); ++ mem_event_put_request(d, &d->mem_event, &req); + + return 1; + } +Index: xen-4.1.2-testing/xen/arch/x86/mm/mem_event.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/mm/mem_event.c ++++ xen-4.1.2-testing/xen/arch/x86/mm/mem_event.c +@@ -33,21 +33,21 @@ + #define xen_rmb() rmb() + #define xen_wmb() wmb() + +-#define mem_event_ring_lock_init(_d) spin_lock_init(&(_d)->mem_event.ring_lock) +-#define mem_event_ring_lock(_d) spin_lock(&(_d)->mem_event.ring_lock) +-#define mem_event_ring_unlock(_d) spin_unlock(&(_d)->mem_event.ring_lock) ++#define mem_event_ring_lock_init(_med) spin_lock_init(&(_med)->ring_lock) ++#define mem_event_ring_lock(_med) spin_lock(&(_med)->ring_lock) ++#define mem_event_ring_unlock(_med) spin_unlock(&(_med)->ring_lock) + +-static int mem_event_enable(struct domain *d, mfn_t ring_mfn, mfn_t shared_mfn) ++static int mem_event_enable(struct domain *d, struct mem_event_domain *med, mfn_t ring_mfn, mfn_t shared_mfn) + { + int rc; + + /* Map ring and shared pages */ +- d->mem_event.ring_page = map_domain_page(mfn_x(ring_mfn)); +- if ( d->mem_event.ring_page == NULL ) ++ med->ring_page = map_domain_page(mfn_x(ring_mfn)); ++ if ( med->ring_page == NULL ) + goto err; + +- d->mem_event.shared_page = map_domain_page(mfn_x(shared_mfn)); +- if ( d->mem_event.shared_page == NULL ) ++ med->shared_page = map_domain_page(mfn_x(shared_mfn)); ++ if ( med->shared_page == NULL ) + goto err_ring; + + /* Allocate event channel */ +@@ -56,15 +56,15 @@ static int mem_event_enable(struct domai + if ( rc < 0 ) + goto err_shared; + +- ((mem_event_shared_page_t *)d->mem_event.shared_page)->port = rc; +- d->mem_event.xen_port = rc; ++ ((mem_event_shared_page_t *)med->shared_page)->port = rc; ++ med->xen_port = rc; + + /* Prepare ring buffer */ +- FRONT_RING_INIT(&d->mem_event.front_ring, +- (mem_event_sring_t *)d->mem_event.ring_page, ++ FRONT_RING_INIT(&med->front_ring, ++ (mem_event_sring_t *)med->ring_page, + PAGE_SIZE); + +- mem_event_ring_lock_init(d); ++ mem_event_ring_lock_init(med); + + /* Wake any VCPUs paused for memory events */ + mem_event_unpause_vcpus(d); +@@ -72,34 +72,34 @@ static int mem_event_enable(struct domai + return 0; + + err_shared: +- unmap_domain_page(d->mem_event.shared_page); +- d->mem_event.shared_page = NULL; ++ unmap_domain_page(med->shared_page); ++ med->shared_page = NULL; + err_ring: +- unmap_domain_page(d->mem_event.ring_page); +- d->mem_event.ring_page = NULL; ++ unmap_domain_page(med->ring_page); ++ med->ring_page = NULL; + err: + return 1; + } + +-static int 
mem_event_disable(struct domain *d) ++static int mem_event_disable(struct mem_event_domain *med) + { +- unmap_domain_page(d->mem_event.ring_page); +- d->mem_event.ring_page = NULL; ++ unmap_domain_page(med->ring_page); ++ med->ring_page = NULL; + +- unmap_domain_page(d->mem_event.shared_page); +- d->mem_event.shared_page = NULL; ++ unmap_domain_page(med->shared_page); ++ med->shared_page = NULL; + + return 0; + } + +-void mem_event_put_request(struct domain *d, mem_event_request_t *req) ++void mem_event_put_request(struct domain *d, struct mem_event_domain *med, mem_event_request_t *req) + { + mem_event_front_ring_t *front_ring; + RING_IDX req_prod; + +- mem_event_ring_lock(d); ++ mem_event_ring_lock(med); + +- front_ring = &d->mem_event.front_ring; ++ front_ring = &med->front_ring; + req_prod = front_ring->req_prod_pvt; + + /* Copy request */ +@@ -107,23 +107,23 @@ void mem_event_put_request(struct domain + req_prod++; + + /* Update ring */ +- d->mem_event.req_producers--; ++ med->req_producers--; + front_ring->req_prod_pvt = req_prod; + RING_PUSH_REQUESTS(front_ring); + +- mem_event_ring_unlock(d); ++ mem_event_ring_unlock(med); + +- notify_via_xen_event_channel(d, d->mem_event.xen_port); ++ notify_via_xen_event_channel(d, med->xen_port); + } + +-void mem_event_get_response(struct domain *d, mem_event_response_t *rsp) ++void mem_event_get_response(struct mem_event_domain *med, mem_event_response_t *rsp) + { + mem_event_front_ring_t *front_ring; + RING_IDX rsp_cons; + +- mem_event_ring_lock(d); ++ mem_event_ring_lock(med); + +- front_ring = &d->mem_event.front_ring; ++ front_ring = &med->front_ring; + rsp_cons = front_ring->rsp_cons; + + /* Copy response */ +@@ -134,7 +134,7 @@ void mem_event_get_response(struct domai + front_ring->rsp_cons = rsp_cons; + front_ring->sring->rsp_event = rsp_cons + 1; + +- mem_event_ring_unlock(d); ++ mem_event_ring_unlock(med); + } + + void mem_event_unpause_vcpus(struct domain *d) +@@ -152,35 +152,35 @@ void mem_event_mark_and_pause(struct vcp + vcpu_sleep_nosync(v); + } + +-void mem_event_put_req_producers(struct domain *d) ++void mem_event_put_req_producers(struct mem_event_domain *med) + { +- mem_event_ring_lock(d); +- d->mem_event.req_producers--; +- mem_event_ring_unlock(d); ++ mem_event_ring_lock(med); ++ med->req_producers--; ++ mem_event_ring_unlock(med); + } + +-int mem_event_check_ring(struct domain *d) ++int mem_event_check_ring(struct domain *d, struct mem_event_domain *med) + { + struct vcpu *curr = current; + int free_requests; + int ring_full = 1; + +- if ( !d->mem_event.ring_page ) ++ if ( !med->ring_page ) + return -1; + +- mem_event_ring_lock(d); ++ mem_event_ring_lock(med); + +- free_requests = RING_FREE_REQUESTS(&d->mem_event.front_ring); +- if ( d->mem_event.req_producers < free_requests ) ++ free_requests = RING_FREE_REQUESTS(&med->front_ring); ++ if ( med->req_producers < free_requests ) + { +- d->mem_event.req_producers++; ++ med->req_producers++; + ring_full = 0; + } + +- if ( (curr->domain->domain_id == d->domain_id) && ring_full ) ++ if ( ring_full && (curr->domain == d) ) + mem_event_mark_and_pause(curr); + +- mem_event_ring_unlock(d); ++ mem_event_ring_unlock(med); + + return ring_full; + } +@@ -230,6 +230,7 @@ int mem_event_domctl(struct domain *d, x + { + struct domain *dom_mem_event = current->domain; + struct vcpu *v = current; ++ struct mem_event_domain *med = &d->mem_event; + unsigned long ring_addr = mec->ring_addr; + unsigned long shared_addr = mec->shared_addr; + l1_pgentry_t l1e; +@@ -242,7 +243,7 @@ int 
mem_event_domctl(struct domain *d, x + * the cache is in an undefined state and so is the guest + */ + rc = -EBUSY; +- if ( d->mem_event.ring_page ) ++ if ( med->ring_page ) + break; + + /* Currently only EPT is supported */ +@@ -270,7 +271,7 @@ int mem_event_domctl(struct domain *d, x + break; + + rc = -EINVAL; +- if ( mem_event_enable(d, ring_mfn, shared_mfn) != 0 ) ++ if ( mem_event_enable(d, med, ring_mfn, shared_mfn) != 0 ) + break; + + rc = 0; +@@ -279,7 +280,7 @@ int mem_event_domctl(struct domain *d, x + + case XEN_DOMCTL_MEM_EVENT_OP_DISABLE: + { +- rc = mem_event_disable(d); ++ rc = mem_event_disable(&d->mem_event); + } + break; + +Index: xen-4.1.2-testing/xen/arch/x86/mm/mem_sharing.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/mm/mem_sharing.c ++++ xen-4.1.2-testing/xen/arch/x86/mm/mem_sharing.c +@@ -322,12 +322,12 @@ static struct page_info* mem_sharing_all + req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED; + } + +- if(mem_event_check_ring(d)) return page; ++ if(mem_event_check_ring(d, &d->mem_event)) return page; + + req.gfn = gfn; + req.p2mt = p2m_ram_shared; + req.vcpu_id = v->vcpu_id; +- mem_event_put_request(d, &req); ++ mem_event_put_request(d, &d->mem_event, &req); + + return page; + } +@@ -342,7 +342,7 @@ int mem_sharing_sharing_resume(struct do + mem_event_response_t rsp; + + /* Get request off the ring */ +- mem_event_get_response(d, &rsp); ++ mem_event_get_response(&d->mem_event, &rsp); + + /* Unpause domain/vcpu */ + if( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED ) +Index: xen-4.1.2-testing/xen/arch/x86/mm/p2m.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/mm/p2m.c ++++ xen-4.1.2-testing/xen/arch/x86/mm/p2m.c +@@ -2923,7 +2923,7 @@ void p2m_mem_paging_drop_page(struct p2m + struct domain *d = p2m->domain; + + /* Check that there's space on the ring for this request */ +- if ( mem_event_check_ring(d) == 0) ++ if ( mem_event_check_ring(d, &d->mem_event) == 0) + { + /* Send release notification to pager */ + memset(&req, 0, sizeof(req)); +@@ -2931,7 +2931,7 @@ void p2m_mem_paging_drop_page(struct p2m + req.gfn = gfn; + req.vcpu_id = v->vcpu_id; + +- mem_event_put_request(d, &req); ++ mem_event_put_request(d, &d->mem_event, &req); + } + } + +@@ -2943,7 +2943,7 @@ void p2m_mem_paging_populate(struct p2m_ + struct domain *d = p2m->domain; + + /* Check that there's space on the ring for this request */ +- if ( mem_event_check_ring(d) ) ++ if ( mem_event_check_ring(d, &d->mem_event) ) + return; + + memset(&req, 0, sizeof(req)); +@@ -2970,7 +2970,7 @@ void p2m_mem_paging_populate(struct p2m_ + else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged ) + { + /* gfn is already on its way back and vcpu is not paused */ +- mem_event_put_req_producers(d); ++ mem_event_put_req_producers(&d->mem_event); + return; + } + +@@ -2979,7 +2979,7 @@ void p2m_mem_paging_populate(struct p2m_ + req.p2mt = p2mt; + req.vcpu_id = v->vcpu_id; + +- mem_event_put_request(d, &req); ++ mem_event_put_request(d, &d->mem_event, &req); + } + + int p2m_mem_paging_prep(struct p2m_domain *p2m, unsigned long gfn) +@@ -3008,7 +3008,7 @@ void p2m_mem_paging_resume(struct p2m_do + mfn_t mfn; + + /* Pull the response off the ring */ +- mem_event_get_response(d, &rsp); ++ mem_event_get_response(&d->mem_event, &rsp); + + /* Fix p2m entry if the page was not dropped */ + if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) ) +@@ -3055,7 +3055,7 @@ void p2m_mem_access_check(unsigned long + 
p2m_unlock(p2m); + + /* Otherwise, check if there is a memory event listener, and send the message along */ +- res = mem_event_check_ring(d); ++ res = mem_event_check_ring(d, &d->mem_event); + if ( res < 0 ) + { + /* No listener */ +@@ -3099,7 +3099,7 @@ void p2m_mem_access_check(unsigned long + + req.vcpu_id = v->vcpu_id; + +- mem_event_put_request(d, &req); ++ mem_event_put_request(d, &d->mem_event, &req); + + /* VCPU paused, mem event request sent */ + } +@@ -3109,7 +3109,7 @@ void p2m_mem_access_resume(struct p2m_do + struct domain *d = p2m->domain; + mem_event_response_t rsp; + +- mem_event_get_response(d, &rsp); ++ mem_event_get_response(&d->mem_event, &rsp); + + /* Unpause domain */ + if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED ) +Index: xen-4.1.2-testing/xen/include/asm-x86/mem_event.h +=================================================================== +--- xen-4.1.2-testing.orig/xen/include/asm-x86/mem_event.h ++++ xen-4.1.2-testing/xen/include/asm-x86/mem_event.h +@@ -26,10 +26,10 @@ + + /* Pauses VCPU while marking pause flag for mem event */ + void mem_event_mark_and_pause(struct vcpu *v); +-int mem_event_check_ring(struct domain *d); +-void mem_event_put_req_producers(struct domain *d); +-void mem_event_put_request(struct domain *d, mem_event_request_t *req); +-void mem_event_get_response(struct domain *d, mem_event_response_t *rsp); ++int mem_event_check_ring(struct domain *d, struct mem_event_domain *med); ++void mem_event_put_req_producers(struct mem_event_domain *med); ++void mem_event_put_request(struct domain *d, struct mem_event_domain *med, mem_event_request_t *req); ++void mem_event_get_response(struct mem_event_domain *med, mem_event_response_t *rsp); + void mem_event_unpause_vcpus(struct domain *d); + + int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec, diff --git a/23842-mem_event_use_different_ringbuffers_for_share_paging_and_access.patch b/23842-mem_event_use_different_ringbuffers_for_share_paging_and_access.patch new file mode 100644 index 0000000..36a3a03 --- /dev/null +++ b/23842-mem_event_use_different_ringbuffers_for_share_paging_and_access.patch @@ -0,0 +1,868 @@ +changeset: 23842:483c5f8319ad +user: Olaf Hering +date: Fri Sep 16 12:19:26 2011 +0100 +files: tools/libxc/Makefile tools/libxc/xc_mem_access.c tools/libxc/xc_mem_event.c tools/libxc/xc_mem_paging.c tools/libxc/xc_memshr.c tools/libxc/xenctrl.h tools/tests/xen-access/xen-access.c tools/xenpaging/xenpaging.c xen/arch/ia64/xen/dom0_ops.c xen/arch/x86/hvm/hvm.c xen/arch/x86/mm/mem_event.c xen/arch/x86/mm/mem_paging.c xen/arch/x86/mm/mem_sharing.c xen/arch/x86/mm/p2m.c xen/include/public/domctl.h xen/include/xen/sched.h +description: +mem_event: use different ringbuffers for share, paging and access + +Up to now a single ring buffer was used for mem_share, xenpaging and +xen-access. Each helper would have to cooperate and pull only its own +requests from the ring. Unfortunately this was not implemented. And +even if it was, it would make the whole concept fragile because a crash +or early exit of one helper would stall the others. + +What happend up to now is that active xenpaging + memory_sharing would +push memsharing requests in the buffer. xenpaging is not prepared for +such requests. + +This patch creates an independet ring buffer for mem_share, xenpaging +and xen-access and adds also new functions to enable xenpaging and +xen-access. The xc_mem_event_enable/xc_mem_event_disable functions will +be removed. The various XEN_DOMCTL_MEM_EVENT_* macros were cleaned up. 
+Due to the removal the API changed, so the SONAME will be changed too. + +Signed-off-by: Olaf Hering +Acked-by: Tim Deegan +Acked-by: Ian Jackson +Committed-by: Tim Deegan + + +--- + tools/libxc/Makefile | 2 + tools/libxc/xc_mem_access.c | 21 ++++ + tools/libxc/xc_mem_event.c | 15 --- + tools/libxc/xc_mem_paging.c | 33 +++++-- + tools/libxc/xc_memshr.c | 16 +-- + tools/libxc/xenctrl.h | 9 + + tools/tests/xen-access/xen-access.c | 4 + tools/xenpaging/xenpaging.c | 4 + xen/arch/ia64/xen/dom0_ops.c | 2 + xen/arch/x86/hvm/hvm.c | 4 + xen/arch/x86/mm/mem_event.c | 163 ++++++++++++++++++++---------------- + xen/arch/x86/mm/mem_sharing.c | 22 ++-- + xen/arch/x86/mm/p2m.c | 18 +-- + xen/include/public/domctl.h | 43 +++++---- + xen/include/xen/sched.h | 6 + + 15 files changed, 206 insertions(+), 156 deletions(-) + +Index: xen-4.1.2-testing/tools/libxc/Makefile +=================================================================== +--- xen-4.1.2-testing.orig/tools/libxc/Makefile ++++ xen-4.1.2-testing/tools/libxc/Makefile +@@ -1,7 +1,7 @@ + XEN_ROOT = $(CURDIR)/../.. + include $(XEN_ROOT)/tools/Rules.mk + +-MAJOR = 4.0 ++MAJOR = 4.2 + MINOR = 0 + + CTRL_SRCS-y := +Index: xen-4.1.2-testing/tools/libxc/xc_mem_access.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/libxc/xc_mem_access.c ++++ xen-4.1.2-testing/tools/libxc/xc_mem_access.c +@@ -24,12 +24,29 @@ + #include "xc_private.h" + + ++int xc_mem_access_enable(xc_interface *xch, domid_t domain_id, ++ void *shared_page, void *ring_page) ++{ ++ return xc_mem_event_control(xch, domain_id, ++ XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE, ++ XEN_DOMCTL_MEM_EVENT_OP_ACCESS, ++ shared_page, ring_page, INVALID_MFN); ++} ++ ++int xc_mem_access_disable(xc_interface *xch, domid_t domain_id) ++{ ++ return xc_mem_event_control(xch, domain_id, ++ XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE, ++ XEN_DOMCTL_MEM_EVENT_OP_ACCESS, ++ NULL, NULL, INVALID_MFN); ++} ++ + int xc_mem_access_resume(xc_interface *xch, domid_t domain_id, unsigned long gfn) + { + return xc_mem_event_control(xch, domain_id, + XEN_DOMCTL_MEM_EVENT_OP_ACCESS_RESUME, +- XEN_DOMCTL_MEM_EVENT_OP_ACCESS, NULL, NULL, +- gfn); ++ XEN_DOMCTL_MEM_EVENT_OP_ACCESS, ++ NULL, NULL, gfn); + } + + /* +Index: xen-4.1.2-testing/tools/libxc/xc_mem_event.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/libxc/xc_mem_event.c ++++ xen-4.1.2-testing/tools/libxc/xc_mem_event.c +@@ -42,18 +42,3 @@ int xc_mem_event_control(xc_interface *x + return do_domctl(xch, &domctl); + } + +-int xc_mem_event_enable(xc_interface *xch, domid_t domain_id, +- void *shared_page, void *ring_page) +-{ +- return xc_mem_event_control(xch, domain_id, +- XEN_DOMCTL_MEM_EVENT_OP_ENABLE, 0, +- shared_page, ring_page, INVALID_MFN); +-} +- +-int xc_mem_event_disable(xc_interface *xch, domid_t domain_id) +-{ +- return xc_mem_event_control(xch, domain_id, +- XEN_DOMCTL_MEM_EVENT_OP_DISABLE, 0, +- NULL, NULL, INVALID_MFN); +-} +- +Index: xen-4.1.2-testing/tools/libxc/xc_mem_paging.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/libxc/xc_mem_paging.c ++++ xen-4.1.2-testing/tools/libxc/xc_mem_paging.c +@@ -24,36 +24,53 @@ + #include "xc_private.h" + + ++int xc_mem_paging_enable(xc_interface *xch, domid_t domain_id, ++ void *shared_page, void *ring_page) ++{ ++ return xc_mem_event_control(xch, domain_id, ++ XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE, ++ XEN_DOMCTL_MEM_EVENT_OP_PAGING, ++ shared_page, 
ring_page, INVALID_MFN); ++} ++ ++int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id) ++{ ++ return xc_mem_event_control(xch, domain_id, ++ XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE, ++ XEN_DOMCTL_MEM_EVENT_OP_PAGING, ++ NULL, NULL, INVALID_MFN); ++} ++ + int xc_mem_paging_nominate(xc_interface *xch, domid_t domain_id, unsigned long gfn) + { + return xc_mem_event_control(xch, domain_id, + XEN_DOMCTL_MEM_EVENT_OP_PAGING_NOMINATE, +- XEN_DOMCTL_MEM_EVENT_OP_PAGING, NULL, NULL, +- gfn); ++ XEN_DOMCTL_MEM_EVENT_OP_PAGING, ++ NULL, NULL, gfn); + } + + int xc_mem_paging_evict(xc_interface *xch, domid_t domain_id, unsigned long gfn) + { + return xc_mem_event_control(xch, domain_id, + XEN_DOMCTL_MEM_EVENT_OP_PAGING_EVICT, +- XEN_DOMCTL_MEM_EVENT_OP_PAGING, NULL, NULL, +- gfn); ++ XEN_DOMCTL_MEM_EVENT_OP_PAGING, ++ NULL, NULL, gfn); + } + + int xc_mem_paging_prep(xc_interface *xch, domid_t domain_id, unsigned long gfn) + { + return xc_mem_event_control(xch, domain_id, + XEN_DOMCTL_MEM_EVENT_OP_PAGING_PREP, +- XEN_DOMCTL_MEM_EVENT_OP_PAGING, NULL, NULL, +- gfn); ++ XEN_DOMCTL_MEM_EVENT_OP_PAGING, ++ NULL, NULL, gfn); + } + + int xc_mem_paging_resume(xc_interface *xch, domid_t domain_id, unsigned long gfn) + { + return xc_mem_event_control(xch, domain_id, + XEN_DOMCTL_MEM_EVENT_OP_PAGING_RESUME, +- XEN_DOMCTL_MEM_EVENT_OP_PAGING, NULL, NULL, +- gfn); ++ XEN_DOMCTL_MEM_EVENT_OP_PAGING, ++ NULL, NULL, gfn); + } + + +Index: xen-4.1.2-testing/tools/libxc/xc_memshr.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/libxc/xc_memshr.c ++++ xen-4.1.2-testing/tools/libxc/xc_memshr.c +@@ -36,7 +36,7 @@ int xc_memshr_control(xc_interface *xch, + domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION; + domctl.domain = (domid_t)domid; + op = &(domctl.u.mem_sharing_op); +- op->op = XEN_DOMCTL_MEM_SHARING_OP_CONTROL; ++ op->op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_CONTROL; + op->u.enable = enable; + + return do_domctl(xch, &domctl); +@@ -55,7 +55,7 @@ int xc_memshr_nominate_gfn(xc_interface + domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION; + domctl.domain = (domid_t)domid; + op = &(domctl.u.mem_sharing_op); +- op->op = XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GFN; ++ op->op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_NOMINATE_GFN; + op->u.nominate.u.gfn = gfn; + + ret = do_domctl(xch, &domctl); +@@ -77,7 +77,7 @@ int xc_memshr_nominate_gref(xc_interface + domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION; + domctl.domain = (domid_t)domid; + op = &(domctl.u.mem_sharing_op); +- op->op = XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GREF; ++ op->op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_NOMINATE_GREF; + op->u.nominate.u.grant_ref = gref; + + ret = do_domctl(xch, &domctl); +@@ -97,7 +97,7 @@ int xc_memshr_share(xc_interface *xch, + domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION; + domctl.domain = 0; + op = &(domctl.u.mem_sharing_op); +- op->op = XEN_DOMCTL_MEM_SHARING_OP_SHARE; ++ op->op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_SHARE; + op->u.share.source_handle = source_handle; + op->u.share.client_handle = client_handle; + +@@ -114,7 +114,7 @@ int xc_memshr_domain_resume(xc_interface + domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION; + domctl.domain = (domid_t)domid; + op = &(domctl.u.mem_sharing_op); +- op->op = XEN_DOMCTL_MEM_SHARING_OP_RESUME; ++ op->op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_RESUME; + + return do_domctl(xch, &domctl); + } +@@ -130,7 +130,7 @@ int xc_memshr_debug_gfn(xc_interface *xc + domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION; + 
domctl.domain = (domid_t)domid; + op = &(domctl.u.mem_sharing_op); +- op->op = XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GFN; ++ op->op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_GFN; + op->u.debug.u.gfn = gfn; + + return do_domctl(xch, &domctl); +@@ -147,7 +147,7 @@ int xc_memshr_debug_mfn(xc_interface *xc + domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION; + domctl.domain = (domid_t)domid; + op = &(domctl.u.mem_sharing_op); +- op->op = XEN_DOMCTL_MEM_SHARING_OP_DEBUG_MFN; ++ op->op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_MFN; + op->u.debug.u.mfn = mfn; + + return do_domctl(xch, &domctl); +@@ -164,7 +164,7 @@ int xc_memshr_debug_gref(xc_interface *x + domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION; + domctl.domain = (domid_t)domid; + op = &(domctl.u.mem_sharing_op); +- op->op = XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GREF; ++ op->op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_GREF; + op->u.debug.u.gref = gref; + + return do_domctl(xch, &domctl); +Index: xen-4.1.2-testing/tools/libxc/xenctrl.h +=================================================================== +--- xen-4.1.2-testing.orig/tools/libxc/xenctrl.h ++++ xen-4.1.2-testing/tools/libxc/xenctrl.h +@@ -1734,16 +1734,19 @@ int xc_mem_event_control(xc_interface *x + unsigned int mode, void *shared_page, + void *ring_page, unsigned long gfn); + +-int xc_mem_event_enable(xc_interface *xch, domid_t domain_id, ++int xc_mem_paging_enable(xc_interface *xch, domid_t domain_id, + void *shared_page, void *ring_page); +-int xc_mem_event_disable(xc_interface *xch, domid_t domain_id); +- ++int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id); + int xc_mem_paging_nominate(xc_interface *xch, domid_t domain_id, + unsigned long gfn); + int xc_mem_paging_evict(xc_interface *xch, domid_t domain_id, unsigned long gfn); + int xc_mem_paging_prep(xc_interface *xch, domid_t domain_id, unsigned long gfn); + int xc_mem_paging_resume(xc_interface *xch, domid_t domain_id, + unsigned long gfn); ++ ++int xc_mem_access_enable(xc_interface *xch, domid_t domain_id, ++ void *shared_page, void *ring_page); ++int xc_mem_access_disable(xc_interface *xch, domid_t domain_id); + int xc_mem_access_resume(xc_interface *xch, domid_t domain_id, + unsigned long gfn); + +Index: xen-4.1.2-testing/tools/tests/xen-access/xen-access.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/tests/xen-access/xen-access.c ++++ xen-4.1.2-testing/tools/tests/xen-access/xen-access.c +@@ -241,7 +241,7 @@ xenaccess_t *xenaccess_init(xc_interface + mem_event_ring_lock_init(&xenaccess->mem_event); + + /* Initialise Xen */ +- rc = xc_mem_event_enable(xenaccess->xc_handle, xenaccess->mem_event.domain_id, ++ rc = xc_mem_access_enable(xenaccess->xc_handle, xenaccess->mem_event.domain_id, + xenaccess->mem_event.shared_page, + xenaccess->mem_event.ring_page); + if ( rc != 0 ) +@@ -351,7 +351,7 @@ int xenaccess_teardown(xc_interface *xch + return 0; + + /* Tear down domain xenaccess in Xen */ +- rc = xc_mem_event_disable(xenaccess->xc_handle, xenaccess->mem_event.domain_id); ++ rc = xc_mem_access_disable(xenaccess->xc_handle, xenaccess->mem_event.domain_id); + if ( rc != 0 ) + { + ERROR("Error tearing down domain xenaccess in xen"); +Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c ++++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c +@@ -234,7 +234,7 @@ static xenpaging_t *xenpaging_init(domid + PAGE_SIZE); + + /* Initialise 
Xen */ +- rc = xc_mem_event_enable(xch, paging->mem_event.domain_id, ++ rc = xc_mem_paging_enable(xch, paging->mem_event.domain_id, + paging->mem_event.shared_page, + paging->mem_event.ring_page); + if ( rc != 0 ) +@@ -353,7 +353,7 @@ static int xenpaging_teardown(xenpaging_ + xch = paging->xc_handle; + paging->xc_handle = NULL; + /* Tear down domain paging in Xen */ +- rc = xc_mem_event_disable(xch, paging->mem_event.domain_id); ++ rc = xc_mem_paging_disable(xch, paging->mem_event.domain_id); + if ( rc != 0 ) + { + ERROR("Error tearing down domain paging in xen"); +Index: xen-4.1.2-testing/xen/arch/ia64/xen/dom0_ops.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/ia64/xen/dom0_ops.c ++++ xen-4.1.2-testing/xen/arch/ia64/xen/dom0_ops.c +@@ -688,7 +688,7 @@ long arch_do_domctl(xen_domctl_t *op, XE + + switch(mec->op) + { +- case XEN_DOMCTL_MEM_SHARING_OP_CONTROL: ++ case XEN_DOMCTL_MEM_EVENT_OP_SHARING_CONTROL: + { + if (mec->u.enable) { + ret = -EINVAL; /* not implemented */ +Index: xen-4.1.2-testing/xen/arch/x86/hvm/hvm.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/hvm/hvm.c ++++ xen-4.1.2-testing/xen/arch/x86/hvm/hvm.c +@@ -3909,7 +3909,7 @@ static int hvm_memory_event_traps(long p + if ( (p & HVMPME_onchangeonly) && (value == old) ) + return 1; + +- rc = mem_event_check_ring(d, &d->mem_event); ++ rc = mem_event_check_ring(d, &d->mem_access); + if ( rc ) + return rc; + +@@ -3932,7 +3932,7 @@ static int hvm_memory_event_traps(long p + req.gla_valid = 1; + } + +- mem_event_put_request(d, &d->mem_event, &req); ++ mem_event_put_request(d, &d->mem_access, &req); + + return 1; + } +Index: xen-4.1.2-testing/xen/arch/x86/mm/mem_event.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/mm/mem_event.c ++++ xen-4.1.2-testing/xen/arch/x86/mm/mem_event.c +@@ -37,24 +37,52 @@ + #define mem_event_ring_lock(_med) spin_lock(&(_med)->ring_lock) + #define mem_event_ring_unlock(_med) spin_unlock(&(_med)->ring_lock) + +-static int mem_event_enable(struct domain *d, struct mem_event_domain *med, mfn_t ring_mfn, mfn_t shared_mfn) ++static int mem_event_enable(struct domain *d, ++ xen_domctl_mem_event_op_t *mec, ++ struct mem_event_domain *med) + { + int rc; ++ struct domain *dom_mem_event = current->domain; ++ struct vcpu *v = current; ++ unsigned long ring_addr = mec->ring_addr; ++ unsigned long shared_addr = mec->shared_addr; ++ l1_pgentry_t l1e; ++ unsigned long gfn; ++ p2m_type_t p2mt; ++ mfn_t ring_mfn; ++ mfn_t shared_mfn; ++ ++ /* Only one helper at a time. If the helper crashed, ++ * the ring is in an undefined state and so is the guest. 
++ */ ++ if ( med->ring_page ) ++ return -EBUSY; ++ ++ /* Get MFN of ring page */ ++ guest_get_eff_l1e(v, ring_addr, &l1e); ++ gfn = l1e_get_pfn(l1e); ++ ring_mfn = gfn_to_mfn(p2m_get_hostp2m(dom_mem_event), gfn, &p2mt); ++ ++ if ( unlikely(!mfn_valid(mfn_x(ring_mfn))) ) ++ return -EINVAL; ++ ++ /* Get MFN of shared page */ ++ guest_get_eff_l1e(v, shared_addr, &l1e); ++ gfn = l1e_get_pfn(l1e); ++ shared_mfn = gfn_to_mfn(p2m_get_hostp2m(dom_mem_event), gfn, &p2mt); ++ ++ if ( unlikely(!mfn_valid(mfn_x(shared_mfn))) ) ++ return -EINVAL; + + /* Map ring and shared pages */ + med->ring_page = map_domain_page(mfn_x(ring_mfn)); +- if ( med->ring_page == NULL ) +- goto err; +- + med->shared_page = map_domain_page(mfn_x(shared_mfn)); +- if ( med->shared_page == NULL ) +- goto err_ring; + + /* Allocate event channel */ + rc = alloc_unbound_xen_event_channel(d->vcpu[0], + current->domain->domain_id); + if ( rc < 0 ) +- goto err_shared; ++ goto err; + + ((mem_event_shared_page_t *)med->shared_page)->port = rc; + med->xen_port = rc; +@@ -71,14 +99,14 @@ static int mem_event_enable(struct domai + + return 0; + +- err_shared: ++ err: + unmap_domain_page(med->shared_page); + med->shared_page = NULL; +- err_ring: ++ + unmap_domain_page(med->ring_page); + med->ring_page = NULL; +- err: +- return 1; ++ ++ return rc; + } + + static int mem_event_disable(struct mem_event_domain *med) +@@ -220,86 +248,79 @@ int mem_event_domctl(struct domain *d, x + + rc = -ENOSYS; + +- switch ( mec-> mode ) ++ switch ( mec->mode ) + { +- case 0: ++ case XEN_DOMCTL_MEM_EVENT_OP_PAGING: + { ++ struct mem_event_domain *med = &d->mem_paging; ++ rc = -ENODEV; ++ /* Only HAP is supported */ ++ if ( !hap_enabled(d) ) ++ break; ++ ++ /* Currently only EPT is supported */ ++ if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ) ++ break; ++ + switch( mec->op ) + { +- case XEN_DOMCTL_MEM_EVENT_OP_ENABLE: ++ case XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE: + { +- struct domain *dom_mem_event = current->domain; +- struct vcpu *v = current; +- struct mem_event_domain *med = &d->mem_event; +- unsigned long ring_addr = mec->ring_addr; +- unsigned long shared_addr = mec->shared_addr; +- l1_pgentry_t l1e; +- unsigned long gfn; +- p2m_type_t p2mt; +- mfn_t ring_mfn; +- mfn_t shared_mfn; +- +- /* Only one xenpaging at a time. 
If xenpaging crashed, +- * the cache is in an undefined state and so is the guest +- */ +- rc = -EBUSY; +- if ( med->ring_page ) +- break; +- +- /* Currently only EPT is supported */ +- rc = -ENODEV; +- if ( !(hap_enabled(d) && +- (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)) ) +- break; +- +- /* Get MFN of ring page */ +- guest_get_eff_l1e(v, ring_addr, &l1e); +- gfn = l1e_get_pfn(l1e); +- ring_mfn = gfn_to_mfn(p2m_get_hostp2m(dom_mem_event), gfn, &p2mt); +- +- rc = -EINVAL; +- if ( unlikely(!mfn_valid(mfn_x(ring_mfn))) ) +- break; +- +- /* Get MFN of shared page */ +- guest_get_eff_l1e(v, shared_addr, &l1e); +- gfn = l1e_get_pfn(l1e); +- shared_mfn = gfn_to_mfn(p2m_get_hostp2m(dom_mem_event), gfn, &p2mt); +- +- rc = -EINVAL; +- if ( unlikely(!mfn_valid(mfn_x(shared_mfn))) ) +- break; +- +- rc = -EINVAL; +- if ( mem_event_enable(d, med, ring_mfn, shared_mfn) != 0 ) +- break; +- +- rc = 0; ++ rc = mem_event_enable(d, mec, med); + } + break; + +- case XEN_DOMCTL_MEM_EVENT_OP_DISABLE: ++ case XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE: + { +- rc = mem_event_disable(&d->mem_event); ++ rc = mem_event_disable(med); + } + break; + + default: +- rc = -ENOSYS; +- break; ++ { ++ if ( med->ring_page ) ++ rc = mem_paging_domctl(d, mec, u_domctl); + } + break; ++ } + } +- case XEN_DOMCTL_MEM_EVENT_OP_PAGING: +- { +- rc = mem_paging_domctl(d, mec, u_domctl); +- break; +- } ++ break; ++ + case XEN_DOMCTL_MEM_EVENT_OP_ACCESS: + { +- rc = mem_access_domctl(d, mec, u_domctl); ++ struct mem_event_domain *med = &d->mem_access; ++ rc = -ENODEV; ++ /* Only HAP is supported */ ++ if ( !hap_enabled(d) ) ++ break; ++ ++ /* Currently only EPT is supported */ ++ if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ) ++ break; ++ ++ switch( mec->op ) ++ { ++ case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE: ++ { ++ rc = mem_event_enable(d, mec, med); ++ } ++ break; ++ ++ case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE: ++ { ++ rc = mem_event_disable(&d->mem_access); ++ } ++ break; ++ ++ default: ++ { ++ if ( med->ring_page ) ++ rc = mem_access_domctl(d, mec, u_domctl); ++ } + break; ++ } + } ++ break; + } + + return rc; +Index: xen-4.1.2-testing/xen/arch/x86/mm/mem_sharing.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/mm/mem_sharing.c ++++ xen-4.1.2-testing/xen/arch/x86/mm/mem_sharing.c +@@ -322,12 +322,12 @@ static struct page_info* mem_sharing_all + req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED; + } + +- if(mem_event_check_ring(d, &d->mem_event)) return page; ++ if(mem_event_check_ring(d, &d->mem_share)) return page; + + req.gfn = gfn; + req.p2mt = p2m_ram_shared; + req.vcpu_id = v->vcpu_id; +- mem_event_put_request(d, &d->mem_event, &req); ++ mem_event_put_request(d, &d->mem_share, &req); + + return page; + } +@@ -342,7 +342,7 @@ int mem_sharing_sharing_resume(struct do + mem_event_response_t rsp; + + /* Get request off the ring */ +- mem_event_get_response(&d->mem_event, &rsp); ++ mem_event_get_response(&d->mem_share, &rsp); + + /* Unpause domain/vcpu */ + if( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED ) +@@ -739,7 +739,7 @@ int mem_sharing_domctl(struct domain *d, + + switch(mec->op) + { +- case XEN_DOMCTL_MEM_SHARING_OP_CONTROL: ++ case XEN_DOMCTL_MEM_EVENT_OP_SHARING_CONTROL: + { + d->arch.hvm_domain.mem_sharing_enabled = mec->u.enable; + mem_sharing_audit(); +@@ -747,7 +747,7 @@ int mem_sharing_domctl(struct domain *d, + } + break; + +- case XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GFN: ++ case XEN_DOMCTL_MEM_EVENT_OP_SHARING_NOMINATE_GFN: + { + unsigned long gfn = 
mec->u.nominate.u.gfn; + shr_handle_t handle; +@@ -759,7 +759,7 @@ int mem_sharing_domctl(struct domain *d, + } + break; + +- case XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GREF: ++ case XEN_DOMCTL_MEM_EVENT_OP_SHARING_NOMINATE_GREF: + { + grant_ref_t gref = mec->u.nominate.u.grant_ref; + unsigned long gfn; +@@ -776,7 +776,7 @@ int mem_sharing_domctl(struct domain *d, + } + break; + +- case XEN_DOMCTL_MEM_SHARING_OP_SHARE: ++ case XEN_DOMCTL_MEM_EVENT_OP_SHARING_SHARE: + { + shr_handle_t sh = mec->u.share.source_handle; + shr_handle_t ch = mec->u.share.client_handle; +@@ -785,7 +785,7 @@ int mem_sharing_domctl(struct domain *d, + } + break; + +- case XEN_DOMCTL_MEM_SHARING_OP_RESUME: ++ case XEN_DOMCTL_MEM_EVENT_OP_SHARING_RESUME: + { + if(!mem_sharing_enabled(d)) + return -EINVAL; +@@ -794,7 +794,7 @@ int mem_sharing_domctl(struct domain *d, + } + break; + +- case XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GFN: ++ case XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_GFN: + { + unsigned long gfn = mec->u.debug.u.gfn; + rc = mem_sharing_debug_gfn(d, gfn); +@@ -802,7 +802,7 @@ int mem_sharing_domctl(struct domain *d, + } + break; + +- case XEN_DOMCTL_MEM_SHARING_OP_DEBUG_MFN: ++ case XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_MFN: + { + unsigned long mfn = mec->u.debug.u.mfn; + rc = mem_sharing_debug_mfn(mfn); +@@ -810,7 +810,7 @@ int mem_sharing_domctl(struct domain *d, + } + break; + +- case XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GREF: ++ case XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_GREF: + { + grant_ref_t gref = mec->u.debug.u.gref; + rc = mem_sharing_debug_gref(d, gref); +Index: xen-4.1.2-testing/xen/arch/x86/mm/p2m.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/mm/p2m.c ++++ xen-4.1.2-testing/xen/arch/x86/mm/p2m.c +@@ -2923,7 +2923,7 @@ void p2m_mem_paging_drop_page(struct p2m + struct domain *d = p2m->domain; + + /* Check that there's space on the ring for this request */ +- if ( mem_event_check_ring(d, &d->mem_event) == 0) ++ if ( mem_event_check_ring(d, &d->mem_paging) == 0) + { + /* Send release notification to pager */ + memset(&req, 0, sizeof(req)); +@@ -2931,7 +2931,7 @@ void p2m_mem_paging_drop_page(struct p2m + req.gfn = gfn; + req.vcpu_id = v->vcpu_id; + +- mem_event_put_request(d, &d->mem_event, &req); ++ mem_event_put_request(d, &d->mem_paging, &req); + } + } + +@@ -2943,7 +2943,7 @@ void p2m_mem_paging_populate(struct p2m_ + struct domain *d = p2m->domain; + + /* Check that there's space on the ring for this request */ +- if ( mem_event_check_ring(d, &d->mem_event) ) ++ if ( mem_event_check_ring(d, &d->mem_paging) ) + return; + + memset(&req, 0, sizeof(req)); +@@ -2970,7 +2970,7 @@ void p2m_mem_paging_populate(struct p2m_ + else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged ) + { + /* gfn is already on its way back and vcpu is not paused */ +- mem_event_put_req_producers(&d->mem_event); ++ mem_event_put_req_producers(&d->mem_paging); + return; + } + +@@ -2979,7 +2979,7 @@ void p2m_mem_paging_populate(struct p2m_ + req.p2mt = p2mt; + req.vcpu_id = v->vcpu_id; + +- mem_event_put_request(d, &d->mem_event, &req); ++ mem_event_put_request(d, &d->mem_paging, &req); + } + + int p2m_mem_paging_prep(struct p2m_domain *p2m, unsigned long gfn) +@@ -3008,7 +3008,7 @@ void p2m_mem_paging_resume(struct p2m_do + mfn_t mfn; + + /* Pull the response off the ring */ +- mem_event_get_response(&d->mem_event, &rsp); ++ mem_event_get_response(&d->mem_paging, &rsp); + + /* Fix p2m entry if the page was not dropped */ + if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) 
) +@@ -3055,7 +3055,7 @@ void p2m_mem_access_check(unsigned long + p2m_unlock(p2m); + + /* Otherwise, check if there is a memory event listener, and send the message along */ +- res = mem_event_check_ring(d, &d->mem_event); ++ res = mem_event_check_ring(d, &d->mem_access); + if ( res < 0 ) + { + /* No listener */ +@@ -3099,7 +3099,7 @@ void p2m_mem_access_check(unsigned long + + req.vcpu_id = v->vcpu_id; + +- mem_event_put_request(d, &d->mem_event, &req); ++ mem_event_put_request(d, &d->mem_access, &req); + + /* VCPU paused, mem event request sent */ + } +@@ -3109,7 +3109,7 @@ void p2m_mem_access_resume(struct p2m_do + struct domain *d = p2m->domain; + mem_event_response_t rsp; + +- mem_event_get_response(&d->mem_event, &rsp); ++ mem_event_get_response(&d->mem_access, &rsp); + + /* Unpause domain */ + if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED ) +Index: xen-4.1.2-testing/xen/include/public/domctl.h +=================================================================== +--- xen-4.1.2-testing.orig/xen/include/public/domctl.h ++++ xen-4.1.2-testing/xen/include/public/domctl.h +@@ -707,20 +707,18 @@ struct xen_domctl_gdbsx_domstatus { + + /* XEN_DOMCTL_mem_event_op */ + +-/* Add and remove memory handlers */ +-#define XEN_DOMCTL_MEM_EVENT_OP_ENABLE 0 +-#define XEN_DOMCTL_MEM_EVENT_OP_DISABLE 1 +- + /* ++* Domain memory paging + * Page memory in and out. + */ + #define XEN_DOMCTL_MEM_EVENT_OP_PAGING 1 + +-/* Domain memory paging */ +-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_NOMINATE 0 +-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_EVICT 1 +-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_PREP 2 +-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_RESUME 3 ++#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE 0 ++#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE 1 ++#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_NOMINATE 2 ++#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_EVICT 3 ++#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_PREP 4 ++#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_RESUME 5 + + /* + * Access permissions. +@@ -733,11 +731,14 @@ struct xen_domctl_gdbsx_domstatus { + * ACCESS_RESUME mode for the following domctl. 
+ */ + #define XEN_DOMCTL_MEM_EVENT_OP_ACCESS 2 +-#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_RESUME 0 ++ ++#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE 0 ++#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE 1 ++#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_RESUME 2 + + struct xen_domctl_mem_event_op { +- uint32_t op; /* XEN_DOMCTL_MEM_EVENT_OP_* */ +- uint32_t mode; /* XEN_DOMCTL_MEM_EVENT_ENABLE_* */ ++ uint32_t op; /* XEN_DOMCTL_MEM_EVENT_OP_*_* */ ++ uint32_t mode; /* XEN_DOMCTL_MEM_EVENT_OP_* */ + + /* OP_ENABLE */ + uint64_aligned_t shared_addr; /* IN: Virtual address of shared page */ +@@ -754,14 +755,16 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_e + */ + /* XEN_DOMCTL_mem_sharing_op */ + +-#define XEN_DOMCTL_MEM_SHARING_OP_CONTROL 0 +-#define XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GFN 1 +-#define XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GREF 2 +-#define XEN_DOMCTL_MEM_SHARING_OP_SHARE 3 +-#define XEN_DOMCTL_MEM_SHARING_OP_RESUME 4 +-#define XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GFN 5 +-#define XEN_DOMCTL_MEM_SHARING_OP_DEBUG_MFN 6 +-#define XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GREF 7 ++#define XEN_DOMCTL_MEM_EVENT_OP_SHARING 3 ++ ++#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_CONTROL 0 ++#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_NOMINATE_GFN 1 ++#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_NOMINATE_GREF 2 ++#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_SHARE 3 ++#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_RESUME 4 ++#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_GFN 5 ++#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_MFN 6 ++#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_GREF 7 + + #define XEN_DOMCTL_MEM_SHARING_S_HANDLE_INVALID (-10) + #define XEN_DOMCTL_MEM_SHARING_C_HANDLE_INVALID (-9) +Index: xen-4.1.2-testing/xen/include/xen/sched.h +=================================================================== +--- xen-4.1.2-testing.orig/xen/include/xen/sched.h ++++ xen-4.1.2-testing/xen/include/xen/sched.h +@@ -326,8 +326,12 @@ struct domain + /* Non-migratable and non-restoreable? */ + bool_t disable_migrate; + ++ /* Memory sharing support */ ++ struct mem_event_domain mem_share; + /* Memory paging support */ +- struct mem_event_domain mem_event; ++ struct mem_event_domain mem_paging; ++ /* Memory access support */ ++ struct mem_event_domain mem_access; + + /* Currently computed from union of all vcpu cpu-affinity masks. */ + nodemask_t node_affinity; diff --git a/23874-xenpaging_track_number_of_paged_pages_in_struct_domain.patch b/23874-xenpaging_track_number_of_paged_pages_in_struct_domain.patch new file mode 100644 index 0000000..5a6a33b --- /dev/null +++ b/23874-xenpaging_track_number_of_paged_pages_in_struct_domain.patch @@ -0,0 +1,122 @@ +changeset: 23874:651aed73b39c +user: Olaf Hering +date: Mon Sep 26 22:19:42 2011 +0100 +files: tools/libxc/xc_domain.c tools/libxc/xenctrl.h xen/arch/x86/mm/p2m.c xen/common/domctl.c xen/include/public/domctl.h xen/include/xen/sched.h +description: +xenpaging: track number of paged pages in struct domain + +The toolstack should know how many pages are paged-out at a given point +in time so it could make smarter decisions about how many pages should +be paged or ballooned. + +Add a new member to xen_domctl_getdomaininfo and bump interface version. +Use the new member in xc_dominfo_t. +The SONAME of libxc should be changed if this patch gets applied. 
+ +Signed-off-by: Olaf Hering +Acked-by: Ian Campbell +Acked-by: Tim Deegan +Committed-by: Tim Deegan + + +--- + tools/libxc/xc_domain.c | 1 + + tools/libxc/xenctrl.h | 1 + + xen/arch/x86/mm/p2m.c | 5 +++++ + xen/common/domctl.c | 1 + + xen/include/public/domctl.h | 3 ++- + xen/include/xen/sched.h | 1 + + 6 files changed, 11 insertions(+), 1 deletion(-) + +Index: xen-4.1.2-testing/tools/libxc/xc_domain.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/libxc/xc_domain.c ++++ xen-4.1.2-testing/tools/libxc/xc_domain.c +@@ -235,6 +235,7 @@ int xc_domain_getinfo(xc_interface *xch, + info->ssidref = domctl.u.getdomaininfo.ssidref; + info->nr_pages = domctl.u.getdomaininfo.tot_pages; + info->nr_shared_pages = domctl.u.getdomaininfo.shr_pages; ++ info->nr_paged_pages = domctl.u.getdomaininfo.paged_pages; + info->max_memkb = domctl.u.getdomaininfo.max_pages << (PAGE_SHIFT-10); + info->shared_info_frame = domctl.u.getdomaininfo.shared_info_frame; + info->cpu_time = domctl.u.getdomaininfo.cpu_time; +Index: xen-4.1.2-testing/tools/libxc/xenctrl.h +=================================================================== +--- xen-4.1.2-testing.orig/tools/libxc/xenctrl.h ++++ xen-4.1.2-testing/tools/libxc/xenctrl.h +@@ -353,6 +353,7 @@ typedef struct xc_dominfo { + unsigned int shutdown_reason; /* only meaningful if shutdown==1 */ + unsigned long nr_pages; /* current number, not maximum */ + unsigned long nr_shared_pages; ++ unsigned long nr_paged_pages; + unsigned long shared_info_frame; + uint64_t cpu_time; + unsigned long max_memkb; +Index: xen-4.1.2-testing/xen/arch/x86/mm/p2m.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/mm/p2m.c ++++ xen-4.1.2-testing/xen/arch/x86/mm/p2m.c +@@ -2913,6 +2913,9 @@ int p2m_mem_paging_evict(struct p2m_doma + /* Put the page back so it gets freed */ + put_page(page); + ++ /* Track number of paged gfns */ ++ atomic_inc(&p2m->domain->paged_pages); ++ + return 0; + } + +@@ -2997,6 +3000,8 @@ int p2m_mem_paging_prep(struct p2m_domai + audit_p2m(p2m, 1); + p2m_unlock(p2m); + ++ atomic_dec(&p2m->domain->paged_pages); ++ + return 0; + } + +Index: xen-4.1.2-testing/xen/common/domctl.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/common/domctl.c ++++ xen-4.1.2-testing/xen/common/domctl.c +@@ -139,6 +139,7 @@ void getdomaininfo(struct domain *d, str + info->tot_pages = d->tot_pages; + info->max_pages = d->max_pages; + info->shr_pages = atomic_read(&d->shr_pages); ++ info->paged_pages = atomic_read(&d->paged_pages); + info->shared_info_frame = mfn_to_gmfn(d, __pa(d->shared_info)>>PAGE_SHIFT); + BUG_ON(SHARED_M2P(info->shared_info_frame)); + +Index: xen-4.1.2-testing/xen/include/public/domctl.h +=================================================================== +--- xen-4.1.2-testing.orig/xen/include/public/domctl.h ++++ xen-4.1.2-testing/xen/include/public/domctl.h +@@ -35,7 +35,7 @@ + #include "xen.h" + #include "grant_table.h" + +-#define XEN_DOMCTL_INTERFACE_VERSION 0x00000007 ++#define XEN_DOMCTL_INTERFACE_VERSION 0x00000008 + + /* + * NB. xen_domctl.domain is an IN/OUT parameter for this operation. 
+@@ -95,6 +95,7 @@ struct xen_domctl_getdomaininfo { + uint64_aligned_t tot_pages; + uint64_aligned_t max_pages; + uint64_aligned_t shr_pages; ++ uint64_aligned_t paged_pages; + uint64_aligned_t shared_info_frame; /* GMFN of shared_info struct */ + uint64_aligned_t cpu_time; + uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */ +Index: xen-4.1.2-testing/xen/include/xen/sched.h +=================================================================== +--- xen-4.1.2-testing.orig/xen/include/xen/sched.h ++++ xen-4.1.2-testing/xen/include/xen/sched.h +@@ -215,6 +215,7 @@ struct domain + unsigned int tot_pages; /* number of pages currently possesed */ + unsigned int max_pages; /* maximum value for tot_pages */ + atomic_t shr_pages; /* number of shared pages */ ++ atomic_t paged_pages; /* number of paged-out pages */ + unsigned int xenheap_pages; /* # pages allocated from Xen heap */ + + unsigned int max_vcpus; diff --git a/23897-x86-mce-offline-again.patch b/23897-x86-mce-offline-again.patch index 9d2856f..2d4bd63 100644 --- a/23897-x86-mce-offline-again.patch +++ b/23897-x86-mce-offline-again.patch @@ -10,8 +10,10 @@ To avoid recursive mce. Signed-off-by: Liu, Jinsong Committed-by: Keir Fraser ---- a/xen/arch/x86/cpu/mcheck/mce_intel.c -+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c +Index: xen-4.1.2-testing/xen/arch/x86/cpu/mcheck/mce_intel.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/cpu/mcheck/mce_intel.c ++++ xen-4.1.2-testing/xen/arch/x86/cpu/mcheck/mce_intel.c @@ -639,6 +639,8 @@ static void intel_memerr_dhandler(int bn /* This is free page */ if (status & PG_OFFLINE_OFFLINED) @@ -21,8 +23,10 @@ Committed-by: Keir Fraser else if (status & PG_OFFLINE_PENDING) { /* This page has owner */ if (status & PG_OFFLINE_OWNED) { ---- a/xen/common/page_alloc.c -+++ b/xen/common/page_alloc.c +Index: xen-4.1.2-testing/xen/common/page_alloc.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/common/page_alloc.c ++++ xen-4.1.2-testing/xen/common/page_alloc.c @@ -38,6 +38,7 @@ #include #include @@ -31,7 +35,7 @@ Committed-by: Keir Fraser #include #include #include -@@ -704,6 +705,19 @@ int offline_page(unsigned long mfn, int +@@ -708,6 +709,19 @@ int offline_page(unsigned long mfn, int return -EINVAL; } @@ -51,8 +55,10 @@ Committed-by: Keir Fraser spin_lock(&heap_lock); old_info = mark_page_offline(pg, broken); ---- a/xen/include/public/sysctl.h -+++ b/xen/include/public/sysctl.h +Index: xen-4.1.2-testing/xen/include/public/sysctl.h +=================================================================== +--- xen-4.1.2-testing.orig/xen/include/public/sysctl.h ++++ xen-4.1.2-testing/xen/include/public/sysctl.h @@ -399,6 +399,7 @@ struct xen_sysctl_page_offline_op { #define PG_OFFLINE_OFFLINED (0x1UL << 1) #define PG_OFFLINE_PENDING (0x1UL << 2) diff --git a/23904-xenpaging_use_p2m-get_entry_in_p2m_mem_paging_functions.patch b/23904-xenpaging_use_p2m-get_entry_in_p2m_mem_paging_functions.patch new file mode 100644 index 0000000..d67bdca --- /dev/null +++ b/23904-xenpaging_use_p2m-get_entry_in_p2m_mem_paging_functions.patch @@ -0,0 +1,131 @@ +changeset: 23904:ecab267b85ef +user: Olaf Hering +date: Thu Oct 06 12:33:17 2011 +0100 +files: xen/arch/x86/mm/p2m.c +description: +xenpaging: use p2m->get_entry() in p2m_mem_paging functions + +Use p2m->get_entry() in the p2m_mem_paging functions. This preserves the +p2m_access type when gfn is updated with set_p2m_entry(). 
+Its also a preparation for locking fixes in a subsequent patch. + +Signed-off-by: Olaf Hering +Acked-by: Tim Deegan +Committed-by: Tim Deegan + + +--- + xen/arch/x86/mm/p2m.c | 25 ++++++++++++++++--------- + 1 file changed, 16 insertions(+), 9 deletions(-) + +Index: xen-4.1.2-testing/xen/arch/x86/mm/p2m.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/mm/p2m.c ++++ xen-4.1.2-testing/xen/arch/x86/mm/p2m.c +@@ -2839,10 +2839,11 @@ int p2m_mem_paging_nominate(struct p2m_d + { + struct page_info *page; + p2m_type_t p2mt; ++ p2m_access_t a; + mfn_t mfn; + int ret; + +- mfn = gfn_to_mfn(p2m, gfn, &p2mt); ++ mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query); + + /* Check if mfn is valid */ + ret = -EINVAL; +@@ -2869,7 +2870,7 @@ int p2m_mem_paging_nominate(struct p2m_d + + /* Fix p2m entry */ + p2m_lock(p2m); +- set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_out, p2m->default_access); ++ set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_out, a); + audit_p2m(p2m, 1); + p2m_unlock(p2m); + +@@ -2883,11 +2884,12 @@ int p2m_mem_paging_evict(struct p2m_doma + { + struct page_info *page; + p2m_type_t p2mt; ++ p2m_access_t a; + mfn_t mfn; + struct domain *d = p2m->domain; + + /* Get mfn */ +- mfn = gfn_to_mfn(p2m, gfn, &p2mt); ++ mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query); + if ( unlikely(!mfn_valid(mfn)) ) + return -EINVAL; + +@@ -2906,7 +2908,7 @@ int p2m_mem_paging_evict(struct p2m_doma + + /* Remove mapping from p2m table */ + p2m_lock(p2m); +- set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paged, p2m->default_access); ++ set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paged, a); + audit_p2m(p2m, 1); + p2m_unlock(p2m); + +@@ -2943,6 +2945,7 @@ void p2m_mem_paging_populate(struct p2m_ + struct vcpu *v = current; + mem_event_request_t req; + p2m_type_t p2mt; ++ p2m_access_t a; + struct domain *d = p2m->domain; + + /* Check that there's space on the ring for this request */ +@@ -2955,11 +2958,11 @@ void p2m_mem_paging_populate(struct p2m_ + /* Fix p2m mapping */ + /* XXX: It seems inefficient to have this here, as it's only needed + * in one case (ept guest accessing paging out page) */ +- gfn_to_mfn(p2m, gfn, &p2mt); ++ p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query); + if ( p2mt == p2m_ram_paged ) + { + p2m_lock(p2m); +- set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paging_in_start, p2m->default_access); ++ set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paging_in_start, a); + audit_p2m(p2m, 1); + p2m_unlock(p2m); + } +@@ -2988,7 +2991,10 @@ void p2m_mem_paging_populate(struct p2m_ + int p2m_mem_paging_prep(struct p2m_domain *p2m, unsigned long gfn) + { + struct page_info *page; ++ p2m_type_t p2mt; ++ p2m_access_t a; + ++ p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query); + /* Get a free page */ + page = alloc_domheap_page(p2m->domain, 0); + if ( unlikely(page == NULL) ) +@@ -2996,7 +3002,7 @@ int p2m_mem_paging_prep(struct p2m_domai + + /* Fix p2m mapping */ + p2m_lock(p2m); +- set_p2m_entry(p2m, gfn, page_to_mfn(page), 0, p2m_ram_paging_in, p2m->default_access); ++ set_p2m_entry(p2m, gfn, page_to_mfn(page), 0, p2m_ram_paging_in, a); + audit_p2m(p2m, 1); + p2m_unlock(p2m); + +@@ -3010,6 +3016,7 @@ void p2m_mem_paging_resume(struct p2m_do + struct domain *d = p2m->domain; + mem_event_response_t rsp; + p2m_type_t p2mt; ++ p2m_access_t a; + mfn_t mfn; + + /* Pull the response off the ring */ +@@ -3018,9 +3025,9 @@ void p2m_mem_paging_resume(struct p2m_do + /* Fix p2m entry if the page was not dropped */ + if ( 
!(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) ) + { +- mfn = gfn_to_mfn(p2m, rsp.gfn, &p2mt); ++ mfn = p2m->get_entry(p2m, rsp.gfn, &p2mt, &a, p2m_query); + p2m_lock(p2m); +- set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw, p2m->default_access); ++ set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw, a); + set_gpfn_from_mfn(mfn_x(mfn), rsp.gfn); + audit_p2m(p2m, 1); + p2m_unlock(p2m); diff --git a/23905-xenpaging_fix_locking_in_p2m_mem_paging_functions.patch b/23905-xenpaging_fix_locking_in_p2m_mem_paging_functions.patch new file mode 100644 index 0000000..0d3f175 --- /dev/null +++ b/23905-xenpaging_fix_locking_in_p2m_mem_paging_functions.patch @@ -0,0 +1,157 @@ +changeset: 23905:50ee6be56460 +user: Olaf Hering +date: Thu Oct 06 12:33:17 2011 +0100 +files: xen/arch/x86/mm/p2m.c +description: +xenpaging: fix locking in p2m_mem_paging functions + +As suggested by , query and adjust the p2mt +under the p2m_lock to prevent races with PoD. + +Signed-off-by: Olaf Hering +Acked-by: Tim Deegan +Committed-by: Tim Deegan + + +--- + xen/arch/x86/mm/p2m.c | 42 ++++++++++++++++++++++++++---------------- + 1 file changed, 26 insertions(+), 16 deletions(-) + +Index: xen-4.1.2-testing/xen/arch/x86/mm/p2m.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/mm/p2m.c ++++ xen-4.1.2-testing/xen/arch/x86/mm/p2m.c +@@ -2843,6 +2843,8 @@ int p2m_mem_paging_nominate(struct p2m_d + mfn_t mfn; + int ret; + ++ p2m_unlock(p2m); ++ + mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query); + + /* Check if mfn is valid */ +@@ -2869,14 +2871,12 @@ int p2m_mem_paging_nominate(struct p2m_d + goto out; + + /* Fix p2m entry */ +- p2m_lock(p2m); + set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_out, a); + audit_p2m(p2m, 1); +- p2m_unlock(p2m); +- + ret = 0; + + out: ++ p2m_unlock(p2m); + return ret; + } + +@@ -2887,30 +2887,31 @@ int p2m_mem_paging_evict(struct p2m_doma + p2m_access_t a; + mfn_t mfn; + struct domain *d = p2m->domain; ++ int ret = -EINVAL; ++ ++ p2m_lock(p2m); + + /* Get mfn */ + mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query); + if ( unlikely(!mfn_valid(mfn)) ) +- return -EINVAL; ++ goto out; + + if ( (p2mt == p2m_ram_paged) || (p2mt == p2m_ram_paging_in) || + (p2mt == p2m_ram_paging_in_start) ) +- return -EINVAL; ++ goto out; + + /* Get the page so it doesn't get modified under Xen's feet */ + page = mfn_to_page(mfn); + if ( unlikely(!get_page(page, d)) ) +- return -EINVAL; ++ goto out; + + /* Decrement guest domain's ref count of the page */ + if ( test_and_clear_bit(_PGC_allocated, &page->count_info) ) + put_page(page); + + /* Remove mapping from p2m table */ +- p2m_lock(p2m); + set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paged, a); + audit_p2m(p2m, 1); +- p2m_unlock(p2m); + + /* Put the page back so it gets freed */ + put_page(page); +@@ -2918,7 +2919,11 @@ int p2m_mem_paging_evict(struct p2m_doma + /* Track number of paged gfns */ + atomic_inc(&p2m->domain->paged_pages); + +- return 0; ++ ret = 0; ++ ++ out: ++ p2m_unlock(p2m); ++ return ret; + } + + void p2m_mem_paging_drop_page(struct p2m_domain *p2m, unsigned long gfn) +@@ -2958,14 +2963,14 @@ void p2m_mem_paging_populate(struct p2m_ + /* Fix p2m mapping */ + /* XXX: It seems inefficient to have this here, as it's only needed + * in one case (ept guest accessing paging out page) */ ++ p2m_lock(p2m); + p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query); + if ( p2mt == p2m_ram_paged ) + { +- p2m_lock(p2m); + set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paging_in_start, a); + audit_p2m(p2m, 
1); +- p2m_unlock(p2m); + } ++ p2m_unlock(p2m); + + /* Pause domain */ + if ( v->domain->domain_id == d->domain_id ) +@@ -2993,22 +2998,27 @@ int p2m_mem_paging_prep(struct p2m_domai + struct page_info *page; + p2m_type_t p2mt; + p2m_access_t a; ++ int ret = -ENOMEM; ++ ++ p2m_lock(p2m); + + p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query); ++ + /* Get a free page */ + page = alloc_domheap_page(p2m->domain, 0); + if ( unlikely(page == NULL) ) +- return -ENOMEM; ++ goto out; + + /* Fix p2m mapping */ +- p2m_lock(p2m); + set_p2m_entry(p2m, gfn, page_to_mfn(page), 0, p2m_ram_paging_in, a); + audit_p2m(p2m, 1); +- p2m_unlock(p2m); + + atomic_dec(&p2m->domain->paged_pages); + +- return 0; ++ ret = 0; ++ out: ++ p2m_unlock(p2m); ++ return ret; + } + + void p2m_mem_paging_resume(struct p2m_domain *p2m) +@@ -3025,8 +3035,8 @@ void p2m_mem_paging_resume(struct p2m_do + /* Fix p2m entry if the page was not dropped */ + if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) ) + { +- mfn = p2m->get_entry(p2m, rsp.gfn, &p2mt, &a, p2m_query); + p2m_lock(p2m); ++ mfn = p2m->get_entry(p2m, rsp.gfn, &p2mt, &a, p2m_query); + set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw, a); + set_gpfn_from_mfn(mfn_x(mfn), rsp.gfn); + audit_p2m(p2m, 1); diff --git a/23906-xenpaging_remove_confusing_comment_from_p2m_mem_paging_populate.patch b/23906-xenpaging_remove_confusing_comment_from_p2m_mem_paging_populate.patch new file mode 100644 index 0000000..76a5482 --- /dev/null +++ b/23906-xenpaging_remove_confusing_comment_from_p2m_mem_paging_populate.patch @@ -0,0 +1,35 @@ +changeset: 23906:7bf85c3fd9f0 +user: Olaf Hering +date: Thu Oct 06 12:33:17 2011 +0100 +files: xen/arch/x86/mm/p2m.c +description: +xenpaging: remove confusing comment from p2m_mem_paging_populate + +Currently there is no way to avoid the double check of the p2mt +because p2m_mem_paging_populate() is called from many places without +the p2m_lock held. Upcoming changes will move the function into +gfn_to_mfn(), so its interface could be changed and the extra +p2m_lock/get_entry can be removed. + +Signed-off-by: Olaf Hering +Acked-by: Tim Deegan +Committed-by: Tim Deegan + + +--- + xen/arch/x86/mm/p2m.c | 2 -- + 1 file changed, 2 deletions(-) + +Index: xen-4.1.2-testing/xen/arch/x86/mm/p2m.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/mm/p2m.c ++++ xen-4.1.2-testing/xen/arch/x86/mm/p2m.c +@@ -2961,8 +2961,6 @@ void p2m_mem_paging_populate(struct p2m_ + req.type = MEM_EVENT_TYPE_PAGING; + + /* Fix p2m mapping */ +- /* XXX: It seems inefficient to have this here, as it's only needed +- * in one case (ept guest accessing paging out page) */ + p2m_lock(p2m); + p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query); + if ( p2mt == p2m_ram_paged ) diff --git a/23908-p2m_query-modify_p2mt_with_p2m_lock_held.patch b/23908-p2m_query-modify_p2mt_with_p2m_lock_held.patch new file mode 100644 index 0000000..836524f --- /dev/null +++ b/23908-p2m_query-modify_p2mt_with_p2m_lock_held.patch @@ -0,0 +1,85 @@ +changeset: 23908:88b6e08b8aa8 +user: Olaf Hering +date: Thu Oct 06 14:15:43 2011 +0100 +files: xen/arch/x86/mm/p2m.c +description: +p2m: query/modify p2mt with p2m_lock held + +Query and update the p2mt in set_mmio_p2m_entry, clear_mmio_p2m_entry +and set_shared_p2m_entry with the p2m_lock held. 
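+
+Condensed, the pattern all three functions converge on looks roughly like
+the sketch below (nt stands for whichever p2m type the respective function
+installs; error paths are omitted, the hunks further down have them):
+
+    p2m_lock(p2m);
+    omfn = gfn_to_mfn_query(p2m, gfn, &ot);   /* query the p2mt under the lock */
+    /* validate ot, bail out via p2m_unlock() if it is not acceptable */
+    rc = set_p2m_entry(p2m, gfn, mfn, 0, nt, p2m->default_access);
+    audit_p2m(p2m, 1);
+    p2m_unlock(p2m);                          /* update done under the same lock */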
+ +Signed-off-by: Olaf Hering +Acked-by: Tim Deegan +Committed-by: Tim Deegan + + +--- + xen/arch/x86/mm/p2m.c | 15 +++++++++------ + 1 file changed, 9 insertions(+), 6 deletions(-) + +Index: xen-4.1.2-testing/xen/arch/x86/mm/p2m.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/mm/p2m.c ++++ xen-4.1.2-testing/xen/arch/x86/mm/p2m.c +@@ -2751,9 +2751,11 @@ set_mmio_p2m_entry(struct p2m_domain *p2 + if ( !paging_mode_translate(p2m->domain) ) + return 0; + ++ p2m_lock(p2m); + omfn = gfn_to_mfn_query(p2m, gfn, &ot); + if ( p2m_is_grant(ot) ) + { ++ p2m_unlock(p2m); + domain_crash(p2m->domain); + return 0; + } +@@ -2764,7 +2766,6 @@ set_mmio_p2m_entry(struct p2m_domain *p2 + } + + P2M_DEBUG("set mmio %lx %lx\n", gfn, mfn_x(mfn)); +- p2m_lock(p2m); + rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_mmio_direct, p2m->default_access); + audit_p2m(p2m, 1); + p2m_unlock(p2m); +@@ -2785,18 +2786,20 @@ clear_mmio_p2m_entry(struct p2m_domain * + if ( !paging_mode_translate(p2m->domain) ) + return 0; + +- mfn = gfn_to_mfn(p2m, gfn, &t); ++ p2m_lock(p2m); ++ mfn = gfn_to_mfn_query(p2m, gfn, &t); + + /* Do not use mfn_valid() here as it will usually fail for MMIO pages. */ + if ( (INVALID_MFN == mfn_x(mfn)) || (t != p2m_mmio_direct) ) + { + gdprintk(XENLOG_ERR, + "clear_mmio_p2m_entry: gfn_to_mfn failed! gfn=%08lx\n", gfn); +- return 0; ++ goto out; + } +- p2m_lock(p2m); + rc = set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, p2m_invalid, p2m->default_access); + audit_p2m(p2m, 1); ++ ++out: + p2m_unlock(p2m); + + return rc; +@@ -2813,6 +2816,8 @@ set_shared_p2m_entry(struct p2m_domain * + if ( !paging_mode_translate(p2m->domain) ) + return 0; + ++ if ( need_lock ) ++ p2m_lock(p2m); + omfn = gfn_to_mfn_query(p2m, gfn, &ot); + /* At the moment we only allow p2m change if gfn has already been made + * sharable first */ +@@ -2822,8 +2827,6 @@ set_shared_p2m_entry(struct p2m_domain * + set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY); + + P2M_DEBUG("set shared %lx %lx\n", gfn, mfn_x(mfn)); +- if ( need_lock ) +- p2m_lock(p2m); + rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_shared, p2m->default_access); + if ( need_lock ) + p2m_unlock(p2m); diff --git a/23943-xenpaging_clear_page_content_after_evict.patch b/23943-xenpaging_clear_page_content_after_evict.patch new file mode 100644 index 0000000..634939d --- /dev/null +++ b/23943-xenpaging_clear_page_content_after_evict.patch @@ -0,0 +1,53 @@ +changeset: 23943:1185ae04b5aa +user: Olaf Hering +date: Tue Oct 11 10:46:28 2011 +0100 +files: tools/xenpaging/xenpaging.c xen/arch/x86/mm/p2m.c +description: +xenpaging: clear page content after evict + +If the guest happens to read from the gfn while xenpaging is in the process of +evicting the page, the guest may read zeros instead of actual data. +Also if eviction fails the page content will be corrupted and xenpaging wont +attempt to restore the page. + +Remove page scrubbing from pager and do it after successful eviction. 
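+
+Condensed, the order of operations in p2m_mem_paging_evict() after this
+change is (error handling omitted, see the hunk below):
+
+    /* remove the mapping from the p2m table first */
+    set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paged, a);
+    audit_p2m(p2m, 1);
+    /* then clear the content before the page is returned to Xen */
+    scrub_one_page(page);
+    /* and only now drop the reference so the page can actually be freed */
+    put_page(page);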
+ +Signed-off-by: Olaf Hering +Acked-by: Ian Jackson +Acked-by: Tim Deegan +Committed-by: Tim Deegan + + +--- + tools/xenpaging/xenpaging.c | 3 --- + xen/arch/x86/mm/p2m.c | 3 +++ + 2 files changed, 3 insertions(+), 3 deletions(-) + +Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c ++++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c +@@ -455,9 +455,6 @@ static int xenpaging_evict_page(xenpagin + goto out; + } + +- /* Clear page */ +- memset(page, 0, PAGE_SIZE); +- + munmap(page, PAGE_SIZE); + + /* Tell Xen to evict page */ +Index: xen-4.1.2-testing/xen/arch/x86/mm/p2m.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/mm/p2m.c ++++ xen-4.1.2-testing/xen/arch/x86/mm/p2m.c +@@ -2916,6 +2916,9 @@ int p2m_mem_paging_evict(struct p2m_doma + set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paged, a); + audit_p2m(p2m, 1); + ++ /* Clear content before returning the page to Xen */ ++ scrub_one_page(page); ++ + /* Put the page back so it gets freed */ + put_page(page); + diff --git a/23953-xenpaging_handle_evict_failures.patch b/23953-xenpaging_handle_evict_failures.patch new file mode 100644 index 0000000..cae687f --- /dev/null +++ b/23953-xenpaging_handle_evict_failures.patch @@ -0,0 +1,232 @@ +changeset: 23953:eda18b27de6e +user: Olaf Hering +date: Thu Oct 13 12:21:10 2011 +0100 +files: tools/xenpaging/xenpaging.c xen/arch/x86/mm.c xen/arch/x86/mm/p2m.c xen/include/public/mem_event.h +description: +xenpaging: handle evict failures + +Evict of a nominated gfn must fail if some other process mapped the +page without checking the p2mt of that gfn first. +Add a check to cancel eviction if the page usage count is not 1. + +Handle the possible eviction failure in the page-in paths. +After nominate and before evict, something may check the p2mt and call +populate. Handle this case and let the gfn enter the page-in path. The +gfn may still be connected to a mfn, so there is no need to allocate a +new page in prep. + +Adjust do_mmu_update to return -ENOENT only if the gfn has entered the +page-in path and if it is not yet connected to a mfn. Otherwise +linux_privcmd_map_foreign_bulk() may loop forever. + +Add MEM_EVENT_FLAG_EVICT_FAIL to inform pager that a page-in request for +a possible not-evicted page was sent. xenpaging does currently not need +that flag because failure to evict a gfn will be caught. + +Signed-off-by: Olaf Hering +Acked-by: Tim Deegan +Committed-by: Tim Deegan + + +--- + tools/xenpaging/xenpaging.c | 10 ++++--- + xen/arch/x86/mm.c | 8 ++--- + xen/arch/x86/mm/p2m.c | 55 +++++++++++++++++++++++++++++------------ + xen/include/public/mem_event.h | 1 + 4 files changed, 50 insertions(+), 24 deletions(-) + +Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c ++++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c +@@ -734,10 +734,12 @@ int main(int argc, char *argv[]) + } + else + { +- DPRINTF("page already populated (domain = %d; vcpu = %d;" +- " gfn = %"PRIx64"; paused = %d)\n", +- paging->mem_event.domain_id, req.vcpu_id, +- req.gfn, req.flags & MEM_EVENT_FLAG_VCPU_PAUSED); ++ DPRINTF("page %s populated (domain = %d; vcpu = %d;" ++ " gfn = %"PRIx64"; paused = %d; evict_fail = %d)\n", ++ req.flags & MEM_EVENT_FLAG_EVICT_FAIL ? 
"not" : "already", ++ paging->mem_event.domain_id, req.vcpu_id, req.gfn, ++ !!(req.flags & MEM_EVENT_FLAG_VCPU_PAUSED) , ++ !!(req.flags & MEM_EVENT_FLAG_EVICT_FAIL) ); + + /* Tell Xen to resume the vcpu */ + /* XXX: Maybe just check if the vcpu was paused? */ +Index: xen-4.1.2-testing/xen/arch/x86/mm.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/mm.c ++++ xen-4.1.2-testing/xen/arch/x86/mm.c +@@ -3502,7 +3502,7 @@ int do_mmu_update( + rc = -ENOENT; + break; + } +- else if ( p2m_ram_paging_in_start == l1e_p2mt ) ++ else if ( p2m_ram_paging_in_start == l1e_p2mt && !mfn_valid(mfn) ) + { + rc = -ENOENT; + break; +@@ -3543,7 +3543,7 @@ int do_mmu_update( + rc = -ENOENT; + break; + } +- else if ( p2m_ram_paging_in_start == l2e_p2mt ) ++ else if ( p2m_ram_paging_in_start == l2e_p2mt && !mfn_valid(mfn) ) + { + rc = -ENOENT; + break; +@@ -3572,7 +3572,7 @@ int do_mmu_update( + rc = -ENOENT; + break; + } +- else if ( p2m_ram_paging_in_start == l3e_p2mt ) ++ else if ( p2m_ram_paging_in_start == l3e_p2mt && !mfn_valid(mfn) ) + { + rc = -ENOENT; + break; +@@ -3602,7 +3602,7 @@ int do_mmu_update( + rc = -ENOENT; + break; + } +- else if ( p2m_ram_paging_in_start == l4e_p2mt ) ++ else if ( p2m_ram_paging_in_start == l4e_p2mt && !mfn_valid(mfn) ) + { + rc = -ENOENT; + break; +Index: xen-4.1.2-testing/xen/arch/x86/mm/p2m.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/mm/p2m.c ++++ xen-4.1.2-testing/xen/arch/x86/mm/p2m.c +@@ -2899,15 +2899,24 @@ int p2m_mem_paging_evict(struct p2m_doma + if ( unlikely(!mfn_valid(mfn)) ) + goto out; + +- if ( (p2mt == p2m_ram_paged) || (p2mt == p2m_ram_paging_in) || +- (p2mt == p2m_ram_paging_in_start) ) ++ /* Allow only nominated pages */ ++ if ( p2mt != p2m_ram_paging_out ) + goto out; + ++ ret = -EBUSY; + /* Get the page so it doesn't get modified under Xen's feet */ + page = mfn_to_page(mfn); + if ( unlikely(!get_page(page, d)) ) + goto out; + ++ /* Check page count and type once more */ ++ if ( (page->count_info & (PGC_count_mask | PGC_allocated)) != ++ (2 | PGC_allocated) ) ++ goto out_put; ++ ++ if ( (page->u.inuse.type_info & PGT_type_mask) != PGT_none ) ++ goto out_put; ++ + /* Decrement guest domain's ref count of the page */ + if ( test_and_clear_bit(_PGC_allocated, &page->count_info) ) + put_page(page); +@@ -2919,14 +2928,15 @@ int p2m_mem_paging_evict(struct p2m_doma + /* Clear content before returning the page to Xen */ + scrub_one_page(page); + +- /* Put the page back so it gets freed */ +- put_page(page); +- + /* Track number of paged gfns */ + atomic_inc(&p2m->domain->paged_pages); + + ret = 0; + ++ out_put: ++ /* Put the page back so it gets freed */ ++ put_page(page); ++ + out: + p2m_unlock(p2m); + return ret; +@@ -2957,6 +2967,7 @@ void p2m_mem_paging_populate(struct p2m_ + mem_event_request_t req; + p2m_type_t p2mt; + p2m_access_t a; ++ mfn_t mfn; + struct domain *d = p2m->domain; + + /* Check that there's space on the ring for this request */ +@@ -2968,20 +2979,26 @@ void p2m_mem_paging_populate(struct p2m_ + + /* Fix p2m mapping */ + p2m_lock(p2m); +- p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query); +- if ( p2mt == p2m_ram_paged ) ++ mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query); ++ /* Allow only nominated or evicted pages to enter page-in path */ ++ if ( p2mt == p2m_ram_paging_out || p2mt == p2m_ram_paged ) + { +- set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paging_in_start, a); ++ /* Evict will fail now, tag 
this request for pager */ ++ if ( p2mt == p2m_ram_paging_out ) ++ req.flags |= MEM_EVENT_FLAG_EVICT_FAIL; ++ ++ set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_in_start, a); + audit_p2m(p2m, 1); + } + p2m_unlock(p2m); + +- /* Pause domain */ +- if ( v->domain->domain_id == d->domain_id ) ++ /* Pause domain if request came from guest and gfn has paging type */ ++ if ( p2m_is_paging(p2mt) && v->domain->domain_id == d->domain_id ) + { + vcpu_pause_nosync(v); + req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED; + } ++ /* No need to inform pager if the gfn is not in the page-out path */ + else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged ) + { + /* gfn is already on its way back and vcpu is not paused */ +@@ -3002,19 +3019,25 @@ int p2m_mem_paging_prep(struct p2m_domai + struct page_info *page; + p2m_type_t p2mt; + p2m_access_t a; ++ mfn_t mfn; + int ret = -ENOMEM; + + p2m_lock(p2m); + +- p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query); ++ mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query); + +- /* Get a free page */ +- page = alloc_domheap_page(p2m->domain, 0); +- if ( unlikely(page == NULL) ) +- goto out; ++ /* Allocate a page if the gfn does not have one yet */ ++ if ( !mfn_valid(mfn) ) ++ { ++ /* Get a free page */ ++ page = alloc_domheap_page(p2m->domain, 0); ++ if ( unlikely(page == NULL) ) ++ goto out; ++ mfn = page_to_mfn(page); ++ } + + /* Fix p2m mapping */ +- set_p2m_entry(p2m, gfn, page_to_mfn(page), 0, p2m_ram_paging_in, a); ++ set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_in, a); + audit_p2m(p2m, 1); + + atomic_dec(&p2m->domain->paged_pages); +Index: xen-4.1.2-testing/xen/include/public/mem_event.h +=================================================================== +--- xen-4.1.2-testing.orig/xen/include/public/mem_event.h ++++ xen-4.1.2-testing/xen/include/public/mem_event.h +@@ -38,6 +38,7 @@ + /* Memory event flags */ + #define MEM_EVENT_FLAG_VCPU_PAUSED (1 << 0) + #define MEM_EVENT_FLAG_DROP_PAGE (1 << 1) ++#define MEM_EVENT_FLAG_EVICT_FAIL (1 << 2) + + /* Reasons for the memory event request */ + #define MEM_EVENT_REASON_UNKNOWN 0 /* typical reason */ diff --git a/23978-xenpaging_check_p2mt_in_p2m_mem_paging_functions.patch b/23978-xenpaging_check_p2mt_in_p2m_mem_paging_functions.patch new file mode 100644 index 0000000..5bdc0a0 --- /dev/null +++ b/23978-xenpaging_check_p2mt_in_p2m_mem_paging_functions.patch @@ -0,0 +1,68 @@ +changeset: 23978:fd3fa0a85020 +user: Olaf Hering +date: Thu Oct 20 11:25:55 2011 +0100 +files: xen/arch/x86/mm/p2m.c +description: +xenpaging: check p2mt in p2m_mem_paging functions + +Add checks to forward the p2m_ram_paging* state properly during page-in. + +Resume can be called several times if several vcpus called populate for +the gfn. Finish resume only once. 
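+
+The resume side boils down to the following check (condensed from the hunk
+below; the surrounding p2m_lock/p2m_unlock is left out):
+
+    mfn = p2m->get_entry(p2m, rsp.gfn, &p2mt, &a, p2m_query);
+    /* only prepared pages, or pages nominated but never evicted, are fixed
+     * up; a later resume for the same gfn finds p2m_ram_rw and does nothing */
+    if ( mfn_valid(mfn) &&
+         (p2mt == p2m_ram_paging_in || p2mt == p2m_ram_paging_in_start) )
+    {
+        set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw, a);
+        set_gpfn_from_mfn(mfn_x(mfn), rsp.gfn);
+        audit_p2m(p2m, 1);
+    }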
+ +Signed-off-by: Olaf Hering +Acked-by: Tim Deegan +Committed-by: Tim Deegan + + +--- + xen/arch/x86/mm/p2m.c | 20 ++++++++++++++++---- + 1 file changed, 16 insertions(+), 4 deletions(-) + +Index: xen-4.1.2-testing/xen/arch/x86/mm/p2m.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/mm/p2m.c ++++ xen-4.1.2-testing/xen/arch/x86/mm/p2m.c +@@ -3020,16 +3020,22 @@ int p2m_mem_paging_prep(struct p2m_domai + p2m_type_t p2mt; + p2m_access_t a; + mfn_t mfn; +- int ret = -ENOMEM; ++ int ret; + + p2m_lock(p2m); + + mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query); + ++ ret = -ENOENT; ++ /* Allow only missing pages */ ++ if ( p2mt != p2m_ram_paging_in_start ) ++ goto out; ++ + /* Allocate a page if the gfn does not have one yet */ + if ( !mfn_valid(mfn) ) + { + /* Get a free page */ ++ ret = -ENOMEM; + page = alloc_domheap_page(p2m->domain, 0); + if ( unlikely(page == NULL) ) + goto out; +@@ -3064,9 +3070,15 @@ void p2m_mem_paging_resume(struct p2m_do + { + p2m_lock(p2m); + mfn = p2m->get_entry(p2m, rsp.gfn, &p2mt, &a, p2m_query); +- set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw, a); +- set_gpfn_from_mfn(mfn_x(mfn), rsp.gfn); +- audit_p2m(p2m, 1); ++ /* Allow only pages which were prepared properly, or pages which ++ * were nominated but not evicted */ ++ if ( mfn_valid(mfn) && ++ (p2mt == p2m_ram_paging_in || p2mt == p2m_ram_paging_in_start) ) ++ { ++ set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw, a); ++ set_gpfn_from_mfn(mfn_x(mfn), rsp.gfn); ++ audit_p2m(p2m, 1); ++ } + p2m_unlock(p2m); + } + diff --git a/23979-xenpaging_document_p2m_mem_paging_functions.patch b/23979-xenpaging_document_p2m_mem_paging_functions.patch new file mode 100644 index 0000000..c978e26 --- /dev/null +++ b/23979-xenpaging_document_p2m_mem_paging_functions.patch @@ -0,0 +1,158 @@ +changeset: 23979:18306b054799 +user: Olaf Hering +date: Thu Oct 20 11:25:58 2011 +0100 +files: xen/arch/x86/mm/p2m.c +description: +xenpaging: document p2m_mem_paging functions + +Add some documentation for each of the p2m_mem_paging functions to describe +what they ought to do. + +Signed-off-by: Olaf Hering +Acked-by: Tim Deegan +Committed-by: Tim Deegan + + +--- + xen/arch/x86/mm/p2m.c | 93 ++++++++++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 93 insertions(+) + +Index: xen-4.1.2-testing/xen/arch/x86/mm/p2m.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/mm/p2m.c ++++ xen-4.1.2-testing/xen/arch/x86/mm/p2m.c +@@ -2838,6 +2838,24 @@ set_shared_p2m_entry(struct p2m_domain * + } + + #ifdef __x86_64__ ++/** ++ * p2m_mem_paging_nominate - Mark a guest page as to-be-paged-out ++ * @d: guest domain ++ * @gfn: guest page to nominate ++ * ++ * Returns 0 for success or negative errno values if gfn is not pageable. ++ * ++ * p2m_mem_paging_nominate() is called by the pager and checks if a guest page ++ * can be paged out. If the following conditions are met the p2mt will be ++ * changed: ++ * - the gfn is backed by a mfn ++ * - the p2mt of the gfn is pageable ++ * - the mfn is not used for IO ++ * - the mfn has exactly one user and has no special meaning ++ * ++ * Once the p2mt is changed the page is readonly for the guest. On success the ++ * pager can write the page contents to disk and later evict the page. 
++ */ + int p2m_mem_paging_nominate(struct p2m_domain *p2m, unsigned long gfn) + { + struct page_info *page; +@@ -2883,6 +2901,25 @@ int p2m_mem_paging_nominate(struct p2m_d + return ret; + } + ++/** ++ * p2m_mem_paging_evict - Mark a guest page as paged-out ++ * @d: guest domain ++ * @gfn: guest page to evict ++ * ++ * Returns 0 for success or negative errno values if eviction is not possible. ++ * ++ * p2m_mem_paging_evict() is called by the pager and will free a guest page and ++ * release it back to Xen. If the following conditions are met the page can be ++ * freed: ++ * - the gfn is backed by a mfn ++ * - the gfn was nominated ++ * - the mfn has still exactly one user and has no special meaning ++ * ++ * After successful nomination some other process could have mapped the page. In ++ * this case eviction can not be done. If the gfn was populated before the pager ++ * could evict it, eviction can not be done either. In this case the gfn is ++ * still backed by a mfn. ++ */ + int p2m_mem_paging_evict(struct p2m_domain *p2m, unsigned long gfn) + { + struct page_info *page; +@@ -2942,6 +2979,15 @@ int p2m_mem_paging_evict(struct p2m_doma + return ret; + } + ++/** ++ * p2m_mem_paging_drop_page - Tell pager to drop its reference to a paged page ++ * @d: guest domain ++ * @gfn: guest page to drop ++ * ++ * p2m_mem_paging_drop_page() will notify the pager that a paged-out gfn was ++ * released by the guest. The pager is supposed to drop its reference of the ++ * gfn. ++ */ + void p2m_mem_paging_drop_page(struct p2m_domain *p2m, unsigned long gfn) + { + struct vcpu *v = current; +@@ -2961,6 +3007,27 @@ void p2m_mem_paging_drop_page(struct p2m + } + } + ++/** ++ * p2m_mem_paging_populate - Tell pager to populete a paged page ++ * @d: guest domain ++ * @gfn: guest page in paging state ++ * ++ * p2m_mem_paging_populate() will notify the pager that a page in any of the ++ * paging states needs to be written back into the guest. ++ * This function needs to be called whenever gfn_to_mfn() returns any of the p2m ++ * paging types because the gfn may not be backed by a mfn. ++ * ++ * The gfn can be in any of the paging states, but the pager needs only be ++ * notified when the gfn is in the paging-out path (paging_out or paged). This ++ * function may be called more than once from several vcpus. If the vcpu belongs ++ * to the guest, the vcpu must be stopped and the pager notified that the vcpu ++ * was stopped. The pager needs to handle several requests for the same gfn. ++ * ++ * If the gfn is not in the paging-out path and the vcpu does not belong to the ++ * guest, nothing needs to be done and the function assumes that a request was ++ * already sent to the pager. In this case the caller has to try again until the ++ * gfn is fully paged in again. ++ */ + void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn) + { + struct vcpu *v = current; +@@ -3014,6 +3081,17 @@ void p2m_mem_paging_populate(struct p2m_ + mem_event_put_request(d, &d->mem_paging, &req); + } + ++/** ++ * p2m_mem_paging_prep - Allocate a new page for the guest ++ * @d: guest domain ++ * @gfn: guest page in paging state ++ * ++ * p2m_mem_paging_prep() will allocate a new page for the guest if the gfn is ++ * not backed by a mfn. It is called by the pager. ++ * It is required that the gfn was already populated. The gfn may already have a ++ * mfn if populate was called for gfn which was nominated but not evicted. In ++ * this case only the p2mt needs to be forwarded. 
++ */ + int p2m_mem_paging_prep(struct p2m_domain *p2m, unsigned long gfn) + { + struct page_info *page; +@@ -3054,6 +3132,21 @@ int p2m_mem_paging_prep(struct p2m_domai + return ret; + } + ++/** ++ * p2m_mem_paging_resume - Resume guest gfn and vcpus ++ * @d: guest domain ++ * @gfn: guest page in paging state ++ * ++ * p2m_mem_paging_resume() will forward the p2mt of a gfn to ram_rw and all ++ * waiting vcpus will be unpaused again. It is called by the pager. ++ * ++ * The gfn was previously either evicted and populated, or nominated and ++ * populated. If the page was evicted the p2mt will be p2m_ram_paging_in. If ++ * the page was just nominated the p2mt will be p2m_ram_paging_in_start because ++ * the pager did not call p2m_mem_paging_prep(). ++ * ++ * If the gfn was dropped the vcpu needs to be unpaused. ++ */ + void p2m_mem_paging_resume(struct p2m_domain *p2m) + { + struct domain *d = p2m->domain; diff --git a/23980-xenpaging_disallow_paging_in_a_PoD_guest.patch b/23980-xenpaging_disallow_paging_in_a_PoD_guest.patch new file mode 100644 index 0000000..4e49a34 --- /dev/null +++ b/23980-xenpaging_disallow_paging_in_a_PoD_guest.patch @@ -0,0 +1,58 @@ +changeset: 23980:a06609840ff1 +user: Olaf Hering +date: Thu Oct 20 11:25:58 2011 +0100 +files: tools/xenpaging/xenpaging.c xen/arch/x86/mm/mem_event.c +description: +xenpaging: disallow paging in a PoD guest + +Disallow xenpaging in a PoD guest until coexistance between the two features +is properly implemented. + +Signed-off-by: Olaf Hering +Acked-by: Tim Deegan +Committed-by: Tim Deegan + + +--- + tools/xenpaging/xenpaging.c | 3 +++ + xen/arch/x86/mm/mem_event.c | 6 ++++++ + 2 files changed, 9 insertions(+) + +Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c ++++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c +@@ -246,6 +246,9 @@ static xenpaging_t *xenpaging_init(domid + case ENODEV: + ERROR("EPT not supported for this guest"); + break; ++ case EXDEV: ++ ERROR("xenpaging not supported in a PoD guest"); ++ break; + default: + ERROR("Error initialising shared page: %s", strerror(errno)); + break; +Index: xen-4.1.2-testing/xen/arch/x86/mm/mem_event.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/mm/mem_event.c ++++ xen-4.1.2-testing/xen/arch/x86/mm/mem_event.c +@@ -253,6 +253,7 @@ int mem_event_domctl(struct domain *d, x + case XEN_DOMCTL_MEM_EVENT_OP_PAGING: + { + struct mem_event_domain *med = &d->mem_paging; ++ struct p2m_domain *p2m = p2m_get_hostp2m(d); + rc = -ENODEV; + /* Only HAP is supported */ + if ( !hap_enabled(d) ) +@@ -262,6 +263,11 @@ int mem_event_domctl(struct domain *d, x + if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ) + break; + ++ rc = -EXDEV; ++ /* Disallow paging in a PoD guest */ ++ if ( p2m->pod.entry_count ) ++ break; ++ + switch( mec->op ) + { + case XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE: diff --git a/23993-x86-microcode-amd-fix-23871.patch b/23993-x86-microcode-amd-fix-23871.patch new file mode 100644 index 0000000..8d45851 --- /dev/null +++ b/23993-x86-microcode-amd-fix-23871.patch @@ -0,0 +1,71 @@ +# HG changeset patch +# User Jan Beulich +# Date 1319475620 -3600 +# Node ID e458dfc35b8d3be04a9b72c30ff97163e27a7314 +# Parent ffe861c1d5dfa8f4485052e5600e06124105033f +x86/ucode-amd: fix regression from c/s 23871:503ee256fecf + +microcode_fits() must return distinct values for the success and +no-fit-but-no-error cases, 
so the caller can react accordingly. Make +it return 1 in the success case, and adjust its single caller. + +Also remove an impossible code path - install_equiv_cpu_table(), which +gets called prior to microcode_fits(), never leaves equiv_cpu_table +being NULL without also returning an error. + +Note that this is still awaiting testing on a system where the +regression was actually observed (which also requires a new enough +microcode_ctl package). Note also that this will need to be +backported to 4.0 and 4.1 (or the broken c/s that got backported +there reverted). + +Signed-off-by: Jan Beulich +Committed-by: Keir Fraser + +Index: xen-4.1.2-testing/xen/arch/x86/microcode_amd.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/microcode_amd.c ++++ xen-4.1.2-testing/xen/arch/x86/microcode_amd.c +@@ -76,14 +76,6 @@ static int microcode_fits(void *mc, int + /* We should bind the task to the CPU */ + BUG_ON(cpu != raw_smp_processor_id()); + +- if ( equiv_cpu_table == NULL ) +- { +- printk(KERN_INFO "microcode: CPU%d microcode update with " +- "version 0x%x (current=0x%x)\n", +- cpu, mc_header->patch_id, uci->cpu_sig.rev); +- goto out; +- } +- + current_cpu_id = cpuid_eax(0x00000001); + + for ( i = 0; equiv_cpu_table[i].installed_cpu != 0; i++ ) +@@ -96,7 +88,7 @@ static int microcode_fits(void *mc, int + } + + if ( !equiv_cpu_id ) +- return 0; ++ return 0; + + if ( (mc_header->processor_rev_id) != equiv_cpu_id ) + { +@@ -113,8 +105,7 @@ static int microcode_fits(void *mc, int + "update with version 0x%x (current=0x%x)\n", + cpu, mc_header->patch_id, uci->cpu_sig.rev); + +-out: +- return 0; ++ return 1; + } + + static int apply_microcode(int cpu) +@@ -289,7 +280,7 @@ static int cpu_request_microcode(int cpu + while ( (ret = get_next_ucode_from_buffer_amd(mc, buf, size, &offset)) == 0) + { + error = microcode_fits(mc, cpu); +- if (error != 0) ++ if (error <= 0) + continue; + + error = apply_microcode(cpu); diff --git a/udev-rules.patch b/udev-rules.patch index af4979f..e7e3bee 100644 --- a/udev-rules.patch +++ b/udev-rules.patch @@ -7,5 +7,5 @@ Index: xen-4.1.2-testing/tools/hotplug/Linux/xen-backend.rules KERNEL=="pci_iomul", NAME="xen/%k", MODE="0600" KERNEL=="tapdev[a-z]*", NAME="xen/blktap-2/tapdev%m", MODE="0600" -SUBSYSTEM=="net", KERNEL=="tap*", ACTION=="add", RUN+="/etc/xen/scripts/vif-setup $env{ACTION} type_if=tap" -+SUBSYSTEM=="net", KERNEL=="tap*", ACTION=="add", TEST="/proc/xen", RUN+="/etc/xen/scripts/vif-setup $env{ACTION} type_if=tap" ++SUBSYSTEM=="net", KERNEL=="tap*", ACTION=="add", TEST=="/proc/xen", RUN+="/etc/xen/scripts/vif-setup $env{ACTION} type_if=tap" +KERNELS=="xen", KERNEL=="xvd*", SUBSYSTEM=="block", OPTIONS+="last_rule" diff --git a/x86-show-page-walk-early.patch b/x86-show-page-walk-early.patch deleted file mode 100644 index 54bf906..0000000 --- a/x86-show-page-walk-early.patch +++ /dev/null @@ -1,128 +0,0 @@ ---- a/xen/arch/x86/mm.c -+++ b/xen/arch/x86/mm.c -@@ -162,6 +162,8 @@ static int get_superpage(unsigned long m - #endif - static void put_superpage(unsigned long mfn); - -+bool_t __read_mostly mpt_valid; -+ - #define l1_disallow_mask(d) \ - ((d != dom_io) && \ - (rangeset_is_empty((d)->iomem_caps) && \ ---- a/xen/arch/x86/traps.c -+++ b/xen/arch/x86/traps.c -@@ -1456,6 +1456,7 @@ asmlinkage void __init do_early_page_fau - unsigned long *stk = (unsigned long *)regs; - printk("Early fatal page fault at %04x:%p (cr2=%p, ec=%04x)\n", - regs->cs, _p(regs->eip), _p(cr2), regs->error_code); -+ 
show_page_walk(cr2); - printk("Stack dump: "); - while ( ((long)stk & ((PAGE_SIZE - 1) & ~(BYTES_PER_LONG - 1))) != 0 ) - printk("%p ", _p(*stk++)); ---- a/xen/arch/x86/x86_32/mm.c -+++ b/xen/arch/x86/x86_32/mm.c -@@ -122,6 +122,8 @@ void __init paging_init(void) - #undef CNT - #undef MFN - -+ mpt_valid = 1; -+ - /* Create page tables for ioremap()/map_domain_page_global(). */ - for ( i = 0; i < (IOREMAP_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ ) - { ---- a/xen/arch/x86/x86_32/traps.c -+++ b/xen/arch/x86/x86_32/traps.c -@@ -164,7 +164,8 @@ void show_page_walk(unsigned long addr) - l3t += (cr3 & 0xFE0UL) >> 3; - l3e = l3t[l3_table_offset(addr)]; - mfn = l3e_get_pfn(l3e); -- pfn = mfn_valid(mfn) ? get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; -+ pfn = mfn_valid(mfn) && mpt_valid ? -+ get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; - printk(" L3[0x%03lx] = %"PRIpte" %08lx\n", - l3_table_offset(addr), l3e_get_intpte(l3e), pfn); - unmap_domain_page(l3t); -@@ -175,7 +176,8 @@ void show_page_walk(unsigned long addr) - l2t = map_domain_page(mfn); - l2e = l2t[l2_table_offset(addr)]; - mfn = l2e_get_pfn(l2e); -- pfn = mfn_valid(mfn) ? get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; -+ pfn = mfn_valid(mfn) && mpt_valid ? -+ get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; - printk(" L2[0x%03lx] = %"PRIpte" %08lx %s\n", - l2_table_offset(addr), l2e_get_intpte(l2e), pfn, - (l2e_get_flags(l2e) & _PAGE_PSE) ? "(PSE)" : ""); -@@ -188,7 +190,8 @@ void show_page_walk(unsigned long addr) - l1t = map_domain_page(mfn); - l1e = l1t[l1_table_offset(addr)]; - mfn = l1e_get_pfn(l1e); -- pfn = mfn_valid(mfn) ? get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; -+ pfn = mfn_valid(mfn) && mpt_valid ? -+ get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; - printk(" L1[0x%03lx] = %"PRIpte" %08lx\n", - l1_table_offset(addr), l1e_get_intpte(l1e), pfn); - unmap_domain_page(l1t); ---- a/xen/arch/x86/x86_64/mm.c -+++ b/xen/arch/x86/x86_64/mm.c -@@ -751,6 +751,8 @@ void __init paging_init(void) - #undef CNT - #undef MFN - -+ mpt_valid = 1; -+ - /* Create user-accessible L2 directory to map the MPT for compat guests. */ - BUILD_BUG_ON(l4_table_offset(RDWR_MPT_VIRT_START) != - l4_table_offset(HIRO_COMPAT_MPT_VIRT_START)); ---- a/xen/arch/x86/x86_64/traps.c -+++ b/xen/arch/x86/x86_64/traps.c -@@ -176,7 +176,8 @@ void show_page_walk(unsigned long addr) - l4t = mfn_to_virt(mfn); - l4e = l4t[l4_table_offset(addr)]; - mfn = l4e_get_pfn(l4e); -- pfn = mfn_valid(mfn) ? get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; -+ pfn = mfn_valid(mfn) && mpt_valid ? -+ get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; - printk(" L4[0x%03lx] = %"PRIpte" %016lx\n", - l4_table_offset(addr), l4e_get_intpte(l4e), pfn); - if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) || -@@ -186,7 +187,8 @@ void show_page_walk(unsigned long addr) - l3t = mfn_to_virt(mfn); - l3e = l3t[l3_table_offset(addr)]; - mfn = l3e_get_pfn(l3e); -- pfn = mfn_valid(mfn) ? get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; -+ pfn = mfn_valid(mfn) && mpt_valid ? -+ get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; - printk(" L3[0x%03lx] = %"PRIpte" %016lx%s\n", - l3_table_offset(addr), l3e_get_intpte(l3e), pfn, - (l3e_get_flags(l3e) & _PAGE_PSE) ? " (PSE)" : ""); -@@ -198,7 +200,8 @@ void show_page_walk(unsigned long addr) - l2t = mfn_to_virt(mfn); - l2e = l2t[l2_table_offset(addr)]; - mfn = l2e_get_pfn(l2e); -- pfn = mfn_valid(mfn) ? get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; -+ pfn = mfn_valid(mfn) && mpt_valid ? 
-+ get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; - printk(" L2[0x%03lx] = %"PRIpte" %016lx %s\n", - l2_table_offset(addr), l2e_get_intpte(l2e), pfn, - (l2e_get_flags(l2e) & _PAGE_PSE) ? "(PSE)" : ""); -@@ -210,7 +213,8 @@ void show_page_walk(unsigned long addr) - l1t = mfn_to_virt(mfn); - l1e = l1t[l1_table_offset(addr)]; - mfn = l1e_get_pfn(l1e); -- pfn = mfn_valid(mfn) ? get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; -+ pfn = mfn_valid(mfn) && mpt_valid ? -+ get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY; - printk(" L1[0x%03lx] = %"PRIpte" %016lx\n", - l1_table_offset(addr), l1e_get_intpte(l1e), pfn); - } ---- a/xen/include/asm-x86/mm.h -+++ b/xen/include/asm-x86/mm.h -@@ -467,6 +467,8 @@ TYPE_SAFE(unsigned long,mfn); - #define SHARED_M2P_ENTRY (~0UL - 1UL) - #define SHARED_M2P(_e) ((_e) == SHARED_M2P_ENTRY) - -+extern bool_t mpt_valid; -+ - #ifdef CONFIG_COMPAT - #define compat_machine_to_phys_mapping ((unsigned int *)RDWR_COMPAT_MPT_VIRT_START) - #define set_gpfn_from_mfn(mfn, pfn) ({ \ diff --git a/xen-4.1.2-testing-src.tar.bz2 b/xen-4.1.2-testing-src.tar.bz2 index 7c791d7..33c6297 100644 --- a/xen-4.1.2-testing-src.tar.bz2 +++ b/xen-4.1.2-testing-src.tar.bz2 @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:da40746250019fd62c9c0c71085545f8eb4adcf45ca9e3a7af7560c715e0e970 -size 10786312 +oid sha256:24db3184741ea494f3a11cd522ad308c6dae6a47572360efe5d902ccb4780822 +size 10787209 diff --git a/xen-config.diff b/xen-config.diff index ef9d539..15f2866 100644 --- a/xen-config.diff +++ b/xen-config.diff @@ -9,9 +9,9 @@ Index: xen-4.1.2-testing/Config.mk -CONFIG_QEMU ?= $(QEMU_REMOTE) +CONFIG_QEMU ?= ioemu-qemu-xen - QEMU_TAG := xen-4.1.2-rc3 + QEMU_TAG := xen-4.1.2 #QEMU_TAG ?= e073e69457b4d99b6da0b6536296e3498f7f6599 -@@ -187,7 +187,7 @@ QEMU_TAG := xen-4.1.2-rc3 +@@ -187,7 +187,7 @@ QEMU_TAG := xen-4.1.2 # Optional components XENSTAT_XENTOP ?= y VTPM_TOOLS ?= n diff --git a/xen-warnings-unused.diff b/xen-warnings-unused.diff index 746bd5f..7784cc5 100644 --- a/xen-warnings-unused.diff +++ b/xen-warnings-unused.diff @@ -1,5 +1,7 @@ ---- a/tools/libxc/xc_tmem.c -+++ b/tools/libxc/xc_tmem.c +Index: xen-4.1.2-testing/tools/libxc/xc_tmem.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/libxc/xc_tmem.c ++++ xen-4.1.2-testing/tools/libxc/xc_tmem.c @@ -390,7 +390,8 @@ static int xc_tmem_restore_new_pool( int xc_tmem_restore(xc_interface *xch, int dom, int io_fd) @@ -10,8 +12,10 @@ uint32_t this_max_pools, this_version; uint32_t pool_id; uint32_t minusone; ---- a/tools/libxc/xc_domain_restore.c -+++ b/tools/libxc/xc_domain_restore.c +Index: xen-4.1.2-testing/tools/libxc/xc_domain_restore.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/libxc/xc_domain_restore.c ++++ xen-4.1.2-testing/tools/libxc/xc_domain_restore.c @@ -1087,7 +1087,6 @@ int xc_domain_restore(xc_interface *xch, int vcpuextstate = 0; uint32_t vcpuextstate_size = 0; @@ -28,8 +32,10 @@ n = m = 0; loadpages: ---- a/tools/misc/gtraceview.c -+++ b/tools/misc/gtraceview.c +Index: xen-4.1.2-testing/tools/misc/gtraceview.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/misc/gtraceview.c ++++ xen-4.1.2-testing/tools/misc/gtraceview.c @@ -622,7 +622,8 @@ void crt_init(void) void nr_addch(int nr, int ch) { @@ -40,8 +46,10 @@ getyx(stdscr, y, x); for (i = 0; i < nr; i++) { if (x == COLS-1) ---- a/tools/xcutils/xc_restore.c -+++ b/tools/xcutils/xc_restore.c +Index: 
xen-4.1.2-testing/tools/xcutils/xc_restore.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xcutils/xc_restore.c ++++ xen-4.1.2-testing/tools/xcutils/xc_restore.c @@ -19,7 +19,8 @@ int main(int argc, char **argv) { @@ -52,8 +60,10 @@ xc_interface *xch; int io_fd, ret; int superpages; ---- a/tools/firmware/rombios/32bit/tcgbios/tcgbios.c -+++ b/tools/firmware/rombios/32bit/tcgbios/tcgbios.c +Index: xen-4.1.2-testing/tools/firmware/rombios/32bit/tcgbios/tcgbios.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/firmware/rombios/32bit/tcgbios/tcgbios.c ++++ xen-4.1.2-testing/tools/firmware/rombios/32bit/tcgbios/tcgbios.c @@ -1064,7 +1064,7 @@ uint32_t HashLogEvent32(struct hlei *hle uint32_t rc = 0; uint16_t size; @@ -63,8 +73,10 @@ uint32_t hashdataptr; uint32_t hashdatalen; ---- a/tools/console/client/main.c -+++ b/tools/console/client/main.c +Index: xen-4.1.2-testing/tools/console/client/main.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/console/client/main.c ++++ xen-4.1.2-testing/tools/console/client/main.c @@ -277,7 +277,8 @@ int main(int argc, char **argv) }; @@ -75,8 +87,10 @@ struct xs_handle *xs; char *end; console_type type = CONSOLE_INVAL; ---- a/tools/xenstat/xentop/xentop.c -+++ b/tools/xenstat/xentop/xentop.c +Index: xen-4.1.2-testing/tools/xenstat/xentop/xentop.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/xenstat/xentop/xentop.c ++++ xen-4.1.2-testing/tools/xenstat/xentop/xentop.c @@ -272,7 +272,8 @@ static void fail(const char *str) /* Return the row containing the cursor. */ static int current_row(void) @@ -97,9 +111,11 @@ getmaxyx(stdscr, y, x); return y; } ---- a/tools/libxl/libxlu_cfg.c -+++ b/tools/libxl/libxlu_cfg.c -@@ -348,7 +348,7 @@ char *xlu__cfgl_dequote(CfgParseContext +Index: xen-4.1.2-testing/tools/libxl/libxlu_cfg.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/libxl/libxlu_cfg.c ++++ xen-4.1.2-testing/tools/libxl/libxlu_cfg.c +@@ -348,7 +348,7 @@ char *xlu__cfgl_dequote(CfgParseContext #define NUMERIC_CHAR(minlen,maxlen,base,basetext) do{ \ char numbuf[(maxlen)+1], *ep; \ @@ -108,9 +124,11 @@ \ strncpy(numbuf,p,(maxlen)); \ numbuf[(maxlen)]= 0; \ ---- a/tools/libxl/libxl.c -+++ b/tools/libxl/libxl.c -@@ -221,7 +221,7 @@ int libxl_domain_rename(libxl_ctx *ctx, +Index: xen-4.1.2-testing/tools/libxl/libxl.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/libxl/libxl.c ++++ xen-4.1.2-testing/tools/libxl/libxl.c +@@ -221,7 +221,7 @@ int libxl_domain_rename(libxl_ctx *ctx, int libxl_domain_resume(libxl_ctx *ctx, uint32_t domid) { libxl__gc gc = LIBXL_INIT_GC(ctx); @@ -137,8 +155,10 @@ libxl__sprintf(&gc, "%s/device/vif", dompath), &nb_nics); if (!l) goto err; ---- a/tools/libxl/libxl_pci.c -+++ b/tools/libxl/libxl_pci.c +Index: xen-4.1.2-testing/tools/libxl/libxl_pci.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/libxl/libxl_pci.c ++++ xen-4.1.2-testing/tools/libxl/libxl_pci.c @@ -240,7 +240,7 @@ static int libxl_create_pci_backend(libx flexarray_t *front = NULL; flexarray_t *back = NULL; @@ -157,8 +177,10 @@ LIBXL__LOG(ctx, LIBXL__LOG_DEBUG, "Creating pci backend"); /* add pci device */ ---- a/tools/libxl/libxl_dom.c -+++ b/tools/libxl/libxl_dom.c +Index: 
xen-4.1.2-testing/tools/libxl/libxl_dom.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/libxl/libxl_dom.c ++++ xen-4.1.2-testing/tools/libxl/libxl_dom.c @@ -265,14 +265,13 @@ int libxl__build_hvm(libxl_ctx *ctx, uin libxl_domain_build_info *info, libxl_domain_build_state *state) { @@ -183,8 +205,10 @@ out: libxl__free_all(&gc); return 0; ---- a/tools/libxl/libxl_utils.c -+++ b/tools/libxl/libxl_utils.c +Index: xen-4.1.2-testing/tools/libxl/libxl_utils.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/libxl/libxl_utils.c ++++ xen-4.1.2-testing/tools/libxl/libxl_utils.c @@ -531,7 +531,7 @@ int libxl_devid_to_device_disk(libxl_ctx libxl__gc gc = LIBXL_INIT_GC(ctx); char *val; @@ -194,8 +218,10 @@ int rc = ERROR_INVAL; devid_n = libxl__device_disk_dev_number(devid); ---- a/tools/libxl/xl_cmdimpl.c -+++ b/tools/libxl/xl_cmdimpl.c +Index: xen-4.1.2-testing/tools/libxl/xl_cmdimpl.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/libxl/xl_cmdimpl.c ++++ xen-4.1.2-testing/tools/libxl/xl_cmdimpl.c @@ -5448,7 +5448,7 @@ int main_cpupoollist(int argc, char **ar {"cpus", 0, 0, 'c'}, {0, 0, 0, 0} @@ -205,8 +231,10 @@ int opt_cpus = 0; const char *pool = NULL; libxl_cpupoolinfo *poolinfo; ---- a/tools/debugger/gdbsx/gx/gx_comm.c -+++ b/tools/debugger/gdbsx/gx/gx_comm.c +Index: xen-4.1.2-testing/tools/debugger/gdbsx/gx/gx_comm.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/debugger/gdbsx/gx/gx_comm.c ++++ xen-4.1.2-testing/tools/debugger/gdbsx/gx/gx_comm.c @@ -163,7 +163,7 @@ readchar(void) static char buf[BUFSIZ]; static int bufcnt = 0; @@ -216,8 +244,10 @@ if (bufcnt-- > 0) return *bufp++ & 0x7f; ---- a/tools/python/xen/lowlevel/checkpoint/libcheckpoint.c -+++ b/tools/python/xen/lowlevel/checkpoint/libcheckpoint.c +Index: xen-4.1.2-testing/tools/python/xen/lowlevel/checkpoint/libcheckpoint.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/python/xen/lowlevel/checkpoint/libcheckpoint.c ++++ xen-4.1.2-testing/tools/python/xen/lowlevel/checkpoint/libcheckpoint.c @@ -820,7 +820,7 @@ static int create_suspend_thread(checkpo static void stop_suspend_thread(checkpoint_state* s) @@ -227,8 +257,10 @@ s->done = 1; ---- a/tools/python/xen/lowlevel/netlink/libnetlink.c -+++ b/tools/python/xen/lowlevel/netlink/libnetlink.c +Index: xen-4.1.2-testing/tools/python/xen/lowlevel/netlink/libnetlink.c +=================================================================== +--- xen-4.1.2-testing.orig/tools/python/xen/lowlevel/netlink/libnetlink.c ++++ xen-4.1.2-testing/tools/python/xen/lowlevel/netlink/libnetlink.c @@ -433,7 +433,8 @@ int rtnl_from_file(FILE *rtnl, rtnl_filt nladdr.nl_groups = 0; @@ -239,8 +271,10 @@ int l; status = fread(&buf, 1, sizeof(*h), rtnl); ---- a/xen/arch/x86/msi.c -+++ b/xen/arch/x86/msi.c +Index: xen-4.1.2-testing/xen/arch/x86/msi.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/msi.c ++++ xen-4.1.2-testing/xen/arch/x86/msi.c @@ -799,7 +799,7 @@ static void __pci_disable_msi(struct msi { struct pci_dev *dev; @@ -250,9 +284,11 @@ u8 bus, slot, func; dev = entry->dev; ---- a/xen/arch/x86/microcode_amd.c -+++ b/xen/arch/x86/microcode_amd.c -@@ -160,7 +160,7 @@ static int apply_microcode(int cpu) +Index: xen-4.1.2-testing/xen/arch/x86/microcode_amd.c 
+=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/microcode_amd.c ++++ xen-4.1.2-testing/xen/arch/x86/microcode_amd.c +@@ -151,7 +151,7 @@ static int apply_microcode(int cpu) static int get_next_ucode_from_buffer_amd(void *mc, const void *buf, size_t size, unsigned long *offset) { @@ -261,8 +297,10 @@ size_t total_size; const uint8_t *bufp = buf; unsigned long off; ---- a/xen/common/cpupool.c -+++ b/xen/common/cpupool.c +Index: xen-4.1.2-testing/xen/common/cpupool.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/common/cpupool.c ++++ xen-4.1.2-testing/xen/common/cpupool.c @@ -356,7 +356,7 @@ int cpupool_add_domain(struct domain *d, { struct cpupool *c; @@ -283,8 +321,10 @@ if ( d->cpupool == NULL ) return; ---- a/xen/common/grant_table.c -+++ b/xen/common/grant_table.c +Index: xen-4.1.2-testing/xen/common/grant_table.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/common/grant_table.c ++++ xen-4.1.2-testing/xen/common/grant_table.c @@ -765,7 +765,7 @@ __gnttab_unmap_common( struct domain *ld, *rd; struct active_grant_entry *act; @@ -294,8 +334,10 @@ ld = current->domain; ---- a/xen/common/kexec.c -+++ b/xen/common/kexec.c +Index: xen-4.1.2-testing/xen/common/kexec.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/common/kexec.c ++++ xen-4.1.2-testing/xen/common/kexec.c @@ -569,7 +569,8 @@ static int kexec_exec(XEN_GUEST_HANDLE(v { xen_kexec_exec_t exec; @@ -306,8 +348,10 @@ if ( unlikely(copy_from_guest(&exec, uarg, 1)) ) return -EFAULT; ---- a/xen/drivers/passthrough/vtd/intremap.c -+++ b/xen/drivers/passthrough/vtd/intremap.c +Index: xen-4.1.2-testing/xen/drivers/passthrough/vtd/intremap.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/drivers/passthrough/vtd/intremap.c ++++ xen-4.1.2-testing/xen/drivers/passthrough/vtd/intremap.c @@ -367,7 +367,7 @@ unsigned int io_apic_read_remap_rte( unsigned int ioapic_pin = (reg - 0x10) / 2; int index; @@ -326,8 +370,10 @@ iommu = drhd->iommu; qi_ctrl = iommu_qi_ctrl(iommu); ---- a/xen/common/sched_credit2.c -+++ b/xen/common/sched_credit2.c +Index: xen-4.1.2-testing/xen/common/sched_credit2.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/common/sched_credit2.c ++++ xen-4.1.2-testing/xen/common/sched_credit2.c @@ -1854,7 +1854,8 @@ static void deactivate_runqueue(struct c static void init_pcpu(const struct scheduler *ops, int cpu) @@ -338,9 +384,11 @@ struct csched_private *prv = CSCHED_PRIV(ops); struct csched_runqueue_data *rqd; spinlock_t *old_lock; ---- a/xen/common/unlzo.c -+++ b/xen/common/unlzo.c -@@ -68,7 +68,7 @@ static int INIT parse_header(u8 *input, +Index: xen-4.1.2-testing/xen/common/unlzo.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/common/unlzo.c ++++ xen-4.1.2-testing/xen/common/unlzo.c +@@ -68,7 +68,7 @@ static int INIT parse_header(u8 *input, { int l; u8 *parse = input; @@ -349,8 +397,10 @@ u16 version; /* read magic: 9 first bits */ ---- a/xen/arch/x86/time.c -+++ b/xen/arch/x86/time.c +Index: xen-4.1.2-testing/xen/arch/x86/time.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/time.c ++++ xen-4.1.2-testing/xen/arch/x86/time.c @@ -1009,7 +1009,8 @@ static void 
local_time_calibration(void) * System timestamps, extrapolated from local and master oscillators, * taken during this calibration and the previous calibration. @@ -361,8 +411,10 @@ s_time_t prev_master_stime, curr_master_stime; /* TSC timestamps taken during this calibration and prev calibration. */ ---- a/xen/arch/x86/cpu/amd.c -+++ b/xen/arch/x86/cpu/amd.c +Index: xen-4.1.2-testing/xen/arch/x86/cpu/amd.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/cpu/amd.c ++++ xen-4.1.2-testing/xen/arch/x86/cpu/amd.c @@ -391,7 +391,7 @@ static void __devinit init_amd(struct cp { u32 l, h; @@ -372,9 +424,11 @@ #ifdef CONFIG_SMP unsigned long long value; ---- a/xen/arch/x86/mm/p2m.c -+++ b/xen/arch/x86/mm/p2m.c -@@ -2338,7 +2338,7 @@ p2m_remove_page(struct p2m_domain *p2m, +Index: xen-4.1.2-testing/xen/arch/x86/mm/p2m.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/mm/p2m.c ++++ xen-4.1.2-testing/xen/arch/x86/mm/p2m.c +@@ -2338,7 +2338,7 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned int page_order) { unsigned long i; @@ -392,8 +446,10 @@ int pod_count = 0; int rc = 0; ---- a/xen/arch/x86/hvm/emulate.c -+++ b/xen/arch/x86/hvm/emulate.c +Index: xen-4.1.2-testing/xen/arch/x86/hvm/emulate.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/hvm/emulate.c ++++ xen-4.1.2-testing/xen/arch/x86/hvm/emulate.c @@ -59,7 +59,7 @@ static int hvmemul_do_io( ioreq_t *p = get_ioreq(curr); unsigned long ram_gfn = paddr_to_pfn(ram_gpa); @@ -403,8 +459,10 @@ int rc; /* Check for paged out page */ ---- a/xen/arch/x86/hvm/hvm.c -+++ b/xen/arch/x86/hvm/hvm.c +Index: xen-4.1.2-testing/xen/arch/x86/hvm/hvm.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/hvm/hvm.c ++++ xen-4.1.2-testing/xen/arch/x86/hvm/hvm.c @@ -253,7 +253,8 @@ void hvm_migrate_timers(struct vcpu *v) void hvm_migrate_pirqs(struct vcpu *v) @@ -424,8 +482,10 @@ mfn = gfn_to_mfn_unshare(p2m, pfn, &t, 0); if ( p2m_is_paging(t) ) { ---- a/xen/arch/x86/acpi/cpu_idle.c -+++ b/xen/arch/x86/acpi/cpu_idle.c +Index: xen-4.1.2-testing/xen/arch/x86/acpi/cpu_idle.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/acpi/cpu_idle.c ++++ xen-4.1.2-testing/xen/arch/x86/acpi/cpu_idle.c @@ -275,7 +275,7 @@ static void acpi_processor_ffh_cstate_en static void acpi_idle_do_entry(struct acpi_processor_cx *cx) @@ -435,8 +495,10 @@ switch ( cx->entry_method ) { ---- a/xen/arch/x86/cpu/intel_cacheinfo.c -+++ b/xen/arch/x86/cpu/intel_cacheinfo.c +Index: xen-4.1.2-testing/xen/arch/x86/cpu/intel_cacheinfo.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/cpu/intel_cacheinfo.c ++++ xen-4.1.2-testing/xen/arch/x86/cpu/intel_cacheinfo.c @@ -170,7 +170,8 @@ unsigned int __cpuinit init_intel_cachei unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */ unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ @@ -447,9 +509,11 @@ if (c->cpuid_level > 3) { static int is_initialized; ---- a/xen/arch/x86/mm/mem_sharing.c -+++ b/xen/arch/x86/mm/mem_sharing.c -@@ -376,7 +376,7 @@ int mem_sharing_debug_gfn(struct domain +Index: xen-4.1.2-testing/xen/arch/x86/mm/mem_sharing.c +=================================================================== +--- 
xen-4.1.2-testing.orig/xen/arch/x86/mm/mem_sharing.c ++++ xen-4.1.2-testing/xen/arch/x86/mm/mem_sharing.c +@@ -375,7 +375,7 @@ int mem_sharing_debug_gfn(struct domain { p2m_type_t p2mt; mfn_t mfn; @@ -458,8 +522,10 @@ mfn = gfn_to_mfn(p2m_get_hostp2m(d), gfn, &p2mt); page = mfn_to_page(mfn); ---- a/xen/arch/x86/hvm/viridian.c -+++ b/xen/arch/x86/hvm/viridian.c +Index: xen-4.1.2-testing/xen/arch/x86/hvm/viridian.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/hvm/viridian.c ++++ xen-4.1.2-testing/xen/arch/x86/hvm/viridian.c @@ -270,7 +270,7 @@ int rdmsr_viridian_regs(uint32_t idx, ui int viridian_hypercall(struct cpu_user_regs *regs) { @@ -469,9 +535,11 @@ uint16_t status = HV_STATUS_SUCCESS; union hypercall_input { ---- a/xen/arch/x86/mm.c -+++ b/xen/arch/x86/mm.c -@@ -4904,7 +4904,7 @@ static int ptwr_emulated_update( +Index: xen-4.1.2-testing/xen/arch/x86/mm.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/mm.c ++++ xen-4.1.2-testing/xen/arch/x86/mm.c +@@ -4906,7 +4906,7 @@ static int ptwr_emulated_update( { unsigned long mfn; unsigned long unaligned_addr = addr; @@ -480,8 +548,10 @@ l1_pgentry_t pte, ol1e, nl1e, *pl1e; struct vcpu *v = current; struct domain *d = v->domain; ---- a/xen/arch/x86/x86_64/mm.c -+++ b/xen/arch/x86/x86_64/mm.c +Index: xen-4.1.2-testing/xen/arch/x86/x86_64/mm.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/x86_64/mm.c ++++ xen-4.1.2-testing/xen/arch/x86/x86_64/mm.c @@ -436,7 +436,8 @@ void destroy_m2p_mapping(struct mem_hota static int setup_compat_m2p_table(struct mem_hotadd_info *info) { @@ -492,8 +562,10 @@ l3_pgentry_t *l3_ro_mpt = NULL; l2_pgentry_t *l2_ro_mpt = NULL; struct page_info *l1_pg; ---- a/xen/arch/x86/cpu/mcheck/mce.c -+++ b/xen/arch/x86/cpu/mcheck/mce.c +Index: xen-4.1.2-testing/xen/arch/x86/cpu/mcheck/mce.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/cpu/mcheck/mce.c ++++ xen-4.1.2-testing/xen/arch/x86/cpu/mcheck/mce.c @@ -151,7 +151,6 @@ static struct mcinfo_bank *mca_init_bank struct mc_info *mi, int bank) { @@ -510,7 +582,7 @@ if (mib->mc_status & MCi_STATUS_MISCV) mib->mc_misc = mca_rdmsr(MSR_IA32_MCx_MISC(bank)); -@@ -281,7 +279,7 @@ mctelem_cookie_t mcheck_mca_logout(enum +@@ -281,7 +279,7 @@ mctelem_cookie_t mcheck_mca_logout(enum recover = (mc_recoverable_scan)? 1: 0; for (i = 0; i < 32 && i < nr_mce_banks; i++) { @@ -528,8 +600,10 @@ uint64_t hwcr = 0; int intpose; int i; ---- a/xen/common/tmem.c -+++ b/xen/common/tmem.c +Index: xen-4.1.2-testing/xen/common/tmem.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/common/tmem.c ++++ xen-4.1.2-testing/xen/common/tmem.c @@ -1351,7 +1351,8 @@ obj_unlock: static int tmem_evict(void) { @@ -550,8 +624,10 @@ client_t *client = pool->client; int ret = client->frozen ? 
-EFROZEN : -ENOMEM; ---- a/xen/common/tmem_xen.c -+++ b/xen/common/tmem_xen.c +Index: xen-4.1.2-testing/xen/common/tmem_xen.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/common/tmem_xen.c ++++ xen-4.1.2-testing/xen/common/tmem_xen.c @@ -177,7 +177,7 @@ EXPORT int tmh_copy_from_client(pfp_t *p EXPORT int tmh_compress_from_client(tmem_cli_mfn_t cmfn, void **out_va, size_t *out_len, void *cli_va) @@ -589,8 +665,10 @@ tmh->persistent_pool = xmem_pool_create(name, tmh_persistent_pool_page_get, tmh_persistent_pool_page_put, PAGE_SIZE, 0, PAGE_SIZE); if ( tmh->persistent_pool == NULL ) ---- a/xen/arch/x86/cpu/mcheck/vmce.c -+++ b/xen/arch/x86/cpu/mcheck/vmce.c +Index: xen-4.1.2-testing/xen/arch/x86/cpu/mcheck/vmce.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/cpu/mcheck/vmce.c ++++ xen-4.1.2-testing/xen/arch/x86/cpu/mcheck/vmce.c @@ -574,7 +574,7 @@ int is_vmce_ready(struct mcinfo_bank *ba */ int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn) @@ -600,9 +678,11 @@ struct p2m_domain *p2m; p2m_type_t pt; ---- a/xen/arch/x86/mm/shadow/multi.c -+++ b/xen/arch/x86/mm/shadow/multi.c -@@ -124,7 +124,7 @@ set_shadow_status(struct vcpu *v, mfn_t +Index: xen-4.1.2-testing/xen/arch/x86/mm/shadow/multi.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/mm/shadow/multi.c ++++ xen-4.1.2-testing/xen/arch/x86/mm/shadow/multi.c +@@ -124,7 +124,7 @@ set_shadow_status(struct vcpu *v, mfn_t /* Put a shadow into the hash table */ { struct domain *d = v->domain; @@ -620,8 +700,10 @@ shadow_l1e_t *sl1p, sl1e; struct page_info *sp; ---- a/xen/arch/x86/domain_build.c -+++ b/xen/arch/x86/domain_build.c +Index: xen-4.1.2-testing/xen/arch/x86/domain_build.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/domain_build.c ++++ xen-4.1.2-testing/xen/arch/x86/domain_build.c @@ -378,8 +378,7 @@ int __init construct_dom0( return rc; @@ -632,9 +714,11 @@ machine = elf_uval(&elf, elf.ehdr, e_machine); switch (CONFIG_PAGING_LEVELS) { case 3: /* x86_32p */ ---- a/xen/arch/x86/traps.c -+++ b/xen/arch/x86/traps.c -@@ -1853,7 +1853,11 @@ static int emulate_privileged_op(struct +Index: xen-4.1.2-testing/xen/arch/x86/traps.c +=================================================================== +--- xen-4.1.2-testing.orig/xen/arch/x86/traps.c ++++ xen-4.1.2-testing/xen/arch/x86/traps.c +@@ -1854,7 +1854,11 @@ static int emulate_privileged_op(struct struct vcpu *v = current; unsigned long *reg, eip = regs->eip; u8 opcode, modrm_reg = 0, modrm_rm = 0, rep_prefix = 0, lock = 0, rex = 0; diff --git a/xen.changes b/xen.changes index c590e29..7d7bca0 100644 --- a/xen.changes +++ b/xen.changes @@ -1,3 +1,83 @@ +------------------------------------------------------------------- +Wed Oct 26 10:13:04 MDT 2011 - carnold@novell.com + +- bnc#725169 - xen-4.0.2_21511_03-0.5.3: bootup hangs + 23993-x86-microcode-amd-fix-23871.patch + +------------------------------------------------------------------- +Wed Oct 26 09:48:14 MDT 2011 - carnold@novell.com + +- Update to Xen 4.1.2 FCS c/s 23174 + +------------------------------------------------------------------- +Mon Oct 24 09:26:24 MDT 2011 - jfehlig@suse.com + +- bnc#720054 - Fix syntax error introduced during recent adjustment + of Xen's tap udev rule. 
+ Updated udev-rules.patch + +------------------------------------------------------------------- +Thu Oct 20 21:39:08 CEST 2011 - ohering@suse.de + +- fate#310510 - fix xenpaging + Merge paging related fixes from xen-unstable: + 23506-x86_Disable_set_gpfn_from_mfn_until_m2p_table_is_allocated..patch + 23507-xenpaging_update_machine_to_phys_mapping_during_page_deallocation.patch + 23509-x86_32_Fix_build_Define_machine_to_phys_mapping_valid.patch + 23562-xenpaging_remove_unused_spinlock_in_pager.patch + 23576-x86_show_page_walk_also_for_early_page_faults.patch + 23577-tools_merge_several_bitop_functions_into_xc_bitops.h.patch + 23578-xenpaging_add_xs_handle_to_struct_xenpaging.patch + 23579-xenpaging_drop_xc.c_remove_ASSERT.patch + 23580-xenpaging_drop_xc.c_remove_xc_platform_info_t.patch + 23581-xenpaging_drop_xc.c_remove_xc_wait_for_event.patch + 23582-xenpaging_drop_xc.c_move_xc_mem_paging_flush_ioemu_cache.patch + 23583-xenpaging_drop_xc.c_move_xc_wait_for_event_or_timeout.patch + 23584-xenpaging_drop_xc.c_remove_xc_files.patch + 23585-xenpaging_correct_dropping_of_pages_to_avoid_full_ring_buffer.patch + 23586-xenpaging_do_not_bounce_p2mt_back_to_the_hypervisor.patch + 23587-xenpaging_remove_srand_call.patch + 23588-xenpaging_remove_return_values_from_functions_that_can_not_fail.patch + 23589-xenpaging_catch_xc_mem_paging_resume_errors.patch + 23590-xenpaging_remove_local_domain_id_variable.patch + 23591-xenpaging_move_num_pages_into_xenpaging_struct.patch + 23592-xenpaging_start_paging_in_the_middle_of_gfn_range.patch + 23593-xenpaging_pass_integer_to_xenpaging_populate_page.patch + 23594-xenpaging_add_helper_function_for_unlinking_pagefile.patch + 23595-xenpaging_add_watch_thread_to_catch_guest_shutdown.patch + 23596-xenpaging_implement_stopping_of_pager_by_sending_SIGTERM-SIGINT.patch + 23597-xenpaging_remove_private_mem_event.h.patch + 23599-tools_fix_build_after_recent_xenpaging_changes.patch + 23817-mem_event_add_ref_counting_for_free_requestslots.patch + 23818-mem_event_use_mem_event_mark_and_pause_in_mem_event_check_ring.patch + 23827-xenpaging_use_batch_of_pages_during_final_page-in.patch + 23841-mem_event_pass_mem_event_domain_pointer_to_mem_event_functions.patch + 23842-mem_event_use_different_ringbuffers_for_share_paging_and_access.patch + 23874-xenpaging_track_number_of_paged_pages_in_struct_domain.patch + 23904-xenpaging_use_p2m-get_entry_in_p2m_mem_paging_functions.patch + 23905-xenpaging_fix_locking_in_p2m_mem_paging_functions.patch + 23906-xenpaging_remove_confusing_comment_from_p2m_mem_paging_populate.patch + 23908-p2m_query-modify_p2mt_with_p2m_lock_held.patch + 23943-xenpaging_clear_page_content_after_evict.patch + 23953-xenpaging_handle_evict_failures.patch + 23978-xenpaging_check_p2mt_in_p2m_mem_paging_functions.patch + 23979-xenpaging_document_p2m_mem_paging_functions.patch + 23980-xenpaging_disallow_paging_in_a_PoD_guest.patch + Remove obsolete patches: + x86-show-page-walk-early.patch + xenpaging.23817-mem_event_check_ring.patch + xenpaging.catch-xc_mem_paging_resume-error.patch + xenpaging.guest_remove_page.slow_path.patch + xenpaging.mem_event-no-p2mt.patch + xenpaging.no-srand.patch + xenpaging.return-void.patch + xenpaging.xenpaging_populate_page-gfn.patch + +------------------------------------------------------------------- +Thu Oct 20 20:57:11 CEST 2011 - ohering@suse.de + +- xen.spec: use changeset number as patch number for upstream patches + ------------------------------------------------------------------- Wed Oct 19 11:37:36 UTC 2011 - 
adrian@suse.de diff --git a/xen.spec b/xen.spec index c204666..d94fbf8 100644 --- a/xen.spec +++ b/xen.spec @@ -22,7 +22,7 @@ Name: xen ExclusiveArch: %ix86 x86_64 %define xvers 4.1 %define xvermaj 4 -%define changeset 23171 +%define changeset 23174 %define xen_build_dir xen-4.1.2-testing %define with_kmp 1 %define with_stubdom 1 @@ -96,7 +96,7 @@ BuildRequires: glibc-devel %if %{?with_kmp}0 BuildRequires: kernel-source kernel-syms module-init-tools xorg-x11 %endif -Version: 4.1.2_03 +Version: 4.1.2_05 Release: 1 License: GPLv2+ Group: System/Kernel @@ -142,62 +142,105 @@ Source99: baselibs.conf # http://xenbits.xensource.com/ext/xenalyze Source20000: xenalyze.hg.tar.bz2 # Upstream patches -Patch1: 22998-x86-get_page_from_l1e-retcode.patch -Patch2: 22999-x86-mod_l1_entry-retcode.patch -Patch3: 23000-x86-mod_l2_entry-retcode.patch -Patch4: 23074-pfn.h.patch -Patch5: 23096-x86-hpet-no-cpumask_lock.patch -Patch6: 23099-x86-rwlock-scalability.patch -Patch7: 23103-x86-pirq-guest-eoi-check.patch -Patch8: 23127-vtd-bios-settings.patch -Patch9: 23199-amd-iommu-unmapped-intr-fault.patch -Patch10: 23233-hvm-cr-access.patch -Patch11: 23234-svm-decode-assist-base.patch -Patch12: 23235-svm-decode-assist-crs.patch -Patch13: 23236-svm-decode-assist-invlpg.patch -Patch14: 23238-svm-decode-assist-insn-fetch.patch -Patch15: 23246-x86-xsave-enable.patch -Patch16: 23303-cpufreq-misc.patch -Patch17: 23304-amd-oprofile-strings.patch -Patch18: 23305-amd-fam15-xenoprof.patch -Patch19: 23306-amd-fam15-vpmu.patch -Patch20: 23334-amd-fam12+14-vpmu.patch -Patch21: 23383-libxc-rm-static-vars.patch -Patch22: 23437-amd-fam15-TSC-scaling.patch -Patch23: 23462-libxc-cpu-feature.patch -Patch24: 23508-vmx-proc-based-ctls-probe.patch -Patch25: 23511-amd-fam15-no-flush-for-C3.patch -Patch26: 23571-vtd-fault-verbosity.patch -Patch27: 23574-x86-dom0-compressed-ELF.patch -Patch28: 23575-x86-DMI.patch -Patch29: 23610-x86-topology-info.patch -Patch30: 23611-amd-fam15-topology.patch -Patch31: 23613-EFI-headers.patch -Patch32: 23614-x86_64-EFI-boot.patch -Patch33: 23615-x86_64-EFI-runtime.patch -Patch34: 23616-x86_64-EFI-MPS.patch -Patch35: 23676-x86_64-image-map-bounds.patch -Patch36: 23723-x86-CMOS-lock.patch -Patch37: 23724-x86-smpboot-x2apic.patch -Patch38: 23726-x86-intel-flexmigration-v2.patch -Patch39: 23735-guest-dom0-cap.patch -Patch40: 23747-mmcfg-base-address.patch -Patch41: 23749-mmcfg-reservation.patch -Patch42: 23771-x86-ioapic-clear-pin.patch -Patch43: 23772-x86-trampoline.patch -Patch44: 23774-x86_64-EFI-EDD.patch -Patch45: 23781-pm-wide-ACPI-ids.patch -Patch46: 23782-x86-ioapic-clear-irr.patch -Patch47: 23783-ACPI-set-_PDC-bits.patch -Patch48: 23795-intel-ich10-quirk.patch -Patch49: 23800-x86_64-guest-addr-range.patch -Patch50: 23804-x86-IPI-counts.patch -Patch51: 23853-x86-pv-cpuid-xsave.patch -Patch52: 23897-x86-mce-offline-again.patch -Patch53: 23925-x86-AMD-ARAT-Fam12.patch -Patch54: 23933-pt-bus2bridge-update.patch -Patch55: 23955-x86-pv-cpuid-xsave.patch -Patch56: 23957-cpufreq-error-paths.patch +Patch22998: 22998-x86-get_page_from_l1e-retcode.patch +Patch22999: 22999-x86-mod_l1_entry-retcode.patch +Patch23000: 23000-x86-mod_l2_entry-retcode.patch +Patch23074: 23074-pfn.h.patch +Patch23096: 23096-x86-hpet-no-cpumask_lock.patch +Patch23099: 23099-x86-rwlock-scalability.patch +Patch23103: 23103-x86-pirq-guest-eoi-check.patch +Patch23127: 23127-vtd-bios-settings.patch +Patch23199: 23199-amd-iommu-unmapped-intr-fault.patch +Patch23233: 23233-hvm-cr-access.patch +Patch23234: 23234-svm-decode-assist-base.patch 
+Patch23235: 23235-svm-decode-assist-crs.patch +Patch23236: 23236-svm-decode-assist-invlpg.patch +Patch23238: 23238-svm-decode-assist-insn-fetch.patch +Patch23246: 23246-x86-xsave-enable.patch +Patch23303: 23303-cpufreq-misc.patch +Patch23304: 23304-amd-oprofile-strings.patch +Patch23305: 23305-amd-fam15-xenoprof.patch +Patch23306: 23306-amd-fam15-vpmu.patch +Patch23334: 23334-amd-fam12+14-vpmu.patch +Patch23383: 23383-libxc-rm-static-vars.patch +Patch23437: 23437-amd-fam15-TSC-scaling.patch +Patch23462: 23462-libxc-cpu-feature.patch +Patch23506: 23506-x86_Disable_set_gpfn_from_mfn_until_m2p_table_is_allocated..patch +Patch23507: 23507-xenpaging_update_machine_to_phys_mapping_during_page_deallocation.patch +Patch23508: 23508-vmx-proc-based-ctls-probe.patch +Patch23509: 23509-x86_32_Fix_build_Define_machine_to_phys_mapping_valid.patch +Patch23511: 23511-amd-fam15-no-flush-for-C3.patch +Patch23562: 23562-xenpaging_remove_unused_spinlock_in_pager.patch +Patch23571: 23571-vtd-fault-verbosity.patch +Patch23574: 23574-x86-dom0-compressed-ELF.patch +Patch23575: 23575-x86-DMI.patch +Patch23576: 23576-x86_show_page_walk_also_for_early_page_faults.patch +Patch23577: 23577-tools_merge_several_bitop_functions_into_xc_bitops.h.patch +Patch23578: 23578-xenpaging_add_xs_handle_to_struct_xenpaging.patch +Patch23579: 23579-xenpaging_drop_xc.c_remove_ASSERT.patch +Patch23580: 23580-xenpaging_drop_xc.c_remove_xc_platform_info_t.patch +Patch23581: 23581-xenpaging_drop_xc.c_remove_xc_wait_for_event.patch +Patch23582: 23582-xenpaging_drop_xc.c_move_xc_mem_paging_flush_ioemu_cache.patch +Patch23583: 23583-xenpaging_drop_xc.c_move_xc_wait_for_event_or_timeout.patch +Patch23584: 23584-xenpaging_drop_xc.c_remove_xc_files.patch +Patch23585: 23585-xenpaging_correct_dropping_of_pages_to_avoid_full_ring_buffer.patch +Patch23586: 23586-xenpaging_do_not_bounce_p2mt_back_to_the_hypervisor.patch +Patch23587: 23587-xenpaging_remove_srand_call.patch +Patch23588: 23588-xenpaging_remove_return_values_from_functions_that_can_not_fail.patch +Patch23589: 23589-xenpaging_catch_xc_mem_paging_resume_errors.patch +Patch23590: 23590-xenpaging_remove_local_domain_id_variable.patch +Patch23591: 23591-xenpaging_move_num_pages_into_xenpaging_struct.patch +Patch23592: 23592-xenpaging_start_paging_in_the_middle_of_gfn_range.patch +Patch23593: 23593-xenpaging_pass_integer_to_xenpaging_populate_page.patch +Patch23594: 23594-xenpaging_add_helper_function_for_unlinking_pagefile.patch +Patch23595: 23595-xenpaging_add_watch_thread_to_catch_guest_shutdown.patch +Patch23596: 23596-xenpaging_implement_stopping_of_pager_by_sending_SIGTERM-SIGINT.patch +Patch23597: 23597-xenpaging_remove_private_mem_event.h.patch +Patch23599: 23599-tools_fix_build_after_recent_xenpaging_changes.patch +Patch23610: 23610-x86-topology-info.patch +Patch23611: 23611-amd-fam15-topology.patch +Patch23613: 23613-EFI-headers.patch +Patch23614: 23614-x86_64-EFI-boot.patch +Patch23615: 23615-x86_64-EFI-runtime.patch +Patch23616: 23616-x86_64-EFI-MPS.patch +Patch23676: 23676-x86_64-image-map-bounds.patch +Patch23723: 23723-x86-CMOS-lock.patch +Patch23724: 23724-x86-smpboot-x2apic.patch +Patch23726: 23726-x86-intel-flexmigration-v2.patch +Patch23735: 23735-guest-dom0-cap.patch +Patch23747: 23747-mmcfg-base-address.patch +Patch23749: 23749-mmcfg-reservation.patch +Patch23771: 23771-x86-ioapic-clear-pin.patch +Patch23772: 23772-x86-trampoline.patch +Patch23774: 23774-x86_64-EFI-EDD.patch +Patch23781: 23781-pm-wide-ACPI-ids.patch +Patch23782: 23782-x86-ioapic-clear-irr.patch 
+Patch23783: 23783-ACPI-set-_PDC-bits.patch +Patch23795: 23795-intel-ich10-quirk.patch +Patch23800: 23800-x86_64-guest-addr-range.patch +Patch23804: 23804-x86-IPI-counts.patch +Patch23817: 23817-mem_event_add_ref_counting_for_free_requestslots.patch +Patch23818: 23818-mem_event_use_mem_event_mark_and_pause_in_mem_event_check_ring.patch +Patch23827: 23827-xenpaging_use_batch_of_pages_during_final_page-in.patch +Patch23841: 23841-mem_event_pass_mem_event_domain_pointer_to_mem_event_functions.patch +Patch23842: 23842-mem_event_use_different_ringbuffers_for_share_paging_and_access.patch +Patch23853: 23853-x86-pv-cpuid-xsave.patch +Patch23874: 23874-xenpaging_track_number_of_paged_pages_in_struct_domain.patch +Patch23897: 23897-x86-mce-offline-again.patch +Patch23904: 23904-xenpaging_use_p2m-get_entry_in_p2m_mem_paging_functions.patch +Patch23905: 23905-xenpaging_fix_locking_in_p2m_mem_paging_functions.patch +Patch23906: 23906-xenpaging_remove_confusing_comment_from_p2m_mem_paging_populate.patch +Patch23908: 23908-p2m_query-modify_p2mt_with_p2m_lock_held.patch +Patch23925: 23925-x86-AMD-ARAT-Fam12.patch +Patch23933: 23933-pt-bus2bridge-update.patch +Patch23943: 23943-xenpaging_clear_page_content_after_evict.patch +Patch23953: 23953-xenpaging_handle_evict_failures.patch +Patch23955: 23955-x86-pv-cpuid-xsave.patch +Patch23957: 23957-cpufreq-error-paths.patch +Patch23978: 23978-xenpaging_check_p2mt_in_p2m_mem_paging_functions.patch +Patch23979: 23979-xenpaging_document_p2m_mem_paging_functions.patch +Patch23980: 23980-xenpaging_disallow_paging_in_a_PoD_guest.patch +Patch23993: 23993-x86-microcode-amd-fix-23871.patch # Upstream qemu patches # Our patches Patch300: xen-config.diff @@ -311,7 +354,6 @@ Patch501: x86-ioapic-ack-default.patch Patch502: x86-cpufreq-report.patch Patch504: dom-print.patch Patch505: pvdrv-import-shared-info.patch -Patch506: x86-show-page-walk-early.patch Patch507: x86-extra-trap-info.patch Patch508: pvdrv_emulation_control.patch Patch509: blktap-pv-cdrom.patch @@ -339,14 +381,7 @@ Patch1010: xen-unstable.xentrace.t_info_first_offset.patch Patch1011: xen-unstable.xentrace.data_size__read_mostly.patch Patch1012: xen-unstable.xentrace.__insert_record-dst-type.patch # FATE 310510 -Patch1100: xenpaging.guest_remove_page.slow_path.patch -Patch1101: xenpaging.mem_event-no-p2mt.patch -Patch1102: xenpaging.no-srand.patch -Patch1103: xenpaging.return-void.patch -Patch1104: xenpaging.catch-xc_mem_paging_resume-error.patch -Patch1105: xenpaging.xenpaging_populate_page-gfn.patch Patch1106: xenpaging.autostart.patch -Patch1107: xenpaging.23817-mem_event_check_ring.patch # xenalyze Patch20000: xenalyze.gcc46.patch # Build patch @@ -690,62 +725,107 @@ Authors: %setup -q -n %xen_build_dir -a 1 -a 20000 %patch20000 -p1 tar xfj %{SOURCE2} -C $RPM_BUILD_DIR/%{xen_build_dir}/tools -%patch1 -p1 -%patch2 -p1 -%patch3 -p1 -%patch4 -p1 -%patch5 -p1 -%patch6 -p1 -%patch7 -p1 -%patch8 -p1 -%patch9 -p1 -%patch10 -p1 -%patch11 -p1 -%patch12 -p1 -%patch13 -p1 -%patch14 -p1 -%patch15 -p1 -%patch16 -p1 -%patch17 -p1 -%patch18 -p1 -%patch19 -p1 -%patch20 -p1 -%patch21 -p1 -%patch22 -p1 -%patch23 -p1 -%patch24 -p1 -%patch25 -p1 -%patch26 -p1 -%patch27 -p1 -%patch28 -p1 -%patch29 -p1 -%patch30 -p1 -%patch31 -p1 -%patch32 -p1 -%patch33 -p1 -%patch34 -p1 -%patch35 -p1 -%patch36 -p1 -%patch37 -p1 -%patch38 -p1 -%patch39 -p1 -%patch40 -p1 -%patch41 -p1 -%patch42 -p1 -%patch43 -p1 -%patch44 -p1 -%patch45 -p1 -%patch46 -p1 -%patch47 -p1 -%patch48 -p1 -%patch49 -p1 -%patch50 -p1 -%patch51 -p1 -%patch52 -p1 
-%patch53 -p1 -%patch54 -p1 -%patch55 -p1 -%patch56 -p1 +# Upstream patches +%patch22998 -p1 +%patch22999 -p1 +%patch23000 -p1 +%patch23074 -p1 +%patch23096 -p1 +%patch23099 -p1 +%patch23103 -p1 +%patch23127 -p1 +%patch23199 -p1 +%patch23233 -p1 +%patch23234 -p1 +%patch23235 -p1 +%patch23236 -p1 +%patch23238 -p1 +%patch23246 -p1 +%patch23303 -p1 +%patch23304 -p1 +%patch23305 -p1 +%patch23306 -p1 +%patch23334 -p1 +%patch23383 -p1 +%patch23437 -p1 +%patch23462 -p1 +%patch23506 -p1 +%patch23507 -p1 +%patch23508 -p1 +%patch23509 -p1 +%patch23511 -p1 +%patch23562 -p1 +%patch23571 -p1 +%patch23574 -p1 +%patch23575 -p1 +%patch23576 -p1 +%patch23577 -p1 +%patch23578 -p1 +%patch23579 -p1 +%patch23580 -p1 +%patch23581 -p1 +%patch23582 -p1 +%patch23583 -p1 +%patch23584 -p1 +%patch23585 -p1 +%patch23586 -p1 +%patch23587 -p1 +%patch23588 -p1 +%patch23589 -p1 +%patch23590 -p1 +%patch23591 -p1 +%patch23592 -p1 +%patch23593 -p1 +%patch23594 -p1 +%patch23595 -p1 +%patch23596 -p1 +%patch23597 -p1 +%patch23599 -p1 +%patch23610 -p1 +%patch23611 -p1 +%patch23613 -p1 +%patch23614 -p1 +%patch23615 -p1 +%patch23616 -p1 +%patch23676 -p1 +%patch23723 -p1 +%patch23724 -p1 +%patch23726 -p1 +%patch23735 -p1 +%patch23747 -p1 +%patch23749 -p1 +%patch23771 -p1 +%patch23772 -p1 +%patch23774 -p1 +%patch23781 -p1 +%patch23782 -p1 +%patch23783 -p1 +%patch23795 -p1 +%patch23800 -p1 +%patch23804 -p1 +%patch23817 -p1 +%patch23818 -p1 +%patch23827 -p1 +%patch23841 -p1 +%patch23842 -p1 +%patch23853 -p1 +%patch23874 -p1 +%patch23897 -p1 +%patch23904 -p1 +%patch23905 -p1 +%patch23906 -p1 +%patch23908 -p1 +%patch23925 -p1 +%patch23933 -p1 +%patch23943 -p1 +%patch23953 -p1 +%patch23955 -p1 +%patch23957 -p1 +%patch23978 -p1 +%patch23979 -p1 +%patch23980 -p1 +%patch23993 -p1 +# Upstream patches %patch300 -p1 %patch301 -p1 %patch302 -p1 @@ -851,7 +931,6 @@ tar xfj %{SOURCE2} -C $RPM_BUILD_DIR/%{xen_build_dir}/tools %patch502 -p1 %patch504 -p1 %patch505 -p1 -%patch506 -p1 %patch507 -p1 %patch508 -p1 %patch509 -p1 @@ -879,14 +958,7 @@ tar xfj %{SOURCE2} -C $RPM_BUILD_DIR/%{xen_build_dir}/tools %patch1011 -p1 %patch1012 -p1 # FATE 310510 -%patch1100 -p1 -%patch1101 -p1 -%patch1102 -p1 -%patch1103 -p1 -%patch1104 -p1 -%patch1105 -p1 %patch1106 -p1 -%patch1107 -p1 # %patch99998 -p1 %patch99999 -p1
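
The xen.spec change above pairs every upstream backport with its changeset number: the PatchNNNNN tag in the preamble and the matching %patchNNNNN line in %prep both carry the upstream changeset, so adding or dropping a backport touches only two self-describing lines. A minimal sketch of the convention, using a hypothetical changeset 24000 that is not part of this update:

    # preamble: tag number matches the upstream changeset number
    Patch24000:     24000-example-upstream-fix.patch

    %prep
    # %prep: apply with the same number, stripping one leading path component
    %patch24000 -p1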