forked from pool/xen
OBS User unknown 2009-02-09 01:49:08 +00:00 committed by Git OBS Bridge
parent 8a8c811c02
commit 5545adae8a
19 changed files with 2092 additions and 188 deletions


@@ -1,7 +1,15 @@
Index: xen-3.3.1-testing/xen/arch/x86/hvm/vmx/vmcs.c
===================================================================
--- xen-3.3.1-testing.orig/xen/arch/x86/hvm/vmx/vmcs.c
+++ xen-3.3.1-testing/xen/arch/x86/hvm/vmx/vmcs.c
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1232623303 0
# Node ID d52921c18c3d0171bccb4651cca8412f2fff2dd9
# Parent 9f9ba1a7cc924fbc547e05ea21071becafe5e2c2
vmx: utilise the GUEST_PAT and HOST_PAT vmcs area
Signed-off-by: Xin Li <Xin.Li@intel.com>
Signed-off-by: Xiaohui Xin <xiaohui.xin@intel.com>
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -166,14 +166,15 @@ static void vmx_init_vmcs_config(void)
#endif
@@ -81,7 +89,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/hvm/vmx/vmcs.c
vmx_dump_sel("IDTR", x86_seg_idtr);
vmx_dump_sel("TR", x86_seg_tr);
+ printk("Guest PAT = 0x%08x%08x\n",
+ (uint32_t)vmr(GUEST_PAT_HIGH), (uint32_t)vmr(GUEST_PAT));
x = (unsigned long long)vmr(TSC_OFFSET_HIGH) << 32;
x |= (uint32_t)vmr(TSC_OFFSET);
printk("TSC Offset = %016llx\n", x);
@@ -94,10 +102,8 @@ Index: xen-3.3.1-testing/xen/arch/x86/hvm/vmx/vmcs.c
printk("*** Control State ***\n");
printk("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n",
Index: xen-3.3.1-testing/xen/include/asm-x86/hvm/vmx/vmcs.h
===================================================================
--- xen-3.3.1-testing.orig/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ xen-3.3.1-testing/xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -150,11 +150,14 @@ extern u32 vmx_pin_based_exec_control;
#define VM_EXIT_IA32E_MODE 0x00000200
@@ -113,17 +119,16 @@ Index: xen-3.3.1-testing/xen/include/asm-x86/hvm/vmx/vmcs.h
extern u32 vmx_vmentry_control;
#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
@@ -181,7 +184,8 @@ extern bool_t cpu_has_vmx_ins_outs_instr
@@ -181,6 +184,8 @@ extern bool_t cpu_has_vmx_ins_outs_instr
(vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT)
#define cpu_has_vmx_vpid \
(vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
-
+#define cpu_has_vmx_pat \
+ (vmx_vmentry_control & VM_ENTRY_LOAD_GUEST_PAT)
/* GUEST_INTERRUPTIBILITY_INFO flags. */
#define VMX_INTR_SHADOW_STI 0x00000001
#define VMX_INTR_SHADOW_MOV_SS 0x00000002
@@ -232,6 +236,8 @@ enum vmcs_field {
@@ -232,6 +237,8 @@ enum vmcs_field {
VMCS_LINK_POINTER_HIGH = 0x00002801,
GUEST_IA32_DEBUGCTL = 0x00002802,
GUEST_IA32_DEBUGCTL_HIGH = 0x00002803,
@@ -132,7 +137,7 @@ Index: xen-3.3.1-testing/xen/include/asm-x86/hvm/vmx/vmcs.h
GUEST_PDPTR0 = 0x0000280a,
GUEST_PDPTR0_HIGH = 0x0000280b,
GUEST_PDPTR1 = 0x0000280c,
@@ -240,6 +246,8 @@ enum vmcs_field {
@@ -240,6 +247,8 @@ enum vmcs_field {
GUEST_PDPTR2_HIGH = 0x0000280f,
GUEST_PDPTR3 = 0x00002810,
GUEST_PDPTR3_HIGH = 0x00002811,
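For reference, the Guest PAT dump added above prints a 64-bit PAT register as two 32-bit VMCS halves. A standalone illustration of that formatting follows; the sample value is only an example (the customary x86 reset PAT layout), not something taken from the patch.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t pat = 0x0007040600070406ULL;   /* eight 8-bit memory-type entries */

    /* Same formatting as the vmcs.c dump: high 32 bits, then low 32 bits. */
    printf("Guest PAT = 0x%08x%08x\n",
           (uint32_t)(pat >> 32), (uint32_t)pat);
    return 0;
}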


@@ -1,13 +1,24 @@
Index: xen-3.3.1-testing/xen/arch/x86/hvm/mtrr.c
===================================================================
--- xen-3.3.1-testing.orig/xen/arch/x86/hvm/mtrr.c
+++ xen-3.3.1-testing/xen/arch/x86/hvm/mtrr.c
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1232721749 0
# Node ID 175a425e9b55e63c240b0a2ad61f5ed251e85ead
# Parent f3240cd3cd2b9d48acf3d82caa2ca1cab1f66325
EPT/VT-d: Enhance MTRR/PAT virtualization when EPT/VT-d both enabled
Set effective memory type for EPT according to the VT-d snoop control
capability, and also includes some cleanups for EPT & VT-d both enabled.
Signed-off-by: Edwin Zhai <Edwin.Zhai@intel.com>
Signed-off-by: Xiaohui Xin <xiaohui.xin@intel.com>
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -702,12 +702,15 @@ HVM_REGISTER_SAVE_RESTORE(MTRR, hvm_save
1, HVMSR_PER_VCPU);
uint8_t epte_get_entry_emt(
- struct domain *d, unsigned long gfn, unsigned long mfn)
+ struct domain *d, unsigned long gfn,
+ unsigned long mfn, uint8_t *igmt, int direct_mmio)
{
uint8_t gmtrr_mtype, hmtrr_mtype;
@@ -41,10 +52,8 @@ Index: xen-3.3.1-testing/xen/arch/x86/hvm/mtrr.c
gmtrr_mtype = get_mtrr_type(&v->arch.hvm_vcpu.mtrr, (gfn << PAGE_SHIFT));
hmtrr_mtype = get_mtrr_type(&mtrr_state, (mfn << PAGE_SHIFT));
return ((gmtrr_mtype <= hmtrr_mtype) ? gmtrr_mtype : hmtrr_mtype);
Index: xen-3.3.1-testing/xen/arch/x86/mm/hap/p2m-ept.c
===================================================================
--- xen-3.3.1-testing.orig/xen/arch/x86/mm/hap/p2m-ept.c
+++ xen-3.3.1-testing/xen/arch/x86/mm/hap/p2m-ept.c
--- a/xen/arch/x86/mm/hap/p2m-ept.c
+++ b/xen/arch/x86/mm/hap/p2m-ept.c
@@ -66,6 +66,7 @@ static int ept_set_middle_entry(struct d
list_add_tail(&pg->list, &d->arch.p2m->pages);
@@ -53,29 +62,23 @@ Index: xen-3.3.1-testing/xen/arch/x86/mm/hap/p2m-ept.c
ept_entry->sp_avail = 0;
ept_entry->avail1 = 0;
ept_entry->mfn = page_to_mfn(pg);
@@ -114,9 +115,19 @@ static int ept_next_level(struct domain
@@ -114,9 +115,13 @@ static int ept_next_level(struct domain
}
}
+/* The parameter need_modify_vtd_table in _ept_set_entry():
+ *
+ * Usually VT-d page table is created during the p2m table creation time,
+ * and it only cares about the gfn to mfn translations,
+ * EPT table takes the same effect as the p2m table, so we create
+ * VT-d page table in ept_set_entry() function.
+ * But when EPT entries are modified in ways not related to the gfn and mfn translations,
+ * we don't need to recreate the same VT-d page tables.
+ * The need_modify_vtd_table parameter is used for this.
+/*
+ * TODO: ept_set_entry() computes 'need_modify_vtd_table' for itself,
+ * by observing whether any gfn->mfn translations are modified.
+ */
+
static int
-ept_set_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
- unsigned int order, p2m_type_t p2mt)
+_ept_set_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
+ unsigned int order, p2m_type_t p2mt, int need_modify_vtd_table)
{
ept_entry_t *table = NULL;
unsigned long gfn_remainder = gfn, offset = 0;
@@ -124,6 +135,8 @@ ept_set_entry(struct domain *d, unsigned
@@ -124,6 +129,8 @@ ept_set_entry(struct domain *d, unsigned
u32 index;
int i, rv = 0, ret = 0;
int walk_level = order / EPT_TABLE_ORDER;
@@ -84,7 +87,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/mm/hap/p2m-ept.c
/* we only support 4k and 2m pages now */
@@ -157,7 +170,9 @@ ept_set_entry(struct domain *d, unsigned
@@ -157,7 +164,9 @@ ept_set_entry(struct domain *d, unsigned
{
if ( mfn_valid(mfn_x(mfn)) || (p2mt == p2m_mmio_direct) )
{
@@ -95,41 +98,41 @@ Index: xen-3.3.1-testing/xen/arch/x86/mm/hap/p2m-ept.c
ept_entry->sp_avail = walk_level ? 1 : 0;
if ( ret == GUEST_TABLE_SUPER_PAGE )
@@ -208,7 +223,10 @@ ept_set_entry(struct domain *d, unsigned
@@ -208,7 +217,10 @@ ept_set_entry(struct domain *d, unsigned
{
split_ept_entry = split_table + i;
split_ept_entry->emt = epte_get_entry_emt(d,
- gfn-offset+i, split_mfn+i);
+ gfn-offset+i, split_mfn+i,
+ &igmt, direct_mmio);
+ split_ept_entry->igmt = igmt;
+
split_ept_entry->sp_avail = 0;
split_ept_entry->mfn = split_mfn+i;
@@ -223,7 +241,10 @@ ept_set_entry(struct domain *d, unsigned
@@ -223,7 +235,10 @@ ept_set_entry(struct domain *d, unsigned
/* Set the destinated 4k page as normal */
split_ept_entry = split_table + offset;
- split_ept_entry->emt = epte_get_entry_emt(d, gfn, mfn_x(mfn));
+ split_ept_entry->emt = epte_get_entry_emt(d, gfn, mfn_x(mfn),
+ &igmt, direct_mmio);
+ split_ept_entry->igmt = igmt;
+
split_ept_entry->mfn = mfn_x(mfn);
split_ept_entry->avail1 = p2mt;
ept_p2m_type_to_flags(split_ept_entry, p2mt);
@@ -246,7 +267,8 @@ out:
@@ -246,7 +261,8 @@ out:
/* Now the p2m table is not shared with vt-d page table */
- if ( iommu_enabled && is_hvm_domain(d) )
+ if ( iommu_enabled && is_hvm_domain(d)
+ && need_modify_vtd_table )
{
if ( p2mt == p2m_ram_rw )
{
@@ -273,6 +295,17 @@ out:
@@ -273,6 +289,17 @@ out:
return rv;
}
@@ -138,16 +141,16 @@ Index: xen-3.3.1-testing/xen/arch/x86/mm/hap/p2m-ept.c
+ unsigned int order, p2m_type_t p2mt)
+{
+ /* ept_set_entry() is called from set_entry(),
+ * so we should always create the VT-d page table according
+ * to changes in the gfn to mfn translations.
+ */
+ return _ept_set_entry(d, gfn, mfn, order, p2mt, 1);
+}
+
/* Read ept p2m entries */
static mfn_t ept_get_entry(struct domain *d, unsigned long gfn, p2m_type_t *t)
{
@@ -393,18 +426,30 @@ void ept_change_entry_emt_with_range(str
@@ -393,18 +420,30 @@ void ept_change_entry_emt_with_range(str
* Set emt for super page.
*/
order = EPT_TABLE_ORDER;
@@ -183,10 +186,8 @@ Index: xen-3.3.1-testing/xen/arch/x86/mm/hap/p2m-ept.c
}
}
Index: xen-3.3.1-testing/xen/drivers/passthrough/iommu.c
===================================================================
--- xen-3.3.1-testing.orig/xen/drivers/passthrough/iommu.c
+++ xen-3.3.1-testing/xen/drivers/passthrough/iommu.c
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -40,6 +40,7 @@ int iommu_enabled = 0;
int iommu_pv_enabled = 0;
int force_iommu = 0;
@@ -195,10 +196,8 @@ Index: xen-3.3.1-testing/xen/drivers/passthrough/iommu.c
static void __init parse_iommu_param(char *s)
{
Index: xen-3.3.1-testing/xen/drivers/passthrough/vtd/dmar.c
===================================================================
--- xen-3.3.1-testing.orig/xen/drivers/passthrough/vtd/dmar.c
+++ xen-3.3.1-testing/xen/drivers/passthrough/vtd/dmar.c
--- a/xen/drivers/passthrough/vtd/dmar.c
+++ b/xen/drivers/passthrough/vtd/dmar.c
@@ -29,6 +29,7 @@
#include <xen/pci_regs.h>
#include <asm/string.h>
@@ -238,10 +237,8 @@ Index: xen-3.3.1-testing/xen/drivers/passthrough/vtd/dmar.c
return 0;
Index: xen-3.3.1-testing/xen/drivers/passthrough/vtd/iommu.c
===================================================================
--- xen-3.3.1-testing.orig/xen/drivers/passthrough/vtd/iommu.c
+++ xen-3.3.1-testing/xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -1495,6 +1495,11 @@ int intel_iommu_map_page(
pte_present = dma_pte_present(*pte);
dma_set_pte_addr(*pte, (paddr_t)mfn << PAGE_SHIFT_4K);
@@ -254,10 +251,8 @@ Index: xen-3.3.1-testing/xen/drivers/passthrough/vtd/iommu.c
iommu_flush_cache_entry(pte);
unmap_vtd_domain_page(page);
Index: xen-3.3.1-testing/xen/drivers/passthrough/vtd/iommu.h
===================================================================
--- xen-3.3.1-testing.orig/xen/drivers/passthrough/vtd/iommu.h
+++ xen-3.3.1-testing/xen/drivers/passthrough/vtd/iommu.h
--- a/xen/drivers/passthrough/vtd/iommu.h
+++ b/xen/drivers/passthrough/vtd/iommu.h
@@ -104,6 +104,7 @@
#define ecap_ext_intr(e) ((e >> 4) & 0x1)
#define ecap_cache_hints(e) ((e >> 5) & 0x1)
@@ -279,10 +274,8 @@ Index: xen-3.3.1-testing/xen/drivers/passthrough/vtd/iommu.h
#define dma_set_pte_prot(p, prot) \
do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
#define dma_pte_addr(p) ((p).val & PAGE_MASK_4K)
Index: xen-3.3.1-testing/xen/include/asm-x86/hvm/vmx/vmx.h
===================================================================
--- xen-3.3.1-testing.orig/xen/include/asm-x86/hvm/vmx/vmx.h
+++ xen-3.3.1-testing/xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -33,7 +33,8 @@ typedef union {
u64 r : 1,
w : 1,
@@ -293,23 +286,25 @@ Index: xen-3.3.1-testing/xen/include/asm-x86/hvm/vmx/vmx.h
sp_avail : 1,
avail1 : 4,
mfn : 45,
Index: xen-3.3.1-testing/xen/include/asm-x86/mtrr.h
===================================================================
--- xen-3.3.1-testing.orig/xen/include/asm-x86/mtrr.h
+++ xen-3.3.1-testing/xen/include/asm-x86/mtrr.h
@@ -64,7 +64,7 @@ extern int mtrr_del_page(int reg, unsign
--- a/xen/include/asm-x86/mtrr.h
+++ b/xen/include/asm-x86/mtrr.h
@@ -64,9 +64,11 @@ extern int mtrr_del_page(int reg, unsign
extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
extern u32 get_pat_flags(struct vcpu *v, u32 gl1e_flags, paddr_t gpaddr,
paddr_t spaddr);
-extern uint8_t epte_get_entry_emt(struct domain *d, unsigned long gfn, unsigned long mfn);
+extern uint8_t epte_get_entry_emt(struct domain *d, unsigned long gfn, unsigned long mfn, uint8_t *igmt, int direct_mmio);
extern void ept_change_entry_emt_with_range(struct domain *d, unsigned long start_gfn,
unsigned long end_gfn);
-extern void ept_change_entry_emt_with_range(struct domain *d, unsigned long start_gfn,
- unsigned long end_gfn);
+extern uint8_t epte_get_entry_emt(
+ struct domain *d, unsigned long gfn, unsigned long mfn,
+ uint8_t *igmt, int direct_mmio);
+extern void ept_change_entry_emt_with_range(
+ struct domain *d, unsigned long start_gfn, unsigned long end_gfn);
extern unsigned char pat_type_2_pte_flags(unsigned char pat_type);
Index: xen-3.3.1-testing/xen/include/xen/iommu.h
===================================================================
--- xen-3.3.1-testing.orig/xen/include/xen/iommu.h
+++ xen-3.3.1-testing/xen/include/xen/iommu.h
#endif /* __ASM_X86_MTRR_H__ */
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -32,6 +32,7 @@ extern int iommu_enabled;
extern int iommu_pv_enabled;
extern int force_iommu;
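The changeset comment above states the policy: pick the effective EPT memory type from the VT-d snoop-control capability, and report through igmt whether the guest PAT/MTRR can be ignored. Below is a simplified, self-contained sketch of that decision logic, assuming a device_assigned flag and the usual MTRR type encodings; it is an illustration only, not the actual epte_get_entry_emt() body, which also combines the guest and host MTRR types.

#include <stdint.h>

#define MTRR_TYPE_UNCACHABLE 0
#define MTRR_TYPE_WRBACK     6

static uint8_t sketch_epte_emt(int device_assigned, int iommu_snoop,
                               int direct_mmio, uint8_t *igmt)
{
    *igmt = 0;                 /* by default, honour the guest PAT/MTRR */

    if ( !device_assigned )
    {
        *igmt = 1;             /* no device DMA to worry about: force WB */
        return MTRR_TYPE_WRBACK;
    }

    if ( direct_mmio )         /* direct-mapped device MMIO stays uncacheable */
        return MTRR_TYPE_UNCACHABLE;

    if ( iommu_snoop )         /* VT-d snoop control keeps DMA coherent */
    {
        *igmt = 1;
        return MTRR_TYPE_WRBACK;
    }

    /* Otherwise fall back to combining guest and host MTRR types (not shown). */
    return MTRR_TYPE_UNCACHABLE;
}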


@@ -0,0 +1,146 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1232981779 0
# Node ID 055c589f4791811797867736857b08fdd0fd6d49
# Parent c9dc7dcacc1d0c064a131da98a4063fa2cedd716
x86: No need for CMPXCHG8B on page_info structure.
References: bnc#470949
Updates and checks on count_info and page owner can safely be
non-atomic.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1887,36 +1887,29 @@ void put_page(struct page_info *page)
int get_page(struct page_info *page, struct domain *domain)
{
- u32 x, nx, y = page->count_info;
- u32 d, nd = page->u.inuse._domain;
- u32 _domain = pickle_domptr(domain);
+ u32 x, y = page->count_info;
do {
- x = y;
- nx = x + 1;
- d = nd;
+ x = y;
if ( unlikely((x & PGC_count_mask) == 0) || /* Not allocated? */
/* Keep one spare reference to be acquired by get_page_light(). */
- unlikely(((nx + 1) & PGC_count_mask) <= 1) || /* Overflow? */
- unlikely(d != _domain) ) /* Wrong owner? */
- {
- if ( !_shadow_mode_refcounts(domain) && !domain->is_dying )
- gdprintk(XENLOG_INFO,
- "Error pfn %lx: rd=%p, od=%p, caf=%08x, taf=%"
- PRtype_info "\n",
- page_to_mfn(page), domain, unpickle_domptr(d),
- x, page->u.inuse.type_info);
- return 0;
- }
- asm volatile (
- LOCK_PREFIX "cmpxchg8b %2"
- : "=d" (nd), "=a" (y),
- "=m" (*(volatile u64 *)(&page->count_info))
- : "0" (d), "1" (x), "c" (d), "b" (nx) );
+ unlikely(((x + 2) & PGC_count_mask) <= 1) ) /* Overflow? */
+ goto fail;
}
- while ( unlikely(nd != d) || unlikely(y != x) );
+ while ( (y = cmpxchg(&page->count_info, x, x + 1)) != x );
- return 1;
+ if ( likely(page_get_owner(page) == domain) )
+ return 1;
+
+ put_page(page);
+
+ fail:
+ if ( !_shadow_mode_refcounts(domain) && !domain->is_dying )
+ gdprintk(XENLOG_INFO,
+ "Error pfn %lx: rd=%p, od=%p, caf=%08x, taf=%" PRtype_info,
+ page_to_mfn(page), domain, page_get_owner(page),
+ y, page->u.inuse.type_info);
+ return 0;
}
/*
@@ -3438,49 +3431,47 @@ int replace_grant_host_mapping(
int steal_page(
struct domain *d, struct page_info *page, unsigned int memflags)
{
- u32 _d, _nd, x, y;
+ u32 x, y;
spin_lock(&d->page_alloc_lock);
+ if ( is_xen_heap_page(page) || (page_get_owner(page) != d) )
+ goto fail;
+
/*
- * The tricky bit: atomically release ownership while there is just one
- * benign reference to the page (PGC_allocated). If that reference
- * disappears then the deallocation routine will safely spin.
+ * We require there is just one reference (PGC_allocated). We temporarily
+ * drop this reference now so that we can safely swizzle the owner.
*/
- _d = pickle_domptr(d);
- _nd = page->u.inuse._domain;
- y = page->count_info;
+ y = page->count_info;
do {
x = y;
- if ( unlikely((x & (PGC_count_mask|PGC_allocated)) !=
- (1 | PGC_allocated)) || unlikely(_nd != _d) )
- {
- MEM_LOG("gnttab_transfer: Bad page %p: ed=%p(%u), sd=%p,"
- " caf=%08x, taf=%" PRtype_info "\n",
- (void *) page_to_mfn(page),
- d, d->domain_id, unpickle_domptr(_nd), x,
- page->u.inuse.type_info);
- spin_unlock(&d->page_alloc_lock);
- return -1;
- }
- asm volatile (
- LOCK_PREFIX "cmpxchg8b %2"
- : "=d" (_nd), "=a" (y),
- "=m" (*(volatile u64 *)(&page->count_info))
- : "0" (_d), "1" (x), "c" (NULL), "b" (x) );
- } while (unlikely(_nd != _d) || unlikely(y != x));
+ if ( (x & (PGC_count_mask|PGC_allocated)) != (1 | PGC_allocated) )
+ goto fail;
+ y = cmpxchg(&page->count_info, x, x & ~PGC_count_mask);
+ } while ( y != x );
- /*
- * Unlink from 'd'. At least one reference remains (now anonymous), so
- * noone else is spinning to try to delete this page from 'd'.
- */
+ /* Swizzle the owner then reinstate the PGC_allocated reference. */
+ page_set_owner(page, NULL);
+ y = page->count_info;
+ do {
+ x = y;
+ BUG_ON((x & (PGC_count_mask|PGC_allocated)) != PGC_allocated);
+ } while ( (y = cmpxchg(&page->count_info, x, x | 1)) != x );
+
+ /* Unlink from original owner. */
if ( !(memflags & MEMF_no_refcount) )
d->tot_pages--;
list_del(&page->list);
spin_unlock(&d->page_alloc_lock);
-
return 0;
+
+ fail:
+ spin_unlock(&d->page_alloc_lock);
+ MEM_LOG("Bad page %p: ed=%p(%u), sd=%p, caf=%08x, taf=%" PRtype_info,
+ (void *)page_to_mfn(page), d, d->domain_id,
+ page_get_owner(page), page->count_info, page->u.inuse.type_info);
+ return -1;
}
int do_update_va_mapping(unsigned long va, u64 val64,
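The reference-acquire pattern this patch moves to can be shown in a compact, self-contained form: a single-word compare-and-swap loop bumps the count, and the page owner is checked afterwards (dropping the reference on mismatch) rather than being compared atomically inside a 64-bit cmpxchg8b. The sketch below uses GCC's __sync builtin in place of Xen's cmpxchg() and is an illustration of the pattern, not the get_page() code itself.

/* Try to take one reference on a count word; returns 1 on success. */
static int sketch_get_ref(volatile unsigned long *count, unsigned long count_mask)
{
    unsigned long x, y = *count;

    do {
        x = y;
        if ( (x & count_mask) == 0 ||            /* page not allocated   */
             ((x + 2) & count_mask) <= 1 )       /* count would overflow */
            return 0;
        /* Publish x+1 only if nobody changed the word in the meantime. */
    } while ( (y = __sync_val_compare_and_swap(count, x, x + 1)) != x );

    return 1;  /* caller still validates the owner and undoes the ref if wrong */
}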


@@ -0,0 +1,445 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1232986782 0
# Node ID 39517e863cc89a085341e1d53317aaa7ceddd127
# Parent 055c589f4791811797867736857b08fdd0fd6d49
x86_64: Widen page counts to avoid overflow.
References: bnc#470949
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1232988758 0
# Node ID 728d1892f0e24c2531df2d61a2d95177400ceb17
# Parent 90909b81b3b9cf9b303e2bc457580603da3ac7fd
x86: Clean up shadow_page_info after page_info changes.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1233056759 0
# Node ID 6e623569455c08b57e43e3355f6809b3a4ba0707
# Parent 7b56dbd1b439e0996083810489398cb51dc43aa6
x86: clean up struct page_info
Remove the now unnecessary (and leading to misalignment of cpumask on
x86-64) 'packed' attributes.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -143,7 +143,7 @@ void dump_pageframe_info(struct domain *
{
list_for_each_entry ( page, &d->page_list, list )
{
- printk(" DomPage %p: caf=%08x, taf=%" PRtype_info "\n",
+ printk(" DomPage %p: caf=%08lx, taf=%" PRtype_info "\n",
_p(page_to_mfn(page)),
page->count_info, page->u.inuse.type_info);
}
@@ -151,7 +151,7 @@ void dump_pageframe_info(struct domain *
list_for_each_entry ( page, &d->xenpage_list, list )
{
- printk(" XenPage %p: caf=%08x, taf=%" PRtype_info "\n",
+ printk(" XenPage %p: caf=%08lx, taf=%" PRtype_info "\n",
_p(page_to_mfn(page)),
page->count_info, page->u.inuse.type_info);
}
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -714,8 +714,8 @@ get_page_from_l1e(
else if ( pte_flags_to_cacheattr(l1f) !=
((page->count_info >> PGC_cacheattr_base) & 7) )
{
- uint32_t x, nx, y = page->count_info;
- uint32_t cacheattr = pte_flags_to_cacheattr(l1f);
+ unsigned long x, nx, y = page->count_info;
+ unsigned long cacheattr = pte_flags_to_cacheattr(l1f);
if ( is_xen_heap_page(page) )
{
@@ -1869,7 +1869,7 @@ static int mod_l4_entry(l4_pgentry_t *pl
void put_page(struct page_info *page)
{
- u32 nx, x, y = page->count_info;
+ unsigned long nx, x, y = page->count_info;
do {
x = y;
@@ -1887,7 +1887,7 @@ void put_page(struct page_info *page)
int get_page(struct page_info *page, struct domain *domain)
{
- u32 x, y = page->count_info;
+ unsigned long x, y = page->count_info;
do {
x = y;
@@ -1906,7 +1906,7 @@ int get_page(struct page_info *page, str
fail:
if ( !_shadow_mode_refcounts(domain) && !domain->is_dying )
gdprintk(XENLOG_INFO,
- "Error pfn %lx: rd=%p, od=%p, caf=%08x, taf=%" PRtype_info,
+ "Error pfn %lx: rd=%p, od=%p, caf=%08lx, taf=%" PRtype_info,
page_to_mfn(page), domain, page_get_owner(page),
y, page->u.inuse.type_info);
return 0;
@@ -1922,7 +1922,7 @@ int get_page(struct page_info *page, str
*/
static void get_page_light(struct page_info *page)
{
- u32 x, nx, y = page->count_info;
+ unsigned long x, nx, y = page->count_info;
do {
x = y;
@@ -1963,7 +1963,7 @@ static int alloc_page_type(struct page_i
rc = alloc_segdesc_page(page);
break;
default:
- printk("Bad type in alloc_page_type %lx t=%" PRtype_info " c=%x\n",
+ printk("Bad type in alloc_page_type %lx t=%" PRtype_info " c=%lx\n",
type, page->u.inuse.type_info,
page->count_info);
rc = -EINVAL;
@@ -1987,7 +1987,7 @@ static int alloc_page_type(struct page_i
{
ASSERT(rc < 0);
MEM_LOG("Error while validating mfn %lx (pfn %lx) for type %"
- PRtype_info ": caf=%08x taf=%" PRtype_info,
+ PRtype_info ": caf=%08lx taf=%" PRtype_info,
page_to_mfn(page), get_gpfn_from_mfn(page_to_mfn(page)),
type, page->count_info, page->u.inuse.type_info);
page->u.inuse.type_info = 0;
@@ -3144,7 +3144,7 @@ static int create_grant_pte_mapping(
void *va;
unsigned long gmfn, mfn;
struct page_info *page;
- u32 type;
+ unsigned long type;
l1_pgentry_t ol1e;
struct domain *d = v->domain;
@@ -3205,7 +3205,7 @@ static int destroy_grant_pte_mapping(
void *va;
unsigned long gmfn, mfn;
struct page_info *page;
- u32 type;
+ unsigned long type;
l1_pgentry_t ol1e;
gmfn = addr >> PAGE_SHIFT;
@@ -3431,7 +3431,7 @@ int replace_grant_host_mapping(
int steal_page(
struct domain *d, struct page_info *page, unsigned int memflags)
{
- u32 x, y;
+ unsigned long x, y;
spin_lock(&d->page_alloc_lock);
@@ -3468,7 +3468,7 @@ int steal_page(
fail:
spin_unlock(&d->page_alloc_lock);
- MEM_LOG("Bad page %p: ed=%p(%u), sd=%p, caf=%08x, taf=%" PRtype_info,
+ MEM_LOG("Bad page %p: ed=%p(%u), sd=%p, caf=%08lx, taf=%" PRtype_info,
(void *)page_to_mfn(page), d, d->domain_id,
page_get_owner(page), page->count_info, page->u.inuse.type_info);
return -1;
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -166,7 +166,7 @@ void hap_free_p2m_page(struct domain *d,
ASSERT(page_get_owner(pg) == d);
/* Should have just the one ref we gave it in alloc_p2m_page() */
if ( (pg->count_info & PGC_count_mask) != 1 )
- HAP_ERROR("Odd p2m page count c=%#x t=%"PRtype_info"\n",
+ HAP_ERROR("Odd p2m page count c=%#lx t=%"PRtype_info"\n",
pg->count_info, pg->u.inuse.type_info);
pg->count_info = 0;
/* Free should not decrement domain's total allocation, since
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1678,7 +1678,7 @@ shadow_free_p2m_page(struct domain *d, s
/* Should have just the one ref we gave it in alloc_p2m_page() */
if ( (pg->count_info & PGC_count_mask) != 1 )
{
- SHADOW_ERROR("Odd p2m page count c=%#x t=%"PRtype_info"\n",
+ SHADOW_ERROR("Odd p2m page count c=%#lx t=%"PRtype_info"\n",
pg->count_info, pg->u.inuse.type_info);
}
pg->count_info = 0;
@@ -1796,14 +1796,21 @@ static unsigned int sh_set_allocation(st
sp = list_entry(d->arch.paging.shadow.freelists[order].next,
struct shadow_page_info, list);
list_del(&sp->list);
-#if defined(__x86_64__)
/*
- * Re-instate lock field which we overwrite with shadow_page_info.
- * This was safe, since the lock is only used on guest pages.
+ * The pages were allocated anonymously, but the owner field
+ * may get overwritten, so need to clear it here.
*/
for ( j = 0; j < 1U << order; j++ )
+ {
+ page_set_owner(&((struct page_info *)sp)[j], NULL);
+#if defined(__x86_64__)
+ /*
+ * Re-instate lock field which we overwrite with shadow_page_info.
+ * This was safe, since the lock is only used on guest pages.
+ */
spin_lock_init(&((struct page_info *)sp)[j].lock);
#endif
+ }
d->arch.paging.shadow.free_pages -= 1 << order;
d->arch.paging.shadow.total_pages -= 1 << order;
free_domheap_pages((struct page_info *)sp, order);
@@ -2516,7 +2523,7 @@ int sh_remove_all_mappings(struct vcpu *
&& (page->u.inuse.type_info & PGT_count_mask) == 0) )
{
SHADOW_ERROR("can't find all mappings of mfn %lx: "
- "c=%08x t=%08lx\n", mfn_x(gmfn),
+ "c=%08lx t=%08lx\n", mfn_x(gmfn),
page->count_info, page->u.inuse.type_info);
}
}
@@ -3591,7 +3598,6 @@ int shadow_track_dirty_vram(struct domai
for ( i = 0; i < nr; i++ ) {
mfn_t mfn = gfn_to_mfn(d, begin_pfn + i, &t);
struct page_info *page;
- u32 count_info;
int dirty = 0;
paddr_t sl1ma = d->dirty_vram->sl1ma[i];
@@ -3602,8 +3608,7 @@ int shadow_track_dirty_vram(struct domai
else
{
page = mfn_to_page(mfn);
- count_info = page->u.inuse.type_info & PGT_count_mask;
- switch (count_info)
+ switch (page->u.inuse.type_info & PGT_count_mask)
{
case 0:
/* No guest reference, nothing to track. */
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1334,9 +1334,8 @@ static inline void shadow_vram_get_l1e(s
if ( (gfn >= d->dirty_vram->begin_pfn) && (gfn < d->dirty_vram->end_pfn) ) {
unsigned long i = gfn - d->dirty_vram->begin_pfn;
struct page_info *page = mfn_to_page(mfn);
- u32 count_info = page->u.inuse.type_info & PGT_count_mask;
- if ( count_info == 1 )
+ if ( (page->u.inuse.type_info & PGT_count_mask) == 1 )
/* Initial guest reference, record it */
d->dirty_vram->sl1ma[i] = pfn_to_paddr(mfn_x(sl1mfn))
| ((unsigned long)sl1e & ~PAGE_MASK);
@@ -1362,12 +1361,11 @@ static inline void shadow_vram_put_l1e(s
if ( (gfn >= d->dirty_vram->begin_pfn) && (gfn < d->dirty_vram->end_pfn) ) {
unsigned long i = gfn - d->dirty_vram->begin_pfn;
struct page_info *page = mfn_to_page(mfn);
- u32 count_info = page->u.inuse.type_info & PGT_count_mask;
int dirty = 0;
paddr_t sl1ma = pfn_to_paddr(mfn_x(sl1mfn))
| ((unsigned long)sl1e & ~PAGE_MASK);
- if ( count_info == 1 ) {
+ if ( (page->u.inuse.type_info & PGT_count_mask) == 1 ) {
/* Last reference */
if ( d->dirty_vram->sl1ma[i] == INVALID_PADDR ) {
/* We didn't know it was that one, let's say it is dirty */
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -201,12 +201,11 @@ struct shadow_page_info
u32 tlbflush_timestamp;
};
struct {
- unsigned int type:5; /* What kind of shadow is this? */
- unsigned int pinned:1; /* Is the shadow pinned? */
- unsigned int count:26; /* Reference count */
- u32 mbz; /* Must be zero: this is where the
- * owner field lives in page_info */
- } __attribute__((packed));
+ unsigned long mbz; /* Must be zero: count_info is here. */
+ unsigned long type:5; /* What kind of shadow is this? */
+ unsigned long pinned:1; /* Is the shadow pinned? */
+ unsigned long count:26; /* Reference count */
+ };
union {
/* For unused shadow pages, a list of pages of this order; for
* pinnable shadows, if pinned, a list of other pinned shadows
@@ -229,7 +228,7 @@ static inline void shadow_check_page_str
BUILD_BUG_ON(sizeof (struct shadow_page_info) !=
sizeof (struct page_info));
BUILD_BUG_ON(offsetof(struct shadow_page_info, mbz) !=
- offsetof(struct page_info, u.inuse._domain));
+ offsetof(struct page_info, count_info));
};
/* Shadow type codes */
--- a/xen/arch/x86/x86_32/mm.c
+++ b/xen/arch/x86/x86_32/mm.c
@@ -159,15 +159,6 @@ void __init subarch_init_memory(void)
unsigned long m2p_start_mfn;
unsigned int i, j;
- /*
- * We are rather picky about the layout of 'struct page_info'. The
- * count_info and domain fields must be adjacent, as we perform atomic
- * 64-bit operations on them. Also, just for sanity, we assert the size
- * of the structure here.
- */
- BUILD_BUG_ON(offsetof(struct page_info, u.inuse._domain) !=
- (offsetof(struct page_info, count_info) + sizeof(u32)));
- BUILD_BUG_ON((offsetof(struct page_info, count_info) & 7) != 0);
BUILD_BUG_ON(sizeof(struct page_info) != 24);
/* M2P table is mappable read-only by privileged domains. */
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -225,17 +225,6 @@ void __init subarch_init_memory(void)
l3_pgentry_t l3e;
l2_pgentry_t l2e;
- /*
- * We are rather picky about the layout of 'struct page_info'. The
- * count_info and domain fields must be adjacent, as we perform atomic
- * 64-bit operations on them.
- */
- BUILD_BUG_ON(offsetof(struct page_info, u.inuse._domain) !=
- (offsetof(struct page_info, count_info) + sizeof(u32)));
- BUILD_BUG_ON((offsetof(struct page_info, count_info) & 7) != 0);
- BUILD_BUG_ON(sizeof(struct page_info) !=
- (32 + BITS_TO_LONGS(NR_CPUS)*sizeof(long)));
-
/* M2P table is mappable read-only by privileged domains. */
for ( v = RDWR_MPT_VIRT_START;
v != RDWR_MPT_VIRT_END;
--- a/xen/common/xenoprof.c
+++ b/xen/common/xenoprof.c
@@ -142,8 +142,8 @@ share_xenoprof_page_with_guest(struct do
struct page_info *page = mfn_to_page(mfn + i);
if ( (page->count_info & (PGC_allocated|PGC_count_mask)) != 0 )
{
- gdprintk(XENLOG_INFO, "mfn 0x%lx page->count_info 0x%x\n",
- mfn + i, page->count_info);
+ gdprintk(XENLOG_INFO, "mfn 0x%lx page->count_info 0x%lx\n",
+ mfn + i, (unsigned long)page->count_info);
return -EBUSY;
}
page_set_owner(page, NULL);
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -23,7 +23,7 @@ struct page_info
struct list_head list;
/* Reference count and various PGC_xxx flags and fields. */
- u32 count_info;
+ unsigned long count_info;
/* Context-dependent fields follow... */
union {
@@ -34,7 +34,7 @@ struct page_info
u32 _domain; /* pickled format */
/* Type reference count and various PGT_xxx flags and fields. */
unsigned long type_info;
- } __attribute__ ((packed)) inuse;
+ } inuse;
/* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
struct {
@@ -42,7 +42,7 @@ struct page_info
u32 order;
/* Mask of possibly-tainted TLBs. */
cpumask_t cpumask;
- } __attribute__ ((packed)) free;
+ } free;
} u;
@@ -102,48 +102,53 @@ struct page_info
};
};
+#define PG_shift(idx) (BITS_PER_LONG - (idx))
+#define PG_mask(x, idx) (x ## UL << PG_shift(idx))
+
/* The following page types are MUTUALLY EXCLUSIVE. */
-#define PGT_none (0U<<29) /* no special uses of this page */
-#define PGT_l1_page_table (1U<<29) /* using this page as an L1 page table? */
-#define PGT_l2_page_table (2U<<29) /* using this page as an L2 page table? */
-#define PGT_l3_page_table (3U<<29) /* using this page as an L3 page table? */
-#define PGT_l4_page_table (4U<<29) /* using this page as an L4 page table? */
-#define PGT_seg_desc_page (5U<<29) /* using this page in a GDT/LDT? */
-#define PGT_writable_page (7U<<29) /* has writable mappings of this page? */
-#define PGT_type_mask (7U<<29) /* Bits 29-31. */
+#define PGT_none PG_mask(0, 3) /* no special uses of this page */
+#define PGT_l1_page_table PG_mask(1, 3) /* using as an L1 page table? */
+#define PGT_l2_page_table PG_mask(2, 3) /* using as an L2 page table? */
+#define PGT_l3_page_table PG_mask(3, 3) /* using as an L3 page table? */
+#define PGT_l4_page_table PG_mask(4, 3) /* using as an L4 page table? */
+#define PGT_seg_desc_page PG_mask(5, 3) /* using this page in a GDT/LDT? */
+#define PGT_writable_page PG_mask(7, 3) /* has writable mappings? */
+#define PGT_type_mask PG_mask(7, 3) /* Bits 29-31. */
/* Owning guest has pinned this page to its current type? */
-#define _PGT_pinned 28
-#define PGT_pinned (1U<<_PGT_pinned)
+#define _PGT_pinned PG_shift(4)
+#define PGT_pinned PG_mask(1, 4)
/* Has this page been validated for use as its current type? */
-#define _PGT_validated 27
-#define PGT_validated (1U<<_PGT_validated)
+#define _PGT_validated PG_shift(5)
+#define PGT_validated PG_mask(1, 5)
/* PAE only: is this an L2 page directory containing Xen-private mappings? */
-#define _PGT_pae_xen_l2 26
-#define PGT_pae_xen_l2 (1U<<_PGT_pae_xen_l2)
+#define _PGT_pae_xen_l2 PG_shift(6)
+#define PGT_pae_xen_l2 PG_mask(1, 6)
/* Has this page been *partially* validated for use as its current type? */
-#define _PGT_partial 25
-#define PGT_partial (1U<<_PGT_partial)
+#define _PGT_partial PG_shift(7)
+#define PGT_partial PG_mask(1, 7)
- /* 25-bit count of uses of this frame as its current type. */
-#define PGT_count_mask ((1U<<25)-1)
+ /* Count of uses of this frame as its current type. */
+#define PGT_count_width PG_shift(7)
+#define PGT_count_mask ((1UL<<PGT_count_width)-1)
/* Cleared when the owning guest 'frees' this page. */
-#define _PGC_allocated 31
-#define PGC_allocated (1U<<_PGC_allocated)
+#define _PGC_allocated PG_shift(1)
+#define PGC_allocated PG_mask(1, 1)
#if defined(__i386__)
/* Page is locked? */
-# define _PGC_locked 30
-# define PGC_locked (1U<<_PGC_out_of_sync)
+# define _PGC_locked PG_shift(2)
+# define PGC_locked PG_mask(1, 2)
#endif
/* Set when is using a page as a page table */
-#define _PGC_page_table 29
-#define PGC_page_table (1U<<_PGC_page_table)
+#define _PGC_page_table PG_shift(3)
+#define PGC_page_table PG_mask(1, 3)
/* 3-bit PAT/PCD/PWT cache-attribute hint. */
-#define PGC_cacheattr_base 26
-#define PGC_cacheattr_mask (7U<<PGC_cacheattr_base)
- /* 26-bit count of references to this frame. */
-#define PGC_count_mask ((1U<<26)-1)
+#define PGC_cacheattr_base PG_shift(6)
+#define PGC_cacheattr_mask PG_mask(7, 6)
+ /* Count of references to this frame. */
+#define PGC_count_width PG_shift(6)
+#define PGC_count_mask ((1UL<<PGC_count_width)-1)
#define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
#define is_xen_heap_mfn(mfn) ({ \
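The PG_shift()/PG_mask() helpers introduced above allocate flag bits downward from the top of an unsigned long, leaving the low bits for the reference count, so the split adapts automatically between 32- and 64-bit builds. A standalone demonstration with placeholder DEMO_* names (the token-pasting "## UL" form of the real macro is replaced by a cast here):

#include <stdio.h>

#define BITS_PER_LONG   (8 * (int)sizeof(unsigned long))
#define PG_shift(idx)   (BITS_PER_LONG - (idx))
#define PG_mask(x, idx) ((unsigned long)(x) << PG_shift(idx))

#define DEMO_allocated  PG_mask(1, 1)               /* topmost bit                  */
#define DEMO_count_mask ((1UL << PG_shift(6)) - 1)  /* all bits below the flag area */

int main(void)
{
    printf("allocated flag: %#lx\n", DEMO_allocated);
    printf("count mask    : %#lx (%d count bits)\n",
           DEMO_count_mask, PG_shift(6));
    return 0;
}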


@@ -0,0 +1,834 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1233072141 0
# Node ID bcf77bfd1161d1e2693d6762bcd436ad98ec0779
# Parent dbf53b739af0434adff50172fc071f718b57b450
x86: Fold page_info lock into type_info.
References: bnc#470949
Fix some racey looking code at the same time.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -202,11 +202,6 @@ void __init init_frametable(void)
}
memset(frame_table, 0, nr_pages << PAGE_SHIFT);
-
-#if defined(__x86_64__)
- for ( i = 0; i < max_page; i ++ )
- spin_lock_init(&frame_table[i].lock);
-#endif
}
void __init arch_init_memory(void)
@@ -1499,24 +1494,31 @@ static int free_l4_table(struct page_inf
#define free_l4_table(page, preemptible) (-EINVAL)
#endif
-static void page_lock(struct page_info *page)
+static int page_lock(struct page_info *page)
{
-#if defined(__i386__)
- while ( unlikely(test_and_set_bit(_PGC_locked, &page->count_info)) )
- while ( test_bit(_PGC_locked, &page->count_info) )
+ unsigned long x, nx;
+
+ do {
+ while ( (x = page->u.inuse.type_info) & PGT_locked )
cpu_relax();
-#else
- spin_lock(&page->lock);
-#endif
+ nx = x + (1 | PGT_locked);
+ if ( !(x & PGT_validated) ||
+ !(x & PGT_count_mask) ||
+ !(nx & PGT_count_mask) )
+ return 0;
+ } while ( cmpxchg(&page->u.inuse.type_info, x, nx) != x );
+
+ return 1;
}
static void page_unlock(struct page_info *page)
{
-#if defined(__i386__)
- clear_bit(_PGC_locked, &page->count_info);
-#else
- spin_unlock(&page->lock);
-#endif
+ unsigned long x, nx, y = page->u.inuse.type_info;
+
+ do {
+ x = y;
+ nx = x - (1 | PGT_locked);
+ } while ( (y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x );
}
/* How to write an entry to the guest pagetables.
@@ -1579,19 +1581,15 @@ static int mod_l1_entry(l1_pgentry_t *pl
struct vcpu *curr = current;
struct domain *d = curr->domain;
unsigned long mfn;
- struct page_info *l1pg = mfn_to_page(gl1mfn);
p2m_type_t p2mt;
int rc = 1;
- page_lock(l1pg);
-
if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) )
- return page_unlock(l1pg), 0;
+ return 0;
if ( unlikely(paging_mode_refcounts(d)) )
{
rc = UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr, preserve_ad);
- page_unlock(l1pg);
return rc;
}
@@ -1600,13 +1598,12 @@ static int mod_l1_entry(l1_pgentry_t *pl
/* Translate foreign guest addresses. */
mfn = mfn_x(gfn_to_mfn(FOREIGNDOM, l1e_get_pfn(nl1e), &p2mt));
if ( !p2m_is_ram(p2mt) || unlikely(mfn == INVALID_MFN) )
- return page_unlock(l1pg), 0;
+ return 0;
ASSERT((mfn & ~(PADDR_MASK >> PAGE_SHIFT)) == 0);
nl1e = l1e_from_pfn(mfn, l1e_get_flags(nl1e));
if ( unlikely(l1e_get_flags(nl1e) & l1_disallow_mask(d)) )
{
- page_unlock(l1pg);
MEM_LOG("Bad L1 flags %x",
l1e_get_flags(nl1e) & l1_disallow_mask(d));
return 0;
@@ -1618,12 +1615,11 @@ static int mod_l1_entry(l1_pgentry_t *pl
adjust_guest_l1e(nl1e, d);
rc = UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr,
preserve_ad);
- page_unlock(l1pg);
return rc;
}
if ( unlikely(!get_page_from_l1e(nl1e, FOREIGNDOM)) )
- return page_unlock(l1pg), 0;
+ return 0;
adjust_guest_l1e(nl1e, d);
if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr,
@@ -1636,11 +1632,9 @@ static int mod_l1_entry(l1_pgentry_t *pl
else if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr,
preserve_ad)) )
{
- page_unlock(l1pg);
return 0;
}
- page_unlock(l1pg);
put_page_from_l1e(ol1e, d);
return rc;
}
@@ -1650,13 +1644,13 @@ static int mod_l1_entry(l1_pgentry_t *pl
static int mod_l2_entry(l2_pgentry_t *pl2e,
l2_pgentry_t nl2e,
unsigned long pfn,
- unsigned long type,
int preserve_ad)
{
l2_pgentry_t ol2e;
struct vcpu *curr = current;
struct domain *d = curr->domain;
struct page_info *l2pg = mfn_to_page(pfn);
+ unsigned long type = l2pg->u.inuse.type_info;
int rc = 1;
if ( unlikely(!is_guest_l2_slot(d, type, pgentry_ptr_to_slot(pl2e))) )
@@ -1665,16 +1659,13 @@ static int mod_l2_entry(l2_pgentry_t *pl
return 0;
}
- page_lock(l2pg);
-
if ( unlikely(__copy_from_user(&ol2e, pl2e, sizeof(ol2e)) != 0) )
- return page_unlock(l2pg), 0;
+ return 0;
if ( l2e_get_flags(nl2e) & _PAGE_PRESENT )
{
if ( unlikely(l2e_get_flags(nl2e) & L2_DISALLOW_MASK) )
{
- page_unlock(l2pg);
MEM_LOG("Bad L2 flags %x",
l2e_get_flags(nl2e) & L2_DISALLOW_MASK);
return 0;
@@ -1685,12 +1676,11 @@ static int mod_l2_entry(l2_pgentry_t *pl
{
adjust_guest_l2e(nl2e, d);
rc = UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, curr, preserve_ad);
- page_unlock(l2pg);
return rc;
}
if ( unlikely(get_page_from_l2e(nl2e, pfn, d) < 0) )
- return page_unlock(l2pg), 0;
+ return 0;
adjust_guest_l2e(nl2e, d);
if ( unlikely(!UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, curr,
@@ -1703,11 +1693,9 @@ static int mod_l2_entry(l2_pgentry_t *pl
else if ( unlikely(!UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, curr,
preserve_ad)) )
{
- page_unlock(l2pg);
return 0;
}
- page_unlock(l2pg);
put_page_from_l2e(ol2e, pfn);
return rc;
}
@@ -1722,7 +1710,6 @@ static int mod_l3_entry(l3_pgentry_t *pl
l3_pgentry_t ol3e;
struct vcpu *curr = current;
struct domain *d = curr->domain;
- struct page_info *l3pg = mfn_to_page(pfn);
int rc = 0;
if ( unlikely(!is_guest_l3_slot(pgentry_ptr_to_slot(pl3e))) )
@@ -1738,16 +1725,13 @@ static int mod_l3_entry(l3_pgentry_t *pl
if ( is_pv_32bit_domain(d) && (pgentry_ptr_to_slot(pl3e) >= 3) )
return -EINVAL;
- page_lock(l3pg);
-
if ( unlikely(__copy_from_user(&ol3e, pl3e, sizeof(ol3e)) != 0) )
- return page_unlock(l3pg), -EFAULT;
+ return -EFAULT;
if ( l3e_get_flags(nl3e) & _PAGE_PRESENT )
{
if ( unlikely(l3e_get_flags(nl3e) & l3_disallow_mask(d)) )
{
- page_unlock(l3pg);
MEM_LOG("Bad L3 flags %x",
l3e_get_flags(nl3e) & l3_disallow_mask(d));
return -EINVAL;
@@ -1758,13 +1742,12 @@ static int mod_l3_entry(l3_pgentry_t *pl
{
adjust_guest_l3e(nl3e, d);
rc = UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, curr, preserve_ad);
- page_unlock(l3pg);
return rc ? 0 : -EFAULT;
}
rc = get_page_from_l3e(nl3e, pfn, d, 0, preemptible);
if ( unlikely(rc < 0) )
- return page_unlock(l3pg), rc;
+ return rc;
rc = 0;
adjust_guest_l3e(nl3e, d);
@@ -1778,7 +1761,6 @@ static int mod_l3_entry(l3_pgentry_t *pl
else if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, curr,
preserve_ad)) )
{
- page_unlock(l3pg);
return -EFAULT;
}
@@ -1790,7 +1772,6 @@ static int mod_l3_entry(l3_pgentry_t *pl
pae_flush_pgd(pfn, pgentry_ptr_to_slot(pl3e), nl3e);
}
- page_unlock(l3pg);
put_page_from_l3e(ol3e, pfn, 0, 0);
return rc;
}
@@ -1807,7 +1788,6 @@ static int mod_l4_entry(l4_pgentry_t *pl
struct vcpu *curr = current;
struct domain *d = curr->domain;
l4_pgentry_t ol4e;
- struct page_info *l4pg = mfn_to_page(pfn);
int rc = 0;
if ( unlikely(!is_guest_l4_slot(d, pgentry_ptr_to_slot(pl4e))) )
@@ -1816,16 +1796,13 @@ static int mod_l4_entry(l4_pgentry_t *pl
return -EINVAL;
}
- page_lock(l4pg);
-
if ( unlikely(__copy_from_user(&ol4e, pl4e, sizeof(ol4e)) != 0) )
- return page_unlock(l4pg), -EFAULT;
+ return -EFAULT;
if ( l4e_get_flags(nl4e) & _PAGE_PRESENT )
{
if ( unlikely(l4e_get_flags(nl4e) & L4_DISALLOW_MASK) )
{
- page_unlock(l4pg);
MEM_LOG("Bad L4 flags %x",
l4e_get_flags(nl4e) & L4_DISALLOW_MASK);
return -EINVAL;
@@ -1836,13 +1813,12 @@ static int mod_l4_entry(l4_pgentry_t *pl
{
adjust_guest_l4e(nl4e, d);
rc = UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, curr, preserve_ad);
- page_unlock(l4pg);
return rc ? 0 : -EFAULT;
}
rc = get_page_from_l4e(nl4e, pfn, d, 0, preemptible);
if ( unlikely(rc < 0) )
- return page_unlock(l4pg), rc;
+ return rc;
rc = 0;
adjust_guest_l4e(nl4e, d);
@@ -1856,11 +1832,9 @@ static int mod_l4_entry(l4_pgentry_t *pl
else if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, curr,
preserve_ad)) )
{
- page_unlock(l4pg);
return -EFAULT;
}
- page_unlock(l4pg);
put_page_from_l4e(ol4e, pfn, 0, 0);
return rc;
}
@@ -2918,7 +2892,6 @@ int do_mmu_update(
unsigned int cmd, done = 0;
struct vcpu *v = current;
struct domain *d = v->domain;
- unsigned long type_info;
struct domain_mmap_cache mapcache;
if ( unlikely(count & MMU_UPDATE_PREEMPTED) )
@@ -2990,24 +2963,9 @@ int do_mmu_update(
(unsigned long)(req.ptr & ~PAGE_MASK));
page = mfn_to_page(mfn);
- switch ( (type_info = page->u.inuse.type_info) & PGT_type_mask )
+ if ( page_lock(page) )
{
- case PGT_l1_page_table:
- case PGT_l2_page_table:
- case PGT_l3_page_table:
- case PGT_l4_page_table:
- {
- if ( paging_mode_refcounts(d) )
- {
- MEM_LOG("mmu update on auto-refcounted domain!");
- break;
- }
-
- if ( unlikely(!get_page_type(
- page, type_info & (PGT_type_mask|PGT_pae_xen_l2))) )
- goto not_a_pt;
-
- switch ( type_info & PGT_type_mask )
+ switch ( page->u.inuse.type_info & PGT_type_mask )
{
case PGT_l1_page_table:
{
@@ -3019,7 +2977,7 @@ int do_mmu_update(
case PGT_l2_page_table:
{
l2_pgentry_t l2e = l2e_from_intpte(req.val);
- okay = mod_l2_entry(va, l2e, mfn, type_info,
+ okay = mod_l2_entry(va, l2e, mfn,
cmd == MMU_PT_UPDATE_PRESERVE_AD);
}
break;
@@ -3041,31 +2999,23 @@ int do_mmu_update(
}
break;
#endif
+ case PGT_writable_page:
+ perfc_incr(writable_mmu_updates);
+ okay = paging_write_guest_entry(v, va, req.val, _mfn(mfn));
+ break;
}
-
- put_page_type(page);
+ page_unlock(page);
if ( rc == -EINTR )
rc = -EAGAIN;
}
- break;
-
- default:
- not_a_pt:
+ else if ( get_page_type(page, PGT_writable_page) )
{
- if ( unlikely(!get_page_type(page, PGT_writable_page)) )
- break;
-
perfc_incr(writable_mmu_updates);
-
okay = paging_write_guest_entry(v, va, req.val, _mfn(mfn));
-
put_page_type(page);
}
- break;
- }
unmap_domain_page_with_cache(va, &mapcache);
-
put_page(page);
break;
@@ -3144,7 +3094,6 @@ static int create_grant_pte_mapping(
void *va;
unsigned long gmfn, mfn;
struct page_info *page;
- unsigned long type;
l1_pgentry_t ol1e;
struct domain *d = v->domain;
@@ -3165,21 +3114,23 @@ static int create_grant_pte_mapping(
va = (void *)((unsigned long)va + ((unsigned long)pte_addr & ~PAGE_MASK));
page = mfn_to_page(mfn);
- type = page->u.inuse.type_info & PGT_type_mask;
- if ( (type != PGT_l1_page_table) || !get_page_type(page, type) )
+ if ( !page_lock(page) )
{
- MEM_LOG("Grant map attempted to update a non-L1 page");
rc = GNTST_general_error;
goto failed;
}
- page_lock(page);
+ if ( (page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table )
+ {
+ page_unlock(page);
+ rc = GNTST_general_error;
+ goto failed;
+ }
ol1e = *(l1_pgentry_t *)va;
if ( !UPDATE_ENTRY(l1, (l1_pgentry_t *)va, ol1e, nl1e, mfn, v, 0) )
{
page_unlock(page);
- put_page_type(page);
rc = GNTST_general_error;
goto failed;
}
@@ -3189,8 +3140,6 @@ static int create_grant_pte_mapping(
if ( !paging_mode_refcounts(d) )
put_page_from_l1e(ol1e, d);
- put_page_type(page);
-
failed:
unmap_domain_page(va);
put_page(page);
@@ -3205,7 +3154,6 @@ static int destroy_grant_pte_mapping(
void *va;
unsigned long gmfn, mfn;
struct page_info *page;
- unsigned long type;
l1_pgentry_t ol1e;
gmfn = addr >> PAGE_SHIFT;
@@ -3221,15 +3169,18 @@ static int destroy_grant_pte_mapping(
va = (void *)((unsigned long)va + ((unsigned long)addr & ~PAGE_MASK));
page = mfn_to_page(mfn);
- type = page->u.inuse.type_info & PGT_type_mask;
- if ( (type != PGT_l1_page_table) || !get_page_type(page, type) )
+ if ( !page_lock(page) )
{
- MEM_LOG("Grant map attempted to update a non-L1 page");
rc = GNTST_general_error;
goto failed;
}
- page_lock(page);
+ if ( (page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table )
+ {
+ page_unlock(page);
+ rc = GNTST_general_error;
+ goto failed;
+ }
ol1e = *(l1_pgentry_t *)va;
@@ -3239,7 +3190,6 @@ static int destroy_grant_pte_mapping(
page_unlock(page);
MEM_LOG("PTE entry %lx for address %"PRIx64" doesn't match frame %lx",
(unsigned long)l1e_get_intpte(ol1e), addr, frame);
- put_page_type(page);
rc = GNTST_general_error;
goto failed;
}
@@ -3253,13 +3203,11 @@ static int destroy_grant_pte_mapping(
{
page_unlock(page);
MEM_LOG("Cannot delete PTE entry at %p", va);
- put_page_type(page);
rc = GNTST_general_error;
goto failed;
}
page_unlock(page);
- put_page_type(page);
failed:
unmap_domain_page(va);
@@ -3287,21 +3235,40 @@ static int create_grant_va_mapping(
MEM_LOG("Could not find L1 PTE for address %lx", va);
return GNTST_general_error;
}
+
+ if ( !get_page_from_pagenr(gl1mfn, current->domain) )
+ {
+ guest_unmap_l1e(v, pl1e);
+ return GNTST_general_error;
+ }
+
l1pg = mfn_to_page(gl1mfn);
- page_lock(l1pg);
+ if ( !page_lock(l1pg) )
+ {
+ put_page(l1pg);
+ guest_unmap_l1e(v, pl1e);
+ return GNTST_general_error;
+ }
+
+ if ( (l1pg->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table )
+ {
+ page_unlock(l1pg);
+ put_page(l1pg);
+ guest_unmap_l1e(v, pl1e);
+ return GNTST_general_error;
+ }
+
ol1e = *pl1e;
okay = UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, v, 0);
+
page_unlock(l1pg);
+ put_page(l1pg);
guest_unmap_l1e(v, pl1e);
- pl1e = NULL;
- if ( !okay )
- return GNTST_general_error;
-
- if ( !paging_mode_refcounts(d) )
+ if ( okay && !paging_mode_refcounts(d) )
put_page_from_l1e(ol1e, d);
- return GNTST_okay;
+ return okay ? GNTST_okay : GNTST_general_error;
}
static int replace_grant_va_mapping(
@@ -3319,31 +3286,48 @@ static int replace_grant_va_mapping(
return GNTST_general_error;
}
+ if ( !get_page_from_pagenr(gl1mfn, current->domain) )
+ {
+ rc = GNTST_general_error;
+ goto out;
+ }
+
l1pg = mfn_to_page(gl1mfn);
- page_lock(l1pg);
+ if ( !page_lock(l1pg) )
+ {
+ rc = GNTST_general_error;
+ put_page(l1pg);
+ goto out;
+ }
+
+ if ( (l1pg->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table )
+ {
+ rc = GNTST_general_error;
+ goto unlock_and_out;
+ }
+
ol1e = *pl1e;
/* Check that the virtual address supplied is actually mapped to frame. */
if ( unlikely(l1e_get_pfn(ol1e) != frame) )
{
- page_unlock(l1pg);
MEM_LOG("PTE entry %lx for address %lx doesn't match frame %lx",
l1e_get_pfn(ol1e), addr, frame);
rc = GNTST_general_error;
- goto out;
+ goto unlock_and_out;
}
/* Delete pagetable entry. */
if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, v, 0)) )
{
- page_unlock(l1pg);
MEM_LOG("Cannot delete PTE entry at %p", (unsigned long *)pl1e);
rc = GNTST_general_error;
- goto out;
+ goto unlock_and_out;
}
+ unlock_and_out:
page_unlock(l1pg);
-
+ put_page(l1pg);
out:
guest_unmap_l1e(v, pl1e);
return rc;
@@ -3405,20 +3389,42 @@ int replace_grant_host_mapping(
return GNTST_general_error;
}
+ if ( !get_page_from_pagenr(gl1mfn, current->domain) )
+ {
+ guest_unmap_l1e(curr, pl1e);
+ return GNTST_general_error;
+ }
+
l1pg = mfn_to_page(gl1mfn);
- page_lock(l1pg);
+ if ( !page_lock(l1pg) )
+ {
+ put_page(l1pg);
+ guest_unmap_l1e(curr, pl1e);
+ return GNTST_general_error;
+ }
+
+ if ( (l1pg->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table )
+ {
+ page_unlock(l1pg);
+ put_page(l1pg);
+ guest_unmap_l1e(curr, pl1e);
+ return GNTST_general_error;
+ }
+
ol1e = *pl1e;
if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, l1e_empty(),
gl1mfn, curr, 0)) )
{
page_unlock(l1pg);
+ put_page(l1pg);
MEM_LOG("Cannot delete PTE entry at %p", (unsigned long *)pl1e);
guest_unmap_l1e(curr, pl1e);
return GNTST_general_error;
}
page_unlock(l1pg);
+ put_page(l1pg);
guest_unmap_l1e(curr, pl1e);
rc = replace_grant_va_mapping(addr, frame, ol1e, curr);
@@ -3480,28 +3486,45 @@ int do_update_va_mapping(unsigned long v
l1_pgentry_t val = l1e_from_intpte(val64);
struct vcpu *v = current;
struct domain *d = v->domain;
+ struct page_info *gl1pg;
l1_pgentry_t *pl1e;
unsigned long vmask, bmap_ptr, gl1mfn;
cpumask_t pmask;
- int rc = 0;
+ int rc;
perfc_incr(calls_to_update_va);
- if ( unlikely(!access_ok(va, 1) && !paging_mode_external(d)) )
- return -EINVAL;
-
rc = xsm_update_va_mapping(d, val);
if ( rc )
return rc;
+ rc = -EINVAL;
pl1e = guest_map_l1e(v, va, &gl1mfn);
+ if ( unlikely(!pl1e || !get_page_from_pagenr(gl1mfn, d)) )
+ goto out;
- if ( unlikely(!pl1e || !mod_l1_entry(pl1e, val, gl1mfn, 0)) )
- rc = -EINVAL;
+ gl1pg = mfn_to_page(gl1mfn);
+ if ( !page_lock(gl1pg) )
+ {
+ put_page(gl1pg);
+ goto out;
+ }
+
+ if ( (gl1pg->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table )
+ {
+ page_unlock(gl1pg);
+ put_page(gl1pg);
+ goto out;
+ }
+
+ rc = mod_l1_entry(pl1e, val, gl1mfn, 0) ? 0 : -EINVAL;
+ page_unlock(gl1pg);
+ put_page(gl1pg);
+
+ out:
if ( pl1e )
guest_unmap_l1e(v, pl1e);
- pl1e = NULL;
process_deferred_ops();
@@ -4122,15 +4145,25 @@ int ptwr_do_page_fault(struct vcpu *v, u
/* Attempt to read the PTE that maps the VA being accessed. */
guest_get_eff_l1e(v, addr, &pte);
- page = l1e_get_page(pte);
/* We are looking only for read-only mappings of p.t. pages. */
if ( ((l1e_get_flags(pte) & (_PAGE_PRESENT|_PAGE_RW)) != _PAGE_PRESENT) ||
- !mfn_valid(l1e_get_pfn(pte)) ||
- ((page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table) ||
- ((page->u.inuse.type_info & PGT_count_mask) == 0) ||
- (page_get_owner(page) != d) )
+ !get_page_from_pagenr(l1e_get_pfn(pte), d) )
+ goto bail;
+
+ page = l1e_get_page(pte);
+ if ( !page_lock(page) )
+ {
+ put_page(page);
+ goto bail;
+ }
+
+ if ( (page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table )
+ {
+ page_unlock(page);
+ put_page(page);
goto bail;
+ }
ptwr_ctxt.ctxt.regs = regs;
ptwr_ctxt.ctxt.force_writeback = 0;
@@ -4139,9 +4172,11 @@ int ptwr_do_page_fault(struct vcpu *v, u
ptwr_ctxt.cr2 = addr;
ptwr_ctxt.pte = pte;
- page_lock(page);
rc = x86_emulate(&ptwr_ctxt.ctxt, &ptwr_emulate_ops);
+
page_unlock(page);
+ put_page(page);
+
if ( rc == X86EMUL_UNHANDLEABLE )
goto bail;
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1685,9 +1685,6 @@ shadow_free_p2m_page(struct domain *d, s
/* Free should not decrement domain's total allocation, since
* these pages were allocated without an owner. */
page_set_owner(pg, NULL);
-#if defined(__x86_64__)
- spin_lock_init(&pg->lock);
-#endif
free_domheap_pages(pg, 0);
d->arch.paging.shadow.p2m_pages--;
perfc_decr(shadow_alloc_count);
@@ -1801,16 +1798,7 @@ static unsigned int sh_set_allocation(st
* may get overwritten, so need to clear it here.
*/
for ( j = 0; j < 1U << order; j++ )
- {
page_set_owner(&((struct page_info *)sp)[j], NULL);
-#if defined(__x86_64__)
- /*
- * Re-instate lock field which we overwrite with shadow_page_info.
- * This was safe, since the lock is only used on guest pages.
- */
- spin_lock_init(&((struct page_info *)sp)[j].lock);
-#endif
- }
d->arch.paging.shadow.free_pages -= 1 << order;
d->arch.paging.shadow.total_pages -= 1 << order;
free_domheap_pages((struct page_info *)sp, order);
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -46,10 +46,6 @@ struct page_info
} u;
-#if defined(__x86_64__)
- spinlock_t lock;
-#endif
-
union {
/*
* Timestamp from 'TLB clock', used to avoid extra safety flushes.
@@ -127,27 +123,25 @@ struct page_info
/* Has this page been *partially* validated for use as its current type? */
#define _PGT_partial PG_shift(7)
#define PGT_partial PG_mask(1, 7)
+ /* Page is locked? */
+#define _PGT_locked PG_shift(8)
+#define PGT_locked PG_mask(1, 8)
/* Count of uses of this frame as its current type. */
-#define PGT_count_width PG_shift(7)
+#define PGT_count_width PG_shift(8)
#define PGT_count_mask ((1UL<<PGT_count_width)-1)
/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated PG_shift(1)
#define PGC_allocated PG_mask(1, 1)
-#if defined(__i386__)
- /* Page is locked? */
-# define _PGC_locked PG_shift(2)
-# define PGC_locked PG_mask(1, 2)
-#endif
/* Set when is using a page as a page table */
-#define _PGC_page_table PG_shift(3)
-#define PGC_page_table PG_mask(1, 3)
+#define _PGC_page_table PG_shift(2)
+#define PGC_page_table PG_mask(1, 2)
/* 3-bit PAT/PCD/PWT cache-attribute hint. */
-#define PGC_cacheattr_base PG_shift(6)
-#define PGC_cacheattr_mask PG_mask(7, 6)
+#define PGC_cacheattr_base PG_shift(5)
+#define PGC_cacheattr_mask PG_mask(7, 5)
/* Count of references to this frame. */
-#define PGC_count_width PG_shift(6)
+#define PGC_count_width PG_shift(5)
#define PGC_count_mask ((1UL<<PGC_count_width)-1)
#define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -336,7 +336,7 @@ void paging_dump_vcpu_info(struct vcpu *
* Access to the guest pagetables */
/* Get a mapping of a PV guest's l1e for this virtual address. */
-static inline void *
+static inline l1_pgentry_t *
guest_map_l1e(struct vcpu *v, unsigned long addr, unsigned long *gl1mfn)
{
l2_pgentry_t l2e;
@@ -354,15 +354,14 @@ guest_map_l1e(struct vcpu *v, unsigned l
!= _PAGE_PRESENT )
return NULL;
*gl1mfn = l2e_get_pfn(l2e);
- return &__linear_l1_table[l1_linear_offset(addr)];
+ return (l1_pgentry_t *)map_domain_page(*gl1mfn) + l1_table_offset(addr);
}
/* Pull down the mapping we got from guest_map_l1e() */
static inline void
guest_unmap_l1e(struct vcpu *v, void *p)
{
- if ( unlikely(paging_mode_translate(v->domain)) )
- unmap_domain_page(p);
+ unmap_domain_page(p);
}
/* Read the guest's l1e that maps this address. */
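With the lock folded into type_info, a single compare-and-swap both sets the lock bit and takes a type reference, and the lock is refused unless the word describes a validated page table with a non-zero count. A self-contained sketch of that pattern follows, with placeholder SK_* bit assignments and GCC __sync builtins standing in for Xen's cmpxchg(); it illustrates the idea rather than reproducing page_lock()/page_unlock() verbatim.

#define SK_LOCKED     (1UL << 62)
#define SK_VALIDATED  (1UL << 63)
#define SK_COUNT_MASK ((1UL << 32) - 1)

static int sketch_lock(volatile unsigned long *type_info)
{
    unsigned long x, nx;

    do {
        while ( (x = *type_info) & SK_LOCKED )   /* spin while someone holds it */
            ;
        nx = x + (1 | SK_LOCKED);                /* take lock and a type ref together */
        if ( !(x & SK_VALIDATED) ||
             !(x & SK_COUNT_MASK) ||
             !(nx & SK_COUNT_MASK) )
            return 0;                            /* not a usable, validated page table */
    } while ( !__sync_bool_compare_and_swap(type_info, x, nx) );

    return 1;
}

static void sketch_unlock(volatile unsigned long *type_info)
{
    /* Drop the lock bit and the reference in one atomic subtraction. */
    __sync_fetch_and_sub(type_info, 1 | SK_LOCKED);
}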


@@ -0,0 +1,14 @@
Index: xen-3.3.1-testing/tools/python/xen/xend/XendAPIStore.py
===================================================================
--- xen-3.3.1-testing.orig/tools/python/xen/xend/XendAPIStore.py
+++ xen-3.3.1-testing/tools/python/xen/xend/XendAPIStore.py
@@ -33,7 +33,8 @@ def register(uuid, type, inst):
def deregister(uuid, type):
old = get(uuid, type)
- del __classes[(uuid, type)]
+ if old is not None:
+ del __classes[(uuid, type)]
return old
def get(uuid, type):

19152-xm-man-page.patch

@@ -0,0 +1,13 @@
Index: xen-3.3.1-testing/docs/man/xm.pod.1
===================================================================
--- xen-3.3.1-testing.orig/docs/man/xm.pod.1
+++ xen-3.3.1-testing/docs/man/xm.pod.1
@@ -67,6 +67,8 @@ The attached console will perform much l
so running curses based interfaces over the console B<is not
advised>. Vi tends to get very odd when using it over this interface.
+Use the key combination Ctrl+] to detach the domain console.
+
=item B<create> I<configfile> [I<OPTIONS>] [I<vars>]..
The create subcommand requires a config file and can optionally take a


@@ -0,0 +1,17 @@
Index: xen-3.3.1-testing/tools/python/xen/xm/main.py
===================================================================
--- xen-3.3.1-testing.orig/tools/python/xen/xm/main.py
+++ xen-3.3.1-testing/tools/python/xen/xm/main.py
@@ -58,7 +58,11 @@ from xen.util.acmpolicy import ACM_LABEL
import XenAPI
import xen.lowlevel.xc
-xc = xen.lowlevel.xc.xc()
+try:
+ xc = xen.lowlevel.xc.xc()
+except Exception, ex:
+ print >>sys.stderr, ("Is xen kernel running?")
+ sys.exit(1)
import inspect
from xen.xend import XendOptions


@@ -55,11 +55,95 @@ Index: xen-3.3.1-testing/tools/blktap/lib/xenbus.c
#include "blktaplib.h"
#include "list.h"
#include "xs_api.h"
@@ -149,6 +150,37 @@ static int backend_remove(struct xs_hand
@@ -149,6 +150,125 @@ static int backend_remove(struct xs_hand
return 0;
}
+static int check_image(struct backend_info *be, const char** errmsg)
+static int check_sharing(struct xs_handle *h, struct backend_info *be)
+{
+ char *dom_uuid;
+ char *cur_dom_uuid;
+ char *path;
+ char *mode;
+ char *params;
+ char **domains;
+ char **devices;
+ int i, j;
+ unsigned int num_dom, num_dev;
+ blkif_info_t *info;
+ int ret = 0;
+
+ /* If the mode contains '!' or doesn't contain 'w' don't check anything */
+ xs_gather(h, be->backpath, "mode", NULL, &mode, NULL);
+ if (strchr(mode, '!'))
+ goto out;
+ if (strchr(mode, 'w') == NULL)
+ goto out;
+
+ /* Get the UUID of the domain we want to attach to */
+ if (asprintf(&path, "/local/domain/%ld", be->frontend_id) == -1)
+ goto fail;
+ xs_gather(h, path, "vm", NULL, &dom_uuid, NULL);
+ free(path);
+
+ /* Iterate through the devices of all VMs */
+ domains = xs_directory(h, XBT_NULL, "backend/tap", &num_dom);
+ for (i = 0; !ret && (i < num_dom); i++) {
+
+ /* If it's the same VM, no action needed */
+ if (asprintf(&path, "/local/domain/%s", domains[i]) == -1) {
+ ret = -1;
+ break;
+ }
+ xs_gather(h, path, "vm", NULL, &cur_dom_uuid, NULL);
+ free(path);
+
+ if (!strcmp(cur_dom_uuid, dom_uuid)) {
+ free(cur_dom_uuid);
+ continue;
+ }
+
+ /* Check the devices */
+ if (asprintf(&path, "backend/tap/%s", domains[i]) == -1) {
+ ret = -1;
+ free(cur_dom_uuid);
+ break;
+ }
+ devices = xs_directory(h, XBT_NULL, path, &num_dev);
+ free(path);
+
+ for (j = 0; !ret && (j < num_dev); j++) {
+ if (asprintf(&path, "backend/tap/%s/%s", domains[i], devices[j]) == -1) {
+ ret = -1;
+ break;
+ }
+ xs_gather(h, path, "params", NULL, &params, NULL);
+ free(path);
+
+ info = be->blkif->info;
+ if (strcmp(params, info->params)) {
+ ret = -1;
+ }
+
+ free(params);
+ }
+
+ free(cur_dom_uuid);
+ free(devices);
+ }
+ free(domains);
+ free(dom_uuid);
+ goto out;
+
+fail:
+ ret = -1;
+out:
+ free(mode);
+ return ret;
+}
+
+static int check_image(struct xs_handle *h, struct backend_info *be,
+ const char** errmsg)
+{
+ const char *path;
+ int mode;
@ -85,7 +169,11 @@ Index: xen-3.3.1-testing/tools/blktap/lib/xenbus.c
+ return -1;
+ }
+
+ /* TODO Check that the image is not attached to a different VM */
+ /* Check that the image is not attached to a different VM */
+ if (check_sharing(h, be)) {
+ *errmsg = "File already in use by other domain";
+ return -1;
+ }
+
+ return 0;
+}
@ -93,7 +181,7 @@ Index: xen-3.3.1-testing/tools/blktap/lib/xenbus.c
static void ueblktap_setup(struct xs_handle *h, char *bepath)
{
struct backend_info *be;
@@ -156,6 +188,7 @@ static void ueblktap_setup(struct xs_han
@@ -156,6 +276,7 @@ static void ueblktap_setup(struct xs_han
int len, er, deverr;
long int pdev = 0, handle;
blkif_info_t *blk;
@ -101,17 +189,17 @@ Index: xen-3.3.1-testing/tools/blktap/lib/xenbus.c
be = be_lookup_be(bepath);
if (be == NULL)
@@ -211,6 +244,9 @@ static void ueblktap_setup(struct xs_han
@@ -211,6 +332,9 @@ static void ueblktap_setup(struct xs_han
be->pdev = pdev;
}
+ if (check_image(be, &errmsg))
+ if (check_image(h, be, &errmsg))
+ goto fail;
+
er = blkif_init(be->blkif, handle, be->pdev, be->readonly);
if (er != 0) {
DPRINTF("Unable to open device %s\n",blk->params);
@@ -246,12 +282,21 @@ static void ueblktap_setup(struct xs_han
@@ -246,12 +370,21 @@ static void ueblktap_setup(struct xs_han
}
be->blkif->state = CONNECTED;
@ -134,7 +222,7 @@ Index: xen-3.3.1-testing/tools/blktap/lib/xenbus.c
close:
if (path)
free(path);
@@ -286,7 +331,8 @@ static void ueblktap_probe(struct xs_han
@@ -286,7 +419,8 @@ static void ueblktap_probe(struct xs_han
len = strsep_len(bepath, '/', 7);
if (len < 0)
goto free_be;


@ -1,8 +1,6 @@
Index: xen-3.3.1-testing/tools/blktap/drivers/block-cdrom.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ xen-3.3.1-testing/tools/blktap/drivers/block-cdrom.c 2008-09-28 13:14:49.000000000 -0600
@@ -0,0 +1,535 @@
--- /dev/null
+++ b/tools/blktap/drivers/block-cdrom.c
@@ -0,0 +1,536 @@
+/* block-cdrom.c
+ *
+ * simple slow synchronous cdrom disk implementation. Based off
@ -113,16 +111,17 @@ Index: xen-3.3.1-testing/tools/blktap/drivers/block-cdrom.c
+ ioctl(prv->fd, BLKSSZGET, &s->sector_size);
+
+ if (s->sector_size != CDROM_DEFAULT_SECTOR_SIZE)
+ DPRINTF("Note: sector size is %ld (not %d)\n",
+ s->sector_size, CDROM_DEFAULT_SECTOR_SIZE);
+ DPRINTF("Note: sector size is %llu (not %d)\n",
+ (long long unsigned)s->sector_size,
+ CDROM_DEFAULT_SECTOR_SIZE);
+ }
+#else
+ s->sector_size = CDROM_DEFAULT_SECTOR_SIZE;
+#endif
+ DPRINTF("Block Device: Image size: %llu",
+ (long long unsigned)s->size);
+ DPRINTF("\t media_present: %d sector_size: %lu\n",
+ prv->media_present, s->sector_size);
+ DPRINTF("Block Device: Image size: %llu"
+ " media_present: %d sector_size: %llu\n",
+ (long long unsigned)s->size, prv->media_present,
+ (long long unsigned)s->sector_size);
+ } else {
+ /*Local file? try fstat instead*/
+ prv->dev_type = FILE_DEVICE;
@ -538,10 +537,8 @@ Index: xen-3.3.1-testing/tools/blktap/drivers/block-cdrom.c
+ .td_get_parent_id = tdcdrom_get_parent_id,
+ .td_validate_parent = tdcdrom_validate_parent
+};
Index: xen-3.3.1-testing/xen/include/public/io/cdromif.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ xen-3.3.1-testing/xen/include/public/io/cdromif.h 2008-09-28 13:11:57.000000000 -0600
--- /dev/null
+++ b/xen/include/public/io/cdromif.h
@@ -0,0 +1,120 @@
+/******************************************************************************
+ * cdromif.h
@ -663,11 +660,9 @@ Index: xen-3.3.1-testing/xen/include/public/io/cdromif.h
+ sizeof(struct vcd_generic_command) - sizeof(struct request_sense))
+
+#endif
Index: xen-3.3.1-testing/tools/blktap/drivers/Makefile
===================================================================
--- xen-3.3.1-testing.orig/tools/blktap/drivers/Makefile 2008-09-28 13:11:54.000000000 -0600
+++ xen-3.3.1-testing/tools/blktap/drivers/Makefile 2008-09-28 13:11:57.000000000 -0600
@@ -24,8 +24,9 @@
--- a/tools/blktap/drivers/Makefile
+++ b/tools/blktap/drivers/Makefile
@@ -24,8 +24,9 @@ CRYPT_LIB := -lcrypto
$(warning *** libgcrypt not installed: falling back to libcrypto ***)
endif
@ -679,7 +674,7 @@ Index: xen-3.3.1-testing/tools/blktap/drivers/Makefile
BLK-OBJS-y := block-aio.o
BLK-OBJS-y += block-sync.o
@@ -33,6 +34,7 @@
@@ -33,6 +34,7 @@ BLK-OBJS-y += block-vmdk.o
BLK-OBJS-y += block-ram.o
BLK-OBJS-y += block-qcow.o
BLK-OBJS-y += block-qcow2.o
@ -687,11 +682,9 @@ Index: xen-3.3.1-testing/tools/blktap/drivers/Makefile
BLK-OBJS-y += aes.o
BLK-OBJS-y += tapaio.o
BLK-OBJS-$(CONFIG_Linux) += blk_linux.o
Index: xen-3.3.1-testing/tools/blktap/drivers/tapdisk.h
===================================================================
--- xen-3.3.1-testing.orig/tools/blktap/drivers/tapdisk.h 2008-09-28 13:11:56.000000000 -0600
+++ xen-3.3.1-testing/tools/blktap/drivers/tapdisk.h 2008-09-28 13:11:57.000000000 -0600
@@ -137,6 +137,9 @@
--- a/tools/blktap/drivers/tapdisk.h
+++ b/tools/blktap/drivers/tapdisk.h
@@ -137,6 +137,9 @@ struct tap_disk {
int (*td_get_parent_id) (struct disk_driver *dd, struct disk_id *id);
int (*td_validate_parent)(struct disk_driver *dd,
struct disk_driver *p, td_flag_t flags);
@ -701,7 +694,7 @@ Index: xen-3.3.1-testing/tools/blktap/drivers/tapdisk.h
};
typedef struct disk_info {
@@ -160,6 +163,7 @@
@@ -160,6 +163,7 @@ extern struct tap_disk tapdisk_vmdk;
extern struct tap_disk tapdisk_ram;
extern struct tap_disk tapdisk_qcow;
extern struct tap_disk tapdisk_qcow2;
@ -709,7 +702,7 @@ Index: xen-3.3.1-testing/tools/blktap/drivers/tapdisk.h
/*Define Individual Disk Parameters here */
@@ -240,6 +244,17 @@
@@ -240,6 +244,17 @@ static disk_info_t ioemu_disk = {
#endif
};
@ -727,7 +720,7 @@ Index: xen-3.3.1-testing/tools/blktap/drivers/tapdisk.h
/*Main disk info array */
static disk_info_t *dtypes[] = {
&aio_disk,
@@ -249,6 +264,7 @@
@@ -249,6 +264,7 @@ static disk_info_t *dtypes[] = {
&qcow_disk,
&qcow2_disk,
&ioemu_disk,
@ -735,11 +728,9 @@ Index: xen-3.3.1-testing/tools/blktap/drivers/tapdisk.h
};
typedef struct driver_list_entry {
Index: xen-3.3.1-testing/tools/blktap/lib/blktaplib.h
===================================================================
--- xen-3.3.1-testing.orig/tools/blktap/lib/blktaplib.h 2008-09-28 13:11:56.000000000 -0600
+++ xen-3.3.1-testing/tools/blktap/lib/blktaplib.h 2008-09-28 13:11:57.000000000 -0600
@@ -221,6 +221,7 @@
--- a/tools/blktap/lib/blktaplib.h
+++ b/tools/blktap/lib/blktaplib.h
@@ -221,6 +221,7 @@ typedef struct msg_pid {
#define DISK_TYPE_QCOW 4
#define DISK_TYPE_QCOW2 5
#define DISK_TYPE_IOEMU 6
@ -747,10 +738,8 @@ Index: xen-3.3.1-testing/tools/blktap/lib/blktaplib.h
/* xenstore/xenbus: */
#define DOMNAME "Domain-0"
Index: xen-3.3.1-testing/xen/include/public/io/blkif.h
===================================================================
--- xen-3.3.1-testing.orig/xen/include/public/io/blkif.h 2008-09-16 10:31:07.000000000 -0600
+++ xen-3.3.1-testing/xen/include/public/io/blkif.h 2008-09-28 13:11:57.000000000 -0600
--- a/xen/include/public/io/blkif.h
+++ b/xen/include/public/io/blkif.h
@@ -76,6 +76,10 @@
* "feature-flush-cache" node!
*/
@ -762,11 +751,9 @@ Index: xen-3.3.1-testing/xen/include/public/io/blkif.h
/*
* Maximum scatter/gather segments per request.
Index: xen-3.3.1-testing/tools/blktap/drivers/tapdisk.c
===================================================================
--- xen-3.3.1-testing.orig/tools/blktap/drivers/tapdisk.c 2008-09-16 10:31:02.000000000 -0600
+++ xen-3.3.1-testing/tools/blktap/drivers/tapdisk.c 2008-09-28 13:11:57.000000000 -0600
@@ -735,6 +735,22 @@
--- a/tools/blktap/drivers/tapdisk.c
+++ b/tools/blktap/drivers/tapdisk.c
@@ -735,6 +735,22 @@ static void get_io_request(struct td_sta
goto out;
}
break;
@ -789,11 +776,9 @@ Index: xen-3.3.1-testing/tools/blktap/drivers/tapdisk.c
default:
DPRINTF("Unknown block operation\n");
break;
Index: xen-3.3.1-testing/tools/python/xen/xend/server/BlktapController.py
===================================================================
--- xen-3.3.1-testing.orig/tools/python/xen/xend/server/BlktapController.py 2008-09-16 10:31:03.000000000 -0600
+++ xen-3.3.1-testing/tools/python/xen/xend/server/BlktapController.py 2008-09-28 13:11:57.000000000 -0600
@@ -14,8 +14,8 @@
--- a/tools/python/xen/xend/server/BlktapController.py
+++ b/tools/python/xen/xend/server/BlktapController.py
@@ -14,8 +14,8 @@ blktap_disk_types = [
'ram',
'qcow',
'qcow2',


@ -22,7 +22,7 @@
+ continue;
+ }
+ }
printk(" DomPage %p: caf=%08x, taf=%" PRtype_info "\n",
printk(" DomPage %p: caf=%08lx, taf=%" PRtype_info "\n",
_p(page_to_mfn(page)),
page->count_info, page->u.inuse.type_info);
--- a/xen/common/keyhandler.c


@ -1,9 +1,9 @@
%patch
Index: xen-3.3.1-testing/xen/include/asm-x86/hvm/domain.h
===================================================================
--- xen-3.3.1-testing.orig/xen/include/asm-x86/hvm/domain.h
+++ xen-3.3.1-testing/xen/include/asm-x86/hvm/domain.h
@@ -82,6 +82,7 @@ struct hvm_domain {
--- xen-3.3.1-testing.orig/xen/include/asm-x86/hvm/domain.h 2009-01-05 13:27:58.000000000 -0700
+++ xen-3.3.1-testing/xen/include/asm-x86/hvm/domain.h 2009-01-22 13:23:44.000000000 -0700
@@ -82,6 +82,7 @@
struct vmx_domain vmx;
struct svm_domain svm;
};
@ -13,8 +13,8 @@ Index: xen-3.3.1-testing/xen/include/asm-x86/hvm/domain.h
#endif /* __ASM_X86_HVM_DOMAIN_H__ */
Index: xen-3.3.1-testing/xen/arch/x86/hvm/Makefile
===================================================================
--- xen-3.3.1-testing.orig/xen/arch/x86/hvm/Makefile
+++ xen-3.3.1-testing/xen/arch/x86/hvm/Makefile
--- xen-3.3.1-testing.orig/xen/arch/x86/hvm/Makefile 2009-01-05 13:27:57.000000000 -0700
+++ xen-3.3.1-testing/xen/arch/x86/hvm/Makefile 2009-01-22 13:23:44.000000000 -0700
@@ -1,5 +1,6 @@
subdir-y += svm
subdir-y += vmx
@ -24,8 +24,8 @@ Index: xen-3.3.1-testing/xen/arch/x86/hvm/Makefile
obj-y += hvm.o
Index: xen-3.3.1-testing/xen/arch/x86/hvm/hvm.c
===================================================================
--- xen-3.3.1-testing.orig/xen/arch/x86/hvm/hvm.c
+++ xen-3.3.1-testing/xen/arch/x86/hvm/hvm.c
--- xen-3.3.1-testing.orig/xen/arch/x86/hvm/hvm.c 2009-01-05 13:27:57.000000000 -0700
+++ xen-3.3.1-testing/xen/arch/x86/hvm/hvm.c 2009-01-22 13:23:44.000000000 -0700
@@ -44,6 +44,7 @@
#include <asm/mc146818rtc.h>
#include <asm/spinlock.h>
@ -34,7 +34,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/hvm/hvm.c
#include <asm/hvm/vpt.h>
#include <asm/hvm/support.h>
#include <asm/hvm/cacheattr.h>
@@ -361,6 +362,7 @@ void hvm_domain_relinquish_resources(str
@@ -361,6 +362,7 @@
void hvm_domain_destroy(struct domain *d)
{
@ -42,7 +42,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/hvm/hvm.c
hvm_funcs.domain_destroy(d);
rtc_deinit(d);
stdvga_deinit(d);
@@ -644,8 +646,14 @@ int hvm_vcpu_initialise(struct vcpu *v)
@@ -644,8 +646,14 @@
{
int rc;
@ -57,7 +57,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/hvm/hvm.c
if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 )
goto fail2;
@@ -692,12 +700,14 @@ int hvm_vcpu_initialise(struct vcpu *v)
@@ -692,12 +700,14 @@
hvm_funcs.vcpu_destroy(v);
fail2:
vlapic_destroy(v);
@ -72,7 +72,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/hvm/hvm.c
hvm_vcpu_cacheattr_destroy(v);
vlapic_destroy(v);
hvm_funcs.vcpu_destroy(v);
@@ -1645,7 +1655,7 @@ void hvm_cpuid(unsigned int input, unsig
@@ -1645,7 +1655,7 @@
struct vcpu *v = current;
if ( cpuid_hypervisor_leaves(input, eax, ebx, ecx, edx) )
@ -81,7 +81,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/hvm/hvm.c
domain_cpuid(v->domain, input, *ecx, eax, ebx, ecx, edx);
@@ -1657,6 +1667,8 @@ void hvm_cpuid(unsigned int input, unsig
@@ -1657,6 +1667,8 @@
if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
__clear_bit(X86_FEATURE_APIC & 31, edx);
}
@ -90,7 +90,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/hvm/hvm.c
}
void hvm_rdtsc_intercept(struct cpu_user_regs *regs)
@@ -1747,6 +1759,8 @@ int hvm_msr_read_intercept(struct cpu_us
@@ -1747,6 +1759,8 @@
break;
default:
@ -99,7 +99,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/hvm/hvm.c
return hvm_funcs.msr_read_intercept(regs);
}
@@ -1835,6 +1849,8 @@ int hvm_msr_write_intercept(struct cpu_u
@@ -1835,6 +1849,8 @@
break;
default:
@ -108,7 +108,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/hvm/hvm.c
return hvm_funcs.msr_write_intercept(regs);
}
@@ -2002,6 +2018,10 @@ int hvm_do_hypercall(struct cpu_user_reg
@@ -2002,6 +2018,10 @@
case 0:
break;
}
@ -119,7 +119,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/hvm/hvm.c
if ( (eax >= NR_hypercalls) || !hvm_hypercall32_table[eax] )
{
@@ -2503,6 +2523,15 @@ long do_hvm_op(unsigned long op, XEN_GUE
@@ -2503,6 +2523,15 @@
rc = -EINVAL;
break;
@ -137,9 +137,9 @@ Index: xen-3.3.1-testing/xen/arch/x86/hvm/hvm.c
if ( rc == 0 )
Index: xen-3.3.1-testing/xen/include/public/arch-x86/hvm/save.h
===================================================================
--- xen-3.3.1-testing.orig/xen/include/public/arch-x86/hvm/save.h
+++ xen-3.3.1-testing/xen/include/public/arch-x86/hvm/save.h
@@ -38,7 +38,7 @@ struct hvm_save_header {
--- xen-3.3.1-testing.orig/xen/include/public/arch-x86/hvm/save.h 2009-01-05 13:27:58.000000000 -0700
+++ xen-3.3.1-testing/xen/include/public/arch-x86/hvm/save.h 2009-01-23 08:35:50.000000000 -0700
@@ -38,7 +38,7 @@
uint32_t version; /* File format version */
uint64_t changeset; /* Version of Xen that saved this file */
uint32_t cpuid; /* CPUID[0x01][%eax] on the saving machine */
@ -148,7 +148,7 @@ Index: xen-3.3.1-testing/xen/include/public/arch-x86/hvm/save.h
};
DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header);
@@ -421,9 +421,22 @@ struct hvm_hw_mtrr {
@@ -421,9 +421,23 @@
DECLARE_HVM_SAVE_TYPE(MTRR, 14, struct hvm_hw_mtrr);
@ -163,6 +163,7 @@ Index: xen-3.3.1-testing/xen/include/public/arch-x86/hvm/save.h
+struct hvm_hyperv_cpu {
+ uint64_t control_msr;
+ uint64_t version_msr;
+ uint64_t pad[27]; //KYS: sles10 sp2 compatibility
+};
+DECLARE_HVM_SAVE_TYPE(HYPERV_CPU, 16, struct hvm_hyperv_cpu);
/*
@ -174,8 +175,8 @@ Index: xen-3.3.1-testing/xen/include/public/arch-x86/hvm/save.h
#endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */
Index: xen-3.3.1-testing/xen/arch/x86/hvm/vlapic.c
===================================================================
--- xen-3.3.1-testing.orig/xen/arch/x86/hvm/vlapic.c
+++ xen-3.3.1-testing/xen/arch/x86/hvm/vlapic.c
--- xen-3.3.1-testing.orig/xen/arch/x86/hvm/vlapic.c 2009-01-22 13:23:43.000000000 -0700
+++ xen-3.3.1-testing/xen/arch/x86/hvm/vlapic.c 2009-01-22 13:23:44.000000000 -0700
@@ -34,6 +34,7 @@
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
@ -184,7 +185,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/hvm/vlapic.c
#include <asm/hvm/vmx/vmx.h>
#include <public/hvm/ioreq.h>
#include <public/hvm/params.h>
@@ -307,6 +308,7 @@ static int vlapic_accept_sipi(struct vcp
@@ -307,6 +308,7 @@
hvm_vcpu_reset_state(v, trampoline_vector << 8, 0);
vcpu_unpause(v);
@ -194,8 +195,8 @@ Index: xen-3.3.1-testing/xen/arch/x86/hvm/vlapic.c
}
Index: xen-3.3.1-testing/xen/include/public/hvm/params.h
===================================================================
--- xen-3.3.1-testing.orig/xen/include/public/hvm/params.h
+++ xen-3.3.1-testing/xen/include/public/hvm/params.h
--- xen-3.3.1-testing.orig/xen/include/public/hvm/params.h 2009-01-05 13:27:58.000000000 -0700
+++ xen-3.3.1-testing/xen/include/public/hvm/params.h 2009-01-22 13:23:44.000000000 -0700
@@ -93,6 +93,8 @@
/* ACPI S state: currently support S0 and S3 on x86. */
#define HVM_PARAM_ACPI_S_STATE 14

View File

@ -2,7 +2,7 @@
Index: xen-3.3.1-testing/xen/include/asm-x86/hvm/hvm_extensions.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ xen-3.3.1-testing/xen/include/asm-x86/hvm/hvm_extensions.h 2008-12-08 13:58:31.000000000 -0700
+++ xen-3.3.1-testing/xen/include/asm-x86/hvm/hvm_extensions.h 2009-01-22 13:23:44.000000000 -0700
@@ -0,0 +1,165 @@
+/****************************************************************************
+ |
@ -172,14 +172,14 @@ Index: xen-3.3.1-testing/xen/include/asm-x86/hvm/hvm_extensions.h
Index: xen-3.3.1-testing/xen/arch/x86/hvm/hyperv/Makefile
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ xen-3.3.1-testing/xen/arch/x86/hvm/hyperv/Makefile 2008-12-08 13:58:31.000000000 -0700
+++ xen-3.3.1-testing/xen/arch/x86/hvm/hyperv/Makefile 2009-01-22 13:23:44.000000000 -0700
@@ -0,0 +1,2 @@
+obj-y += hv_intercept.o
+obj-y += hv_hypercall.o
Index: xen-3.3.1-testing/xen/arch/x86/hvm/hyperv/hv_errno.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ xen-3.3.1-testing/xen/arch/x86/hvm/hyperv/hv_errno.h 2008-12-08 13:58:31.000000000 -0700
+++ xen-3.3.1-testing/xen/arch/x86/hvm/hyperv/hv_errno.h 2009-01-22 13:23:44.000000000 -0700
@@ -0,0 +1,62 @@
+/****************************************************************************
+ |
@ -246,7 +246,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/hvm/hyperv/hv_errno.h
Index: xen-3.3.1-testing/xen/arch/x86/hvm/hyperv/hv_hypercall.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ xen-3.3.1-testing/xen/arch/x86/hvm/hyperv/hv_hypercall.c 2008-12-11 18:17:25.000000000 -0700
+++ xen-3.3.1-testing/xen/arch/x86/hvm/hyperv/hv_hypercall.c 2009-01-22 13:23:44.000000000 -0700
@@ -0,0 +1,153 @@
+/****************************************************************************
+ |
@ -404,7 +404,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/hvm/hyperv/hv_hypercall.c
Index: xen-3.3.1-testing/xen/arch/x86/hvm/hyperv/hv_hypercall.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ xen-3.3.1-testing/xen/arch/x86/hvm/hyperv/hv_hypercall.h 2008-12-08 13:58:31.000000000 -0700
+++ xen-3.3.1-testing/xen/arch/x86/hvm/hyperv/hv_hypercall.h 2009-01-22 13:23:44.000000000 -0700
@@ -0,0 +1,46 @@
+/****************************************************************************
+ |
@ -455,8 +455,8 @@ Index: xen-3.3.1-testing/xen/arch/x86/hvm/hyperv/hv_hypercall.h
Index: xen-3.3.1-testing/xen/arch/x86/hvm/hyperv/hv_intercept.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ xen-3.3.1-testing/xen/arch/x86/hvm/hyperv/hv_intercept.c 2008-12-09 12:42:50.000000000 -0700
@@ -0,0 +1,1002 @@
+++ xen-3.3.1-testing/xen/arch/x86/hvm/hyperv/hv_intercept.c 2009-01-26 12:33:27.000000000 -0700
@@ -0,0 +1,1008 @@
+/****************************************************************************
+ |
+ | Copyright (c) [2007, 2008] Novell, Inc.
@ -523,7 +523,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/hvm/hyperv/hv_intercept.c
+
+
+static inline void
+hv_hypercall_page_initialize(void *hypercall_page, hv_partition_t *curp);
+hv_hypercall_page_initialize(void *hypercall_page);
+
+static inline void *
+get_virt_from_gmfn(struct domain *d, unsigned long gmfn)
@ -607,8 +607,12 @@ Index: xen-3.3.1-testing/xen/arch/x86/hvm/hyperv/hv_intercept.c
+ spin_unlock(&curp->lock);
+ return;
+ }
+ hv_hypercall_page_initialize(hypercall_page, curp);
+ hv_hypercall_page_initialize(hypercall_page);
+ unmap_domain_page(hypercall_page);
+ if (hvm_funcs.guest_x86_mode(current) == 8)
+ curp->long_mode_guest = 1;
+ else
+ curp->long_mode_guest = 0;
+ curp->hypercall_msr = msr_content;
+ spin_unlock(&curp->lock);
+ cur_vcpu->flags |= HV_VCPU_UP;
@ -771,16 +775,10 @@ Index: xen-3.3.1-testing/xen/arch/x86/hvm/hyperv/hv_intercept.c
+}
+
+static inline void
+hv_hypercall_page_initialize(void *hypercall_page, hv_partition_t *curp)
+hv_hypercall_page_initialize(void *hypercall_page)
+{
+ char *p;
+
+ if (hvm_funcs.guest_x86_mode(current) == 8)
+ curp->long_mode_guest = 1;
+ else
+ curp->long_mode_guest = 0;
+
+
+ memset(hypercall_page, 0, PAGE_SIZE);
+ p = (char *)(hypercall_page) ;
+ /*
@ -1000,7 +998,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/hvm/hyperv/hv_intercept.c
+ vcpuid = hvm_load_instance(h);
+ vcpup = &curp->vcpu_state[vcpuid];
+ ASSERT(vcpup != NULL);
+ if ( hvm_load_entry(HYPERV_CPU, h, &ctxt) != 0 )
+ if ( hvm_load_entry(HYPERV_CPU, h, &ctxt) != 0 )
+ return -EINVAL;
+
+ vcpup->control_msr = ctxt.control_msr;
@ -1032,18 +1030,26 @@ Index: xen-3.3.1-testing/xen/arch/x86/hvm/hyperv/hv_intercept.c
+{
+ struct hvm_hyperv_dom ctxt;
+ hv_partition_t *curp;
+ void *hypercall_page;
+
+ if ( hvm_load_entry(HYPERV_DOM, h, &ctxt) != 0 )
+ if ( hvm_load_entry(HYPERV_DOM, h, &ctxt) != 0 )
+ return -EINVAL;
+ d->arch.hvm_domain.params[HVM_PARAM_EXTEND_HYPERVISOR] = ctxt.ext_id;
+ if (ctxt.ext_id != 1)
+ return 0;
+ d->arch.hvm_domain.params[HVM_PARAM_EXTEND_HYPERVISOR] = 1;
+ if (hyperv_initialize(d))
+ return -EINVAL;
+ curp = d->arch.hvm_domain.hyperv_handle;
+
+ curp->guest_id_msr = ctxt.guestid_msr;
+ curp->hypercall_msr = ctxt.hypercall_msr;
+ /*
+ * We may have migrated from a sles10 host; re-initialize the
+ * hypercall page.
+ */
+ hypercall_page = get_virt_from_gmfn(d, (curp->hypercall_msr >>12));
+ if (hypercall_page == NULL)
+ return -EINVAL;
+ hv_hypercall_page_initialize(hypercall_page);
+ unmap_domain_page(hypercall_page);
+ curp->long_mode_guest = ctxt.long_mode;
+ return 0;
+}
@ -1462,7 +1468,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/hvm/hyperv/hv_intercept.c
Index: xen-3.3.1-testing/xen/arch/x86/hvm/hyperv/hv_shim.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ xen-3.3.1-testing/xen/arch/x86/hvm/hyperv/hv_shim.h 2008-12-09 14:50:19.000000000 -0700
+++ xen-3.3.1-testing/xen/arch/x86/hvm/hyperv/hv_shim.h 2009-01-22 13:23:44.000000000 -0700
@@ -0,0 +1,285 @@
+/****************************************************************************
+ |

View File

@ -0,0 +1,24 @@
Index: xen-3.3.1-testing/tools/ioemu-remote/hw/xen_machine_fv.c
===================================================================
--- xen-3.3.1-testing.orig/tools/ioemu-remote/hw/xen_machine_fv.c
+++ xen-3.3.1-testing/tools/ioemu-remote/hw/xen_machine_fv.c
@@ -185,6 +185,7 @@ void qemu_invalidate_map_cache(void)
#endif /* defined(MAPCACHE) */
+extern void init_blktap(void);
static void xen_init_fv(ram_addr_t ram_size, int vga_ram_size,
const char *boot_device, DisplayState *ds,
@@ -210,6 +211,11 @@ static void xen_init_fv(ram_addr_t ram_s
}
#endif
+#ifndef CONFIG_STUBDOM
+ /* Initialize tapdisk client */
+ init_blktap();
+#endif
+
#ifdef CONFIG_STUBDOM /* the hvmop is not supported on older hypervisors */
xc_set_hvm_param(xc_handle, domid, HVM_PARAM_DM_DOMAIN, DOMID_SELF);
#endif


@ -122,7 +122,7 @@
}
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -331,6 +331,7 @@ TYPE_SAFE(unsigned long,mfn);
@@ -330,6 +330,7 @@ TYPE_SAFE(unsigned long,mfn);
#define machine_to_phys_mapping ((unsigned long *)RDWR_MPT_VIRT_START)
#define INVALID_M2P_ENTRY (~0UL)
#define VALID_M2P(_e) (!((_e) & (1UL<<(BITS_PER_LONG-1))))

x86_64-page-info-pack.patch

@ -0,0 +1,123 @@
References: bnc#470949
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1902,7 +1902,7 @@ static void sh_hash_audit_bucket(struct
{
if ( !page_is_out_of_sync(gpg) )
{
- SHADOW_ERROR("MFN %#lx shadowed (by %#"PRI_mfn")"
+ SHADOW_ERROR("MFN %#x shadowed (by %#"PRI_mfn")"
" and not OOS but has typecount %#lx\n",
sp->backpointer,
mfn_x(shadow_page_to_mfn(sp)),
@@ -1916,7 +1916,7 @@ static void sh_hash_audit_bucket(struct
if ( (gpg->u.inuse.type_info & PGT_type_mask) == PGT_writable_page
&& (gpg->u.inuse.type_info & PGT_count_mask) != 0 )
{
- SHADOW_ERROR("MFN %#lx shadowed (by %#"PRI_mfn")"
+ SHADOW_ERROR("MFN %#x shadowed (by %#"PRI_mfn")"
" but has typecount %#lx\n",
sp->backpointer, mfn_x(shadow_page_to_mfn(sp)),
gpg->u.inuse.type_info);
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -190,7 +190,7 @@ struct shadow_page_info
struct {
union {
/* When in use, guest page we're a shadow of */
- unsigned long backpointer;
+ unsigned int backpointer;
/* When free, order of the freelist we're on */
unsigned int order;
};
@@ -204,12 +204,17 @@ struct shadow_page_info
/* When free, TLB flush time when freed */
u32 tlbflush_timestamp;
};
+#ifdef __i386__
+ unsigned long mbz; /* Must be zero: count_info is here. */
+#endif
struct {
- unsigned long mbz; /* Must be zero: count_info is here. */
unsigned long type:5; /* What kind of shadow is this? */
unsigned long pinned:1; /* Is the shadow pinned? */
unsigned long count:26; /* Reference count */
};
+#ifndef __i386__
+ unsigned long mbz; /* Must be zero: count_info is here. */
+#endif
union {
/* For unused shadow pages, a list of pages of this order; for
* pinnable shadows, if pinned, a list of other pinned shadows
@@ -645,7 +650,7 @@ static inline int sh_get_ref(struct vcpu
if ( unlikely(nx >= 1U<<26) )
{
- SHADOW_PRINTK("shadow ref overflow, gmfn=%" PRtype_info " smfn=%lx\n",
+ SHADOW_PRINTK("shadow ref overflow, gmfn=%x smfn=%lx\n",
sp->backpointer, mfn_x(smfn));
return 0;
}
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -15,7 +15,7 @@
* 1. 'struct page_info' contains a 'struct list_head list'.
* 2. Provide a PFN_ORDER() macro for accessing the order of a free page.
*/
-#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)
+#define PFN_ORDER(_pfn) ((_pfn)->v.free.order)
struct page_info
{
@@ -30,16 +30,12 @@ struct page_info
/* Page is in use: ((count_info & PGC_count_mask) != 0). */
struct {
- /* Owner of this page (NULL if page is anonymous). */
- u32 _domain; /* pickled format */
/* Type reference count and various PGT_xxx flags and fields. */
unsigned long type_info;
} inuse;
/* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
struct {
- /* Order-size of the free chunk this page is the head of. */
- u32 order;
/* Mask of possibly-tainted TLBs. */
cpumask_t cpumask;
} free;
@@ -47,6 +43,22 @@ struct page_info
} u;
union {
+
+ /* Page is in use. */
+ struct {
+ /* Owner of this page (NULL if page is anonymous). */
+ u32 _domain; /* pickled format */
+ } inuse;
+
+ /* Page is on a free list. */
+ struct {
+ /* Order-size of the free chunk this page is the head of. */
+ u32 order;
+ } free;
+
+ } v;
+
+ union {
/*
* Timestamp from 'TLB clock', used to avoid extra safety flushes.
* Only valid for: a) free pages, and b) pages with zero type count
@@ -173,8 +185,8 @@ static inline u32 pickle_domptr(struct d
/* OOS fixup entries */
#define SHADOW_OOS_FIXUPS 2
-#define page_get_owner(_p) (unpickle_domptr((_p)->u.inuse._domain))
-#define page_set_owner(_p,_d) ((_p)->u.inuse._domain = pickle_domptr(_d))
+#define page_get_owner(_p) (unpickle_domptr((_p)->v.inuse._domain))
+#define page_set_owner(_p,_d) ((_p)->v.inuse._domain = pickle_domptr(_d))
#define maddr_get_owner(ma) (page_get_owner(maddr_to_page((ma))))
#define vaddr_get_owner(va) (page_get_owner(virt_to_page((va))))

x86_64-sh-next-shadow.patch

@ -0,0 +1,132 @@
References: bnc#470949
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1456,6 +1456,22 @@ static __init int shadow_blow_tables_key
__initcall(shadow_blow_tables_keyhandler_init);
#endif /* !NDEBUG */
+#ifdef __i386__
+# define next_shadow(pg) ((pg)->next_shadow)
+# define set_next_shadow(pg, n) ((void)((pg)->next_shadow = (n)))
+#else
+static inline struct shadow_page_info *
+next_shadow(const struct shadow_page_info *sp)
+{
+ return sp->next_shadow ? mfn_to_shadow_page(_mfn(sp->next_shadow)) : NULL;
+}
+static inline void
+set_next_shadow(struct shadow_page_info *sp, struct shadow_page_info *next)
+{
+ sp->next_shadow = next ? mfn_x(shadow_page_to_mfn(next)) : 0;
+}
+#endif
+
/* Allocate another shadow's worth of (contiguous, aligned) pages,
* and fill in the type and backpointer fields of their page_infos.
* Never fails to allocate. */
@@ -1525,7 +1541,7 @@ mfn_t shadow_alloc(struct domain *d,
sp[i].pinned = 0;
sp[i].count = 0;
sp[i].backpointer = backpointer;
- sp[i].next_shadow = NULL;
+ set_next_shadow(&sp[i], NULL);
perfc_incr(shadow_alloc_count);
}
return shadow_page_to_mfn(sp);
@@ -1865,7 +1881,7 @@ static void sh_hash_audit_bucket(struct
/* Wrong bucket? */
BUG_ON( sh_hash(sp->backpointer, sp->type) != bucket );
/* Duplicate entry? */
- for ( x = sp->next_shadow; x; x = x->next_shadow )
+ for ( x = next_shadow(sp); x; x = next_shadow(x) )
BUG_ON( x->backpointer == sp->backpointer && x->type == sp->type );
/* Follow the backpointer to the guest pagetable */
if ( sp->type != SH_type_fl1_32_shadow
@@ -1908,7 +1924,7 @@ static void sh_hash_audit_bucket(struct
}
}
/* That entry was OK; on we go */
- sp = sp->next_shadow;
+ sp = next_shadow(sp);
}
}
@@ -2002,7 +2018,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
/* Delete sp from the list */
prev->next_shadow = sp->next_shadow;
/* Re-insert it at the head of the list */
- sp->next_shadow = d->arch.paging.shadow.hash_table[key];
+ set_next_shadow(sp, d->arch.paging.shadow.hash_table[key]);
d->arch.paging.shadow.hash_table[key] = sp;
}
}
@@ -2013,7 +2029,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
return shadow_page_to_mfn(sp);
}
prev = sp;
- sp = sp->next_shadow;
+ sp = next_shadow(sp);
}
perfc_incr(shadow_hash_lookup_miss);
@@ -2040,7 +2056,7 @@ void shadow_hash_insert(struct vcpu *v,
/* Insert this shadow at the top of the bucket */
sp = mfn_to_shadow_page(smfn);
- sp->next_shadow = d->arch.paging.shadow.hash_table[key];
+ set_next_shadow(sp, d->arch.paging.shadow.hash_table[key]);
d->arch.paging.shadow.hash_table[key] = sp;
sh_hash_audit_bucket(d, key);
@@ -2067,7 +2083,7 @@ void shadow_hash_delete(struct vcpu *v,
sp = mfn_to_shadow_page(smfn);
if ( d->arch.paging.shadow.hash_table[key] == sp )
/* Easy case: we're deleting the head item. */
- d->arch.paging.shadow.hash_table[key] = sp->next_shadow;
+ d->arch.paging.shadow.hash_table[key] = next_shadow(sp);
else
{
/* Need to search for the one we want */
@@ -2076,15 +2092,15 @@ void shadow_hash_delete(struct vcpu *v,
{
ASSERT(x); /* We can't have hit the end, since our target is
* still in the chain somehwere... */
- if ( x->next_shadow == sp )
+ if ( next_shadow(x) == sp )
{
x->next_shadow = sp->next_shadow;
break;
}
- x = x->next_shadow;
+ x = next_shadow(x);
}
}
- sp->next_shadow = NULL;
+ set_next_shadow(sp, NULL);
sh_hash_audit_bucket(d, key);
}
@@ -2118,7 +2134,7 @@ static void hash_foreach(struct vcpu *v,
/* WARNING: This is not safe against changes to the hash table.
* The callback *must* return non-zero if it has inserted or
* deleted anything from the hash (lookups are OK, though). */
- for ( x = d->arch.paging.shadow.hash_table[i]; x; x = x->next_shadow )
+ for ( x = d->arch.paging.shadow.hash_table[i]; x; x = next_shadow(x) )
{
if ( callback_mask & (1 << x->type) )
{
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -196,7 +196,11 @@ struct shadow_page_info
};
union {
/* When in use, next shadow in this hash chain */
+#ifdef __i386__
struct shadow_page_info *next_shadow;
+#else
+ unsigned int next_shadow;
+#endif
/* When free, TLB flush time when freed */
u32 tlbflush_timestamp;
};


@ -1,3 +1,40 @@
-------------------------------------------------------------------
Thu Feb 5 12:03:44 MST 2009 - jfehlig@novell.com
- bnc#470133 - Better error handling in xm when not booted Xen
19153-xm-noxen-error.patch
-------------------------------------------------------------------
Wed Feb 4 20:35:41 CET 2009 - kwolf@suse.de
- bnc#472075 - Fix ioemu to initialize its blktap backend also for
fully virtualized guests
ioemu-blktap-fv-init.patch
-------------------------------------------------------------------
Tue Feb 3 13:35:28 MST 2009 - jfehlig@novell.com
- bnc#470855 - Add note to xm man page on how to detach domain
console
19152-xm-man-page.patch
-------------------------------------------------------------------
Mon Feb 2 14:15:55 MST 2009 - jfehlig@novell.com
- bnc#471090 - XendAPIStore: Do not remove non-existent item
class list
19151-xend-class-dereg.patch
-------------------------------------------------------------------
Mon Feb 2 10:41:05 MST 2009 - carnold@novell.com
- bnc#470949 - user mode application may crash kernel
19088-x86-page-non-atomic-owner.patch (Jan Beulich)
19089-x86_64-widen-page-refcounts.patch
19103-x86_64-fold-page-lock.patch
x86_64-page-info-pack.patch
x86_64-sh-next-shadow.patch
-------------------------------------------------------------------
Fri Jan 23 11:47:31 MST 2009 - carnold@novell.com


@ -1,5 +1,5 @@
#
# spec file for package xen (Version 3.3.1_18546_04)
# spec file for package xen (Version 3.3.1_18546_06)
#
# Copyright (c) 2009 SUSE LINUX Products GmbH, Nuernberg, Germany.
#
@ -37,7 +37,7 @@ BuildRequires: glibc-32bit glibc-devel-32bit
%if %{?with_kmp}0
BuildRequires: kernel-source kernel-syms module-init-tools xorg-x11
%endif
Version: 3.3.1_18546_04
Version: 3.3.1_18546_06
Release: 1
License: GPL v2 only
Group: System/Kernel
@ -146,6 +146,12 @@ Patch76: 19048-cross-bit-coredumping.patch
Patch77: 19051-cross-bit-coredumping.patch
Patch78: 19072-vmx-pat.patch
Patch79: 19079-snp_ctl-1.patch
Patch80: 19088-x86-page-non-atomic-owner.patch
Patch81: 19089-x86_64-widen-page-refcounts.patch
Patch82: 19103-x86_64-fold-page-lock.patch
Patch83: 19151-xend-class-dereg.patch
Patch84: 19152-xm-man-page.patch
Patch85: 19153-xm-noxen-error.patch
# Our patches
Patch100: xen-config.diff
Patch101: xend-config.diff
@ -212,8 +218,9 @@ Patch185: tapdisk-ioemu-logfile.patch
Patch186: blktap-ioemu-close-fix.patch
Patch187: ioemu-blktap-zero-size.patch
Patch188: blktap-error-handling.patch
Patch189: ioemu-blktap-fv-init.patch
# Jim's domain lock patch
Patch190: xend-domain-lock.patch
Patch200: xend-domain-lock.patch
# Patches from Jan
Patch240: dump-exec-state.patch
Patch241: x86-show-page-walk-early.patch
@ -228,6 +235,8 @@ Patch352: pvdrv_emulation_control.patch
Patch353: blktap-pv-cdrom.patch
Patch354: x86-cpufreq-report.patch
Patch355: dom-print.patch
Patch356: x86_64-sh-next-shadow.patch
Patch357: x86_64-page-info-pack.patch
# novell_shim patches
Patch400: hv_tools.patch
Patch401: hv_xen_base.patch
@ -644,6 +653,12 @@ Authors:
%patch77 -p1
%patch78 -p1
%patch79 -p1
%patch80 -p1
%patch81 -p1
%patch82 -p1
%patch83 -p1
%patch84 -p1
%patch85 -p1
%patch100 -p1
%patch101 -p1
%patch102 -p1
@ -706,7 +721,8 @@ Authors:
%patch186 -p1
%patch187 -p1
%patch188 -p1
%patch190 -p1
%patch189 -p1
%patch200 -p1
%patch240 -p1
%patch241 -p1
%patch242 -p1
@ -719,7 +735,8 @@ Authors:
%patch353 -p1
%patch354 -p1
%patch355 -p1
# Don't use shim for now
%patch356 -p1
%patch357 -p1
%ifarch x86_64
%patch400 -p1
%patch401 -p1
@ -1069,6 +1086,28 @@ rm -f $RPM_BUILD_ROOT/%{_libdir}/xen/bin/qemu-dm.debug
/sbin/ldconfig
%changelog
* Thu Feb 05 2009 jfehlig@novell.com
- bnc#470133 - Better error handling in xm when not booted Xen
19153-xm-noxen-error.patch
* Wed Feb 04 2009 kwolf@suse.de
- bnc#472075 - Fix ioemu to initialize its blktap backend also for
fully virtualized guests
ioemu-blktap-fv-init.patch
* Tue Feb 03 2009 jfehlig@novell.com
- bnc#470855 - Add note to xm man page on how to detach domain
console
19152-xm-man-page.patch
* Mon Feb 02 2009 jfehlig@novell.com
- bnc#471090 - XendAPIStore: Do not remove non-existent item
class list
19151-xend-class-dereg.patch
* Mon Feb 02 2009 carnold@novell.com
- bnc#470949 - user mode application may crash kernel
19088-x86-page-non-atomic-owner.patch (Jan Beulich)
19089-x86_64-widen-page-refcounts.patch
19103-x86_64-fold-page-lock.patch
x86_64-page-info-pack.patch
x86_64-sh-next-shadow.patch
* Fri Jan 23 2009 carnold@novell.com
- Intel - Remove improper operating condition that results in a
machine check.