xen/livemig-ept-novell-x64.patch

332 lines
12 KiB
Diff

Index: xen-3.2.1-testing/xen/arch/x86/hvm/vmx/vmx.c
===================================================================
--- xen-3.2.1-testing.orig/xen/arch/x86/hvm/vmx/vmx.c
+++ xen-3.2.1-testing/xen/arch/x86/hvm/vmx/vmx.c
@@ -50,6 +50,7 @@
#include <public/hvm/save.h>
#include <asm/hvm/trace.h>
#include <asm/hvm/hvm_extensions.h>
+#include <asm/hap.h>
enum handler_return { HNDL_done, HNDL_unhandled, HNDL_exception_raised };
@@ -2865,9 +2866,44 @@ void vmx_wbinvd_intercept(void)
static void ept_handle_violation(unsigned long qualification, paddr_t gpa)
{
- if ( unlikely(((qualification >> 7) & 0x3) != 0x3) )
+ unsigned long gla_validity = qualification & EPT_GLA_VALIDITY_MASK;
+ struct domain *d = current->domain;
+ u64 gfn = gpa >> PAGE_SHIFT;
+ mfn_t mfn;
+ p2m_type_t t;
+
+ /* GPA exceeds GAW. */
+ if ( unlikely(qualification & EPT_GAW_VIOLATION) )
{
- domain_crash(current->domain);
+ printk("EPT violation: guest physical address %"PRIpaddr" exceeded "
+ "its width limit.\n", gpa);
+ domain_crash(d);
+ }
+
+ if ( gla_validity == EPT_GLA_VALIDITY_RSVD ||
+ gla_validity == EPT_GLA_VALIDITY_PDPTR_LOAD )
+ {
+ printk("ept violation: reserved bit or pdptr load violation.\n");
+ domain_crash(d);
+ }
+
+ mfn = gfn_to_mfn(d, gfn, &t);
+
+ if ( unlikely( gla_validity != EPT_GLA_VALIDITY_MATCH) )
+ {
+ if ( p2m_is_ram(t) && paging_mode_log_dirty(d) )
+ goto mark_dirty;
+ domain_crash(d);
+ return;
+ }
+
+mark_dirty:
+
+ if ( p2m_is_ram(t) && paging_mode_log_dirty(d) )
+ {
+ paging_mark_dirty(d, mfn_x(mfn));
+ p2m_change_type(d, gfn, p2m_ram_logdirty, p2m_ram_rw);
+ flush_tlb_mask(d->domain_dirty_cpumask);
return;
}
Index: xen-3.2.1-testing/xen/arch/x86/mm/hap/hap.c
===================================================================
--- xen-3.2.1-testing.orig/xen/arch/x86/mm/hap/hap.c
+++ xen-3.2.1-testing/xen/arch/x86/mm/hap/hap.c
@@ -61,7 +61,7 @@ int hap_enable_log_dirty(struct domain *
hap_unlock(d);
/* set l1e entries of P2M table to be read-only. */
- p2m_change_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
+ p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
flush_tlb_mask(d->domain_dirty_cpumask);
return 0;
}
@@ -73,14 +73,14 @@ int hap_disable_log_dirty(struct domain
hap_unlock(d);
/* set l1e entries of P2M table with normal mode */
- p2m_change_type_global(d, p2m_ram_logdirty, p2m_ram_rw);
+ p2m_change_entry_type_global(d, p2m_ram_logdirty, p2m_ram_rw);
return 0;
}
void hap_clean_dirty_bitmap(struct domain *d)
{
/* set l1e entries of P2M table to be read-only. */
- p2m_change_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
+ p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
flush_tlb_mask(d->domain_dirty_cpumask);
}
Index: xen-3.2.1-testing/xen/arch/x86/mm/p2m-ept.c
===================================================================
--- xen-3.2.1-testing.orig/xen/arch/x86/mm/p2m-ept.c
+++ xen-3.2.1-testing/xen/arch/x86/mm/p2m-ept.c
@@ -23,6 +23,27 @@
#include <asm/types.h>
#include <asm/domain.h>
#include <asm/hvm/vmx/vmx.h>
+#include <asm/hap.h>
+
+static void ept_p2m_type_to_flags(ept_entry_t *entry, p2m_type_t type)
+{
+ switch(type)
+ {
+ case p2m_invalid:
+ case p2m_mmio_dm:
+ default:
+ return;
+ case p2m_ram_rw:
+ case p2m_mmio_direct:
+ entry->r = entry->w = entry->x = 1;
+ return;
+ case p2m_ram_logdirty:
+ case p2m_ram_ro:
+ entry->r = entry->x = 1;
+ entry->w = 0;
+ return;
+ }
+}
static int ept_next_level(struct domain *d, bool_t read_only,
ept_entry_t **table, unsigned long *gfn_remainder,
@@ -100,6 +121,7 @@ ept_set_entry(struct domain *d, unsigned
ept_entry->avail2 = 0;
/* last step */
ept_entry->r = ept_entry->w = ept_entry->x = 1;
+ ept_p2m_type_to_flags(ept_entry, p2mt);
}
else
ept_entry->epte = 0;
@@ -140,13 +162,10 @@ static mfn_t ept_get_entry(struct domain
index = gfn_remainder;
ept_entry = table + index;
- if ( (ept_entry->epte & 0x7) == 0x7 )
+ if ( ept_entry->avail1 != p2m_invalid )
{
- if ( ept_entry->avail1 != p2m_invalid )
- {
- *t = ept_entry->avail1;
- mfn = _mfn(ept_entry->mfn);
- }
+ *t = ept_entry->avail1;
+ mfn = _mfn(ept_entry->mfn);
}
out:
@@ -159,11 +178,64 @@ static mfn_t ept_get_entry_fast(unsigned
return ept_get_entry(current->domain, gfn, t);
}
+/* Walk the whole p2m table, changing any entries of the old type
+ * to the new type. This is used in hardware-assisted paging to
+ * quickly enable or disable log-dirty tracking */
+
+static void ept_change_entry_type_global(struct domain *d, p2m_type_t ot, p2m_type_t nt)
+{
+ if ( pagetable_get_pfn(d->arch.phys_table) == 0 )
+ return;
+
+ if ( EPT_DEFAULT_GAW == 3 )
+ {
+ ept_entry_t *l4e, *l3e, *l2e, *l1e;
+ int i4, i3, i2, i1;
+
+ l4e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+ for (i4 = 0; i4 < EPT_PAGETABLE_ENTRIES; i4++ )
+ {
+ if ( !(l4e+i4)->epte || (l4e+i4)->sp_avail)
+ continue;
+ l3e = map_domain_page((l4e+i4)->mfn);
+ for ( i3 = 0; i3 < EPT_PAGETABLE_ENTRIES; i3++ )
+ {
+ if ( !(l3e+i3)->epte || (l3e+i3)->sp_avail )
+ continue;
+ l2e = map_domain_page((l3e+i3)->mfn);
+ for ( i2 = 0; i2 < EPT_PAGETABLE_ENTRIES; i2++ )
+ {
+ if ( !(l2e+i2)->epte || (l2e+i2)->sp_avail )
+ continue;
+ l1e = map_domain_page((l2e+i2)->mfn);
+ for ( i1 = 0; i1 < EPT_PAGETABLE_ENTRIES; i1++ )
+ {
+ if ( !(l1e+i1)->epte )
+ continue;
+ if ( (l1e+i1)->avail1 != ot )
+ continue;
+ (l1e+i1)->avail1 = nt;
+ ept_p2m_type_to_flags(l1e+i1, nt);
+ }
+ unmap_domain_page(l1e);
+ }
+ unmap_domain_page(l2e);
+ }
+ unmap_domain_page(l3e);
+ }
+ unmap_domain_page(l4e);
+
+ if ( d->vcpu[0] )
+ ept_sync_domain(d->vcpu[0]);
+ }
+}
+
void ept_p2m_init(struct domain *d)
{
d->arch.p2m.set_entry = ept_set_entry;
d->arch.p2m.get_entry = ept_get_entry;
d->arch.p2m.get_entry_fast = ept_get_entry_fast;
+ d->arch.p2m.change_entry_type_global = ept_change_entry_type_global;
}
/*
Index: xen-3.2.1-testing/xen/arch/x86/mm/p2m.c
===================================================================
--- xen-3.2.1-testing.orig/xen/arch/x86/mm/p2m.c
+++ xen-3.2.1-testing/xen/arch/x86/mm/p2m.c
@@ -279,11 +279,19 @@ void p2m_init(struct domain *d)
d->arch.p2m.set_entry = p2m_set_entry;
d->arch.p2m.get_entry = p2m_gfn_to_mfn;
d->arch.p2m.get_entry_fast = p2m_gfn_to_mfn_fast;
+ d->arch.p2m.change_entry_type_global = p2m_change_type_global;
if ( is_hvm_domain(d) )
hvm_p2m_init(d);
}
+void p2m_change_entry_type_global(struct domain *d, p2m_type_t ot, p2m_type_t nt)
+{
+ p2m_lock(d);
+ d->arch.p2m.change_entry_type_global(d, ot, nt);
+ p2m_unlock(d);
+}
+
static inline
int set_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn, p2m_type_t p2mt)
{
@@ -806,8 +814,6 @@ void p2m_change_type_global(struct domai
if ( pagetable_get_pfn(d->arch.phys_table) == 0 )
return;
- p2m_lock(d);
-
#if CONFIG_PAGING_LEVELS == 4
l4e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
#elif CONFIG_PAGING_LEVELS == 3
@@ -878,7 +884,6 @@ void p2m_change_type_global(struct domai
unmap_domain_page(l2e);
#endif
- p2m_unlock(d);
}
/* Modify the p2m type of a single gfn from ot to nt, returning the
Index: xen-3.2.1-testing/xen/include/asm-x86/domain.h
===================================================================
--- xen-3.2.1-testing.orig/xen/include/asm-x86/domain.h
+++ xen-3.2.1-testing/xen/include/asm-x86/domain.h
@@ -171,6 +171,8 @@ struct p2m_domain {
p2m_type_t *p2mt);
mfn_t (*get_entry_fast)(unsigned long gfn, p2m_type_t *p2mt);
+ void (*change_entry_type_global)(struct domain *d,
+ p2m_type_t ot, p2m_type_t nt);
/* Highest guest frame that's ever been mapped in the p2m */
unsigned long max_mapped_pfn;
};
Index: xen-3.2.1-testing/xen/include/asm-x86/hap.h
===================================================================
--- xen-3.2.1-testing.orig/xen/include/asm-x86/hap.h
+++ xen-3.2.1-testing/xen/include/asm-x86/hap.h
@@ -97,6 +97,49 @@ extern struct paging_mode hap_paging_pro
extern struct paging_mode hap_paging_pae_mode;
extern struct paging_mode hap_paging_long_mode;
+/* EPT violation qualifications definitions */
+/* bit offset 0 in exit qualification */
+#define _EPT_READ_VIOLATION 0
+#define EPT_READ_VIOLATION (1UL<<_EPT_READ_VIOLATION)
+/* bit offset 1 in exit qualification */
+#define _EPT_WRITE_VIOLATION 1
+#define EPT_WRITE_VIOLATION (1UL<<_EPT_WRITE_VIOLATION)
+/* bit offset 2 in exit qualification */
+#define _EPT_EXEC_VIOLATION 2
+#define EPT_EXEC_VIOLATION (1UL<<_EPT_EXEC_VIOLATION)
+
+/* bit offset 3 in exit qualification */
+#define _EPT_EFFECTIVE_READ 3
+#define EPT_EFFECTIVE_READ (1UL<<_EPT_EFFECTIVE_READ)
+/* bit offset 4 in exit qualification */
+#define _EPT_EFFECTIVE_WRITE 4
+#define EPT_EFFECTIVE_WRITE (1UL<<_EPT_EFFECTIVE_WRITE)
+/* bit offset 5 in exit qualification */
+#define _EPT_EFFECTIVE_EXEC 5
+#define EPT_EFFECTIVE_EXEC (1UL<<_EPT_EFFECTIVE_EXEC)
+
+/* bit offset 6 in exit qualification */
+#define _EPT_GAW_VIOLATION 6
+#define EPT_GAW_VIOLATION (1UL<<_EPT_GAW_VIOLATION)
+
+/* bits offset 7 & 8 in exit qualification */
+#define _EPT_GLA_VALIDITY 7
+#define EPT_GLA_VALIDITY_MASK (3UL<<_EPT_GLA_VALIDITY)
+/* gla != gpa, when load PDPTR */
+#define EPT_GLA_VALIDITY_PDPTR_LOAD (0UL<<_EPT_GLA_VALIDITY)
+/* gla != gpa, during guest page table walking */
+#define EPT_GLA_VALIDITY_GPT_WALK (1UL<<_EPT_GLA_VALIDITY)
+/* reserved */
+#define EPT_GLA_VALIDITY_RSVD (2UL<<_EPT_GLA_VALIDITY)
+/* gla == gpa, normal case */
+#define EPT_GLA_VALIDITY_MATCH (3UL<<_EPT_GLA_VALIDITY)
+
+#define EPT_EFFECTIVE_MASK (EPT_EFFECTIVE_READ | \
+ EPT_EFFECTIVE_WRITE | \
+ EPT_EFFECTIVE_EXEC)
+
+#define EPT_PAGETABLE_ENTRIES 512
+
#endif /* XEN_HAP_H */
/*
Index: xen-3.2.1-testing/xen/include/asm-x86/p2m.h
===================================================================
--- xen-3.2.1-testing.orig/xen/include/asm-x86/p2m.h
+++ xen-3.2.1-testing/xen/include/asm-x86/p2m.h
@@ -209,6 +209,7 @@ void guest_physmap_remove_page(struct do
/* Change types across all p2m entries in a domain */
void p2m_change_type_global(struct domain *d, p2m_type_t ot, p2m_type_t nt);
+void p2m_change_entry_type_global(struct domain *d, p2m_type_t ot, p2m_type_t nt);
/* Compare-exchange the type of a single p2m entry */
p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn,