99885eadf2
xen-4.4.1-testing-src.tar.bz2
- Dropped patches now contained in tarball
  53d7b781-x86-cpu-undo-BIOS-CPUID-max_leaf-limit-earlier.patch
  53df71c7-lz4-check-for-underruns.patch
  53e47d6b-x86_emulate-properly-do-IP-updates-and-other-side-effects.patch
- bnc#882089 - Windows 2012 R2 fails to boot up with greater than 60 vcpus
  53df727b-x86-HVM-extend-LAPIC-shortcuts-around-P2M-lookups.patch
  53e8be5f-x86-vHPET-use-rwlock-instead-of-simple-one.patch
  53ff3659-x86-consolidate-boolean-inputs-in-hvm-and-p2m.patch
  53ff36ae-x86-hvm-treat-non-insn-fetch-NPF-also-as-read-violations.patch
  53ff36d5-x86-mem_event-deliver-gla-fault-EPT-violation-information.patch
  54005472-EPT-utilize-GLA-GPA-translation-known-for-certain-faults.patch
- Upstream patches from Jan
  53f737b1-VMX-fix-DebugCtl-MSR-clearing.patch
  53f7386d-x86-irq-process-softirqs-in-irq-keyhandlers.patch
  53ff3716-x86-ats-Disable-Address-Translation-Services-by-default.patch
  53ff3899-x86-NMI-allow-processing-unknown-NMIs-with-watchdog.patch
- bnc#864801 - VUL-0: CVE-2013-4540: qemu: zaurus: buffer overrun on invalid state load
  CVE-2013-4540-qemu.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=328
References: bnc#882089

# Commit 3d4d4f9336159f3f77a7b480ce9984fd3ff7949f
# Date 2014-08-28 16:02:01 +0200
# Author Tamas K Lengyel <tamas.lengyel@zentific.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86: consolidate boolean inputs in hvm and p2m into a shared bitmap

This patch consolidates the boolean input parameters of
hvm_hap_nested_page_fault and p2m_mem_access_check into a common bitmap
and defines the bitmap members accordingly.

Signed-off-by: Tamas K Lengyel <tamas.lengyel@zentific.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Tim Deegan <tim@xen.org>
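
[Illustrative note, not part of the upstream commit: a minimal,
self-contained C sketch of the pattern this commit applies -- a struct of
one-bit fields replacing a list of bool_t parameters. handle_fault() and
the values passed to it are hypothetical stand-ins, not Xen code.]

    #include <stdio.h>

    /* Mirrors the struct npfec the patch adds to
     * xen/include/asm-x86/mm.h: four 1-bit fields replace four
     * separate boolean parameters. */
    struct npfec {
        unsigned int read_access:1;
        unsigned int write_access:1;
        unsigned int insn_fetch:1;
        unsigned int gla_valid:1;
    };

    /* Before: f(gpa, gla_valid, gla, access_r, access_w, access_x).
     * After: one struct carries every flag, so adding a flag no longer
     * changes the prototype and every call site. */
    static void handle_fault(unsigned long gpa, unsigned long gla,
                             struct npfec npfec)
    {
        if ( npfec.write_access )
            printf("write fault: gpa=%#lx gla=%#lx (%s)\n", gpa, gla,
                   npfec.gla_valid ? "valid" : "invalid");
    }

    int main(void)
    {
        /* Designated initializers keep the flag names visible at the
         * call site, the same idiom the patch uses in svm.c and vmx.c. */
        struct npfec npfec = {
            .read_access = 1,
            .write_access = 1,
            .gla_valid = 1,
        };

        handle_fault(0x1000ul, 0x7f00dead0000ul, npfec);
        return 0;
    }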

# Commit 24857896a30105b7947e2cd36d63768054538bbc
# Date 2014-09-03 15:06:06 +0200
# Author Andrew Cooper <andrew.cooper3@citrix.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/hvm: fix operator precedence bug introduced by 3d4d4f9336

Bitwise or has greater precedence than the ternary operator, making the
result of the expression a constant P2M_UNSHARE.

Coverity-ID: 1234633
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Don Slutz <dslutz@verizon.com>
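
[Illustrative note, not part of the upstream commit: the precedence bug in
miniature. The flag values below are made up for the demo, not Xen's real
definitions; the combined diff that follows already contains the corrected
expression in hvm.c.]

    #include <stdio.h>

    #define P2M_ALLOC   0x1u  /* illustrative values only */
    #define P2M_UNSHARE 0x2u

    int main(void)
    {
        unsigned int write_access = 0;

        /* Buggy form introduced by 3d4d4f9336: '|' binds tighter than
         * '?:', so this parses as
         *     (P2M_ALLOC | write_access) ? P2M_UNSHARE : 0
         * and always evaluates to P2M_UNSHARE, since P2M_ALLOC != 0. */
        unsigned int q_buggy = P2M_ALLOC | write_access ? P2M_UNSHARE : 0;

        /* Fixed form: parenthesizing the conditional keeps P2M_ALLOC
         * always set and adds P2M_UNSHARE only for write faults. */
        unsigned int q_fixed = P2M_ALLOC | (write_access ? P2M_UNSHARE : 0);

        printf("buggy=%#x fixed=%#x\n", q_buggy, q_fixed); /* 0x2 vs 0x1 */
        return 0;
    }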

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1464,12 +1464,8 @@ void hvm_inject_page_fault(int errcode,
     hvm_inject_trap(&trap);
 }
 
-int hvm_hap_nested_page_fault(paddr_t gpa,
-                              bool_t gla_valid,
-                              unsigned long gla,
-                              bool_t access_r,
-                              bool_t access_w,
-                              bool_t access_x)
+int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
+                              struct npfec npfec)
 {
     unsigned long gfn = gpa >> PAGE_SHIFT;
     p2m_type_t p2mt;
@@ -1498,8 +1494,11 @@ int hvm_hap_nested_page_fault(paddr_t gp
          * into l1 guest if not fixable. The algorithm is
          * the same as for shadow paging.
          */
-        rv = nestedhvm_hap_nested_page_fault(v, &gpa,
-                                             access_r, access_w, access_x);
+
+        rv = nestedhvm_hap_nested_page_fault(v, &gpa,
+                                             npfec.read_access,
+                                             npfec.write_access,
+                                             npfec.insn_fetch);
         switch (rv) {
         case NESTEDHVM_PAGEFAULT_DONE:
         case NESTEDHVM_PAGEFAULT_RETRY:
@@ -1538,47 +1537,49 @@ int hvm_hap_nested_page_fault(paddr_t gp
 
     p2m = p2m_get_hostp2m(v->domain);
     mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma,
-                              P2M_ALLOC | (access_w ? P2M_UNSHARE : 0), NULL);
+                              P2M_ALLOC | (npfec.write_access ? P2M_UNSHARE : 0),
+                              NULL);
 
     /* Check access permissions first, then handle faults */
     if ( mfn_x(mfn) != INVALID_MFN )
     {
-        int violation = 0;
+        bool_t violation;
+
         /* If the access is against the permissions, then send to mem_event */
-        switch (p2ma) 
+        switch (p2ma)
         {
         case p2m_access_n:
         case p2m_access_n2rwx:
         default:
-            violation = access_r || access_w || access_x;
+            violation = npfec.read_access || npfec.write_access || npfec.insn_fetch;
             break;
         case p2m_access_r:
-            violation = access_w || access_x;
+            violation = npfec.write_access || npfec.insn_fetch;
             break;
         case p2m_access_w:
-            violation = access_r || access_x;
+            violation = npfec.read_access || npfec.insn_fetch;
             break;
         case p2m_access_x:
-            violation = access_r || access_w;
+            violation = npfec.read_access || npfec.write_access;
             break;
         case p2m_access_rx:
         case p2m_access_rx2rw:
-            violation = access_w;
+            violation = npfec.write_access;
             break;
         case p2m_access_wx:
-            violation = access_r;
+            violation = npfec.read_access;
             break;
         case p2m_access_rw:
-            violation = access_x;
+            violation = npfec.insn_fetch;
             break;
         case p2m_access_rwx:
+            violation = 0;
             break;
         }
 
        if ( violation )
        {
-            if ( p2m_mem_access_check(gpa, gla_valid, gla, access_r,
-                                        access_w, access_x, &req_ptr) )
+            if ( p2m_mem_access_check(gpa, gla, npfec, &req_ptr) )
            {
                fall_through = 1;
            } else {
@@ -1594,7 +1595,7 @@ int hvm_hap_nested_page_fault(paddr_t gp
      * to the mmio handler.
      */
     if ( (p2mt == p2m_mmio_dm) ||
-         (access_w && (p2mt == p2m_ram_ro)) )
+         (npfec.write_access && (p2mt == p2m_ram_ro)) )
     {
         put_gfn(p2m->domain, gfn);
 
@@ -1613,7 +1614,7 @@ int hvm_hap_nested_page_fault(paddr_t gp
         paged = 1;
 
     /* Mem sharing: unshare the page and try again */
-    if ( access_w && (p2mt == p2m_ram_shared) )
+    if ( npfec.write_access && (p2mt == p2m_ram_shared) )
     {
         ASSERT(!p2m_is_nestedp2m(p2m));
         sharing_enomem =
@@ -1630,7 +1631,7 @@ int hvm_hap_nested_page_fault(paddr_t gp
          * a large page, we do not change other pages type within that large
          * page.
          */
-        if ( access_w )
+        if ( npfec.write_access )
         {
             paging_mark_dirty(v->domain, mfn_x(mfn));
             p2m_change_type(v->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
@@ -1640,7 +1641,7 @@ int hvm_hap_nested_page_fault(paddr_t gp
     }
 
     /* Shouldn't happen: Maybe the guest was writing to a r/o grant mapping? */
-    if ( access_w && (p2mt == p2m_grant_map_ro) )
+    if ( npfec.write_access && (p2mt == p2m_grant_map_ro) )
     {
         gdprintk(XENLOG_WARNING,
                  "trying to write to read-only grant mapping\n");
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1289,7 +1289,7 @@ const struct hvm_function_table * __init
 }
 
 static void svm_do_nested_pgfault(struct vcpu *v,
-    struct cpu_user_regs *regs, uint32_t npfec, paddr_t gpa)
+    struct cpu_user_regs *regs, uint32_t pfec, paddr_t gpa)
 {
     int ret;
     unsigned long gfn = gpa >> PAGE_SHIFT;
@@ -1298,10 +1298,13 @@ static void svm_do_nested_pgfault(struct
     p2m_access_t p2ma;
     struct p2m_domain *p2m = NULL;
 
-    ret = hvm_hap_nested_page_fault(gpa, 0, ~0ul,
-                                    1, /* All NPFs count as reads */
-                                    npfec & PFEC_write_access,
-                                    npfec & PFEC_insn_fetch);
+    struct npfec npfec = {
+        .read_access = 1, /* All NPFs count as reads */
+        .write_access = !!(pfec & PFEC_write_access),
+        .insn_fetch = !!(pfec & PFEC_insn_fetch)
+    };
+
+    ret = hvm_hap_nested_page_fault(gpa, ~0ul, npfec);
 
     if ( tb_init_done )
     {
@@ -1329,7 +1332,7 @@ static void svm_do_nested_pgfault(struct
     case -1:
         ASSERT(nestedhvm_enabled(v->domain) && nestedhvm_vcpu_in_guestmode(v));
         /* inject #VMEXIT(NPF) into guest. */
-        nestedsvm_vmexit_defer(v, VMEXIT_NPF, npfec, gpa);
+        nestedsvm_vmexit_defer(v, VMEXIT_NPF, pfec, gpa);
         return;
     }
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2278,6 +2278,11 @@ static void ept_handle_violation(unsigne
     p2m_type_t p2mt;
     int ret;
     struct domain *d = current->domain;
+    struct npfec npfec = {
+        .read_access = !!(qualification & EPT_READ_VIOLATION),
+        .write_access = !!(qualification & EPT_WRITE_VIOLATION),
+        .insn_fetch = !!(qualification & EPT_EXEC_VIOLATION)
+    };
 
     if ( tb_init_done )
     {
@@ -2296,14 +2301,14 @@ static void ept_handle_violation(unsigne
     }
 
     if ( qualification & EPT_GLA_VALID )
+    {
         __vmread(GUEST_LINEAR_ADDRESS, &gla);
+        npfec.gla_valid = 1;
+    }
     else
         gla = ~0ull;
-    ret = hvm_hap_nested_page_fault(gpa,
-                                    !!(qualification & EPT_GLA_VALID), gla,
-                                    !!(qualification & EPT_READ_VIOLATION),
-                                    !!(qualification & EPT_WRITE_VIOLATION),
-                                    !!(qualification & EPT_EXEC_VIOLATION));
+
+    ret = hvm_hap_nested_page_fault(gpa, gla, npfec);
 
     switch ( ret )
     {
     case 0:         // Unhandled L1 EPT violation
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1261,9 +1261,9 @@ void p2m_mem_paging_resume(struct domain
     }
 }
 
-bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
-                          bool_t access_r, bool_t access_w, bool_t access_x,
-                          mem_event_request_t **req_ptr)
+bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
+                            struct npfec npfec,
+                            mem_event_request_t **req_ptr)
 {
     struct vcpu *v = current;
     unsigned long gfn = gpa >> PAGE_SHIFT;
@@ -1281,7 +1281,7 @@ bool_t p2m_mem_access_check(paddr_t gpa,
     gfn_lock(p2m, gfn, 0);
     mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL);
 
-    if ( access_w && p2ma == p2m_access_rx2rw )
+    if ( npfec.write_access && p2ma == p2m_access_rx2rw )
     {
         rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw);
         ASSERT(rc);
@@ -1290,7 +1290,7 @@ bool_t p2m_mem_access_check(paddr_t gpa,
     }
     else if ( p2ma == p2m_access_n2rwx )
     {
-        ASSERT(access_w || access_r || access_x);
+        ASSERT(npfec.write_access || npfec.read_access || npfec.insn_fetch);
         rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
                             p2mt, p2m_access_rwx);
         ASSERT(rc);
@@ -1341,11 +1341,11 @@ bool_t p2m_mem_access_check(paddr_t gpa,
         /* Send request to mem event */
         req->gfn = gfn;
         req->offset = gpa & ((1 << PAGE_SHIFT) - 1);
-        req->gla_valid = gla_valid;
+        req->gla_valid = npfec.gla_valid;
         req->gla = gla;
-        req->access_r = access_r;
-        req->access_w = access_w;
-        req->access_x = access_x;
+        req->access_r = npfec.read_access;
+        req->access_w = npfec.write_access;
+        req->access_x = npfec.insn_fetch;
 
         req->vcpu_id = v->vcpu_id;
     }
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -435,11 +435,8 @@ static inline void hvm_invalidate_regs_f
 #endif
 }
 
-int hvm_hap_nested_page_fault(paddr_t gpa,
-                              bool_t gla_valid, unsigned long gla,
-                              bool_t access_r,
-                              bool_t access_w,
-                              bool_t access_x);
+int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
+                              struct npfec npfec);
 
 #define hvm_msr_tsc_aux(v) ({                                               \
     struct domain *__d = (v)->domain;                                       \
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -551,6 +551,16 @@ void audit_domains(void);
 
 #endif
 
+/*
+ * Nested page fault exception codes.
+ */
+struct npfec {
+    unsigned int read_access:1;
+    unsigned int write_access:1;
+    unsigned int insn_fetch:1;
+    unsigned int gla_valid:1;
+};
+
 int new_guest_cr3(unsigned long pfn);
 void make_cr3(struct vcpu *v, unsigned long mfn);
 void update_cr3(struct vcpu *v);
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -568,9 +568,9 @@ void p2m_mem_paging_resume(struct domain
  * been promoted with no underlying vcpu pause. If the req_ptr has been populated,
  * then the caller must put the event in the ring (once having released get_gfn*
  * locks -- caller must also xfree the request. */
-bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
-                          bool_t access_r, bool_t access_w, bool_t access_x,
-                          mem_event_request_t **req_ptr);
+bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
+                            struct npfec npfec,
+                            mem_event_request_t **req_ptr);
 /* Resumes the running of the VCPU, restarting the last instruction */
 void p2m_mem_access_resume(struct domain *d);