xen/523c0ed4-x86-HVM-properly-handle-wide-MMIO.patch

- Improvements to block-dmmd script
  bnc#828623
- bnc#839596 - VUL-0: CVE-2013-1442: XSA-62: xen: Information leak on
  AVX and/or LWP capable CPUs
  5242a1b5-x86-xsave-initialize-extended-register-state-when-guests-enable-it.patch
- bnc#840592 - VUL-0: CVE-2013-4355: XSA-63: xen: Information leaks
  through I/O instruction emulation
  CVE-2013-4355-xsa63.patch
- bnc#840593 - VUL-0: CVE-2013-4356: XSA-64: xen: Memory accessible by
  64-bit PV guests under live migration
  CVE-2013-4356-xsa64.patch
- bnc#841766 - VUL-1: CVE-2013-4361: XSA-66: xen: Information leak through
  fbld instruction emulation
  CVE-2013-4361-xsa66.patch
- bnc#833796 - L3: Xen: migration broken from xsave-capable to
  xsave-incapable host
  52205e27-x86-xsave-initialization-improvements.patch
  522dc0e6-x86-xsave-fix-migration-from-xsave-capable-to-xsave-incapable-host.patch
- bnc#839600 - [HP BCS SLES11 Bug]: In HP’s UEFI x86_64 platform and
  sles11sp3 with xen environment, xen hypervisor will panic on multiple
  blades nPar.
  523172d5-x86-fix-memory-cut-off-when-using-PFN-compression.patch
- bnc#833251 - [HP BCS SLES11 Bug]: In HP’s UEFI x86_64 platform and with
  xen environment, in booting stage, xen hypervisor will panic.
  522d896b-x86-EFI-properly-handle-run-time-memory-regions-outside-the-1-1-map.patch
- bnc#834751 - [HP BCS SLES11 Bug]: In xen, “shutdown -y 0 -h” cannot
  power off system
  522d896b-x86-EFI-properly-handle-run-time-memory-regions-outside-the-1-1-map.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=274
2013-10-02 22:41:46 +00:00
# Commit 3b89f08a498ddac09d4002d9849e329018ceb107
# Date 2013-09-20 11:01:08 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/HVM: properly handle MMIO reads and writes wider than a machine word
Just like real hardware we ought to split such accesses transparently
to the caller. With little extra effort we can at once even handle page
crossing accesses correctly.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
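
The splitting strategy can be illustrated outside the hypervisor. The
following self-contained C sketch is not part of the patch; PAGE_SIZE,
LONG_BYTEORDER and the two sample accesses are assumed values for a 64-bit
build. It mirrors how the initial chunk size is derived and how a wide
access is then issued as a series of power-of-2 sized requests:

#include <stdio.h>

#define PAGE_SIZE      4096u
#define LONG_BYTEORDER 3            /* machine word = 1 << 3 = 8 bytes */

/* Derive the initial chunk size the same way the patched code does. */
static unsigned int initial_chunk(unsigned int off, unsigned int bytes)
{
    unsigned int chunk = bytes < (1u << LONG_BYTEORDER)
                         ? bytes : (1u << LONG_BYTEORDER);

    /* Largest power of 2 not greater than the total size. */
    while ( chunk & (chunk - 1) )
        chunk &= chunk - 1;
    /* For page-crossing accesses, also respect the starting alignment so
     * that no individual request straddles the page boundary. */
    if ( off + bytes > PAGE_SIZE )
        while ( off & (chunk - 1) )
            chunk >>= 1;
    return chunk;
}

/* Emit the sequence of requests a single wide access is split into. */
static void split(unsigned int off, unsigned int bytes)
{
    unsigned int chunk = initial_chunk(off, bytes);

    printf("%u-byte access at page offset %#x:\n", bytes, off);
    while ( bytes )
    {
        printf("  request %u byte(s) at offset %#x\n", chunk, off);
        off = (off + chunk) & (PAGE_SIZE - 1);
        bytes -= chunk;
        if ( bytes < chunk )        /* at most one smaller tail request */
            chunk = bytes;
    }
}

int main(void)
{
    split(0x10, 10);                /* e.g. an 80-bit FPU operand       */
    split(PAGE_SIZE - 6, 16);       /* e.g. a misaligned 16-byte access */
    return 0;
}

For the 10-byte in-page access this prints an 8-byte plus a 2-byte request
(the "sum of two powers of 2" case); for the 16-byte access starting six
bytes before a page boundary it prints eight 2-byte requests, so no request
straddles the page or has a size hvmemul_do_io() would reject.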
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -438,6 +438,7 @@ static int __hvmemul_read(
 {
     struct vcpu *curr = current;
     unsigned long addr, reps = 1;
+    unsigned int off, chunk = min(bytes, 1U << LONG_BYTEORDER);
     uint32_t pfec = PFEC_page_present;
     struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     paddr_t gpa;
@@ -447,16 +448,38 @@ static int __hvmemul_read(
         seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
     if ( rc != X86EMUL_OKAY )
         return rc;
+    off = addr & (PAGE_SIZE - 1);
+    /*
+     * We only need to handle sizes actual instruction operands can have. All
+     * such sizes are either powers of 2 or the sum of two powers of 2. Thus
+     * picking as initial chunk size the largest power of 2 not greater than
+     * the total size will always result in only power-of-2 size requests
+     * issued to hvmemul_do_mmio() (hvmemul_do_io() rejects non-powers-of-2).
+     */
+    while ( chunk & (chunk - 1) )
+        chunk &= chunk - 1;
+    if ( off + bytes > PAGE_SIZE )
+        while ( off & (chunk - 1) )
+            chunk >>= 1;
 
     if ( unlikely(vio->mmio_gva == (addr & PAGE_MASK)) && vio->mmio_gva )
     {
-        unsigned int off = addr & (PAGE_SIZE - 1);
         if ( access_type == hvm_access_insn_fetch )
             return X86EMUL_UNHANDLEABLE;
         gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) | off);
-        if ( (off + bytes) <= PAGE_SIZE )
-            return hvmemul_do_mmio(gpa, &reps, bytes, 0,
-                                   IOREQ_READ, 0, p_data);
+        while ( (off + chunk) <= PAGE_SIZE )
+        {
+            rc = hvmemul_do_mmio(gpa, &reps, chunk, 0, IOREQ_READ, 0, p_data);
+            if ( rc != X86EMUL_OKAY || bytes == chunk )
+                return rc;
+            addr += chunk;
+            off += chunk;
+            gpa += chunk;
+            p_data += chunk;
+            bytes -= chunk;
+            if ( bytes < chunk )
+                chunk = bytes;
+        }
     }
 
     if ( (seg != x86_seg_none) &&
@@ -473,14 +496,32 @@ static int __hvmemul_read(
         return X86EMUL_EXCEPTION;
     case HVMCOPY_unhandleable:
         return X86EMUL_UNHANDLEABLE;
-    case HVMCOPY_bad_gfn_to_mfn:
+    case HVMCOPY_bad_gfn_to_mfn:
         if ( access_type == hvm_access_insn_fetch )
             return X86EMUL_UNHANDLEABLE;
-        rc = hvmemul_linear_to_phys(
-            addr, &gpa, bytes, &reps, pfec, hvmemul_ctxt);
-        if ( rc != X86EMUL_OKAY )
-            return rc;
-        return hvmemul_do_mmio(gpa, &reps, bytes, 0, IOREQ_READ, 0, p_data);
+        rc = hvmemul_linear_to_phys(addr, &gpa, chunk, &reps, pfec,
+                                    hvmemul_ctxt);
+        while ( rc == X86EMUL_OKAY )
+        {
+            rc = hvmemul_do_mmio(gpa, &reps, chunk, 0, IOREQ_READ, 0, p_data);
+            if ( rc != X86EMUL_OKAY || bytes == chunk )
+                break;
+            addr += chunk;
+            off += chunk;
+            p_data += chunk;
+            bytes -= chunk;
+            if ( bytes < chunk )
+                chunk = bytes;
+            if ( off < PAGE_SIZE )
+                gpa += chunk;
+            else
+            {
+                rc = hvmemul_linear_to_phys(addr, &gpa, chunk, &reps, pfec,
+                                            hvmemul_ctxt);
+                off = 0;
+            }
+        }
+        return rc;
     case HVMCOPY_gfn_paged_out:
         return X86EMUL_RETRY;
     case HVMCOPY_gfn_shared:
@@ -537,6 +578,7 @@ static int hvmemul_write(
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
     struct vcpu *curr = current;
     unsigned long addr, reps = 1;
+    unsigned int off, chunk = min(bytes, 1U << LONG_BYTEORDER);
     uint32_t pfec = PFEC_page_present | PFEC_write_access;
     struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     paddr_t gpa;
@@ -546,14 +588,30 @@ static int hvmemul_write(
         seg, offset, bytes, &reps, hvm_access_write, hvmemul_ctxt, &addr);
     if ( rc != X86EMUL_OKAY )
         return rc;
+    off = addr & (PAGE_SIZE - 1);
+    /* See the respective comment in __hvmemul_read(). */
+    while ( chunk & (chunk - 1) )
+        chunk &= chunk - 1;
+    if ( off + bytes > PAGE_SIZE )
+        while ( off & (chunk - 1) )
+            chunk >>= 1;
 
     if ( unlikely(vio->mmio_gva == (addr & PAGE_MASK)) && vio->mmio_gva )
     {
-        unsigned int off = addr & (PAGE_SIZE - 1);
         gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) | off);
-        if ( (off + bytes) <= PAGE_SIZE )
-            return hvmemul_do_mmio(gpa, &reps, bytes, 0,
-                                   IOREQ_WRITE, 0, p_data);
+        while ( (off + chunk) <= PAGE_SIZE )
+        {
+            rc = hvmemul_do_mmio(gpa, &reps, chunk, 0, IOREQ_WRITE, 0, p_data);
+            if ( rc != X86EMUL_OKAY || bytes == chunk )
+                return rc;
+            addr += chunk;
+            off += chunk;
+            gpa += chunk;
+            p_data += chunk;
+            bytes -= chunk;
+            if ( bytes < chunk )
+                chunk = bytes;
+        }
     }
 
     if ( (seg != x86_seg_none) &&
@@ -569,12 +627,29 @@ static int hvmemul_write(
     case HVMCOPY_unhandleable:
         return X86EMUL_UNHANDLEABLE;
     case HVMCOPY_bad_gfn_to_mfn:
-        rc = hvmemul_linear_to_phys(
-            addr, &gpa, bytes, &reps, pfec, hvmemul_ctxt);
-        if ( rc != X86EMUL_OKAY )
-            return rc;
-        return hvmemul_do_mmio(gpa, &reps, bytes, 0,
-                               IOREQ_WRITE, 0, p_data);
+        rc = hvmemul_linear_to_phys(addr, &gpa, chunk, &reps, pfec,
+                                    hvmemul_ctxt);
+        while ( rc == X86EMUL_OKAY )
+        {
+            rc = hvmemul_do_mmio(gpa, &reps, chunk, 0, IOREQ_WRITE, 0, p_data);
+            if ( rc != X86EMUL_OKAY || bytes == chunk )
+                break;
+            addr += chunk;
+            off += chunk;
+            p_data += chunk;
+            bytes -= chunk;
+            if ( bytes < chunk )
+                chunk = bytes;
+            if ( off < PAGE_SIZE )
+                gpa += chunk;
+            else
+            {
+                rc = hvmemul_linear_to_phys(addr, &gpa, chunk, &reps, pfec,
+                                            hvmemul_ctxt);
+                off = 0;
+            }
+        }
+        return rc;
     case HVMCOPY_gfn_paged_out:
         return X86EMUL_RETRY;
     case HVMCOPY_gfn_shared:
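
Note also why the loops above re-run hvmemul_linear_to_phys() once the
access moves past a page boundary instead of simply advancing gpa:
contiguous guest-linear pages need not map to contiguous guest-physical
frames. The toy model below is illustrative only; toy_linear_to_phys(),
its two-entry page table and all addresses are made-up stand-ins, not Xen
interfaces:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  4096u
#define PAGE_MASK  (~(uint64_t)(PAGE_SIZE - 1))

/* Hypothetical stand-in for the real translation: map two adjacent
 * guest-linear pages to frames that are nowhere near each other. */
static uint64_t toy_linear_to_phys(uint64_t addr)
{
    static const uint64_t frame_of_vpage[] = { 0x12345000, 0x0abcd000 };

    return frame_of_vpage[(addr / PAGE_SIZE) & 1] | (addr & ~PAGE_MASK);
}

int main(void)
{
    uint64_t addr = 2 * PAGE_SIZE - 4;   /* 8-byte access, 4 bytes per page */
    unsigned int chunk = 4, bytes = 8;   /* chunk = 4 per the derivation above */
    uint64_t gpa = toy_linear_to_phys(addr);

    while ( bytes )
    {
        printf("MMIO request: %u bytes at gpa %#llx\n",
               chunk, (unsigned long long)gpa);
        addr += chunk;
        bytes -= chunk;
        if ( !bytes )
            break;
        if ( (addr & PAGE_MASK) == ((addr - chunk) & PAGE_MASK) )
            gpa += chunk;                     /* same page: frame is contiguous */
        else
            gpa = toy_linear_to_phys(addr);   /* new page: look it up again */
    }
    return 0;
}

With this toy mapping the two 4-byte halves of the access land on entirely
unrelated frames, which is exactly the case the re-translation in the patch
guards against.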