References: bnc#882089

# Commit ecb69533582e51999e5d76bce513be870222908f
# Date 2014-08-29 12:22:42 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
EPT: utilize GLA->GPA translation known for certain faults

Rather than doing the translation ourselves in __hvmemul_{read,write}(),
leverage that we already know the association for faults other than
those that occurred while translating page table addresses.

There is one intentional but not necessarily obvious (and possibly
subtle) adjustment to behavior: __hvmemul_read() no longer blindly
bails on instruction fetches matching the MMIO GVA (the callers of
handle_mmio_with_translation() now control the behavior via the struct
npfec they pass, and it didn't seem right to bail here rather than just
falling through to the unaccelerated path).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Tim Deegan <tim@xen.org>
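
As a minimal standalone sketch of the gating idea above (illustrative
names only -- access_flags, cached_mmio and may_use_cached_translation
are not Xen identifiers), the fast path is now taken only when the kind
of access was recorded alongside the cached translation, rather than
whenever mmio_gva happens to be non-zero:

/* Standalone model of the gating logic; builds with any C99 compiler. */
#include <stdbool.h>
#include <stdio.h>

enum access_type { ACC_READ, ACC_WRITE, ACC_INSN_FETCH };

struct access_flags {            /* stand-in for struct npfec */
    unsigned int read_access:1;
    unsigned int write_access:1;
    unsigned int insn_fetch:1;
    unsigned int gla_valid:1;
};

struct cached_mmio {             /* stand-in for the hvm_vcpu_io fields */
    struct access_flags access;  /* access kinds the cached entry covers */
    unsigned long gva;           /* page-aligned guest virtual address   */
    unsigned long gpfn;          /* MMIO frame it is known to map to     */
};

/* Reuse the cached GVA->GPA translation only if it was recorded for this
 * kind of access and the faulting address lies in the cached page. */
static bool may_use_cached_translation(const struct cached_mmio *c,
                                       enum access_type t,
                                       unsigned long addr)
{
    bool kind_ok = (t == ACC_READ       && c->access.read_access) ||
                   (t == ACC_WRITE      && c->access.write_access) ||
                   (t == ACC_INSN_FETCH && c->access.insn_fetch);

    return kind_ok && c->access.gla_valid &&
           (addr & ~0xfffUL) == c->gva;
}

int main(void)
{
    struct cached_mmio c = {
        .access = { .read_access = 1, .write_access = 1, .gla_valid = 1 },
        .gva  = 0xfee00000UL,
        .gpfn = 0xfee00UL,
    };

    /* A read within the cached page may take the fast path; an insn
     * fetch may not, because insn_fetch was not recorded for it. */
    printf("read:  %d\n", may_use_cached_translation(&c, ACC_READ, 0xfee00040UL));
    printf("fetch: %d\n", may_use_cached_translation(&c, ACC_INSN_FETCH, 0xfee00040UL));
    return 0;
}
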
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -496,10 +496,11 @@ static int __hvmemul_read(
while ( off & (chunk - 1) )
chunk >>= 1;

- if ( unlikely(vio->mmio_gva == (addr & PAGE_MASK)) && vio->mmio_gva )
+ if ( ((access_type != hvm_access_insn_fetch
+ ? vio->mmio_access.read_access
+ : vio->mmio_access.insn_fetch)) &&
+ (vio->mmio_gva == (addr & PAGE_MASK)) )
{
- if ( access_type == hvm_access_insn_fetch )
- return X86EMUL_UNHANDLEABLE;
gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) | off);
while ( (off + chunk) <= PAGE_SIZE )
{
@@ -639,7 +640,8 @@ static int hvmemul_write(
while ( off & (chunk - 1) )
chunk >>= 1;

- if ( unlikely(vio->mmio_gva == (addr & PAGE_MASK)) && vio->mmio_gva )
+ if ( vio->mmio_access.write_access &&
+ (vio->mmio_gva == (addr & PAGE_MASK)) )
{
gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) | off);
while ( (off + chunk) <= PAGE_SIZE )
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1529,7 +1529,7 @@ int hvm_hap_nested_page_fault(paddr_t gp
&& is_hvm_vcpu(v)
&& hvm_mmio_internal(gpa) )
{
- if ( !handle_mmio() )
+ if ( !handle_mmio_with_translation(gla, gpa >> PAGE_SHIFT, npfec) )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
rc = 1;
goto out;
@@ -1603,7 +1603,7 @@ int hvm_hap_nested_page_fault(paddr_t gp
if ( unlikely(is_pvh_vcpu(v)) )
goto out;

- if ( !handle_mmio() )
+ if ( !handle_mmio_with_translation(gla, gpa >> PAGE_SHIFT, npfec) )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
rc = 1;
goto out;
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -189,7 +189,7 @@ int handle_mmio(void)
if ( vio->io_state == HVMIO_awaiting_completion )
vio->io_state = HVMIO_handle_mmio_awaiting_completion;
else
- vio->mmio_gva = 0;
+ vio->mmio_access = (struct npfec){};

switch ( rc )
{
@@ -218,9 +218,14 @@ int handle_mmio(void)
return 1;
}

-int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn)
+int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn,
+ struct npfec access)
{
struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+
+ vio->mmio_access = access.gla_valid &&
+ access.kind == npfec_kind_with_gla
+ ? access : (struct npfec){};
vio->mmio_gva = gva & PAGE_MASK;
vio->mmio_gpfn = gpfn;
return handle_mmio();
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -2839,6 +2839,11 @@ static int sh_page_fault(struct vcpu *v,
p2m_type_t p2mt;
uint32_t rc;
int version;
+ struct npfec access = {
+ .read_access = 1,
+ .gla_valid = 1,
+ .kind = npfec_kind_with_gla
+ };
#if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION
int fast_emul = 0;
#endif
@@ -2849,6 +2854,9 @@ static int sh_page_fault(struct vcpu *v,

perfc_incr(shadow_fault);

+ if ( regs->error_code & PFEC_write_access )
+ access.write_access = 1;
+
#if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION
/* If faulting frame is successfully emulated in last shadow fault
* it's highly likely to reach same emulation action for this frame.
@@ -2950,7 +2958,7 @@ static int sh_page_fault(struct vcpu *v,
SHADOW_PRINTK("fast path mmio %#"PRIpaddr"\n", gpa);
reset_early_unshadow(v);
trace_shadow_gen(TRC_SHADOW_FAST_MMIO, va);
- return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT)
+ return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT, access)
? EXCRET_fault_fixed : 0);
}
else
@@ -3447,7 +3455,7 @@ static int sh_page_fault(struct vcpu *v,
paging_unlock(d);
put_gfn(d, gfn_x(gfn));
trace_shadow_gen(TRC_SHADOW_MMIO, va);
- return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT)
+ return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT, access)
? EXCRET_fault_fixed : 0);

not_a_shadow_fault:
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -119,7 +119,8 @@ static inline void register_buffered_io_
void send_timeoffset_req(unsigned long timeoff);
void send_invalidate_req(void);
int handle_mmio(void);
-int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn);
+int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn,
+ struct npfec);
int handle_pio(uint16_t port, unsigned int size, int dir);
void hvm_interrupt_post(struct vcpu *v, int vector, int type);
void hvm_io_assist(ioreq_t *p);
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -54,8 +54,9 @@ struct hvm_vcpu_io {
* HVM emulation:
* Virtual address @mmio_gva maps to MMIO physical frame @mmio_gpfn.
* The latter is known to be an MMIO frame (not RAM).
- * This translation is only valid if @mmio_gva is non-zero.
+ * This translation is only valid for accesses as per @mmio_access.
*/
+ struct npfec mmio_access;
unsigned long mmio_gva;
unsigned long mmio_gpfn;