xen/xenpaging.HVMCOPY_gfn_paged_out.patch
Charles Arnold 9a05aa7fc4 - bnc#658704 - SLES11 SP1 Xen boot panic in x2apic mode
22707-x2apic-preenabled-check.patch
- bnc#641419 - L3: Xen: qemu-dm reports "xc_map_foreign_batch: mmap failed:
  Cannot allocate memory"
  7434-qemu-rlimit-as.patch
- Additional or upstream patches from Jan
  22693-fam10-mmio-conf-base-protect.patch
  22694-x86_64-no-weak.patch
  22708-xenctx-misc.patch
  21432-4.0-cpu-boot-failure.patch
  22645-amd-flush-filter.patch
  qemu-fix-7433.patch

- Maintain compatibility with the extid flag even though it is
  deprecated for both legacy and sxp config files.
  hv_extid_compatibility.patch 

- bnc#649209 - improve suspend evtchn lock
  suspend_evtchn_lock.patch

- Removed the hyper-v shim patches in favor of using the upstream 
  version. 

- bnc#641419 - L3: Xen: qemu-dm reports "xc_map_foreign_batch: mmap
  failed: Cannot allocate memory" 
  qemu-rlimit-as.patch

- Upstream c/s 7433 to replace qemu_altgr_more.patch
  7433-qemu-altgr.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=90
2011-01-14 18:24:51 +00:00

152 lines
5.3 KiB
Diff

Subject: xenpaging: handle HVMCOPY_gfn_paged_out in copy_from/to_user
copy_from_user_hvm can fail when __hvm_copy returns
HVMCOPY_gfn_paged_out for a referenced gfn, for example during a
guest's pagetable walk. This has to be handled in some way. One hypercall that
failed was do_memory_op/XENMEM_decrease_reservation, which led to a
BUG_ON in balloon.c. Since do_memory_op already has restart support for
the hypercall, copy_from_guest uses this existing retry code. In
addition, cleanup on error was added to increase_reservation and
populate_physmap.
Signed-off-by: Olaf Hering <olaf@aepfle.de>
---
xen/arch/x86/hvm/hvm.c | 4 ++++
xen/common/memory.c | 39 ++++++++++++++++++++++++++++++++++-----
2 files changed, 38 insertions(+), 5 deletions(-)
Index: xen-4.0.1-testing/xen/arch/x86/hvm/hvm.c
===================================================================
--- xen-4.0.1-testing.orig/xen/arch/x86/hvm/hvm.c
+++ xen-4.0.1-testing/xen/arch/x86/hvm/hvm.c
@@ -1843,6 +1843,8 @@ unsigned long copy_to_user_hvm(void *to,
rc = hvm_copy_to_guest_virt_nofault((unsigned long)to, (void *)from,
len, 0);
+ if ( rc == HVMCOPY_gfn_paged_out )
+ return -EAGAIN;
return rc ? len : 0; /* fake a copy_to_user() return code */
}
@@ -1859,6 +1861,8 @@ unsigned long copy_from_user_hvm(void *t
#endif
rc = hvm_copy_from_guest_virt_nofault(to, (unsigned long)from, len, 0);
+ if ( rc == HVMCOPY_gfn_paged_out )
+ return -EAGAIN;
return rc ? len : 0; /* fake a copy_from_user() return code */
}
Index: xen-4.0.1-testing/xen/common/memory.c
===================================================================
--- xen-4.0.1-testing.orig/xen/common/memory.c
+++ xen-4.0.1-testing/xen/common/memory.c
@@ -47,6 +47,7 @@ static void increase_reservation(struct
{
struct page_info *page;
unsigned long i;
+ unsigned long ctg_ret;
xen_pfn_t mfn;
struct domain *d = a->domain;
@@ -80,8 +81,13 @@ static void increase_reservation(struct
if ( !guest_handle_is_null(a->extent_list) )
{
mfn = page_to_mfn(page);
- if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
+ ctg_ret = __copy_to_guest_offset(a->extent_list, i, &mfn, 1);
+ if ( unlikely(ctg_ret) )
+ {
+ if ( (long)ctg_ret == -EAGAIN )
+ a->preempted = 1;
goto out;
+ }
}
}
@@ -93,6 +99,7 @@ static void populate_physmap(struct memo
{
struct page_info *page;
unsigned long i, j;
+ unsigned long cftg_ret;
xen_pfn_t gpfn, mfn;
struct domain *d = a->domain;
@@ -111,8 +118,13 @@ static void populate_physmap(struct memo
goto out;
}
- if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
+ cftg_ret = __copy_from_guest_offset(&gpfn, a->extent_list, i, 1);
+ if ( unlikely(cftg_ret) )
+ {
+ if ( (long)cftg_ret == -EAGAIN )
+ a->preempted = 1;
goto out;
+ }
if ( a->memflags & MEMF_populate_on_demand )
{
@@ -142,8 +154,13 @@ static void populate_physmap(struct memo
set_gpfn_from_mfn(mfn + j, gpfn + j);
/* Inform the domain of the new page's machine address. */
- if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
+ cftg_ret = __copy_to_guest_offset(a->extent_list, i, &mfn, 1);
+ if ( unlikely(cftg_ret) )
+ {
+ if ( (long)cftg_ret == -EAGAIN )
+ a->preempted = 1;
goto out;
+ }
}
}
}
@@ -212,6 +229,7 @@ int guest_remove_page(struct domain *d,
static void decrease_reservation(struct memop_args *a)
{
unsigned long i, j;
+ unsigned long cfg_ret;
xen_pfn_t gmfn;
if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
@@ -226,8 +244,13 @@ static void decrease_reservation(struct
goto out;
}
- if ( unlikely(__copy_from_guest_offset(&gmfn, a->extent_list, i, 1)) )
+ cfg_ret = __copy_from_guest_offset(&gmfn, a->extent_list, i, 1);
+ if ( unlikely(cfg_ret) )
+ {
+ if ( (long)cfg_ret == -EAGAIN )
+ a->preempted = 1;
goto out;
+ }
if ( tb_init_done )
{
@@ -511,6 +534,7 @@ long do_memory_op(unsigned long cmd, XEN
int rc, op;
unsigned int address_bits;
unsigned long start_extent;
+ unsigned long cfg_ret;
struct xen_memory_reservation reservation;
struct memop_args args;
domid_t domid;
@@ -524,8 +548,13 @@ long do_memory_op(unsigned long cmd, XEN
case XENMEM_populate_physmap:
start_extent = cmd >> MEMOP_EXTENT_SHIFT;
- if ( copy_from_guest(&reservation, arg, 1) )
+ cfg_ret = copy_from_guest(&reservation, arg, 1);
+ if ( unlikely(cfg_ret) )
+ {
+ if ( (long)cfg_ret == -EAGAIN )
+ return hypercall_create_continuation(__HYPERVISOR_memory_op, "lh", cmd, arg);
return start_extent;
+ }
/* Is size too large for us to encode a continuation? */
if ( reservation.nr_extents > (ULONG_MAX >> MEMOP_EXTENT_SHIFT) )