This change reverses the task of xenpaging. Before this change, a fixed number of pages was paged out. With this change, the guest will not have access to more than the given number of pages at the same time. The xenpaging= config option is replaced by actmem=. A new xm mem-swap-target command is added. The xenpaging binary is moved to /usr/lib/xen/bin/ xenpaging.HVMCOPY_gfn_paged_out.patch xenpaging.XEN_PAGING_DIR.patch xenpaging.add_evict_pages.patch xenpaging.bitmap_clear.patch xenpaging.cmdline-interface.patch xenpaging.encapsulate_domain_info.patch xenpaging.file_op-return-code.patch xenpaging.guest-memusage.patch xenpaging.install-to-libexec.patch xenpaging.low_target_policy_nomru.patch xenpaging.main-loop-exit-handling.patch xenpaging.misleading-comment.patch xenpaging.page_in-munmap-size.patch xenpaging.print-gfn.patch xenpaging.record-numer-paged-out-pages.patch xenpaging.reset-uncomsumed.patch xenpaging.stale-comments.patch xenpaging.target-tot_pages.patch xenpaging.use-PERROR.patch xenpaging.watch-target-tot_pages.patch xenpaging.watch_event-DPRINTF.patch xenpaging.xc_interface_open-comment.patch - xen.spec: update filelist: package /usr/lib*/xen with a wildcard to pick up new files; remove duplicate /usr/sbin/xen-list from the filelist. OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=157
152 lines
5.2 KiB
Diff
152 lines
5.2 KiB
Diff
|
|
xenpaging: handle HVMCOPY_gfn_paged_out in copy_from/to_user
|
|
|
|
copy_from_user_hvm can fail when __hvm_copy returns
HVMCOPY_gfn_paged_out for a referenced gfn, for example during a
guest's pagetable walk. This has to be handled in some way.
|
|
|
|
For the time being, return -EAGAIN for the most common case (xen_balloon
driver crashing in the guest) until the recently added waitqueues are
used.
|
|
|
|
Signed-off-by: Olaf Hering <olaf@aepfle.de>
|
|
|
|
---
|
|
xen/arch/x86/hvm/hvm.c | 4 ++++
|
|
xen/common/memory.c | 39 ++++++++++++++++++++++++++++++++++-----
|
|
2 files changed, 38 insertions(+), 5 deletions(-)
|
|
|
|
Index: xen-4.1.2-testing/xen/arch/x86/hvm/hvm.c
|
|
===================================================================
|
|
--- xen-4.1.2-testing.orig/xen/arch/x86/hvm/hvm.c
|
|
+++ xen-4.1.2-testing/xen/arch/x86/hvm/hvm.c
|
|
@@ -2247,6 +2247,8 @@ unsigned long copy_to_user_hvm(void *to,
|
|
|
|
rc = hvm_copy_to_guest_virt_nofault((unsigned long)to, (void *)from,
|
|
len, 0);
|
|
+ if ( unlikely(rc == HVMCOPY_gfn_paged_out) )
|
|
+ return -EAGAIN;
|
|
return rc ? len : 0; /* fake a copy_to_user() return code */
|
|
}
|
|
|
|
@@ -2264,6 +2266,8 @@ unsigned long copy_from_user_hvm(void *t
|
|
#endif
|
|
|
|
rc = hvm_copy_from_guest_virt_nofault(to, (unsigned long)from, len, 0);
|
|
+ if ( unlikely(rc == HVMCOPY_gfn_paged_out) )
|
|
+ return -EAGAIN;
|
|
return rc ? len : 0; /* fake a copy_from_user() return code */
|
|
}
|
|
|
|
Index: xen-4.1.2-testing/xen/common/memory.c
|
|
===================================================================
|
|
--- xen-4.1.2-testing.orig/xen/common/memory.c
|
|
+++ xen-4.1.2-testing/xen/common/memory.c
|
|
@@ -48,6 +48,7 @@ static void increase_reservation(struct
|
|
{
|
|
struct page_info *page;
|
|
unsigned long i;
|
|
+ unsigned long ctg_ret;
|
|
xen_pfn_t mfn;
|
|
struct domain *d = a->domain;
|
|
|
|
@@ -81,8 +82,13 @@ static void increase_reservation(struct
|
|
if ( !guest_handle_is_null(a->extent_list) )
|
|
{
|
|
mfn = page_to_mfn(page);
|
|
- if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
|
|
+ ctg_ret = __copy_to_guest_offset(a->extent_list, i, &mfn, 1);
|
|
+ if ( unlikely(ctg_ret) )
|
|
+ {
|
|
+ if ( (long)ctg_ret == -EAGAIN )
|
|
+ a->preempted = 1;
|
|
goto out;
|
|
+ }
|
|
}
|
|
}
|
|
|
|
@@ -94,6 +100,7 @@ static void populate_physmap(struct memo
|
|
{
|
|
struct page_info *page;
|
|
unsigned long i, j;
|
|
+ unsigned long cftg_ret;
|
|
xen_pfn_t gpfn, mfn;
|
|
struct domain *d = a->domain;
|
|
|
|
@@ -112,8 +119,13 @@ static void populate_physmap(struct memo
|
|
goto out;
|
|
}
|
|
|
|
- if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
|
|
+ cftg_ret = __copy_from_guest_offset(&gpfn, a->extent_list, i, 1);
|
|
+ if ( unlikely(cftg_ret) )
|
|
+ {
|
|
+ if ( (long)cftg_ret == -EAGAIN )
|
|
+ a->preempted = 1;
|
|
goto out;
|
|
+ }
|
|
|
|
if ( a->memflags & MEMF_populate_on_demand )
|
|
{
|
|
@@ -143,8 +155,13 @@ static void populate_physmap(struct memo
|
|
set_gpfn_from_mfn(mfn + j, gpfn + j);
|
|
|
|
/* Inform the domain of the new page's machine address. */
|
|
- if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
|
|
+ cftg_ret = __copy_to_guest_offset(a->extent_list, i, &mfn, 1);
|
|
+ if ( unlikely(cftg_ret) )
|
|
+ {
|
|
+ if ( (long)cftg_ret == -EAGAIN )
|
|
+ a->preempted = 1;
|
|
goto out;
|
|
+ }
|
|
}
|
|
}
|
|
}
|
|
@@ -213,6 +230,7 @@ int guest_remove_page(struct domain *d,
|
|
static void decrease_reservation(struct memop_args *a)
|
|
{
|
|
unsigned long i, j;
|
|
+ unsigned long cfg_ret;
|
|
xen_pfn_t gmfn;
|
|
|
|
if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
|
|
@@ -227,8 +245,13 @@ static void decrease_reservation(struct
|
|
goto out;
|
|
}
|
|
|
|
- if ( unlikely(__copy_from_guest_offset(&gmfn, a->extent_list, i, 1)) )
|
|
+ cfg_ret = __copy_from_guest_offset(&gmfn, a->extent_list, i, 1);
|
|
+ if ( unlikely(cfg_ret) )
|
|
+ {
|
|
+ if ( (long)cfg_ret == -EAGAIN )
|
|
+ a->preempted = 1;
|
|
goto out;
|
|
+ }
|
|
|
|
if ( tb_init_done )
|
|
{
|
|
@@ -509,6 +532,7 @@ long do_memory_op(unsigned long cmd, XEN
|
|
int rc, op;
|
|
unsigned int address_bits;
|
|
unsigned long start_extent;
|
|
+ unsigned long cfg_ret;
|
|
struct xen_memory_reservation reservation;
|
|
struct memop_args args;
|
|
domid_t domid;
|
|
@@ -522,8 +546,13 @@ long do_memory_op(unsigned long cmd, XEN
|
|
case XENMEM_populate_physmap:
|
|
start_extent = cmd >> MEMOP_EXTENT_SHIFT;
|
|
|
|
- if ( copy_from_guest(&reservation, arg, 1) )
|
|
+ cfg_ret = copy_from_guest(&reservation, arg, 1);
|
|
+ if ( unlikely(cfg_ret) )
|
|
+ {
|
|
+ if ( (long)cfg_ret == -EAGAIN )
|
|
+ return hypercall_create_continuation(__HYPERVISOR_memory_op, "lh", cmd, arg);
|
|
return start_extent;
|
|
+ }
|
|
|
|
/* Is size too large for us to encode a continuation? */
|
|
if ( reservation.nr_extents > (ULONG_MAX >> MEMOP_EXTENT_SHIFT) )
|