xen/533d413b-x86-mm-fix-checks-against-max_mapped_pfn.patch

# Commit 088ee1d47b65d6bb92de61b404805f4ca92e3240
# Date 2014-04-03 12:08:43 +0100
# Author Jan Beulich <JBeulich@suse.com>
# Committer Tim Deegan <tim@xen.org>
x86/mm: fix checks against max_mapped_pfn

This value is an inclusive one, i.e. this fixes an off-by-one in memory
sharing and an off-by-two in shadow code.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Tim Deegan <tim@xen.org>

--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -1268,8 +1268,8 @@ int relinquish_shared_pages(struct domai
         return 0;
 
     p2m_lock(p2m);
-    for (gfn = p2m->next_shared_gfn_to_relinquish;
-         gfn < p2m->max_mapped_pfn; gfn++ )
+    for ( gfn = p2m->next_shared_gfn_to_relinquish;
+          gfn <= p2m->max_mapped_pfn; gfn++ )
     {
         p2m_access_t a;
         p2m_type_t t;
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3489,9 +3489,7 @@ int shadow_track_dirty_vram(struct domai
     struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
-    if (end_pfn < begin_pfn
-            || begin_pfn > p2m->max_mapped_pfn
-            || end_pfn >= p2m->max_mapped_pfn)
+    if ( end_pfn < begin_pfn || end_pfn > p2m->max_mapped_pfn + 1 )
        return -EINVAL;
 
     /* We perform p2m lookups, so lock the p2m upfront to avoid deadlock */
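
Note (not part of the patch above): a minimal, self-contained C sketch of the
inclusive-bound semantics the commit message describes -- when the largest
valid index is stored inclusively (as p2m->max_mapped_pfn is), a full scan
must use <= and an end-exclusive range [begin, end) is acceptable only while
end <= max + 1. MAX_INDEX and range_ok() are hypothetical stand-ins invented
for this illustration; they are not Xen identifiers.

/* Illustrative only -- assumes an inclusive upper bound like max_mapped_pfn. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_INDEX 3UL   /* largest valid index, stored inclusively */

/* Valid iff the half-open range [begin, end) lies within [0, MAX_INDEX]. */
static bool range_ok(unsigned long begin, unsigned long end)
{
    return !(end < begin || end > MAX_INDEX + 1);
}

int main(void)
{
    /* Visit every valid index: <= is required because the bound is inclusive;
     * using < here would skip the last entry (the off-by-one the patch fixes). */
    for ( unsigned long i = 0; i <= MAX_INDEX; i++ )
        printf("index %lu is valid\n", i);

    printf("range [0,%lu) ok: %d\n", MAX_INDEX + 1, range_ok(0, MAX_INDEX + 1)); /* 1 */
    printf("range [0,%lu) ok: %d\n", MAX_INDEX + 2, range_ok(0, MAX_INDEX + 2)); /* 0 */
    return 0;
}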