- remove xen.migrate.tools_notify_restore_to_hangup_during_migration_--abort_if_busy.patch It changed migration protocol and upstream wants a different solution - bnc#802221 - fix xenpaging readd xenpaging.qemu.flush-cache.patch - Upstream patches from Jan 26891-x86-S3-Fix-cpu-pool-scheduling-after-suspend-resume.patch 26930-x86-EFI-fix-runtime-call-status-for-compat-mode-Dom0.patch - Additional fix for bnc#816159 CVE-2013-1918-xsa45-followup.patch - bnc#817068 - Xen guest with >1 sr-iov vf won't start xen-managed-pci-device.patch - Update to Xen 4.2.2 c/s 26064 The following recent security patches are included in the tarball CVE-2013-0151-xsa34.patch (bnc#797285) CVE-2012-6075-xsa41.patch (bnc#797523) CVE-2013-1917-xsa44.patch (bnc#813673) CVE-2013-1919-xsa46.patch (bnc#813675) - Upstream patch from Jan 26902-x86-EFI-pass-boot-services-variable-info-to-runtime-code.patch - bnc#816159 - VUL-0: xen: CVE-2013-1918: XSA-45: Several long latency operations are not preemptible CVE-2013-1918-xsa45-1-vcpu-destroy-pagetables-preemptible.patch OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=237
407 lines
14 KiB
Diff
x86: cleanup after making various page table manipulation operations preemptible

This drops the "preemptible" parameters from various functions where
now they can't (or shouldn't, validated by assertions) be run in non-
preemptible mode anymore, to prove that manipulations of at least L3
and L4 page tables and page table entries are now always preemptible,
i.e. the earlier patches actually fulfill their purpose of fixing the
resulting security issue.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Tim Deegan <tim@xen.org>

--- a/xen/arch/x86/domain.c
|
|
+++ b/xen/arch/x86/domain.c
|
|
@@ -1986,7 +1986,7 @@ static int relinquish_memory(
|
|
}
|
|
|
|
if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
|
|
- ret = put_page_and_type_preemptible(page, 1);
|
|
+ ret = put_page_and_type_preemptible(page);
|
|
switch ( ret )
|
|
{
|
|
case 0:
|
|
--- a/xen/arch/x86/mm.c
|
|
+++ b/xen/arch/x86/mm.c
|
|
@@ -1044,7 +1044,7 @@ get_page_from_l2e(
|
|
define_get_linear_pagetable(l3);
|
|
static int
|
|
get_page_from_l3e(
|
|
- l3_pgentry_t l3e, unsigned long pfn, struct domain *d, int partial, int preemptible)
|
|
+ l3_pgentry_t l3e, unsigned long pfn, struct domain *d, int partial)
|
|
{
|
|
int rc;
|
|
|
|
@@ -1058,7 +1058,7 @@ get_page_from_l3e(
|
|
}
|
|
|
|
rc = get_page_and_type_from_pagenr(
|
|
- l3e_get_pfn(l3e), PGT_l2_page_table, d, partial, preemptible);
|
|
+ l3e_get_pfn(l3e), PGT_l2_page_table, d, partial, 1);
|
|
if ( unlikely(rc == -EINVAL) && get_l3_linear_pagetable(l3e, pfn, d) )
|
|
rc = 0;
|
|
|
|
@@ -1069,7 +1069,7 @@ get_page_from_l3e(
|
|
define_get_linear_pagetable(l4);
|
|
static int
|
|
get_page_from_l4e(
|
|
- l4_pgentry_t l4e, unsigned long pfn, struct domain *d, int partial, int preemptible)
|
|
+ l4_pgentry_t l4e, unsigned long pfn, struct domain *d, int partial)
|
|
{
|
|
int rc;
|
|
|
|
@@ -1083,7 +1083,7 @@ get_page_from_l4e(
|
|
}
|
|
|
|
rc = get_page_and_type_from_pagenr(
|
|
- l4e_get_pfn(l4e), PGT_l3_page_table, d, partial, preemptible);
|
|
+ l4e_get_pfn(l4e), PGT_l3_page_table, d, partial, 1);
|
|
if ( unlikely(rc == -EINVAL) && get_l4_linear_pagetable(l4e, pfn, d) )
|
|
rc = 0;
|
|
|
|
@@ -1237,8 +1237,10 @@ static int put_page_from_l2e(l2_pgentry_
|
|
static int __put_page_type(struct page_info *, int preemptible);
|
|
|
|
static int put_page_from_l3e(l3_pgentry_t l3e, unsigned long pfn,
|
|
- int partial, int preemptible)
|
|
+ int partial, bool_t defer)
|
|
{
|
|
+ struct page_info *pg;
|
|
+
|
|
if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || (l3e_get_pfn(l3e) == pfn) )
|
|
return 1;
|
|
|
|
@@ -1257,41 +1259,45 @@ static int put_page_from_l3e(l3_pgentry_
|
|
}
|
|
#endif
|
|
|
|
+ pg = l3e_get_page(l3e);
|
|
+
|
|
if ( unlikely(partial > 0) )
|
|
{
|
|
- ASSERT(preemptible >= 0);
|
|
- return __put_page_type(l3e_get_page(l3e), preemptible);
|
|
+ ASSERT(!defer);
|
|
+ return __put_page_type(pg, 1);
|
|
}
|
|
|
|
- if ( preemptible < 0 )
|
|
+ if ( defer )
|
|
{
|
|
- current->arch.old_guest_table = l3e_get_page(l3e);
|
|
+ current->arch.old_guest_table = pg;
|
|
return 0;
|
|
}
|
|
|
|
- return put_page_and_type_preemptible(l3e_get_page(l3e), preemptible);
|
|
+ return put_page_and_type_preemptible(pg);
|
|
}
|
|
|
|
#if CONFIG_PAGING_LEVELS >= 4
|
|
static int put_page_from_l4e(l4_pgentry_t l4e, unsigned long pfn,
|
|
- int partial, int preemptible)
|
|
+ int partial, bool_t defer)
|
|
{
|
|
if ( (l4e_get_flags(l4e) & _PAGE_PRESENT) &&
|
|
(l4e_get_pfn(l4e) != pfn) )
|
|
{
|
|
+ struct page_info *pg = l4e_get_page(l4e);
|
|
+
|
|
if ( unlikely(partial > 0) )
|
|
{
|
|
- ASSERT(preemptible >= 0);
|
|
- return __put_page_type(l4e_get_page(l4e), preemptible);
|
|
+ ASSERT(!defer);
|
|
+ return __put_page_type(pg, 1);
|
|
}
|
|
|
|
- if ( preemptible < 0 )
|
|
+ if ( defer )
|
|
{
|
|
- current->arch.old_guest_table = l4e_get_page(l4e);
|
|
+ current->arch.old_guest_table = pg;
|
|
return 0;
|
|
}
|
|
|
|
- return put_page_and_type_preemptible(l4e_get_page(l4e), preemptible);
|
|
+ return put_page_and_type_preemptible(pg);
|
|
}
|
|
return 1;
|
|
}
|
|
@@ -1509,7 +1515,7 @@ static int alloc_l2_table(struct page_in
|
|
return rc > 0 ? 0 : rc;
|
|
}
|
|
|
|
-static int alloc_l3_table(struct page_info *page, int preemptible)
|
|
+static int alloc_l3_table(struct page_info *page)
|
|
{
|
|
struct domain *d = page_get_owner(page);
|
|
unsigned long pfn = page_to_mfn(page);
|
|
@@ -1556,11 +1562,10 @@ static int alloc_l3_table(struct page_in
|
|
rc = get_page_and_type_from_pagenr(l3e_get_pfn(pl3e[i]),
|
|
PGT_l2_page_table |
|
|
PGT_pae_xen_l2,
|
|
- d, partial, preemptible);
|
|
+ d, partial, 1);
|
|
}
|
|
else if ( !is_guest_l3_slot(i) ||
|
|
- (rc = get_page_from_l3e(pl3e[i], pfn, d,
|
|
- partial, preemptible)) > 0 )
|
|
+ (rc = get_page_from_l3e(pl3e[i], pfn, d, partial)) > 0 )
|
|
continue;
|
|
|
|
if ( rc == -EAGAIN )
|
|
@@ -1604,7 +1609,7 @@ static int alloc_l3_table(struct page_in
|
|
}
|
|
|
|
#if CONFIG_PAGING_LEVELS >= 4
|
|
-static int alloc_l4_table(struct page_info *page, int preemptible)
|
|
+static int alloc_l4_table(struct page_info *page)
|
|
{
|
|
struct domain *d = page_get_owner(page);
|
|
unsigned long pfn = page_to_mfn(page);
|
|
@@ -1616,8 +1621,7 @@ static int alloc_l4_table(struct page_in
|
|
i++, partial = 0 )
|
|
{
|
|
if ( !is_guest_l4_slot(d, i) ||
|
|
- (rc = get_page_from_l4e(pl4e[i], pfn, d,
|
|
- partial, preemptible)) > 0 )
|
|
+ (rc = get_page_from_l4e(pl4e[i], pfn, d, partial)) > 0 )
|
|
continue;
|
|
|
|
if ( rc == -EAGAIN )
|
|
@@ -1662,7 +1666,7 @@ static int alloc_l4_table(struct page_in
|
|
return rc > 0 ? 0 : rc;
|
|
}
|
|
#else
|
|
-#define alloc_l4_table(page, preemptible) (-EINVAL)
|
|
+#define alloc_l4_table(page) (-EINVAL)
|
|
#endif
|
|
|
|
|
|
@@ -1714,7 +1718,7 @@ static int free_l2_table(struct page_inf
|
|
return err;
|
|
}
|
|
|
|
-static int free_l3_table(struct page_info *page, int preemptible)
|
|
+static int free_l3_table(struct page_info *page)
|
|
{
|
|
struct domain *d = page_get_owner(page);
|
|
unsigned long pfn = page_to_mfn(page);
|
|
@@ -1727,7 +1731,7 @@ static int free_l3_table(struct page_inf
|
|
do {
|
|
if ( is_guest_l3_slot(i) )
|
|
{
|
|
- rc = put_page_from_l3e(pl3e[i], pfn, partial, preemptible);
|
|
+ rc = put_page_from_l3e(pl3e[i], pfn, partial, 0);
|
|
if ( rc < 0 )
|
|
break;
|
|
partial = 0;
|
|
@@ -1754,7 +1758,7 @@ static int free_l3_table(struct page_inf
|
|
}
|
|
|
|
#if CONFIG_PAGING_LEVELS >= 4
|
|
-static int free_l4_table(struct page_info *page, int preemptible)
|
|
+static int free_l4_table(struct page_info *page)
|
|
{
|
|
struct domain *d = page_get_owner(page);
|
|
unsigned long pfn = page_to_mfn(page);
|
|
@@ -1764,7 +1768,7 @@ static int free_l4_table(struct page_inf
|
|
|
|
do {
|
|
if ( is_guest_l4_slot(d, i) )
|
|
- rc = put_page_from_l4e(pl4e[i], pfn, partial, preemptible);
|
|
+ rc = put_page_from_l4e(pl4e[i], pfn, partial, 0);
|
|
if ( rc < 0 )
|
|
break;
|
|
partial = 0;
|
|
@@ -1784,7 +1788,7 @@ static int free_l4_table(struct page_inf
|
|
return rc > 0 ? 0 : rc;
|
|
}
|
|
#else
|
|
-#define free_l4_table(page, preemptible) (-EINVAL)
|
|
+#define free_l4_table(page) (-EINVAL)
|
|
#endif
|
|
|
|
int page_lock(struct page_info *page)
|
|
@@ -2023,7 +2027,6 @@ static int mod_l3_entry(l3_pgentry_t *pl
|
|
l3_pgentry_t nl3e,
|
|
unsigned long pfn,
|
|
int preserve_ad,
|
|
- int preemptible,
|
|
struct vcpu *vcpu)
|
|
{
|
|
l3_pgentry_t ol3e;
|
|
@@ -2063,7 +2066,7 @@ static int mod_l3_entry(l3_pgentry_t *pl
|
|
return rc ? 0 : -EFAULT;
|
|
}
|
|
|
|
- rc = get_page_from_l3e(nl3e, pfn, d, 0, preemptible);
|
|
+ rc = get_page_from_l3e(nl3e, pfn, d, 0);
|
|
if ( unlikely(rc < 0) )
|
|
return rc;
|
|
rc = 0;
|
|
@@ -2090,7 +2093,7 @@ static int mod_l3_entry(l3_pgentry_t *pl
|
|
pae_flush_pgd(pfn, pgentry_ptr_to_slot(pl3e), nl3e);
|
|
}
|
|
|
|
- put_page_from_l3e(ol3e, pfn, 0, -preemptible);
|
|
+ put_page_from_l3e(ol3e, pfn, 0, 1);
|
|
return rc;
|
|
}
|
|
|
|
@@ -2101,7 +2104,6 @@ static int mod_l4_entry(l4_pgentry_t *pl
|
|
l4_pgentry_t nl4e,
|
|
unsigned long pfn,
|
|
int preserve_ad,
|
|
- int preemptible,
|
|
struct vcpu *vcpu)
|
|
{
|
|
struct domain *d = vcpu->domain;
|
|
@@ -2134,7 +2136,7 @@ static int mod_l4_entry(l4_pgentry_t *pl
|
|
return rc ? 0 : -EFAULT;
|
|
}
|
|
|
|
- rc = get_page_from_l4e(nl4e, pfn, d, 0, preemptible);
|
|
+ rc = get_page_from_l4e(nl4e, pfn, d, 0);
|
|
if ( unlikely(rc < 0) )
|
|
return rc;
|
|
rc = 0;
|
|
@@ -2153,7 +2155,7 @@ static int mod_l4_entry(l4_pgentry_t *pl
|
|
return -EFAULT;
|
|
}
|
|
|
|
- put_page_from_l4e(ol4e, pfn, 0, -preemptible);
|
|
+ put_page_from_l4e(ol4e, pfn, 0, 1);
|
|
return rc;
|
|
}
|
|
|
|
@@ -2275,10 +2277,12 @@ static int alloc_page_type(struct page_i
|
|
rc = alloc_l2_table(page, type, preemptible);
|
|
break;
|
|
case PGT_l3_page_table:
|
|
- rc = alloc_l3_table(page, preemptible);
|
|
+ ASSERT(preemptible);
|
|
+ rc = alloc_l3_table(page);
|
|
break;
|
|
case PGT_l4_page_table:
|
|
- rc = alloc_l4_table(page, preemptible);
|
|
+ ASSERT(preemptible);
|
|
+ rc = alloc_l4_table(page);
|
|
break;
|
|
case PGT_seg_desc_page:
|
|
rc = alloc_segdesc_page(page);
|
|
@@ -2372,10 +2376,12 @@ int free_page_type(struct page_info *pag
|
|
if ( !(type & PGT_partial) )
|
|
page->nr_validated_ptes = L3_PAGETABLE_ENTRIES;
|
|
#endif
|
|
- rc = free_l3_table(page, preemptible);
|
|
+ ASSERT(preemptible);
|
|
+ rc = free_l3_table(page);
|
|
break;
|
|
case PGT_l4_page_table:
|
|
- rc = free_l4_table(page, preemptible);
|
|
+ ASSERT(preemptible);
|
|
+ rc = free_l4_table(page);
|
|
break;
|
|
default:
|
|
MEM_LOG("type %lx pfn %lx\n", type, page_to_mfn(page));
|
|
@@ -2866,7 +2872,7 @@ static int put_old_guest_table(struct vc
|
|
if ( !v->arch.old_guest_table )
|
|
return 0;
|
|
|
|
- switch ( rc = put_page_and_type_preemptible(v->arch.old_guest_table, 1) )
|
|
+ switch ( rc = put_page_and_type_preemptible(v->arch.old_guest_table) )
|
|
{
|
|
case -EINTR:
|
|
case -EAGAIN:
|
|
@@ -2898,7 +2904,7 @@ int vcpu_destroy_pagetables(struct vcpu
|
|
if ( paging_mode_refcounts(v->domain) )
|
|
put_page(page);
|
|
else
|
|
- rc = put_page_and_type_preemptible(page, 1);
|
|
+ rc = put_page_and_type_preemptible(page);
|
|
}
|
|
|
|
#ifdef __x86_64__
|
|
@@ -2924,7 +2930,7 @@ int vcpu_destroy_pagetables(struct vcpu
|
|
if ( paging_mode_refcounts(v->domain) )
|
|
put_page(page);
|
|
else
|
|
- rc = put_page_and_type_preemptible(page, 1);
|
|
+ rc = put_page_and_type_preemptible(page);
|
|
}
|
|
if ( !rc )
|
|
v->arch.guest_table_user = pagetable_null();
|
|
@@ -2953,7 +2959,7 @@ int new_guest_cr3(unsigned long mfn)
|
|
l4e_from_pfn(
|
|
mfn,
|
|
(_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)),
|
|
- pagetable_get_pfn(curr->arch.guest_table), 0, 1, curr);
|
|
+ pagetable_get_pfn(curr->arch.guest_table), 0, curr);
|
|
switch ( rc )
|
|
{
|
|
case 0:
|
|
@@ -3016,7 +3022,7 @@ int new_guest_cr3(unsigned long mfn)
|
|
if ( paging_mode_refcounts(d) )
|
|
put_page(page);
|
|
else
|
|
- switch ( rc = put_page_and_type_preemptible(page, 1) )
|
|
+ switch ( rc = put_page_and_type_preemptible(page) )
|
|
{
|
|
case -EINTR:
|
|
rc = -EAGAIN;
|
|
@@ -3327,7 +3333,7 @@ long do_mmuext_op(
|
|
break;
|
|
}
|
|
|
|
- switch ( rc = put_page_and_type_preemptible(page, 1) )
|
|
+ switch ( rc = put_page_and_type_preemptible(page) )
|
|
{
|
|
case -EINTR:
|
|
case -EAGAIN:
|
|
@@ -3405,7 +3411,7 @@ long do_mmuext_op(
|
|
if ( paging_mode_refcounts(d) )
|
|
put_page(page);
|
|
else
|
|
- switch ( rc = put_page_and_type_preemptible(page, 1) )
|
|
+ switch ( rc = put_page_and_type_preemptible(page) )
|
|
{
|
|
case -EINTR:
|
|
rc = -EAGAIN;
|
|
@@ -3882,12 +3888,12 @@ long do_mmu_update(
|
|
break;
|
|
case PGT_l3_page_table:
|
|
rc = mod_l3_entry(va, l3e_from_intpte(req.val), mfn,
|
|
- cmd == MMU_PT_UPDATE_PRESERVE_AD, 1, v);
|
|
+ cmd == MMU_PT_UPDATE_PRESERVE_AD, v);
|
|
break;
|
|
#if CONFIG_PAGING_LEVELS >= 4
|
|
case PGT_l4_page_table:
|
|
rc = mod_l4_entry(va, l4e_from_intpte(req.val), mfn,
|
|
- cmd == MMU_PT_UPDATE_PRESERVE_AD, 1, v);
|
|
+ cmd == MMU_PT_UPDATE_PRESERVE_AD, v);
|
|
break;
|
|
#endif
|
|
case PGT_writable_page:
|
|
--- a/xen/include/asm-x86/mm.h
|
|
+++ b/xen/include/asm-x86/mm.h
|
|
@@ -384,15 +384,10 @@ static inline void put_page_and_type(str
|
|
put_page(page);
|
|
}
|
|
|
|
-static inline int put_page_and_type_preemptible(struct page_info *page,
|
|
- int preemptible)
|
|
+static inline int put_page_and_type_preemptible(struct page_info *page)
|
|
{
|
|
- int rc = 0;
|
|
+ int rc = put_page_type_preemptible(page);
|
|
|
|
- if ( preemptible )
|
|
- rc = put_page_type_preemptible(page);
|
|
- else
|
|
- put_page_type(page);
|
|
if ( likely(rc == 0) )
|
|
put_page(page);
|
|
return rc;
|