  53356c1e-x86-HVM-correct-CPUID-leaf-80000008-handling.patch
  533ad1ee-VMX-fix-PAT-value-seen-by-guest.patch
  533d413b-x86-mm-fix-checks-against-max_mapped_pfn.patch
- bnc#862608 - SLES 11 SP3 vm-install should get RHEL 7 support when released
  53206661-pygrub-support-linux16-and-initrd16.patch
- Upstream bug fixes
  53299d8f-xenconsole-reset-tty-on-failure.patch
  53299d8f-xenconsole-tolerate-tty-errors.patch
- fix build for armv7l and aarch64
- Remove compiletime strings from qemu-upstream
  qemu-xen-upstream-megasas-buildtime.patch
- bnc#871546 - KMPs are not signed in SUSE:SLE-12:GA?
  xen.spec
- Upstream patches from Jan
  532fff53-x86-fix-determination-of-bit-count-for-struct-domain-allocations.patch
  5331917d-x86-enforce-preemption-in-HVM_set_mem_access-p2m_set_mem_access.patch
- Drop xsa89.patch for upstream version (see bnc#867910, 5331917d-x86-enforce...)

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=309
# Commit b3d2f8b2cba9fce5bc8995612d0d13fcefec7769
# Date 2014-03-24 10:48:03 +0100
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86: fix determination of bit count for struct domain allocations

We can't just add in the hole shift value, as the hole may be at or
above the 44-bit boundary. Instead we need to determine the total bit
count until reaching 32 significant (not squashed out) bits in PFN
representations.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>

--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -180,6 +180,28 @@ void dump_pageframe_info(struct domain *
     spin_unlock(&d->page_alloc_lock);
 }
 
+/*
+ * The hole may be at or above the 44-bit boundary, so we need to determine
+ * the total bit count until reaching 32 significant (not squashed out) bits
+ * in PFN representations.
+ * Note that the way "bits" gets initialized/updated/bounds-checked guarantees
+ * that the function will never return zero, and hence will never be called
+ * more than once (which is important due to it being deliberately placed in
+ * .init.text).
+ */
+static unsigned int __init noinline _domain_struct_bits(void)
+{
+    unsigned int bits = 32 + PAGE_SHIFT;
+    unsigned int sig = hweight32(~pfn_hole_mask);
+    unsigned int mask = pfn_hole_mask >> 32;
+
+    for ( ; bits < BITS_PER_LONG && sig < 32; ++bits, mask >>= 1 )
+        if ( !(mask & 1) )
+            ++sig;
+
+    return bits;
+}
+
 struct domain *alloc_domain_struct(void)
 {
     struct domain *d;
@@ -187,7 +209,10 @@ struct domain *alloc_domain_struct(void)
      * We pack the PDX of the domain structure into a 32-bit field within
      * the page_info structure. Hence the MEMF_bits() restriction.
      */
-    unsigned int bits = 32 + PAGE_SHIFT + pfn_pdx_hole_shift;
+    static unsigned int __read_mostly bits;
+
+    if ( unlikely(!bits) )
+        bits = _domain_struct_bits();
 
     BUILD_BUG_ON(sizeof(*d) > PAGE_SIZE);
     d = alloc_xenheap_pages(0, MEMF_bits(bits));
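
Editor's note, for context only (not part of the patch or of the Xen tree): the standalone C sketch below walks the same bit-counting loop for a hypothetical pfn_hole_mask with the PDX hole at address bits 44-47. The mask value, the popcount32() helper (a stand-in for Xen's hweight32), and the main() harness are illustrative assumptions; only the loop itself mirrors the hunk above.

/* Illustrative sketch only: computes the MEMF_bits() limit derived by the
 * patch for a hypothetical PDX hole at address bits 44-47, i.e. PFN bits
 * 32-35 with 4 KiB pages. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT    12
#define BITS_PER_LONG 64

/* Hypothetical mask of PFN bits squashed out of the PDX representation. */
static const uint64_t pfn_hole_mask = 0xf00000000ULL;   /* PFN bits 32-35 */

/* Portable popcount, standing in for Xen's hweight32(). */
static unsigned int popcount32(uint32_t v)
{
    unsigned int n = 0;
    for ( ; v; v &= v - 1 )
        ++n;
    return n;
}

static unsigned int domain_struct_bits(void)
{
    unsigned int bits = 32 + PAGE_SHIFT;                      /* 44 */
    unsigned int sig = popcount32((uint32_t)~pfn_hole_mask);  /* significant low PFN bits */
    uint64_t mask = pfn_hole_mask >> 32;

    /* Raise the address-bit limit until 32 significant (not squashed out)
     * PFN bits lie below it. */
    for ( ; bits < BITS_PER_LONG && sig < 32; ++bits, mask >>= 1 )
        if ( !(mask & 1) )
            ++sig;

    return bits;
}

int main(void)
{
    /* With the hole at/above the 44-bit boundary, all 32 low PFN bits are
     * already significant, so the loop keeps the limit at 44.  The old
     * formula 32 + PAGE_SHIFT + pfn_pdx_hole_shift would have added the
     * hole width on top, which is exactly what the patch avoids. */
    printf("MEMF_bits limit: %u\n", domain_struct_bits());
    return 0;
}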