Charles Arnold 4a5ee0f11d

- bsc#945164 - Xl destroy show error with kernel of SLES 12 sp1
  5537a4d8-libxl-use-DEBUG-log-level-instead-of-INFO.patch

- Upstream patches from Jan
  55dc78e9-x86-amd_ucode-skip-updates-for-final-levels.patch
  55dc7937-x86-IO-APIC-don-t-create-pIRQ-mapping-from-masked-RTE.patch
  55df2f76-IOMMU-skip-domains-without-page-tables-when-dumping.patch
  55e43fd8-x86-NUMA-fix-setup_node.patch
  55e43ff8-x86-NUMA-don-t-account-hotplug-regions.patch
  55e593f1-x86-NUMA-make-init_node_heap-respect-Xen-heap-limit.patch
  54c2553c-grant-table-use-uint16_t-consistently-for-offset-and-length.patch
  54ca33bc-grant-table-refactor-grant-copy-to-reduce-duplicate-code.patch
  54ca340e-grant-table-defer-releasing-pages-acquired-in-a-grant-copy.patch

- bsc#944463 - VUL-0: CVE-2015-5239: qemu-kvm: Integer overflow in
  vnc_client_read() and protocol_client_msg()
  CVE-2015-5239-qemuu-limit-client_cut_text-msg-payload-size.patch
  CVE-2015-5239-qemut-limit-client_cut_text-msg-payload-size.patch
- bsc#944697 - VUL-1: CVE-2015-6815: qemu: net: e1000: infinite
  loop issue
  CVE-2015-6815-qemuu-e1000-fix-infinite-loop.patch
  CVE-2015-6815-qemut-e1000-fix-infinite-loop.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=375
2015-09-16 16:29:39 +00:00


# Commit 88e3ed61642bb393458acc7a9bd2f96edc337190
# Date 2015-09-01 14:02:57 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/NUMA: make init_node_heap() respect Xen heap limit

On NUMA systems, where we try to use node local memory for the basic
control structures of the buddy allocator, this special case needs to
take into consideration a possible address width limit placed on the
Xen heap. In turn this (but also other, more abstract considerations)
requires that xenheap_max_mfn() not be called more than once (at most
we might permit it to be called a second time with a larger value than
was passed the first time), and be called only before calling
end_boot_allocator().

While inspecting all the involved code, a couple of off-by-one issues
were found (and are being corrected here at once):
- arch_init_memory() cleared one too many page table slots
- the highmem_start based invocation of xenheap_max_mfn() passed too
  big a value
- xenheap_max_mfn() calculated the wrong bit count in edge cases

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Release-acked-by: Wei Liu <wei.liu2@citrix.com>
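
For illustration only, here is a minimal standalone C sketch (not Xen source;
the helper name and the sample values are invented) of the condition this
patch adds to init_node_heap(): with a non-zero xenheap_bits, a candidate MFN
range may only hold a node's heap metadata if its last frame still lies below
the 1 << (xenheap_bits - PAGE_SHIFT) boundary.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Mirrors the new sub-condition in init_node_heap(): xenheap_bits == 0 means
 * "no limit"; otherwise the last frame of [mfn, mfn + nr) must not reach the
 * 1 << (xenheap_bits - PAGE_SHIFT) boundary. */
static bool range_within_xenheap(unsigned long mfn, unsigned long nr,
                                 unsigned int xenheap_bits)
{
    return !xenheap_bits ||
           !((mfn + nr - 1) >> (xenheap_bits - PAGE_SHIFT));
}

int main(void)
{
    /* With a (hypothetical) 40-bit limit, a range ending below MFN 1 << 28
     * qualifies... */
    printf("%d\n", range_within_xenheap(0xFFFF000, 0x1000, 40));  /* prints 1 */
    /* ...while a range ending at or beyond that boundary does not. */
    printf("%d\n", range_within_xenheap(0x10000000, 0x1000, 40)); /* prints 0 */
    return 0;
}
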
# Commit 0a7167d9b20cdc48e6ea320fbbb920b3267c9757
# Date 2015-09-04 14:58:07 +0100
# Author Julien Grall <julien.grall@citrix.com>
# Committer Ian Campbell <ian.campbell@citrix.com>
xen/arm64: do not (incorrectly) limit size of xenheap

Commit 88e3ed61642bb393458acc7a9bd2f96edc337190 "x86/NUMA: make
init_node_heap() respect Xen heap limit" breaks boot on the arm64 board
X-Gene.

The xenheap bits variable is used to track the last RAM MFN that is always
mapped in Xen virtual memory. If the value is 0, it means that all of the
memory is always mapped in Xen virtual memory.

On X-Gene the RAM bank resides above 128GB and the last xenheap MFN is
0x4400000. With the new way of calculating the number of bits, xenheap_bits
ends up being 38, which hides all of the RAM and makes it impossible to
allocate xenheap memory.

Given that aarch64 always has all memory mapped in Xen virtual memory, it is
not necessary to call xenheap_max_mfn(), which sets the number of bits.

Suggested-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Julien Grall <julien.grall@citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
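
As a quick back-of-envelope check (again a standalone sketch, not Xen source,
and ignoring the PADDR_BITS clamp), plugging the X-Gene end MFN quoted above
into the new fls(mfn + 1) - 1 + PAGE_SHIFT formula from the previous patch
does indeed yield 38 bits:

#include <stdio.h>

#define PAGE_SHIFT 12

/* fls(): 1-based index of the highest set bit, as used by the formula. */
static int fls(unsigned long x)
{
    int r = 0;

    while ( x )
    {
        x >>= 1;
        ++r;
    }
    return r;
}

int main(void)
{
    unsigned long end_mfn = 0x4400000UL;          /* last xenheap MFN on X-Gene */
    int bits = fls(end_mfn + 1) - 1 + PAGE_SHIFT; /* formula from the x86 patch */

    /* Prints "xenheap_bits = 38": frames at or above 1 << (38 - PAGE_SHIFT)
     * would then fail init_node_heap()'s new range checks. */
    printf("xenheap_bits = %d\n", bits);
    return 0;
}
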
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -664,7 +664,6 @@ static void __init setup_mm(unsigned lon
xenheap_virt_end = XENHEAP_VIRT_START + ram_end - ram_start;
xenheap_mfn_start = ram_start >> PAGE_SHIFT;
xenheap_mfn_end = ram_end >> PAGE_SHIFT;
- xenheap_max_mfn(xenheap_mfn_end);
/*
* Need enough mapped pages for copying the DTB.
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -372,7 +372,7 @@ void __init arch_init_memory(void)
for ( i = 0; i < l3_table_offset(split_va); ++i )
l3tab[i] = l3idle[i];
- for ( ; i <= L3_PAGETABLE_ENTRIES; ++i )
+ for ( ; i < L3_PAGETABLE_ENTRIES; ++i )
l3tab[i] = l3e_empty();
split_l4e = l4e_from_pfn(virt_to_mfn(l3tab),
__PAGE_HYPERVISOR);
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -970,7 +970,7 @@ void __init noreturn __start_xen(unsigne
setup_max_pdx(raw_max_page);
if ( highmem_start )
- xenheap_max_mfn(PFN_DOWN(highmem_start));
+ xenheap_max_mfn(PFN_DOWN(highmem_start - 1));
/*
* Walk every RAM region and map it in its entirety (on x86/64, at least)
@@ -1151,9 +1151,6 @@ void __init noreturn __start_xen(unsigne
numa_initmem_init(0, raw_max_page);
- end_boot_allocator();
- system_state = SYS_STATE_boot;
-
if ( max_page - 1 > virt_to_mfn(HYPERVISOR_VIRT_END - 1) )
{
unsigned long limit = virt_to_mfn(HYPERVISOR_VIRT_END - 1);
@@ -1162,6 +1159,8 @@ void __init noreturn __start_xen(unsigne
if ( !highmem_start )
xenheap_max_mfn(limit);
+ end_boot_allocator();
+
/* Pass the remaining memory to the allocator. */
for ( i = 0; i < boot_e820.nr_map; i++ )
{
@@ -1185,6 +1184,10 @@ void __init noreturn __start_xen(unsigne
opt_tmem = 0;
}
}
+ else
+ end_boot_allocator();
+
+ system_state = SYS_STATE_boot;
vm_init();
console_init_ring();
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -405,13 +405,19 @@ void get_outstanding_claims(uint64_t *fr
spin_unlock(&heap_lock);
}
+static bool_t __read_mostly first_node_initialised;
+#ifndef CONFIG_SEPARATE_XENHEAP
+static unsigned int __read_mostly xenheap_bits;
+#else
+#define xenheap_bits 0
+#endif
+
static unsigned long init_node_heap(int node, unsigned long mfn,
unsigned long nr, bool_t *use_tail)
{
/* First node to be discovered has its heap metadata statically alloced. */
static heap_by_zone_and_order_t _heap_static;
static unsigned long avail_static[NR_ZONES];
- static int first_node_initialised;
unsigned long needed = (sizeof(**_heap) +
sizeof(**avail) * NR_ZONES +
PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -429,14 +435,18 @@ static unsigned long init_node_heap(int
}
#ifdef DIRECTMAP_VIRT_END
else if ( *use_tail && nr >= needed &&
- (mfn + nr) <= (virt_to_mfn(eva - 1) + 1) )
+ (mfn + nr) <= (virt_to_mfn(eva - 1) + 1) &&
+ (!xenheap_bits ||
+ !((mfn + nr - 1) >> (xenheap_bits - PAGE_SHIFT))) )
{
_heap[node] = mfn_to_virt(mfn + nr - needed);
avail[node] = mfn_to_virt(mfn + nr - 1) +
PAGE_SIZE - sizeof(**avail) * NR_ZONES;
}
else if ( nr >= needed &&
- (mfn + needed) <= (virt_to_mfn(eva - 1) + 1) )
+ (mfn + needed) <= (virt_to_mfn(eva - 1) + 1) &&
+ (!xenheap_bits ||
+ !((mfn + needed - 1) >> (xenheap_bits - PAGE_SHIFT))) )
{
_heap[node] = mfn_to_virt(mfn);
avail[node] = mfn_to_virt(mfn + needed - 1) +
@@ -1541,11 +1551,13 @@ void free_xenheap_pages(void *v, unsigne
#else
-static unsigned int __read_mostly xenheap_bits;
-
void __init xenheap_max_mfn(unsigned long mfn)
{
- xenheap_bits = fls(mfn) + PAGE_SHIFT;
+ ASSERT(!first_node_initialised);
+ ASSERT(!xenheap_bits);
+ BUILD_BUG_ON(PADDR_BITS >= BITS_PER_LONG);
+ xenheap_bits = min(fls(mfn + 1) - 1 + PAGE_SHIFT, PADDR_BITS);
+ printk(XENLOG_INFO "Xen heap: %u bits\n", xenheap_bits);
}
void init_xenheap_pages(paddr_t ps, paddr_t pe)