e178e25130
OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=98a7b497156d81631d87c0adc03f8518
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1272973341 -3600
# Node ID b07edd50661e7f768088c08215dabb9becb5c5b6
# Parent 6c7b905b03ff1cf171187bafe7129e3e213e5787
x86: fix Dom0 booting time regression

References: bnc#593536

Unfortunately the changes in c/s 21035 caused boot time to go up
significantly on certain large systems. To rectify this without going
back to the old behavior, introduce a new memory allocation flag so
that Dom0 allocations can exhaust non-DMA memory before starting to
consume DMA memory. For the latter, the behavior introduced in the
aforementioned c/s is retained, while for the former we can now even
try larger chunks first.

This builds on the fact that alloc_chunk() gets called with non-
increasing 'max_pages' arguments, and hence it can store locally the
allocation order last used (as larger-order allocations can't succeed
during subsequent invocations if they failed once).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
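For illustration only, not part of the patch itself: the self-contained C
sketch below models the strategy described above. mock_alloc(),
order_from_pages(), alloc_chunk_model(), the pool sizes and the MAX_ORDER
stand-in are all hypothetical; only the control flow (cached last order,
non-DMA phase first, 2MB-capped DMA fallback) mirrors the patched
alloc_chunk().

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical pool sizes, in 4KiB pages: a large non-DMA ("above 4GB")
 * pool and a much smaller DMA-capable one. */
static unsigned long nondma_pages = 1UL << 20;
static unsigned long dma_pages    = 1UL << 14;

/* Stand-in for alloc_domheap_pages(): take 2^order pages, preferring the
 * non-DMA pool, and touching the DMA pool only when allowed. */
static bool mock_alloc(unsigned int order, bool no_dma)
{
    unsigned long count = 1UL << order;

    if ( nondma_pages >= count )
    {
        nondma_pages -= count;
        return true;
    }
    if ( !no_dma && dma_pages >= count )
    {
        dma_pages -= count;
        return true;
    }
    return false;
}

/* Stand-in for get_order_from_pages(): smallest order with 2^order >= pages. */
static unsigned int order_from_pages(unsigned long pages)
{
    unsigned int order = 0;

    while ( (1UL << order) < pages )
        ++order;
    return order;
}

/* Model of the patched alloc_chunk(). Callers pass non-increasing
 * max_pages, so an order that failed once can never succeed on a later
 * call; caching the last successful order avoids retrying doomed sizes.
 * Returns the order actually allocated, or -1 when both pools are empty. */
static int alloc_chunk_model(unsigned long max_pages)
{
    static unsigned int last_order = 18;   /* stand-in for MAX_ORDER */
    static bool no_dma = true;             /* phase 1: spare DMA memory */
    unsigned int order = order_from_pages(max_pages);
    bool ok;

    if ( order > last_order )
        order = last_order;
    else if ( max_pages & (max_pages - 1) )
        --order;                           /* don't overshoot the request */

    while ( !(ok = mock_alloc(order, no_dma)) )
        if ( order-- == 0 )
            break;

    if ( ok )
        last_order = order;
    else if ( no_dma )
    {
        /* Non-DMA memory is exhausted: start consuming DMA memory too,
         * but from now on in chunks of at most 2MB (order 9), as the
         * pre-patch code always did. */
        no_dma = false;
        last_order = 9;
        return alloc_chunk_model(max_pages);
    }

    return ok ? (int)order : -1;
}

int main(void)
{
    unsigned long left = (1UL << 20) + (1UL << 14);
    int order;

    while ( left && (order = alloc_chunk_model(left)) >= 0 )
    {
        left -= 1UL << order;
        printf("order %2d chunk allocated, %lu pages left\n", order, left);
    }
    return 0;
}

Since Dom0 construction only ever shrinks the remaining request, the cached
order is a safe starting point: an order that failed stays failed, so later
calls skip straight past it instead of retrying it from MAX_ORDER down.
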
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -125,26 +125,36 @@ string_param("dom0_ioports_disable", opt
 static struct page_info * __init alloc_chunk(
     struct domain *d, unsigned long max_pages)
 {
+    static unsigned int __initdata last_order = MAX_ORDER;
+    static unsigned int __initdata memflags = MEMF_no_dma;
     struct page_info *page;
-    unsigned int order, free_order;
+    unsigned int order = get_order_from_pages(max_pages), free_order;
 
-    /*
-     * Allocate up to 2MB at a time: It prevents allocating very large chunks
-     * from DMA pools before the >4GB pool is fully depleted.
-     */
-    if ( max_pages > (2UL << (20 - PAGE_SHIFT)) )
-        max_pages = 2UL << (20 - PAGE_SHIFT);
-    order = get_order_from_pages(max_pages);
-    if ( (max_pages & (max_pages-1)) != 0 )
-        order--;
-    while ( (page = alloc_domheap_pages(d, order, 0)) == NULL )
+    if ( order > last_order )
+        order = last_order;
+    else if ( max_pages & (max_pages - 1) )
+        --order;
+    while ( (page = alloc_domheap_pages(d, order, memflags)) == NULL )
         if ( order-- == 0 )
             break;
+    if ( page )
+        last_order = order;
+    else if ( memflags )
+    {
+        /*
+         * Allocate up to 2MB at a time: It prevents allocating very large
+         * chunks from DMA pools before the >4GB pool is fully depleted.
+         */
+        last_order = 21 - PAGE_SHIFT;
+        memflags = 0;
+        return alloc_chunk(d, max_pages);
+    }
+
     /*
      * Make a reasonable attempt at finding a smaller chunk at a higher
      * address, to avoid allocating from low memory as much as possible.
      */
-    for ( free_order = order; page && order--; )
+    for ( free_order = order; !memflags && page && order--; )
     {
         struct page_info *pg2;
 
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1157,8 +1157,9 @@ struct page_info *alloc_domheap_pages(
         pg = alloc_heap_pages(dma_zone + 1, zone_hi, node, order, memflags);
 
     if ( (pg == NULL) &&
-         ((pg = alloc_heap_pages(MEMZONE_XEN + 1, zone_hi,
-                                 node, order, memflags)) == NULL) )
+         ((memflags & MEMF_no_dma) ||
+          ((pg = alloc_heap_pages(MEMZONE_XEN + 1, zone_hi,
+                                  node, order, memflags)) == NULL)) )
          return NULL;
 
     if ( (d != NULL) && assign_pages(d, pg, order, memflags) )
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -79,6 +79,8 @@ int assign_pages(
 #define  MEMF_populate_on_demand (1U<<_MEMF_populate_on_demand)
 #define _MEMF_tmem        2
 #define  MEMF_tmem        (1U<<_MEMF_tmem)
+#define _MEMF_no_dma      3
+#define  MEMF_no_dma      (1U<<_MEMF_no_dma)
 #define _MEMF_node        8
 #define  MEMF_node(n)     ((((n)+1)&0xff)<<_MEMF_node)
 #define _MEMF_bits        24