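Replace the boolean MEMF_dma allocation flag with a MEMF_bits(n)
specifier that lets callers state how many address bits the allocation
must fit within (the width is carried in the upper bits of the memflags
word, starting at _MEMF_bits == 24). __alloc_domheap_pages() derives the
highest usable heap zone from the requested width, tries the zones above
the DMA boundary first, and dips into the low-memory pool only when the
caller's constraint (or the absence of one) permits it. dma_bitsize and
max_dma_mfn become private to page_alloc.c, and CONFIG_DMA_BITSIZE is
raised from 30 to 32 on both x86 and ia64.

Illustrative caller of the new interface (a sketch against the
post-patch API, not part of the patch; 'd' and 'order' stand for an
existing domain and allocation order): code that previously passed
MEMF_dma to get memory below 4GiB now states that bound explicitly.

    struct page_info *pg;

    /* Request pages addressable with at most 32 bits (below 4GiB). */
    pg = alloc_domheap_pages(d, order, MEMF_bits(32));
    if ( pg == NULL )
        return -ENOMEM;  /* nothing suitably low is free */

Widths of PAGE_SHIFT + 1 bits or fewer select no heap zone above
MEMZONE_XEN, so __alloc_domheap_pages() now fails such requests outright
and parse_dma_bits() ignores them when parsing the command-line override.
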
Index: xen-3.0.4-testing/xen/arch/x86/domain_build.c
===================================================================
--- xen-3.0.4-testing.orig/xen/arch/x86/domain_build.c
+++ xen-3.0.4-testing/xen/arch/x86/domain_build.c
@@ -473,11 +473,14 @@ int construct_dom0(struct domain *d,
     if ( (1UL << order) > nr_pages )
         panic("Domain 0 allocation is too small for kernel image.\n");
 
-    /*
-     * Allocate from DMA pool: on i386 this ensures that our low-memory 1:1
-     * mapping covers the allocation.
-     */
-    if ( (page = alloc_domheap_pages(d, order, MEMF_dma)) == NULL )
+#ifdef __i386__
+    /* Ensure that our low-memory 1:1 mapping covers the allocation. */
+    page = alloc_domheap_pages(d, order,
+                               MEMF_bits(30 + (dsi.v_start >> 31)));
+#else
+    page = alloc_domheap_pages(d, order, 0);
+#endif
+    if ( page == NULL )
         panic("Not enough RAM for domain 0 allocation.\n");
     alloc_spfn = page_to_mfn(page);
     alloc_epfn = alloc_spfn + d->tot_pages;
Index: xen-3.0.4-testing/xen/common/memory.c
===================================================================
--- xen-3.0.4-testing.orig/xen/common/memory.c
+++ xen-3.0.4-testing/xen/common/memory.c
@@ -322,12 +322,12 @@ static long memory_exchange(XEN_GUEST_HA
          (exch.out.address_bits <
           (get_order_from_pages(max_page) + PAGE_SHIFT)) )
     {
-        if ( exch.out.address_bits < dma_bitsize )
+        if ( exch.out.address_bits <= PAGE_SHIFT )
         {
             rc = -ENOMEM;
             goto fail_early;
         }
-        memflags = MEMF_dma;
+        memflags = MEMF_bits(exch.out.address_bits);
     }
 
     if ( exch.in.extent_order <= exch.out.extent_order )
@@ -535,9 +535,9 @@ long do_memory_op(unsigned long cmd, XEN
              (reservation.address_bits <
               (get_order_from_pages(max_page) + PAGE_SHIFT)) )
         {
-            if ( reservation.address_bits < dma_bitsize )
+            if ( reservation.address_bits <= PAGE_SHIFT )
                 return start_extent;
-            args.memflags = MEMF_dma;
+            args.memflags = MEMF_bits(reservation.address_bits);
         }
 
         if ( likely(reservation.domid == DOMID_SELF) )
Index: xen-3.0.4-testing/xen/common/page_alloc.c
===================================================================
--- xen-3.0.4-testing.orig/xen/common/page_alloc.c
+++ xen-3.0.4-testing/xen/common/page_alloc.c
@@ -62,8 +62,8 @@ custom_param("lowmem_emergency_pool", pa
 /*
  * Bit width of the DMA heap.
  */
-unsigned int dma_bitsize = CONFIG_DMA_BITSIZE;
-unsigned long max_dma_mfn = (1UL << (CONFIG_DMA_BITSIZE - PAGE_SHIFT)) - 1;
+static unsigned int dma_bitsize = CONFIG_DMA_BITSIZE;
+static unsigned long max_dma_mfn = (1UL << (CONFIG_DMA_BITSIZE - PAGE_SHIFT)) - 1;
 static void parse_dma_bits(char *s)
 {
     unsigned int v = simple_strtol(s, NULL, 0);
@@ -72,7 +72,7 @@ static void parse_dma_bits(char *s)
         dma_bitsize = BITS_PER_LONG + PAGE_SHIFT;
         max_dma_mfn = ~0UL;
     }
-    else if ( v > PAGE_SHIFT )
+    else if ( v > PAGE_SHIFT + 1 )
     {
         dma_bitsize = v;
         max_dma_mfn = (1UL << (dma_bitsize - PAGE_SHIFT)) - 1;
@@ -725,12 +725,22 @@ struct page_info *__alloc_domheap_pages(
     struct page_info *pg = NULL;
     cpumask_t mask;
     unsigned long i;
+    unsigned int bits = memflags >> _MEMF_bits, zone_hi;
 
     ASSERT(!in_irq());
 
-    if ( !(memflags & MEMF_dma) )
+    if ( bits && bits <= PAGE_SHIFT + 1 )
+        return NULL;
+
+    zone_hi = bits - PAGE_SHIFT - 1;
+    if ( zone_hi >= NR_ZONES )
+        zone_hi = NR_ZONES - 1;
+
+    if ( NR_ZONES + PAGE_SHIFT > dma_bitsize &&
+         (!bits || bits > dma_bitsize) )
     {
-        pg = alloc_heap_pages(dma_bitsize - PAGE_SHIFT, NR_ZONES - 1, cpu, order);
+        pg = alloc_heap_pages(dma_bitsize - PAGE_SHIFT, zone_hi, cpu, order);
+
         /* Failure? Then check if we can fall back to the DMA pool. */
         if ( unlikely(pg == NULL) &&
              ((order > MAX_ORDER) ||
@@ -743,7 +753,7 @@ struct page_info *__alloc_domheap_pages(
 
     if ( pg == NULL )
         if ( (pg = alloc_heap_pages(MEMZONE_XEN + 1,
-                                    dma_bitsize - PAGE_SHIFT - 1,
+                                    zone_hi,
                                     cpu, order)) == NULL )
             return NULL;
 
Index: xen-3.0.4-testing/xen/include/asm-ia64/config.h
===================================================================
--- xen-3.0.4-testing.orig/xen/include/asm-ia64/config.h
+++ xen-3.0.4-testing/xen/include/asm-ia64/config.h
@@ -41,7 +41,7 @@
 #define CONFIG_IOSAPIC
 #define supervisor_mode_kernel (0)
 
-#define CONFIG_DMA_BITSIZE 30
+#define CONFIG_DMA_BITSIZE 32
 
 /* If PERFC is used, include privop maps. */
 #ifdef PERF_COUNTERS
Index: xen-3.0.4-testing/xen/include/asm-x86/config.h
===================================================================
--- xen-3.0.4-testing.orig/xen/include/asm-x86/config.h
+++ xen-3.0.4-testing/xen/include/asm-x86/config.h
@@ -82,7 +82,7 @@
 /* Debug stack is restricted to 8kB by guard pages. */
 #define DEBUG_STACK_SIZE 8192
 
-#define CONFIG_DMA_BITSIZE 30
+#define CONFIG_DMA_BITSIZE 32
 
 #ifndef __ASSEMBLY__
 extern unsigned long _end; /* standard ELF symbol */
Index: xen-3.0.4-testing/xen/include/xen/mm.h
===================================================================
--- xen-3.0.4-testing.orig/xen/include/xen/mm.h
+++ xen-3.0.4-testing/xen/include/xen/mm.h
@@ -71,10 +71,10 @@ int assign_pages(
     unsigned int memflags);
 
 /* memflags: */
-#define _MEMF_dma         0
-#define  MEMF_dma         (1U<<_MEMF_dma)
-#define _MEMF_no_refcount 1
+#define _MEMF_no_refcount 0
 #define  MEMF_no_refcount (1U<<_MEMF_no_refcount)
+#define _MEMF_bits        24
+#define  MEMF_bits(n)     ((n)<<_MEMF_bits)
 
 #ifdef CONFIG_PAGEALLOC_MAX_ORDER
 #define MAX_ORDER CONFIG_PAGEALLOC_MAX_ORDER
@@ -82,10 +82,6 @@ int assign_pages(
 #define MAX_ORDER 20 /* 2^20 contiguous pages */
 #endif
 
-/* DMA heap parameters. */
-extern unsigned int dma_bitsize;
-extern unsigned long max_dma_mfn;
-
 /* Automatic page scrubbing for dead domains. */
 extern struct list_head page_scrub_list;
 #define page_scrub_schedule_work() \