Updated block-dmmd script - fate#310510 - fix xenpaging restore changes to integrate paging into xm/xend xenpaging.autostart.patch xenpaging.doc.patch - bnc#787163 - VUL-0: CVE-2012-4544: xen: Domain builder Out-of-memory due to malicious kernel/ramdisk (XSA-25) CVE-2012-4544-xsa25.patch - bnc#779212 - VUL-0: CVE-2012-4411: XEN / qemu: guest administrator can access qemu monitor console (XSA-19) CVE-2012-4411-xsa19.patch - bnc#786516 - VUL-0: CVE-2012-4535: xen: Timer overflow DoS vulnerability CVE-2012-4535-xsa20.patch - bnc#786518 - VUL-0: CVE-2012-4536: xen: pirq range check DoS vulnerability CVE-2012-4536-xsa21.patch - bnc#786517 - VUL-0: CVE-2012-4537: xen: Memory mapping failure DoS vulnerability CVE-2012-4537-xsa22.patch - bnc#786519 - VUL-0: CVE-2012-4538: xen: Unhooking empty PAE entries DoS vulnerability CVE-2012-4538-xsa23.patch - bnc#786520 - VUL-0: CVE-2012-4539: xen: Grant table hypercall infinite loop DoS vulnerability CVE-2012-4539-xsa24.patch OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=212
85 lines, 2.9 KiB, Diff
# HG changeset patch
# User Keir Fraser <keir@xen.org>
# Date 1350315491 -3600
# Node ID 177fdda0be568ccdb62697b64aa64ee20bc55bee
# Parent  14e32621dbaf5b485b134ace4558e67c4c36e1ce
More efficient TLB-flush filtering in alloc_heap_pages().

Rather than per-cpu filtering for every page in a super-page
allocation, simply remember the most recent TLB timestamp across all
allocated pages, and filter on that, just once, at the end of the
function.

For large-CPU systems, doing 2MB allocations during domain creation,
this cuts down the domain creation time *massively*.

TODO: It may make sense to move the filtering out into some callers,
such as memory.c:populate_physmap() and
memory.c:increase_reservation(), so that the filtering can be moved
outside their loops, too.

Signed-off-by: Keir Fraser <keir@xen.org>
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -414,9 +414,10 @@ static struct page_info *alloc_heap_page
     unsigned int first_node, i, j, zone = 0, nodemask_retry = 0;
     unsigned int node = (uint8_t)((memflags >> _MEMF_node) - 1);
     unsigned long request = 1UL << order;
-    cpumask_t mask;
     struct page_info *pg;
     nodemask_t nodemask = (d != NULL ) ? d->node_affinity : node_online_map;
+    bool_t need_tlbflush = 0;
+    uint32_t tlbflush_timestamp = 0;
 
     if ( node == NUMA_NO_NODE )
     {
@@ -530,22 +531,19 @@ static struct page_info *alloc_heap_page
     if ( d != NULL )
         d->last_alloc_node = node;
 
-    cpumask_clear(&mask);
-
     for ( i = 0; i < (1 << order); i++ )
     {
         /* Reference count must continuously be zero for free pages. */
         BUG_ON(pg[i].count_info != PGC_state_free);
         pg[i].count_info = PGC_state_inuse;
 
-        if ( pg[i].u.free.need_tlbflush )
+        if ( pg[i].u.free.need_tlbflush &&
+             (pg[i].tlbflush_timestamp <= tlbflush_current_time()) &&
+             (!need_tlbflush ||
+              (pg[i].tlbflush_timestamp > tlbflush_timestamp)) )
         {
-            /* Add in extra CPUs that need flushing because of this page. */
-            static cpumask_t extra_cpus_mask;
-
-            cpumask_andnot(&extra_cpus_mask, &cpu_online_map, &mask);
-            tlbflush_filter(extra_cpus_mask, pg[i].tlbflush_timestamp);
-            cpumask_or(&mask, &mask, &extra_cpus_mask);
+            need_tlbflush = 1;
+            tlbflush_timestamp = pg[i].tlbflush_timestamp;
         }
 
         /* Initialise fields which have other uses for free pages. */
@@ -555,10 +553,15 @@ static struct page_info *alloc_heap_page
 
     spin_unlock(&heap_lock);
 
-    if ( unlikely(!cpumask_empty(&mask)) )
+    if ( need_tlbflush )
     {
-        perfc_incr(need_flush_tlb_flush);
-        flush_tlb_mask(&mask);
+        cpumask_t mask = cpu_online_map;
+
+        tlbflush_filter(mask, tlbflush_timestamp);
+        if ( !cpumask_empty(&mask) )
+        {
+            perfc_incr(need_flush_tlb_flush);
+            flush_tlb_mask(&mask);
+        }
     }
 
     return pg;