References: bnc#638465
# HG changeset patch
# User Keir Fraser
# Date 1284394111 -3600
# Node ID 69e8bb164683c76e0cd787df21b98c73905a61e6
# Parent e300bfa3c0323ac08e7b8cd9fb40f9f1ab548543
page_alloc: Hold heap_lock while adjusting page states to/from PGC_state_free.

This avoids races with buddy-merging logic in free_heap_pages().

Signed-off-by: Keir Fraser

--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -378,8 +378,6 @@ static struct page_info *alloc_heap_page
     total_avail_pages -= request;
     ASSERT(total_avail_pages >= 0);
 
-    spin_unlock(&heap_lock);
-
     cpus_clear(mask);
 
     for ( i = 0; i < (1 << order); i++ )
@@ -401,6 +399,8 @@ static struct page_info *alloc_heap_page
         page_set_owner(&pg[i], NULL);
     }
 
+    spin_unlock(&heap_lock);
+
     if ( unlikely(!cpus_empty(mask)) )
     {
         perfc_incr(need_flush_tlb_flush);
@@ -496,6 +496,8 @@ static void free_heap_pages(
     ASSERT(order <= MAX_ORDER);
     ASSERT(node >= 0);
 
+    spin_lock(&heap_lock);
+
     for ( i = 0; i < (1 << order); i++ )
    {
        /*
@@ -523,8 +525,6 @@ static void free_heap_pages(
        pg[i].tlbflush_timestamp = tlbflush_current_time();
    }
 
-    spin_lock(&heap_lock);
-
     avail[node][zone] += 1 << order;
     total_avail_pages += 1 << order;
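
As a rough illustration of the locking discipline this change enforces, the following is a minimal, self-contained sketch built around a toy buddy allocator. The names (toy_lock, toy_state, toy_alloc, toy_free, STATE_FREE, STATE_INUSE) are invented for illustration and are not Xen code; the point is only that the free->in-use state transition and the buddy-merge check must both run under the same lock.

/* Hypothetical sketch of the locking pattern; build with e.g.
 * gcc -pthread race_sketch.c. None of these names exist in Xen.
 */
#include <pthread.h>
#include <stdio.h>

#define STATE_FREE  0
#define STATE_INUSE 1
#define NPAGES      8

static pthread_mutex_t toy_lock = PTHREAD_MUTEX_INITIALIZER;
static int toy_state[NPAGES];

/* Allocation path: mark the page in-use while the lock is held, so the
 * free path below can never observe a stale "free" state and merge a
 * page that is already being handed out. */
static void toy_alloc(int page)
{
    pthread_mutex_lock(&toy_lock);
    toy_state[page] = STATE_INUSE;
    pthread_mutex_unlock(&toy_lock);
}

/* Free path: the buddy-merge check may only trust toy_state[] because
 * both the state transitions and the merge run under the same lock. */
static void toy_free(int page)
{
    pthread_mutex_lock(&toy_lock);
    toy_state[page] = STATE_FREE;
    int buddy = page ^ 1;                 /* order-0 buddy index */
    if (toy_state[buddy] == STATE_FREE)
        printf("merging pages %d and %d\n", page, buddy);
    pthread_mutex_unlock(&toy_lock);
}

int main(void)
{
    toy_alloc(2);
    toy_free(3);   /* buddy (page 2) is in use: no merge */
    toy_free(2);   /* both halves free: merge */
    return 0;
}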