xen-unstable changeset: 23817:083f10851dd8
date: Mon Sep 05 15:10:09 2011 +0100
description:
mem_event: add ref counting for free request slots

If mem_event_check_ring() is called by many vcpus at the same time
before any of them has also called mem_event_put_request(), all of the
callers must assume there are enough free slots available in the ring.

Record the number of request producers in mem_event_check_ring() to
keep track of available free slots.

Add a new mem_event_put_req_producers() function to release a request
attempt made in mem_event_check_ring(). It is required for
p2m_mem_paging_populate() because that function can only modify the
p2m type if there are free request slots. But in some cases
p2m_mem_paging_populate() does not actually have to produce another
request when it is known that the same request was already made
earlier by a different vcpu.

mem_event_check_ring() cannot return a reference to a free request
slot because there could be multiple references for different vcpus
and the order of mem_event_put_request() calls is not known. As a
result, incomplete requests could be consumed by the ring user.

Signed-off-by: Olaf Hering <olaf@aepfle.de>

---
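
Note: the accounting scheme this patch introduces can be sketched in
isolation as follows. This is a minimal illustration, not code from the
tree: a pthread mutex and two plain counters stand in for the domain's
ring_lock, req_producers field and the RING_FREE_REQUESTS() macro, and
the function names only loosely mirror the patch.

    #include <pthread.h>

    #define RING_SIZE 64

    static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int req_producers; /* reserved, not yet submitted */
    static unsigned int ring_used;     /* requests already in the ring */

    /* Reserve one slot; returns 0 on success, -1 if the ring is full.
     * (The real mem_event_check_ring() additionally pauses the vcpu
     * when its own domain's ring is full; that is omitted here.) */
    int check_ring(void)
    {
        int ring_full = 1;

        pthread_mutex_lock(&ring_lock);
        /* A slot is free only if the submitted requests plus the
         * outstanding reservations still leave room in the ring. */
        if ( req_producers < RING_SIZE - ring_used )
        {
            req_producers++;
            ring_full = 0;
        }
        pthread_mutex_unlock(&ring_lock);

        return ring_full ? -1 : 0;
    }

    /* Turn a reservation into an actual request. */
    void put_request(void)
    {
        pthread_mutex_lock(&ring_lock);
        req_producers--;
        ring_used++;
        pthread_mutex_unlock(&ring_lock);
    }

    /* Drop a reservation without producing a request. */
    void put_req_producers(void)
    {
        pthread_mutex_lock(&ring_lock);
        req_producers--;
        pthread_mutex_unlock(&ring_lock);
    }
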
 xen/arch/x86/mm/mem_event.c     |   19 ++++++++++++-------
 xen/arch/x86/mm/mem_sharing.c   |    1 -
 xen/arch/x86/mm/p2m.c           |    1 +
 xen/include/asm-x86/mem_event.h |    1 +
 xen/include/xen/sched.h         |    1 +
 5 files changed, 15 insertions(+), 8 deletions(-)
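
The intended calling pattern for the new reservation API, as used by
p2m_mem_paging_populate() below, is roughly the following. This is a
hedged sketch: request_already_pending() is an invented placeholder for
the p2m type checks the real function performs, and the request setup
is elided. The function signatures themselves match the patch.

    void populate_example(struct domain *d, mem_event_request_t *req)
    {
        /* Without a reserved slot the p2m type must not be changed. */
        if ( mem_event_check_ring(d) )
            return;

        if ( request_already_pending(d) )
        {
            /* Another vcpu already submitted this request: release
             * the reservation instead of producing a duplicate. */
            mem_event_put_req_producers(d);
            return;
        }

        mem_event_put_request(d, req);  /* consumes the reservation */
    }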

Index: xen-4.1.2-testing/xen/arch/x86/mm/mem_event.c
===================================================================
--- xen-4.1.2-testing.orig/xen/arch/x86/mm/mem_event.c
+++ xen-4.1.2-testing/xen/arch/x86/mm/mem_event.c
@@ -37,8 +37,6 @@
 #define mem_event_ring_lock(_d)   spin_lock(&(_d)->mem_event.ring_lock)
 #define mem_event_ring_unlock(_d) spin_unlock(&(_d)->mem_event.ring_lock)
 
-#define MEM_EVENT_RING_THRESHOLD 4
-
 static int mem_event_enable(struct domain *d, mfn_t ring_mfn, mfn_t shared_mfn)
 {
     int rc;
@@ -109,6 +107,7 @@ void mem_event_put_request(struct domain
     req_prod++;
 
     /* Update ring */
+    d->mem_event.req_producers--;
     front_ring->req_prod_pvt = req_prod;
     RING_PUSH_REQUESTS(front_ring);
 
@@ -153,11 +152,18 @@ void mem_event_mark_and_pause(struct vcp
     vcpu_sleep_nosync(v);
 }
 
+void mem_event_put_req_producers(struct domain *d)
+{
+    mem_event_ring_lock(d);
+    d->mem_event.req_producers--;
+    mem_event_ring_unlock(d);
+}
+
 int mem_event_check_ring(struct domain *d)
 {
     struct vcpu *curr = current;
     int free_requests;
-    int ring_full;
+    int ring_full = 1;
 
     if ( !d->mem_event.ring_page )
         return -1;
@@ -165,12 +171,11 @@ int mem_event_check_ring(struct domain *
     mem_event_ring_lock(d);
 
     free_requests = RING_FREE_REQUESTS(&d->mem_event.front_ring);
-    if ( unlikely(free_requests < 2) )
+    if ( d->mem_event.req_producers < free_requests )
     {
-        gdprintk(XENLOG_INFO, "free request slots: %d\n", free_requests);
-        WARN_ON(free_requests == 0);
+        d->mem_event.req_producers++;
+        ring_full = 0;
     }
-    ring_full = free_requests < MEM_EVENT_RING_THRESHOLD ? 1 : 0;
 
     if ( (curr->domain->domain_id == d->domain_id) && ring_full )
     {

Index: xen-4.1.2-testing/xen/arch/x86/mm/mem_sharing.c
===================================================================
--- xen-4.1.2-testing.orig/xen/arch/x86/mm/mem_sharing.c
+++ xen-4.1.2-testing/xen/arch/x86/mm/mem_sharing.c
@@ -322,7 +322,6 @@ static struct page_info* mem_sharing_all
         req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
     }
 
-    /* XXX: Need to reserve a request, not just check the ring! */
     if(mem_event_check_ring(d)) return page;
 
     req.gfn = gfn;

Index: xen-4.1.2-testing/xen/arch/x86/mm/p2m.c
===================================================================
--- xen-4.1.2-testing.orig/xen/arch/x86/mm/p2m.c
+++ xen-4.1.2-testing/xen/arch/x86/mm/p2m.c
@@ -2970,6 +2970,7 @@ void p2m_mem_paging_populate(struct p2m_
     else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged )
     {
         /* gfn is already on its way back and vcpu is not paused */
+        mem_event_put_req_producers(d);
         return;
     }
 

Index: xen-4.1.2-testing/xen/include/asm-x86/mem_event.h
===================================================================
--- xen-4.1.2-testing.orig/xen/include/asm-x86/mem_event.h
+++ xen-4.1.2-testing/xen/include/asm-x86/mem_event.h
@@ -27,6 +27,7 @@
 /* Pauses VCPU while marking pause flag for mem event */
 void mem_event_mark_and_pause(struct vcpu *v);
 int mem_event_check_ring(struct domain *d);
+void mem_event_put_req_producers(struct domain *d);
 void mem_event_put_request(struct domain *d, mem_event_request_t *req);
 void mem_event_get_response(struct domain *d, mem_event_response_t *rsp);
 void mem_event_unpause_vcpus(struct domain *d);

Index: xen-4.1.2-testing/xen/include/xen/sched.h
===================================================================
--- xen-4.1.2-testing.orig/xen/include/xen/sched.h
+++ xen-4.1.2-testing/xen/include/xen/sched.h
@@ -190,6 +190,7 @@ struct mem_event_domain
 {
     /* ring lock */
    spinlock_t ring_lock;
+    unsigned int req_producers;
     /* shared page */
     mem_event_shared_page_t *shared_page;
     /* shared ring page */