forked from pool/xen
OBS User unknown 2008-10-25 23:33:59 +00:00 committed by Git OBS Bridge
parent 1e7cc04da7
commit b82e0d6f19
46 changed files with 4200 additions and 321 deletions

@@ -130,9 +130,9 @@ Index: xen-3.3.1-testing/xen/common/schedule.c
+ goto out;
+#endif
for ( i = 0; i < sched_poll->nr_ports; i++ )
{
-@@ -369,6 +403,9 @@ static long do_poll(struct sched_poll *s
rc = 0;
if ( local_events_need_delivery() )
+@@ -373,6 +407,9 @@ static long do_poll(struct sched_poll *s
goto out;
}
@@ -142,7 +142,7 @@ Index: xen-3.3.1-testing/xen/common/schedule.c
if ( sched_poll->timeout != 0 )
set_timer(&v->poll_timer, sched_poll->timeout);
-@@ -378,7 +415,8 @@ static long do_poll(struct sched_poll *s
+@@ -382,7 +419,8 @@ static long do_poll(struct sched_poll *s
return 0;
out:
@@ -152,7 +152,7 @@ Index: xen-3.3.1-testing/xen/common/schedule.c
clear_bit(_VPF_blocked, &v->pause_flags);
return rc;
}
-@@ -760,11 +798,8 @@ static void poll_timer_fn(void *data)
+@@ -764,11 +802,8 @@ static void poll_timer_fn(void *data)
{
struct vcpu *v = data;

@@ -0,0 +1,60 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1220968229 -3600
# Node ID 5ce9459ce8722a8af89da5a73b0c80a767d5b1ad
# Parent d57e9b29858bddf4651efb002cfdadf978da79c0
vtd: Enable pass-through translation for Dom0
If the pass-through field in the extended capability register is set, set
the pass-through translation type for Dom0. This means DMA requests with
untranslated addresses are processed as pass-through in Dom0 and need not
be translated through a multi-level page table.
Signed-off-by: Anthony Xu <anthony.xu@intel.com>
Signed-off-by: Weidong Han <weidong.han@intel.com>
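The hunks below hinge on the ecap_pass_thru() test. For reference, a minimal
sketch of that check, assuming the bit layout given in the VT-d specification
(bit 6 of the extended capability register reports pass-through support):

    /* VT-d extended capability register (ECAP), bit 6: pass-through
     * translation type supported. Sketch per the VT-d spec; the real
     * macro lives in xen/drivers/passthrough/vtd/iommu.h. */
    #define ecap_pass_thru(e)   (((e) >> 6) & 0x1)

When the bit is set, the context entry for a Dom0 device can carry
CONTEXT_TT_PASS_THRU and the hardware forwards untranslated DMA addresses
unmodified.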
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -1091,12 +1091,12 @@ static int domain_context_mapping_one(
}
spin_lock_irqsave(&iommu->lock, flags);
-
-#ifdef CONTEXT_PASSTHRU
if ( ecap_pass_thru(iommu->ecap) && (domain->domain_id == 0) )
+ {
context_set_translation_type(*context, CONTEXT_TT_PASS_THRU);
+ agaw = level_to_agaw(iommu->nr_pt_levels);
+ }
else
-#endif
{
/* Ensure we have pagetables allocated down to leaf PTE. */
if ( hd->pgd_maddr == 0 )
@@ -1460,11 +1460,12 @@ int intel_iommu_map_page(
u64 pg_maddr;
int pte_present;
-#ifdef CONTEXT_PASSTHRU
+ drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
+ iommu = drhd->iommu;
+
/* do nothing if dom0 and iommu supports pass thru */
if ( ecap_pass_thru(iommu->ecap) && (d->domain_id == 0) )
return 0;
-#endif
pg_maddr = addr_to_dma_page_maddr(d, (paddr_t)gfn << PAGE_SHIFT_4K, 1);
if ( pg_maddr == 0 )
@@ -1501,11 +1502,9 @@ int intel_iommu_unmap_page(struct domain
drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
iommu = drhd->iommu;
-#ifdef CONTEXT_PASSTHRU
/* do nothing if dom0 and iommu supports pass thru */
if ( ecap_pass_thru(iommu->ecap) && (d->domain_id == 0) )
return 0;
-#endif
dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);

@@ -0,0 +1,85 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1221040389 -3600
# Node ID 6a37b3d966f90f3c1604c9a3045d033cc5eeb4ea
# Parent b5912430e66c900c2092c035227816f43f7caeb0
vtd: Add a command line param to enable/disable pass-through feature
Taking security into account, it is not suitable to bypass VT-d
translation for Dom0 by default when the pass-through field in the
extended capability register is set. This feature is for people/usages
who are not overly worried about security/isolation but want better
performance.
This patch adds a command line param that controls whether it is enabled.
Signed-off-by: Weidong Han <weidong.han@intel.com>
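As a usage sketch (bootloader paths and image names below are illustrative,
not part of this changeset), the new sub-option goes on the hypervisor
command line; parse_iommu_param() splits the string on commas, so it combines
with the existing options:

    title Xen 3.3
        kernel /boot/xen.gz iommu=force,passthrough
        module /boot/vmlinuz-xen root=/dev/sda1 ro
        module /boot/initrd-xen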
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -33,11 +33,13 @@ int amd_iov_detect(void);
* pv Enable IOMMU for PV domains
* no-pv Disable IOMMU for PV domains (default)
* force|required Don't boot unless IOMMU is enabled
+ * passthrough Bypass VT-d translation for Dom0
*/
custom_param("iommu", parse_iommu_param);
int iommu_enabled = 0;
int iommu_pv_enabled = 0;
int force_iommu = 0;
+int iommu_passthrough = 0;
static void __init parse_iommu_param(char *s)
{
@@ -58,6 +60,8 @@ static void __init parse_iommu_param(cha
iommu_pv_enabled = 0;
else if ( !strcmp(s, "force") || !strcmp(s, "required") )
force_iommu = 1;
+ else if ( !strcmp(s, "passthrough") )
+ iommu_passthrough = 1;
s = ss + 1;
} while ( ss );
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -1091,7 +1091,8 @@ static int domain_context_mapping_one(
}
spin_lock_irqsave(&iommu->lock, flags);
- if ( ecap_pass_thru(iommu->ecap) && (domain->domain_id == 0) )
+ if ( iommu_passthrough &&
+ ecap_pass_thru(iommu->ecap) && (domain->domain_id == 0) )
{
context_set_translation_type(*context, CONTEXT_TT_PASS_THRU);
agaw = level_to_agaw(iommu->nr_pt_levels);
@@ -1464,7 +1465,8 @@ int intel_iommu_map_page(
iommu = drhd->iommu;
/* do nothing if dom0 and iommu supports pass thru */
- if ( ecap_pass_thru(iommu->ecap) && (d->domain_id == 0) )
+ if ( iommu_passthrough &&
+ ecap_pass_thru(iommu->ecap) && (d->domain_id == 0) )
return 0;
pg_maddr = addr_to_dma_page_maddr(d, (paddr_t)gfn << PAGE_SHIFT_4K, 1);
@@ -1503,7 +1505,8 @@ int intel_iommu_unmap_page(struct domain
iommu = drhd->iommu;
/* do nothing if dom0 and iommu supports pass thru */
- if ( ecap_pass_thru(iommu->ecap) && (d->domain_id == 0) )
+ if ( iommu_passthrough &&
+ ecap_pass_thru(iommu->ecap) && (d->domain_id == 0) )
return 0;
dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -31,6 +31,7 @@ extern int vtd_enabled;
extern int iommu_enabled;
extern int iommu_pv_enabled;
extern int force_iommu;
+extern int iommu_passthrough;
#define domain_hvm_iommu(d) (&d->arch.hvm_domain.hvm_iommu)

@@ -0,0 +1,41 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1221148273 -3600
# Node ID a5cc38391afb16af6bdae32c5a5f4f9a3a236987
# Parent d8ce41b79ecc74c00797d73caa56dbdaf02bbd66
ACPI: Grant access of MSR_IA32_THERM_CONTROL MSR to dom0
The purpose is to support dom0 throttling control via MSR.
Signed-off-by: Wei Gang <gang.wei@intel.com>
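A hypothetical dom0-side sketch of what this enables. Per the Intel SDM,
MSR_IA32_THERM_CONTROL (0x19a) carries an on-demand clock-modulation enable
in bit 4 and a duty-cycle select in bits 3:1; wrmsrl() is the usual Linux
kernel helper. None of this code is part of the patch:

    /* Request on-demand throttling at roughly duty/8 of full speed.
     * The wrmsr traps into Xen, which now permits it for dom0. */
    static void set_throttle_duty(unsigned int duty)        /* 1..7 */
    {
        uint64_t val = ((uint64_t)(duty & 0x7) << 1) | (1ULL << 4);
        wrmsrl(MSR_IA32_THERM_CONTROL, val);
    }

Before this change such an access fell through to the default case in
emulate_privileged_op() and was rejected for this MSR.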
Index: xen-3.3.1-testing/xen/arch/x86/traps.c
===================================================================
--- xen-3.3.1-testing.orig/xen/arch/x86/traps.c
+++ xen-3.3.1-testing/xen/arch/x86/traps.c
@@ -2158,6 +2158,12 @@ static int emulate_privileged_op(struct
if ( wrmsr_safe(regs->ecx, eax, edx) != 0 )
goto fail;
break;
+ case MSR_IA32_THERM_CONTROL:
+ if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
+ goto fail;
+ if ( wrmsr_safe(regs->ecx, eax, edx) != 0 )
+ goto fail;
+ break;
default:
if ( wrmsr_hypervisor_regs(regs->ecx, eax, edx) )
break;
@@ -2234,6 +2240,12 @@ static int emulate_privileged_op(struct
MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |
MSR_IA32_MISC_ENABLE_XTPR_DISABLE;
break;
+ case MSR_IA32_THERM_CONTROL:
+ if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
+ goto fail;
+ if ( rdmsr_safe(regs->ecx, regs->eax, regs->edx) )
+ goto fail;
+ break;
default:
if ( rdmsr_hypervisor_regs(regs->ecx, &l, &h) )
{

@@ -0,0 +1,25 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1222087267 -3600
# Node ID 3c42b5ad0a4f607749426f82ecf11f75d84699c5
# Parent e61c7833dc9d87eb7fb41f47d2377370aa9a0b46
x86,amd,microcode: fix hypercall return code
Make the hypercall return failure if the microcode didn't apply.
Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
--- a/xen/arch/x86/microcode_amd.c
+++ b/xen/arch/x86/microcode_amd.c
@@ -335,10 +335,7 @@ static int cpu_request_microcode(int cpu
* lets keep searching till the latest version
*/
if ( error == 1 )
- {
- apply_microcode(cpu);
- error = 0;
- }
+ error = apply_microcode(cpu);
xfree(mc);
}
if ( offset > 0 )

@@ -438,7 +438,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/traps.c
===================================================================
--- xen-3.3.1-testing.orig/xen/arch/x86/traps.c
+++ xen-3.3.1-testing/xen/arch/x86/traps.c
-@@ -2960,13 +2960,13 @@ void set_intr_gate(unsigned int n, void
+@@ -2976,13 +2976,13 @@ void set_intr_gate(unsigned int n, void
void set_tss_desc(unsigned int n, void *addr)
{
_set_tssldt_desc(

18528-dump-evtchn.patch

@@ -0,0 +1,113 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1222095059 -3600
# Node ID 81483e49c74c314ae3ed098c1373dfc3f2d3f31e
# Parent ae29cd95ba7d7f5cdcbb32509575b83e9fb3d43c
Add debug key 'e' for event channel information
Signed-off-by: Jan Beulich <jbeulich@novell.com>
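A usage sketch once the key is registered (assuming the xm toolstack of this
Xen generation; the key can equally be typed on the Xen serial console):

    # from dom0: inject the 'e' debug key, then read the hypervisor log
    xm debug-keys e
    xm dmesg | tail -n 40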
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -25,6 +25,7 @@
#include <xen/iocap.h>
#include <xen/compat.h>
#include <xen/guest_access.h>
+#include <xen/keyhandler.h>
#include <asm/current.h>
#include <public/xen.h>
@@ -1046,6 +1047,77 @@ void evtchn_destroy(struct domain *d)
spin_unlock(&d->evtchn_lock);
}
+static void domain_dump_evtchn_info(struct domain *d)
+{
+ unsigned int port;
+
+ printk("Domain %d polling vCPUs: %08lx\n", d->domain_id, d->poll_mask[0]);
+
+ if ( !spin_trylock(&d->evtchn_lock) )
+ return;
+
+ printk("Event channel information for domain %d:\n",
+ d->domain_id);
+
+ for ( port = 1; port < MAX_EVTCHNS(d); ++port )
+ {
+ const struct evtchn *chn;
+
+ if ( !port_is_valid(d, port) )
+ continue;
+ chn = evtchn_from_port(d, port);
+ if ( chn->state == ECS_FREE )
+ continue;
+
+ printk(" %4u[%d/%d]: s=%d n=%d",
+ port,
+ test_bit(port, &shared_info(d, evtchn_pending)),
+ test_bit(port, &shared_info(d, evtchn_mask)),
+ chn->state, chn->notify_vcpu_id);
+ switch ( chn->state )
+ {
+ case ECS_UNBOUND:
+ printk(" d=%d", chn->u.unbound.remote_domid);
+ break;
+ case ECS_INTERDOMAIN:
+ printk(" d=%d p=%d",
+ chn->u.interdomain.remote_dom->domain_id,
+ chn->u.interdomain.remote_port);
+ break;
+ case ECS_PIRQ:
+ printk(" p=%d", chn->u.pirq);
+ break;
+ case ECS_VIRQ:
+ printk(" v=%d", chn->u.virq);
+ break;
+ }
+ printk(" x=%d\n", chn->consumer_is_xen);
+ }
+
+ spin_unlock(&d->evtchn_lock);
+}
+
+static void dump_evtchn_info(unsigned char key)
+{
+ struct domain *d;
+
+ printk("'%c' pressed -> dumping event-channel info\n", key);
+
+ rcu_read_lock(&domlist_read_lock);
+
+ for_each_domain ( d )
+ domain_dump_evtchn_info(d);
+
+ rcu_read_unlock(&domlist_read_lock);
+}
+
+static int __init dump_evtchn_info_key_init(void)
+{
+ register_keyhandler('e', dump_evtchn_info, "dump evtchn info");
+ return 0;
+}
+__initcall(dump_evtchn_info_key_init);
+
/*
* Local variables:
* mode: C
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -204,11 +204,11 @@ static void dump_domains(unsigned char k
printk("VCPU information and callbacks for domain %u:\n",
d->domain_id);
for_each_vcpu ( d, v ) {
- printk(" VCPU%d: CPU%d [has=%c] flags=%lx "
+ printk(" VCPU%d: CPU%d [has=%c] flags=%lx poll=%d "
"upcall_pend = %02x, upcall_mask = %02x ",
v->vcpu_id, v->processor,
v->is_running ? 'T':'F',
- v->pause_flags,
+ v->pause_flags, v->poll_evtchn,
vcpu_info(v, evtchn_upcall_pending),
vcpu_info(v, evtchn_upcall_mask));
cpuset_print(tmpstr, sizeof(tmpstr), v->vcpu_dirty_cpumask);

@@ -0,0 +1,581 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1222256215 -3600
# Node ID 31f09a5e24cf8eb8a9d73acc6c23262fe9d463d7
# Parent 7750906b06b3ebbba529e6d1042d7a2a2712623c
x86: Properly synchronise updates to pirq-to-vector mapping.
Per-domain irq mappings are now protected by d->evtchn_lock and by the
per-vector irq_desc lock.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
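The central helper added below, domain_spin_lock_irq_desc(), is a
read/lock/re-check loop. A stripped-down sketch of the pattern (error
handling trimmed for illustration):

    for ( ; ; )
    {
        vector = domain_irq_to_vector(d, irq);   /* unlocked, may be stale */
        if ( vector <= 0 )
            return NULL;
        desc = &irq_desc[vector];
        spin_lock_irqsave(&desc->lock, flags);
        if ( vector == domain_irq_to_vector(d, irq) )
            break;                               /* mapping still current */
        spin_unlock_irqrestore(&desc->lock, flags); /* lost a race: retry */
    }

Because every updater of the pirq-to-vector mapping holds the same irq_desc
lock, re-reading the mapping under that lock guarantees it cannot change
while the descriptor is held.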
--- a/xen/arch/ia64/xen/irq.c
+++ b/xen/arch/ia64/xen/irq.c
@@ -459,20 +459,24 @@ int pirq_guest_bind(struct vcpu *v, int
return rc;
}
-void pirq_guest_unbind(struct domain *d, int irq)
+int pirq_guest_unbind(struct domain *d, int irq)
{
irq_desc_t *desc = &irq_desc[irq];
irq_guest_action_t *action;
unsigned long flags;
- int i;
+ int i, rc = 0;
spin_lock_irqsave(&desc->lock, flags);
action = (irq_guest_action_t *)desc->action;
- i = 0;
- while ( action->guest[i] && (action->guest[i] != d) )
- i++;
+ for ( i = 0; (i < action->nr_guests) && (action->guest[i] != d); i++ )
+ continue;
+ if ( i == action->nr_guests )
+ {
+ rc = -EINVAL;
+ goto out;
+ }
memmove(&action->guest[i], &action->guest[i+1], IRQ_MAX_GUESTS-i-1);
action->nr_guests--;
@@ -492,7 +496,9 @@ void pirq_guest_unbind(struct domain *d,
desc->handler->shutdown(irq);
}
+ out:
spin_unlock_irqrestore(&desc->lock, flags);
+ return rc;
}
void
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -414,8 +414,6 @@ int arch_domain_create(struct domain *d,
goto fail;
}
- spin_lock_init(&d->arch.irq_lock);
-
if ( is_hvm_domain(d) )
{
if ( (rc = hvm_domain_initialise(d)) != 0 )
--- a/xen/arch/x86/io_apic.c
+++ b/xen/arch/x86/io_apic.c
@@ -48,22 +48,6 @@ atomic_t irq_mis_count;
int msi_enable = 0;
boolean_param("msi", msi_enable);
-int domain_irq_to_vector(struct domain *d, int irq)
-{
- if ( !msi_enable )
- return irq_to_vector(irq);
- else
- return d->arch.pirq_vector[irq];
-}
-
-int domain_vector_to_irq(struct domain *d, int vector)
-{
- if ( !msi_enable )
- return vector_to_irq(vector);
- else
- return d->arch.vector_pirq[vector];
-}
-
/* Where if anywhere is the i8259 connect in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
@@ -730,7 +714,6 @@ next:
static struct hw_interrupt_type ioapic_level_type;
static struct hw_interrupt_type ioapic_edge_type;
-struct hw_interrupt_type pci_msi_type;
#define IOAPIC_AUTO -1
#define IOAPIC_EDGE 0
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -277,6 +277,35 @@ static void __do_IRQ_guest(int vector)
}
}
+/*
+ * Retrieve Xen irq-descriptor corresponding to a domain-specific irq.
+ * The descriptor is returned locked. This function is safe against changes
+ * to the per-domain irq-to-vector mapping.
+ */
+static irq_desc_t *domain_spin_lock_irq_desc(
+ struct domain *d, int irq, unsigned long *pflags)
+{
+ unsigned int vector;
+ unsigned long flags;
+ irq_desc_t *desc;
+
+ for ( ; ; )
+ {
+ vector = domain_irq_to_vector(d, irq);
+ if ( vector <= 0 )
+ return NULL;
+ desc = &irq_desc[vector];
+ spin_lock_irqsave(&desc->lock, flags);
+ if ( vector == domain_irq_to_vector(d, irq) )
+ break;
+ spin_unlock_irqrestore(&desc->lock, flags);
+ }
+
+ if ( pflags != NULL )
+ *pflags = flags;
+ return desc;
+}
+
/* Flush all ready EOIs from the top of this CPU's pending-EOI stack. */
static void flush_ready_eoi(void *unused)
{
@@ -342,11 +371,13 @@ static void __pirq_guest_eoi(struct doma
cpumask_t cpu_eoi_map;
int vector;
- vector = domain_irq_to_vector(d, irq);
- desc = &irq_desc[vector];
- action = (irq_guest_action_t *)desc->action;
+ ASSERT(local_irq_is_enabled());
+ desc = domain_spin_lock_irq_desc(d, irq, NULL);
+ if ( desc == NULL )
+ return;
- spin_lock_irq(&desc->lock);
+ action = (irq_guest_action_t *)desc->action;
+ vector = desc - irq_desc;
ASSERT(!test_bit(irq, d->pirq_mask) ||
(action->ack_type != ACKTYPE_NONE));
@@ -418,7 +449,7 @@ int pirq_acktype(struct domain *d, int i
unsigned int vector;
vector = domain_irq_to_vector(d, irq);
- if ( vector == 0 )
+ if ( vector <= 0 )
return ACKTYPE_NONE;
desc = &irq_desc[vector];
@@ -447,13 +478,6 @@ int pirq_acktype(struct domain *d, int i
if ( !strcmp(desc->handler->typename, "XT-PIC") )
return ACKTYPE_UNMASK;
- if ( strstr(desc->handler->typename, "MPIC") )
- {
- if ( desc->status & IRQ_LEVEL )
- return (desc->status & IRQ_PER_CPU) ? ACKTYPE_EOI : ACKTYPE_UNMASK;
- return ACKTYPE_NONE; /* edge-triggered => no final EOI */
- }
-
printk("Unknown PIC type '%s' for IRQ %d\n", desc->handler->typename, irq);
BUG();
@@ -462,21 +486,18 @@ int pirq_acktype(struct domain *d, int i
int pirq_shared(struct domain *d, int irq)
{
- unsigned int vector;
irq_desc_t *desc;
irq_guest_action_t *action;
unsigned long flags;
int shared;
- vector = domain_irq_to_vector(d, irq);
- if ( vector == 0 )
+ desc = domain_spin_lock_irq_desc(d, irq, &flags);
+ if ( desc == NULL )
return 0;
- desc = &irq_desc[vector];
-
- spin_lock_irqsave(&desc->lock, flags);
action = (irq_guest_action_t *)desc->action;
shared = ((desc->status & IRQ_GUEST) && (action->nr_guests > 1));
+
spin_unlock_irqrestore(&desc->lock, flags);
return shared;
@@ -491,16 +512,15 @@ int pirq_guest_bind(struct vcpu *v, int
int rc = 0;
cpumask_t cpumask = CPU_MASK_NONE;
+ WARN_ON(!spin_is_locked(&v->domain->evtchn_lock));
+
retry:
- vector = domain_irq_to_vector(v->domain, irq);
- if ( vector == 0 )
+ desc = domain_spin_lock_irq_desc(v->domain, irq, &flags);
+ if ( desc == NULL )
return -EINVAL;
- desc = &irq_desc[vector];
-
- spin_lock_irqsave(&desc->lock, flags);
-
action = (irq_guest_action_t *)desc->action;
+ vector = desc - irq_desc;
if ( !(desc->status & IRQ_GUEST) )
{
@@ -575,26 +595,39 @@ int pirq_guest_bind(struct vcpu *v, int
return rc;
}
-void pirq_guest_unbind(struct domain *d, int irq)
+int pirq_guest_unbind(struct domain *d, int irq)
{
- unsigned int vector;
+ int vector;
irq_desc_t *desc;
irq_guest_action_t *action;
cpumask_t cpu_eoi_map;
unsigned long flags;
- int i;
+ int i, rc = 0;
- vector = domain_irq_to_vector(d, irq);
- desc = &irq_desc[vector];
- BUG_ON(vector == 0);
+ WARN_ON(!spin_is_locked(&d->evtchn_lock));
- spin_lock_irqsave(&desc->lock, flags);
+ desc = domain_spin_lock_irq_desc(d, irq, &flags);
+ if ( unlikely(desc == NULL) )
+ {
+ if ( !msi_enable || (vector = -domain_irq_to_vector(d, irq)) == 0 )
+ return -EINVAL;
+ BUG_ON(vector <= 0);
+ desc = &irq_desc[vector];
+ spin_lock_irqsave(&desc->lock, flags);
+ d->arch.pirq_vector[irq] = d->arch.vector_pirq[vector] = 0;
+ goto out;
+ }
action = (irq_guest_action_t *)desc->action;
+ vector = desc - irq_desc;
- i = 0;
- while ( action->guest[i] && (action->guest[i] != d) )
- i++;
+ for ( i = 0; (i < action->nr_guests) && (action->guest[i] != d); i++ )
+ continue;
+ if ( i == action->nr_guests )
+ {
+ rc = -EINVAL;
+ goto out;
+ }
memmove(&action->guest[i], &action->guest[i+1], IRQ_MAX_GUESTS-i-1);
action->nr_guests--;
@@ -661,7 +694,8 @@ void pirq_guest_unbind(struct domain *d,
desc->handler->shutdown(vector);
out:
- spin_unlock_irqrestore(&desc->lock, flags);
+ spin_unlock_irqrestore(&desc->lock, flags);
+ return rc;
}
extern void dump_ioapic_irq_info(void);
--- a/xen/arch/x86/msi.c
+++ b/xen/arch/x86/msi.c
@@ -727,7 +727,6 @@ void pci_disable_msi(int vector)
__pci_disable_msix(vector);
}
-extern struct hw_interrupt_type pci_msi_type;
static void msi_free_vectors(struct pci_dev* dev)
{
struct msi_desc *entry, *tmp;
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -26,17 +26,11 @@ int
ioapic_guest_write(
unsigned long physbase, unsigned int reg, u32 pval);
-
-extern struct hw_interrupt_type pci_msi_type;
-
static int get_free_pirq(struct domain *d, int type, int index)
{
int i;
- if ( d == NULL )
- return -EINVAL;
-
- ASSERT(spin_is_locked(&d->arch.irq_lock));
+ ASSERT(spin_is_locked(&d->evtchn_lock));
if ( type == MAP_PIRQ_TYPE_GSI )
{
@@ -64,11 +58,10 @@ static int map_domain_pirq(struct domain
int ret = 0;
int old_vector, old_pirq;
struct msi_info msi;
+ irq_desc_t *desc;
+ unsigned long flags;
- if ( d == NULL )
- return -EINVAL;
-
- ASSERT(spin_is_locked(&d->arch.irq_lock));
+ ASSERT(spin_is_locked(&d->evtchn_lock));
if ( !IS_PRIV(current->domain) )
return -EPERM;
@@ -88,8 +81,7 @@ static int map_domain_pirq(struct domain
{
dprintk(XENLOG_G_ERR, "dom%d: pirq %d or vector %d already mapped\n",
d->domain_id, pirq, vector);
- ret = -EINVAL;
- goto done;
+ return -EINVAL;
}
ret = irq_permit_access(d, pirq);
@@ -97,17 +89,14 @@ static int map_domain_pirq(struct domain
{
dprintk(XENLOG_G_ERR, "dom%d: could not permit access to irq %d\n",
d->domain_id, pirq);
- goto done;
+ return ret;
}
+ desc = &irq_desc[vector];
+ spin_lock_irqsave(&desc->lock, flags);
+
if ( map && MAP_PIRQ_TYPE_MSI == map->type )
{
- irq_desc_t *desc;
- unsigned long flags;
-
- desc = &irq_desc[vector];
-
- spin_lock_irqsave(&desc->lock, flags);
if ( desc->handler != &no_irq_type )
dprintk(XENLOG_G_ERR, "dom%d: vector %d in use\n",
d->domain_id, vector);
@@ -120,8 +109,6 @@ static int map_domain_pirq(struct domain
msi.vector = vector;
ret = pci_enable_msi(&msi);
-
- spin_unlock_irqrestore(&desc->lock, flags);
if ( ret )
goto done;
}
@@ -130,6 +117,7 @@ static int map_domain_pirq(struct domain
d->arch.vector_pirq[vector] = pirq;
done:
+ spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
@@ -145,11 +133,11 @@ static int unmap_domain_pirq(struct doma
if ( !IS_PRIV(current->domain) )
return -EINVAL;
- ASSERT(spin_is_locked(&d->arch.irq_lock));
+ ASSERT(spin_is_locked(&d->evtchn_lock));
vector = d->arch.pirq_vector[pirq];
- if ( !vector )
+ if ( vector <= 0 )
{
dprintk(XENLOG_G_ERR, "dom%d: pirq %d not mapped\n",
d->domain_id, pirq);
@@ -159,21 +147,35 @@ static int unmap_domain_pirq(struct doma
{
unsigned long flags;
irq_desc_t *desc;
+ bool_t forced_unbind = (pirq_guest_unbind(d, pirq) == 0);
+
+ if ( forced_unbind )
+ dprintk(XENLOG_G_WARNING, "dom%d: forcing unbind of pirq %d\n",
+ d->domain_id, pirq);
desc = &irq_desc[vector];
spin_lock_irqsave(&desc->lock, flags);
+
+ BUG_ON(vector != d->arch.pirq_vector[pirq]);
+
if ( desc->msi_desc )
pci_disable_msi(vector);
if ( desc->handler == &pci_msi_type )
+ desc->handler = &no_irq_type;
+
+ if ( !forced_unbind )
{
- /* MSI is not shared, so should be released already */
- BUG_ON(desc->status & IRQ_GUEST);
- irq_desc[vector].handler = &no_irq_type;
+ d->arch.pirq_vector[pirq] = 0;
+ d->arch.vector_pirq[vector] = 0;
+ }
+ else
+ {
+ d->arch.pirq_vector[pirq] = -vector;
+ d->arch.vector_pirq[vector] = -pirq;
}
- spin_unlock_irqrestore(&desc->lock, flags);
- d->arch.pirq_vector[pirq] = d->arch.vector_pirq[vector] = 0;
+ spin_unlock_irqrestore(&desc->lock, flags);
}
ret = irq_deny_access(d, pirq);
@@ -189,7 +191,6 @@ static int physdev_map_pirq(struct physd
{
struct domain *d;
int vector, pirq, ret = 0;
- unsigned long flags;
/* if msi_enable is not enabled, map always succeeds */
if ( !msi_enable )
@@ -250,8 +251,8 @@ static int physdev_map_pirq(struct physd
goto free_domain;
}
- spin_lock_irqsave(&d->arch.irq_lock, flags);
- if ( map->pirq == -1 )
+ spin_lock(&d->evtchn_lock);
+ if ( map->pirq < 0 )
{
if ( d->arch.vector_pirq[vector] )
{
@@ -259,6 +260,11 @@ static int physdev_map_pirq(struct physd
d->domain_id, map->index, map->pirq,
d->arch.vector_pirq[vector]);
pirq = d->arch.vector_pirq[vector];
+ if ( pirq < 0 )
+ {
+ ret = -EBUSY;
+ goto done;
+ }
}
else
{
@@ -291,7 +297,7 @@ static int physdev_map_pirq(struct physd
if ( !ret )
map->pirq = pirq;
done:
- spin_unlock_irqrestore(&d->arch.irq_lock, flags);
+ spin_unlock(&d->evtchn_lock);
free_domain:
rcu_unlock_domain(d);
return ret;
@@ -300,7 +306,6 @@ free_domain:
static int physdev_unmap_pirq(struct physdev_unmap_pirq *unmap)
{
struct domain *d;
- unsigned long flags;
int ret;
if ( !msi_enable )
@@ -323,9 +328,10 @@ static int physdev_unmap_pirq(struct phy
return -ESRCH;
}
- spin_lock_irqsave(&d->arch.irq_lock, flags);
+ spin_lock(&d->evtchn_lock);
ret = unmap_domain_pirq(d, unmap->pirq);
- spin_unlock_irqrestore(&d->arch.irq_lock, flags);
+ spin_unlock(&d->evtchn_lock);
+
rcu_unlock_domain(d);
return ret;
@@ -431,7 +437,6 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
case PHYSDEVOP_alloc_irq_vector: {
struct physdev_irq irq_op;
- unsigned long flags;
ret = -EFAULT;
if ( copy_from_guest(&irq_op, arg, 1) != 0 )
@@ -456,9 +461,9 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
if ( msi_enable )
{
- spin_lock_irqsave(&dom0->arch.irq_lock, flags);
+ spin_lock(&dom0->evtchn_lock);
ret = map_domain_pirq(dom0, irq_op.irq, irq_op.vector, NULL);
- spin_unlock_irqrestore(&dom0->arch.irq_lock, flags);
+ spin_unlock(&dom0->evtchn_lock);
}
if ( copy_to_guest(arg, &irq_op, 1) != 0 )
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -376,7 +376,8 @@ static long __evtchn_close(struct domain
break;
case ECS_PIRQ:
- pirq_guest_unbind(d1, chn1->u.pirq);
+ if ( pirq_guest_unbind(d1, chn1->u.pirq) != 0 )
+ BUG();
d1->pirq_to_evtchn[chn1->u.pirq] = 0;
break;
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -235,7 +235,7 @@ struct arch_domain
/* Shadow translated domain: P2M mapping */
pagetable_t phys_table;
- spinlock_t irq_lock;
+ /* NB. protected by d->evtchn_lock and by irq_desc[vector].lock */
int vector_pirq[NR_VECTORS];
int pirq_vector[NR_PIRQS];
--- a/xen/include/asm-x86/irq.h
+++ b/xen/include/asm-x86/irq.h
@@ -52,6 +52,7 @@ extern atomic_t irq_mis_count;
int pirq_acktype(struct domain *d, int irq);
int pirq_shared(struct domain *d , int irq);
-extern int domain_irq_to_vector(struct domain *d, int irq);
-extern int domain_vector_to_irq(struct domain *d, int vector);
+#define domain_irq_to_vector(d, irq) (msi_enable ? (d)->arch.pirq_vector[irq] : irq_to_vector(irq))
+#define domain_vector_to_irq(d, vec) (msi_enable ? (d)->arch.vector_pirq[vec] : vector_to_irq(vec))
+
#endif /* _ASM_HW_IRQ_H */
--- a/xen/include/asm-x86/msi.h
+++ b/xen/include/asm-x86/msi.h
@@ -106,7 +106,7 @@ struct msi_desc {
*/
#define NR_HP_RESERVED_VECTORS 20
-extern int vector_irq[NR_VECTORS];
+extern struct hw_interrupt_type pci_msi_type;
/*
* MSI-X Address Register
--- a/xen/include/xen/irq.h
+++ b/xen/include/xen/irq.h
@@ -22,7 +22,6 @@ struct irqaction
#define IRQ_PENDING 4 /* IRQ pending - replay on enable */
#define IRQ_REPLAY 8 /* IRQ has been replayed but not acked yet */
#define IRQ_GUEST 16 /* IRQ is handled by guest OS(es) */
-#define IRQ_LEVEL 64 /* IRQ level triggered */
#define IRQ_PER_CPU 256 /* IRQ is per CPU */
/*
@@ -78,7 +77,7 @@ struct vcpu;
extern int pirq_guest_eoi(struct domain *d, int irq);
extern int pirq_guest_unmask(struct domain *d);
extern int pirq_guest_bind(struct vcpu *v, int irq, int will_share);
-extern void pirq_guest_unbind(struct domain *d, int irq);
+extern int pirq_guest_unbind(struct domain *d, int irq);
static inline void set_native_irq_info(int irq, cpumask_t mask)
{

@@ -0,0 +1,52 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1222333937 -3600
# Node ID d4a093819310b70f24dfdc986755588ed5581a6e
# Parent 50170dc8649cb43630f067eb9b2db3ce3f9c6db4
x86 pmstat: Fix get_pm_info hypercall argument checking.
Signed-off-by: Lu Guanqun <guanqun.lu@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
--- a/xen/arch/x86/acpi/pmstat.c
+++ b/xen/arch/x86/acpi/pmstat.c
@@ -36,6 +36,7 @@
#include <xen/cpumask.h>
#include <asm/processor.h>
#include <xen/percpu.h>
+#include <xen/domain.h>
#include <public/sysctl.h>
#include <acpi/cpufreq/cpufreq.h>
@@ -53,14 +54,26 @@ int do_get_pm_info(struct xen_sysctl_get
struct processor_pminfo *pmpt = &processor_pminfo[op->cpuid];
/* to protect the case when Px was not controlled by xen */
- if ( (!(pmpt->perf.init & XEN_PX_INIT)) &&
- (op->type & PMSTAT_CATEGORY_MASK) == PMSTAT_PX )
+ if ( (op->cpuid >= NR_CPUS) || !cpu_online(op->cpuid) )
return -EINVAL;
- if ( !cpu_online(op->cpuid) )
- return -EINVAL;
+ switch ( op->type & PMSTAT_CATEGORY_MASK )
+ {
+ case PMSTAT_CX:
+ if ( !(xen_processor_pmbits & XEN_PROCESSOR_PM_CX) )
+ return -ENODEV;
+ break;
+ case PMSTAT_PX:
+ if ( !(xen_processor_pmbits & XEN_PROCESSOR_PM_PX) )
+ return -ENODEV;
+ if ( !(pmpt->perf.init & XEN_PX_INIT) )
+ return -EINVAL;
+ break;
+ default:
+ return -ENODEV;
+ }
- switch( op->type )
+ switch ( op->type )
{
case PMSTAT_get_max_px:
{

@@ -0,0 +1,268 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1222349872 -3600
# Node ID e1507b441be45d6d1cac25a196b53beff857a083
# Parent ddf62f69611127319e3c756b9fbc82e29f59ef36
x86: Clean up and fix 18539:31f09a5e24cf8
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
--- a/xen/arch/ia64/xen/irq.c
+++ b/xen/arch/ia64/xen/irq.c
@@ -459,12 +459,12 @@ int pirq_guest_bind(struct vcpu *v, int
return rc;
}
-int pirq_guest_unbind(struct domain *d, int irq)
+void pirq_guest_unbind(struct domain *d, int irq)
{
irq_desc_t *desc = &irq_desc[irq];
irq_guest_action_t *action;
unsigned long flags;
- int i, rc = 0;
+ int i;
spin_lock_irqsave(&desc->lock, flags);
@@ -472,11 +472,7 @@ int pirq_guest_unbind(struct domain *d,
for ( i = 0; (i < action->nr_guests) && (action->guest[i] != d); i++ )
continue;
- if ( i == action->nr_guests )
- {
- rc = -EINVAL;
- goto out;
- }
+ BUG_ON(i == action->nr_guests);
memmove(&action->guest[i], &action->guest[i+1], IRQ_MAX_GUESTS-i-1);
action->nr_guests--;
@@ -496,9 +492,7 @@ int pirq_guest_unbind(struct domain *d,
desc->handler->shutdown(irq);
}
- out:
spin_unlock_irqrestore(&desc->lock, flags);
- return rc;
}
void
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -508,14 +508,14 @@ int pirq_guest_bind(struct vcpu *v, int
unsigned int vector;
irq_desc_t *desc;
irq_guest_action_t *action;
- unsigned long flags;
int rc = 0;
cpumask_t cpumask = CPU_MASK_NONE;
WARN_ON(!spin_is_locked(&v->domain->evtchn_lock));
+ BUG_ON(!local_irq_is_enabled());
retry:
- desc = domain_spin_lock_irq_desc(v->domain, irq, &flags);
+ desc = domain_spin_lock_irq_desc(v->domain, irq, NULL);
if ( desc == NULL )
return -EINVAL;
@@ -575,7 +575,7 @@ int pirq_guest_bind(struct vcpu *v, int
*/
ASSERT(action->ack_type == ACKTYPE_EOI);
ASSERT(desc->status & IRQ_DISABLED);
- spin_unlock_irqrestore(&desc->lock, flags);
+ spin_unlock_irq(&desc->lock);
cpu_relax();
goto retry;
}
@@ -591,43 +591,25 @@ int pirq_guest_bind(struct vcpu *v, int
action->guest[action->nr_guests++] = v->domain;
out:
- spin_unlock_irqrestore(&desc->lock, flags);
+ spin_unlock_irq(&desc->lock);
return rc;
}
-int pirq_guest_unbind(struct domain *d, int irq)
+static void __pirq_guest_unbind(struct domain *d, int irq, irq_desc_t *desc)
{
- int vector;
- irq_desc_t *desc;
+ unsigned int vector;
irq_guest_action_t *action;
cpumask_t cpu_eoi_map;
- unsigned long flags;
- int i, rc = 0;
+ int i;
- WARN_ON(!spin_is_locked(&d->evtchn_lock));
-
- desc = domain_spin_lock_irq_desc(d, irq, &flags);
- if ( unlikely(desc == NULL) )
- {
- if ( !msi_enable || (vector = -domain_irq_to_vector(d, irq)) == 0 )
- return -EINVAL;
- BUG_ON(vector <= 0);
- desc = &irq_desc[vector];
- spin_lock_irqsave(&desc->lock, flags);
- d->arch.pirq_vector[irq] = d->arch.vector_pirq[vector] = 0;
- goto out;
- }
+ BUG_ON(!(desc->status & IRQ_GUEST));
action = (irq_guest_action_t *)desc->action;
vector = desc - irq_desc;
for ( i = 0; (i < action->nr_guests) && (action->guest[i] != d); i++ )
continue;
- if ( i == action->nr_guests )
- {
- rc = -EINVAL;
- goto out;
- }
+ BUG_ON(i == action->nr_guests);
memmove(&action->guest[i], &action->guest[i+1], IRQ_MAX_GUESTS-i-1);
action->nr_guests--;
@@ -645,9 +627,9 @@ int pirq_guest_unbind(struct domain *d,
(action->nr_guests != 0) )
{
cpu_eoi_map = action->cpu_eoi_map;
- spin_unlock_irqrestore(&desc->lock, flags);
+ spin_unlock_irq(&desc->lock);
on_selected_cpus(cpu_eoi_map, set_eoi_ready, desc, 1, 0);
- spin_lock_irqsave(&desc->lock, flags);
+ spin_lock_irq(&desc->lock);
}
break;
}
@@ -659,7 +641,7 @@ int pirq_guest_unbind(struct domain *d,
BUG_ON(test_bit(irq, d->pirq_mask));
if ( action->nr_guests != 0 )
- goto out;
+ return;
BUG_ON(action->in_flight != 0);
@@ -679,9 +661,9 @@ int pirq_guest_unbind(struct domain *d,
if ( !cpus_empty(cpu_eoi_map) )
{
BUG_ON(action->ack_type != ACKTYPE_EOI);
- spin_unlock_irqrestore(&desc->lock, flags);
+ spin_unlock_irq(&desc->lock);
on_selected_cpus(cpu_eoi_map, set_eoi_ready, desc, 1, 1);
- spin_lock_irqsave(&desc->lock, flags);
+ spin_lock_irq(&desc->lock);
}
BUG_ON(!cpus_empty(action->cpu_eoi_map));
@@ -692,10 +674,63 @@ int pirq_guest_unbind(struct domain *d,
desc->status &= ~IRQ_INPROGRESS;
kill_timer(&irq_guest_eoi_timer[vector]);
desc->handler->shutdown(vector);
+}
+
+void pirq_guest_unbind(struct domain *d, int irq)
+{
+ irq_desc_t *desc;
+ int vector;
+
+ WARN_ON(!spin_is_locked(&d->evtchn_lock));
+
+ BUG_ON(!local_irq_is_enabled());
+ desc = domain_spin_lock_irq_desc(d, irq, NULL);
+
+ if ( desc == NULL )
+ {
+ if ( !msi_enable )
+ return;
+ vector = -domain_irq_to_vector(d, irq);
+ BUG_ON(vector <= 0);
+ desc = &irq_desc[vector];
+ spin_lock_irq(&desc->lock);
+ d->arch.pirq_vector[irq] = d->arch.vector_pirq[vector] = 0;
+ }
+ else
+ {
+ __pirq_guest_unbind(d, irq, desc);
+ }
+
+ spin_unlock_irq(&desc->lock);
+}
+
+int pirq_guest_force_unbind(struct domain *d, int irq)
+{
+ irq_desc_t *desc;
+ irq_guest_action_t *action;
+ int i, bound = 0;
+
+ WARN_ON(!spin_is_locked(&d->evtchn_lock));
+
+ BUG_ON(!local_irq_is_enabled());
+ desc = domain_spin_lock_irq_desc(d, irq, NULL);
+ BUG_ON(desc == NULL);
+
+ if ( !(desc->status & IRQ_GUEST) )
+ goto out;
+
+ action = (irq_guest_action_t *)desc->action;
+ for ( i = 0; (i < action->nr_guests) && (action->guest[i] != d); i++ )
+ continue;
+ if ( i == action->nr_guests )
+ goto out;
+
+ bound = 1;
+ __pirq_guest_unbind(d, irq, desc);
out:
- spin_unlock_irqrestore(&desc->lock, flags);
- return rc;
+ spin_unlock_irq(&desc->lock);
+ return bound;
}
extern void dump_ioapic_irq_info(void);
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -147,7 +147,7 @@ static int unmap_domain_pirq(struct doma
{
unsigned long flags;
irq_desc_t *desc;
- bool_t forced_unbind = (pirq_guest_unbind(d, pirq) == 0);
+ bool_t forced_unbind = pirq_guest_force_unbind(d, pirq);
if ( forced_unbind )
dprintk(XENLOG_G_WARNING, "dom%d: forcing unbind of pirq %d\n",
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -376,8 +376,7 @@ static long __evtchn_close(struct domain
break;
case ECS_PIRQ:
- if ( pirq_guest_unbind(d1, chn1->u.pirq) != 0 )
- BUG();
+ pirq_guest_unbind(d1, chn1->u.pirq);
d1->pirq_to_evtchn[chn1->u.pirq] = 0;
break;
--- a/xen/include/asm-x86/irq.h
+++ b/xen/include/asm-x86/irq.h
@@ -55,4 +55,6 @@ int pirq_shared(struct domain *d , int i
#define domain_irq_to_vector(d, irq) (msi_enable ? (d)->arch.pirq_vector[irq] : irq_to_vector(irq))
#define domain_vector_to_irq(d, vec) (msi_enable ? (d)->arch.vector_pirq[vec] : vector_to_irq(vec))
+int pirq_guest_force_unbind(struct domain *d, int irq);
+
#endif /* _ASM_HW_IRQ_H */
--- a/xen/include/xen/irq.h
+++ b/xen/include/xen/irq.h
@@ -77,7 +77,7 @@ struct vcpu;
extern int pirq_guest_eoi(struct domain *d, int irq);
extern int pirq_guest_unmask(struct domain *d);
extern int pirq_guest_bind(struct vcpu *v, int irq, int will_share);
-extern int pirq_guest_unbind(struct domain *d, int irq);
+extern void pirq_guest_unbind(struct domain *d, int irq);
static inline void set_native_irq_info(int irq, cpumask_t mask)
{

18573-move-pirq-logic.patch

@@ -0,0 +1,449 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1223459328 -3600
# Node ID ed398097c03e16dacb1f3af19fa8faddf2deae1f
# Parent 1f85f7b216b34bfda4911b6a46548478f0e5d682
x86: Move pirq logic to irq.c.
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -14,8 +14,11 @@
#include <xen/sched.h>
#include <xen/keyhandler.h>
#include <xen/compat.h>
-#include <asm/current.h>
+#include <xen/iocap.h>
#include <xen/iommu.h>
+#include <asm/msi.h>
+#include <asm/current.h>
+#include <public/physdev.h>
/* opt_noirqbalance: If true, software IRQ balancing/affinity is disabled. */
int opt_noirqbalance = 0;
@@ -733,6 +736,157 @@ int pirq_guest_force_unbind(struct domai
return bound;
}
+int get_free_pirq(struct domain *d, int type, int index)
+{
+ int i;
+
+ ASSERT(spin_is_locked(&d->evtchn_lock));
+
+ if ( type == MAP_PIRQ_TYPE_GSI )
+ {
+ for ( i = 16; i < NR_PIRQS; i++ )
+ if ( !d->arch.pirq_vector[i] )
+ break;
+ if ( i == NR_PIRQS )
+ return -ENOSPC;
+ }
+ else
+ {
+ for ( i = NR_PIRQS - 1; i >= 16; i-- )
+ if ( !d->arch.pirq_vector[i] )
+ break;
+ if ( i == 16 )
+ return -ENOSPC;
+ }
+
+ return i;
+}
+
+int map_domain_pirq(
+ struct domain *d, int pirq, int vector, int type, void *data)
+{
+ int ret = 0;
+ int old_vector, old_pirq;
+ irq_desc_t *desc;
+ unsigned long flags;
+
+ ASSERT(spin_is_locked(&d->evtchn_lock));
+
+ if ( !IS_PRIV(current->domain) )
+ return -EPERM;
+
+ if ( pirq < 0 || pirq >= NR_PIRQS || vector < 0 || vector >= NR_VECTORS )
+ {
+ dprintk(XENLOG_G_ERR, "dom%d: invalid pirq %d or vector %d\n",
+ d->domain_id, pirq, vector);
+ return -EINVAL;
+ }
+
+ old_vector = d->arch.pirq_vector[pirq];
+ old_pirq = d->arch.vector_pirq[vector];
+
+ if ( (old_vector && (old_vector != vector) ) ||
+ (old_pirq && (old_pirq != pirq)) )
+ {
+ dprintk(XENLOG_G_ERR, "dom%d: pirq %d or vector %d already mapped\n",
+ d->domain_id, pirq, vector);
+ return -EINVAL;
+ }
+
+ ret = irq_permit_access(d, pirq);
+ if ( ret )
+ {
+ dprintk(XENLOG_G_ERR, "dom%d: could not permit access to irq %d\n",
+ d->domain_id, pirq);
+ return ret;
+ }
+
+ desc = &irq_desc[vector];
+ spin_lock_irqsave(&desc->lock, flags);
+
+ if ( type == MAP_PIRQ_TYPE_MSI )
+ {
+ struct msi_info *msi = (struct msi_info *)data;
+ if ( desc->handler != &no_irq_type )
+ dprintk(XENLOG_G_ERR, "dom%d: vector %d in use\n",
+ d->domain_id, vector);
+ desc->handler = &pci_msi_type;
+ ret = pci_enable_msi(msi);
+ if ( ret )
+ goto done;
+ }
+
+ d->arch.pirq_vector[pirq] = vector;
+ d->arch.vector_pirq[vector] = pirq;
+
+done:
+ spin_unlock_irqrestore(&desc->lock, flags);
+ return ret;
+}
+
+/* The pirq should have been unbound before this call. */
+int unmap_domain_pirq(struct domain *d, int pirq)
+{
+ unsigned long flags;
+ irq_desc_t *desc;
+ int vector, ret = 0;
+ bool_t forced_unbind;
+
+ if ( (pirq < 0) || (pirq >= NR_PIRQS) )
+ return -EINVAL;
+
+ if ( !IS_PRIV(current->domain) )
+ return -EINVAL;
+
+ ASSERT(spin_is_locked(&d->evtchn_lock));
+
+ vector = d->arch.pirq_vector[pirq];
+ if ( vector <= 0 )
+ {
+ dprintk(XENLOG_G_ERR, "dom%d: pirq %d not mapped\n",
+ d->domain_id, pirq);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ forced_unbind = pirq_guest_force_unbind(d, pirq);
+ if ( forced_unbind )
+ dprintk(XENLOG_G_WARNING, "dom%d: forcing unbind of pirq %d\n",
+ d->domain_id, pirq);
+
+ desc = &irq_desc[vector];
+ spin_lock_irqsave(&desc->lock, flags);
+
+ BUG_ON(vector != d->arch.pirq_vector[pirq]);
+
+ if ( desc->msi_desc )
+ pci_disable_msi(vector);
+
+ if ( desc->handler == &pci_msi_type )
+ desc->handler = &no_irq_type;
+
+ if ( !forced_unbind )
+ {
+ d->arch.pirq_vector[pirq] = 0;
+ d->arch.vector_pirq[vector] = 0;
+ }
+ else
+ {
+ d->arch.pirq_vector[pirq] = -vector;
+ d->arch.vector_pirq[vector] = -pirq;
+ }
+
+ spin_unlock_irqrestore(&desc->lock, flags);
+
+ ret = irq_deny_access(d, pirq);
+ if ( ret )
+ dprintk(XENLOG_G_ERR, "dom%d: could not deny access to irq %d\n",
+ d->domain_id, pirq);
+
+ done:
+ return ret;
+}
+
extern void dump_ioapic_irq_info(void);
static void dump_irqs(unsigned char key)
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -26,171 +26,12 @@ int
ioapic_guest_write(
unsigned long physbase, unsigned int reg, u32 pval);
-static int get_free_pirq(struct domain *d, int type, int index)
-{
- int i;
-
- ASSERT(spin_is_locked(&d->evtchn_lock));
-
- if ( type == MAP_PIRQ_TYPE_GSI )
- {
- for ( i = 16; i < NR_PIRQS; i++ )
- if ( !d->arch.pirq_vector[i] )
- break;
- if ( i == NR_PIRQS )
- return -ENOSPC;
- }
- else
- {
- for ( i = NR_PIRQS - 1; i >= 16; i-- )
- if ( !d->arch.pirq_vector[i] )
- break;
- if ( i == 16 )
- return -ENOSPC;
- }
-
- return i;
-}
-
-static int map_domain_pirq(struct domain *d, int pirq, int vector,
- struct physdev_map_pirq *map)
-{
- int ret = 0;
- int old_vector, old_pirq;
- struct msi_info msi;
- irq_desc_t *desc;
- unsigned long flags;
-
- ASSERT(spin_is_locked(&d->evtchn_lock));
-
- if ( !IS_PRIV(current->domain) )
- return -EPERM;
-
- if ( pirq < 0 || pirq >= NR_PIRQS || vector < 0 || vector >= NR_VECTORS )
- {
- dprintk(XENLOG_G_ERR, "dom%d: invalid pirq %d or vector %d\n",
- d->domain_id, pirq, vector);
- return -EINVAL;
- }
-
- old_vector = d->arch.pirq_vector[pirq];
- old_pirq = d->arch.vector_pirq[vector];
-
- if ( (old_vector && (old_vector != vector) ) ||
- (old_pirq && (old_pirq != pirq)) )
- {
- dprintk(XENLOG_G_ERR, "dom%d: pirq %d or vector %d already mapped\n",
- d->domain_id, pirq, vector);
- return -EINVAL;
- }
-
- ret = irq_permit_access(d, pirq);
- if ( ret )
- {
- dprintk(XENLOG_G_ERR, "dom%d: could not permit access to irq %d\n",
- d->domain_id, pirq);
- return ret;
- }
-
- desc = &irq_desc[vector];
- spin_lock_irqsave(&desc->lock, flags);
-
- if ( map && MAP_PIRQ_TYPE_MSI == map->type )
- {
- if ( desc->handler != &no_irq_type )
- dprintk(XENLOG_G_ERR, "dom%d: vector %d in use\n",
- d->domain_id, vector);
- desc->handler = &pci_msi_type;
-
- msi.bus = map->bus;
- msi.devfn = map->devfn;
- msi.entry_nr = map->entry_nr;
- msi.table_base = map->table_base;
- msi.vector = vector;
-
- ret = pci_enable_msi(&msi);
- if ( ret )
- goto done;
- }
-
- d->arch.pirq_vector[pirq] = vector;
- d->arch.vector_pirq[vector] = pirq;
-
-done:
- spin_unlock_irqrestore(&desc->lock, flags);
- return ret;
-}
-
-/* The pirq should have been unbound before this call. */
-static int unmap_domain_pirq(struct domain *d, int pirq)
-{
- int ret = 0;
- int vector;
-
- if ( d == NULL || pirq < 0 || pirq >= NR_PIRQS )
- return -EINVAL;
-
- if ( !IS_PRIV(current->domain) )
- return -EINVAL;
-
- ASSERT(spin_is_locked(&d->evtchn_lock));
-
- vector = d->arch.pirq_vector[pirq];
-
- if ( vector <= 0 )
- {
- dprintk(XENLOG_G_ERR, "dom%d: pirq %d not mapped\n",
- d->domain_id, pirq);
- ret = -EINVAL;
- }
- else
- {
- unsigned long flags;
- irq_desc_t *desc;
- bool_t forced_unbind = pirq_guest_force_unbind(d, pirq);
-
- if ( forced_unbind )
- dprintk(XENLOG_G_WARNING, "dom%d: forcing unbind of pirq %d\n",
- d->domain_id, pirq);
-
- desc = &irq_desc[vector];
- spin_lock_irqsave(&desc->lock, flags);
-
- BUG_ON(vector != d->arch.pirq_vector[pirq]);
-
- if ( desc->msi_desc )
- pci_disable_msi(vector);
-
- if ( desc->handler == &pci_msi_type )
- desc->handler = &no_irq_type;
-
- if ( !forced_unbind )
- {
- d->arch.pirq_vector[pirq] = 0;
- d->arch.vector_pirq[vector] = 0;
- }
- else
- {
- d->arch.pirq_vector[pirq] = -vector;
- d->arch.vector_pirq[vector] = -pirq;
- }
-
- spin_unlock_irqrestore(&desc->lock, flags);
- }
-
- ret = irq_deny_access(d, pirq);
-
- if ( ret )
- dprintk(XENLOG_G_ERR, "dom%d: could not deny access to irq %d\n",
- d->domain_id, pirq);
-
- return ret;
-}
-
static int physdev_map_pirq(struct physdev_map_pirq *map)
{
struct domain *d;
int vector, pirq, ret = 0;
+ struct msi_info _msi;
+ void *map_data = NULL;
/* if msi_enable is not enabled, map always succeeds */
if ( !msi_enable )
@@ -213,6 +54,7 @@ static int physdev_map_pirq(struct physd
goto free_domain;
}
+ /* Verify or get vector. */
switch ( map->type )
{
case MAP_PIRQ_TYPE_GSI:
@@ -227,15 +69,16 @@ static int physdev_map_pirq(struct physd
if ( !vector )
{
dprintk(XENLOG_G_ERR, "dom%d: map irq with no vector %d\n",
- d->domain_id, map->index);
+ d->domain_id, vector);
ret = -EINVAL;
goto free_domain;
}
break;
+
case MAP_PIRQ_TYPE_MSI:
vector = map->index;
- if ( vector == -1 )
- vector = assign_irq_vector(AUTO_ASSIGN);
+ if ( vector == -1 )
+ vector = assign_irq_vector(AUTO_ASSIGN);
if ( vector < 0 || vector >= NR_VECTORS )
{
@@ -244,13 +87,23 @@ static int physdev_map_pirq(struct physd
ret = -EINVAL;
goto free_domain;
}
+
+ _msi.bus = map->bus;
+ _msi.devfn = map->devfn;
+ _msi.entry_nr = map->entry_nr;
+ _msi.table_base = map->table_base;
+ _msi.vector = vector;
+ map_data = &_msi;
break;
+
default:
- dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n", d->domain_id, map->type);
+ dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n",
+ d->domain_id, map->type);
ret = -EINVAL;
goto free_domain;
}
+ /* Verify or get pirq. */
spin_lock(&d->evtchn_lock);
if ( map->pirq < 0 )
{
@@ -292,10 +145,10 @@ static int physdev_map_pirq(struct physd
}
- ret = map_domain_pirq(d, pirq, vector, map);
-
+ ret = map_domain_pirq(d, pirq, vector, map->type, map_data);
if ( !ret )
map->pirq = pirq;
+
done:
spin_unlock(&d->evtchn_lock);
free_domain:
@@ -462,7 +315,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
if ( msi_enable )
{
spin_lock(&dom0->evtchn_lock);
- ret = map_domain_pirq(dom0, irq_op.irq, irq_op.vector, NULL);
+ ret = map_domain_pirq(dom0, irq_op.irq, irq_op.vector,
+ MAP_PIRQ_TYPE_GSI, NULL);
spin_unlock(&dom0->evtchn_lock);
}
--- a/xen/include/asm-x86/irq.h
+++ b/xen/include/asm-x86/irq.h
@@ -52,6 +52,11 @@ extern atomic_t irq_mis_count;
int pirq_acktype(struct domain *d, int irq);
int pirq_shared(struct domain *d , int irq);
+int map_domain_pirq(struct domain *d, int pirq, int vector, int type,
+ void *data);
+int unmap_domain_pirq(struct domain *d, int pirq);
+int get_free_pirq(struct domain *d, int type, int index);
+
#define domain_irq_to_vector(d, irq) (msi_enable ? (d)->arch.pirq_vector[irq] : irq_to_vector(irq))
#define domain_vector_to_irq(d, vec) (msi_enable ? (d)->arch.vector_pirq[vec] : vector_to_irq(vec))

18574-msi-free-vector.patch

@@ -0,0 +1,205 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1223463099 -3600
# Node ID 51a05fb4c6014059058de48b83a9431e7474a456
# Parent ed398097c03e16dacb1f3af19fa8faddf2deae1f
x86: Free MSI vector when a pirq is unmapped.
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -459,6 +459,7 @@ void arch_domain_destroy(struct domain *
hvm_domain_destroy(d);
pci_release_devices(d);
+ free_domain_pirqs(d);
if ( !is_idle_domain(d) )
iommu_domain_destroy(d);
--- a/xen/arch/x86/i8259.c
+++ b/xen/arch/x86/i8259.c
@@ -408,6 +408,10 @@ void __init init_IRQ(void)
irq_desc[LEGACY_VECTOR(i)].handler = &i8259A_irq_type;
}
+ /* Never allocate the hypercall vector or Linux/BSD fast-trap vector. */
+ vector_irq[HYPERCALL_VECTOR] = NEVER_ASSIGN;
+ vector_irq[0x80] = NEVER_ASSIGN;
+
apic_intr_init();
/* Set the clock to HZ Hz */
--- a/xen/arch/x86/io_apic.c
+++ b/xen/arch/x86/io_apic.c
@@ -90,7 +90,8 @@ static struct irq_pin_list {
} irq_2_pin[PIN_MAP_SIZE];
static int irq_2_pin_free_entry = NR_IRQS;
-int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
+int vector_irq[NR_VECTORS] __read_mostly = {
+ [0 ... NR_VECTORS - 1] = FREE_TO_ASSIGN};
/*
* The common case is 1:1 IRQ<->pin mappings. Sometimes there are
@@ -669,40 +670,46 @@ static inline int IO_APIC_irq_trigger(in
/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
u8 irq_vector[NR_IRQ_VECTORS] __read_mostly;
+int free_irq_vector(int vector)
+{
+ int irq;
+
+ BUG_ON((vector > LAST_DYNAMIC_VECTOR) || (vector < FIRST_DYNAMIC_VECTOR));
+
+ spin_lock(&vector_lock);
+ if ((irq = vector_irq[vector]) == AUTO_ASSIGN)
+ vector_irq[vector] = FREE_TO_ASSIGN;
+ spin_unlock(&vector_lock);
+
+ return (irq == AUTO_ASSIGN) ? 0 : -EINVAL;
+}
+
int assign_irq_vector(int irq)
{
- static unsigned current_vector = FIRST_DYNAMIC_VECTOR, offset = 0;
+ static unsigned current_vector = FIRST_DYNAMIC_VECTOR;
unsigned vector;
BUG_ON(irq >= NR_IRQ_VECTORS);
+
spin_lock(&vector_lock);
- if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
+ if ((irq != AUTO_ASSIGN) && (IO_APIC_VECTOR(irq) > 0)) {
spin_unlock(&vector_lock);
return IO_APIC_VECTOR(irq);
}
-next:
- current_vector += 8;
+ vector = current_vector;
+ while (vector_irq[vector] != FREE_TO_ASSIGN) {
+ if (++vector > LAST_DYNAMIC_VECTOR)
+ vector = FIRST_DYNAMIC_VECTOR;
- /* Skip the hypercall vector. */
- if (current_vector == HYPERCALL_VECTOR)
- goto next;
-
- /* Skip the Linux/BSD fast-trap vector. */
- if (current_vector == 0x80)
- goto next;
-
- if (current_vector > LAST_DYNAMIC_VECTOR) {
- offset++;
- if (!(offset%8)) {
+ if (vector == current_vector) {
spin_unlock(&vector_lock);
return -ENOSPC;
}
- current_vector = FIRST_DYNAMIC_VECTOR + offset;
}
- vector = current_vector;
+ current_vector = vector;
vector_irq[vector] = irq;
if (irq != AUTO_ASSIGN)
IO_APIC_VECTOR(irq) = vector;
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -863,7 +863,10 @@ int unmap_domain_pirq(struct domain *d,
pci_disable_msi(vector);
if ( desc->handler == &pci_msi_type )
+ {
desc->handler = &no_irq_type;
+ free_irq_vector(vector);
+ }
if ( !forced_unbind )
{
@@ -887,6 +890,24 @@ int unmap_domain_pirq(struct domain *d,
return ret;
}
+void free_domain_pirqs(struct domain *d)
+{
+ int i;
+
+ ASSERT(d->is_dying == DOMDYING_dying);
+
+ if ( !msi_enable )
+ return;
+
+ spin_lock(&d->evtchn_lock);
+
+ for ( i = 0; i < NR_PIRQS; i++ )
+ if ( d->arch.pirq_vector[i] > 0 )
+ unmap_domain_pirq(d, i);
+
+ spin_unlock(&d->evtchn_lock);
+}
+
extern void dump_ioapic_irq_info(void);
static void dump_irqs(unsigned char key)
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -83,7 +83,7 @@ static int physdev_map_pirq(struct physd
if ( vector < 0 || vector >= NR_VECTORS )
{
dprintk(XENLOG_G_ERR, "dom%d: map irq with wrong vector %d\n",
- d->domain_id, map->index);
+ d->domain_id, vector);
ret = -EINVAL;
goto free_domain;
}
@@ -144,13 +144,14 @@ static int physdev_map_pirq(struct physd
pirq = map->pirq;
}
-
ret = map_domain_pirq(d, pirq, vector, map->type, map_data);
- if ( !ret )
+ if ( ret == 0 )
map->pirq = pirq;
done:
spin_unlock(&d->evtchn_lock);
+ if ( (ret != 0) && (map->type == MAP_PIRQ_TYPE_MSI) && (map->index == -1) )
+ free_irq_vector(vector);
free_domain:
rcu_unlock_domain(d);
return ret;
--- a/xen/include/asm-x86/io_apic.h
+++ b/xen/include/asm-x86/io_apic.h
@@ -192,5 +192,6 @@ static inline int ioapic_resume(void) {r
#endif
extern int assign_irq_vector(int irq);
+extern int free_irq_vector(int vector);
#endif
--- a/xen/include/asm-x86/irq.h
+++ b/xen/include/asm-x86/irq.h
@@ -19,7 +19,9 @@
extern int vector_irq[NR_VECTORS];
extern u8 irq_vector[NR_IRQ_VECTORS];
-#define AUTO_ASSIGN -1
+#define AUTO_ASSIGN -1
+#define NEVER_ASSIGN -2
+#define FREE_TO_ASSIGN -3
#define platform_legacy_irq(irq) ((irq) < 16)
@@ -56,6 +58,7 @@ int map_domain_pirq(struct domain *d, in
void *data);
int unmap_domain_pirq(struct domain *d, int pirq);
int get_free_pirq(struct domain *d, int type, int index);
+void free_domain_pirqs(struct domain *d);
#define domain_irq_to_vector(d, irq) (msi_enable ? (d)->arch.pirq_vector[irq] : irq_to_vector(irq))
#define domain_vector_to_irq(d, vec) (msi_enable ? (d)->arch.vector_pirq[vec] : vector_to_irq(vec))

18577-bad-assertion.patch

@@ -0,0 +1,19 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1223470858 -3600
# Node ID e66cecb66b1ed83b65804d2eb7c3a30f9f1f01d4
# Parent 5e4e234d58be41401909f160cb9ed2ee0379c6a9
x86: Remove bogus assertion from free_domain_pirqs().
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -894,8 +894,6 @@ void free_domain_pirqs(struct domain *d)
{
int i;
- ASSERT(d->is_dying == DOMDYING_dying);
-
if ( !msi_enable )
return;

@@ -0,0 +1,559 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1223547292 -3600
# Node ID a11ad61bdb5b188a8116b533c87c31d6e9bd62d4
# Parent b8f329d2c074a06b47f3be2b4e0bfe1ac5b232e5
Fix lock issue for hvm pass-through domain
This patch protects the hvm_irq_dpci structure with evtchn_lock, so
access to the domain's pirq_vector mapping is protected as well.
Signed-off-by: Jiang, Yunhong <yunhong.jiang@intel.com>
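The resulting lock nesting, sketched from the hunks below (d->evtchn_lock is
the outer lock guarding hvm_irq_dpci and the pirq-to-vector map; the
per-vector irq_desc lock nests inside it):

    spin_lock(&d->evtchn_lock);                      /* outer */
    desc = domain_spin_lock_irq_desc(d, irq, NULL);  /* inner irq_desc lock */
    /* ... manipulate hvm_irq_dpci->mirq[irq], timers, pirq mappings ... */
    spin_unlock(&desc->lock);
    spin_unlock(&d->evtchn_lock);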
diff -r b8f329d2c074 -r a11ad61bdb5b xen/arch/x86/hvm/svm/intr.c
--- a/xen/arch/x86/hvm/svm/intr.c Thu Oct 09 11:08:13 2008 +0100
+++ b/xen/arch/x86/hvm/svm/intr.c Thu Oct 09 11:14:52 2008 +0100
@@ -124,9 +124,11 @@ static void svm_dirq_assist(struct vcpu
if ( !test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask) )
continue;
+ spin_lock(&d->evtchn_lock);
if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
{
hvm_pci_msi_assert(d, irq);
+ spin_unlock(&d->evtchn_lock);
continue;
}
@@ -137,9 +139,7 @@ static void svm_dirq_assist(struct vcpu
device = digl->device;
intx = digl->intx;
hvm_pci_intx_assert(d, device, intx);
- spin_lock(&hvm_irq_dpci->dirq_lock);
hvm_irq_dpci->mirq[irq].pending++;
- spin_unlock(&hvm_irq_dpci->dirq_lock);
}
/*
@@ -151,6 +151,7 @@ static void svm_dirq_assist(struct vcpu
*/
set_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)],
NOW() + PT_IRQ_TIME_OUT);
+ spin_unlock(&d->evtchn_lock);
}
}
diff -r b8f329d2c074 -r a11ad61bdb5b xen/arch/x86/hvm/vmsi.c
--- a/xen/arch/x86/hvm/vmsi.c Thu Oct 09 11:08:13 2008 +0100
+++ b/xen/arch/x86/hvm/vmsi.c Thu Oct 09 11:14:52 2008 +0100
@@ -134,7 +134,7 @@ int vmsi_deliver(struct domain *d, int p
"vector=%x trig_mode=%x\n",
dest, dest_mode, delivery_mode, vector, trig_mode);
- if ( !(hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MSI) )
+ if ( !test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[pirq].flags) )
{
gdprintk(XENLOG_WARNING, "pirq %x not msi \n", pirq);
return 0;
diff -r b8f329d2c074 -r a11ad61bdb5b xen/arch/x86/hvm/vmx/intr.c
--- a/xen/arch/x86/hvm/vmx/intr.c Thu Oct 09 11:08:13 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/intr.c Thu Oct 09 11:14:52 2008 +0100
@@ -127,11 +127,13 @@ static void vmx_dirq_assist(struct vcpu
if ( !test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask) )
continue;
- if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
- {
- hvm_pci_msi_assert(d, irq);
- continue;
- }
+ spin_lock(&d->evtchn_lock);
+ if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
+ {
+ hvm_pci_msi_assert(d, irq);
+ spin_unlock(&d->evtchn_lock);
+ continue;
+ }
stop_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)]);
@@ -140,9 +142,7 @@ static void vmx_dirq_assist(struct vcpu
device = digl->device;
intx = digl->intx;
hvm_pci_intx_assert(d, device, intx);
- spin_lock(&hvm_irq_dpci->dirq_lock);
hvm_irq_dpci->mirq[irq].pending++;
- spin_unlock(&hvm_irq_dpci->dirq_lock);
}
/*
@@ -154,6 +154,7 @@ static void vmx_dirq_assist(struct vcpu
*/
set_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)],
NOW() + PT_IRQ_TIME_OUT);
+ spin_unlock(&d->evtchn_lock);
}
}
diff -r b8f329d2c074 -r a11ad61bdb5b xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c Thu Oct 09 11:08:13 2008 +0100
+++ b/xen/arch/x86/irq.c Thu Oct 09 11:14:52 2008 +0100
@@ -285,7 +285,7 @@ static void __do_IRQ_guest(int vector)
* The descriptor is returned locked. This function is safe against changes
* to the per-domain irq-to-vector mapping.
*/
-static irq_desc_t *domain_spin_lock_irq_desc(
+irq_desc_t *domain_spin_lock_irq_desc(
struct domain *d, int irq, unsigned long *pflags)
{
unsigned int vector;
diff -r b8f329d2c074 -r a11ad61bdb5b xen/drivers/passthrough/io.c
--- a/xen/drivers/passthrough/io.c Thu Oct 09 11:08:13 2008 +0100
+++ b/xen/drivers/passthrough/io.c Thu Oct 09 11:14:52 2008 +0100
@@ -26,10 +26,14 @@ static void pt_irq_time_out(void *data)
struct hvm_mirq_dpci_mapping *irq_map = data;
unsigned int guest_gsi, machine_gsi = 0;
int vector;
- struct hvm_irq_dpci *dpci = domain_get_irq_dpci(irq_map->dom);
+ struct hvm_irq_dpci *dpci = NULL;
struct dev_intx_gsi_link *digl;
uint32_t device, intx;
+ spin_lock(&irq_map->dom->evtchn_lock);
+
+ dpci = domain_get_irq_dpci(irq_map->dom);
+ ASSERT(dpci);
list_for_each_entry ( digl, &irq_map->digl_list, list )
{
guest_gsi = digl->gsi;
@@ -41,55 +45,65 @@ static void pt_irq_time_out(void *data)
clear_bit(machine_gsi, dpci->dirq_mask);
vector = domain_irq_to_vector(irq_map->dom, machine_gsi);
- stop_timer(&dpci->hvm_timer[vector]);
- spin_lock(&dpci->dirq_lock);
dpci->mirq[machine_gsi].pending = 0;
- spin_unlock(&dpci->dirq_lock);
+ spin_unlock(&irq_map->dom->evtchn_lock);
pirq_guest_eoi(irq_map->dom, machine_gsi);
}
int pt_irq_create_bind_vtd(
struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
{
- struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
+ struct hvm_irq_dpci *hvm_irq_dpci = NULL;
uint32_t machine_gsi, guest_gsi;
uint32_t device, intx, link;
struct dev_intx_gsi_link *digl;
+ int pirq = pt_irq_bind->machine_irq;
+ if ( pirq < 0 || pirq >= NR_PIRQS )
+ return -EINVAL;
+
+ spin_lock(&d->evtchn_lock);
+
+ hvm_irq_dpci = domain_get_irq_dpci(d);
if ( hvm_irq_dpci == NULL )
{
hvm_irq_dpci = xmalloc(struct hvm_irq_dpci);
if ( hvm_irq_dpci == NULL )
+ {
+ spin_unlock(&d->evtchn_lock);
return -ENOMEM;
-
+ }
memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci));
- spin_lock_init(&hvm_irq_dpci->dirq_lock);
for ( int i = 0; i < NR_IRQS; i++ )
INIT_LIST_HEAD(&hvm_irq_dpci->mirq[i].digl_list);
+ }
- if ( domain_set_irq_dpci(d, hvm_irq_dpci) == 0 )
- xfree(hvm_irq_dpci);
+ if ( domain_set_irq_dpci(d, hvm_irq_dpci) == 0 )
+ {
+ xfree(hvm_irq_dpci);
+ spin_unlock(&d->evtchn_lock);
+ return -EINVAL;
}
if ( pt_irq_bind->irq_type == PT_IRQ_TYPE_MSI )
{
- int pirq = pt_irq_bind->machine_irq;
- if ( pirq < 0 || pirq >= NR_IRQS )
- return -EINVAL;
-
- if ( !(hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_VALID ) )
+ if ( !test_and_set_bit(pirq, hvm_irq_dpci->mapping))
{
- hvm_irq_dpci->mirq[pirq].flags |= HVM_IRQ_DPCI_VALID |
- HVM_IRQ_DPCI_MSI ;
+ set_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[pirq].flags);
+ hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
+ hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
+ hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] = pirq;
+ /* bind after hvm_irq_dpci is setup to avoid race with irq handler*/
pirq_guest_bind(d->vcpu[0], pirq, 0);
}
+ else if (hvm_irq_dpci->mirq[pirq].gmsi.gvec != pt_irq_bind->u.msi.gvec
+ ||hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] != pirq)
- hvm_irq_dpci->mirq[pirq].flags |= HVM_IRQ_DPCI_VALID |HVM_IRQ_DPCI_MSI ;
- hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
- hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
- hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] = pirq;
-
+ {
+ spin_unlock(&d->evtchn_lock);
+ return -EBUSY;
+ }
}
else
{
@@ -102,7 +116,10 @@ int pt_irq_create_bind_vtd(
digl = xmalloc(struct dev_intx_gsi_link);
if ( !digl )
+ {
+ spin_unlock(&d->evtchn_lock);
return -ENOMEM;
+ }
digl->device = device;
digl->intx = intx;
@@ -117,11 +134,11 @@ int pt_irq_create_bind_vtd(
hvm_irq_dpci->girq[guest_gsi].machine_gsi = machine_gsi;
/* Bind the same mirq once in the same domain */
- if ( !(hvm_irq_dpci->mirq[machine_gsi].flags & HVM_IRQ_DPCI_VALID) )
+ if ( !test_and_set_bit(machine_gsi, hvm_irq_dpci->mapping))
{
- hvm_irq_dpci->mirq[machine_gsi].flags |= HVM_IRQ_DPCI_VALID;
hvm_irq_dpci->mirq[machine_gsi].dom = d;
+ /* Init timer before binding */
init_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, machine_gsi)],
pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
/* Deal with gsi for legacy devices */
@@ -132,37 +149,45 @@ int pt_irq_create_bind_vtd(
"VT-d irq bind: m_irq = %x device = %x intx = %x\n",
machine_gsi, device, intx);
}
+ spin_unlock(&d->evtchn_lock);
return 0;
}
int pt_irq_destroy_bind_vtd(
struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
{
- struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
+ struct hvm_irq_dpci *hvm_irq_dpci = NULL;
uint32_t machine_gsi, guest_gsi;
uint32_t device, intx, link;
struct list_head *digl_list, *tmp;
struct dev_intx_gsi_link *digl;
-
- if ( hvm_irq_dpci == NULL )
- return 0;
machine_gsi = pt_irq_bind->machine_irq;
device = pt_irq_bind->u.pci.device;
intx = pt_irq_bind->u.pci.intx;
guest_gsi = hvm_pci_intx_gsi(device, intx);
link = hvm_pci_intx_link(device, intx);
- hvm_irq_dpci->link_cnt[link]--;
gdprintk(XENLOG_INFO,
"pt_irq_destroy_bind_vtd: machine_gsi=%d "
"guest_gsi=%d, device=%d, intx=%d.\n",
machine_gsi, guest_gsi, device, intx);
+ spin_lock(&d->evtchn_lock);
+
+ hvm_irq_dpci = domain_get_irq_dpci(d);
+
+ if ( hvm_irq_dpci == NULL )
+ {
+ spin_unlock(&d->evtchn_lock);
+ return -EINVAL;
+ }
+
+ hvm_irq_dpci->link_cnt[link]--;
memset(&hvm_irq_dpci->girq[guest_gsi], 0,
sizeof(struct hvm_girq_dpci_mapping));
/* clear the mirq info */
- if ( (hvm_irq_dpci->mirq[machine_gsi].flags & HVM_IRQ_DPCI_VALID) )
+ if ( test_bit(machine_gsi, hvm_irq_dpci->mapping))
{
list_for_each_safe ( digl_list, tmp,
&hvm_irq_dpci->mirq[machine_gsi].digl_list )
@@ -185,9 +210,10 @@ int pt_irq_destroy_bind_vtd(
kill_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, machine_gsi)]);
hvm_irq_dpci->mirq[machine_gsi].dom = NULL;
hvm_irq_dpci->mirq[machine_gsi].flags = 0;
+ clear_bit(machine_gsi, hvm_irq_dpci->mapping);
}
}
-
+ spin_unlock(&d->evtchn_lock);
gdprintk(XENLOG_INFO,
"XEN_DOMCTL_irq_unmapping: m_irq = %x device = %x intx = %x\n",
machine_gsi, device, intx);
@@ -199,8 +225,9 @@ int hvm_do_IRQ_dpci(struct domain *d, un
{
struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);
+ ASSERT(spin_is_locked(&irq_desc[domain_irq_to_vector(d, mirq)].lock));
if ( !iommu_enabled || (d == dom0) || !dpci ||
- !dpci->mirq[mirq].flags & HVM_IRQ_DPCI_VALID )
+ !test_bit(mirq, dpci->mapping))
return 0;
/*
@@ -218,44 +245,46 @@ int hvm_do_IRQ_dpci(struct domain *d, un
return 1;
}
-
void hvm_dpci_msi_eoi(struct domain *d, int vector)
{
struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
+ irq_desc_t *desc;
int pirq;
- unsigned long flags;
- irq_desc_t *desc;
if ( !iommu_enabled || (hvm_irq_dpci == NULL) )
return;
+ spin_lock(&d->evtchn_lock);
pirq = hvm_irq_dpci->msi_gvec_pirq[vector];
if ( ( pirq >= 0 ) && (pirq < NR_PIRQS) &&
- (hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_VALID) &&
- (hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MSI) )
- {
- int vec;
- vec = domain_irq_to_vector(d, pirq);
- desc = &irq_desc[vec];
+ test_bit(pirq, hvm_irq_dpci->mapping) &&
+ (test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[pirq].flags)))
+ {
+ BUG_ON(!local_irq_is_enabled());
+ desc = domain_spin_lock_irq_desc(d, pirq, NULL);
+ if (!desc)
+ {
+ spin_unlock(&d->evtchn_lock);
+ return;
+ }
- spin_lock_irqsave(&desc->lock, flags);
- desc->status &= ~IRQ_INPROGRESS;
- spin_unlock_irqrestore(&desc->lock, flags);
+ desc->status &= ~IRQ_INPROGRESS;
+ spin_unlock_irq(&desc->lock);
- pirq_guest_eoi(d, pirq);
- }
+ pirq_guest_eoi(d, pirq);
+ }
+
+ spin_unlock(&d->evtchn_lock);
}
void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
union vioapic_redir_entry *ent)
{
- struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
+ struct hvm_irq_dpci *hvm_irq_dpci = NULL;
uint32_t device, intx, machine_gsi;
- if ( !iommu_enabled || (hvm_irq_dpci == NULL) ||
- (guest_gsi >= NR_ISAIRQS &&
- !hvm_irq_dpci->girq[guest_gsi].valid) )
+ if ( !iommu_enabled)
return;
if ( guest_gsi < NR_ISAIRQS )
@@ -264,23 +293,34 @@ void hvm_dpci_eoi(struct domain *d, unsi
return;
}
- machine_gsi = hvm_irq_dpci->girq[guest_gsi].machine_gsi;
+ spin_lock(&d->evtchn_lock);
+ hvm_irq_dpci = domain_get_irq_dpci(d);
+
+ if((hvm_irq_dpci == NULL) ||
+ (guest_gsi >= NR_ISAIRQS &&
+ !hvm_irq_dpci->girq[guest_gsi].valid) )
+ {
+ spin_unlock(&d->evtchn_lock);
+ return;
+ }
+
device = hvm_irq_dpci->girq[guest_gsi].device;
intx = hvm_irq_dpci->girq[guest_gsi].intx;
hvm_pci_intx_deassert(d, device, intx);
- spin_lock(&hvm_irq_dpci->dirq_lock);
+ machine_gsi = hvm_irq_dpci->girq[guest_gsi].machine_gsi;
if ( --hvm_irq_dpci->mirq[machine_gsi].pending == 0 )
{
- spin_unlock(&hvm_irq_dpci->dirq_lock);
-
if ( (ent == NULL) || !ent->fields.mask )
{
+ /*
+ * No need to get vector lock for timer
+ * since interrupt is still not EOIed
+ */
stop_timer(&hvm_irq_dpci->hvm_timer[
domain_irq_to_vector(d, machine_gsi)]);
pirq_guest_eoi(d, machine_gsi);
}
}
- else
- spin_unlock(&hvm_irq_dpci->dirq_lock);
+ spin_unlock(&d->evtchn_lock);
}
diff -r b8f329d2c074 -r a11ad61bdb5b xen/drivers/passthrough/pci.c
--- a/xen/drivers/passthrough/pci.c Thu Oct 09 11:08:13 2008 +0100
+++ b/xen/drivers/passthrough/pci.c Thu Oct 09 11:14:52 2008 +0100
@@ -154,7 +154,7 @@ int pci_remove_device(u8 bus, u8 devfn)
static void pci_clean_dpci_irqs(struct domain *d)
{
- struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
+ struct hvm_irq_dpci *hvm_irq_dpci = NULL;
uint32_t i;
struct list_head *digl_list, *tmp;
struct dev_intx_gsi_link *digl;
@@ -165,13 +165,14 @@ static void pci_clean_dpci_irqs(struct d
if ( !is_hvm_domain(d) && !need_iommu(d) )
return;
+ spin_lock(&d->evtchn_lock);
+ hvm_irq_dpci = domain_get_irq_dpci(d);
if ( hvm_irq_dpci != NULL )
{
- for ( i = 0; i < NR_IRQS; i++ )
+ for ( i = find_first_bit(hvm_irq_dpci->mapping, NR_PIRQS);
+ i < NR_PIRQS;
+ i = find_next_bit(hvm_irq_dpci->mapping, NR_PIRQS, i + 1) )
{
- if ( !(hvm_irq_dpci->mirq[i].flags & HVM_IRQ_DPCI_VALID) )
- continue;
-
pirq_guest_unbind(d, i);
kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(i)]);
@@ -188,6 +189,7 @@ static void pci_clean_dpci_irqs(struct d
d->arch.hvm_domain.irq.dpci = NULL;
xfree(hvm_irq_dpci);
}
+ spin_unlock(&d->evtchn_lock);
}
void pci_release_devices(struct domain *d)
diff -r b8f329d2c074 -r a11ad61bdb5b xen/drivers/passthrough/vtd/x86/vtd.c
--- a/xen/drivers/passthrough/vtd/x86/vtd.c Thu Oct 09 11:08:13 2008 +0100
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c Thu Oct 09 11:14:52 2008 +0100
@@ -85,37 +85,41 @@ void hvm_dpci_isairq_eoi(struct domain *
void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
{
struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
- struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);
+ struct hvm_irq_dpci *dpci = NULL;
struct dev_intx_gsi_link *digl, *tmp;
int i;
ASSERT(isairq < NR_ISAIRQS);
- if ( !vtd_enabled || !dpci ||
- !test_bit(isairq, dpci->isairq_map) )
+ if ( !vtd_enabled)
return;
+ spin_lock(&d->evtchn_lock);
+
+ dpci = domain_get_irq_dpci(d);
+
+ if ( !dpci || !test_bit(isairq, dpci->isairq_map) )
+ {
+ spin_unlock(&d->evtchn_lock);
+ return;
+ }
/* Multiple mirq may be mapped to one isa irq */
- for ( i = 0; i < NR_IRQS; i++ )
+ for ( i = find_first_bit(dpci->mapping, NR_PIRQS);
+ i < NR_PIRQS;
+ i = find_next_bit(dpci->mapping, NR_PIRQS, i + 1) )
{
- if ( !dpci->mirq[i].flags & HVM_IRQ_DPCI_VALID )
- continue;
-
list_for_each_entry_safe ( digl, tmp,
&dpci->mirq[i].digl_list, list )
{
if ( hvm_irq->pci_link.route[digl->link] == isairq )
{
hvm_pci_intx_deassert(d, digl->device, digl->intx);
- spin_lock(&dpci->dirq_lock);
if ( --dpci->mirq[i].pending == 0 )
{
- spin_unlock(&dpci->dirq_lock);
stop_timer(&dpci->hvm_timer[domain_irq_to_vector(d, i)]);
pirq_guest_eoi(d, i);
}
- else
- spin_unlock(&dpci->dirq_lock);
}
}
}
+ spin_unlock(&d->evtchn_lock);
}
diff -r b8f329d2c074 -r a11ad61bdb5b xen/include/asm-x86/hvm/irq.h
--- a/xen/include/asm-x86/hvm/irq.h Thu Oct 09 11:08:13 2008 +0100
+++ b/xen/include/asm-x86/hvm/irq.h Thu Oct 09 11:14:52 2008 +0100
@@ -25,6 +25,7 @@
#include <xen/types.h>
#include <xen/spinlock.h>
#include <asm/irq.h>
+#include <asm/pirq.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/vpic.h>
#include <asm/hvm/vioapic.h>
@@ -38,8 +39,6 @@ struct dev_intx_gsi_link {
uint8_t link;
};
-#define HVM_IRQ_DPCI_VALID 0x1
-#define HVM_IRQ_DPCI_MSI 0x2
#define _HVM_IRQ_DPCI_MSI 0x1
struct hvm_gmsi_info {
@@ -64,9 +63,10 @@ struct hvm_girq_dpci_mapping {
#define NR_ISAIRQS 16
#define NR_LINK 4
+/* Protected by domain's evtchn_lock */
struct hvm_irq_dpci {
- spinlock_t dirq_lock;
/* Machine IRQ to guest device/intx mapping. */
+ DECLARE_BITMAP(mapping, NR_PIRQS);
struct hvm_mirq_dpci_mapping mirq[NR_IRQS];
/* Guest IRQ to guest device/intx mapping. */
struct hvm_girq_dpci_mapping girq[NR_IRQS];
diff -r b8f329d2c074 -r a11ad61bdb5b xen/include/xen/irq.h
--- a/xen/include/xen/irq.h Thu Oct 09 11:08:13 2008 +0100
+++ b/xen/include/xen/irq.h Thu Oct 09 11:14:52 2008 +0100
@@ -78,6 +78,8 @@ extern int pirq_guest_unmask(struct doma
extern int pirq_guest_unmask(struct domain *d);
extern int pirq_guest_bind(struct vcpu *v, int irq, int will_share);
extern void pirq_guest_unbind(struct domain *d, int irq);
+extern irq_desc_t *domain_spin_lock_irq_desc(
+ struct domain *d, int irq, unsigned long *pflags);
static inline void set_native_irq_info(int irq, cpumask_t mask)
{

@@ -0,0 +1,784 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1223547471 -3600
# Node ID 0033c944318f266a0e367678bf9f46042ae03397
# Parent a11ad61bdb5b188a8116b533c87c31d6e9bd62d4
Rename evtchn_lock to event_lock, since it protects more than just
event-channel state now.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
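For readers tracking the locking changes: when two domains' event locks must be held at once (as in evtchn_bind_interdomain below), the renamed lock keeps the existing deadlock-avoidance convention of acquiring the lower-addressed domain's lock first. A minimal illustrative sketch, not part of the patch; the helper name is hypothetical:

static void double_event_lock(struct domain *ld, struct domain *rd)
{
    if ( ld < rd )
    {
        /* Global acquisition order: lower-addressed domain first. */
        spin_lock(&ld->event_lock);
        spin_lock(&rd->event_lock);
    }
    else
    {
        /* Same domain needs only one lock; otherwise rd's comes first. */
        if ( ld != rd )
            spin_lock(&rd->event_lock);
        spin_lock(&ld->event_lock);
    }
}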
--- a/xen/arch/x86/hvm/svm/intr.c
+++ b/xen/arch/x86/hvm/svm/intr.c
@@ -124,11 +124,11 @@ static void svm_dirq_assist(struct vcpu
if ( !test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask) )
continue;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
{
hvm_pci_msi_assert(d, irq);
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
continue;
}
@@ -151,7 +151,7 @@ static void svm_dirq_assist(struct vcpu
*/
set_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)],
NOW() + PT_IRQ_TIME_OUT);
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}
}
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -127,11 +127,11 @@ static void vmx_dirq_assist(struct vcpu
if ( !test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask) )
continue;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
{
hvm_pci_msi_assert(d, irq);
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
continue;
}
@@ -154,7 +154,7 @@ static void vmx_dirq_assist(struct vcpu
*/
set_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)],
NOW() + PT_IRQ_TIME_OUT);
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}
}
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -514,7 +514,7 @@ int pirq_guest_bind(struct vcpu *v, int
int rc = 0;
cpumask_t cpumask = CPU_MASK_NONE;
- WARN_ON(!spin_is_locked(&v->domain->evtchn_lock));
+ WARN_ON(!spin_is_locked(&v->domain->event_lock));
BUG_ON(!local_irq_is_enabled());
retry:
@@ -684,7 +684,7 @@ void pirq_guest_unbind(struct domain *d,
irq_desc_t *desc;
int vector;
- WARN_ON(!spin_is_locked(&d->evtchn_lock));
+ WARN_ON(!spin_is_locked(&d->event_lock));
BUG_ON(!local_irq_is_enabled());
desc = domain_spin_lock_irq_desc(d, irq, NULL);
@@ -713,7 +713,7 @@ int pirq_guest_force_unbind(struct domai
irq_guest_action_t *action;
int i, bound = 0;
- WARN_ON(!spin_is_locked(&d->evtchn_lock));
+ WARN_ON(!spin_is_locked(&d->event_lock));
BUG_ON(!local_irq_is_enabled());
desc = domain_spin_lock_irq_desc(d, irq, NULL);
@@ -740,7 +740,7 @@ int get_free_pirq(struct domain *d, int
{
int i;
- ASSERT(spin_is_locked(&d->evtchn_lock));
+ ASSERT(spin_is_locked(&d->event_lock));
if ( type == MAP_PIRQ_TYPE_GSI )
{
@@ -770,7 +770,7 @@ int map_domain_pirq(
irq_desc_t *desc;
unsigned long flags;
- ASSERT(spin_is_locked(&d->evtchn_lock));
+ ASSERT(spin_is_locked(&d->event_lock));
if ( !IS_PRIV(current->domain) )
return -EPERM;
@@ -838,7 +838,7 @@ int unmap_domain_pirq(struct domain *d,
if ( !IS_PRIV(current->domain) )
return -EINVAL;
- ASSERT(spin_is_locked(&d->evtchn_lock));
+ ASSERT(spin_is_locked(&d->event_lock));
vector = d->arch.pirq_vector[pirq];
if ( vector <= 0 )
@@ -897,13 +897,13 @@ void free_domain_pirqs(struct domain *d)
if ( !msi_enable )
return;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
for ( i = 0; i < NR_PIRQS; i++ )
if ( d->arch.pirq_vector[i] > 0 )
unmap_domain_pirq(d, i);
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}
extern void dump_ioapic_irq_info(void);
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -104,7 +104,7 @@ static int physdev_map_pirq(struct physd
}
/* Verify or get pirq. */
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( map->pirq < 0 )
{
if ( d->arch.vector_pirq[vector] )
@@ -149,7 +149,7 @@ static int physdev_map_pirq(struct physd
map->pirq = pirq;
done:
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
if ( (ret != 0) && (map->type == MAP_PIRQ_TYPE_MSI) && (map->index == -1) )
free_irq_vector(vector);
free_domain:
@@ -182,9 +182,9 @@ static int physdev_unmap_pirq(struct phy
return -ESRCH;
}
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
ret = unmap_domain_pirq(d, unmap->pirq);
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
rcu_unlock_domain(d);
@@ -315,10 +315,10 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
if ( msi_enable )
{
- spin_lock(&dom0->evtchn_lock);
+ spin_lock(&dom0->event_lock);
ret = map_domain_pirq(dom0, irq_op.irq, irq_op.vector,
MAP_PIRQ_TYPE_GSI, NULL);
- spin_unlock(&dom0->evtchn_lock);
+ spin_unlock(&dom0->event_lock);
}
if ( copy_to_guest(arg, &irq_op, 1) != 0 )
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -133,7 +133,7 @@ static long evtchn_alloc_unbound(evtchn_
if ( rc )
return rc;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( (port = get_free_port(d)) < 0 )
ERROR_EXIT_DOM(port, d);
@@ -150,7 +150,7 @@ static long evtchn_alloc_unbound(evtchn_
alloc->port = port;
out:
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
rcu_unlock_domain(d);
return rc;
@@ -174,14 +174,14 @@ static long evtchn_bind_interdomain(evtc
/* Avoid deadlock by first acquiring lock of domain with smaller id. */
if ( ld < rd )
{
- spin_lock(&ld->evtchn_lock);
- spin_lock(&rd->evtchn_lock);
+ spin_lock(&ld->event_lock);
+ spin_lock(&rd->event_lock);
}
else
{
if ( ld != rd )
- spin_lock(&rd->evtchn_lock);
- spin_lock(&ld->evtchn_lock);
+ spin_lock(&rd->event_lock);
+ spin_lock(&ld->event_lock);
}
if ( (lport = get_free_port(ld)) < 0 )
@@ -216,9 +216,9 @@ static long evtchn_bind_interdomain(evtc
bind->local_port = lport;
out:
- spin_unlock(&ld->evtchn_lock);
+ spin_unlock(&ld->event_lock);
if ( ld != rd )
- spin_unlock(&rd->evtchn_lock);
+ spin_unlock(&rd->event_lock);
rcu_unlock_domain(rd);
@@ -244,7 +244,7 @@ static long evtchn_bind_virq(evtchn_bind
((v = d->vcpu[vcpu]) == NULL) )
return -ENOENT;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( v->virq_to_evtchn[virq] != 0 )
ERROR_EXIT(-EEXIST);
@@ -260,7 +260,7 @@ static long evtchn_bind_virq(evtchn_bind
v->virq_to_evtchn[virq] = bind->port = port;
out:
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return rc;
}
@@ -277,7 +277,7 @@ static long evtchn_bind_ipi(evtchn_bind_
(d->vcpu[vcpu] == NULL) )
return -ENOENT;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( (port = get_free_port(d)) < 0 )
ERROR_EXIT(port);
@@ -289,7 +289,7 @@ static long evtchn_bind_ipi(evtchn_bind_
bind->port = port;
out:
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return rc;
}
@@ -308,7 +308,7 @@ static long evtchn_bind_pirq(evtchn_bind
if ( !irq_access_permitted(d, pirq) )
return -EPERM;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( d->pirq_to_evtchn[pirq] != 0 )
ERROR_EXIT(-EEXIST);
@@ -333,7 +333,7 @@ static long evtchn_bind_pirq(evtchn_bind
bind->port = port;
out:
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return rc;
}
@@ -348,7 +348,7 @@ static long __evtchn_close(struct domain
long rc = 0;
again:
- spin_lock(&d1->evtchn_lock);
+ spin_lock(&d1->event_lock);
if ( !port_is_valid(d1, port1) )
{
@@ -404,12 +404,12 @@ static long __evtchn_close(struct domain
if ( d1 < d2 )
{
- spin_lock(&d2->evtchn_lock);
+ spin_lock(&d2->event_lock);
}
else if ( d1 != d2 )
{
- spin_unlock(&d1->evtchn_lock);
- spin_lock(&d2->evtchn_lock);
+ spin_unlock(&d1->event_lock);
+ spin_lock(&d2->event_lock);
goto again;
}
}
@@ -454,11 +454,11 @@ static long __evtchn_close(struct domain
if ( d2 != NULL )
{
if ( d1 != d2 )
- spin_unlock(&d2->evtchn_lock);
+ spin_unlock(&d2->event_lock);
put_domain(d2);
}
- spin_unlock(&d1->evtchn_lock);
+ spin_unlock(&d1->event_lock);
return rc;
}
@@ -476,11 +476,11 @@ int evtchn_send(struct domain *d, unsign
struct vcpu *rvcpu;
int rport, ret = 0;
- spin_lock(&ld->evtchn_lock);
+ spin_lock(&ld->event_lock);
if ( unlikely(!port_is_valid(ld, lport)) )
{
- spin_unlock(&ld->evtchn_lock);
+ spin_unlock(&ld->event_lock);
return -EINVAL;
}
@@ -489,7 +489,7 @@ int evtchn_send(struct domain *d, unsign
/* Guest cannot send via a Xen-attached event channel. */
if ( unlikely(lchn->consumer_is_xen) )
{
- spin_unlock(&ld->evtchn_lock);
+ spin_unlock(&ld->event_lock);
return -EINVAL;
}
@@ -527,7 +527,7 @@ int evtchn_send(struct domain *d, unsign
}
out:
- spin_unlock(&ld->evtchn_lock);
+ spin_unlock(&ld->event_lock);
return ret;
}
@@ -656,7 +656,7 @@ static long evtchn_status(evtchn_status_
if ( rc )
return rc;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( !port_is_valid(d, port) )
{
@@ -704,7 +704,7 @@ static long evtchn_status(evtchn_status_
status->vcpu = chn->notify_vcpu_id;
out:
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
rcu_unlock_domain(d);
return rc;
@@ -720,7 +720,7 @@ long evtchn_bind_vcpu(unsigned int port,
if ( (vcpu_id >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu_id] == NULL) )
return -ENOENT;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( !port_is_valid(d, port) )
{
@@ -756,7 +756,7 @@ long evtchn_bind_vcpu(unsigned int port,
}
out:
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return rc;
}
@@ -768,11 +768,11 @@ static long evtchn_unmask(evtchn_unmask_
int port = unmask->port;
struct vcpu *v;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( unlikely(!port_is_valid(d, port)) )
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return -EINVAL;
}
@@ -790,7 +790,7 @@ static long evtchn_unmask(evtchn_unmask_
vcpu_mark_events_pending(v);
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return 0;
}
@@ -944,7 +944,7 @@ int alloc_unbound_xen_event_channel(
struct domain *d = local_vcpu->domain;
int port;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( (port = get_free_port(d)) < 0 )
goto out;
@@ -956,7 +956,7 @@ int alloc_unbound_xen_event_channel(
chn->u.unbound.remote_domid = remote_domid;
out:
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return port;
}
@@ -968,11 +968,11 @@ void free_xen_event_channel(
struct evtchn *chn;
struct domain *d = local_vcpu->domain;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( unlikely(d->is_dying) )
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return;
}
@@ -981,7 +981,7 @@ void free_xen_event_channel(
BUG_ON(!chn->consumer_is_xen);
chn->consumer_is_xen = 0;
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
(void)__evtchn_close(d, port);
}
@@ -993,7 +993,7 @@ void notify_via_xen_event_channel(int lp
struct domain *ld = current->domain, *rd;
int rport;
- spin_lock(&ld->evtchn_lock);
+ spin_lock(&ld->event_lock);
ASSERT(port_is_valid(ld, lport));
lchn = evtchn_from_port(ld, lport);
@@ -1007,13 +1007,13 @@ void notify_via_xen_event_channel(int lp
evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
}
- spin_unlock(&ld->evtchn_lock);
+ spin_unlock(&ld->event_lock);
}
int evtchn_init(struct domain *d)
{
- spin_lock_init(&d->evtchn_lock);
+ spin_lock_init(&d->event_lock);
if ( get_free_port(d) != 0 )
return -EINVAL;
evtchn_from_port(d, 0)->state = ECS_RESERVED;
@@ -1027,7 +1027,7 @@ void evtchn_destroy(struct domain *d)
/* After this barrier no new event-channel allocations can occur. */
BUG_ON(!d->is_dying);
- spin_barrier(&d->evtchn_lock);
+ spin_barrier(&d->event_lock);
/* Close all existing event channels. */
for ( i = 0; port_is_valid(d, i); i++ )
@@ -1037,14 +1037,14 @@ void evtchn_destroy(struct domain *d)
}
/* Free all event-channel buckets. */
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
{
xsm_free_security_evtchn(d->evtchn[i]);
xfree(d->evtchn[i]);
d->evtchn[i] = NULL;
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}
static void domain_dump_evtchn_info(struct domain *d)
@@ -1053,7 +1053,7 @@ static void domain_dump_evtchn_info(stru
printk("Domain %d polling vCPUs: %08lx\n", d->domain_id, d->poll_mask[0]);
- if ( !spin_trylock(&d->evtchn_lock) )
+ if ( !spin_trylock(&d->event_lock) )
return;
printk("Event channel information for domain %d:\n",
@@ -1094,7 +1094,7 @@ static void domain_dump_evtchn_info(stru
printk(" x=%d\n", chn->consumer_is_xen);
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}
static void dump_evtchn_info(unsigned char key)
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -30,7 +30,7 @@ static void pt_irq_time_out(void *data)
struct dev_intx_gsi_link *digl;
uint32_t device, intx;
- spin_lock(&irq_map->dom->evtchn_lock);
+ spin_lock(&irq_map->dom->event_lock);
dpci = domain_get_irq_dpci(irq_map->dom);
ASSERT(dpci);
@@ -46,7 +46,7 @@ static void pt_irq_time_out(void *data)
clear_bit(machine_gsi, dpci->dirq_mask);
vector = domain_irq_to_vector(irq_map->dom, machine_gsi);
dpci->mirq[machine_gsi].pending = 0;
- spin_unlock(&irq_map->dom->evtchn_lock);
+ spin_unlock(&irq_map->dom->event_lock);
pirq_guest_eoi(irq_map->dom, machine_gsi);
}
@@ -62,7 +62,7 @@ int pt_irq_create_bind_vtd(
if ( pirq < 0 || pirq >= NR_PIRQS )
return -EINVAL;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
hvm_irq_dpci = domain_get_irq_dpci(d);
if ( hvm_irq_dpci == NULL )
@@ -70,7 +70,7 @@ int pt_irq_create_bind_vtd(
hvm_irq_dpci = xmalloc(struct hvm_irq_dpci);
if ( hvm_irq_dpci == NULL )
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return -ENOMEM;
}
memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci));
@@ -81,7 +81,7 @@ int pt_irq_create_bind_vtd(
if ( domain_set_irq_dpci(d, hvm_irq_dpci) == 0 )
{
xfree(hvm_irq_dpci);
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return -EINVAL;
}
@@ -101,7 +101,7 @@ int pt_irq_create_bind_vtd(
||hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] != pirq)
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return -EBUSY;
}
}
@@ -117,7 +117,7 @@ int pt_irq_create_bind_vtd(
digl = xmalloc(struct dev_intx_gsi_link);
if ( !digl )
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return -ENOMEM;
}
@@ -149,7 +149,7 @@ int pt_irq_create_bind_vtd(
"VT-d irq bind: m_irq = %x device = %x intx = %x\n",
machine_gsi, device, intx);
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return 0;
}
@@ -172,13 +172,13 @@ int pt_irq_destroy_bind_vtd(
"pt_irq_destroy_bind_vtd: machine_gsi=%d "
"guest_gsi=%d, device=%d, intx=%d.\n",
machine_gsi, guest_gsi, device, intx);
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
hvm_irq_dpci = domain_get_irq_dpci(d);
if ( hvm_irq_dpci == NULL )
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return -EINVAL;
}
@@ -213,7 +213,7 @@ int pt_irq_destroy_bind_vtd(
clear_bit(machine_gsi, hvm_irq_dpci->mapping);
}
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
gdprintk(XENLOG_INFO,
"XEN_DOMCTL_irq_unmapping: m_irq = %x device = %x intx = %x\n",
machine_gsi, device, intx);
@@ -254,7 +254,7 @@ void hvm_dpci_msi_eoi(struct domain *d,
if ( !iommu_enabled || (hvm_irq_dpci == NULL) )
return;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
pirq = hvm_irq_dpci->msi_gvec_pirq[vector];
if ( ( pirq >= 0 ) && (pirq < NR_PIRQS) &&
@@ -265,7 +265,7 @@ void hvm_dpci_msi_eoi(struct domain *d,
desc = domain_spin_lock_irq_desc(d, pirq, NULL);
if (!desc)
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return;
}
@@ -275,7 +275,7 @@ void hvm_dpci_msi_eoi(struct domain *d,
pirq_guest_eoi(d, pirq);
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}
void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
@@ -293,14 +293,14 @@ void hvm_dpci_eoi(struct domain *d, unsi
return;
}
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
hvm_irq_dpci = domain_get_irq_dpci(d);
if((hvm_irq_dpci == NULL) ||
(guest_gsi >= NR_ISAIRQS &&
!hvm_irq_dpci->girq[guest_gsi].valid) )
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return;
}
@@ -322,5 +322,5 @@ void hvm_dpci_eoi(struct domain *d, unsi
pirq_guest_eoi(d, machine_gsi);
}
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -165,7 +165,7 @@ static void pci_clean_dpci_irqs(struct d
if ( !is_hvm_domain(d) && !need_iommu(d) )
return;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
hvm_irq_dpci = domain_get_irq_dpci(d);
if ( hvm_irq_dpci != NULL )
{
@@ -189,7 +189,7 @@ static void pci_clean_dpci_irqs(struct d
d->arch.hvm_domain.irq.dpci = NULL;
xfree(hvm_irq_dpci);
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}
void pci_release_devices(struct domain *d)
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -93,13 +93,13 @@ void hvm_dpci_isairq_eoi(struct domain *
if ( !vtd_enabled)
return;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
dpci = domain_get_irq_dpci(d);
if ( !dpci || !test_bit(isairq, dpci->isairq_map) )
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return;
}
/* Multiple mirq may be mapped to one isa irq */
@@ -121,5 +121,5 @@ void hvm_dpci_isairq_eoi(struct domain *
}
}
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -235,7 +235,7 @@ struct arch_domain
/* Shadow translated domain: P2M mapping */
pagetable_t phys_table;
- /* NB. protected by d->evtchn_lock and by irq_desc[vector].lock */
+ /* NB. protected by d->event_lock and by irq_desc[vector].lock */
int vector_pirq[NR_VECTORS];
int pirq_vector[NR_PIRQS];
--- a/xen/include/asm-x86/hvm/irq.h
+++ b/xen/include/asm-x86/hvm/irq.h
@@ -63,7 +63,7 @@ struct hvm_girq_dpci_mapping {
#define NR_ISAIRQS 16
#define NR_LINK 4
-/* Protected by domain's evtchn_lock */
+/* Protected by domain's event_lock */
struct hvm_irq_dpci {
/* Machine IRQ to guest device/intx mapping. */
DECLARE_BITMAP(mapping, NR_PIRQS);
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -188,7 +188,7 @@ struct domain
/* Event channel information. */
struct evtchn *evtchn[NR_EVTCHN_BUCKETS];
- spinlock_t evtchn_lock;
+ spinlock_t event_lock;
struct grant_table *grant_table;
--- a/xen/xsm/acm/acm_simple_type_enforcement_hooks.c
+++ b/xen/xsm/acm/acm_simple_type_enforcement_hooks.c
@@ -248,11 +248,11 @@ ste_init_state(struct acm_sized_buffer *
/* a) check for event channel conflicts */
for ( bucket = 0; bucket < NR_EVTCHN_BUCKETS; bucket++ )
{
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
ports = d->evtchn[bucket];
if ( ports == NULL)
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
break;
}
@@ -280,7 +280,7 @@ ste_init_state(struct acm_sized_buffer *
printkd("%s: Policy violation in event channel domain "
"%x -> domain %x.\n",
__func__, d->domain_id, rdomid);
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
acm_array_append_tuple(errors,
ACM_EVTCHN_SHARING_VIOLATION,
@@ -288,7 +288,7 @@ ste_init_state(struct acm_sized_buffer *
goto out;
}
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}

@@ -1,39 +0,0 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1223629254 -3600
# Node ID a175b2c039ed2481e4064a8256bd29f15c98d0d1
# Parent 8f5a67f466e297535f84cc88eaaa2e71f37f2963
acpi/pmstat.c: refer to the array after range check.
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
Index: xen-3.3.1-testing/xen/arch/x86/acpi/pmstat.c
===================================================================
--- xen-3.3.1-testing.orig/xen/arch/x86/acpi/pmstat.c
+++ xen-3.3.1-testing/xen/arch/x86/acpi/pmstat.c
@@ -49,17 +49,20 @@ extern int pmstat_reset_cx_stat(uint32_t
int do_get_pm_info(struct xen_sysctl_get_pmstat *op)
{
int ret = 0;
- struct pm_px *pxpt = &px_statistic_data[op->cpuid];
- struct processor_pminfo *pmpt = &processor_pminfo[op->cpuid];
+ struct pm_px *pxpt;
+ const struct processor_pminfo *pmpt;
+
+ if ( (op->cpuid >= NR_CPUS) || !cpu_online(op->cpuid) )
+ return -EINVAL;
+
+ pmpt = processor_pminfo[op->cpuid];
+ pxpt = &px_statistic_data[op->cpuid];
/* to protect the case when Px was not controlled by xen */
if ( (!(pmpt->perf.init & XEN_PX_INIT)) &&
(op->type & PMSTAT_CATEGORY_MASK) == PMSTAT_PX )
return -EINVAL;
- if ( !cpu_online(op->cpuid) )
- return -EINVAL;
-
switch( op->type )
{
case PMSTAT_get_max_px:

@@ -0,0 +1,26 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1223900120 -3600
# Node ID a26194601c8f2b223e380bbb7153df7027e8d7f5
# Parent e4bddd01cb3ebe0c4a72603c722889b22d3943fd
x86: propagate return value of alloc_l1_table()
A blatant mistake of mine resulted in the return value of
alloc_l1_table() being ignored in the preemptible page table update
changes.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
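The rule the fix restores, as a minimal hedged sketch (names follow the hunk below):

/* Sketch only: never discard the allocator's result.  With the
 * preemptible page-table update changes a non-zero rc can mean a hard
 * failure or a request to restart, and the caller must see either. */
rc = alloc_l1_table(page);
if ( rc )
    return rc;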
diff -r e4bddd01cb3e -r a26194601c8f xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Mon Oct 13 10:09:09 2008 +0100
+++ b/xen/arch/x86/mm.c Mon Oct 13 13:15:20 2008 +0100
@@ -1883,8 +1883,7 @@ static int alloc_page_type(struct page_i
switch ( type & PGT_type_mask )
{
case PGT_l1_page_table:
- alloc_l1_table(page);
- rc = 0;
+ rc = alloc_l1_table(page);
break;
case PGT_l2_page_table:
rc = alloc_l2_table(page, type, preemptible);

18631-msix-intr-remap.patch Normal file
@@ -0,0 +1,117 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1224068295 -3600
# Node ID 61218a1763da340e6fbf5a153255ad723c254661
# Parent 8c3144b1ff631fd33489dbc1e7ee0e2cc3594ecd
vt-d: Fix MSI-x interrupt remapping
MSI-x may have multiple vectors; however, in the current interrupt
remapping code, one device has only one entry in the interrupt
remapping table.
This patch adds a 'remap_index' field to the msi_desc structure to
track each descriptor's index in the interrupt remapping table.
Signed-off-by: Haitao Shan <haitao.shan@intel.com>
Signed-off-by: Weidong Han <weidong.han@intel.com>
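The allocation policy, simplified from the msi_msg_to_remap_entry() hunk below (illustrative only; remap_index starts at -1, meaning no slot claimed yet):

if ( msi_desc->remap_index < 0 )
{
    /* First write for this descriptor: claim the next free slot. */
    ir_ctrl->iremap_index++;
    msi_desc->remap_index = ir_ctrl->iremap_index;
}
index = msi_desc->remap_index;   /* later writes reuse the same slot */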
--- a/xen/arch/x86/msi.c
+++ b/xen/arch/x86/msi.c
@@ -364,6 +364,7 @@ static struct msi_desc* alloc_msi_entry(
INIT_LIST_HEAD(&entry->list);
entry->dev = NULL;
+ entry->remap_index = -1;
return entry;
}
--- a/xen/drivers/passthrough/vtd/intremap.c
+++ b/xen/drivers/passthrough/vtd/intremap.c
@@ -328,7 +328,8 @@ static int remap_entry_to_msi_msg(
}
static int msi_msg_to_remap_entry(
- struct iommu *iommu, struct pci_dev *pdev, struct msi_msg *msg)
+ struct iommu *iommu, struct pci_dev *pdev,
+ struct msi_desc *msi_desc, struct msi_msg *msg)
{
struct iremap_entry *iremap_entry = NULL, *iremap_entries;
struct iremap_entry new_ire;
@@ -336,32 +337,18 @@ static int msi_msg_to_remap_entry(
unsigned int index;
unsigned long flags;
struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
- int i = 0;
remap_rte = (struct msi_msg_remap_entry *) msg;
spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);
- iremap_entries =
- (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
-
- /* If the entry for a PCI device has been there, use the old entry,
- * Or, assign a new entry for it.
- */
- for ( i = 0; i <= ir_ctrl->iremap_index; i++ )
+ if ( msi_desc->remap_index < 0 )
{
- iremap_entry = &iremap_entries[i];
- if ( iremap_entry->hi.sid ==
- ((pdev->bus << 8) | pdev->devfn) )
- break;
- }
-
- if ( i > ir_ctrl->iremap_index )
- {
- ir_ctrl->iremap_index++;
+ ir_ctrl->iremap_index++;
index = ir_ctrl->iremap_index;
+ msi_desc->remap_index = index;
}
else
- index = i;
+ index = msi_desc->remap_index;
if ( index > IREMAP_ENTRY_NR - 1 )
{
@@ -369,11 +356,13 @@ static int msi_msg_to_remap_entry(
"%s: intremap index (%d) is larger than"
" the maximum index (%ld)!\n",
__func__, index, IREMAP_ENTRY_NR - 1);
- unmap_vtd_domain_page(iremap_entries);
+ msi_desc->remap_index = -1;
spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
return -EFAULT;
}
+ iremap_entries =
+ (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
iremap_entry = &iremap_entries[index];
memcpy(&new_ire, iremap_entry, sizeof(struct iremap_entry));
@@ -450,7 +439,7 @@ void msi_msg_write_remap_rte(
if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 )
return;
- msi_msg_to_remap_entry(iommu, pdev, msg);
+ msi_msg_to_remap_entry(iommu, pdev, msi_desc, msg);
}
int intremap_setup(struct iommu *iommu)
--- a/xen/include/asm-x86/msi.h
+++ b/xen/include/asm-x86/msi.h
@@ -90,10 +90,11 @@ struct msi_desc {
void __iomem *mask_base;
struct pci_dev *dev;
- int vector;
+ int vector;
- /* Last set MSI message */
- struct msi_msg msg;
+ struct msi_msg msg; /* Last set MSI message */
+
+ int remap_index; /* index in interrupt remapping table */
};
/*

18654-xend-vcpus.patch Normal file
@@ -0,0 +1,44 @@
diff -r 22c89412fc8c tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py Wed Oct 15 15:58:09 2008 +0100
+++ b/tools/python/xen/xend/XendDomainInfo.py Sun Oct 19 22:08:10 2008 -0600
@@ -1502,23 +1502,18 @@ class XendDomainInfo:
return self.info['VCPUs_max']
def setVCpuCount(self, vcpus):
- if vcpus <= 0:
- raise XendError('Invalid VCPUs')
+ def vcpus_valid(n):
+ if vcpus <= 0:
+ raise XendError('Zero or less VCPUs is invalid')
+ if self.domid >= 0 and vcpus > self.info['VCPUs_max']:
+ raise XendError('Cannot set vcpus greater than max vcpus on running domain')
+ vcpus_valid(vcpus)
self.info['vcpu_avail'] = (1 << vcpus) - 1
if self.domid >= 0:
self.storeVm('vcpu_avail', self.info['vcpu_avail'])
- # update dom differently depending on whether we are adjusting
- # vcpu number up or down, otherwise _vcpuDomDetails does not
- # disable the vcpus
- if self.info['VCPUs_max'] > vcpus:
- # decreasing
- self._writeDom(self._vcpuDomDetails())
- self.info['VCPUs_live'] = vcpus
- else:
- # same or increasing
- self.info['VCPUs_live'] = vcpus
- self._writeDom(self._vcpuDomDetails())
+ self._writeDom(self._vcpuDomDetails())
+ self.info['VCPUs_live'] = vcpus
else:
if self.info['VCPUs_max'] > vcpus:
# decreasing
@@ -1528,7 +1523,7 @@ class XendDomainInfo:
for c in range(self.info['VCPUs_max'], vcpus):
self.info['cpus'].append(list())
self.info['VCPUs_max'] = vcpus
- xen.xend.XendDomain.instance().managed_config_save(self)
+ xen.xend.XendDomain.instance().managed_config_save(self)
log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
vcpus)

@@ -2,7 +2,7 @@ Index: xen-3.3.1-testing/tools/python/xen/xend/XendDomainInfo.py
===================================================================
--- xen-3.3.1-testing.orig/tools/python/xen/xend/XendDomainInfo.py
+++ xen-3.3.1-testing/tools/python/xen/xend/XendDomainInfo.py
@@ -2255,7 +2255,7 @@ class XendDomainInfo:
@@ -2250,7 +2250,7 @@ class XendDomainInfo:
vtd_mem = ((vtd_mem + 1023) / 1024) * 1024
# Make sure there's enough RAM available for the domain

@@ -5,7 +5,7 @@ Index: xen-3.3.1-testing/tools/python/xen/xend/XendDomainInfo.py
===================================================================
--- xen-3.3.1-testing.orig/tools/python/xen/xend/XendDomainInfo.py
+++ xen-3.3.1-testing/tools/python/xen/xend/XendDomainInfo.py
@@ -2596,7 +2596,7 @@ class XendDomainInfo:
@@ -2595,7 +2595,7 @@ class XendDomainInfo:
(fn, BOOTLOADER_LOOPBACK_DEVICE))
vbd = {

@@ -2,7 +2,7 @@ Index: xen-3.3.1-testing/tools/python/xen/lowlevel/xc/xc.c
===================================================================
--- xen-3.3.1-testing.orig/tools/python/xen/lowlevel/xc/xc.c
+++ xen-3.3.1-testing/tools/python/xen/lowlevel/xc/xc.c
@@ -872,14 +872,14 @@ static PyObject *pyxc_hvm_build(XcObject
@@ -887,14 +887,14 @@ static PyObject *pyxc_hvm_build(XcObject
int i;
#endif
char *image;
@ -21,7 +21,7 @@ Index: xen-3.3.1-testing/tools/python/xen/lowlevel/xc/xc.c
return NULL;
if ( xc_hvm_build(self->xc_handle, dom, memsize, image) != 0 )
@@ -904,6 +904,7 @@ static PyObject *pyxc_hvm_build(XcObject
@@ -919,6 +919,7 @@ static PyObject *pyxc_hvm_build(XcObject
va_hvm->checksum = -sum;
munmap(va_map, XC_PAGE_SIZE);
#endif
@ -84,7 +84,7 @@ Index: xen-3.3.1-testing/tools/python/xen/xm/create.py
gopts.var('acpi', val='ACPI',
fn=set_int, default=1,
use="Disable or enable ACPI of HVM domain.")
@@ -851,7 +855,7 @@ def configure_vifs(config_devs, vals):
@@ -858,7 +862,7 @@ def configure_vifs(config_devs, vals):
def configure_hvm(config_image, vals):
"""Create the config for HVM devices.
"""

i386-highmem-assist.patch Normal file
@@ -0,0 +1,221 @@
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2431,6 +2431,29 @@ static inline cpumask_t vcpumask_to_pcpu
return pmask;
}
+#ifdef __i386__
+static inline void *fixmap_domain_page(unsigned long mfn)
+{
+ unsigned int cpu = smp_processor_id();
+ void *ptr = (void *)fix_to_virt(FIX_PAE_HIGHMEM_0 + cpu);
+
+ l1e_write(fix_pae_highmem_pl1e - cpu,
+ l1e_from_pfn(mfn, __PAGE_HYPERVISOR));
+ flush_tlb_one_local(ptr);
+ return ptr;
+}
+static inline void fixunmap_domain_page(const void *ptr)
+{
+ unsigned int cpu = virt_to_fix((unsigned long)ptr) - FIX_PAE_HIGHMEM_0;
+
+ l1e_write(fix_pae_highmem_pl1e - cpu, l1e_empty());
+ this_cpu(make_cr3_timestamp) = this_cpu(tlbflush_time);
+}
+#else
+#define fixmap_domain_page(mfn) mfn_to_virt(mfn)
+#define fixunmap_domain_page(ptr) ((void)(ptr))
+#endif
+
int do_mmuext_op(
XEN_GUEST_HANDLE(mmuext_op_t) uops,
unsigned int count,
@@ -2700,6 +2723,66 @@ int do_mmuext_op(
break;
}
+ case MMUEXT_CLEAR_PAGE:
+ {
+ unsigned char *ptr;
+
+ okay = !get_page_and_type_from_pagenr(mfn, PGT_writable_page,
+ FOREIGNDOM, 0);
+ if ( unlikely(!okay) )
+ {
+ MEM_LOG("Error while clearing mfn %lx", mfn);
+ break;
+ }
+
+ /* A page is dirtied when it's being cleared. */
+ paging_mark_dirty(d, mfn);
+
+ ptr = fixmap_domain_page(mfn);
+ clear_page(ptr);
+ fixunmap_domain_page(ptr);
+
+ put_page_and_type(page);
+ break;
+ }
+
+ case MMUEXT_COPY_PAGE:
+ {
+ const unsigned char *src;
+ unsigned char *dst;
+ unsigned long src_mfn;
+
+ src_mfn = gmfn_to_mfn(FOREIGNDOM, op.arg2.src_mfn);
+ okay = get_page_from_pagenr(src_mfn, FOREIGNDOM);
+ if ( unlikely(!okay) )
+ {
+ MEM_LOG("Error while copying from mfn %lx", src_mfn);
+ break;
+ }
+
+ okay = !get_page_and_type_from_pagenr(mfn, PGT_writable_page,
+ FOREIGNDOM, 0);
+ if ( unlikely(!okay) )
+ {
+ put_page(mfn_to_page(src_mfn));
+ MEM_LOG("Error while copying to mfn %lx", mfn);
+ break;
+ }
+
+ /* A page is dirtied when it's being copied to. */
+ paging_mark_dirty(d, mfn);
+
+ src = map_domain_page(src_mfn);
+ dst = fixmap_domain_page(mfn);
+ copy_page(dst, src);
+ fixunmap_domain_page(dst);
+ unmap_domain_page(src);
+
+ put_page_and_type(page);
+ put_page(mfn_to_page(src_mfn));
+ break;
+ }
+
default:
MEM_LOG("Invalid extended pt command 0x%x", op.cmd);
rc = -ENOSYS;
--- a/xen/arch/x86/x86_32/domain_page.c
+++ b/xen/arch/x86/x86_32/domain_page.c
@@ -114,7 +114,7 @@ void *map_domain_page(unsigned long mfn)
return (void *)va;
}
-void unmap_domain_page(void *va)
+void unmap_domain_page(const void *va)
{
unsigned int idx;
struct vcpu *v;
@@ -241,7 +241,7 @@ void *map_domain_page_global(unsigned lo
return (void *)va;
}
-void unmap_domain_page_global(void *va)
+void unmap_domain_page_global(const void *va)
{
unsigned long __va = (unsigned long)va;
l2_pgentry_t *pl2e;
--- a/xen/arch/x86/x86_64/compat/mm.c
+++ b/xen/arch/x86/x86_64/compat/mm.c
@@ -217,6 +217,8 @@ int compat_mmuext_op(XEN_GUEST_HANDLE(mm
case MMUEXT_PIN_L4_TABLE:
case MMUEXT_UNPIN_TABLE:
case MMUEXT_NEW_BASEPTR:
+ case MMUEXT_CLEAR_PAGE:
+ case MMUEXT_COPY_PAGE:
arg1 = XLAT_mmuext_op_arg1_mfn;
break;
default:
@@ -244,6 +246,9 @@ int compat_mmuext_op(XEN_GUEST_HANDLE(mm
case MMUEXT_INVLPG_MULTI:
arg2 = XLAT_mmuext_op_arg2_vcpumask;
break;
+ case MMUEXT_COPY_PAGE:
+ arg2 = XLAT_mmuext_op_arg2_src_mfn;
+ break;
default:
arg2 = -1;
break;
--- a/xen/include/asm-x86/fixmap.h
+++ b/xen/include/asm-x86/fixmap.h
@@ -29,6 +29,7 @@
* from the end of virtual memory backwards.
*/
enum fixed_addresses {
+ FIX_HOLE,
#ifdef __i386__
FIX_PAE_HIGHMEM_0,
FIX_PAE_HIGHMEM_END = FIX_PAE_HIGHMEM_0 + NR_CPUS-1,
--- a/xen/include/public/xen.h
+++ b/xen/include/public/xen.h
@@ -231,6 +231,13 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
* cmd: MMUEXT_SET_LDT
* linear_addr: Linear address of LDT base (NB. must be page-aligned).
* nr_ents: Number of entries in LDT.
+ *
+ * cmd: MMUEXT_CLEAR_PAGE
+ * mfn: Machine frame number to be cleared.
+ *
+ * cmd: MMUEXT_COPY_PAGE
+ * mfn: Machine frame number of the destination page.
+ * src_mfn: Machine frame number of the source page.
*/
#define MMUEXT_PIN_L1_TABLE 0
#define MMUEXT_PIN_L2_TABLE 1
@@ -247,12 +254,15 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
#define MMUEXT_FLUSH_CACHE 12
#define MMUEXT_SET_LDT 13
#define MMUEXT_NEW_USER_BASEPTR 15
+#define MMUEXT_CLEAR_PAGE 16
+#define MMUEXT_COPY_PAGE 17
#ifndef __ASSEMBLY__
struct mmuext_op {
unsigned int cmd;
union {
- /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */
+ /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR
+ * CLEAR_PAGE, COPY_PAGE */
xen_pfn_t mfn;
/* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
unsigned long linear_addr;
@@ -266,6 +276,8 @@ struct mmuext_op {
#else
void *vcpumask;
#endif
+ /* COPY_PAGE */
+ xen_pfn_t src_mfn;
} arg2;
};
typedef struct mmuext_op mmuext_op_t;
--- a/xen/include/xen/domain_page.h
+++ b/xen/include/xen/domain_page.h
@@ -24,7 +24,7 @@ void *map_domain_page(unsigned long mfn)
* Pass a VA within a page previously mapped in the context of the
* currently-executing VCPU via a call to map_domain_page().
*/
-void unmap_domain_page(void *va);
+void unmap_domain_page(const void *va);
/*
* Similar to the above calls, except the mapping is accessible in all
@@ -32,7 +32,7 @@ void unmap_domain_page(void *va);
* mappings can also be unmapped from any context.
*/
void *map_domain_page_global(unsigned long mfn);
-void unmap_domain_page_global(void *va);
+void unmap_domain_page_global(const void *va);
#define DMCACHE_ENTRY_VALID 1U
#define DMCACHE_ENTRY_HELD 2U
@@ -75,7 +75,7 @@ map_domain_page_with_cache(unsigned long
}
static inline void
-unmap_domain_page_with_cache(void *va, struct domain_mmap_cache *cache)
+unmap_domain_page_with_cache(const void *va, struct domain_mmap_cache *cache)
{
ASSERT(cache != NULL);
cache->flags &= ~DMCACHE_ENTRY_HELD;

@@ -1,7 +1,5 @@
Index: xen-3.3.1-testing/xen/arch/x86/io_apic.c
===================================================================
--- xen-3.3.1-testing.orig/xen/arch/x86/io_apic.c
+++ xen-3.3.1-testing/xen/arch/x86/io_apic.c
--- a/xen/arch/x86/io_apic.c
+++ b/xen/arch/x86/io_apic.c
@@ -45,7 +45,7 @@
int (*ioapic_renumber_irq)(int ioapic, int irq);
atomic_t irq_mis_count;
@ -10,4 +8,4 @@ Index: xen-3.3.1-testing/xen/arch/x86/io_apic.c
+int msi_enable = 1;
boolean_param("msi", msi_enable);
int domain_irq_to_vector(struct domain *d, int irq)
/* Where if anywhere is the i8259 connect in external int mode */

network-route.patch Normal file
@@ -0,0 +1,13 @@
Index: xen-3.3.1-testing/tools/examples/network-route
===================================================================
--- xen-3.3.1-testing.orig/tools/examples/network-route
+++ xen-3.3.1-testing/tools/examples/network-route
@@ -21,7 +21,7 @@ dir=$(dirname "$0")
evalVariables "$@"
-netdev=${netdev:-eth${vifnum}}
+netdev=${netdev:-eth0}
echo 1 >/proc/sys/net/ipv4/ip_forward
echo 1 >/proc/sys/net/ipv4/conf/${netdev}/proxy_arp

@@ -1,21 +1,25 @@
Index: xen-3.3.1-testing/unmodified_drivers/linux-2.6/platform-pci/platform-compat.c
===================================================================
--- xen-3.3.1-testing.orig/unmodified_drivers/linux-2.6/platform-pci/platform-compat.c
+++ xen-3.3.1-testing/unmodified_drivers/linux-2.6/platform-pci/platform-compat.c
@@ -14,7 +14,7 @@ EXPORT_SYMBOL(system_state);
--- xen-3.3.1-testing.orig/unmodified_drivers/linux-2.6/platform-pci/platform-compat.c 2008-10-15 11:38:53.000000000 -0600
+++ xen-3.3.1-testing/unmodified_drivers/linux-2.6/platform-pci/platform-compat.c 2008-10-15 11:49:33.000000000 -0600
@@ -14,7 +14,11 @@
void ctrl_alt_del(void)
{
- kill_proc(1, SIGINT, 1); /* interrupt init */
+ kill_proc_info(SIGINT, SEND_SIG_PRIV, 1);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
+ kill_proc(1, SIGINT, 1);
+#else
+ kill_pid(cad_pid, SIGINT, 1);
+#endif
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8)
Index: xen-3.3.1-testing/unmodified_drivers/linux-2.6/platform-pci/machine_reboot.c
===================================================================
--- xen-3.3.1-testing.orig/unmodified_drivers/linux-2.6/platform-pci/machine_reboot.c
+++ xen-3.3.1-testing/unmodified_drivers/linux-2.6/platform-pci/machine_reboot.c
@@ -44,7 +44,11 @@ static void ap_suspend(void *_info)
--- xen-3.3.1-testing.orig/unmodified_drivers/linux-2.6/platform-pci/machine_reboot.c 2008-10-15 11:38:53.000000000 -0600
+++ xen-3.3.1-testing/unmodified_drivers/linux-2.6/platform-pci/machine_reboot.c 2008-10-15 11:39:04.000000000 -0600
@@ -44,7 +44,11 @@
atomic_dec(&info->nr_spinning);
}

@@ -289,10 +289,10 @@ Index: xen-3.3.1-testing/tools/python/xen/xend/XendConfig.py
===================================================================
--- xen-3.3.1-testing.orig/tools/python/xen/xend/XendConfig.py
+++ xen-3.3.1-testing/tools/python/xen/xend/XendConfig.py
@@ -208,6 +208,7 @@ XENAPI_CFG_TYPES = {
'cpuid' : dict,
@@ -209,6 +209,7 @@ XENAPI_CFG_TYPES = {
'cpuid_check' : dict,
'machine_address_size': int,
'suppress_spurious_page_faults': bool0,
+ 'snapshotname': str,
}

vtd-alloc-checks.patch Normal file
@@ -0,0 +1,75 @@
--- a/xen/drivers/passthrough/vtd/intremap.c
+++ b/xen/drivers/passthrough/vtd/intremap.c
@@ -458,7 +458,7 @@ int intremap_setup(struct iommu *iommu)
{
dprintk(XENLOG_WARNING VTDPREFIX,
"Cannot allocate memory for ir_ctrl->iremap_maddr\n");
- return -ENODEV;
+ return -ENOMEM;
}
ir_ctrl->iremap_index = -1;
}
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -220,10 +220,10 @@ static u64 addr_to_dma_page_maddr(struct
if ( !alloc )
break;
maddr = alloc_pgtable_maddr();
+ if ( !maddr )
+ break;
dma_set_pte_addr(*pte, maddr);
vaddr = map_vtd_domain_page(maddr);
- if ( !vaddr )
- break;
/*
* high level table always sets r/w, last level
@@ -236,8 +236,6 @@ static u64 addr_to_dma_page_maddr(struct
else
{
vaddr = map_vtd_domain_page(pte->val);
- if ( !vaddr )
- break;
}
if ( level == 2 )
--- a/xen/drivers/passthrough/vtd/qinval.c
+++ b/xen/drivers/passthrough/vtd/qinval.c
@@ -429,7 +429,11 @@ int qinval_setup(struct iommu *iommu)
{
qi_ctrl->qinval_maddr = alloc_pgtable_maddr();
if ( qi_ctrl->qinval_maddr == 0 )
- panic("Cannot allocate memory for qi_ctrl->qinval_maddr\n");
+ {
+ dprintk(XENLOG_WARNING VTDPREFIX,
+ "Cannot allocate memory for qi_ctrl->qinval_maddr\n");
+ return -ENOMEM;
+ }
flush->context = flush_context_qi;
flush->iotlb = flush_iotlb_qi;
}
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -41,17 +41,19 @@ u64 alloc_pgtable_maddr(void)
{
struct page_info *pg;
u64 *vaddr;
+ unsigned long mfn;
pg = alloc_domheap_page(NULL, 0);
- vaddr = map_domain_page(page_to_mfn(pg));
- if ( !vaddr )
+ if ( !pg )
return 0;
+ mfn = page_to_mfn(pg);
+ vaddr = map_domain_page(mfn);
memset(vaddr, 0, PAGE_SIZE);
iommu_flush_cache_page(vaddr);
unmap_domain_page(vaddr);
- return page_to_maddr(pg);
+ return (u64)mfn << PAGE_SHIFT_4K;
}
void free_pgtable_maddr(u64 maddr)

@@ -229,7 +229,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/acpi/cpu_idle.c
===================================================================
--- xen-3.3.1-testing.orig/xen/arch/x86/acpi/cpu_idle.c
+++ xen-3.3.1-testing/xen/arch/x86/acpi/cpu_idle.c
@@ -126,7 +126,7 @@ struct acpi_processor_power
@@ -128,7 +128,7 @@ struct acpi_processor_power
struct acpi_processor_cx states[ACPI_PROCESSOR_MAX_POWER];
};
@ -238,7 +238,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/acpi/cpu_idle.c
static void print_acpi_power(uint32_t cpu, struct acpi_processor_power *power)
{
@@ -160,8 +160,11 @@ static void print_acpi_power(uint32_t cp
@@ -162,8 +162,11 @@ static void print_acpi_power(uint32_t cp
static void dump_cx(unsigned char key)
{
@ -252,7 +252,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/acpi/cpu_idle.c
}
static int __init cpu_idle_key_init(void)
@@ -270,14 +273,12 @@ static struct {
@@ -272,14 +275,12 @@ static struct {
static void acpi_processor_idle(void)
{
@ -268,7 +268,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/acpi/cpu_idle.c
/*
* Interrupts must be disabled during bus mastering calculations and
* for C2/C3 transitions.
@@ -290,7 +291,7 @@ static void acpi_processor_idle(void)
@@ -292,7 +293,7 @@ static void acpi_processor_idle(void)
return;
}
@ -277,7 +277,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/acpi/cpu_idle.c
if ( !cx )
{
if ( pm_idle_save )
@@ -938,7 +939,15 @@ long set_cx_pminfo(uint32_t cpu, struct
@@ -949,7 +950,15 @@ long set_cx_pminfo(uint32_t cpu, struct
return -EFAULT;
}
@ -294,7 +294,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/acpi/cpu_idle.c
init_cx_pminfo(acpi_power);
@@ -974,18 +983,25 @@ long set_cx_pminfo(uint32_t cpu, struct
@@ -985,18 +994,25 @@ long set_cx_pminfo(uint32_t cpu, struct
uint32_t pmstat_get_cx_nr(uint32_t cpuid)
{
@ -327,7 +327,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/acpi/pmstat.c
===================================================================
--- xen-3.3.1-testing.orig/xen/arch/x86/acpi/pmstat.c
+++ xen-3.3.1-testing/xen/arch/x86/acpi/pmstat.c
@@ -40,7 +40,7 @@
@@ -41,7 +41,7 @@
#include <public/sysctl.h>
#include <acpi/cpufreq/cpufreq.h>
@ -336,26 +336,33 @@ Index: xen-3.3.1-testing/xen/arch/x86/acpi/pmstat.c
extern uint32_t pmstat_get_cx_nr(uint32_t cpuid);
extern int pmstat_get_cx_stat(uint32_t cpuid, struct pm_cx_stat *stat);
@@ -49,17 +49,15 @@ extern int pmstat_reset_cx_stat(uint32_t
@@ -50,13 +50,14 @@ extern int pmstat_reset_cx_stat(uint32_t
int do_get_pm_info(struct xen_sysctl_get_pmstat *op)
{
int ret = 0;
- struct pm_px *pxpt;
const struct processor_pminfo *pmpt;
- struct pm_px *pxpt = &px_statistic_data[op->cpuid];
- struct processor_pminfo *pmpt = &processor_pminfo[op->cpuid];
+ struct processor_pminfo *pmpt;
/* to protect the case when Px was not controlled by xen */
if ( (op->cpuid >= NR_CPUS) || !cpu_online(op->cpuid) )
return -EINVAL;
pmpt = processor_pminfo[op->cpuid];
- pxpt = &px_statistic_data[op->cpuid];
/* to protect the case when Px was not controlled by xen */
- if ( (!(pmpt->perf.init & XEN_PX_INIT)) &&
+ if ( (!pmpt || !(pmpt->perf.init & XEN_PX_INIT)) &&
(op->type & PMSTAT_CATEGORY_MASK) == PMSTAT_PX )
return -EINVAL;
@@ -76,6 +74,10 @@ int do_get_pm_info(struct xen_sysctl_get
+ pmpt = processor_pminfo[op->cpuid];
+
switch ( op->type & PMSTAT_CATEGORY_MASK )
{
case PMSTAT_CX:
@@ -66,7 +67,7 @@ int do_get_pm_info(struct xen_sysctl_get
case PMSTAT_PX:
if ( !(xen_processor_pmbits & XEN_PROCESSOR_PM_PX) )
return -ENODEV;
- if ( !(pmpt->perf.init & XEN_PX_INIT) )
+ if ( !pmpt || !(pmpt->perf.init & XEN_PX_INIT) )
return -EINVAL;
break;
default:
@@ -86,6 +87,10 @@ int do_get_pm_info(struct xen_sysctl_get
uint64_t now, ct;
uint64_t total_idle_ns;
uint64_t tmp_idle_ns;

x86-cpufreq-report.patch Normal file
@@ -0,0 +1,85 @@
--- a/xen/arch/x86/platform_hypercall.c
+++ b/xen/arch/x86/platform_hypercall.c
@@ -21,7 +21,7 @@
#include <xen/acpi.h>
#include <asm/current.h>
#include <public/platform.h>
-#include <acpi/cpufreq/processor_perf.h>
+#include <acpi/cpufreq/cpufreq.h>
#include <asm/edd.h>
#include <asm/mtrr.h>
#include "cpu/mtrr/mtrr.h"
@@ -55,6 +55,7 @@ static long cpu_frequency_change_helper(
ret_t do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) u_xenpf_op)
{
ret_t ret = 0;
+ struct vcpu *v;
struct xen_platform_op curop, *op = &curop;
if ( !IS_PRIV(current->domain) )
@@ -300,7 +301,6 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xe
{
uint32_t cpu;
uint64_t idletime, now = NOW();
- struct vcpu *v;
struct xenctl_cpumap ctlmap;
cpumask_t cpumap;
XEN_GUEST_HANDLE(uint8) cpumap_bitmap;
@@ -461,6 +461,19 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xe
break;
}
break;
+
+ case XENPF_get_cpu_freq:
+ if ( op->u.get_cpu_freq.vcpu >= MAX_VIRT_CPUS ||
+ !(v = current->domain->vcpu[op->u.get_cpu_freq.vcpu]) )
+ {
+ ret = -EINVAL;
+ break;
+ }
+
+ op->u.get_cpu_freq.freq = xen_px_policy[v->processor].cur;
+ if ( copy_field_to_guest(u_xenpf_op, op, u.get_cpu_freq.freq) )
+ ret = -EFAULT;
+ break;
default:
ret = -ENOSYS;
--- a/xen/arch/x86/x86_64/platform_hypercall.c
+++ b/xen/arch/x86/x86_64/platform_hypercall.c
@@ -21,6 +21,8 @@ DEFINE_XEN_GUEST_HANDLE(compat_platform_
#define xen_processor_power_t compat_processor_power_t
#define set_cx_pminfo compat_set_cx_pminfo
+#define xenpf_get_cpu_freq compat_pf_get_cpu_freq
+
#define xenpf_enter_acpi_sleep compat_pf_enter_acpi_sleep
#define COMPAT
--- a/xen/include/public/platform.h
+++ b/xen/include/public/platform.h
@@ -312,6 +312,16 @@ struct xenpf_set_processor_pminfo {
typedef struct xenpf_set_processor_pminfo xenpf_set_processor_pminfo_t;
DEFINE_XEN_GUEST_HANDLE(xenpf_set_processor_pminfo_t);
+#define XENPF_get_cpu_freq ('N' << 24)
+struct xenpf_get_cpu_freq {
+ /* IN variables */
+ uint32_t vcpu;
+ /* OUT variables */
+ uint32_t freq; /* in kHz */
+};
+typedef struct xenpf_get_cpu_freq xenpf_get_cpu_freq_t;
+DEFINE_XEN_GUEST_HANDLE(xenpf_get_cpu_freq_t);
+
struct xen_platform_op {
uint32_t cmd;
uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
@@ -327,6 +337,7 @@ struct xen_platform_op {
struct xenpf_change_freq change_freq;
struct xenpf_getidletime getidletime;
struct xenpf_set_processor_pminfo set_pminfo;
+ struct xenpf_get_cpu_freq get_cpu_freq;
uint8_t pad[128];
} u;
};
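A hedged usage sketch for the new op, assuming a privileged (dom0) caller and the standard HYPERVISOR_platform_op wrapper:

struct xen_platform_op op = {
    .cmd = XENPF_get_cpu_freq,
    .interface_version = XENPF_INTERFACE_VERSION,
    .u.get_cpu_freq.vcpu = 0,          /* VCPU whose frequency we want */
};

if ( HYPERVISOR_platform_op(&op) == 0 )
    printk("vcpu0 frequency: %u kHz\n", op.u.get_cpu_freq.freq);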

x86-emul-movnti.patch Normal file
@@ -0,0 +1,25 @@
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -232,7 +232,8 @@ static uint8_t twobyte_table[256] = {
DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM,
ByteOp|DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem16|ModRM|Mov,
/* 0xC0 - 0xC7 */
- ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 0, 0,
+ ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM,
+ 0, DstMem|SrcReg|ModRM|Mov,
0, 0, 0, ImplicitOps|ModRM,
/* 0xC8 - 0xCF */
ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
@@ -3656,6 +3657,12 @@ x86_emulate(
case 8: *src.reg = dst.val; break;
}
goto add;
+
+ case 0xc3: /* movnti */
+ /* Ignore the non-temporal hint for now. */
+ generate_exception_if(dst.bytes <= 2, EXC_UD, -1);
+ dst.val = src.val;
+ break;
}
goto writeback;

@@ -1,19 +0,0 @@
A blatant mistake of mine resulted in the return value of alloc_l1_table
being ignored with the preemptable page table update changes.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Index: 2008-09-19/xen/arch/x86/mm.c
===================================================================
--- 2008-09-19.orig/xen/arch/x86/mm.c 2008-09-19 14:00:01.000000000 +0200
+++ 2008-09-19/xen/arch/x86/mm.c 2008-10-13 12:07:13.000000000 +0200
@@ -1883,8 +1883,7 @@ static int alloc_page_type(struct page_i
switch ( type & PGT_type_mask )
{
case PGT_l1_page_table:
- alloc_l1_table(page);
- rc = 0;
+ rc = alloc_l1_table(page);
break;
case PGT_l2_page_table:
rc = alloc_l2_table(page, type, preemptible);


@@ -2,7 +2,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/traps.c
===================================================================
--- xen-3.3.1-testing.orig/xen/arch/x86/traps.c
+++ xen-3.3.1-testing/xen/arch/x86/traps.c
@@ -1261,6 +1261,7 @@ asmlinkage void do_early_page_fault(stru
@@ -1265,6 +1265,7 @@ asmlinkage void do_early_page_fault(stru
unsigned long *stk = (unsigned long *)regs;
printk("Early fatal page fault at %04x:%p (cr2=%p, ec=%04x)\n",
regs->cs, _p(regs->eip), _p(cr2), regs->error_code);


@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bbc40412e65f50bdfae0f38d8ea3f51b8df773ffea6cd105988729fe470b4a67
size 22689575
oid sha256:84362b1ccb7f06991e6a8350ae29df89c82e7d9461eef2cce560fcdd8cc88242
size 22692829


@@ -148,7 +148,7 @@ Index: xen-3.3.1-testing/tools/python/xen/xend/XendDomainInfo.py
from xen.xend.XendError import XendError, VmError
from xen.xend.XendDevices import XendDevices
from xen.xend.XendTask import XendTask
@@ -1875,6 +1875,10 @@ class XendDomainInfo:
@@ -1870,6 +1870,10 @@ class XendDomainInfo:
deviceClass, config = self.info['devices'].get(dev_uuid)
self._waitForDevice(deviceClass, config['devid'])
@@ -159,7 +159,7 @@ Index: xen-3.3.1-testing/tools/python/xen/xend/XendDomainInfo.py
def _waitForDevice_destroy(self, deviceClass, devid, backpath):
return self.getDeviceController(deviceClass).waitForDevice_destroy(
devid, backpath)
@@ -2547,8 +2551,11 @@ class XendDomainInfo:
@@ -2546,8 +2550,11 @@ class XendDomainInfo:
blexec = osdep.pygrub_path
blcfg = None
@@ -173,7 +173,7 @@ Index: xen-3.3.1-testing/tools/python/xen/xend/XendDomainInfo.py
if not disks:
msg = "Had a bootloader specified, but no disks are bootable"
@@ -2559,13 +2566,10 @@ class XendDomainInfo:
@@ -2558,13 +2565,10 @@ class XendDomainInfo:
devtype = devinfo[0]
disk = devinfo[1]['uname']
@@ -190,7 +190,7 @@ Index: xen-3.3.1-testing/tools/python/xen/xend/XendDomainInfo.py
log.info("Mounting %s on %s." %
(fn, BOOTLOADER_LOOPBACK_DEVICE))
@@ -2577,7 +2581,9 @@ class XendDomainInfo:
@@ -2576,7 +2580,9 @@ class XendDomainInfo:
from xen.xend import XendDomain
dom0 = XendDomain.instance().privilegedDomain()
@@ -201,7 +201,7 @@ Index: xen-3.3.1-testing/tools/python/xen/xend/XendDomainInfo.py
fn = BOOTLOADER_LOOPBACK_DEVICE
try:
@@ -2588,7 +2594,7 @@ class XendDomainInfo:
@@ -2587,7 +2593,7 @@ class XendDomainInfo:
log.info("Unmounting %s from %s." %
(fn, BOOTLOADER_LOOPBACK_DEVICE))


@@ -56,20 +56,52 @@ Index: xen-3.3.1-testing/tools/ioemu-remote/i386-dm/qemu-ifup
===================================================================
--- xen-3.3.1-testing.orig/tools/ioemu-remote/i386-dm/qemu-ifup
+++ xen-3.3.1-testing/tools/ioemu-remote/i386-dm/qemu-ifup
@@ -1,11 +1,11 @@
@@ -1,36 +1,22 @@
#!/bin/sh
-#. /etc/rc.d/init.d/functions
-#ulimit -c unlimited
-
-echo 'config qemu network with xen bridge for ' $*
-
echo 'config qemu network with xen bridge for ' $*
+# If no bridge is specified, try the device with the default route.
bridge=$2
+if [ -z "$bridge" ]; then
+ brnum=$(ip route list | awk '/^default / { print $NF }' | sed 's/^[^0-9]*//')
+ brnum=${brnum:-0}
+ bridge=xenbr${brnum}
+ bridge=$(ip route list | awk '/^default / { print $NF }')
+fi
#
# Old style bridge setup with netloop, used to have a bridge name
-#
-# Old style bridge setup with netloop, used to have a bridge name
-# of xenbrX, enslaving pethX and vif0.X, and then configuring
-# eth0.
-#
-# New style bridge setup does not use netloop, so the bridge name
-# is ethX and the physical device is enslaved pethX
-#
-# So if...
-#
-# - User asks for xenbrX
-# - AND xenbrX doesn't exist
-# - AND there is a ethX device which is a bridge
-#
-# ..then we translate xenbrX to ethX
-#
-# This lets old config files work without modification
-#
-if [ ! -e "/sys/class/net/$bridge" ] && [ -z "${bridge##xenbr*}" ]
+# Exit if $bridge is not a bridge. Exit with 0 status
+# so the qemu-dm process is not terminated. No networking
+# in the VM is bad but not catastrophic. The VM could still
+# run CPU and disk I/O workloads.
+# Include a useful error message in the qemu-dm log file.
+if [ ! -e "/sys/class/net/${bridge}/bridge" ]
then
- if [ -e "/sys/class/net/eth${bridge#xenbr}/bridge" ]
- then
- bridge="eth${bridge#xenbr}"
- fi
+ echo "WARNING! ${bridge} is not a bridge. qemu-ifup exiting. VM may not have a functioning networking stack."
+ exit 0
fi
ifconfig $1 0.0.0.0 up


@@ -5,7 +5,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/io_apic.c
===================================================================
--- xen-3.3.1-testing.orig/xen/arch/x86/io_apic.c
+++ xen-3.3.1-testing/xen/arch/x86/io_apic.c
@@ -1365,7 +1365,7 @@ static unsigned int startup_level_ioapic
@@ -1355,7 +1355,7 @@ static unsigned int startup_level_ioapic
return 0; /* don't check for pending */
}
@@ -14,7 +14,7 @@ Index: xen-3.3.1-testing/xen/arch/x86/io_apic.c
static void setup_ioapic_ack(char *s)
{
if ( !strcmp(s, "old") )
@@ -1856,6 +1856,8 @@ void __init setup_IO_APIC(void)
@@ -1846,6 +1846,8 @@ void __init setup_IO_APIC(void)
else
io_apic_irqs = ~PIC_IRQS;


@@ -2,7 +2,7 @@ Index: xen-3.3.1-testing/tools/python/xen/xm/create.py
===================================================================
--- xen-3.3.1-testing.orig/tools/python/xen/xm/create.py
+++ xen-3.3.1-testing/tools/python/xen/xm/create.py
@@ -1066,9 +1066,8 @@ def preprocess_access_control(vals):
@@ -1073,9 +1073,8 @@ def preprocess_access_control(vals):
def preprocess_ip(vals):
if vals.ip or vals.dhcp != 'off':


@@ -2,7 +2,7 @@ Index: xen-3.3.1-testing/tools/python/xen/xm/create.py
===================================================================
--- xen-3.3.1-testing.orig/tools/python/xen/xm/create.py
+++ xen-3.3.1-testing/tools/python/xen/xm/create.py
@@ -1122,8 +1122,7 @@ def spawn_vnc(display):
@@ -1129,8 +1129,7 @@ def spawn_vnc(display):
returns the port that the vncviewer is listening on and sets the global
vncpid. On failure, returns 0. Note that vncviewer is daemonized.
"""


@@ -1,3 +1,37 @@
-------------------------------------------------------------------
Fri Oct 24 13:01:41 MDT 2008 - jfehlig@novell.com
- bnc#437756 - Fix default netdev device in network-route
-------------------------------------------------------------------
Wed Oct 22 10:38:58 MDT 2008 - jfehlig@novell.com
- bnc#434560 - Remove local patch that prevents creating PV vif
when "type=ioemu" is specified in guest vif config. This patch
is causing several problems with recent changes to xenstore
layout.
-------------------------------------------------------------------
Wed Oct 22 09:10:45 MDT 2008 - jfehlig@novell.com
- bnc#431758 - Added upstream changeset 18654 to prevent setting
vcpus > VCPUs_max on running domain.
-------------------------------------------------------------------
Tue Oct 21 10:28:55 MDT 2008 - carnold@novell.com
- Update to changeset 18455.
-------------------------------------------------------------------
Fri Oct 17 08:52:44 CEST 2008 - olh@suse.de
- add ExclusiveArch x86 x86_64
-------------------------------------------------------------------
Wed Oct 15 01:32:18 MDT 2008 - jfehlig@novell.com
- bnc#433722 - Fix handling of default bridge in qemu-ifup.
-------------------------------------------------------------------
Fri Oct 17 08:52:44 CEST 2008 - olh@suse.de

xen.spec (373 lines changed; diff suppressed because it is too large)


@@ -2,7 +2,7 @@ Index: xen-3.3.1-testing/tools/python/xen/xend/XendDomainInfo.py
===================================================================
--- xen-3.3.1-testing.orig/tools/python/xen/xend/XendDomainInfo.py
+++ xen-3.3.1-testing/tools/python/xen/xend/XendDomainInfo.py
@@ -3225,6 +3225,14 @@ class XendDomainInfo:
@@ -3224,6 +3224,14 @@ class XendDomainInfo:
if not config.has_key('backend'):
config['backend'] = "00000000-0000-0000-0000-000000000000"


@@ -2,7 +2,7 @@ Index: xen-3.3.1-testing/tools/python/xen/xend/XendDomainInfo.py
===================================================================
--- xen-3.3.1-testing.orig/tools/python/xen/xend/XendDomainInfo.py
+++ xen-3.3.1-testing/tools/python/xen/xend/XendDomainInfo.py
@@ -1864,7 +1864,7 @@ class XendDomainInfo:
@@ -1859,7 +1859,7 @@ class XendDomainInfo:
try:
if not corefile:
this_time = time.strftime("%Y-%m%d-%H%M.%S", time.localtime())


@@ -173,7 +173,7 @@ Index: xen-3.3.1-testing/tools/python/xen/xend/XendDomainInfo.py
self._constructDomain()
self._storeVmDetails()
self._createChannels()
@@ -2317,6 +2321,11 @@ class XendDomainInfo:
@@ -2316,6 +2320,11 @@ class XendDomainInfo:
self._stateSet(DOM_STATE_HALTED)
self.domid = None # Do not push into _stateSet()!
@@ -185,7 +185,7 @@ Index: xen-3.3.1-testing/tools/python/xen/xend/XendDomainInfo.py
finally:
self.refresh_shutdown_lock.release()
@@ -3538,6 +3547,74 @@ class XendDomainInfo:
@@ -3537,6 +3546,74 @@ class XendDomainInfo:
def has_device(self, dev_class, dev_uuid):
return (dev_uuid in self.info['%s_refs' % dev_class.lower()])


@@ -1,18 +0,0 @@
Index: xen-3.3.1-testing/tools/python/xen/xend/server/netif.py
===================================================================
--- xen-3.3.1-testing.orig/tools/python/xen/xend/server/netif.py
+++ xen-3.3.1-testing/tools/python/xen/xend/server/netif.py
@@ -101,6 +101,13 @@ class NetifController(DevController):
def __init__(self, vm):
DevController.__init__(self, vm)
+ def createDevice(self, config):
+ typ = config.get('type', '')
+ if typ == 'ioemu':
+ return 0
+
+ DevController.createDevice(self, config)
+
def getDeviceDetails(self, config):
"""@see DevController.getDeviceDetails"""


@@ -1,12 +0,0 @@
diff -r 8cc28137805d tools/python/xen/xend/server/DevController.py
--- a/tools/python/xen/xend/server/DevController.py Thu Oct 09 11:00:51 2008 +0100
+++ b/tools/python/xen/xend/server/DevController.py Thu Oct 09 16:00:08 2008 -0600
@@ -257,7 +257,7 @@ class DevController:
if force:
frontpath = self.frontendPath(dev)
- backpath = self.readVm(devid, "backend")
+ backpath = xstransact.Read(frontpath, "backend")
if backpath:
xstransact.Remove(backpath)
xstransact.Remove(frontpath)