References: bnc#713503

# HG changeset patch
# User George Dunlap
# Date 1311701818 -3600
# Node ID ef9ed3d2aa870a37ed5e611be9c524d526a2d604
# Parent  590aadf7c46ae979da3552332f592f9492ce6d8b
xen: Infrastructure to allow irqs to share vector maps

Laying the groundwork for per-device vector maps.  This generic code
allows any irq to point to a vector map; all irqs sharing the same
vector map will avoid sharing vectors.

Signed-off-by: George Dunlap

# HG changeset patch
# User George Dunlap
# Date 1314026133 -3600
# Node ID 3a05da2dc7c0a5fc0fcfc40c535d1fcb71203625
# Parent  d1cd78a73a79e0e648937322cdb8d92a7f86327a
x86: Fix up irq vector map logic

We need to make sure that cfg->used_vectors is only cleared once;
otherwise there may be a race condition that allows the same vector to
be assigned twice, defeating the whole purpose of the map.

This makes two changes:
* __clear_irq_vector() only clears the vector if the irq is not being
  moved.
* smp_irq_move_cleanup_interrupt() only clears used_vectors if this is
  the last place it's being used (move_cleanup_count == 0 after the
  decrement).

Also make the use of asserts more consistent, to catch this kind of
logic bug in the future.

Signed-off-by: George Dunlap
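Editor's illustration (not part of the patch): the mechanism both changesets
rely on is that each irq_cfg may point at a shared vmask_t bitmap; the vector
allocator skips any vector whose bit is already set in that map, and sets or
clears the bit as a vector is bound or released.  The standalone C sketch
below shows that sharing behaviour in isolation.  NR_VECTORS, the simplified
test_bit()/set_bit() helpers, assign_vector(), and the 0x20..0x2f vector range
are illustrative stand-ins for Xen's bitmap primitives, __assign_irq_vector(),
and the real vector space, not the actual implementation.

/* Sketch: two irqs sharing one vector map never receive the same vector. */
#include <stdio.h>
#include <stdbool.h>

#define NR_VECTORS 256

typedef struct {
    unsigned long _bits[NR_VECTORS / (8 * sizeof(unsigned long))];
} vmask_t;                      /* mirrors the patch's DECLARE_BITMAP wrapper */

struct irq_cfg {
    int      vector;            /* -1 == unassigned in this sketch */
    vmask_t *used_vectors;      /* NULL => no shared map, no extra constraint */
};

/* Simplified stand-ins for Xen's test_bit()/set_bit(). */
static bool test_bit(int nr, const vmask_t *m)
{
    return (m->_bits[nr / (8 * sizeof(unsigned long))]
            >> (nr % (8 * sizeof(unsigned long)))) & 1;
}

static void set_bit(int nr, vmask_t *m)
{
    m->_bits[nr / (8 * sizeof(unsigned long))] |=
        1UL << (nr % (8 * sizeof(unsigned long)));
}

/*
 * Pick the lowest free vector, honouring the shared map if present.
 * Only the vector-map check is modelled here; the real allocator also
 * consults per-cpu vector tables and the global used_vectors mask.
 */
static int assign_vector(struct irq_cfg *cfg, int first, int last)
{
    for (int v = first; v <= last; v++) {
        if (cfg->used_vectors && test_bit(v, cfg->used_vectors))
            continue;           /* another irq on the same map owns it */
        cfg->vector = v;
        if (cfg->used_vectors)
            set_bit(v, cfg->used_vectors);
        return v;
    }
    return -1;
}

int main(void)
{
    vmask_t shared = { { 0 } };
    struct irq_cfg a = { -1, &shared }, b = { -1, &shared };

    printf("irq A got vector 0x%x\n", assign_vector(&a, 0x20, 0x2f));
    printf("irq B got vector 0x%x\n", assign_vector(&b, 0x20, 0x2f));
    /* A and B end up on different vectors because they share one map. */
    return 0;
}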
--- a/xen/arch/x86/io_apic.c
+++ b/xen/arch/x86/io_apic.c
@@ -548,6 +548,13 @@ fastcall void smp_irq_move_cleanup_inter
         }
         __get_cpu_var(vector_irq)[vector] = -1;
         cfg->move_cleanup_count--;
+
+        if ( cfg->move_cleanup_count == 0
+             && cfg->used_vectors )
+        {
+            ASSERT(test_bit(vector, cfg->used_vectors));
+            clear_bit(vector, cfg->used_vectors);
+        }
 unlock:
         spin_unlock(&desc->lock);
     }
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -94,6 +94,11 @@ static int __init __bind_irq_vector(int
         per_cpu(vector_irq, cpu)[vector] = irq;
     cfg->vector = vector;
     cfg->cpu_mask = online_mask;
+    if ( cfg->used_vectors )
+    {
+        ASSERT(!test_bit(vector, cfg->used_vectors));
+        set_bit(vector, cfg->used_vectors);
+    }
     irq_status[irq] = IRQ_USED;
     if (IO_APIC_IRQ(irq))
         irq_vector[irq] = vector;
@@ -159,6 +164,7 @@ static void dynamic_irq_cleanup(unsigned
     desc->depth = 1;
     desc->msi_desc = NULL;
     desc->handler = &no_irq_type;
+    desc->chip_data->used_vectors=NULL;
     cpus_setall(desc->affinity);
     spin_unlock_irqrestore(&desc->lock, flags);
@@ -191,6 +197,7 @@ static void __clear_irq_vector(int irq)
     if (likely(!cfg->move_in_progress))
         return;
 
+    cpus_and(tmp_mask, cfg->old_cpu_mask, cpu_online_map);
     for_each_cpu_mask(cpu, tmp_mask) {
         for (vector = FIRST_DYNAMIC_VECTOR; vector <= LAST_DYNAMIC_VECTOR;
@@ -202,6 +209,12 @@ static void __clear_irq_vector(int irq)
         }
     }
+    if ( cfg->used_vectors )
+    {
+        ASSERT(test_bit(vector, cfg->used_vectors));
+        clear_bit(vector, cfg->used_vectors);
+    }
+
     cfg->move_in_progress = 0;
 }
@@ -261,6 +274,7 @@ static void init_one_irq_cfg(struct irq_
     cfg->vector = IRQ_VECTOR_UNASSIGNED;
     cpus_clear(cfg->cpu_mask);
     cpus_clear(cfg->old_cpu_mask);
+    cfg->used_vectors = NULL;
 }
 
 int init_irq_data(void)
@@ -387,6 +401,10 @@ next:
         if (test_bit(vector, used_vectors))
             goto next;
 
+        if (cfg->used_vectors
+            && test_bit(vector, cfg->used_vectors) )
+            goto next;
+
         for_each_cpu_mask(new_cpu, tmp_mask)
             if (per_cpu(vector_irq, new_cpu)[vector] != -1)
                 goto next;
@@ -402,6 +420,11 @@ next:
             per_cpu(vector_irq, new_cpu)[vector] = irq;
         cfg->vector = vector;
         cpus_copy(cfg->cpu_mask, tmp_mask);
+        if ( cfg->used_vectors )
+        {
+            ASSERT(!test_bit(vector, cfg->used_vectors));
+            set_bit(vector, cfg->used_vectors);
+        }
         irq_status[irq] = IRQ_USED;
         if (IO_APIC_IRQ(irq))
--- a/xen/include/asm-x86/irq.h
+++ b/xen/include/asm-x86/irq.h
@@ -23,11 +23,16 @@
 #define irq_to_desc(irq)    (&irq_desc[irq])
 #define irq_cfg(irq)        (&irq_cfg[irq])
 
+typedef struct {
+    DECLARE_BITMAP(_bits,NR_VECTORS);
+} vmask_t;
+
 struct irq_cfg {
         int  vector;
         cpumask_t cpu_mask;
         cpumask_t old_cpu_mask;
         unsigned move_cleanup_count;
+        vmask_t *used_vectors;
         u8 move_in_progress : 1;
 };