xen/23096-x86-hpet-no-cpumask_lock.patch
Charles Arnold 4c73609012 - Upstream patches from Jan
  22998-x86-get_page_from_l1e-retcode.patch
  22999-x86-mod_l1_entry-retcode.patch
  23000-x86-mod_l2_entry-retcode.patch
  23096-x86-hpet-no-cpumask_lock.patch
  23099-x86-rwlock-scalability.patch
  23103-x86-pirq-guest-eoi-check.patch
  23127-vtd-bios-settings.patch
  23153-x86-amd-clear-DramModEn.patch
  23154-x86-amd-iorr-no-rdwr.patch
  23199-amd-iommu-unmapped-intr-fault.patch
  23200-amd-iommu-intremap-sync.patch
  23228-x86-conditional-write_tsc.patch

- update xenalyze to revision 98
  * Unify setting of vcpu data type
  * Unify record size checks
  * Fix cr3_switch not to access hvm struct before it's initialized
- add xenalyze.gcc46.patch to fix unused-but-set-variable errors

- bnc#688473 - VUL-0: potential buffer overflow in tools
  cve-2011-1583-4.0.patch

- hotplug.losetup.patch
  correct dev:inode detection and use variable expansion

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=115
2011-05-03 17:51:18 +00:00

# HG changeset patch
# User Jan Beulich <jbeulich@novell.com>
# Date 1301043797 0
# Node ID a65612bcbb921e98a8843157bf365e4ab16e8144
# Parent 941119d58655f2b2df86d9ecc4cb502bbc5e783c
x86/hpet: eliminate cpumask_lock

According to the (now getting removed) comment in struct
hpet_event_channel, this was to prevent accessing a CPU's
timer_deadline after it got cleared from cpumask. This can be done
without a lock altogether - hpet_broadcast_exit() can simply clear
the bit, and handle_hpet_broadcast() can read timer_deadline before
looking at the mask a second time (the cpumask bit was already
found set by the surrounding loop).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Acked-by: Gang Wei <gang.wei@intel.com>
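
To make the ordering argument concrete, here is a minimal standalone
sketch of the same protocol in C11 atomics, independent of Xen's cpumask
and per-CPU primitives. All names (broadcast_mask, deadline[],
broadcast_enter(), broadcast_exit(), earliest_pending()) are hypothetical
stand-ins, not Xen interfaces; the acquire fence plays the role of the
rmb() the patch adds between the deadline read and the mask re-check.

    #include <stdatomic.h>
    #include <stdint.h>

    #define NR_CPUS 8

    static _Atomic uint64_t broadcast_mask;     /* one bit per sleeping CPU */
    static _Atomic uint64_t deadline[NR_CPUS];  /* each CPU's wakeup time */

    /* Sleeping CPU: publish the deadline before the mask bit becomes
     * visible, so anyone who sees the bit set sees a valid deadline. */
    void broadcast_enter(unsigned int cpu, uint64_t when)
    {
        atomic_store_explicit(&deadline[cpu], when, memory_order_relaxed);
        atomic_fetch_or_explicit(&broadcast_mask, 1ULL << cpu,
                                 memory_order_release);
    }

    /* Waking CPU: clearing the bit is atomic on its own - no lock. */
    void broadcast_exit(unsigned int cpu)
    {
        atomic_fetch_and_explicit(&broadcast_mask, ~(1ULL << cpu),
                                  memory_order_release);
    }

    /* Broadcast handler: sample the deadline first, then re-check the
     * bit. If the bit is still set, the clear had not yet been observed
     * when the deadline was read, so the sampled value was a live
     * deadline. Worst case is a spurious wakeup, never trusting a
     * deadline from a CPU that had already left broadcast mode. */
    uint64_t earliest_pending(uint64_t now)
    {
        uint64_t next = UINT64_MAX, wake = 0;
        uint64_t mask = atomic_load(&broadcast_mask);

        for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++) {
            if (!(mask & (1ULL << cpu)))
                continue;
            uint64_t d = atomic_load_explicit(&deadline[cpu],
                                              memory_order_relaxed);
            /* Plays the role of the patch's second rmb(): keep the
             * deadline read ordered before the mask re-check. */
            atomic_thread_fence(memory_order_acquire);
            if (!(atomic_load(&broadcast_mask) & (1ULL << cpu)))
                continue;               /* CPU already woke; skip it */
            if (d <= now)
                wake |= 1ULL << cpu;    /* expired: a real handler IPIs it */
            else if (d < next)
                next = d;
        }
        (void)wake;                     /* sketch only; no IPIs sent here */
        return next;
    }

The design point, per the removed comment, is that clearing a mask bit
is already atomic, so the waking CPU needs no lock at all; the only
ordering that matters is on the reader side, where a read barrier is
cheaper in the interrupt handler than taking a write lock.
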
--- a/xen/arch/x86/hpet.c
+++ b/xen/arch/x86/hpet.c
@@ -34,18 +34,6 @@ struct hpet_event_channel
     int shift;
     s_time_t next_event;
     cpumask_t cpumask;
-    /*
-     * cpumask_lock is used to prevent hpet intr handler from accessing other
-     * cpu's timer_deadline after the other cpu's mask was cleared --
-     * mask cleared means cpu waken up, then accessing timer_deadline from
-     * other cpu is not safe.
-     * It is not used for protecting cpumask, so set ops needn't take it.
-     * Multiple cpus clear cpumask simultaneously is ok due to the atomic
-     * feature of cpu_clear, so hpet_broadcast_exit() can take read lock for
-     * clearing cpumask, and handle_hpet_broadcast() have to take write lock
-     * for read cpumask & access timer_deadline.
-     */
-    rwlock_t cpumask_lock;
     spinlock_t lock;
     void (*event_handler)(struct hpet_event_channel *);
 
@@ -208,17 +196,18 @@ again:
     /* find all expired events */
     for_each_cpu_mask(cpu, ch->cpumask)
     {
-        write_lock_irq(&ch->cpumask_lock);
+        s_time_t deadline;
 
-        if ( cpu_isset(cpu, ch->cpumask) )
-        {
-            if ( per_cpu(timer_deadline, cpu) <= now )
-                cpu_set(cpu, mask);
-            else if ( per_cpu(timer_deadline, cpu) < next_event )
-                next_event = per_cpu(timer_deadline, cpu);
-        }
+        rmb();
+        deadline = per_cpu(timer_deadline, cpu);
+        rmb();
+        if ( !cpu_isset(cpu, ch->cpumask) )
+            continue;
 
-        write_unlock_irq(&ch->cpumask_lock);
+        if ( deadline <= now )
+            cpu_set(cpu, mask);
+        else if ( deadline < next_event )
+            next_event = deadline;
     }
 
     /* wakeup the cpus which have an expired event. */
@@ -598,7 +587,6 @@ void hpet_broadcast_init(void)
         hpet_events[i].shift = 32;
         hpet_events[i].next_event = STIME_MAX;
         spin_lock_init(&hpet_events[i].lock);
-        rwlock_init(&hpet_events[i].cpumask_lock);
         wmb();
         hpet_events[i].event_handler = handle_hpet_broadcast;
     }
@@ -634,7 +622,6 @@ void hpet_broadcast_init(void)
     legacy_hpet_event.idx = 0;
     legacy_hpet_event.flags = 0;
     spin_lock_init(&legacy_hpet_event.lock);
-    rwlock_init(&legacy_hpet_event.cpumask_lock);
     wmb();
     legacy_hpet_event.event_handler = handle_hpet_broadcast;
 
@@ -713,9 +700,7 @@ void hpet_broadcast_exit(void)
     if ( !reprogram_timer(this_cpu(timer_deadline)) )
         raise_softirq(TIMER_SOFTIRQ);
 
-    read_lock_irq(&ch->cpumask_lock);
     cpu_clear(cpu, ch->cpumask);
-    read_unlock_irq(&ch->cpumask_lock);
 
     if ( ch != &legacy_hpet_event )
     {