# HG changeset patch
# User Keir Fraser
# Date 1224519405 -3600
# Node ID 54d74fc0037ce688e79759ca632d3918f7aaa399
# Parent  f4dab783b58b41f2c67a66d6d095887faec3c296
spinlock: Modify recursive spinlock definitions to support up to 4095 CPUs.

Signed-off-by: Keir Fraser

--- a/xen/include/asm-x86/spinlock.h
+++ b/xen/include/asm-x86/spinlock.h
@@ -8,11 +8,11 @@
 
 typedef struct {
     volatile s16 lock;
-    s8 recurse_cpu;
-    u8 recurse_cnt;
+    u16 recurse_cpu:12;
+    u16 recurse_cnt:4;
 } spinlock_t;
 
-#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { 1, -1, 0 }
+#define SPIN_LOCK_UNLOCKED { 1, 0xfffu, 0 }
 #define spin_lock_init(x) do { *(x) = (spinlock_t) SPIN_LOCK_UNLOCKED; } while(0)
 #define spin_is_locked(x) (*(volatile char *)(&(x)->lock) <= 0)
 
@@ -59,11 +59,15 @@ static inline int _raw_spin_trylock(spin
 #define _raw_spin_lock_recursive(_lock)            \
     do {                                           \
         int cpu = smp_processor_id();              \
+        /* Don't allow overflow of recurse_cpu field. */ \
+        BUILD_BUG_ON(NR_CPUS > 0xfffu);            \
         if ( likely((_lock)->recurse_cpu != cpu) ) \
         {                                          \
             spin_lock(_lock);                      \
             (_lock)->recurse_cpu = cpu;            \
         }                                          \
+        /* We support only fairly shallow recursion, else the counter overflows. */ \
+        ASSERT((_lock)->recurse_cnt < 0xfu);       \
         (_lock)->recurse_cnt++;                    \
     } while ( 0 )
 
@@ -71,7 +75,7 @@ static inline int _raw_spin_trylock(spin
     do {                                           \
         if ( likely(--(_lock)->recurse_cnt == 0) ) \
         {                                          \
-            (_lock)->recurse_cpu = -1;             \
+            (_lock)->recurse_cpu = 0xfffu;         \
             spin_unlock(_lock);                    \
         }                                          \
     } while ( 0 )
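
For context beyond the diff: recurse_cpu becomes a 12-bit field, which is why
0xfff replaces -1 as the "no owner" sentinel and why BUILD_BUG_ON now caps
NR_CPUS at 0xfff (4095); recurse_cnt becomes a 4-bit field, so the new ASSERT
limits recursion depth to 15. The standalone sketch below illustrates how the
packing behaves; it is not the Xen implementation. this_cpu(), lock_recursive()
and unlock_recursive() are hypothetical stand-ins for smp_processor_id() and
the _raw_spin_{lock,unlock}_recursive macros, and plain assert() stands in for
Xen's ASSERT().

/* Minimal sketch of the 12-bit/4-bit recursive-lock packing (not Xen code). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RECURSE_CPU_NONE 0xfffu      /* sentinel: no CPU owns the lock */

typedef struct {
    volatile int16_t lock;           /* 1 = free, <= 0 = held, as in the patch */
    uint16_t recurse_cpu:12;         /* owner CPU id; 0xfff means "none" */
    uint16_t recurse_cnt:4;          /* recursion depth, at most 15 */
} sketch_spinlock_t;

#define SKETCH_SPIN_LOCK_UNLOCKED { 1, RECURSE_CPU_NONE, 0 }

/* Illustrative stand-in for smp_processor_id(). */
static unsigned int this_cpu(void) { return 3; }

static void lock_recursive(sketch_spinlock_t *l)
{
    unsigned int cpu = this_cpu();

    if ( l->recurse_cpu != cpu )
    {
        /* A real implementation spins here; this single-threaded
         * sketch just marks the lock as held. */
        l->lock = 0;
        l->recurse_cpu = cpu;
    }
    assert(l->recurse_cnt < 0xfu);   /* depth 16 would wrap the 4-bit field */
    l->recurse_cnt++;
}

static void unlock_recursive(sketch_spinlock_t *l)
{
    if ( --l->recurse_cnt == 0 )
    {
        l->recurse_cpu = RECURSE_CPU_NONE;
        l->lock = 1;
    }
}

int main(void)
{
    sketch_spinlock_t l = SKETCH_SPIN_LOCK_UNLOCKED;

    lock_recursive(&l);
    lock_recursive(&l);              /* same CPU re-enters without deadlock */
    printf("owner=%u depth=%u\n",
           (unsigned)l.recurse_cpu, (unsigned)l.recurse_cnt);   /* 3, 2 */
    unlock_recursive(&l);
    unlock_recursive(&l);            /* depth hits 0: lock fully released */
    printf("owner=0x%x depth=%u\n",
           (unsigned)l.recurse_cpu, (unsigned)l.recurse_cnt);   /* 0xfff, 0 */
    return 0;
}

One design note worth making explicit: packing both fields into a single u16
keeps the structure the same size as the old s8 + u8 pair (two bytes), so
spinlock_t does not grow despite the much larger CPU id range.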