diff --git a/glibc.changes b/glibc.changes
index a4d0592..6231aa2 100644
--- a/glibc.changes
+++ b/glibc.changes
@@ -1,3 +1,11 @@
+-------------------------------------------------------------------
+Wed Nov  7 11:09:05 UTC 2018 - schwab@suse.de
+
+- pthread-mutex-lock-elision-race.patch: Fix race in pthread_mutex_lock
+  while promoting to PTHREAD_MUTEX_ELISION_NP (BZ #23275)
+- x86-haswell-string-flags.patch: x86: Fix Haswell CPU string flags (BZ
+  #23709)
+
 -------------------------------------------------------------------
 Wed Sep 26 09:41:59 UTC 2018 - schwab@suse.de
 
diff --git a/glibc.spec b/glibc.spec
index fcafa51..22db61c 100644
--- a/glibc.spec
+++ b/glibc.spec
@@ -12,7 +12,7 @@
 # license that conforms to the Open Source Definition (Version 1.9)
 # published by the Open Source Initiative.
 
-# Please submit bugfixes or comments via http://bugs.opensuse.org/
+# Please submit bugfixes or comments via https://bugs.opensuse.org/
 #
 
@@ -286,6 +286,10 @@ Patch1006: spawni-maybe-script-execute.patch
 Patch1007: gethostid-gethostbyname-failure.patch
 # PATCH-FIX-UPSTREAM Fix strstr bug with huge needles (BZ #23637)
 Patch1008: strstr-huge-needle.patch
+# PATCH-FIX-UPSTREAM Fix race in pthread_mutex_lock while promoting to PTHREAD_MUTEX_ELISION_NP (BZ #23275)
+Patch1009: pthread-mutex-lock-elision-race.patch
+# PATCH-FIX-UPSTREAM x86: Fix Haswell CPU string flags (BZ #23709)
+Patch1010: x86-haswell-string-flags.patch
 
 ###
 # Patches awaiting upstream approval
@@ -504,6 +508,8 @@ makedb: A program to create a database for nss
 %patch1006 -p1
 %patch1007 -p1
 %patch1008 -p1
+%patch1009 -p1
+%patch1010 -p1
 %patch2000 -p1
 %patch2004 -p1
 
diff --git a/pthread-mutex-lock-elision-race.patch b/pthread-mutex-lock-elision-race.patch
new file mode 100644
index 0000000..3b7e084
--- /dev/null
+++ b/pthread-mutex-lock-elision-race.patch
@@ -0,0 +1,717 @@
+2018-10-17  Stefan Liebler  <stli@linux.ibm.com>
+
+        [BZ #23275]
+        * nptl/tst-mutex10.c: New File.
+        * nptl/Makefile (tests): Add tst-mutex10.
+        (tst-mutex10-ENV): New variable.
+        * sysdeps/unix/sysv/linux/s390/force-elision.h (FORCE_ELISION):
+        Ensure that elision path is used if elision is available.
+        * sysdeps/unix/sysv/linux/powerpc/force-elision.h (FORCE_ELISION):
+        Likewise.
+        * sysdeps/unix/sysv/linux/x86/force-elision.h (FORCE_ELISION):
+        Likewise.
+        * nptl/pthreadP.h (PTHREAD_MUTEX_TYPE, PTHREAD_MUTEX_TYPE_ELISION)
+        (PTHREAD_MUTEX_PSHARED): Use atomic_load_relaxed.
+        * nptl/pthread_mutex_consistent.c (pthread_mutex_consistent): Likewise.
+        * nptl/pthread_mutex_getprioceiling.c (pthread_mutex_getprioceiling):
+        Likewise.
+        * nptl/pthread_mutex_lock.c (__pthread_mutex_lock_full)
+        (__pthread_mutex_cond_lock_adjust): Likewise.
+        * nptl/pthread_mutex_setprioceiling.c (pthread_mutex_setprioceiling):
+        Likewise.
+        * nptl/pthread_mutex_timedlock.c (__pthread_mutex_timedlock): Likewise.
+        * nptl/pthread_mutex_trylock.c (__pthread_mutex_trylock): Likewise.
+        * nptl/pthread_mutex_unlock.c (__pthread_mutex_unlock_full): Likewise.
+        * sysdeps/nptl/bits/thread-shared-types.h (struct __pthread_mutex_s):
+        Add comments.
+        * nptl/pthread_mutex_destroy.c (__pthread_mutex_destroy):
+        Use atomic_load_relaxed and atomic_store_relaxed.
+        * nptl/pthread_mutex_init.c (__pthread_mutex_init):
+        Use atomic_store_relaxed.
+ 
+Index: glibc-2.28/nptl/Makefile
+===================================================================
+--- glibc-2.28.orig/nptl/Makefile
++++ glibc-2.28/nptl/Makefile
+@@ -241,9 +241,9 @@ LDLIBS-tst-minstack-throw = -lstdc++
+ 
+ tests = tst-attr1 tst-attr2 tst-attr3 tst-default-attr \
+         tst-mutex1 tst-mutex2 tst-mutex3 tst-mutex4 tst-mutex5 tst-mutex6 \
+-        tst-mutex7 tst-mutex9 tst-mutex5a tst-mutex7a tst-mutex7robust \
+-        tst-mutexpi1 tst-mutexpi2 tst-mutexpi3 tst-mutexpi4 tst-mutexpi5 \
+-        tst-mutexpi5a tst-mutexpi6 tst-mutexpi7 tst-mutexpi7a \
++        tst-mutex7 tst-mutex9 tst-mutex10 tst-mutex5a tst-mutex7a \
++        tst-mutex7robust tst-mutexpi1 tst-mutexpi2 tst-mutexpi3 tst-mutexpi4 \
++        tst-mutexpi5 tst-mutexpi5a tst-mutexpi6 tst-mutexpi7 tst-mutexpi7a \
+         tst-mutexpi9 \
+         tst-spin1 tst-spin2 tst-spin3 tst-spin4 \
+         tst-cond1 tst-cond2 tst-cond3 tst-cond4 tst-cond5 tst-cond6 tst-cond7 \
+@@ -709,6 +709,8 @@ endif
+ 
+ $(objpfx)tst-compat-forwarder: $(objpfx)tst-compat-forwarder-mod.so
+ 
++tst-mutex10-ENV = GLIBC_TUNABLES=glibc.elision.enable=1
++
+ # The tests here better do not run in parallel
+ ifneq ($(filter %tests,$(MAKECMDGOALS)),)
+ .NOTPARALLEL:
+Index: glibc-2.28/nptl/pthreadP.h
+===================================================================
+--- glibc-2.28.orig/nptl/pthreadP.h
++++ glibc-2.28/nptl/pthreadP.h
+@@ -110,19 +110,23 @@ enum
+ };
+ #define PTHREAD_MUTEX_PSHARED_BIT 128
+ 
++/* See concurrency notes regarding __kind in struct __pthread_mutex_s
++   in sysdeps/nptl/bits/thread-shared-types.h.  */
+ #define PTHREAD_MUTEX_TYPE(m) \
+-  ((m)->__data.__kind & 127)
++  (atomic_load_relaxed (&((m)->__data.__kind)) & 127)
+ /* Don't include NO_ELISION, as that type is always the same
+    as the underlying lock type.  */
+ #define PTHREAD_MUTEX_TYPE_ELISION(m) \
+-  ((m)->__data.__kind & (127|PTHREAD_MUTEX_ELISION_NP))
++  (atomic_load_relaxed (&((m)->__data.__kind)) \
++   & (127 | PTHREAD_MUTEX_ELISION_NP))
+ 
+ #if LLL_PRIVATE == 0 && LLL_SHARED == 128
+ # define PTHREAD_MUTEX_PSHARED(m) \
+-  ((m)->__data.__kind & 128)
++  (atomic_load_relaxed (&((m)->__data.__kind)) & 128)
+ #else
+ # define PTHREAD_MUTEX_PSHARED(m) \
+-  (((m)->__data.__kind & 128) ? LLL_SHARED : LLL_PRIVATE)
++  ((atomic_load_relaxed (&((m)->__data.__kind)) & 128) \
++   ? LLL_SHARED : LLL_PRIVATE)
+ #endif
+ 
+ /* The kernel when waking robust mutexes on exit never uses
+Index: glibc-2.28/nptl/pthread_mutex_consistent.c
+===================================================================
+--- glibc-2.28.orig/nptl/pthread_mutex_consistent.c
++++ glibc-2.28/nptl/pthread_mutex_consistent.c
+@@ -23,8 +23,11 @@
+ int
+ pthread_mutex_consistent (pthread_mutex_t *mutex)
+ {
+-  /* Test whether this is a robust mutex with a dead owner.  */
+-  if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0
++  /* Test whether this is a robust mutex with a dead owner.
++     See concurrency notes regarding __kind in struct __pthread_mutex_s
++     in sysdeps/nptl/bits/thread-shared-types.h.  */
++  if ((atomic_load_relaxed (&(mutex->__data.__kind))
++       & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0
+       || mutex->__data.__owner != PTHREAD_MUTEX_INCONSISTENT)
+     return EINVAL;
+ 
+Index: glibc-2.28/nptl/pthread_mutex_destroy.c
+===================================================================
+--- glibc-2.28.orig/nptl/pthread_mutex_destroy.c
++++ glibc-2.28/nptl/pthread_mutex_destroy.c
+@@ -27,12 +27,17 @@ __pthread_mutex_destroy (pthread_mutex_t
+ {
+   LIBC_PROBE (mutex_destroy, 1, mutex);
+ 
+-  if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0
++  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
++     in sysdeps/nptl/bits/thread-shared-types.h.  */
++  if ((atomic_load_relaxed (&(mutex->__data.__kind))
++       & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0
+       && mutex->__data.__nusers != 0)
+     return EBUSY;
+ 
+-  /* Set to an invalid value.  */
+-  mutex->__data.__kind = -1;
++  /* Set to an invalid value.  Relaxed MO is enough as it is undefined behavior
++     if the mutex is used after it has been destroyed.  But you can reinitialize
++     it with pthread_mutex_init.  */
++  atomic_store_relaxed (&(mutex->__data.__kind), -1);
+ 
+   return 0;
+ }
+Index: glibc-2.28/nptl/pthread_mutex_getprioceiling.c
+===================================================================
+--- glibc-2.28.orig/nptl/pthread_mutex_getprioceiling.c
++++ glibc-2.28/nptl/pthread_mutex_getprioceiling.c
+@@ -24,7 +24,9 @@
+ int
+ pthread_mutex_getprioceiling (const pthread_mutex_t *mutex, int *prioceiling)
+ {
+-  if (__builtin_expect ((mutex->__data.__kind
++  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
++     in sysdeps/nptl/bits/thread-shared-types.h.  */
++  if (__builtin_expect ((atomic_load_relaxed (&(mutex->__data.__kind))
+                          & PTHREAD_MUTEX_PRIO_PROTECT_NP) == 0, 0))
+     return EINVAL;
+ 
+Index: glibc-2.28/nptl/pthread_mutex_init.c
+===================================================================
+--- glibc-2.28.orig/nptl/pthread_mutex_init.c
++++ glibc-2.28/nptl/pthread_mutex_init.c
+@@ -101,7 +101,7 @@ __pthread_mutex_init (pthread_mutex_t *m
+   memset (mutex, '\0', __SIZEOF_PTHREAD_MUTEX_T);
+ 
+   /* Copy the values from the attribute.  */
+-  mutex->__data.__kind = imutexattr->mutexkind & ~PTHREAD_MUTEXATTR_FLAG_BITS;
++  int mutex_kind = imutexattr->mutexkind & ~PTHREAD_MUTEXATTR_FLAG_BITS;
+ 
+   if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST) != 0)
+     {
+@@ -111,17 +111,17 @@ __pthread_mutex_init (pthread_mutex_t *m
+       return ENOTSUP;
+ #endif
+ 
+-      mutex->__data.__kind |= PTHREAD_MUTEX_ROBUST_NORMAL_NP;
++      mutex_kind |= PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+     }
+ 
+   switch (imutexattr->mutexkind & PTHREAD_MUTEXATTR_PROTOCOL_MASK)
+     {
+     case PTHREAD_PRIO_INHERIT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
+-      mutex->__data.__kind |= PTHREAD_MUTEX_PRIO_INHERIT_NP;
++      mutex_kind |= PTHREAD_MUTEX_PRIO_INHERIT_NP;
+       break;
+ 
+     case PTHREAD_PRIO_PROTECT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
+-      mutex->__data.__kind |= PTHREAD_MUTEX_PRIO_PROTECT_NP;
++      mutex_kind |= PTHREAD_MUTEX_PRIO_PROTECT_NP;
+ 
+       int ceiling = (imutexattr->mutexkind
+                      & PTHREAD_MUTEXATTR_PRIO_CEILING_MASK)
+@@ -145,7 +145,11 @@ __pthread_mutex_init (pthread_mutex_t *m
+      FUTEX_PRIVATE_FLAG FUTEX_WAKE.  */
+   if ((imutexattr->mutexkind & (PTHREAD_MUTEXATTR_FLAG_PSHARED
+                                 | PTHREAD_MUTEXATTR_FLAG_ROBUST)) != 0)
+-    mutex->__data.__kind |= PTHREAD_MUTEX_PSHARED_BIT;
++    mutex_kind |= PTHREAD_MUTEX_PSHARED_BIT;
++
++  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
++     in sysdeps/nptl/bits/thread-shared-types.h.  */
++  atomic_store_relaxed (&(mutex->__data.__kind), mutex_kind);
+ 
+   /* Default values: mutex not used yet.  */
+   // mutex->__count = 0;        already done by memset
+Index: glibc-2.28/nptl/pthread_mutex_lock.c
+===================================================================
+--- glibc-2.28.orig/nptl/pthread_mutex_lock.c
++++ glibc-2.28/nptl/pthread_mutex_lock.c
+@@ -62,6 +62,8 @@ static int __pthread_mutex_lock_full (pt
+ int
+ __pthread_mutex_lock (pthread_mutex_t *mutex)
+ {
++  /* See concurrency notes regarding mutex type which is loaded from __kind
++     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
+   unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);
+ 
+   LIBC_PROBE (mutex_entry, 1, mutex);
+@@ -350,8 +352,14 @@ __pthread_mutex_lock_full (pthread_mutex
+     case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
+     case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
+       {
+-        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
+-        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
++        int kind, robust;
++        {
++          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
++             in sysdeps/nptl/bits/thread-shared-types.h.  */
++          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
++          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
++          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
++        }
+ 
+         if (robust)
+           {
+@@ -502,7 +510,10 @@ __pthread_mutex_lock_full (pthread_mutex
+     case PTHREAD_MUTEX_PP_NORMAL_NP:
+     case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
+       {
+-        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
++        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
++           in sysdeps/nptl/bits/thread-shared-types.h.  */
++        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
++          & PTHREAD_MUTEX_KIND_MASK_NP;
+ 
+         oldval = mutex->__data.__lock;
+ 
+@@ -607,15 +618,18 @@ hidden_def (__pthread_mutex_lock)
+ void
+ __pthread_mutex_cond_lock_adjust (pthread_mutex_t *mutex)
+ {
+-  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
+-  assert ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
+-  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);
++  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
++     in sysdeps/nptl/bits/thread-shared-types.h.  */
++  int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
++  assert ((mutex_kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
++  assert ((mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
++  assert ((mutex_kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);
+ 
+   /* Record the ownership.  */
+   pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+   mutex->__data.__owner = id;
+ 
+-  if (mutex->__data.__kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
++  if (mutex_kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
+     ++mutex->__data.__count;
+ }
+ #endif
+Index: glibc-2.28/nptl/pthread_mutex_setprioceiling.c
+===================================================================
+--- glibc-2.28.orig/nptl/pthread_mutex_setprioceiling.c
++++ glibc-2.28/nptl/pthread_mutex_setprioceiling.c
+@@ -27,9 +27,10 @@ int
+ pthread_mutex_setprioceiling (pthread_mutex_t *mutex, int prioceiling,
+                               int *old_ceiling)
+ {
+-  /* The low bits of __kind aren't ever changed after pthread_mutex_init,
+-     so we don't need a lock yet.  */
+-  if ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_PROTECT_NP) == 0
++  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
++     in sysdeps/nptl/bits/thread-shared-types.h.  */
++  if ((atomic_load_relaxed (&(mutex->__data.__kind))
++       & PTHREAD_MUTEX_PRIO_PROTECT_NP) == 0
+     return EINVAL;
+ 
+   /* See __init_sched_fifo_prio.  */
+Index: glibc-2.28/nptl/pthread_mutex_timedlock.c
+===================================================================
+--- glibc-2.28.orig/nptl/pthread_mutex_timedlock.c
++++ glibc-2.28/nptl/pthread_mutex_timedlock.c
+@@ -53,6 +53,8 @@ __pthread_mutex_timedlock (pthread_mutex
+   /* We must not check ABSTIME here.  If the thread does not block
+      abstime must not be checked for a valid value.  */
+ 
++  /* See concurrency notes regarding mutex type which is loaded from __kind
++     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
+   switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
+                             PTHREAD_MUTEX_TIMED_NP))
+     {
+@@ -338,8 +340,14 @@ __pthread_mutex_timedlock (pthread_mutex
+     case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
+     case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
+       {
+-        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
+-        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
++        int kind, robust;
++        {
++          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
++             in sysdeps/nptl/bits/thread-shared-types.h.  */
++          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
++          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
++          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
++        }
+ 
+         if (robust)
+           {
+@@ -509,7 +517,10 @@ __pthread_mutex_timedlock (pthread_mutex
+     case PTHREAD_MUTEX_PP_NORMAL_NP:
+     case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
+       {
+-        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
++        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
++           in sysdeps/nptl/bits/thread-shared-types.h.  */
++        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
++          & PTHREAD_MUTEX_KIND_MASK_NP;
+ 
+         oldval = mutex->__data.__lock;
+ 
+Index: glibc-2.28/nptl/pthread_mutex_trylock.c
+===================================================================
+--- glibc-2.28.orig/nptl/pthread_mutex_trylock.c
++++ glibc-2.28/nptl/pthread_mutex_trylock.c
+@@ -36,6 +36,8 @@ __pthread_mutex_trylock (pthread_mutex_t
+   int oldval;
+   pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+ 
++  /* See concurrency notes regarding mutex type which is loaded from __kind
++     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
+   switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
+                             PTHREAD_MUTEX_TIMED_NP))
+     {
+@@ -199,8 +201,14 @@ __pthread_mutex_trylock (pthread_mutex_t
+     case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
+     case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
+       {
+-        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
+-        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
++        int kind, robust;
++        {
++          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
++             in sysdeps/nptl/bits/thread-shared-types.h.  */
++          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
++          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
++          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
++        }
+ 
+         if (robust)
+           /* Note: robust PI futexes are signaled by setting bit 0.  */
+@@ -325,7 +333,10 @@ __pthread_mutex_trylock (pthread_mutex_t
+     case PTHREAD_MUTEX_PP_NORMAL_NP:
+     case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
+       {
+-        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
++        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
++           in sysdeps/nptl/bits/thread-shared-types.h.  */
++        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
++          & PTHREAD_MUTEX_KIND_MASK_NP;
+ 
+         oldval = mutex->__data.__lock;
+ 
+Index: glibc-2.28/nptl/pthread_mutex_unlock.c
+===================================================================
+--- glibc-2.28.orig/nptl/pthread_mutex_unlock.c
++++ glibc-2.28/nptl/pthread_mutex_unlock.c
+@@ -35,6 +35,8 @@ int
+ attribute_hidden
+ __pthread_mutex_unlock_usercnt (pthread_mutex_t *mutex, int decr)
+ {
++  /* See concurrency notes regarding mutex type which is loaded from __kind
++     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
+   int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);
+   if (__builtin_expect (type &
+                 ~(PTHREAD_MUTEX_KIND_MASK_NP|PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
+@@ -222,13 +224,19 @@ __pthread_mutex_unlock_full (pthread_mut
+       /* If the previous owner died and the caller did not succeed in
+          making the state consistent, mark the mutex as unrecoverable
+          and make all waiters.  */
+-      if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
++      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
++         in sysdeps/nptl/bits/thread-shared-types.h.  */
++      if ((atomic_load_relaxed (&(mutex->__data.__kind))
++           & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
+           && __builtin_expect (mutex->__data.__owner
+                                == PTHREAD_MUTEX_INCONSISTENT, 0))
+       pi_notrecoverable:
+        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;
+ 
+-      if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
++      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
++         in sysdeps/nptl/bits/thread-shared-types.h.  */
++      if ((atomic_load_relaxed (&(mutex->__data.__kind))
++           & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
+         {
+         continue_pi_robust:
+           /* Remove mutex from the list.
+@@ -251,7 +259,10 @@ __pthread_mutex_unlock_full (pthread_mut
+       /* Unlock.  Load all necessary mutex data before releasing the mutex
+          to not violate the mutex destruction requirements (see
+          lll_unlock).  */
+-      int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
++      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
++         in sysdeps/nptl/bits/thread-shared-types.h.  */
++      int robust = atomic_load_relaxed (&(mutex->__data.__kind))
++        & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+       private = (robust
+                  ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
+                  : PTHREAD_MUTEX_PSHARED (mutex));
+Index: glibc-2.28/nptl/tst-mutex10.c
+===================================================================
+--- /dev/null
++++ glibc-2.28/nptl/tst-mutex10.c
+@@ -0,0 +1,109 @@
++/* Testing race while enabling lock elision.
++   Copyright (C) 2018 Free Software Foundation, Inc.
++   This file is part of the GNU C Library.
++
++   The GNU C Library is free software; you can redistribute it and/or
++   modify it under the terms of the GNU Lesser General Public
++   License as published by the Free Software Foundation; either
++   version 2.1 of the License, or (at your option) any later version.
++
++   The GNU C Library is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++   Lesser General Public License for more details.
++
++   You should have received a copy of the GNU Lesser General Public
++   License along with the GNU C Library; if not, see
++   <http://www.gnu.org/licenses/>.  */
++#include <stdio.h>
++#include <stdlib.h>
++#include <stdint.h>
++#include <pthread.h>
++#include <unistd.h>
++#include <getopt.h>
++#include <support/support.h>
++#include <support/xthread.h>
++
++static pthread_barrier_t barrier;
++static pthread_mutex_t mutex;
++static long long int iteration_count = 1000000;
++static unsigned int thread_count = 3;
++
++static void *
++thr_func (void *arg)
++{
++  long long int i;
++  for (i = 0; i < iteration_count; i++)
++    {
++      if ((uintptr_t) arg == 0)
++        {
++          xpthread_mutex_destroy (&mutex);
++          xpthread_mutex_init (&mutex, NULL);
++        }
++
++      xpthread_barrier_wait (&barrier);
++
++      /* Test if enabling lock elision works if it is enabled concurrently.
++         There was a race in FORCE_ELISION macro which leads to either
++         pthread_mutex_destroy returning EBUSY as the owner was recorded
++         by pthread_mutex_lock - in "normal mutex" code path - but was not
++         resetted in pthread_mutex_unlock - in "elision" code path.
++         Or it leads to the assertion in nptl/pthread_mutex_lock.c:
++         assert (mutex->__data.__owner == 0);
++         Please ensure that the test is run with lock elision:
++         export GLIBC_TUNABLES=glibc.elision.enable=1  */
++      xpthread_mutex_lock (&mutex);
++      xpthread_mutex_unlock (&mutex);
++
++      xpthread_barrier_wait (&barrier);
++    }
++  return NULL;
++}
++
++static int
++do_test (void)
++{
++  unsigned int i;
++  printf ("Starting %d threads to run %lld iterations.\n",
++          thread_count, iteration_count);
++
++  pthread_t *threads = xmalloc (thread_count * sizeof (pthread_t));
++  xpthread_barrier_init (&barrier, NULL, thread_count);
++  xpthread_mutex_init (&mutex, NULL);
++
++  for (i = 0; i < thread_count; i++)
++    threads[i] = xpthread_create (NULL, thr_func, (void *) (uintptr_t) i);
++
++  for (i = 0; i < thread_count; i++)
++    xpthread_join (threads[i]);
++
++  xpthread_barrier_destroy (&barrier);
++  free (threads);
++
++  return EXIT_SUCCESS;
++}
++
++#define OPT_ITERATIONS 10000
++#define OPT_THREADS 10001
++#define CMDLINE_OPTIONS \
++  { "iterations", required_argument, NULL, OPT_ITERATIONS }, \
++  { "threads", required_argument, NULL, OPT_THREADS },
++static void
++cmdline_process (int c)
++{
++  long long int arg = strtoll (optarg, NULL, 0);
++  switch (c)
++    {
++    case OPT_ITERATIONS:
++      if (arg > 0)
++        iteration_count = arg;
++      break;
++    case OPT_THREADS:
++      if (arg > 0 && arg < 100)
++        thread_count = arg;
++      break;
++    }
++}
++#define CMDLINE_PROCESS cmdline_process
++#define TIMEOUT 50
++#include <support/test-driver.c>
+Index: glibc-2.28/sysdeps/nptl/bits/thread-shared-types.h
+===================================================================
+--- glibc-2.28.orig/sysdeps/nptl/bits/thread-shared-types.h
++++ glibc-2.28/sysdeps/nptl/bits/thread-shared-types.h
+@@ -124,7 +124,27 @@ struct __pthread_mutex_s
+   unsigned int __nusers;
+ #endif
+   /* KIND must stay at this position in the structure to maintain
+-     binary compatibility with static initializers.  */
++     binary compatibility with static initializers.
++
++     Concurrency notes:
++     The __kind of a mutex is initialized either by the static
++     PTHREAD_MUTEX_INITIALIZER or by a call to pthread_mutex_init.
++
++     After a mutex has been initialized, the __kind of a mutex is usually not
++     changed.  BUT it can be set to -1 in pthread_mutex_destroy or elision can
++     be enabled.  This is done concurrently in the pthread_mutex_*lock functions
++     by using the macro FORCE_ELISION.  This macro is only defined for
++     architectures which supports lock elision.
++
++     For elision, there are the flags PTHREAD_MUTEX_ELISION_NP and
++     PTHREAD_MUTEX_NO_ELISION_NP which can be set in addition to the already set
++     type of a mutex.
++     Before a mutex is initialized, only PTHREAD_MUTEX_NO_ELISION_NP can be set
++     with pthread_mutexattr_settype.
++     After a mutex has been initialized, the functions pthread_mutex_*lock can
++     enable elision - if the mutex-type and the machine supports it - by setting
++     the flag PTHREAD_MUTEX_ELISION_NP.  This is done concurrently.  Afterwards
++     the lock / unlock functions are using specific elision code-paths.  */
+   int __kind;
+   __PTHREAD_COMPAT_PADDING_MID
+ #if __PTHREAD_MUTEX_NUSERS_AFTER_KIND
+Index: glibc-2.28/sysdeps/unix/sysv/linux/powerpc/force-elision.h
+===================================================================
+--- glibc-2.28.orig/sysdeps/unix/sysv/linux/powerpc/force-elision.h
++++ glibc-2.28/sysdeps/unix/sysv/linux/powerpc/force-elision.h
+@@ -18,9 +18,45 @@
+ 
+ /* Automatically enable elision for existing user lock kinds.  */
+ #define FORCE_ELISION(m, s) \
+-  if (__pthread_force_elision \
+-      && (m->__data.__kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0) \
++  if (__pthread_force_elision) \
+     { \
+-      mutex->__data.__kind |= PTHREAD_MUTEX_ELISION_NP; \
+-      s; \
++      /* See concurrency notes regarding __kind in \
++         struct __pthread_mutex_s in \
++         sysdeps/nptl/bits/thread-shared-types.h. \
++ \
++         There are the following cases for the kind of a mutex \
++         (The mask PTHREAD_MUTEX_ELISION_FLAGS_NP covers the flags \
++         PTHREAD_MUTEX_ELISION_NP and PTHREAD_MUTEX_NO_ELISION_NP where \
++         only one of both flags can be set): \
++         - both flags are not set: \
++           This is the first lock operation for this mutex.  Enable \
++           elision as it is not enabled so far. \
++           Note: It can happen that multiple threads are calling e.g. \
++           pthread_mutex_lock at the same time as the first lock \
++           operation for this mutex.  Then elision is enabled for this \
++           mutex by multiple threads.  Storing with relaxed MO is enough \
++           as all threads will store the same new value for the kind of \
++           the mutex.  But we have to ensure that we always use the \
++           elision path regardless if this thread has enabled elision or \
++           another one. \
++ \
++         - PTHREAD_MUTEX_ELISION_NP flag is set: \
++           Elision was already enabled for this mutex by a previous lock \
++           operation.  See case above.  Just use the elision path. \
++ \
++         - PTHREAD_MUTEX_NO_ELISION_NP flag is set: \
++           Elision was explicitly disabled by pthread_mutexattr_settype. \
++           Do not use the elision path. \
++           Note: The flag PTHREAD_MUTEX_NO_ELISION_NP will never be \
++           changed after mutex initialization.  */ \
++      int mutex_kind = atomic_load_relaxed (&((m)->__data.__kind)); \
++      if ((mutex_kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0) \
++        { \
++          mutex_kind |= PTHREAD_MUTEX_ELISION_NP; \
++          atomic_store_relaxed (&((m)->__data.__kind), mutex_kind); \
++        } \
++      if ((mutex_kind & PTHREAD_MUTEX_ELISION_NP) != 0) \
++        { \
++          s; \
++        } \
+     }
+Index: glibc-2.28/sysdeps/unix/sysv/linux/s390/force-elision.h
+===================================================================
+--- glibc-2.28.orig/sysdeps/unix/sysv/linux/s390/force-elision.h
++++ glibc-2.28/sysdeps/unix/sysv/linux/s390/force-elision.h
+@@ -18,9 +18,45 @@
+ 
+ /* Automatically enable elision for existing user lock kinds.  */
+ #define FORCE_ELISION(m, s) \
+-  if (__pthread_force_elision \
+-      && (m->__data.__kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0) \
++  if (__pthread_force_elision) \
+     { \
+-      mutex->__data.__kind |= PTHREAD_MUTEX_ELISION_NP; \
+-      s; \
++      /* See concurrency notes regarding __kind in \
++         struct __pthread_mutex_s in \
++         sysdeps/nptl/bits/thread-shared-types.h. \
++ \
++         There are the following cases for the kind of a mutex \
++         (The mask PTHREAD_MUTEX_ELISION_FLAGS_NP covers the flags \
++         PTHREAD_MUTEX_ELISION_NP and PTHREAD_MUTEX_NO_ELISION_NP where \
++         only one of both flags can be set): \
++         - both flags are not set: \
++           This is the first lock operation for this mutex.  Enable \
++           elision as it is not enabled so far. \
++           Note: It can happen that multiple threads are calling e.g. \
++           pthread_mutex_lock at the same time as the first lock \
++           operation for this mutex.  Then elision is enabled for this \
++           mutex by multiple threads.  Storing with relaxed MO is enough \
++           as all threads will store the same new value for the kind of \
++           the mutex.  But we have to ensure that we always use the \
++           elision path regardless if this thread has enabled elision or \
++           another one. \
++ \
++         - PTHREAD_MUTEX_ELISION_NP flag is set: \
++           Elision was already enabled for this mutex by a previous lock \
++           operation.  See case above.  Just use the elision path. \
++ \
++         - PTHREAD_MUTEX_NO_ELISION_NP flag is set: \
++           Elision was explicitly disabled by pthread_mutexattr_settype. \
++           Do not use the elision path. \
++           Note: The flag PTHREAD_MUTEX_NO_ELISION_NP will never be \
++           changed after mutex initialization.  */ \
++      int mutex_kind = atomic_load_relaxed (&((m)->__data.__kind)); \
++      if ((mutex_kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0) \
++        { \
++          mutex_kind |= PTHREAD_MUTEX_ELISION_NP; \
++          atomic_store_relaxed (&((m)->__data.__kind), mutex_kind); \
++        } \
++      if ((mutex_kind & PTHREAD_MUTEX_ELISION_NP) != 0) \
++        { \
++          s; \
++        } \
+     }
+Index: glibc-2.28/sysdeps/unix/sysv/linux/x86/force-elision.h
+===================================================================
+--- glibc-2.28.orig/sysdeps/unix/sysv/linux/x86/force-elision.h
++++ glibc-2.28/sysdeps/unix/sysv/linux/x86/force-elision.h
+@@ -18,9 +18,45 @@
+ 
+ /* Automatically enable elision for existing user lock kinds.  */
+ #define FORCE_ELISION(m, s) \
+-  if (__pthread_force_elision \
+-      && (m->__data.__kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0) \
++  if (__pthread_force_elision) \
+     { \
+-      mutex->__data.__kind |= PTHREAD_MUTEX_ELISION_NP; \
+-      s; \
++      /* See concurrency notes regarding __kind in \
++         struct __pthread_mutex_s in \
++         sysdeps/nptl/bits/thread-shared-types.h. \
++ \
++         There are the following cases for the kind of a mutex \
++         (The mask PTHREAD_MUTEX_ELISION_FLAGS_NP covers the flags \
++         PTHREAD_MUTEX_ELISION_NP and PTHREAD_MUTEX_NO_ELISION_NP where \
++         only one of both flags can be set): \
++         - both flags are not set: \
++           This is the first lock operation for this mutex.  Enable \
++           elision as it is not enabled so far. \
++           Note: It can happen that multiple threads are calling e.g. \
++           pthread_mutex_lock at the same time as the first lock \
++           operation for this mutex.  Then elision is enabled for this \
++           mutex by multiple threads.  Storing with relaxed MO is enough \
++           as all threads will store the same new value for the kind of \
++           the mutex.  But we have to ensure that we always use the \
++           elision path regardless if this thread has enabled elision or \
++           another one. \
++ \
++         - PTHREAD_MUTEX_ELISION_NP flag is set: \
++           Elision was already enabled for this mutex by a previous lock \
++           operation.  See case above.  Just use the elision path. \
++ \
++         - PTHREAD_MUTEX_NO_ELISION_NP flag is set: \
++           Elision was explicitly disabled by pthread_mutexattr_settype. \
++           Do not use the elision path. \
++           Note: The flag PTHREAD_MUTEX_NO_ELISION_NP will never be \
++           changed after mutex initialization.  */ \
++      int mutex_kind = atomic_load_relaxed (&((m)->__data.__kind)); \
++      if ((mutex_kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0) \
++        { \
++          mutex_kind |= PTHREAD_MUTEX_ELISION_NP; \
++          atomic_store_relaxed (&((m)->__data.__kind), mutex_kind); \
++        } \
++      if ((mutex_kind & PTHREAD_MUTEX_ELISION_NP) != 0) \
++        { \
++          s; \
++        } \
+     }
diff --git a/x86-haswell-string-flags.patch b/x86-haswell-string-flags.patch
new file mode 100644
index 0000000..cfa35a6
--- /dev/null
+++ b/x86-haswell-string-flags.patch
@@ -0,0 +1,24 @@
+2018-10-23  Adhemerval Zanella  <adhemerval.zanella@linaro.org>
+
+        [BZ #23709]
+        * sysdeps/x86/cpu-features.c (init_cpu_features): Set TSX bits
+        independently of other flags.
+
+Index: glibc-2.28/sysdeps/x86/cpu-features.c
+===================================================================
+--- glibc-2.28.orig/sysdeps/x86/cpu-features.c
++++ glibc-2.28/sysdeps/x86/cpu-features.c
+@@ -316,7 +316,13 @@ init_cpu_features (struct cpu_features *
+               | bit_arch_Fast_Unaligned_Copy
+               | bit_arch_Prefer_PMINUB_for_stringop);
+           break;
++        }
+ 
++      /* Disable TSX on some Haswell processors to avoid TSX on kernels that
++         weren't updated with the latest microcode package (which disables
++         broken feature by default).  */
++      switch (model)
++        {
+         case 0x3f:
+           /* Xeon E7 v3 with stepping >= 4 has working TSX.  */
+           if (stepping >= 4)