Andreas Schwab
dcc3753b29
- new patch [BZ #18743] PowerPC: Fix a race condition when eliding a lock 0001-powerpc-Fix-a-race-condition-when-eliding-a-lock-20150730.patch related to openSUSE boo#923486 OBS-URL: https://build.opensuse.org/request/show/336473 OBS-URL: https://build.opensuse.org/package/show/Base:System/glibc?expand=0&rev=417
153 lines
4.9 KiB
Diff
From: Tulio Magno Quites Machado Filho <tuliom@linux.vnet.ibm.com>
Date: Thu, 30 Jul 2015 13:48:56 -0300
Subject: [PATCH][BZ 18743] PowerPC: Fix a race condition when eliding a lock
|
|
|
|
[PATCH][BZ 18743] PowerPC: Fix a race condition when eliding a lock
|
|
|
|
The previous code used to evaluate the preprocessor token is_lock_free to
a variable before starting a transaction.  This behavior can cause an
error if another thread got the lock (without using a transaction)
between the conversion of the token and the beginning of the transaction.

This patch delays the evaluation of is_lock_free to inside a transaction
by moving this part of the code to the macro ELIDE_LOCK.
|
|
|
|
2015-07-30 Tulio Magno Quites Machado Filho <tuliom@linux.vnet.ibm.com>
|
|
|
|
	[BZ #18743]
	* sysdeps/powerpc/nptl/elide.h (__elide_lock): Move most of this
	code to...
	(ELIDE_LOCK): ...here.
	(__get_new_count): New function with part of the code from
	__elide_lock that updates the value of adapt_count after a
	transaction abort.
	(__elided_trylock): Moved this code to...
	(ELIDE_TRYLOCK): ...here.
|
|
|
|
Signed-off-by: Tulio Magno Quites Machado Filho <tuliom@linux.vnet.ibm.com>
Signed-off-by: Michel Normand <normand@linux.vnet.ibm.com>
|
|
Index: glibc-2.22/sysdeps/powerpc/nptl/elide.h
|
|
===================================================================
|
|
--- glibc-2.22.orig/sysdeps/powerpc/nptl/elide.h
|
|
+++ glibc-2.22/sysdeps/powerpc/nptl/elide.h
|
|
@@ -23,67 +23,67 @@
|
|
# include <htm.h>
|
|
# include <elision-conf.h>
|
|
|
|
-/* Returns true if the lock defined by is_lock_free as elided.
|
|
- ADAPT_COUNT is a pointer to per-lock state variable. */
|
|
-
|
|
+/* Get the new value of adapt_count according to the elision
|
|
+ configurations. Returns true if the system should retry again or false
|
|
+ otherwise. */
|
|
static inline bool
|
|
-__elide_lock (uint8_t *adapt_count, int is_lock_free)
|
|
+__get_new_count (uint8_t *adapt_count)
|
|
{
|
|
- if (*adapt_count > 0)
|
|
+ /* A persistent failure indicates that a retry will probably
|
|
+ result in another failure. Use normal locking now and
|
|
+ for the next couple of calls. */
|
|
+ if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
|
|
{
|
|
- (*adapt_count)--;
|
|
+ if (__elision_aconf.skip_lock_internal_abort > 0)
|
|
+ *adapt_count = __elision_aconf.skip_lock_internal_abort;
|
|
return false;
|
|
}
|
|
-
|
|
- for (int i = __elision_aconf.try_tbegin; i > 0; i--)
|
|
- {
|
|
- if (__builtin_tbegin (0))
|
|
- {
|
|
- if (is_lock_free)
|
|
- return true;
|
|
- /* Lock was busy. */
|
|
- __builtin_tabort (_ABORT_LOCK_BUSY);
|
|
- }
|
|
- else
|
|
- {
|
|
- /* A persistent failure indicates that a retry will probably
|
|
- result in another failure. Use normal locking now and
|
|
- for the next couple of calls. */
|
|
- if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
|
|
- {
|
|
- if (__elision_aconf.skip_lock_internal_abort > 0)
|
|
- *adapt_count = __elision_aconf.skip_lock_internal_abort;
|
|
- break;
|
|
- }
|
|
- /* Same logic as above, but for a number of temporary failures in a
|
|
- a row. */
|
|
- else if (__elision_aconf.skip_lock_out_of_tbegin_retries > 0
|
|
- && __elision_aconf.try_tbegin > 0)
|
|
- *adapt_count = __elision_aconf.skip_lock_out_of_tbegin_retries;
|
|
- }
|
|
- }
|
|
-
|
|
- return false;
|
|
+ /* Same logic as above, but for a number of temporary failures in a
|
|
+ a row. */
|
|
+ else if (__elision_aconf.skip_lock_out_of_tbegin_retries > 0
|
|
+ && __elision_aconf.try_tbegin > 0)
|
|
+ *adapt_count = __elision_aconf.skip_lock_out_of_tbegin_retries;
|
|
+ return true;
|
|
}
|
|
|
|
-# define ELIDE_LOCK(adapt_count, is_lock_free) \
|
|
- __elide_lock (&(adapt_count), is_lock_free)
|
|
-
|
|
-
|
|
-static inline bool
|
|
-__elide_trylock (uint8_t *adapt_count, int is_lock_free, int write)
|
|
-{
|
|
- if (__elision_aconf.try_tbegin > 0)
|
|
- {
|
|
- if (write)
|
|
- __builtin_tabort (_ABORT_NESTED_TRYLOCK);
|
|
- return __elide_lock (adapt_count, is_lock_free);
|
|
- }
|
|
- return false;
|
|
-}
|
|
+/* Returns 0 if the lock defined by is_lock_free was elided.
|
|
+ ADAPT_COUNT is a per-lock state variable. */
|
|
+# define ELIDE_LOCK(adapt_count, is_lock_free) \
|
|
+ ({ \
|
|
+ int ret = 0; \
|
|
+ if (adapt_count > 0) \
|
|
+ (adapt_count)--; \
|
|
+ else \
|
|
+ for (int i = __elision_aconf.try_tbegin; i > 0; i--) \
|
|
+ { \
|
|
+ asm volatile("" ::: "memory"); \
|
|
+ if (__builtin_tbegin (0)) \
|
|
+ { \
|
|
+ if (is_lock_free) \
|
|
+ { \
|
|
+ ret = 1; \
|
|
+ break; \
|
|
+ } \
|
|
+ __builtin_tabort (_ABORT_LOCK_BUSY); \
|
|
+ } \
|
|
+ else \
|
|
+ if (!__get_new_count(&adapt_count)) \
|
|
+ break; \
|
|
+ } \
|
|
+ ret; \
|
|
+ })
|
|
|
|
# define ELIDE_TRYLOCK(adapt_count, is_lock_free, write) \
|
|
- __elide_trylock (&(adapt_count), is_lock_free, write)
|
|
+ ({ \
|
|
+ int ret = 0; \
|
|
+ if (__elision_aconf.try_tbegin > 0) \
|
|
+ { \
|
|
+ if (write) \
|
|
+ __builtin_tabort (_ABORT_NESTED_TRYLOCK); \
|
|
+ ret = ELIDE_LOCK (adapt_count, is_lock_free); \
|
|
+ } \
|
|
+ ret; \
|
|
+ })
|
|
|
|
|
|
static inline bool
|