glibc/powerpc-lock-elision-race.patch
Andreas Schwab 5005d4836d Accepting request 359989 from home:Andreas_Schwab:Factory
- tls-dtor-list-mangling.patch: Harden tls_dtor_list with pointer mangling
  (BZ #19018)
- prelink-elf-rtype-class.patch: Keep only ELF_RTYPE_CLASS_{PLT|COPY} bits
  for prelink (BZ #19178)
- vector-finite-math-aliases.patch: Better workaround for aliases of
  *_finite symbols in vector math library (BZ #19058)
- powerpc-elision-adapt-param.patch: powerpc: Fix usage of elision
  transient failure adapt param (BZ #19174)
- catopen-unbound-alloca.patch: Fix unbound alloca in catopen
  (CVE-2015-8779, bsc#962739, BZ #17905)
- strftime-range-check.patch: Add range check on time fields
  (CVE-2015-8776, bsc#962736, BZ #18985)
- hcreate-overflow-check.patch: Handle overflow in hcreate (CVE-2015-8778,
  bsc#962737, BZ #18240)
- errorcheck-mutex-no-elision.patch: Don't do lock elision on an error
  checking mutex (bsc#956716, BZ #17514)
- refactor-nan-parsing.patch: Refactor strtod parsing of NaN payloads
  (CVE-2014-9761, bsc#962738, BZ #16962)
- send-dg-buffer-overflow.patch: Fix getaddrinfo stack-based buffer
  overflow (CVE-2015-7547, bsc#961721, BZ #18665)
- powerpc-lock-elision-race.patch: renamed from
  0001-powerpc-Fix-a-race-condition-when-eliding-a-lock-20150730.patch

OBS-URL: https://build.opensuse.org/request/show/359989
OBS-URL: https://build.opensuse.org/package/show/Base:System/glibc?expand=0&rev=421
2016-02-17 14:42:34 +00:00

2015-10-20 Tulio Magno Quites Machado Filho <tuliom@linux.vnet.ibm.com>

[BZ #18743]
* sysdeps/powerpc/nptl/elide.h (__elide_lock): Move most of this
code to...
(ELIDE_LOCK): ...here.
(__get_new_count): New function with part of the code from
__elide_lock that updates the value of adapt_count after a
transaction abort.
(__elide_trylock): Moved this code to...
(ELIDE_TRYLOCK): ...here.
Index: glibc-2.22/sysdeps/powerpc/nptl/elide.h
===================================================================
--- glibc-2.22.orig/sysdeps/powerpc/nptl/elide.h
+++ glibc-2.22/sysdeps/powerpc/nptl/elide.h
@@ -23,67 +23,78 @@
# include <htm.h>
# include <elision-conf.h>
-/* Returns true if the lock defined by is_lock_free as elided.
- ADAPT_COUNT is a pointer to per-lock state variable. */
-
+/* Get the new value of adapt_count according to the elision
+ configurations. Returns true if the system should retry again or false
+ otherwise. */
static inline bool
-__elide_lock (uint8_t *adapt_count, int is_lock_free)
+__get_new_count (uint8_t *adapt_count)
{
- if (*adapt_count > 0)
+ /* A persistent failure indicates that a retry will probably
+ result in another failure. Use normal locking now and
+ for the next couple of calls. */
+ if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
{
- (*adapt_count)--;
+ if (__elision_aconf.skip_lock_internal_abort > 0)
+ *adapt_count = __elision_aconf.skip_lock_internal_abort;
return false;
}
-
- for (int i = __elision_aconf.try_tbegin; i > 0; i--)
- {
- if (__builtin_tbegin (0))
- {
- if (is_lock_free)
- return true;
- /* Lock was busy. */
- __builtin_tabort (_ABORT_LOCK_BUSY);
- }
- else
- {
- /* A persistent failure indicates that a retry will probably
- result in another failure. Use normal locking now and
- for the next couple of calls. */
- if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
- {
- if (__elision_aconf.skip_lock_internal_abort > 0)
- *adapt_count = __elision_aconf.skip_lock_internal_abort;
- break;
- }
- /* Same logic as above, but for a number of temporary failures in a
- a row. */
- else if (__elision_aconf.skip_lock_out_of_tbegin_retries > 0
- && __elision_aconf.try_tbegin > 0)
- *adapt_count = __elision_aconf.skip_lock_out_of_tbegin_retries;
- }
- }
-
- return false;
+ /* Same logic as above, but for a number of temporary failures in a
+ a row. */
+ else if (__elision_aconf.skip_lock_out_of_tbegin_retries > 0
+ && __elision_aconf.try_tbegin > 0)
+ *adapt_count = __elision_aconf.skip_lock_out_of_tbegin_retries;
+ return true;
}
-# define ELIDE_LOCK(adapt_count, is_lock_free) \
- __elide_lock (&(adapt_count), is_lock_free)
-
+/* CONCURRENCY NOTES:
-static inline bool
-__elide_trylock (uint8_t *adapt_count, int is_lock_free, int write)
-{
- if (__elision_aconf.try_tbegin > 0)
- {
- if (write)
- __builtin_tabort (_ABORT_NESTED_TRYLOCK);
- return __elide_lock (adapt_count, is_lock_free);
- }
- return false;
-}
+ The evaluation of the macro expression is_lock_free encompasses one or
+ more loads from memory locations that are concurrently modified by other
+ threads. For lock elision to work, this evaluation and the rest of the
+ critical section protected by the lock must be atomic because an
+ execution with lock elision must be equivalent to an execution in which
+ the lock would have been actually acquired and released. Therefore, we
+ evaluate is_lock_free inside of the transaction that represents the
+ critical section for which we want to use lock elision, which ensures
+ the atomicity that we require. */
+
+/* Returns 0 if the lock defined by is_lock_free was elided.
+ ADAPT_COUNT is a per-lock state variable. */
+# define ELIDE_LOCK(adapt_count, is_lock_free) \
+ ({ \
+ int ret = 0; \
+ if (adapt_count > 0) \
+ (adapt_count)--; \
+ else \
+ for (int i = __elision_aconf.try_tbegin; i > 0; i--) \
+ { \
+ if (__builtin_tbegin (0)) \
+ { \
+ if (is_lock_free) \
+ { \
+ ret = 1; \
+ break; \
+ } \
+ __builtin_tabort (_ABORT_LOCK_BUSY); \
+ } \
+ else \
+ if (!__get_new_count(&adapt_count)) \
+ break; \
+ } \
+ ret; \
+ })
# define ELIDE_TRYLOCK(adapt_count, is_lock_free, write) \
- __elide_trylock (&(adapt_count), is_lock_free, write)
+ ({ \
+ int ret = 0; \
+ if (__elision_aconf.try_tbegin > 0) \
+ { \
+ if (write) \
+ __builtin_tabort (_ABORT_NESTED_TRYLOCK); \
+ ret = ELIDE_LOCK (adapt_count, is_lock_free); \
+ } \
+ ret; \
+ })
static inline bool
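
For context, a minimal usage sketch of the reworked ELIDE_LOCK/ELIDE_TRYLOCK interface follows. It is not part of the patch: the toy_lock structure, its field layout, and the atomic fallback path are illustrative assumptions made here, not glibc code; in glibc the real callers are the pthread mutex and rwlock elision wrappers. The sketch only demonstrates the point made in the CONCURRENCY NOTES above: the is_lock_free expression (here l->lock == 0) is passed unevaluated into the macro and is only read inside the transaction started by __builtin_tbegin, so a concurrent writer of l->lock aborts the transaction instead of racing with the elided critical section.

/* Illustrative sketch only -- NOT part of elide.h or of this patch.
   Assumes <stdint.h> and the elide.h shown above are available and that
   lock elision support is configured for this build.  */

#include <stdint.h>

struct toy_lock
{
  int lock;             /* 0 = free, 1 = taken (assumed layout).  */
  uint8_t adapt_count;  /* Per-lock elision state consumed by ELIDE_LOCK.  */
};

static void
toy_lock_acquire (struct toy_lock *l)
{
  /* The is_lock_free argument is evaluated inside the hardware
     transaction, so the load of l->lock becomes part of the
     transaction's read set.  */
  if (ELIDE_LOCK (l->adapt_count, l->lock == 0))
    return;                     /* Elided: no store to l->lock at all.  */

  /* Fallback: acquire the lock for real.  */
  while (__atomic_exchange_n (&l->lock, 1, __ATOMIC_ACQUIRE) != 0)
    ;
}

static void
toy_lock_release (struct toy_lock *l)
{
  /* ELIDE_UNLOCK, defined further down in elide.h (not shown in this
     hunk), commits the transaction when the lock was elided.  */
  if (ELIDE_UNLOCK (l->lock == 0))
    return;

  __atomic_store_n (&l->lock, 0, __ATOMIC_RELEASE);
}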