Compare commits

2 Commits
main ... 1.1

7 changed files with 966 additions and 17 deletions

assert-message-allocation.patch

@@ -0,0 +1,64 @@
From c32fd59314c343db88c3ea4a203870481d33c3d2 Mon Sep 17 00:00:00 2001
From: Siddhesh Poyarekar <siddhesh@sourceware.org>
Date: Tue, 21 Jan 2025 16:11:06 -0500
Subject: [PATCH] Fix underallocation of abort_msg_s struct (CVE-2025-0395)
Include the space needed to store the length of the message itself, in
addition to the message string. This resolves BZ #32582.
Signed-off-by: Siddhesh Poyarekar <siddhesh@sourceware.org>
Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
(cherry picked from commit 68ee0f704cb81e9ad0a78c644a83e1e9cd2ee578)
---
NEWS | 6 ++++++
assert/assert.c | 4 +++-
sysdeps/posix/libc_fatal.c | 4 +++-
3 files changed, 12 insertions(+), 2 deletions(-)
diff --git a/assert/assert.c b/assert/assert.c
index b7c7a4a1ba..65a9fedf0d 100644
--- a/assert/assert.c
+++ b/assert/assert.c
@@ -18,6 +18,7 @@
#include <assert.h>
#include <atomic.h>
#include <ldsodefs.h>
+#include <libc-pointer-arith.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
@@ -64,7 +65,8 @@ __assert_fail_base (const char *fmt, const char *assertion, const char *file,
(void) __fxprintf (NULL, "%s", str);
(void) fflush (stderr);
- total = (total + 1 + GLRO(dl_pagesize) - 1) & ~(GLRO(dl_pagesize) - 1);
+ total = ALIGN_UP (total + sizeof (struct abort_msg_s) + 1,
+ GLRO(dl_pagesize));
struct abort_msg_s *buf = __mmap (NULL, total, PROT_READ | PROT_WRITE,
MAP_ANON | MAP_PRIVATE, -1, 0);
if (__glibc_likely (buf != MAP_FAILED))
diff --git a/sysdeps/posix/libc_fatal.c b/sysdeps/posix/libc_fatal.c
index 70edcc10c1..5b9e4b7918 100644
--- a/sysdeps/posix/libc_fatal.c
+++ b/sysdeps/posix/libc_fatal.c
@@ -20,6 +20,7 @@
#include <errno.h>
#include <fcntl.h>
#include <ldsodefs.h>
+#include <libc-pointer-arith.h>
#include <paths.h>
#include <stdarg.h>
#include <stdbool.h>
@@ -123,7 +124,8 @@ __libc_message (const char *fmt, ...)
WRITEV_FOR_FATAL (fd, iov, nlist, total);
- total = (total + 1 + GLRO(dl_pagesize) - 1) & ~(GLRO(dl_pagesize) - 1);
+ total = ALIGN_UP (total + sizeof (struct abort_msg_s) + 1,
+ GLRO(dl_pagesize));
struct abort_msg_s *buf = __mmap (NULL, total,
PROT_READ | PROT_WRITE,
MAP_ANON | MAP_PRIVATE, -1, 0);
--
2.48.1
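
Note: the arithmetic in this patch is easy to check in isolation. Below is a standalone sketch (not glibc source; the struct mirrors glibc's abort_msg_s, roughly a length field followed by the message bytes in the same mapping) showing how the old formula rounds up only the message and NUL, leaving no room for the header when the message length sits just under a page boundary:

#include <stddef.h>
#include <stdio.h>

/* Roughly glibc's abort_msg_s: length header plus message bytes.  */
struct abort_msg_s
{
  unsigned int size;
  char msg[];
};

#define ALIGN_UP(v, a) (((v) + (a) - 1) & ~((size_t) (a) - 1))

int
main (void)
{
  size_t pagesize = 4096;
  size_t total = 4095;   /* message length just under one page */

  /* Old formula: message + NUL only; the header written at the start
     of the mapping pushes the message past the end of the buffer.  */
  size_t before = (total + 1 + pagesize - 1) & ~(pagesize - 1);

  /* Fixed formula: account for the header before rounding up.  */
  size_t after = ALIGN_UP (total + sizeof (struct abort_msg_s) + 1,
                           pagesize);

  printf ("needed=%zu old=%zu fixed=%zu\n",
          sizeof (struct abort_msg_s) + total + 1, before, after);
  /* Prints: needed=4100 old=4096 fixed=8192.  */
  return 0;
}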

glibc.changes

@@ -1,12 +1,31 @@
-------------------------------------------------------------------
-Wed Oct 9 07:42:01 UTC 2024 - Andreas Schwab <schwab@suse.de>
+Mon May 19 09:29:13 UTC 2025 - Andreas Schwab <schwab@suse.de>

-- Apply libc_nonshared.a workaround also on s390x and ppc64le (bsc#1231051)
+- static-setuid-ld-library-path.patch: elf: Ignore LD_LIBRARY_PATH and
+  debug env var for setuid for static (CVE-2025-4802, bsc#1243317)

-------------------------------------------------------------------
-Wed Sep 18 14:45:44 UTC 2024 - Andreas Schwab <schwab@suse.de>
+Thu Apr 3 14:43:21 UTC 2025 - Andreas Schwab <schwab@suse.de>

-- Use nss-systemd by default also in SLE (bsc#1230638)
+- pthread-wakeup.patch: pthreads NPTL: lost wakeup fix 2 (bsc#1234128, BZ
+  #25847)
+
+-------------------------------------------------------------------
+Mon Mar 24 15:24:22 UTC 2025 - Andreas Schwab <schwab@suse.de>
+
+- Mark functions in libc_nonshared.a as hidden (bsc#1239883)
+
+-------------------------------------------------------------------
+Thu Jan 23 09:23:41 UTC 2025 - Andreas Schwab <schwab@suse.de>
+
+- assert-message-allocation.patch: Fix underallocation of abort_msg_s
+  struct (CVE-2025-0395, bsc#1236282, BZ #32582)
+
+-------------------------------------------------------------------
+Thu Jan 9 12:49:26 UTC 2025 - Andreas Schwab <schwab@suse.de>
+
+- prctl-syscall-wrapper.patch: Linux: Switch back to assembly syscall
+  wrapper for prctl (bsc#1234665, BZ #29770)

-------------------------------------------------------------------
Thu Jul 18 08:31:37 UTC 2024 - Andreas Schwab <schwab@suse.de>

glibc.spec

@@ -162,6 +162,7 @@ Source1: https://ftp.gnu.org/pub/gnu/glibc/glibc-%{version}.tar.xz.sig
Source2: http://savannah.gnu.org/project/memberlist-gpgkeys.php?group=libc&download=1#/glibc.keyring
Source4: manpages.tar.bz2
Source5: nsswitch.conf
+Source6: sle-nsswitch.conf
Source7: bindresvport.blacklist
Source9: glibc.rpmlintrc
Source10: baselibs.conf
@@ -360,6 +361,14 @@ Patch1031: glibc-CVE-2024-33601-CVE-2024-33602-nscd-netgroup-Use-two.patch
Patch1032: nscd-netgroup-cache-timeout.patch
# PATCH-FIX-UPSTREAM s390x: Fix segfault in wcsncmp (BZ #31934)
Patch1033: s390x-wcsncmp.patch
+# PATCH-FIX-UPSTREAM Linux: Switch back to assembly syscall wrapper for prctl (BZ #29770)
+Patch1034: prctl-syscall-wrapper.patch
+# PATCH-FIX-UPSTREAM Fix underallocation of abort_msg_s struct (CVE-2025-0395, BZ #32582)
+Patch1035: assert-message-allocation.patch
+# PATCH-FIX-UPSTREAM pthreads NPTL: lost wakeup fix 2 (BZ #25847)
+Patch1036: pthread-wakeup.patch
+# PATCH-FIX-UPSTREAM elf: Ignore LD_LIBRARY_PATH and debug env var for setuid for static (CVE-2025-4802)
+Patch1037: static-setuid-ld-library-path.patch
###
# Patches awaiting upstream approval
@@ -769,25 +778,16 @@ echo 'CFLAGS-.os += -fdump-ipa-clones' \
make %{?_smp_mflags} %{?make_output_sync}
%if %{build_main} && 0%{?sle_version}
-%ifarch x86_64 i686 s390x ppc64le
-# Horrible workaround for bsc#1221482
%ifarch x86_64 i686
-archsub=x86
-%endif
-%ifarch s390x
-archsub=s390
-%endif
-%ifarch ppc64le
-archsub=powerpc
-%endif
+# Horrible workaround for bsc#1221482
xstatbuild ()
{
-  gcc -O2 -I ../sysdeps/unix/sysv/linux/$archsub -xc - -c -o $1stat$2.oS <<EOF
-#include <bits/wordsize.h>
+  gcc -O2 -I ../sysdeps/unix/sysv/linux/x86 -xc - -c -o $1stat$2.oS <<EOF
#include <xstatver.h>
int __$1xstat$2 (int, $3, void *);
int
+__attribute__ ((visibility ("hidden")))
$1stat$2 ($3 file, void *buf)
{
  return __$1xstat$2 (_STAT_VER, file, buf);
}
EOF
@@ -1005,7 +1005,7 @@ install -m 644 %{SOURCE7} %{buildroot}/etc
%if %suse_version > 1500
install -D -m 644 %{SOURCE5} %{buildroot}%{_prefix}/etc/nsswitch.conf
%else
-install -m 644 %{SOURCE5} %{buildroot}/etc/nsswitch.conf
+install -m 644 %{SOURCE6} %{buildroot}/etc/nsswitch.conf
%endif
%if %{build_html}
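
For reference, the heredoc above plausibly expands to something like the following for one invocation (the actual xstatbuild calls fall outside this excerpt, so the arguments "" 64 "const char *" and the _STAT_VER stub are illustrative assumptions). The __attribute__ ((visibility ("hidden"))) line is what the bsc#1239883 change adds, keeping the compat wrapper in libc_nonshared.a from being exported by objects that link it:

/* Hypothetical expansion of: xstatbuild "" 64 "const char *".
   _STAT_VER really comes from glibc's <xstatver.h>; it is stubbed
   here only so the sketch compiles standalone.  */
#define _STAT_VER 0   /* stub, illustrative value only */

int __xstat64 (int, const char *, void *);

int
__attribute__ ((visibility ("hidden")))
stat64 (const char *file, void *buf)
{
  return __xstat64 (_STAT_VER, file, buf);
}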

prctl-syscall-wrapper.patch

@@ -0,0 +1,70 @@
From 6a04404521ac4119ae36827eeb288ea84eee7cf6 Mon Sep 17 00:00:00 2001
From: Florian Weimer <fweimer@redhat.com>
Date: Sat, 17 Feb 2024 09:17:04 +0100
Subject: [PATCH] Linux: Switch back to assembly syscall wrapper for prctl (bug
29770)
Commit ff026950e280bc3e9487b41b460fb31bc5b57721 ("Add a C wrapper for
prctl [BZ #25896]") replaced the assembler wrapper with a C function.
However, on powerpc64le-linux-gnu, the C variadic function
implementation requires extra work in the caller to set up the
parameter save area. Calling a function that needs a parameter save
area without one (because the prototype used indicates the function is
not variadic) corrupts the caller's stack. The Linux manual pages
project documents prctl as a non-variadic function. This has resulted
in various projects over the years using non-variadic prototypes,
including the sanitizer libraries in LLVM and GCC (GCC PR 113728).
This commit switches back to the assembler implementation on most
targets and only keeps the C implementation for x86-64 x32.
Also add the __prctl_time64 alias from commit
b39ffab860cd743a82c91946619f1b8158b0b65e ("Linux: Add time64 alias for
prctl") to sysdeps/unix/sysv/linux/syscalls.list; it was not yet
present in commit ff026950e280bc3e9487b41b460fb31bc5b57721.
This restores the old ABI on powerpc64le-linux-gnu, thus fixing
bug 29770.
Reviewed-By: Simon Chopin <simon.chopin@canonical.com>
---
sysdeps/unix/sysv/linux/syscalls.list | 1 +
sysdeps/unix/sysv/linux/{ => x86_64/x32}/prctl.c | 5 +----
2 files changed, 2 insertions(+), 4 deletions(-)
rename sysdeps/unix/sysv/linux/{ => x86_64/x32}/prctl.c (93%)
diff --git a/sysdeps/unix/sysv/linux/syscalls.list b/sysdeps/unix/sysv/linux/syscalls.list
index 73e941ef89..9ac42c3436 100644
--- a/sysdeps/unix/sysv/linux/syscalls.list
+++ b/sysdeps/unix/sysv/linux/syscalls.list
@@ -46,6 +46,7 @@ open_tree EXTRA open_tree i:isU open_tree
pipe2 - pipe2 i:fi __pipe2 pipe2
pidfd_open EXTRA pidfd_open i:iU pidfd_open
pidfd_getfd EXTRA pidfd_getfd i:iiU pidfd_getfd
+prctl EXTRA prctl i:iiiii __prctl prctl __prctl_time64
pivot_root EXTRA pivot_root i:ss pivot_root
pidfd_send_signal EXTRA pidfd_send_signal i:iiPU pidfd_send_signal
process_madvise EXTRA process_madvise i:iPniU process_madvise
diff --git a/sysdeps/unix/sysv/linux/prctl.c b/sysdeps/unix/sysv/linux/x86_64/x32/prctl.c
similarity index 93%
rename from sysdeps/unix/sysv/linux/prctl.c
rename to sysdeps/unix/sysv/linux/x86_64/x32/prctl.c
index 52d234ea0d..4bf1b479a0 100644
--- a/sysdeps/unix/sysv/linux/prctl.c
+++ b/sysdeps/unix/sysv/linux/x86_64/x32/prctl.c
@@ -1,4 +1,4 @@
-/* prctl - Linux specific syscall.
+/* prctl - Linux specific syscall. x86-64 x32 version.
Copyright (C) 2020-2023 Free Software Foundation, Inc.
This file is part of the GNU C Library.
@@ -40,6 +40,3 @@ __prctl (int option, ...)
libc_hidden_def (__prctl)
weak_alias (__prctl, prctl)
-#if __TIMESIZE != 64
-weak_alias (__prctl, __prctl_time64)
-#endif
--
2.47.1
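
The calling-convention hazard described in the commit message can be sketched in a few lines (hypothetical names, not glibc source). On powerpc64le ELFv2 a variadic callee may read its arguments from the parameter save area, an area the caller only has to allocate when the prototype it compiled against is variadic:

#include <stdarg.h>

/* How the C wrapper was defined: variadic, fetching arg2 via va_arg,
   which on powerpc64le ELFv2 may read from the parameter save area.  */
int
my_prctl (int option, ...)
{
  va_list ap;
  va_start (ap, option);
  unsigned long arg2 = va_arg (ap, unsigned long);
  va_end (ap);
  return option + (int) arg2;
}

/* How the man page (and the LLVM/GCC sanitizers) declare it:
     int prctl (int option, unsigned long arg2, unsigned long arg3,
                unsigned long arg4, unsigned long arg5);
   A caller compiled against that non-variadic prototype does not set
   up a parameter save area, so the va_arg above can read stale data
   and the callee's spills can corrupt the caller's stack frame.  */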

pthread-wakeup.patch

@@ -0,0 +1,650 @@
diff --git c/nptl/pthread_cond_broadcast.c w/nptl/pthread_cond_broadcast.c
index 7409958394..f5793e715f 100644
--- c/nptl/pthread_cond_broadcast.c
+++ w/nptl/pthread_cond_broadcast.c
@@ -57,10 +57,10 @@ ___pthread_cond_broadcast (pthread_cond_t *cond)
{
/* Add as many signals as the remaining size of the group. */
atomic_fetch_add_relaxed (cond->__data.__g_signals + g1,
- cond->__data.__g_size[g1] << 1);
+ cond->__data.__g_size[g1]);
cond->__data.__g_size[g1] = 0;
- /* We need to wake G1 waiters before we quiesce G1 below. */
+ /* We need to wake G1 waiters before we switch G1 below. */
/* TODO Only set it if there are indeed futex waiters. We could
also try to move this out of the critical section in cases when
G2 is empty (and we don't need to quiesce). */
@@ -69,11 +69,11 @@ ___pthread_cond_broadcast (pthread_cond_t *cond)
/* G1 is complete. Step (2) is next unless there are no waiters in G2, in
which case we can stop. */
- if (__condvar_quiesce_and_switch_g1 (cond, wseq, &g1, private))
+ if (__condvar_switch_g1 (cond, wseq, &g1, private))
{
/* Step (3): Send signals to all waiters in the old G2 / new G1. */
atomic_fetch_add_relaxed (cond->__data.__g_signals + g1,
- cond->__data.__g_size[g1] << 1);
+ cond->__data.__g_size[g1]);
cond->__data.__g_size[g1] = 0;
/* TODO Only set it if there are indeed futex waiters. */
do_futex_wake = true;
diff --git c/nptl/pthread_cond_common.c w/nptl/pthread_cond_common.c
index 7440bb18e6..99640968f1 100644
--- c/nptl/pthread_cond_common.c
+++ w/nptl/pthread_cond_common.c
@@ -189,19 +189,17 @@ __condvar_get_private (int flags)
return FUTEX_SHARED;
}
-/* This closes G1 (whose index is in G1INDEX), waits for all futex waiters to
- leave G1, converts G1 into a fresh G2, and then switches group roles so that
- the former G2 becomes the new G1 ending at the current __wseq value when we
- eventually make the switch (WSEQ is just an observation of __wseq by the
- signaler).
+/* This closes G1 (whose index is in G1INDEX), converts G1 into a fresh G2,
+ and then switches group roles so that the former G2 becomes the new G1
+ ending at the current __wseq value when we eventually make the switch
+ (WSEQ is just an observation of __wseq by the signaler).
If G2 is empty, it will not switch groups because then it would create an
empty G1 which would require switching groups again on the next signal.
Returns false iff groups were not switched because G2 was empty. */
static bool __attribute__ ((unused))
-__condvar_quiesce_and_switch_g1 (pthread_cond_t *cond, uint64_t wseq,
+__condvar_switch_g1 (pthread_cond_t *cond, uint64_t wseq,
unsigned int *g1index, int private)
{
- const unsigned int maxspin = 0;
unsigned int g1 = *g1index;
/* If there is no waiter in G2, we don't do anything. The expression may
@@ -210,96 +208,23 @@ __condvar_quiesce_and_switch_g1 (pthread_cond_t *cond, uint64_t wseq,
behavior.
Note that this works correctly for a zero-initialized condvar too. */
unsigned int old_orig_size = __condvar_get_orig_size (cond);
- uint64_t old_g1_start = __condvar_load_g1_start_relaxed (cond) >> 1;
- if (((unsigned) (wseq - old_g1_start - old_orig_size)
- + cond->__data.__g_size[g1 ^ 1]) == 0)
+ uint64_t old_g1_start = __condvar_load_g1_start_relaxed (cond);
+ uint64_t new_g1_start = old_g1_start + old_orig_size;
+ if (((unsigned) (wseq - new_g1_start) + cond->__data.__g_size[g1 ^ 1]) == 0)
return false;
- /* Now try to close and quiesce G1. We have to consider the following kinds
- of waiters:
+ /* We have to consider the following kinds of waiters:
* Waiters from less recent groups than G1 are not affected because
nothing will change for them apart from __g1_start getting larger.
* New waiters arriving concurrently with the group switching will all go
into G2 until we atomically make the switch. Waiters existing in G2
are not affected.
- * Waiters in G1 will be closed out immediately by setting a flag in
- __g_signals, which will prevent waiters from blocking using a futex on
- __g_signals and also notifies them that the group is closed. As a
- result, they will eventually remove their group reference, allowing us
- to close switch group roles. */
+ * Waiters in G1 have already received a signal and been woken. */
- /* First, set the closed flag on __g_signals. This tells waiters that are
- about to wait that they shouldn't do that anymore. This basically
- serves as an advance notification of the upcoming change to __g1_start;
- waiters interpret it as if __g1_start was larger than their waiter
- sequence position. This allows us to change __g1_start after waiting
- for all existing waiters with group references to leave, which in turn
- makes recovery after stealing a signal simpler because it then can be
- skipped if __g1_start indicates that the group is closed (otherwise,
- we would have to recover always because waiters don't know how big their
- groups are). Relaxed MO is fine. */
- atomic_fetch_or_relaxed (cond->__data.__g_signals + g1, 1);
-
- /* Wait until there are no group references anymore. The fetch-or operation
- injects us into the modification order of __g_refs; release MO ensures
- that waiters incrementing __g_refs after our fetch-or see the previous
- changes to __g_signals and to __g1_start that had to happen before we can
- switch this G1 and alias with an older group (we have two groups, so
- aliasing requires switching group roles twice). Note that nobody else
- can have set the wake-request flag, so we do not have to act upon it.
-
- Also note that it is harmless if older waiters or waiters from this G1
- get a group reference after we have quiesced the group because it will
- remain closed for them either because of the closed flag in __g_signals
- or the later update to __g1_start. New waiters will never arrive here
- but instead continue to go into the still current G2. */
- unsigned r = atomic_fetch_or_release (cond->__data.__g_refs + g1, 0);
- while ((r >> 1) > 0)
- {
- for (unsigned int spin = maxspin; ((r >> 1) > 0) && (spin > 0); spin--)
- {
- /* TODO Back off. */
- r = atomic_load_relaxed (cond->__data.__g_refs + g1);
- }
- if ((r >> 1) > 0)
- {
- /* There is still a waiter after spinning. Set the wake-request
- flag and block. Relaxed MO is fine because this is just about
- this futex word.
-
- Update r to include the set wake-request flag so that the upcoming
- futex_wait only blocks if the flag is still set (otherwise, we'd
- violate the basic client-side futex protocol). */
- r = atomic_fetch_or_relaxed (cond->__data.__g_refs + g1, 1) | 1;
-
- if ((r >> 1) > 0)
- futex_wait_simple (cond->__data.__g_refs + g1, r, private);
- /* Reload here so we eventually see the most recent value even if we
- do not spin. */
- r = atomic_load_relaxed (cond->__data.__g_refs + g1);
- }
- }
- /* Acquire MO so that we synchronize with the release operation that waiters
- use to decrement __g_refs and thus happen after the waiters we waited
- for. */
- atomic_thread_fence_acquire ();
-
- /* Update __g1_start, which finishes closing this group. The value we add
- will never be negative because old_orig_size can only be zero when we
- switch groups the first time after a condvar was initialized, in which
- case G1 will be at index 1 and we will add a value of 1. See above for
- why this takes place after waiting for quiescence of the group.
- Relaxed MO is fine because the change comes with no additional
- constraints that others would have to observe. */
- __condvar_add_g1_start_relaxed (cond,
- (old_orig_size << 1) + (g1 == 1 ? 1 : - 1));
-
- /* Now reopen the group, thus enabling waiters to again block using the
- futex controlled by __g_signals. Release MO so that observers that see
- no signals (and thus can block) also see the write __g1_start and thus
- that this is now a new group (see __pthread_cond_wait_common for the
- matching acquire MO loads). */
- atomic_store_release (cond->__data.__g_signals + g1, 0);
+ /* Update __g1_start, which closes this group. Relaxed MO is fine because
+ the change comes with no additional constraints that others would have
+ to observe. */
+ __condvar_add_g1_start_relaxed (cond, old_orig_size);
/* At this point, the old G1 is now a valid new G2 (but not in use yet).
No old waiter can neither grab a signal nor acquire a reference without
@@ -311,9 +236,13 @@ __condvar_quiesce_and_switch_g1 (pthread_cond_t *cond, uint64_t wseq,
g1 ^= 1;
*g1index ^= 1;
+ /* Now advance the new G1 g_signals to the new g1_start, giving it
+ an effective signal count of 0 to start. */
+ atomic_store_release (cond->__data.__g_signals + g1, (unsigned)new_g1_start);
+
/* These values are just observed by signalers, and thus protected by the
lock. */
- unsigned int orig_size = wseq - (old_g1_start + old_orig_size);
+ unsigned int orig_size = wseq - new_g1_start;
__condvar_set_orig_size (cond, orig_size);
/* Use and addition to not loose track of cancellations in what was
previously G2. */
diff --git c/nptl/pthread_cond_signal.c w/nptl/pthread_cond_signal.c
index 9ac4bd9f75..bdc1e82466 100644
--- c/nptl/pthread_cond_signal.c
+++ w/nptl/pthread_cond_signal.c
@@ -69,19 +69,18 @@ ___pthread_cond_signal (pthread_cond_t *cond)
bool do_futex_wake = false;
/* If G1 is still receiving signals, we put the signal there. If not, we
- check if G2 has waiters, and if so, quiesce and switch G1 to the former
- G2; if this results in a new G1 with waiters (G2 might have cancellations
- already, see __condvar_quiesce_and_switch_g1), we put the signal in the
- new G1. */
+ check if G2 has waiters, and if so, switch G1 to the former G2; if this
+ results in a new G1 with waiters (G2 might have cancellations already,
+ see __condvar_switch_g1), we put the signal in the new G1. */
if ((cond->__data.__g_size[g1] != 0)
- || __condvar_quiesce_and_switch_g1 (cond, wseq, &g1, private))
+ || __condvar_switch_g1 (cond, wseq, &g1, private))
{
/* Add a signal. Relaxed MO is fine because signaling does not need to
- establish a happens-before relation (see above). We do not mask the
- release-MO store when initializing a group in
- __condvar_quiesce_and_switch_g1 because we use an atomic
- read-modify-write and thus extend that store's release sequence. */
- atomic_fetch_add_relaxed (cond->__data.__g_signals + g1, 2);
+ establish a happens-before relation (see above). We do not mask the
+ release-MO store when initializing a group in __condvar_switch_g1
+ because we use an atomic read-modify-write and thus extend that
+ store's release sequence. */
+ atomic_fetch_add_relaxed (cond->__data.__g_signals + g1, 1);
cond->__data.__g_size[g1]--;
/* TODO Only set it if there are indeed futex waiters. */
do_futex_wake = true;
diff --git c/nptl/pthread_cond_wait.c w/nptl/pthread_cond_wait.c
index 806c432d13..d919b3622a 100644
--- c/nptl/pthread_cond_wait.c
+++ w/nptl/pthread_cond_wait.c
@@ -84,7 +84,7 @@ __condvar_cancel_waiting (pthread_cond_t *cond, uint64_t seq, unsigned int g,
not hold a reference on the group. */
__condvar_acquire_lock (cond, private);
- uint64_t g1_start = __condvar_load_g1_start_relaxed (cond) >> 1;
+ uint64_t g1_start = __condvar_load_g1_start_relaxed (cond);
if (g1_start > seq)
{
/* Our group is closed, so someone provided enough signals for it.
@@ -143,23 +143,6 @@ __condvar_cancel_waiting (pthread_cond_t *cond, uint64_t seq, unsigned int g,
}
}
-/* Wake up any signalers that might be waiting. */
-static void
-__condvar_dec_grefs (pthread_cond_t *cond, unsigned int g, int private)
-{
- /* Release MO to synchronize-with the acquire load in
- __condvar_quiesce_and_switch_g1. */
- if (atomic_fetch_add_release (cond->__data.__g_refs + g, -2) == 3)
- {
- /* Clear the wake-up request flag before waking up. We do not need more
- than relaxed MO and it doesn't matter if we apply this for an aliased
- group because we wake all futex waiters right after clearing the
- flag. */
- atomic_fetch_and_relaxed (cond->__data.__g_refs + g, ~(unsigned int) 1);
- futex_wake (cond->__data.__g_refs + g, INT_MAX, private);
- }
-}
-
/* Clean-up for cancellation of waiters waiting for normal signals. We cancel
our registration as a waiter, confirm we have woken up, and re-acquire the
mutex. */
@@ -171,8 +154,6 @@ __condvar_cleanup_waiting (void *arg)
pthread_cond_t *cond = cbuffer->cond;
unsigned g = cbuffer->wseq & 1;
- __condvar_dec_grefs (cond, g, cbuffer->private);
-
__condvar_cancel_waiting (cond, cbuffer->wseq >> 1, g, cbuffer->private);
/* FIXME With the current cancellation implementation, it is possible that
a thread is cancelled after it has returned from a syscall. This could
@@ -238,9 +219,7 @@ __condvar_cleanup_waiting (void *arg)
signaled), and a reference count.
The group reference count is used to maintain the number of waiters that
- are using the group's futex. Before a group can change its role, the
- reference count must show that no waiters are using the futex anymore; this
- prevents ABA issues on the futex word.
+ are using the group's futex.
To represent which intervals in the waiter sequence the groups cover (and
thus also which group slot contains G1 or G2), we use a 64b counter to
@@ -251,7 +230,7 @@ __condvar_cleanup_waiting (void *arg)
figure out whether they are in a group that has already been completely
signaled (i.e., if the current G1 starts at a later position that the
waiter's position). Waiters cannot determine whether they are currently
- in G2 or G1 -- but they do not have too because all they are interested in
+ in G2 or G1 -- but they do not have to because all they are interested in
is whether there are available signals, and they always start in G2 (whose
group slot they know because of the bit in the waiter sequence. Signalers
will simply fill the right group until it is completely signaled and can
@@ -280,7 +259,6 @@ __condvar_cleanup_waiting (void *arg)
* Waiters fetch-add while having acquire the mutex associated with the
condvar. Signalers load it and fetch-xor it concurrently.
__g1_start: Starting position of G1 (inclusive)
- * LSB is index of current G2.
* Modified by signalers while having acquired the condvar-internal lock
and observed concurrently by waiters.
__g1_orig_size: Initial size of G1
@@ -300,11 +278,10 @@ __condvar_cleanup_waiting (void *arg)
last reference.
* Reference count used by waiters concurrently with signalers that have
acquired the condvar-internal lock.
- __g_signals: The number of signals that can still be consumed.
+ __g_signals: The number of signals that can still be consumed, relative to
+ the current g1_start. (i.e. g1_start with the signal count added)
* Used as a futex word by waiters. Used concurrently by waiters and
signalers.
- * LSB is true iff this group has been completely signaled (i.e., it is
- closed).
__g_size: Waiters remaining in this group (i.e., which have not been
signaled yet.
* Accessed by signalers and waiters that cancel waiting (both do so only
@@ -328,27 +305,6 @@ __condvar_cleanup_waiting (void *arg)
sufficient because if a waiter can see a sufficiently large value, it could
have also consume a signal in the waiters group.
- Waiters try to grab a signal from __g_signals without holding a reference
- count, which can lead to stealing a signal from a more recent group after
- their own group was already closed. They cannot always detect whether they
- in fact did because they do not know when they stole, but they can
- conservatively add a signal back to the group they stole from; if they
- did so unnecessarily, all that happens is a spurious wake-up. To make this
- even less likely, __g1_start contains the index of the current g2 too,
- which allows waiters to check if there aliasing on the group slots; if
- there wasn't, they didn't steal from the current G1, which means that the
- G1 they stole from must have been already closed and they do not need to
- fix anything.
-
- It is essential that the last field in pthread_cond_t is __g_signals[1]:
- The previous condvar used a pointer-sized field in pthread_cond_t, so a
- PTHREAD_COND_INITIALIZER from that condvar implementation might only
- initialize 4 bytes to zero instead of the 8 bytes we need (i.e., 44 bytes
- in total instead of the 48 we need). __g_signals[1] is not accessed before
- the first group switch (G2 starts at index 0), which will set its value to
- zero after a harmless fetch-or whose return value is ignored. This
- effectively completes initialization.
-
Limitations:
* This condvar isn't designed to allow for more than
@@ -379,7 +335,6 @@ static __always_inline int
__pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex,
clockid_t clockid, const struct __timespec64 *abstime)
{
- const int maxspin = 0;
int err;
int result = 0;
@@ -396,8 +351,7 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex,
because we do not need to establish any happens-before relation with
signalers (see __pthread_cond_signal); modification order alone
establishes a total order of waiters/signals. We do need acquire MO
- to synchronize with group reinitialization in
- __condvar_quiesce_and_switch_g1. */
+ to synchronize with group reinitialization in __condvar_switch_g1. */
uint64_t wseq = __condvar_fetch_add_wseq_acquire (cond, 2);
/* Find our group's index. We always go into what was G2 when we acquired
our position. */
@@ -424,178 +378,64 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex,
return err;
}
- /* Now wait until a signal is available in our group or it is closed.
- Acquire MO so that if we observe a value of zero written after group
- switching in __condvar_quiesce_and_switch_g1, we synchronize with that
- store and will see the prior update of __g1_start done while switching
- groups too. */
- unsigned int signals = atomic_load_acquire (cond->__data.__g_signals + g);
- do
+ while (1)
{
- while (1)
- {
- /* Spin-wait first.
- Note that spinning first without checking whether a timeout
- passed might lead to what looks like a spurious wake-up even
- though we should return ETIMEDOUT (e.g., if the caller provides
- an absolute timeout that is clearly in the past). However,
- (1) spurious wake-ups are allowed, (2) it seems unlikely that a
- user will (ab)use pthread_cond_wait as a check for whether a
- point in time is in the past, and (3) spinning first without
- having to compare against the current time seems to be the right
- choice from a performance perspective for most use cases. */
- unsigned int spin = maxspin;
- while (signals == 0 && spin > 0)
- {
- /* Check that we are not spinning on a group that's already
- closed. */
- if (seq < (__condvar_load_g1_start_relaxed (cond) >> 1))
- goto done;
-
- /* TODO Back off. */
-
- /* Reload signals. See above for MO. */
- signals = atomic_load_acquire (cond->__data.__g_signals + g);
- spin--;
- }
-
- /* If our group will be closed as indicated by the flag on signals,
- don't bother grabbing a signal. */
- if (signals & 1)
- goto done;
-
- /* If there is an available signal, don't block. */
- if (signals != 0)
- break;
-
- /* No signals available after spinning, so prepare to block.
- We first acquire a group reference and use acquire MO for that so
- that we synchronize with the dummy read-modify-write in
- __condvar_quiesce_and_switch_g1 if we read from that. In turn,
- in this case this will make us see the closed flag on __g_signals
- that designates a concurrent attempt to reuse the group's slot.
- We use acquire MO for the __g_signals check to make the
- __g1_start check work (see spinning above).
- Note that the group reference acquisition will not mask the
- release MO when decrementing the reference count because we use
- an atomic read-modify-write operation and thus extend the release
- sequence. */
- atomic_fetch_add_acquire (cond->__data.__g_refs + g, 2);
- if (((atomic_load_acquire (cond->__data.__g_signals + g) & 1) != 0)
- || (seq < (__condvar_load_g1_start_relaxed (cond) >> 1)))
- {
- /* Our group is closed. Wake up any signalers that might be
- waiting. */
- __condvar_dec_grefs (cond, g, private);
- goto done;
- }
-
- // Now block.
- struct _pthread_cleanup_buffer buffer;
- struct _condvar_cleanup_buffer cbuffer;
- cbuffer.wseq = wseq;
- cbuffer.cond = cond;
- cbuffer.mutex = mutex;
- cbuffer.private = private;
- __pthread_cleanup_push (&buffer, __condvar_cleanup_waiting, &cbuffer);
-
- err = __futex_abstimed_wait_cancelable64 (
- cond->__data.__g_signals + g, 0, clockid, abstime, private);
-
- __pthread_cleanup_pop (&buffer, 0);
-
- if (__glibc_unlikely (err == ETIMEDOUT || err == EOVERFLOW))
- {
- __condvar_dec_grefs (cond, g, private);
- /* If we timed out, we effectively cancel waiting. Note that
- we have decremented __g_refs before cancellation, so that a
- deadlock between waiting for quiescence of our group in
- __condvar_quiesce_and_switch_g1 and us trying to acquire
- the lock during cancellation is not possible. */
- __condvar_cancel_waiting (cond, seq, g, private);
- result = err;
- goto done;
- }
- else
- __condvar_dec_grefs (cond, g, private);
-
- /* Reload signals. See above for MO. */
- signals = atomic_load_acquire (cond->__data.__g_signals + g);
+ /* Now wait until a signal is available in our group or it is closed.
+ Acquire MO so that if we observe (signals == lowseq) after group
+ switching in __condvar_switch_g1, we synchronize with that store and
+ will see the prior update of __g1_start done while switching groups
+ too. */
+ unsigned int signals = atomic_load_acquire (cond->__data.__g_signals + g);
+ uint64_t g1_start = __condvar_load_g1_start_relaxed (cond);
+
+ if (seq < g1_start)
+ {
+ /* If the group is closed already,
+ then this waiter originally had enough extra signals to
+ consume, up until the time its group was closed. */
+ break;
+ }
+
+ /* If there is an available signal, don't block.
+ If __g1_start has advanced at all, then we must be in G1
+ by now, perhaps in the process of switching back to an older
+ G2, but in either case we're allowed to consume the available
+ signal and should not block anymore. */
+ if ((int)(signals - (unsigned int)g1_start) > 0)
+ {
+ /* Try to grab a signal. See above for MO. (if we do another loop
+ iteration we need to see the correct value of g1_start) */
+ if (atomic_compare_exchange_weak_acquire (
+ cond->__data.__g_signals + g,
+ &signals, signals - 1))
+ break;
+ else
+ continue;
}
+ // Now block.
+ struct _pthread_cleanup_buffer buffer;
+ struct _condvar_cleanup_buffer cbuffer;
+ cbuffer.wseq = wseq;
+ cbuffer.cond = cond;
+ cbuffer.mutex = mutex;
+ cbuffer.private = private;
+ __pthread_cleanup_push (&buffer, __condvar_cleanup_waiting, &cbuffer);
+
+ err = __futex_abstimed_wait_cancelable64 (
+ cond->__data.__g_signals + g, signals, clockid, abstime, private);
+
+ __pthread_cleanup_pop (&buffer, 0);
+
+ if (__glibc_unlikely (err == ETIMEDOUT || err == EOVERFLOW))
+ {
+ /* If we timed out, we effectively cancel waiting. */
+ __condvar_cancel_waiting (cond, seq, g, private);
+ result = err;
+ break;
+ }
}
- /* Try to grab a signal. Use acquire MO so that we see an up-to-date value
- of __g1_start below (see spinning above for a similar case). In
- particular, if we steal from a more recent group, we will also see a
- more recent __g1_start below. */
- while (!atomic_compare_exchange_weak_acquire (cond->__data.__g_signals + g,
- &signals, signals - 2));
-
- /* We consumed a signal but we could have consumed from a more recent group
- that aliased with ours due to being in the same group slot. If this
- might be the case our group must be closed as visible through
- __g1_start. */
- uint64_t g1_start = __condvar_load_g1_start_relaxed (cond);
- if (seq < (g1_start >> 1))
- {
- /* We potentially stole a signal from a more recent group but we do not
- know which group we really consumed from.
- We do not care about groups older than current G1 because they are
- closed; we could have stolen from these, but then we just add a
- spurious wake-up for the current groups.
- We will never steal a signal from current G2 that was really intended
- for G2 because G2 never receives signals (until it becomes G1). We
- could have stolen a signal from G2 that was conservatively added by a
- previous waiter that also thought it stole a signal -- but given that
- that signal was added unnecessarily, it's not a problem if we steal
- it.
- Thus, the remaining case is that we could have stolen from the current
- G1, where "current" means the __g1_start value we observed. However,
- if the current G1 does not have the same slot index as we do, we did
- not steal from it and do not need to undo that. This is the reason
- for putting a bit with G2's index into__g1_start as well. */
- if (((g1_start & 1) ^ 1) == g)
- {
- /* We have to conservatively undo our potential mistake of stealing
- a signal. We can stop trying to do that when the current G1
- changes because other spinning waiters will notice this too and
- __condvar_quiesce_and_switch_g1 has checked that there are no
- futex waiters anymore before switching G1.
- Relaxed MO is fine for the __g1_start load because we need to
- merely be able to observe this fact and not have to observe
- something else as well.
- ??? Would it help to spin for a little while to see whether the
- current G1 gets closed? This might be worthwhile if the group is
- small or close to being closed. */
- unsigned int s = atomic_load_relaxed (cond->__data.__g_signals + g);
- while (__condvar_load_g1_start_relaxed (cond) == g1_start)
- {
- /* Try to add a signal. We don't need to acquire the lock
- because at worst we can cause a spurious wake-up. If the
- group is in the process of being closed (LSB is true), this
- has an effect similar to us adding a signal. */
- if (((s & 1) != 0)
- || atomic_compare_exchange_weak_relaxed
- (cond->__data.__g_signals + g, &s, s + 2))
- {
- /* If we added a signal, we also need to add a wake-up on
- the futex. We also need to do that if we skipped adding
- a signal because the group is being closed because
- while __condvar_quiesce_and_switch_g1 could have closed
- the group, it might still be waiting for futex waiters to
- leave (and one of those waiters might be the one we stole
- the signal from, which cause it to block using the
- futex). */
- futex_wake (cond->__data.__g_signals + g, 1, private);
- break;
- }
- /* TODO Back off. */
- }
- }
- }
-
- done:
/* Confirm that we have been woken. We do that before acquiring the mutex
to allow for execution of pthread_cond_destroy while having acquired the
diff --git c/nptl/tst-cond22.c w/nptl/tst-cond22.c
index 1336e9c79d..bdcb45c536 100644
--- c/nptl/tst-cond22.c
+++ w/nptl/tst-cond22.c
@@ -106,13 +106,13 @@ do_test (void)
status = 1;
}
- printf ("cond = { 0x%x:%x, 0x%x:%x, %u/%u/%u, %u/%u/%u, %u, %u }\n",
+ printf ("cond = { 0x%x:%x, 0x%x:%x, %u/%u, %u/%u, %u, %u }\n",
c.__data.__wseq.__value32.__high,
c.__data.__wseq.__value32.__low,
c.__data.__g1_start.__value32.__high,
c.__data.__g1_start.__value32.__low,
- c.__data.__g_signals[0], c.__data.__g_refs[0], c.__data.__g_size[0],
- c.__data.__g_signals[1], c.__data.__g_refs[1], c.__data.__g_size[1],
+ c.__data.__g_signals[0], c.__data.__g_size[0],
+ c.__data.__g_signals[1], c.__data.__g_size[1],
c.__data.__g1_orig_size, c.__data.__wrefs);
if (pthread_create (&th, NULL, tf, (void *) 1l) != 0)
@@ -152,13 +152,13 @@ do_test (void)
status = 1;
}
- printf ("cond = { 0x%x:%x, 0x%x:%x, %u/%u/%u, %u/%u/%u, %u, %u }\n",
+ printf ("cond = { 0x%x:%x, 0x%x:%x, %u/%u, %u/%u, %u, %u }\n",
c.__data.__wseq.__value32.__high,
c.__data.__wseq.__value32.__low,
c.__data.__g1_start.__value32.__high,
c.__data.__g1_start.__value32.__low,
- c.__data.__g_signals[0], c.__data.__g_refs[0], c.__data.__g_size[0],
- c.__data.__g_signals[1], c.__data.__g_refs[1], c.__data.__g_size[1],
+ c.__data.__g_signals[0], c.__data.__g_size[0],
+ c.__data.__g_signals[1], c.__data.__g_size[1],
c.__data.__g1_orig_size, c.__data.__wrefs);
return status;
diff --git c/sysdeps/nptl/bits/thread-shared-types.h w/sysdeps/nptl/bits/thread-shared-types.h
index 2de6ff9caf..3fe5d4afc0 100644
--- c/sysdeps/nptl/bits/thread-shared-types.h
+++ w/sysdeps/nptl/bits/thread-shared-types.h
@@ -95,11 +95,12 @@ struct __pthread_cond_s
{
__atomic_wide_counter __wseq;
__atomic_wide_counter __g1_start;
- unsigned int __g_refs[2] __LOCK_ALIGNMENT;
- unsigned int __g_size[2];
+ unsigned int __g_size[2] __LOCK_ALIGNMENT;
unsigned int __g1_orig_size;
unsigned int __wrefs;
unsigned int __g_signals[2];
+ unsigned int __unused_initialized_1;
+ unsigned int __unused_initialized_2;
};
typedef unsigned int __tss_t;
diff --git c/sysdeps/nptl/pthread.h w/sysdeps/nptl/pthread.h
index 7f65483542..476cd0ed54 100644
--- c/sysdeps/nptl/pthread.h
+++ w/sysdeps/nptl/pthread.h
@@ -152,7 +152,7 @@ enum
/* Conditional variable handling. */
-#define PTHREAD_COND_INITIALIZER { { {0}, {0}, {0, 0}, {0, 0}, 0, 0, {0, 0} } }
+#define PTHREAD_COND_INITIALIZER { { {0}, {0}, {0, 0}, 0, 0, {0, 0}, 0, 0 } }
/* Cleanup buffers */
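
The heart of the new protocol above: __g_signals no longer uses shifted counts and LSB flags; it holds the low 32 bits of g1_start plus the number of consumable signals, so a single load tells a waiter whether a signal is available or its group is closed, with no group reference needed. A standalone sketch (illustrative names, not glibc code) of the wraparound-safe availability test used in the new waiter loop:

#include <stdint.h>
#include <stdio.h>

/* g_signals == (uint32_t) g1_start + consumable signals, so
   availability is a signed difference; the test stays correct when
   the low 32 bits of the 64-bit g1_start wrap around.  */
static int
signals_available (uint32_t g_signals, uint64_t g1_start)
{
  return (int32_t) (g_signals - (uint32_t) g1_start) > 0;
}

int
main (void)
{
  uint64_t g1_start = UINT64_C (0x1FFFFFFFE); /* low half near wrap */
  uint32_t sig = (uint32_t) g1_start + 3;     /* 3 signals; wraps to 1 */

  printf ("%d\n", signals_available (sig, g1_start));                 /* 1 */
  printf ("%d\n", signals_available ((uint32_t) g1_start, g1_start)); /* 0 */
  return 0;
}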

sle-nsswitch.conf

@@ -0,0 +1,75 @@
#
# /etc/nsswitch.conf
#
# An example Name Service Switch config file. This file should be
# sorted with the most-used services at the beginning.
#
# Valid databases are: aliases, ethers, group, gshadow, hosts,
# initgroups, netgroup, networks, passwd, protocols, publickey,
# rpc, services, and shadow.
#
# Valid service provider entries include (in alphabetical order):
#
# compat Use /etc files plus *_compat pseudo-db
# db Use the pre-processed /var/db files
# dns Use DNS (Domain Name Service)
# files Use the local files in /etc
# hesiod Use Hesiod (DNS) for user lookups
# nis Use NIS (NIS version 2), also called YP
# nisplus Use NIS+ (NIS version 3)
#
# See `info libc 'NSS Basics'` for more information.
#
# Commonly used alternative service providers (may need installation):
#
# ldap Use LDAP directory server
# myhostname Use systemd host names
# mymachines Use systemd machine names
# mdns*, mdns*_minimal Use Avahi mDNS/DNS-SD
# resolve Use systemd resolved resolver
# sss Use System Security Services Daemon (sssd)
# systemd Use systemd for dynamic user option
# winbind Use Samba winbind support
# wins Use Samba wins support
# wrapper Use wrapper module for testing
#
# Notes:
#
# 'sssd' performs its own 'files'-based caching, so it should generally
# come before 'files'.
#
# WARNING: Running nscd with a secondary caching service like sssd may
# lead to unexpected behaviour, especially with how long
# entries are cached.
#
# Installation instructions:
#
# To use 'db', install the appropriate package(s) (provide 'makedb' and
# libnss_db.so.*), and place the 'db' in front of 'files' for entries
# you want to be looked up first in the databases, like this:
#
# passwd: db files
# shadow: db files
# group: db files
passwd: compat
group: compat
shadow: compat
# Allow initgroups to default to the setting for group.
# initgroups: compat
hosts: files dns
networks: files dns
aliases: files usrfiles
ethers: files usrfiles
gshadow: files usrfiles
netgroup: files nis
protocols: files usrfiles
publickey: files
rpc: files usrfiles
services: files usrfiles
automount: files nis
bootparams: files
netmasks: files
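
These service lists are consumed transparently through the normal libc lookup calls, so no application changes are needed; a minimal check that the passwd line above is honored:

#include <pwd.h>
#include <stdio.h>

int
main (void)
{
  /* glibc resolves this through the services configured for "passwd"
     in nsswitch.conf (here: compat), tried in order.  */
  struct passwd *pw = getpwnam ("root");
  if (pw != NULL)
    printf ("%s uid=%u shell=%s\n",
            pw->pw_name, (unsigned int) pw->pw_uid, pw->pw_shell);
  return 0;
}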

static-setuid-ld-library-path.patch

@@ -0,0 +1,71 @@
From 5451fa962cd0a90a0e2ec1d8910a559ace02bba0 Mon Sep 17 00:00:00 2001
From: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Date: Mon, 6 Nov 2023 17:25:49 -0300
Subject: [PATCH] elf: Ignore LD_LIBRARY_PATH and debug env var for setuid for
static
It mimics the ld.so behavior.
Checked on x86_64-linux-gnu.
Reviewed-by: Siddhesh Poyarekar <siddhesh@sourceware.org>
---
elf/dl-support.c | 32 ++++++++++++++++----------------
1 file changed, 16 insertions(+), 16 deletions(-)
Index: glibc-2.38/elf/dl-support.c
===================================================================
--- glibc-2.38.orig/elf/dl-support.c
+++ glibc-2.38/elf/dl-support.c
@@ -276,8 +276,6 @@ _dl_non_dynamic_init (void)
_dl_main_map.l_phdr = GL(dl_phdr);
_dl_main_map.l_phnum = GL(dl_phnum);
- _dl_verbose = *(getenv ("LD_WARN") ?: "") == '\0' ? 0 : 1;
-
/* Set up the data structures for the system-supplied DSO early,
so they can influence _dl_init_paths. */
setup_vdso (NULL, NULL);
@@ -285,6 +283,22 @@ _dl_non_dynamic_init (void)
/* With vDSO setup we can initialize the function pointers. */
setup_vdso_pointers ();
+ if (__libc_enable_secure)
+ {
+ static const char unsecure_envvars[] =
+ UNSECURE_ENVVARS
+ ;
+ const char *cp = unsecure_envvars;
+
+ while (cp < unsecure_envvars + sizeof (unsecure_envvars))
+ {
+ __unsetenv (cp);
+ cp = strchr (cp, '\0') + 1;
+ }
+ }
+
+ _dl_verbose = *(getenv ("LD_WARN") ?: "") == '\0' ? 0 : 1;
+
/* Initialize the data structures for the search paths for shared
objects. */
_dl_init_paths (getenv ("LD_LIBRARY_PATH"), "LD_LIBRARY_PATH",
@@ -306,20 +320,6 @@ _dl_non_dynamic_init (void)
_dl_profile_output
= &"/var/tmp\0/var/profile"[__libc_enable_secure ? 9 : 0];
- if (__libc_enable_secure)
- {
- static const char unsecure_envvars[] =
- UNSECURE_ENVVARS
- ;
- const char *cp = unsecure_envvars;
-
- while (cp < unsecure_envvars + sizeof (unsecure_envvars))
- {
- __unsetenv (cp);
- cp = strchr (cp, '\0') + 1;
- }
- }
-
#ifdef DL_PLATFORM_INIT
DL_PLATFORM_INIT;
#endif
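
The patch is purely an ordering fix: the UNSECURE_ENVVARS scrub must run before _dl_init_paths consults LD_LIBRARY_PATH and before _dl_verbose reads LD_WARN. A standalone sketch of the same loop shape (the variable list here is an abridged stand-in; glibc's real UNSECURE_ENVVARS is longer):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* NUL-separated list, like glibc's UNSECURE_ENVVARS (abridged).  */
static const char unsecure_envvars[] =
  "LD_PRELOAD\0"
  "LD_LIBRARY_PATH\0"
  "LD_DEBUG\0";

int
main (void)
{
  int libc_enable_secure = 1;  /* stand-in for AT_SECURE/setuid mode */

  if (libc_enable_secure)
    for (const char *cp = unsecure_envvars;
         cp < unsecure_envvars + sizeof (unsecure_envvars) - 1;
         cp = strchr (cp, '\0') + 1)
      unsetenv (cp);

  /* Only now is it safe to read the search path, mirroring the move
     of the scrub ahead of _dl_init_paths in the patch.  */
  const char *p = getenv ("LD_LIBRARY_PATH");
  printf ("LD_LIBRARY_PATH=%s\n", p != NULL ? p : "(unset)");
  return 0;
}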