diff --git a/call-init-proxy-objects.patch b/call-init-proxy-objects.patch
new file mode 100644
index 0000000..ec3497b
--- /dev/null
+++ b/call-init-proxy-objects.patch
@@ -0,0 +1,37 @@
+From 7ae211a01b085d0bde54bd13b887ce8f9d57c2b4 Mon Sep 17 00:00:00 2001
+From: Florian Weimer <fweimer@redhat.com>
+Date: Tue, 22 Aug 2023 13:56:25 +0200
+Subject: [PATCH] elf: Do not run constructors for proxy objects
+
+Otherwise, the ld.so constructor runs for each audit namespace
+and each dlmopen namespace.
+
+(cherry picked from commit f6c8204fd7fabf0cf4162eaf10ccf23258e4d10e)
+---
+ elf/dl-init.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/elf/dl-init.c b/elf/dl-init.c
+index 5b0732590f..ba4d2fdc85 100644
+--- a/elf/dl-init.c
++++ b/elf/dl-init.c
+@@ -25,10 +25,14 @@
+ static void
+ call_init (struct link_map *l, int argc, char **argv, char **env)
+ {
++  /* Do not run constructors for proxy objects. */
++  if (l != l->l_real)
++    return;
++
+   /* If the object has not been relocated, this is a bug. The
+      function pointers are invalid in this case. (Executables do not
+-     need relocation, and neither do proxy objects.) */
+-  assert (l->l_real->l_relocated || l->l_real->l_type == lt_executable);
++     need relocation.) */
++  assert (l->l_relocated || l->l_type == lt_executable);
+
+   if (l->l_init_called)
+     /* This object is all done. */
+--
+2.42.0
+
diff --git a/dtors-reverse-ctor-order.patch b/dtors-reverse-ctor-order.patch
new file mode 100644
index 0000000..3e058ad
--- /dev/null
+++ b/dtors-reverse-ctor-order.patch
@@ -0,0 +1,834 @@
+From a3189f66a5f2fe86568286fa025fa153be04c6c0 Mon Sep 17 00:00:00 2001
+From: Florian Weimer <fweimer@redhat.com>
+Date: Fri, 8 Sep 2023 12:32:14 +0200
+Subject: [PATCH] elf: Always call destructors in reverse constructor order
+ (bug 30785)
+
+The current implementation of dlclose (and process exit) re-sorts the
+link maps before calling ELF destructors. Destructor order is not the
+reverse of the constructor order as a result: The second sort takes
+relocation dependencies into account, and other differences can result
+from ambiguous inputs, such as cycles. (The force_first handling in
+_dl_sort_maps is not effective for dlclose.) After the changes in
+this commit, there is still a required difference due to
+dlopen/dlclose ordering by the application, but the previous
+discrepancies went beyond that.
+
+A new global (namespace-spanning) list of link maps,
+_dl_init_called_list, is updated right before ELF constructors are
+called from _dl_init.
+
+In dl_close_worker, the maps variable, an on-stack variable length
+array, is eliminated. (VLAs are problematic, and dlclose should not
+call malloc because it cannot readily deal with malloc failure.)
+Marking still-used objects uses the namespace list directly, with
+next and next_idx replacing the done_index variable.
+
+After marking, _dl_init_called_list is used to call the destructors
+of now-unused maps in reverse constructor order. These destructors
+can call dlopen. Previously, new objects do not have l_map_used set.
+This had to change: There is no copy of the link map list anymore,
+so processing would cover newly opened (and unmarked) mappings,
+unloading them. Now, _dl_init (indirectly) sets l_map_used, too.
+(dlclose is handled by the existing reentrancy guard.)
+
+After _dl_init_called_list traversal, two more loops follow. The
+processing order changes to the original link map order in the
+namespace. Previously, dependency order was used. The difference
+should not matter because relocation dependencies could already
+reorder link maps in the old code.
+
+The changes to _dl_fini remove the sorting step and replace it with
+a traversal of _dl_init_called_list. The l_direct_opencount
+decrement outside the loader lock is removed because it appears
+incorrect: the counter manipulation could race with other dynamic
+loader operations.
+
+tst-audit23 needs adjustments to the changes in LA_ACT_DELETE
+notifications. The new approach for checking la_activity should
+make it clearer that la_activity calls come in pairs around namespace
+updates.
+
+The dependency sorting test cases need updates because the destructor
+order is always the opposite order of constructor order, even with
+relocation dependencies or cycles present.
+
+There is a future cleanup opportunity to remove the now-constant
+force_first and for_fini arguments from the _dl_sort_maps function.
+
+Fixes commit 1df71d32fe5f5905ffd5d100e5e9ca8ad62 ("elf: Implement
+force_first handling in _dl_sort_maps_dfs (bug 28937)").
+
+Reviewed-by: DJ Delorie <dj@redhat.com>
+(cherry picked from commit 6985865bc3ad5b23147ee73466583dd7fdf65892)
+---
+ NEWS                       |   7 ++
+ elf/dl-close.c             | 113 +++++++++++++++++----------
+ elf/dl-fini.c              | 152 +++++++++++++------------------------
+ elf/dl-init.c              |  16 ++++
+ elf/dso-sort-tests-1.def   |  19 ++---
+ elf/tst-audit23.c          |  44 ++++++-----
+ include/link.h             |   4 +
+ sysdeps/generic/ldsodefs.h |   4 +
+ 8 files changed, 186 insertions(+), 173 deletions(-)
+
+diff --git a/elf/dl-close.c b/elf/dl-close.c
+index b887a44888..ea62d0e601 100644
+--- a/elf/dl-close.c
++++ b/elf/dl-close.c
+@@ -138,30 +138,31 @@ _dl_close_worker (struct link_map *map, bool force)
+
+   bool any_tls = false;
+   const unsigned int nloaded = ns->_ns_nloaded;
+-  struct link_map *maps[nloaded];
+
+-  /* Run over the list and assign indexes to the link maps and enter
+-     them into the MAPS array. */
++  /* Run over the list and assign indexes to the link maps. */
+   int idx = 0;
+   for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
+     {
+       l->l_map_used = 0;
+       l->l_map_done = 0;
+       l->l_idx = idx;
+-      maps[idx] = l;
+       ++idx;
+     }
+   assert (idx == nloaded);
+
+-  /* Keep track of the lowest index link map we have covered already. */
+-  int done_index = -1;
+-  while (++done_index < nloaded)
++  /* Keep marking link maps until no new link maps are found. */
++  for (struct link_map *l = ns->_ns_loaded; l != NULL; )
+     {
+-      struct link_map *l = maps[done_index];
++      /* next is reset to earlier link maps for remarking. */
++      struct link_map *next = l->l_next;
++      int next_idx = l->l_idx + 1; /* next->l_idx, but covers next == NULL. */
+
+       if (l->l_map_done)
+-        /* Already handled. */
+-        continue;
++        {
++          /* Already handled. */
++          l = next;
++          continue;
++        }
+
+       /* Check whether this object is still used. */
+       if (l->l_type == lt_loaded
+@@ -171,7 +172,10 @@ _dl_close_worker (struct link_map *map, bool force)
+              acquire is sufficient and correct. */
+           && atomic_load_acquire (&l->l_tls_dtor_count) == 0
+           && !l->l_map_used)
+-        continue;
++        {
++          l = next;
++          continue;
++        }
+
+       /* We need this object and we handle it now. */
+       l->l_map_used = 1;
+@@ -198,8 +202,11 @@ _dl_close_worker (struct link_map *map, bool force)
+                  already processed it, then we need to go back
+                  and process again from that point forward to
+                  ensure we keep all of its dependencies also. */
+-              if ((*lp)->l_idx - 1 < done_index)
+-                done_index = (*lp)->l_idx - 1;
++              if ((*lp)->l_idx < next_idx)
++                {
++                  next = *lp;
++                  next_idx = next->l_idx;
++                }
+             }
+         }
+
+@@ -219,44 +226,65 @@ _dl_close_worker (struct link_map *map, bool force)
+               if (!jmap->l_map_used)
+                 {
+                   jmap->l_map_used = 1;
+-                  if (jmap->l_idx - 1 < done_index)
+-                    done_index = jmap->l_idx - 1;
++                  if (jmap->l_idx < next_idx)
++                    {
++                      next = jmap;
++                      next_idx = next->l_idx;
++                    }
+                 }
+             }
+         }
+-    }
+
+-  /* Sort the entries. We can skip looking for the binary itself which is
+-     at the front of the search list for the main namespace. */
+-  _dl_sort_maps (maps, nloaded, (nsid == LM_ID_BASE), true);
++      l = next;
++    }
+
+-  /* Call all termination functions at once. */
+-  bool unload_any = false;
+-  bool scope_mem_left = false;
+-  unsigned int unload_global = 0;
+-  unsigned int first_loaded = ~0;
+-  for (unsigned int i = 0; i < nloaded; ++i)
++  /* Call the destructors in reverse constructor order, and remove the
++     closed link maps from the list. */
++  for (struct link_map **init_called_head = &_dl_init_called_list;
++       *init_called_head != NULL; )
+     {
+-      struct link_map *imap = maps[i];
++      struct link_map *imap = *init_called_head;
+
+-      /* All elements must be in the same namespace. */
+-      assert (imap->l_ns == nsid);
+-
+-      if (!imap->l_map_used)
++      /* _dl_init_called_list is global, to produce a global ordering.
++         Ignore the other namespaces (and link maps that are still used). */
++      if (imap->l_ns != nsid || imap->l_map_used)
++        init_called_head = &imap->l_init_called_next;
++      else
+         {
+           assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);
+
+-          /* Call its termination function. Do not do it for
+-             half-cooked objects. Temporarily disable exception
+-             handling, so that errors are fatal. */
+-          if (imap->l_init_called)
++          /* _dl_init_called_list is updated at the same time as
++             l_init_called. */
++          assert (imap->l_init_called);
++
++          if (imap->l_info[DT_FINI_ARRAY] != NULL
++              || imap->l_info[DT_FINI] != NULL)
+             _dl_catch_exception (NULL, _dl_call_fini, imap);
+
+ #ifdef SHARED
+           /* Auditing checkpoint: we remove an object. */
+           _dl_audit_objclose (imap);
+ #endif
++          /* Unlink this link map. */
++          *init_called_head = imap->l_init_called_next;
++        }
++    }
++
++
++  bool unload_any = false;
++  bool scope_mem_left = false;
++  unsigned int unload_global = 0;
++
++  /* For skipping un-unloadable link maps in the second loop. */
++  struct link_map *first_loaded = ns->_ns_loaded;
+
++  /* Iterate over the namespace to find objects to unload. Some
++     unloadable objects may not be on _dl_init_called_list due to
++     dlopen failure. */
++  for (struct link_map *imap = first_loaded; imap != NULL; imap = imap->l_next)
++    {
++      if (!imap->l_map_used)
++        {
+           /* This object must not be used anymore. */
+           imap->l_removed = 1;
+
+@@ -267,8 +295,8 @@ _dl_close_worker (struct link_map *map, bool force)
+           ++unload_global;
+
+           /* Remember where the first dynamically loaded object is. */
+-          if (i < first_loaded)
+-            first_loaded = i;
++          if (first_loaded == NULL)
++            first_loaded = imap;
+         }
+       /* Else imap->l_map_used. */
+       else if (imap->l_type == lt_loaded)
+@@ -404,8 +432,8 @@ _dl_close_worker (struct link_map *map, bool force)
+           imap->l_loader = NULL;
+
+           /* Remember where the first dynamically loaded object is. */
+-          if (i < first_loaded)
+-            first_loaded = i;
++          if (first_loaded == NULL)
++            first_loaded = imap;
+         }
+     }
+
+@@ -476,10 +504,11 @@ _dl_close_worker (struct link_map *map, bool force)
+
+   /* Check each element of the search list to see if all references to
+      it are gone. */
+-  for (unsigned int i = first_loaded; i < nloaded; ++i)
++  for (struct link_map *imap = first_loaded; imap != NULL; )
+     {
+-      struct link_map *imap = maps[i];
+-      if (!imap->l_map_used)
++      if (imap->l_map_used)
++        imap = imap->l_next;
++      else
+         {
+           assert (imap->l_type == lt_loaded);
+
+@@ -690,7 +719,9 @@ _dl_close_worker (struct link_map *map, bool force)
+           if (imap == GL(dl_initfirst))
+             GL(dl_initfirst) = NULL;
+
++          struct link_map *next = imap->l_next;
+           free (imap);
++          imap = next;
+         }
+     }
+
+diff --git a/elf/dl-fini.c b/elf/dl-fini.c
+index 9acb64f47c..e201d36651 100644
+--- a/elf/dl-fini.c
++++ b/elf/dl-fini.c
+@@ -24,116 +24,68 @@
+ void
+ _dl_fini (void)
+ {
+-  /* Lots of fun ahead. We have to call the destructors for all still
+-     loaded objects, in all namespaces. The problem is that the ELF
+-     specification now demands that dependencies between the modules
+-     are taken into account. I.e., the destructor for a module is
+-     called before the ones for any of its dependencies.
+-
+-     To make things more complicated, we cannot simply use the reverse
+-     order of the constructors. Since the user might have loaded objects
+-     using `dlopen' there are possibly several other modules with its
+-     dependencies to be taken into account. Therefore we have to start
+-     determining the order of the modules once again from the beginning. */
+-
+-  /* We run the destructors of the main namespaces last. As for the
+-     other namespaces, we pick run the destructors in them in reverse
+-     order of the namespace ID. */
+-#ifdef SHARED
+-  int do_audit = 0;
+- again:
+-#endif
+-  for (Lmid_t ns = GL(dl_nns) - 1; ns >= 0; --ns)
+-    {
+-      /* Protect against concurrent loads and unloads. */
+-      __rtld_lock_lock_recursive (GL(dl_load_lock));
+-
+-      unsigned int nloaded = GL(dl_ns)[ns]._ns_nloaded;
+-      /* No need to do anything for empty namespaces or those used for
+-         auditing DSOs. */
+-      if (nloaded == 0
+-#ifdef SHARED
+-          || GL(dl_ns)[ns]._ns_loaded->l_auditing != do_audit
+-#endif
+-          )
+-        __rtld_lock_unlock_recursive (GL(dl_load_lock));
+-      else
+-        {
++  /* Call destructors strictly in the reverse order of constructors.
++     This causes fewer surprises than some arbitrary reordering based
++     on new (relocation) dependencies. None of the objects are
++     unmapped, so applications can deal with this if their DSOs remain
++     in a consistent state after destructors have run. */
++
++  /* Protect against concurrent loads and unloads. */
++  __rtld_lock_lock_recursive (GL(dl_load_lock));
++
++  /* Ignore objects which are opened during shutdown. */
++  struct link_map *local_init_called_list = _dl_init_called_list;
++
++  for (struct link_map *l = local_init_called_list; l != NULL;
++       l = l->l_init_called_next)
++    /* Bump l_direct_opencount of all objects so that they
++       are not dlclose()ed from underneath us. */
++    ++l->l_direct_opencount;
++
++  /* After this point, everything linked from local_init_called_list
++     cannot be unloaded because of the reference counter update. */
++  __rtld_lock_unlock_recursive (GL(dl_load_lock));
++
++  /* Perform two passes: One for non-audit modules, one for audit
++     modules. This way, audit modules receive unload notifications
++     for non-audit objects, and the destructors for audit modules
++     still run. */
+ #ifdef SHARED
+-      _dl_audit_activity_nsid (ns, LA_ACT_DELETE);
++  int last_pass = GLRO(dl_naudit) > 0;
++  Lmid_t last_ns = -1;
++  for (int do_audit = 0; do_audit <= last_pass; ++do_audit)
+ #endif
+-
+-      /* Now we can allocate an array to hold all the pointers and
+-         copy the pointers in. */
+-      struct link_map *maps[nloaded];
+-
+-      unsigned int i;
+-      struct link_map *l;
+-      assert (nloaded != 0 || GL(dl_ns)[ns]._ns_loaded == NULL);
+-      for (l = GL(dl_ns)[ns]._ns_loaded, i = 0; l != NULL; l = l->l_next)
+-        /* Do not handle ld.so in secondary namespaces. */
+-        if (l == l->l_real)
+-          {
+-            assert (i < nloaded);
+-
+-            maps[i] = l;
+-            l->l_idx = i;
+-            ++i;
+-
+-            /* Bump l_direct_opencount of all objects so that they
+-               are not dlclose()ed from underneath us. */
+-            ++l->l_direct_opencount;
+-          }
+-      assert (ns != LM_ID_BASE || i == nloaded);
+-      assert (ns == LM_ID_BASE || i == nloaded || i == nloaded - 1);
+-      unsigned int nmaps = i;
+-
+-      /* Now we have to do the sorting. We can skip looking for the
+-         binary itself which is at the front of the search list for
+-         the main namespace. */
+-      _dl_sort_maps (maps, nmaps, (ns == LM_ID_BASE), true);
+-
+-      /* We do not rely on the linked list of loaded object anymore
+-         from this point on. We have our own list here (maps). The
+-         various members of this list cannot vanish since the open
+-         count is too high and will be decremented in this loop. So
+-         we release the lock so that some code which might be called
+-         from a destructor can directly or indirectly access the
+-         lock. */
+-      __rtld_lock_unlock_recursive (GL(dl_load_lock));
+-
+-      /* 'maps' now contains the objects in the right order. Now
+-         call the destructors. We have to process this array from
+-         the front. */
+-      for (i = 0; i < nmaps; ++i)
+-        {
+-          struct link_map *l = maps[i];
+-
+-          if (l->l_init_called)
+-            {
+-              _dl_call_fini (l);
++  for (struct link_map *l = local_init_called_list; l != NULL;
++       l = l->l_init_called_next)
++    {
+ #ifdef SHARED
+-              /* Auditing checkpoint: another object closed. */
+-              _dl_audit_objclose (l);
++      if (GL(dl_ns)[l->l_ns]._ns_loaded->l_auditing != do_audit)
++        continue;
++
++      /* Avoid back-to-back calls of _dl_audit_activity_nsid for the
++         same namespace. */
++      if (last_ns != l->l_ns)
++        {
++          if (last_ns >= 0)
++            _dl_audit_activity_nsid (last_ns, LA_ACT_CONSISTENT);
++          _dl_audit_activity_nsid (l->l_ns, LA_ACT_DELETE);
++          last_ns = l->l_ns;
++        }
+ #endif
+-            }
+
+-          /* Correct the previous increment. */
+-          --l->l_direct_opencount;
+-        }
++      /* There is no need to re-enable exceptions because _dl_fini
++         is not called from a context where exceptions are caught. */
++      _dl_call_fini (l);
+
+ #ifdef SHARED
+-      _dl_audit_activity_nsid (ns, LA_ACT_CONSISTENT);
++      /* Auditing checkpoint: another object closed. */
++      _dl_audit_objclose (l);
+ #endif
+-        }
+-    }
++    }
+
+ #ifdef SHARED
+-  if (! do_audit && GLRO(dl_naudit) > 0)
+-    {
+-      do_audit = 1;
+-      goto again;
+-    }
++  if (last_ns >= 0)
++    _dl_audit_activity_nsid (last_ns, LA_ACT_CONSISTENT);
+
+   if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_STATISTICS))
+     _dl_debug_printf ("\nruntime linker statistics:\n"
+diff --git a/elf/dl-init.c b/elf/dl-init.c
+index ba4d2fdc85..ffd05b7806 100644
+--- a/elf/dl-init.c
++++ b/elf/dl-init.c
+@@ -21,6 +21,7 @@
+ #include <ldsodefs.h>
+ #include <elf-initfini.h>
+
++struct link_map *_dl_init_called_list;
+
+ static void
+ call_init (struct link_map *l, int argc, char **argv, char **env)
+@@ -42,6 +43,21 @@ call_init (struct link_map *l, int argc, char **argv, char **env)
+      dependency. */
+   l->l_init_called = 1;
+
++  /* Help an already-running dlclose: The just-loaded object must not
++     be removed during the current pass. (No effect if no dlclose in
++     progress.) */
++  l->l_map_used = 1;
++
++  /* Record execution before starting any initializers. This way, if
++     the initializers themselves call dlopen, their ELF destructors
++     will eventually be run before this object is destructed, matching
++     that their ELF constructors have run before this object was
++     constructed. _dl_fini uses this list for audit callbacks, so
++     register objects on the list even if they do not have a
++     constructor. */
++  l->l_init_called_next = _dl_init_called_list;
++  _dl_init_called_list = l;
++
+   /* Check for object which constructors we do not run here. */
+   if (__builtin_expect (l->l_name[0], 'a') == '\0'
+       && l->l_type == lt_executable)
+diff --git a/elf/dso-sort-tests-1.def b/elf/dso-sort-tests-1.def
+index 4bf9052db1..61dc54f8ae 100644
+--- a/elf/dso-sort-tests-1.def
++++ b/elf/dso-sort-tests-1.def
+@@ -53,21 +53,14 @@ tst-dso-ordering10: {}->a->b->c;soname({})=c
+ output: b>a>{}<c<a<b<
+
+ # Complex example from Bugzilla #15311, under-linked and with circular
+-# relocation(dynamic) dependencies. While this is technically unspecified, the
+-# presumed reasonable practical behavior is for the destructor order to respect
+-# the static DT_NEEDED links (here this means the a->b->c->d order).
+-# The older dynamic_sort=1 algorithm does not achieve this, while the DFS-based
+-# dynamic_sort=2 algorithm does, although it is still arguable whether going
+-# beyond spec to do this is the right thing to do.
+-# The below expected outputs are what the two algorithms currently produce
+-# respectively, for regression testing purposes.
++# relocation(dynamic) dependencies. For both sorting algorithms, the
++# destruction order is the reverse of the construction order, and
++# relocation dependencies are not taken into account.
+ tst-bz15311: {+a;+e;+f;+g;+d;%d;-d;-g;-f;-e;-a};a->b->c->d;d=>[ba];c=>a;b=>e=>a;c=>f=>b;d=>g=>c
+-output(glibc.rtld.dynamic_sort=1): {+a[d>c>b>a>];+e[e>];+f[f>];+g[g>];+d[];%d(b(e(a()))a()g(c(a()f(b(e(a()))))));-d[];-g[];-f[];-e[];-a[<a<c<d<g<f<b<e<];}
+-output(glibc.rtld.dynamic_sort=2): {+a[d>c>b>a>];+e[e>];+f[f>];+g[g>];+d[];%d(b(e(a()))a()g(c(a()f(b(e(a()))))));-d[];-g[];-f[];-e[];-a[<g<f<a<b<c<d<e<];}
++output: {+a[d>c>b>a>];+e[e>];+f[f>];+g[g>];+d[];%d(b(e(a()))a()g(c(a()f(b(e(a()))))));-d[];-g[];-f[];-e[];-a[<g<f<e<a<b<c<d<];}
+
+ # Test that even in the presence of dependency loops involving dlopen'ed
+ # object, that object is initialized last (and not unloaded prematurely).
+-# Final destructor order is indeterminate due to the cycle.
++# Final destructor order is the opposite of constructor order.
+ tst-bz28937: {+a;+b;-b;+c;%c};a->a1;a->a2;a2->a;b->b1;c->a1;c=>a1
+-output(glibc.rtld.dynamic_sort=1): {+a[a2>a1>a>];+b[b1>b>];-b[];%c(a1());}<a<a2<c<a1<
+-output(glibc.rtld.dynamic_sort=2): {+a[a2>a1>a>];+b[b1>b>];-b[];%c(a1());}<a2<a<c<a1<
++output: {+a[a2>a1>a>];+b[b1>b>];-b[];%c(a1());}<c<a<a1<a2<
+
+From: Florian Weimer <fweimer@redhat.com>
+Date: Fri, 8 Sep 2023 13:02:06 +0200
+Subject: [PATCH] elf: Remove unused l_text_end field from struct link_map
+
+It is a left-over from commit 52a01100ad011293197637e42b5be1a479a2
+("elf: Remove ad-hoc restrictions on dlopen callers [BZ #22787]").
+
+When backporting commit 6985865bc3ad5b23147ee73466583dd7fdf65892
+("elf: Always call destructors in reverse constructor order
+(bug 30785)"), we can move the l_init_called_next field to this
+place, so that the internal GLIBC_PRIVATE ABI does not change.
+
+Reviewed-by: Carlos O'Donell <carlos@redhat.com>
+Tested-by: Carlos O'Donell <carlos@redhat.com>
+(cherry picked from commit 53df2ce6885da3d0e89e87dca7b095622296014f)
+---
+ elf/dl-load.c    | 2 +-
+ elf/dl-load.h    | 7 ++-----
+ elf/rtld.c       | 6 ------
+ elf/setup-vdso.h | 4 ----
+ include/link.h   | 2 --
+ 5 files changed, 3 insertions(+), 18 deletions(-)
+
+diff --git a/elf/dl-load.c b/elf/dl-load.c
+index 9a87fda9c9..2923b1141d 100644
+--- a/elf/dl-load.c
++++ b/elf/dl-load.c
+@@ -1253,7 +1253,7 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
+
+   /* Now process the load commands and map segments into memory.
+      This is responsible for filling in:
+-     l_map_start, l_map_end, l_addr, l_contiguous, l_text_end, l_phdr
++     l_map_start, l_map_end, l_addr, l_contiguous, l_phdr
+    */
+   errstring = _dl_map_segments (l, fd, header, type, loadcmds, nloadcmds,
+                                 maplength, has_holes, loader);
+diff --git a/elf/dl-load.h b/elf/dl-load.h
+index ecf6910c68..1d5207694b 100644
+--- a/elf/dl-load.h
++++ b/elf/dl-load.h
+@@ -83,14 +83,11 @@ struct loadcmd
+
+ /* This is a subroutine of _dl_map_segments. It should be called for each
+    load command, some time after L->l_addr has been set correctly. It is
+-   responsible for setting up the l_text_end and l_phdr fields. */
++   responsible for setting the l_phdr fields. */
+ static __always_inline void
+ _dl_postprocess_loadcmd (struct link_map *l, const ElfW(Ehdr) *header,
+                          const struct loadcmd *c)
+ {
+-  if (c->prot & PROT_EXEC)
+-    l->l_text_end = l->l_addr + c->mapend;
+-
+   if (l->l_phdr == 0
+       && c->mapoff <= header->e_phoff
+       && ((size_t) (c->mapend - c->mapstart + c->mapoff)
+@@ -103,7 +100,7 @@ _dl_postprocess_loadcmd (struct link_map *l, const ElfW(Ehdr) *header,
+
+ /* This is a subroutine of _dl_map_object_from_fd. It is responsible
+    for filling in several fields in *L: l_map_start, l_map_end, l_addr,
+-   l_contiguous, l_text_end, l_phdr. On successful return, all the
++   l_contiguous, l_phdr. On successful return, all the
+    segments are mapped (or copied, or whatever) from the file into their
+    final places in the address space, with the correct page permissions,
+    and any bss-like regions already zeroed. It returns a null pointer
+diff --git a/elf/rtld.c b/elf/rtld.c
+index a91e2a4471..5107d16fe3 100644
+--- a/elf/rtld.c
++++ b/elf/rtld.c
+@@ -477,7 +477,6 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
+   GL(dl_rtld_map).l_real = &GL(dl_rtld_map);
+   GL(dl_rtld_map).l_map_start = (ElfW(Addr)) &__ehdr_start;
+   GL(dl_rtld_map).l_map_end = (ElfW(Addr)) _end;
+-  GL(dl_rtld_map).l_text_end = (ElfW(Addr)) _etext;
+   /* Copy the TLS related data if necessary. */
+ #ifndef DONT_USE_BOOTSTRAP_MAP
+ # if NO_TLS_OFFSET != 0
+@@ -1119,7 +1118,6 @@ rtld_setup_main_map (struct link_map *main_map)
+   bool has_interp = false;
+
+   main_map->l_map_end = 0;
+-  main_map->l_text_end = 0;
+   /* Perhaps the executable has no PT_LOAD header entries at all. */
+   main_map->l_map_start = ~0;
+   /* And it was opened directly. */
+@@ -1211,8 +1209,6 @@ rtld_setup_main_map (struct link_map *main_map)
+             allocend = main_map->l_addr + ph->p_vaddr + ph->p_memsz;
+             if (main_map->l_map_end < allocend)
+               main_map->l_map_end = allocend;
+-            if ((ph->p_flags & PF_X) && allocend > main_map->l_text_end)
+-              main_map->l_text_end = allocend;
+
+             /* The next expected address is the page following this load
+                segment. */
+@@ -1272,8 +1268,6 @@ rtld_setup_main_map (struct link_map *main_map)
+       = (char *) main_map->l_tls_initimage + main_map->l_addr;
+   if (! main_map->l_map_end)
+     main_map->l_map_end = ~0;
+-  if (! main_map->l_text_end)
+-    main_map->l_text_end = ~0;
+   if (! GL(dl_rtld_map).l_libname && GL(dl_rtld_map).l_name)
+     {
+       /* We were invoked directly, so the program might not have a
+diff --git a/elf/setup-vdso.h b/elf/setup-vdso.h
+index 0079842d1f..d92b12a7aa 100644
+--- a/elf/setup-vdso.h
++++ b/elf/setup-vdso.h
+@@ -51,9 +51,6 @@ setup_vdso (struct link_map *main_map __attribute__ ((unused)),
+             l->l_addr = ph->p_vaddr;
+           if (ph->p_vaddr + ph->p_memsz >= l->l_map_end)
+             l->l_map_end = ph->p_vaddr + ph->p_memsz;
+-          if ((ph->p_flags & PF_X)
+-              && ph->p_vaddr + ph->p_memsz >= l->l_text_end)
+-            l->l_text_end = ph->p_vaddr + ph->p_memsz;
+         }
+       else
+         /* There must be no TLS segment. */
+@@ -62,7 +59,6 @@ setup_vdso (struct link_map *main_map __attribute__ ((unused)),
+   l->l_map_start = (ElfW(Addr)) GLRO(dl_sysinfo_dso);
+   l->l_addr = l->l_map_start - l->l_addr;
+   l->l_map_end += l->l_addr;
+-  l->l_text_end += l->l_addr;
+   l->l_ld = (void *) ((ElfW(Addr)) l->l_ld + l->l_addr);
+   elf_get_dynamic_info (l, false, false);
+   _dl_setup_hash (l);
+diff --git a/include/link.h b/include/link.h
+index 69bda3ed17..c6af095d87 100644
+--- a/include/link.h
++++ b/include/link.h
+@@ -253,8 +253,6 @@ struct link_map
+     /* Start and finish of memory map for this object. l_map_start
+        need not be the same as l_addr. */
+     ElfW(Addr) l_map_start, l_map_end;
+-    /* End of the executable part of the mapping. */
+-    ElfW(Addr) l_text_end;
+
+     /* Default array for 'l_scope'. */
+     struct r_scope_elem *l_scope_mem[4];
+--
+2.42.0
+
+From d3ba6c1333b10680ce5900a628108507d9d4b844 Mon Sep 17 00:00:00 2001
+From: Florian Weimer <fweimer@redhat.com>
+Date: Mon, 11 Sep 2023 09:17:52 +0200
+Subject: [PATCH] elf: Move l_init_called_next to old place of l_text_end in
+ link map
+
+This preserves all member offsets and the GLIBC_PRIVATE ABI
+for backporting.
+---
+ include/link.h | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/include/link.h b/include/link.h
+index c6af095d87..686813f281 100644
+--- a/include/link.h
++++ b/include/link.h
+@@ -254,6 +254,10 @@ struct link_map
+        need not be the same as l_addr. */
+     ElfW(Addr) l_map_start, l_map_end;
+
++    /* Linked list of objects in reverse ELF constructor execution
++       order. Head of list is stored in _dl_init_called_list. */
++    struct link_map *l_init_called_next;
++
+     /* Default array for 'l_scope'. */
+     struct r_scope_elem *l_scope_mem[4];
+     /* Size of array allocated for 'l_scope'. */
+@@ -276,10 +280,6 @@ struct link_map
+     /* List of object in order of the init and fini calls. */
+     struct link_map **l_initfini;
+
+-    /* Linked list of objects in reverse ELF constructor execution
+-       order. Head of list is stored in _dl_init_called_list. */
+-    struct link_map *l_init_called_next;
+-
+     /* List of the dependencies introduced through symbol binding. */
+     struct link_map_reldeps
+       {
+--
+2.42.0
+
diff --git a/glibc.changes b/glibc.changes
index f0d9791..a085d8e 100644
--- a/glibc.changes
+++ b/glibc.changes
@@ -1,3 +1,14 @@
+-------------------------------------------------------------------
+Mon Sep 11 09:20:07 UTC 2023 - Andreas Schwab <schwab@suse.de>
+
+- ppc64-flock-fob64.patch: io: Fix record locking constants for powerpc64
+  with __USE_FILE_OFFSET64 (BZ #30804)
+- libio-io-vtables.patch: libio: Fix oversized __io_vtables
+- call-init-proxy-objects.patch: elf: Do not run constructors for proxy
+  objects
+- dtors-reverse-ctor-order.patch: elf: Always call destructors in reverse
+  constructor order (BZ #30785)
+
 -------------------------------------------------------------------
 Tue Sep 5 11:13:13 UTC 2023 - Andreas Schwab <schwab@suse.de>
 
diff --git a/glibc.spec b/glibc.spec
index f9d3049..d4b86c7 100644
--- a/glibc.spec
+++ b/glibc.spec
@@ -307,6 +307,14 @@ Patch1002: cache-intel-shared.patch
 Patch1003: posix-memalign-fragmentation.patch
 # PATCH-FIX-UPSTREAM intl: Treat C.UTF-8 locale like C locale (BZ #16621)
 Patch1004: intl-c-utf-8-like-c-locale.patch
+# PATCH-FIX-UPSTREAM io: Fix record locking constants for powerpc64 with __USE_FILE_OFFSET64 (BZ #30804)
+Patch1005: ppc64-flock-fob64.patch
+# PATCH-FIX-UPSTREAM libio: Fix oversized __io_vtables
+Patch1006: libio-io-vtables.patch
+# PATCH-FIX-UPSTREAM elf: Do not run constructors for proxy objects
+Patch1007: call-init-proxy-objects.patch
+# PATCH-FIX-UPSTREAM elf: Always call destructors in reverse constructor order (BZ #30785)
+Patch1008: dtors-reverse-ctor-order.patch
 
 ###
 # Patches awaiting upstream approval
@@ -534,6 +542,10 @@ library in a cross compilation setting.
 %patch1002 -p1
 %patch1003 -p1
 %patch1004 -p1
+%patch1005 -p1
+%patch1006 -p1
+%patch1007 -p1
+%patch1008 -p1
 %endif
 
 %patch2000 -p1
diff --git a/libio-io-vtables.patch b/libio-io-vtables.patch
new file mode 100644
index 0000000..4b622f4
--- /dev/null
+++ b/libio-io-vtables.patch
@@ -0,0 +1,51 @@
+From 92201f16cbcfd9eafe314ef6654be2ea7ba25675 Mon Sep 17 00:00:00 2001
+From: Adam Jackson <ajax@redhat.com>
+Date: Fri, 8 Sep 2023 15:55:19 -0400
+Subject: [PATCH] libio: Fix oversized __io_vtables
+
+IO_VTABLES_LEN is the size of the struct array in bytes, not the number
+of __IO_jump_t's in the array. Drops just under 384kb from .rodata on
+LP64 machines.
+
+Fixes: 3020f72618e ("libio: Remove the usage of __libc_IO_vtables")
+Signed-off-by: Adam Jackson <ajax@redhat.com>
+Reviewed-by: Florian Weimer <fweimer@redhat.com>
+Tested-by: Florian Weimer <fweimer@redhat.com>
+(cherry picked from commit 8cb69e054386f980f9ff4d93b157861d72b2019e)
+---
+ libio/vtables.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/libio/vtables.c b/libio/vtables.c
+index 1d8ad612e9..34f7e15f1c 100644
+--- a/libio/vtables.c
++++ b/libio/vtables.c
+@@ -20,6 +20,7 @@
+ #include <dlfcn.h>
+ #include <libioP.h>
+ #include <shlib-compat.h>
++#include <array_length.h>
+ #include <pointer_guard.h>
+ #include <stdio.h>
+
+@@ -88,7 +89,7 @@
+ # pragma weak __wprintf_buffer_as_file_xsputn
+ #endif
+
+-const struct _IO_jump_t __io_vtables[IO_VTABLES_LEN] attribute_relro =
++const struct _IO_jump_t __io_vtables[] attribute_relro =
+ {
+   /* _IO_str_jumps */
+   [IO_STR_JUMPS] =
+@@ -485,6 +486,8 @@ const struct _IO_jump_t __io_vtables[IO_VTABLES_LEN] attribute_relro =
+   },
+ #endif
+ };
++_Static_assert (array_length (__io_vtables) == IO_VTABLES_NUM,
++                "initializer count");
+
+ #ifdef SHARED
+
+--
+2.42.0
+
diff --git a/ppc64-flock-fob64.patch b/ppc64-flock-fob64.patch
new file mode 100644
index 0000000..55c9b2f
--- /dev/null
+++ b/ppc64-flock-fob64.patch
@@ -0,0 +1,76 @@
+From 434bf72a94de68f0cc7fbf3c44bf38c1911b70cb Mon Sep 17 00:00:00 2001
+From: Aurelien Jarno <aurelien@aurel32.net>
+Date: Mon, 28 Aug 2023 23:30:37 +0200
+Subject: [PATCH] io: Fix record locking constants for powerpc64 with
+ __USE_FILE_OFFSET64
+
+Commit 5f828ff824e3b7cd1 ("io: Fix F_GETLK, F_SETLK, and F_SETLKW for
+powerpc64") fixed an issue with the value of the lock constants on
+powerpc64 when not using __USE_FILE_OFFSET64, but it ended up also
+changing the value when using __USE_FILE_OFFSET64, causing an API change.
+
+Fix that by also checking that define, restoring the pre
+4d0fe291aed3a476a commit values:
+
+Default values:
+- F_GETLK: 5
+- F_SETLK: 6
+- F_SETLKW: 7
+
+With -D_FILE_OFFSET_BITS=64:
+- F_GETLK: 12
+- F_SETLK: 13
+- F_SETLKW: 14
+
+At the same time, it has been noticed that there was no test for io lock
+with __USE_FILE_OFFSET64, so just add one.
+
+Tested on x86_64-linux-gnu, i686-linux-gnu and
+powerpc64le-unknown-linux-gnu.
+
+Resolves: BZ #30804.
+Co-authored-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
+Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
+---
+ io/Makefile                                  | 1 +
+ io/tst-fcntl-lock-lfs.c                      | 2 ++
+ sysdeps/unix/sysv/linux/powerpc/bits/fcntl.h | 2 +-
+ 3 files changed, 4 insertions(+), 1 deletion(-)
+ create mode 100644 io/tst-fcntl-lock-lfs.c
+
+diff --git a/io/Makefile b/io/Makefile
+index 6ccc0e8691..8a3c83a3bb 100644
+--- a/io/Makefile
++++ b/io/Makefile
+@@ -192,6 +192,7 @@ tests := \
+   tst-fchownat \
+   tst-fcntl \
+   tst-fcntl-lock \
++  tst-fcntl-lock-lfs \
+   tst-fstatat \
+   tst-fts \
+   tst-fts-lfs \
+diff --git a/io/tst-fcntl-lock-lfs.c b/io/tst-fcntl-lock-lfs.c
+new file mode 100644
+index 0000000000..f2a909fb02
+--- /dev/null
++++ b/io/tst-fcntl-lock-lfs.c
+@@ -0,0 +1,2 @@
++#define _FILE_OFFSET_BITS 64
++#include "tst-fcntl-lock.c"
+diff --git a/sysdeps/unix/sysv/linux/powerpc/bits/fcntl.h b/sysdeps/unix/sysv/linux/powerpc/bits/fcntl.h
+index f7615a447e..d8a291a331 100644
+--- a/sysdeps/unix/sysv/linux/powerpc/bits/fcntl.h
++++ b/sysdeps/unix/sysv/linux/powerpc/bits/fcntl.h
+@@ -33,7 +33,7 @@
+ # define __O_LARGEFILE 0200000
+ #endif
+
+-#if __WORDSIZE == 64
++#if __WORDSIZE == 64 && !defined __USE_FILE_OFFSET64
+ # define F_GETLK 5
+ # define F_SETLK 6
+ # define F_SETLKW 7
+--
+2.42.0
+
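A quick way to observe the ABI that ppc64-flock-fob64.patch restores is to print the fcntl record-locking command values in both compilation modes. This is a standalone sketch, not part of the patch set; the file name is made up, and the expected numbers are the ones quoted in the commit message above (they differ only on powerpc64):

    /* print-lock-constants.c -- hypothetical standalone check.
       Build twice and compare:
         gcc print-lock-constants.c -o default
         gcc -D_FILE_OFFSET_BITS=64 print-lock-constants.c -o lfs
       On powerpc64 with the fix applied, "default" prints 5/6/7 and
       "lfs" prints 12/13/14; the bug made both builds print 5/6/7.  */
    #include <stdio.h>
    #include <fcntl.h>

    int
    main (void)
    {
      printf ("F_GETLK=%d F_SETLK=%d F_SETLKW=%d\n",
              F_GETLK, F_SETLK, F_SETLKW);
      return 0;
    }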