* [PATCH v3 00/32] RELRO linkmaps
@ 2023-12-07 10:30 Florian Weimer
2023-12-07 10:30 ` [PATCH v3 01/32] support: Add <support/memprobe.h> for protection flags probing Florian Weimer
` (32 more replies)
0 siblings, 33 replies; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:30 UTC (permalink / raw)
To: libc-alpha
This is a rebase on top of the current development branch. There were
quite a few conflicts.
Is the struct link_map_private change something we want? (We currently
have different definitions of struct link_map within glibc and for
applications, resulting in conflicting debugging information.) I can
submit that as a separate patch.
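To make the question concrete, here is a rough, stand-alone sketch of
the idea (the field names are illustrative only, not the exact layout
used in this series):

    #include <stdint.h>

    /* The ABI-visible part that <link.h> keeps describing.  */
    struct link_map_public
    {
      uintptr_t l_addr;
      char *l_name;
      void *l_ld;
      struct link_map_public *l_next, *l_prev;
    };

    /* Internal-only extension.  The public part stays first, so
       &priv->l_public can be handed out wherever applications and
       debuggers expect the public struct.  */
    struct link_map_private
    {
      struct link_map_public l_public;
      unsigned int l_direct_opencount;  /* ...more internal fields...  */
    };

With separate type names, the debug information emitted for glibc
internals no longer conflicts with the definition applications see.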
Likewise, I'd like to move the dl_rtld_map out of _rtld_global because
it makes the internal GLIBC_PRIVATE ABI more stable. This series
achieves this as a side effect because the link map is now allocated
using the protected memory allocator. But if the entire series can't
land in 2.39, I'd like to submit this separately as well, along with the
“_dl_rtld_map should not exist in static builds” cleanup.
Thanks,
Florian
Florian Weimer (32):
support: Add <support/memprobe.h> for protection flags probing
misc: Enable internal use of memory protection keys
elf: Remove _dl_sysdep_open_object hook function
elf: Eliminate second loop in find_version in dl-version.c
elf: In rtld_setup_main_map, assume ld.so has a DYNAMIC segment
elf: Remove version assert in check_match in elf/dl-lookup.c
elf: Disambiguate some failures in _dl_load_cache_lookup
elf: Eliminate alloca in open_verify
Do not export <alloc_buffer.h> functions from libc
elf: Make <alloc_buffer.h> usable in ld.so
elf: Merge the three implementations of _dl_dst_substitute
elf: Move __rtld_malloc_init_stubs call into _dl_start_final
elf: Merge __dl_libc_freemem into __rtld_libc_freeres
elf: Use struct link_map_private for the internal link map
elf: Remove run-time-writable fields from struct link_map_private
elf: Move l_tls_offset into read-write part of link map
elf: Allocate auditor state after read-write link map
elf: Move link map fields used by dependency sorting to writable part
elf: Split _dl_lookup_map, _dl_map_new_object from _dl_map_object
elf: Add l_soname accessor function for DT_SONAME values
elf: _dl_rtld_map should not exist in static builds
elf: Introduce GLPM accessor for the protected memory area
elf: Bootstrap allocation for future protected memory allocator
elf: Implement a basic protected memory allocator
elf: Move most of the _dl_find_object data to the protected heap
elf: Switch to a region-based protected memory allocator
elf: Determine the caller link map in _dl_open
elf: Add fast path to dlopen for fully-opened maps
elf: Use _dl_find_object instead of _dl_find_dso_for_object in dlopen
elf: Put critical _dl_find_object pointers into protected memory area
elf: Add hash tables to speed up DT_NEEDED, dlopen lookups
elf: Use memory protection keys for the protected memory allocator
NEWS | 4 +
csu/libc-start.c | 7 +-
csu/libc-tls.c | 8 +-
debug/backtracesyms.c | 4 +-
debug/backtracesymsfd.c | 6 +-
dlfcn/dladdr1.c | 7 +-
dlfcn/dlinfo.c | 4 +-
dlfcn/tst-dlinfo-phdr.c | 15 +-
elf/Makefile | 24 +
elf/circleload1.c | 18 +-
elf/dl-addr-obj.c | 4 +-
elf/dl-addr.c | 13 +-
elf/dl-audit.c | 25 +-
elf/dl-cache.c | 33 +-
elf/dl-call-libc-early-init.c | 2 +-
elf/dl-call_fini.c | 11 +-
elf/dl-close.c | 187 ++---
elf/dl-debug.c | 12 -
elf/dl-deps.c | 177 +++--
elf/dl-diagnostics.c | 2 +
elf/dl-find_object.c | 167 ++---
elf/dl-find_object.h | 21 +-
elf/dl-fini.c | 16 +-
elf/dl-fptr.c | 6 +-
elf/dl-init.c | 22 +-
elf/dl-iteratephdr.c | 11 +-
elf/dl-libc.c | 115 +--
elf/dl-libc_freeres.c | 94 ++-
elf/dl-libname.c | 281 ++++++++
elf/dl-libname.h | 121 ++++
elf/dl-load.c | 501 ++++++-------
elf/dl-load.h | 6 +-
elf/dl-lookup-direct.c | 5 +-
elf/dl-lookup.c | 150 ++--
elf/dl-machine-reject-phdr.h | 4 +-
elf/dl-map-segments.h | 16 +-
elf/dl-minimal.c | 4 +-
elf/dl-misc.c | 20 -
elf/dl-object.c | 181 +++--
elf/dl-open.c | 227 +++---
elf/dl-profile.c | 4 +-
elf/dl-protmem-internal.h | 100 +++
elf/dl-protmem.c | 679 ++++++++++++++++++
elf/dl-protmem.h | 102 +++
elf/dl-protmem_bootstrap.h | 36 +
elf/dl-reloc-static-pie.c | 7 +-
elf/dl-reloc.c | 46 +-
elf/dl-runtime.c | 6 +-
elf/dl-setup_hash.c | 2 +-
elf/dl-sort-maps.c | 53 +-
elf/dl-static-tls.h | 10 +-
elf/dl-support.c | 46 +-
elf/dl-sym-post.h | 6 +-
elf/dl-sym.c | 10 +-
elf/dl-symaddr.c | 2 +-
elf/dl-sysdep-open.h | 45 --
elf/dl-tls.c | 61 +-
elf/dl-tunables.list | 6 +
elf/dl-unmap-segments.h | 2 +-
elf/dl-usage.c | 2 +-
elf/dl-version.c | 77 +-
elf/do-rel.h | 19 +-
elf/dynamic-link.h | 14 +-
elf/get-dynamic-info.h | 12 +-
elf/libc-early-init.h | 6 +-
elf/loadtest.c | 34 +-
elf/neededtest.c | 18 +-
elf/neededtest2.c | 18 +-
elf/neededtest3.c | 18 +-
elf/neededtest4.c | 18 +-
elf/pldd-xx.c | 19 +-
elf/pldd.c | 1 +
elf/rtld.c | 454 ++++++------
elf/rtld_static_init.c | 2 +-
elf/setup-vdso.h | 46 +-
elf/sotruss-lib.c | 5 +-
elf/sprof.c | 27 +-
elf/tlsdeschtab.h | 4 +-
elf/tst-_dl_addr_inside_object.c | 13 +-
elf/tst-audit19a.c | 2 +-
elf/tst-dl-protmem.c | 364 ++++++++++
elf/tst-dl_find_object-threads.c | 6 +-
elf/tst-dl_find_object.c | 19 +-
elf/tst-relro-linkmap-disabled-mod1.c | 46 ++
elf/tst-relro-linkmap-disabled-mod2.c | 2 +
elf/tst-relro-linkmap-disabled.c | 64 ++
elf/tst-relro-linkmap-mod1.c | 42 ++
elf/tst-relro-linkmap-mod2.c | 2 +
elf/tst-relro-linkmap-mod3.c | 2 +
elf/tst-relro-linkmap.c | 112 +++
elf/tst-rtld-list-tunables.exp | 1 +
elf/tst-rtld-nomem.c | 177 +++++
elf/tst-tls6.c | 8 +-
elf/tst-tls7.c | 8 +-
elf/tst-tls8.c | 24 +-
elf/unload.c | 10 +-
elf/unload2.c | 10 +-
htl/pt-alloc.c | 7 +-
include/alloc_buffer.h | 26 +-
include/dlfcn.h | 6 +-
include/link.h | 178 +++--
include/rtld-malloc.h | 5 +-
include/set-freeres.h | 1 -
libio/vtables.c | 2 +-
malloc/Makefile | 6 +-
malloc/Versions | 7 -
malloc/alloc_buffer_alloc_array.c | 1 -
malloc/alloc_buffer_allocate.c | 1 -
malloc/alloc_buffer_copy_bytes.c | 1 -
malloc/alloc_buffer_copy_string.c | 1 -
malloc/alloc_buffer_create_failure.c | 7 +-
malloc/set-freeres.c | 2 -
malloc/tst-alloc_buffer.c | 4 +
manual/tunables.texi | 29 +
nptl/Versions | 3 +-
nptl/pthread_create.c | 8 +
nptl_db/db_info.c | 3 +-
nptl_db/structs.def | 3 +-
nptl_db/td_thr_tlsbase.c | 12 +-
nss/Makefile | 4 +-
stdlib/cxa_thread_atexit_impl.c | 10 +-
stdlib/tst-tls-atexit.c | 10 +-
support/Makefile | 3 +
support/memprobe.h | 43 ++
support/support-alloc_buffer.c | 26 +
support/support_memprobe.c | 251 +++++++
support/tst-support_memprobe.c | 118 +++
sysdeps/aarch64/dl-bti.c | 14 +-
sysdeps/aarch64/dl-lookupcfg.h | 4 +-
sysdeps/aarch64/dl-machine.h | 29 +-
sysdeps/aarch64/dl-prop.h | 12 +-
sysdeps/aarch64/dl-tlsdesc.h | 2 +-
sysdeps/aarch64/tlsdesc.c | 2 +-
sysdeps/alpha/dl-machine.h | 24 +-
sysdeps/arc/dl-machine.h | 21 +-
sysdeps/arm/dl-lookupcfg.h | 4 +-
sysdeps/arm/dl-machine.h | 43 +-
sysdeps/arm/dl-tlsdesc.h | 2 +-
sysdeps/arm/tlsdesc.c | 2 +-
sysdeps/csky/dl-machine.h | 22 +-
sysdeps/generic/dl-debug.h | 2 +-
sysdeps/generic/dl-early_mmap.h | 35 +
sysdeps/generic/dl-fptr.h | 4 +-
sysdeps/generic/dl-prop.h | 8 +-
sysdeps/generic/dl-protected.h | 10 +-
sysdeps/generic/dl-protmem-pkey.h | 20 +
sysdeps/generic/ldsodefs.h | 280 +++++---
sysdeps/generic/rtld_static_init.h | 3 +-
sysdeps/hppa/dl-fptr.c | 10 +-
sysdeps/hppa/dl-lookupcfg.h | 6 +-
sysdeps/hppa/dl-machine.h | 29 +-
sysdeps/hppa/dl-runtime.c | 4 +-
sysdeps/hppa/dl-runtime.h | 2 +-
sysdeps/hppa/dl-symaddr.c | 2 +-
sysdeps/htl/pthreadP.h | 2 +-
sysdeps/i386/dl-machine.h | 41 +-
sysdeps/i386/dl-tlsdesc.h | 2 +-
sysdeps/i386/tlsdesc.c | 2 +-
sysdeps/ia64/dl-lookupcfg.h | 6 +-
sysdeps/ia64/dl-machine.h | 29 +-
sysdeps/loongarch/dl-machine.h | 19 +-
sysdeps/loongarch/dl-tls.h | 2 +-
sysdeps/m68k/dl-machine.h | 20 +-
sysdeps/m68k/dl-tls.h | 2 +-
sysdeps/microblaze/dl-machine.h | 23 +-
sysdeps/mips/Makefile | 6 +
sysdeps/mips/dl-debug.h | 2 +-
sysdeps/mips/dl-machine-reject-phdr.h | 20 +-
sysdeps/mips/dl-machine.h | 74 +-
sysdeps/mips/dl-tls.h | 2 +-
sysdeps/mips/dl-trampoline.c | 19 +-
sysdeps/nios2/dl-init.c | 6 +-
sysdeps/nios2/dl-machine.h | 19 +-
sysdeps/nios2/dl-tls.h | 2 +-
sysdeps/nptl/dl-mutex.c | 2 +-
sysdeps/or1k/dl-machine.h | 20 +-
sysdeps/powerpc/dl-tls.h | 2 +-
sysdeps/powerpc/powerpc32/dl-machine.c | 19 +-
sysdeps/powerpc/powerpc32/dl-machine.h | 40 +-
sysdeps/powerpc/powerpc64/dl-machine.c | 8 +-
sysdeps/powerpc/powerpc64/dl-machine.h | 48 +-
sysdeps/riscv/dl-machine.h | 26 +-
sysdeps/riscv/dl-tls.h | 2 +-
sysdeps/s390/s390-32/dl-machine.h | 29 +-
sysdeps/s390/s390-64/dl-machine.h | 29 +-
sysdeps/sh/dl-machine.h | 36 +-
sysdeps/sparc/sparc32/dl-machine.h | 24 +-
sysdeps/sparc/sparc64/dl-irel.h | 2 +-
sysdeps/sparc/sparc64/dl-machine.h | 27 +-
sysdeps/sparc/sparc64/dl-plt.h | 4 +-
sysdeps/unix/sysv/linux/dl-early_allocate.c | 17 +-
sysdeps/unix/sysv/linux/dl-early_mmap.h | 41 ++
sysdeps/unix/sysv/linux/dl-origin.c | 1 -
sysdeps/unix/sysv/linux/dl-protmem-pkey.h | 23 +
sysdeps/unix/sysv/linux/dl-sysdep.c | 2 +
sysdeps/unix/sysv/linux/dl-vdso.h | 2 +-
.../sysv/linux/include/bits/mman-shared.h | 16 +
sysdeps/unix/sysv/linux/pkey_get.c | 5 +-
sysdeps/unix/sysv/linux/pkey_mprotect.c | 4 +-
sysdeps/unix/sysv/linux/pkey_set.c | 5 +-
sysdeps/unix/sysv/linux/powerpc/libc-start.c | 2 +-
.../sysv/linux/powerpc/powerpc64/ldsodefs.h | 14 +-
.../sysv/linux/powerpc/powerpc64/pkey_get.c | 4 +-
.../sysv/linux/powerpc/powerpc64/pkey_set.c | 4 +-
.../sysv/linux/powerpc/rtld_static_init.h | 3 +-
sysdeps/unix/sysv/linux/syscalls.list | 4 +-
sysdeps/unix/sysv/linux/x86/dl-protmem-pkey.h | 26 +
sysdeps/unix/sysv/linux/x86/pkey_get.c | 5 +-
sysdeps/unix/sysv/linux/x86/pkey_set.c | 5 +-
sysdeps/x86/dl-cet.c | 4 +-
sysdeps/x86/dl-lookupcfg.h | 4 +-
sysdeps/x86/dl-prop.h | 29 +-
sysdeps/x86_64/dl-machine.h | 39 +-
sysdeps/x86_64/dl-tlsdesc.h | 2 +-
sysdeps/x86_64/tlsdesc.c | 2 +-
215 files changed, 5267 insertions(+), 2446 deletions(-)
create mode 100644 elf/dl-libname.c
create mode 100644 elf/dl-libname.h
create mode 100644 elf/dl-protmem-internal.h
create mode 100644 elf/dl-protmem.c
create mode 100644 elf/dl-protmem.h
create mode 100644 elf/dl-protmem_bootstrap.h
delete mode 100644 elf/dl-sysdep-open.h
create mode 100644 elf/tst-dl-protmem.c
create mode 100644 elf/tst-relro-linkmap-disabled-mod1.c
create mode 100644 elf/tst-relro-linkmap-disabled-mod2.c
create mode 100644 elf/tst-relro-linkmap-disabled.c
create mode 100644 elf/tst-relro-linkmap-mod1.c
create mode 100644 elf/tst-relro-linkmap-mod2.c
create mode 100644 elf/tst-relro-linkmap-mod3.c
create mode 100644 elf/tst-relro-linkmap.c
create mode 100644 elf/tst-rtld-nomem.c
create mode 100644 support/memprobe.h
create mode 100644 support/support-alloc_buffer.c
create mode 100644 support/support_memprobe.c
create mode 100644 support/tst-support_memprobe.c
create mode 100644 sysdeps/generic/dl-early_mmap.h
create mode 100644 sysdeps/generic/dl-protmem-pkey.h
create mode 100644 sysdeps/unix/sysv/linux/dl-early_mmap.h
create mode 100644 sysdeps/unix/sysv/linux/dl-protmem-pkey.h
create mode 100644 sysdeps/unix/sysv/linux/include/bits/mman-shared.h
create mode 100644 sysdeps/unix/sysv/linux/x86/dl-protmem-pkey.h
base-commit: 958478889c6a7a12b35b857b9788b7ad8706a01e
--
2.43.0
* [PATCH v3 01/32] support: Add <support/memprobe.h> for protection flags probing
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
@ 2023-12-07 10:30 ` Florian Weimer
2024-02-22 22:39 ` Joseph Myers
2023-12-07 10:30 ` [PATCH v3 02/32] misc: Enable internal use of memory protection keys Florian Weimer
` (31 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:30 UTC (permalink / raw)
To: libc-alpha
---
support/Makefile | 2 +
support/memprobe.h | 43 ++++++
support/support_memprobe.c | 251 +++++++++++++++++++++++++++++++++
support/tst-support_memprobe.c | 118 ++++++++++++++++
4 files changed, 414 insertions(+)
create mode 100644 support/memprobe.h
create mode 100644 support/support_memprobe.c
create mode 100644 support/tst-support_memprobe.c
diff --git a/support/Makefile b/support/Makefile
index 9aa7f23a6e..556281121d 100644
--- a/support/Makefile
+++ b/support/Makefile
@@ -65,6 +65,7 @@ libsupport-routines = \
support_format_hostent \
support_format_netent \
support_isolate_in_subprocess \
+ support_memprobe \
support_mutex_pi_monotonic \
support_need_proc \
support_openpty \
@@ -320,6 +321,7 @@ tests = \
tst-support_capture_subprocess \
tst-support_descriptors \
tst-support_format_dns_packet \
+ tst-support_memprobe \
tst-support_quote_blob \
tst-support_quote_blob_wide \
tst-support_quote_string \
diff --git a/support/memprobe.h b/support/memprobe.h
new file mode 100644
index 0000000000..13295e7b8d
--- /dev/null
+++ b/support/memprobe.h
@@ -0,0 +1,43 @@
+/* Probing memory for protection state.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#ifndef SUPPORT_MEMPROBE_H
+#define SUPPORT_MEMPROBE_H
+
+/* Probe access status of memory ranges. These functions record a
+ failure (but do not terminate the process) if the memory range does
+ not match the expected protection flags. */
+
+#include <stddef.h>
+
+/* Asserts that SIZE bytes at ADDRESS are inaccessible. CONTEXT
+ is used for reporting errors. */
+void support_memprobe_noaccess (const char *context, const void *address,
+ size_t size);
+
+/* Asserts that SIZE bytes at ADDRESS are read-only. CONTEXT is used
+ for reporting errors. */
+void support_memprobe_readonly (const char *context, const void *address,
+ size_t size);
+
+/* Asserts that SIZE bytes at ADDRESS are readable and writable.
+ CONTEXT is used for reporting errors. */
+void support_memprobe_readwrite (const char *context, const void *address,
+ size_t size);
+
+#endif /* SUPPORT_MEMPROBE_H */
diff --git a/support/support_memprobe.c b/support/support_memprobe.c
new file mode 100644
index 0000000000..b599f9c70e
--- /dev/null
+++ b/support/support_memprobe.c
@@ -0,0 +1,251 @@
+/* Probing memory for protection state.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+/* The implementation uses vfork for probing. As a result, it can be
+ used for testing page protections controlled by memory protection
+ keys, despite their problematic interaction with signal handlers
+ (bug 22396). */
+
+#include <support/memprobe.h>
+
+#include <atomic.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <support/check.h>
+#include <support/support.h>
+#include <support/xunistd.h>
+#include <sys/param.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <sys/resource.h>
+
+#ifdef __linux__
+# include <sys/prctl.h>
+#endif
+
+/* Make a more complete attempt to disable core dumps, even in the
+ presence of core catchers that ignore RLIMIT_CORE. Used after
+ vfork. */
+static void
+disable_coredumps (void)
+{
+#ifdef __linux__
+ prctl (PR_SET_DUMPABLE, 0 /* SUID_DUMP_DISABLE */, 0, 0);
+#endif
+ struct rlimit rl = {};
+ setrlimit (RLIMIT_CORE, &rl);
+}
+
+/* Restores all signals to SIG_DFL and unblocks them. */
+static void
+memprobe_sig_dfl_unblock (void)
+{
+ for (int sig = 1; sig < _NSIG; ++sig)
+ /* Ignore errors for those signals whose handler cannot be changed. */
+ (void) signal (sig, SIG_DFL);
+ sigset_t sigallset;
+ sigfillset (&sigallset);
+ sigprocmask (SIG_UNBLOCK, &sigallset, NULL);
+}
+
+/* Performs a 4-byte probe at the address aligned down. The internal
+ glibc atomics do not necessarily support one-byte access.
+ Accessing more bytes with a no-op write results in the same page
+ fault effects because of the alignment. */
+static inline void
+write_probe_at (volatile char *address)
+{
+ /* Used as an argument to force the compiler to emit an actual no-op
+ atomic instruction. */
+ static volatile uint32_t zero = 0;
+ uint32_t *ptr = (uint32_t *) ((uintptr_t) address & ~(uintptr_t) 3);
+ atomic_fetch_add_relaxed (ptr, zero);
+}
+
+/* Attempt to read or write the entire range in one go. If DO_WRITE,
+ perform a no-op write with an atomic add of a zero second operand,
+ otherwise just a read. */
+static void
+memprobe_expect_access (const char *context, volatile char *address,
+ size_t size, volatile size_t *pindex, bool do_write)
+{
+ pid_t pid = vfork ();
+ TEST_VERIFY_EXIT (pid >= 0);
+ if (pid == 0)
+ {
+ memprobe_sig_dfl_unblock ();
+ disable_coredumps ();
+ /* Accesses to *pindex are volatile, so the parent process can read
+ the correct index after an unexpected fault. */
+ if (do_write)
+ for (*pindex = 0; *pindex < size; *pindex += 4)
+ write_probe_at (address + *pindex);
+ else
+ for (*pindex = 0; *pindex < size; *pindex += 1)
+ address[*pindex]; /* Triggers volatile read. */
+ _exit (0);
+ }
+ int status;
+ xwaitpid (pid, &status, 0);
+ if (*pindex < size)
+ {
+ support_record_failure ();
+ printf ("error: %s: unexpected %s fault at address %p"
+ " (%zu bytes after %p, wait status %d)\n",
+ context, do_write ? "write" : "read", address + *pindex,
+ *pindex, address, status);
+ }
+ else
+ {
+ TEST_VERIFY (WIFEXITED (status));
+ TEST_COMPARE (WEXITSTATUS (status), 0);
+ }
+}
+
+/* Probe one byte for lack of access. Attempt a write for DO_WRITE,
+ otherwise a read. Returns false on failure. */
+static bool
+memprobe_expect_noaccess_1 (const char *context, volatile char *address,
+ size_t size, size_t index, bool do_write)
+{
+ pid_t pid = vfork ();
+ TEST_VERIFY_EXIT (pid >= 0);
+ if (pid == 0)
+ {
+ memprobe_sig_dfl_unblock ();
+ disable_coredumps ();
+ if (do_write)
+ write_probe_at (address + index);
+ else
+ address[index]; /* Triggers volatile read. */
+ _exit (0); /* Should not be executed due to fault. */
+ }
+
+ int status;
+ xwaitpid (pid, &status, 0);
+ if (WIFSIGNALED (status))
+ {
+ /* Accept SIGSEGV or SIGBUS. */
+ if (WTERMSIG (status) != SIGSEGV)
+ TEST_COMPARE (WTERMSIG (status), SIGBUS);
+ }
+ else
+ {
+ support_record_failure ();
+ printf ("error: %s: unexpected %s success at address %p"
+ " (%zu bytes after %p, wait status %d)\n",
+ context, do_write ? "write" : "read", address + index,
+ index, address, status);
+ return false;
+ }
+ return true;
+}
+
+/* Probe each byte individually because we expect a fault.
+
+ The implementation skips over bytes on the same page, so it assumes
+ that the subpage_prot system call is not used. */
+static void
+memprobe_expect_noaccess (const char *context, volatile char *address,
+ size_t size, bool do_write)
+{
+ if (size == 0)
+ return;
+
+ if (!memprobe_expect_noaccess_1 (context, address, size, 0, do_write))
+ return;
+
+ /* Round up to the next page. */
+ long int page_size = sysconf (_SC_PAGE_SIZE);
+ TEST_VERIFY_EXIT (page_size > 0);
+ size_t index;
+ {
+ uintptr_t next_page = roundup ((uintptr_t) address, page_size);
+ if (next_page < (uintptr_t) address
+ || next_page >= (uintptr_t) address + size)
+ /* Wrap around or after the end of the region. */
+ return;
+ index = next_page - (uintptr_t) address;
+ }
+
+ /* Probe in page increments. */
+ while (true)
+ {
+ if (!memprobe_expect_noaccess_1 (context, address, size, index,
+ do_write))
+ break;
+ size_t next_index = index + page_size;
+ if (next_index < index || next_index >= size)
+ /* Wrap around or after the end of the region. */
+ break;
+ index = next_index;
+ }
+}
+
+static void
+memprobe_range (const char *context, volatile char *address, size_t size,
+ bool expect_read, bool expect_write)
+{
+ /* Do not rely on the sharing nature of vfork because it could be
+ implemented as fork. */
+ size_t *pindex = support_shared_allocate (sizeof *pindex);
+
+ sigset_t oldset;
+ {
+ sigset_t sigallset;
+ sigfillset (&sigallset);
+ sigprocmask (SIG_BLOCK, &sigallset, &oldset);
+ }
+
+ if (expect_read)
+ {
+ memprobe_expect_access (context, address, size, pindex, false);
+ if (expect_write)
+ memprobe_expect_access (context, address, size, pindex, true);
+ else
+ memprobe_expect_noaccess (context, address, size, true);
+ }
+ else
+ {
+ memprobe_expect_noaccess (context, address, size, false);
+ TEST_VERIFY (!expect_write); /* Write-only probing not supported. */
+ }
+
+ sigprocmask (SIG_SETMASK, &oldset, NULL);
+ support_shared_free (pindex);
+}
+
+void support_memprobe_noaccess (const char *context, const void *address,
+ size_t size)
+{
+ memprobe_range (context, (volatile char *) address, size, false, false);
+}
+
+void support_memprobe_readonly (const char *context, const void *address,
+ size_t size)
+{
+ memprobe_range (context, (volatile char *) address, size, true, false);
+}
+
+void support_memprobe_readwrite (const char *context, const void *address,
+ size_t size)
+{
+ memprobe_range (context, (volatile char *) address, size, true, true);
+}
diff --git a/support/tst-support_memprobe.c b/support/tst-support_memprobe.c
new file mode 100644
index 0000000000..51c1b7812f
--- /dev/null
+++ b/support/tst-support_memprobe.c
@@ -0,0 +1,118 @@
+/* Tests for <support/memprobe.h>.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <support/memprobe.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <support/check.h>
+#include <support/next_to_fault.h>
+
+/* Used to exit early on errors, to avoid masking them. */
+static void
+check_barrier (void)
+{
+ if (support_record_failure_is_failed ())
+ exit (1);
+}
+
+/* Expect a failed state in the test harness. */
+static void
+expect_failure (const char *context)
+{
+ if (!support_record_failure_is_failed ())
+ {
+ printf ("error: expected failure missing: %s\n", context);
+ exit (1);
+ }
+ support_record_failure_reset ();
+}
+
+static int
+do_test (void)
+{
+ static char rw_byte = 1;
+ support_memprobe_readwrite ("rw_byte", &rw_byte, 1);
+ check_barrier ();
+
+ puts ("info: expected error for read-only to rw_byte");
+ support_memprobe_readonly ("rw_byte", &rw_byte, 1);
+
+ puts ("info: expected error for no-access to rw_byte");
+ support_memprobe_noaccess ("rw_byte", &rw_byte, 1);
+ expect_failure ("no-access rw_byte");
+
+ static const char const_byte = 1;
+ support_memprobe_readonly ("const_byte", &const_byte, 1);
+ check_barrier ();
+
+ puts ("info: expected error for no-access to const_byte");
+ support_memprobe_noaccess ("const_byte", &const_byte, 1);
+ expect_failure ("no-access const_byte");
+
+ puts ("info: expected error for read-write access to const_byte");
+ support_memprobe_readwrite ("const_byte", &const_byte, 1);
+ expect_failure ("read-write const_byte");
+
+ struct support_next_to_fault ntf = support_next_to_fault_allocate (3);
+ void *ntf_trailing = ntf.buffer + ntf.length;
+
+ /* The initial 3 bytes are accessible. */
+ support_memprobe_readwrite ("ntf init", ntf.buffer, ntf.length);
+ check_barrier ();
+
+ puts ("info: expected error for read-only to ntf init");
+ support_memprobe_readonly ("ntf init", ntf.buffer, ntf.length);
+ expect_failure ("read-only ntf init");
+
+ puts ("info: expected error for no-access to ntf init");
+ support_memprobe_noaccess ("ntf init", ntf.buffer, ntf.length);
+ expect_failure ("no-access ntf init");
+
+ /* The trailing part after the allocated area is inaccessible. */
+ support_memprobe_noaccess ("ntf trailing", ntf_trailing, 1);
+ check_barrier ();
+
+ puts ("info: expected error for read-only to ntf trailing");
+ support_memprobe_readonly ("ntf trailing", ntf_trailing, 1);
+ expect_failure ("read-only ntf trailing");
+
+ puts ("info: expected error for no-access to ntf trailing");
+ support_memprobe_readwrite ("ntf trailing", ntf_trailing, 1);
+ expect_failure ("read-write ntf trailing");
+
+ /* Both areas combined fail all checks due to inconsistent results. */
+ puts ("info: expected error for no-access to ntf overlap");
+ support_memprobe_noaccess ("ntf overlap ", ntf.buffer, ntf.length + 1);
+ expect_failure ("no-access ntf overlap");
+
+ puts ("info: expected error for read-only to ntf overlap");
+ support_memprobe_readonly ("ntf overlap", ntf.buffer, ntf.length + 1);
+ expect_failure ("read-only ntf overlap");
+
+ puts ("info: expected error for read-write to ntf overlap");
+ support_memprobe_readwrite ("ntf overlap", ntf.buffer, ntf.length + 1);
+ expect_failure ("read-write ntf overlap");
+
+
+ support_next_to_fault_free (&ntf);
+
+ return 0;
+}
+
+#include <support/test-driver.c>
--
2.43.0
* [PATCH v3 02/32] misc: Enable internal use of memory protection keys
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
2023-12-07 10:30 ` [PATCH v3 01/32] support: Add <support/memprobe.h> for protection flags probing Florian Weimer
@ 2023-12-07 10:30 ` Florian Weimer
2024-02-22 1:06 ` Joseph Myers
2023-12-07 10:31 ` [PATCH v3 03/32] elf: Remove _dl_sysdep_open_object hook function Florian Weimer
` (30 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:30 UTC (permalink / raw)
To: libc-alpha
This adds the necessary hidden prototypes.
---
.../unix/sysv/linux/include/bits/mman-shared.h | 16 ++++++++++++++++
sysdeps/unix/sysv/linux/pkey_get.c | 5 ++++-
sysdeps/unix/sysv/linux/pkey_mprotect.c | 4 +++-
sysdeps/unix/sysv/linux/pkey_set.c | 5 ++++-
.../unix/sysv/linux/powerpc/powerpc64/pkey_get.c | 4 +++-
.../unix/sysv/linux/powerpc/powerpc64/pkey_set.c | 4 +++-
sysdeps/unix/sysv/linux/syscalls.list | 4 ++--
sysdeps/unix/sysv/linux/x86/pkey_get.c | 5 ++++-
sysdeps/unix/sysv/linux/x86/pkey_set.c | 5 ++++-
9 files changed, 43 insertions(+), 9 deletions(-)
create mode 100644 sysdeps/unix/sysv/linux/include/bits/mman-shared.h
diff --git a/sysdeps/unix/sysv/linux/include/bits/mman-shared.h b/sysdeps/unix/sysv/linux/include/bits/mman-shared.h
new file mode 100644
index 0000000000..7c14b5df3c
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/include/bits/mman-shared.h
@@ -0,0 +1,16 @@
+#include <sysdeps/unix/sysv/linux/bits/mman-shared.h>
+
+#ifndef _ISOMAC
+
+extern __typeof (pkey_alloc) __pkey_alloc;
+libc_hidden_proto (__pkey_alloc)
+extern __typeof (pkey_free) __pkey_free;
+libc_hidden_proto (__pkey_free)
+extern __typeof (pkey_mprotect) __pkey_mprotect;
+libc_hidden_proto (__pkey_mprotect)
+extern __typeof (pkey_get) __pkey_get;
+libc_hidden_proto (__pkey_get)
+extern __typeof (pkey_set) __pkey_set;
+libc_hidden_proto (__pkey_set)
+
+#endif
diff --git a/sysdeps/unix/sysv/linux/pkey_get.c b/sysdeps/unix/sysv/linux/pkey_get.c
index c81e162961..98ee7a8bcb 100644
--- a/sysdeps/unix/sysv/linux/pkey_get.c
+++ b/sysdeps/unix/sysv/linux/pkey_get.c
@@ -17,10 +17,13 @@
<https://www.gnu.org/licenses/>. */
#include <errno.h>
+#include <sys/mman.h>
int
-pkey_get (int key)
+__pkey_get (int key)
{
__set_errno (ENOSYS);
return -1;
}
+libc_hidden_def (__pkey_get)
+weak_alias (__pkey_get, pkey_get)
diff --git a/sysdeps/unix/sysv/linux/pkey_mprotect.c b/sysdeps/unix/sysv/linux/pkey_mprotect.c
index 130232c78a..798bd1c4af 100644
--- a/sysdeps/unix/sysv/linux/pkey_mprotect.c
+++ b/sysdeps/unix/sysv/linux/pkey_mprotect.c
@@ -22,7 +22,7 @@
#include <sysdep.h>
int
-pkey_mprotect (void *addr, size_t len, int prot, int pkey)
+__pkey_mprotect (void *addr, size_t len, int prot, int pkey)
{
if (pkey == -1)
/* If the key is -1, the system call is precisely equivalent to
@@ -30,3 +30,5 @@ pkey_mprotect (void *addr, size_t len, int prot, int pkey)
return __mprotect (addr, len, prot);
return INLINE_SYSCALL_CALL (pkey_mprotect, addr, len, prot, pkey);
}
+libc_hidden_def (__pkey_mprotect)
+weak_alias (__pkey_mprotect, pkey_mprotect)
diff --git a/sysdeps/unix/sysv/linux/pkey_set.c b/sysdeps/unix/sysv/linux/pkey_set.c
index 94ec29ffdf..d182805c69 100644
--- a/sysdeps/unix/sysv/linux/pkey_set.c
+++ b/sysdeps/unix/sysv/linux/pkey_set.c
@@ -17,10 +17,13 @@
<https://www.gnu.org/licenses/>. */
#include <errno.h>
+#include <sys/mman.h>
int
-pkey_set (int key, unsigned int access_rights)
+__pkey_set (int key, unsigned int access_rights)
{
__set_errno (ENOSYS);
return -1;
}
+libc_hidden_def (__pkey_set)
+weak_alias (__pkey_set, pkey_set)
diff --git a/sysdeps/unix/sysv/linux/powerpc/powerpc64/pkey_get.c b/sysdeps/unix/sysv/linux/powerpc/powerpc64/pkey_get.c
index feea0539a8..054669fee8 100644
--- a/sysdeps/unix/sysv/linux/powerpc/powerpc64/pkey_get.c
+++ b/sysdeps/unix/sysv/linux/powerpc/powerpc64/pkey_get.c
@@ -21,7 +21,7 @@
#include <sys/mman.h>
int
-pkey_get (int key)
+__pkey_get (int key)
{
if (key < 0 || key > PKEY_MAX)
{
@@ -40,3 +40,5 @@ pkey_get (int key)
return PKEY_DISABLE_WRITE;
return 0;
}
+libc_hidden_def (__pkey_get)
+weak_alias (__pkey_get, pkey_get)
diff --git a/sysdeps/unix/sysv/linux/powerpc/powerpc64/pkey_set.c b/sysdeps/unix/sysv/linux/powerpc/powerpc64/pkey_set.c
index 32fe523bb2..8b15f699a2 100644
--- a/sysdeps/unix/sysv/linux/powerpc/powerpc64/pkey_set.c
+++ b/sysdeps/unix/sysv/linux/powerpc/powerpc64/pkey_set.c
@@ -21,7 +21,7 @@
#include <sys/mman.h>
int
-pkey_set (int key, unsigned int rights)
+__pkey_set (int key, unsigned int rights)
{
if (key < 0 || key > PKEY_MAX || rights > 3)
{
@@ -46,3 +46,5 @@ pkey_set (int key, unsigned int rights)
pkey_write (amr);
return 0;
}
+libc_hidden_def (__pkey_set)
+weak_alias (__pkey_set, pkey_set)
diff --git a/sysdeps/unix/sysv/linux/syscalls.list b/sysdeps/unix/sysv/linux/syscalls.list
index 73e941ef89..c7dc8cb567 100644
--- a/sysdeps/unix/sysv/linux/syscalls.list
+++ b/sysdeps/unix/sysv/linux/syscalls.list
@@ -101,8 +101,8 @@ name_to_handle_at EXTRA name_to_handle_at i:isppi name_to_handle_at
setns EXTRA setns i:ii setns
memfd_create EXTRA memfd_create i:si memfd_create
-pkey_alloc EXTRA pkey_alloc i:ii pkey_alloc
-pkey_free EXTRA pkey_free i:i pkey_free
+pkey_alloc EXTRA pkey_alloc i:ii __pkey_alloc pkey_alloc
+pkey_free EXTRA pkey_free i:i __pkey_free pkey_free
gettid EXTRA gettid Ei: __gettid gettid
tgkill EXTRA tgkill i:iii __tgkill tgkill
close_range - close_range i:iii __close_range close_range
diff --git a/sysdeps/unix/sysv/linux/x86/pkey_get.c b/sysdeps/unix/sysv/linux/x86/pkey_get.c
index 29fe5f6634..82bfed409c 100644
--- a/sysdeps/unix/sysv/linux/x86/pkey_get.c
+++ b/sysdeps/unix/sysv/linux/x86/pkey_get.c
@@ -18,9 +18,10 @@
#include <arch-pkey.h>
#include <errno.h>
+#include <sys/mman.h>
int
-pkey_get (int key)
+__pkey_get (int key)
{
if (key < 0 || key > 15)
{
@@ -31,3 +32,5 @@ pkey_get (int key)
return (pkru >> (2 * key)) & 3;
return 0;
}
+libc_hidden_def (__pkey_get)
+weak_alias (__pkey_get, pkey_get)
diff --git a/sysdeps/unix/sysv/linux/x86/pkey_set.c b/sysdeps/unix/sysv/linux/x86/pkey_set.c
index b4cb3dd8e5..375e5abd70 100644
--- a/sysdeps/unix/sysv/linux/x86/pkey_set.c
+++ b/sysdeps/unix/sysv/linux/x86/pkey_set.c
@@ -18,9 +18,10 @@
#include <arch-pkey.h>
#include <errno.h>
+#include <sys/mman.h>
int
-pkey_set (int key, unsigned int rights)
+__pkey_set (int key, unsigned int rights)
{
if (key < 0 || key > 15 || rights > 3)
{
@@ -33,3 +34,5 @@ pkey_set (int key, unsigned int rights)
pkey_write (pkru);
return 0;
}
+libc_hidden_def (__pkey_set)
+weak_alias (__pkey_set, pkey_set)
--
2.43.0
* [PATCH v3 03/32] elf: Remove _dl_sysdep_open_object hook function
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
2023-12-07 10:30 ` [PATCH v3 01/32] support: Add <support/memprobe.h> for protection flags probing Florian Weimer
2023-12-07 10:30 ` [PATCH v3 02/32] misc: Enable internal use of memory protection keys Florian Weimer
@ 2023-12-07 10:31 ` Florian Weimer
2024-01-31 13:10 ` Joseph Myers
2023-12-07 10:31 ` [PATCH v3 04/32] elf: Eliminate second loop in find_version in dl-version.c Florian Weimer
` (29 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:31 UTC (permalink / raw)
To: libc-alpha
It is currently not used by any target.
---
elf/dl-load.c | 15 ---------------
elf/dl-sysdep-open.h | 45 --------------------------------------------
2 files changed, 60 deletions(-)
delete mode 100644 elf/dl-sysdep-open.h
diff --git a/elf/dl-load.c b/elf/dl-load.c
index 25ea4f7a4e..692c9a47ad 100644
--- a/elf/dl-load.c
+++ b/elf/dl-load.c
@@ -72,7 +72,6 @@ struct filebuf
#include <dl-map-segments.h>
#include <dl-unmap-segments.h>
#include <dl-machine-reject-phdr.h>
-#include <dl-sysdep-open.h>
#include <dl-prop.h>
#include <not-cancel.h>
@@ -2091,20 +2090,6 @@ _dl_map_object (struct link_map *loader, const char *name,
&loader->l_runpath_dirs, &realname, &fb, loader,
LA_SER_RUNPATH, &found_other_class);
- if (fd == -1)
- {
- realname = _dl_sysdep_open_object (name, namelen, &fd);
- if (realname != NULL)
- {
- fd = open_verify (realname, fd,
- &fb, loader ?: GL(dl_ns)[nsid]._ns_loaded,
- LA_SER_CONFIG, mode, &found_other_class,
- false);
- if (fd == -1)
- free (realname);
- }
- }
-
#ifdef USE_LDCONFIG
if (fd == -1
&& (__glibc_likely ((mode & __RTLD_SECURE) == 0)
diff --git a/elf/dl-sysdep-open.h b/elf/dl-sysdep-open.h
deleted file mode 100644
index 185d6c72ab..0000000000
--- a/elf/dl-sysdep-open.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* System-specific call to open a shared object by name. Stub version.
- Copyright (C) 2015-2023 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <https://www.gnu.org/licenses/>. */
-
-#ifndef _DL_SYSDEP_OPEN_H
-#define _DL_SYSDEP_OPEN_H 1
-
-#include <assert.h>
-#include <stddef.h>
-
-/* NAME is a name without slashes, as it appears in a DT_NEEDED entry
- or a dlopen call's argument or suchlike. NAMELEN is (strlen (NAME) + 1).
-
- Find NAME in an OS-dependent fashion, and return its "real" name.
- Optionally fill in *FD with a file descriptor open on that file (or
- else leave its initial value of -1). The return value is a new
- malloc'd string, which will be free'd by the caller. If NAME is
- resolved to an actual file that can be opened, then the return
- value should name that file (and if *FD was not set, then a normal
- __open call on that string will be made). If *FD was set by some
- other means than a normal open and there is no "real" name to use,
- then __strdup (NAME) is fine (modulo error checking). */
-
-static inline char *
-_dl_sysdep_open_object (const char *name, size_t namelen, int *fd)
-{
- assert (*fd == -1);
- return NULL;
-}
-
-#endif /* dl-sysdep-open.h */
--
2.43.0
* [PATCH v3 04/32] elf: Eliminate second loop in find_version in dl-version.c
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (2 preceding siblings ...)
2023-12-07 10:31 ` [PATCH v3 03/32] elf: Remove _dl_sysdep_open_object hook function Florian Weimer
@ 2023-12-07 10:31 ` Florian Weimer
2024-02-19 22:17 ` Joseph Myers
2023-12-07 10:31 ` [PATCH v3 05/32] elf: In rtld_setup_main_map, assume ld.so has a DYNAMIC segment Florian Weimer
` (28 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:31 UTC (permalink / raw)
To: libc-alpha
The first loop already iterates through all objects in the namespace
because _dl_check_map_versions is called after the loaded objects have
been added to the namespace list, and that list is not limited by the
symbol search scope. The second loop over the dependency search list
therefore cannot find anything that the first loop has not already
visited.
Turn the assert in _dl_check_map_versions into a proper error
because it can be triggered by inconsistent variants of shared
objects.
---
elf/dl-version.c | 18 +++++-------------
1 file changed, 5 insertions(+), 13 deletions(-)
diff --git a/elf/dl-version.c b/elf/dl-version.c
index 5b8693de04..b3b2160ac8 100644
--- a/elf/dl-version.c
+++ b/elf/dl-version.c
@@ -31,21 +31,17 @@ __attribute ((always_inline))
find_needed (const char *name, struct link_map *map)
{
struct link_map *tmap;
- unsigned int n;
for (tmap = GL(dl_ns)[map->l_ns]._ns_loaded; tmap != NULL;
tmap = tmap->l_next)
if (_dl_name_match_p (name, tmap))
return tmap;
- /* The required object is not in the global scope, look to see if it is
- a dependency of the current object. */
- for (n = 0; n < map->l_searchlist.r_nlist; n++)
- if (_dl_name_match_p (name, map->l_searchlist.r_list[n]))
- return map->l_searchlist.r_list[n];
-
- /* Should never happen. */
- return NULL;
+ struct dl_exception exception;
+ _dl_exception_create_format
+ (&exception, DSO_FILENAME (map->l_name),
+ "missing soname %s in version dependency", name);
+ _dl_signal_exception (0, &exception, NULL);
}
@@ -199,10 +195,6 @@ _dl_check_map_versions (struct link_map *map, int verbose, int trace_mode)
ElfW(Vernaux) *aux;
struct link_map *needed = find_needed (strtab + ent->vn_file, map);
- /* If NEEDED is NULL this means a dependency was not found
- and no stub entry was created. This should never happen. */
- assert (needed != NULL);
-
/* Make sure this is no stub we created because of a missing
dependency. */
if (__builtin_expect (! trace_mode, 1)
--
2.43.0
* [PATCH v3 05/32] elf: In rtld_setup_main_map, assume ld.so has a DYNAMIC segment
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (3 preceding siblings ...)
2023-12-07 10:31 ` [PATCH v3 04/32] elf: Eliminate second loop in find_version in dl-version.c Florian Weimer
@ 2023-12-07 10:31 ` Florian Weimer
2024-02-19 22:18 ` Joseph Myers
2023-12-07 10:31 ` [PATCH v3 06/32] elf: Remove version assert in check_match in elf/dl-lookup.c Florian Weimer
` (27 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:31 UTC (permalink / raw)
To: libc-alpha
The way we build ld.so, it always has a dynamic segment, so checking for
its absence is unnecessary.
---
elf/rtld.c | 24 ------------------------
1 file changed, 24 deletions(-)
diff --git a/elf/rtld.c b/elf/rtld.c
index f0b0f3328d..a00a8d9d0f 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -386,7 +386,6 @@ static void dl_main (const ElfW(Phdr) *phdr, ElfW(Word) phnum,
/* These two variables cannot be moved into .data.rel.ro. */
static struct libname_list _dl_rtld_libname;
-static struct libname_list _dl_rtld_libname2;
/* Variable for statistics. */
RLTD_TIMING_DECLARE (relocate_time, static);
@@ -1165,29 +1164,6 @@ rtld_setup_main_map (struct link_map *main_map)
/* _dl_rtld_libname.next = NULL; Already zero. */
GL(dl_rtld_map).l_libname = &_dl_rtld_libname;
- /* Ordinarily, we would get additional names for the loader from
- our DT_SONAME. This can't happen if we were actually linked as
- a static executable (detect this case when we have no DYNAMIC).
- If so, assume the filename component of the interpreter path to
- be our SONAME, and add it to our name list. */
- if (GL(dl_rtld_map).l_ld == NULL)
- {
- const char *p = NULL;
- const char *cp = _dl_rtld_libname.name;
-
- /* Find the filename part of the path. */
- while (*cp != '\0')
- if (*cp++ == '/')
- p = cp;
-
- if (p != NULL)
- {
- _dl_rtld_libname2.name = p;
- /* _dl_rtld_libname2.next = NULL; Already zero. */
- _dl_rtld_libname.next = &_dl_rtld_libname2;
- }
- }
-
has_interp = true;
break;
case PT_LOAD:
--
2.43.0
* [PATCH v3 06/32] elf: Remove version assert in check_match in elf/dl-lookup.c
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (4 preceding siblings ...)
2023-12-07 10:31 ` [PATCH v3 05/32] elf: In rtld_setup_main_map, assume ld.so has a DYNAMIC segment Florian Weimer
@ 2023-12-07 10:31 ` Florian Weimer
2024-03-04 23:22 ` Joseph Myers
2023-12-07 10:31 ` [PATCH v3 07/32] elf: Disambiguate some failures in _dl_load_cache_lookup Florian Weimer
` (26 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:31 UTC (permalink / raw)
To: libc-alpha
This case is detected early in the elf/dl-version.c consistency
checks. (These checks could be disabled in the future to allow
the removal of symbol versioning from objects.)
Commit f0b2132b35 ("ld.so: Support moving versioned symbols between
sonames [BZ #24741]") removed another call to _dl_name_match_p. The
_dl_check_caller function no longer exists, and the remaining calls
to _dl_name_match_p happen under the loader lock. This means that
atomic accesses are no longer required for the l_libname list. This
supersedes commit 395be7c218 ("elf: Fix data race in _dl_name_match_p
[BZ #21349]").
---
elf/dl-load.c | 18 +-----------------
elf/dl-lookup.c | 19 +++----------------
elf/dl-misc.c | 4 +---
3 files changed, 5 insertions(+), 36 deletions(-)
diff --git a/elf/dl-load.c b/elf/dl-load.c
index 692c9a47ad..65f910f0e5 100644
--- a/elf/dl-load.c
+++ b/elf/dl-load.c
@@ -439,23 +439,7 @@ add_name_to_object (struct link_map *l, const char *name)
newname->name = memcpy (newname + 1, name, name_len);
newname->next = NULL;
newname->dont_free = 0;
- /* CONCURRENCY NOTES:
-
- Make sure the initialization of newname happens before its address is
- read from the lastp->next store below.
-
- GL(dl_load_lock) is held here (and by other writers, e.g. dlclose), so
- readers of libname_list->next (e.g. _dl_check_caller or the reads above)
- can use that for synchronization, however the read in _dl_name_match_p
- may be executed without holding the lock during _dl_runtime_resolve
- (i.e. lazy symbol resolution when a function of library l is called).
-
- The release MO store below synchronizes with the acquire MO load in
- _dl_name_match_p. Other writes need to synchronize with that load too,
- however those happen either early when the process is single threaded
- (dl_main) or when the library is unloaded (dlclose) and the user has to
- synchronize library calls with unloading. */
- atomic_store_release (&lastp->next, newname);
+ lastp->next = newname;
}
/* Standard search directories. */
diff --git a/elf/dl-lookup.c b/elf/dl-lookup.c
index 69c91bea05..f889473378 100644
--- a/elf/dl-lookup.c
+++ b/elf/dl-lookup.c
@@ -93,22 +93,9 @@ check_match (const char *const undef_name,
const ElfW(Half) *verstab = map->l_versyms;
if (version != NULL)
{
- if (__glibc_unlikely (verstab == NULL))
- {
- /* We need a versioned symbol but haven't found any. If
- this is the object which is referenced in the verneed
- entry it is a bug in the library since a symbol must
- not simply disappear.
-
- It would also be a bug in the object since it means that
- the list of required versions is incomplete and so the
- tests in dl-version.c haven't found a problem.*/
- assert (version->filename == NULL
- || ! _dl_name_match_p (version->filename, map));
-
- /* Otherwise we accept the symbol. */
- }
- else
+ /* If there is no version information, accept the symbol. This
+ can happen during symbol interposition. */
+ if (__glibc_likely (verstab != NULL))
{
/* We can match the version information or use the
default one if it is not hidden. */
diff --git a/elf/dl-misc.c b/elf/dl-misc.c
index 5b84adc2f4..e998083284 100644
--- a/elf/dl-misc.c
+++ b/elf/dl-misc.c
@@ -75,9 +75,7 @@ _dl_name_match_p (const char *name, const struct link_map *map)
if (strcmp (name, runp->name) == 0)
return 1;
else
- /* Synchronize with the release MO store in add_name_to_object.
- See CONCURRENCY NOTES in add_name_to_object in dl-load.c. */
- runp = atomic_load_acquire (&runp->next);
+ runp = runp->next;
return 0;
}
--
2.43.0
* [PATCH v3 07/32] elf: Disambiguate some failures in _dl_load_cache_lookup
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (5 preceding siblings ...)
2023-12-07 10:31 ` [PATCH v3 06/32] elf: Remove version assert in check_match in elf/dl-lookup.c Florian Weimer
@ 2023-12-07 10:31 ` Florian Weimer
2024-02-19 23:07 ` Joseph Myers
2023-12-07 10:31 ` [PATCH v3 08/32] elf: Eliminate alloca in open_verify Florian Weimer
` (25 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:31 UTC (permalink / raw)
To: libc-alpha
Failure to allocate a copy of the string is now distinct from
a cache lookup failure. Some infrastructure failures in
_dl_sysdep_read_whole_file are still treated as cache lookup
failures, though.
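For illustration, a stand-alone model of the new calling convention
(the cache scan is stubbed out; the names cache_lookup and libdemo are
invented for this sketch):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Returns false only on allocation failure.  On success, *REALNAME
       is either NULL (not found) or a malloc'd copy owned by the
       caller.  */
    static bool
    cache_lookup (const char *name, char **realname)
    {
      const char *best = strcmp (name, "libdemo.so.1") == 0
        ? "/usr/lib/libdemo.so.1" : NULL;   /* Simulated cache scan.  */
      if (best == NULL)
        {
          *realname = NULL;
          return true;
        }
      char *copy = strdup (best);
      if (copy == NULL)
        return false;
      *realname = copy;
      return true;
    }

    int
    main (void)
    {
      char *realname;
      if (!cache_lookup ("libdemo.so.1", &realname))
        puts ("cannot allocate library name");
      else if (realname == NULL)
        puts ("not found in cache");
      else
        {
          printf ("found %s\n", realname);
          free (realname);
        }
      return 0;
    }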
---
elf/dl-cache.c | 22 ++++++++++++++++------
elf/dl-load.c | 5 ++++-
sysdeps/generic/ldsodefs.h | 10 +++++++---
3 files changed, 27 insertions(+), 10 deletions(-)
diff --git a/elf/dl-cache.c b/elf/dl-cache.c
index 804bf23222..a3eb960dac 100644
--- a/elf/dl-cache.c
+++ b/elf/dl-cache.c
@@ -398,8 +398,8 @@ _dl_cache_libcmp (const char *p1, const char *p2)
may be unmapped at any time by a completing recursive dlopen and
this function must take care that it does not return references to
any data in the mapping. */
-char *
-_dl_load_cache_lookup (const char *name)
+bool
+_dl_load_cache_lookup (const char *name, char **realname)
{
/* Print a message if the loading of libs is traced. */
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_LIBS))
@@ -475,8 +475,11 @@ _dl_load_cache_lookup (const char *name)
}
if (cache == (void *) -1)
- /* Previously looked for the cache file and didn't find it. */
- return NULL;
+ {
+ /* Previously looked for the cache file and didn't find it. */
+ *realname = NULL;
+ return true;
+ }
const char *best;
if (cache_new != (void *) -1)
@@ -502,7 +505,10 @@ _dl_load_cache_lookup (const char *name)
_dl_debug_printf (" trying file=%s\n", best);
if (best == NULL)
- return NULL;
+ {
+ *realname = NULL;
+ return true;
+ }
/* The double copy is *required* since malloc may be interposed
and call dlopen itself whose completion would unmap the data
@@ -512,7 +518,11 @@ _dl_load_cache_lookup (const char *name)
size_t best_len = strlen (best) + 1;
temp = alloca (best_len);
memcpy (temp, best, best_len);
- return __strdup (temp);
+ char *copy = __strdup (temp);
+ if (copy == NULL)
+ return false;
+ *realname = copy;
+ return true;
}
#ifndef MAP_COPY
diff --git a/elf/dl-load.c b/elf/dl-load.c
index 65f910f0e5..2084366663 100644
--- a/elf/dl-load.c
+++ b/elf/dl-load.c
@@ -2082,7 +2082,10 @@ _dl_map_object (struct link_map *loader, const char *name,
{
/* Check the list of libraries in the file /etc/ld.so.cache,
for compatibility with Linux's ldconfig program. */
- char *cached = _dl_load_cache_lookup (name);
+ char *cached;
+ if (!_dl_load_cache_lookup (name, &cached))
+ _dl_signal_error (ENOMEM, NULL, NULL,
+ N_("cannot allocate library name"));
if (cached != NULL)
{
diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
index 9b50ddd09f..80f078b65f 100644
--- a/sysdeps/generic/ldsodefs.h
+++ b/sysdeps/generic/ldsodefs.h
@@ -1111,9 +1111,13 @@ const struct r_strlenpair *_dl_important_hwcaps (const char *prepend,
size_t *max_capstrlen)
attribute_hidden;
-/* Look up NAME in ld.so.cache and return the file name stored there,
- or null if none is found. Caller must free returned string. */
-extern char *_dl_load_cache_lookup (const char *name) attribute_hidden;
+/* Look up NAME in ld.so.cache. Return false on memory allocation
+ failure and do not change *REALNAME. If lookup fails, return true
+ and write a null pointer to *REALNAME. If lookup succeeds, write a
+ copy of the full name to *REALNAME (which has to be freed by the
+ caller). */
+bool _dl_load_cache_lookup (const char *name, char **realname)
+ attribute_hidden __nonnull ((1, 2)) __attribute__ ((warn_unused_result));
/* If the system does not support MAP_COPY we cannot leave the file open
all the time since this would create problems when the file is replaced.
--
2.43.0
* [PATCH v3 08/32] elf: Eliminate alloca in open_verify
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (6 preceding siblings ...)
2023-12-07 10:31 ` [PATCH v3 07/32] elf: Disambiguate some failures in _dl_load_cache_lookup Florian Weimer
@ 2023-12-07 10:31 ` Florian Weimer
2024-02-19 23:26 ` Joseph Myers
2023-12-07 10:31 ` [PATCH v3 09/32] Do not export <alloc_buffer.h> functions from libc Florian Weimer
` (24 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:31 UTC (permalink / raw)
To: libc-alpha
With the two-stage approach for exception handling, the name can
be freed after it has been copied into the exception, but before
it is raised.
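For illustration, a stand-alone model of the ordering (this is not the
glibc exception API; it only shows why freeing the name before raising
is safe once the exception owns its own copy):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct exception { char *objname; const char *errstring; };

    static void
    exception_create (struct exception *exc, const char *name,
                      const char *errstring)
    {
      exc->objname = strdup (name);     /* The exception owns a copy.  */
      if (exc->objname == NULL)
        exc->objname = "";              /* Keep the sketch simple.  */
      exc->errstring = errstring;
    }

    static void
    signal_exception (struct exception *exc)
    {
      fprintf (stderr, "%s: %s\n", exc->objname, exc->errstring);
      exit (1);
    }

    int
    main (void)
    {
      char *name = strdup ("/tmp/libbroken.so");
      if (name == NULL)
        return 1;
      struct exception exc;
      exception_create (&exc, name, "file too short");
      free (name);               /* Safe: the copy lives in EXC.  */
      signal_exception (&exc);   /* Raised after the original is gone.  */
    }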
---
elf/dl-load.c | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/elf/dl-load.c b/elf/dl-load.c
index 2084366663..d355de036a 100644
--- a/elf/dl-load.c
+++ b/elf/dl-load.c
@@ -1636,15 +1636,13 @@ open_verify (const char *name, int fd,
errval = errno;
errstring = (errval == 0
? N_("file too short") : N_("cannot read file data"));
- lose:
+ lose:;
+ struct dl_exception exception;
+ _dl_exception_create (&exception, name, errstring);
if (free_name)
- {
- char *realname = (char *) name;
- name = strdupa (realname);
- free (realname);
- }
+ free ((char *) name);
__close_nocancel (fd);
- _dl_signal_error (errval, name, NULL, errstring);
+ _dl_signal_exception (errval, &exception, NULL);
}
/* See whether the ELF header is what we expect. */
--
2.43.0
* [PATCH v3 09/32] Do not export <alloc_buffer.h> functions from libc
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (7 preceding siblings ...)
2023-12-07 10:31 ` [PATCH v3 08/32] elf: Eliminate alloca in open_verify Florian Weimer
@ 2023-12-07 10:31 ` Florian Weimer
2024-02-21 17:13 ` Joseph Myers
2023-12-07 10:31 ` [PATCH v3 10/32] elf: Make <alloc_buffer.h> usable in ld.so Florian Weimer
` (23 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:31 UTC (permalink / raw)
To: libc-alpha
With the merge of NSS service modules into libc, external users are
gone except in tests.
To enable tests that use these functions (and the unit tests in
malloc/tst-alloc_buffer.c), add a copy of these functions to
libsupport.
---
include/alloc_buffer.h | 26 ++++++--------------------
malloc/Makefile | 6 ++++--
malloc/Versions | 7 -------
malloc/alloc_buffer_alloc_array.c | 1 -
malloc/alloc_buffer_allocate.c | 1 -
malloc/alloc_buffer_copy_bytes.c | 1 -
malloc/alloc_buffer_copy_string.c | 1 -
malloc/alloc_buffer_create_failure.c | 1 -
malloc/tst-alloc_buffer.c | 4 ++++
nss/Makefile | 4 ++--
support/Makefile | 1 +
support/support-alloc_buffer.c | 26 ++++++++++++++++++++++++++
12 files changed, 43 insertions(+), 36 deletions(-)
create mode 100644 support/support-alloc_buffer.c
diff --git a/include/alloc_buffer.h b/include/alloc_buffer.h
index 5dcd009405..17acada715 100644
--- a/include/alloc_buffer.h
+++ b/include/alloc_buffer.h
@@ -113,10 +113,8 @@ enum
};
/* Internal function. Terminate the process using __libc_fatal. */
-void __libc_alloc_buffer_create_failure (void *start, size_t size);
-#ifndef _ISOMAC
-libc_hidden_proto (__libc_alloc_buffer_create_failure)
-#endif
+void __libc_alloc_buffer_create_failure (void *start, size_t size)
+ attribute_hidden;
/* Create a new allocation buffer. The byte range from START to START
+ SIZE - 1 must be valid, and the allocation buffer allocates
@@ -134,10 +132,7 @@ alloc_buffer_create (void *start, size_t size)
/* Internal function. See alloc_buffer_allocate below. */
struct alloc_buffer __libc_alloc_buffer_allocate (size_t size, void **pptr)
- __attribute__ ((nonnull (2)));
-#ifndef _ISOMAC
-libc_hidden_proto (__libc_alloc_buffer_allocate)
-#endif
+ attribute_hidden __attribute__ ((nonnull (2)));
/* Allocate a buffer of SIZE bytes using malloc. The returned buffer
is in a failed state if malloc fails. *PPTR points to the start of
@@ -338,10 +333,7 @@ __alloc_buffer_next (struct alloc_buffer *buf, size_t align)
void * __libc_alloc_buffer_alloc_array (struct alloc_buffer *buf,
size_t size, size_t align,
size_t count)
- __attribute__ ((nonnull (1)));
-#ifndef _ISOMAC
-libc_hidden_proto (__libc_alloc_buffer_alloc_array)
-#endif
+ attribute_hidden __attribute__ ((nonnull (1)));
/* Obtain a TYPE * pointer to an array of COUNT objects in BUF of
TYPE. Consume these bytes from the buffer. Return NULL and mark
@@ -357,10 +349,7 @@ libc_hidden_proto (__libc_alloc_buffer_alloc_array)
/* Internal function. See alloc_buffer_copy_bytes below. */
struct alloc_buffer __libc_alloc_buffer_copy_bytes (struct alloc_buffer,
const void *, size_t)
- __attribute__ ((nonnull (2)));
-#ifndef _ISOMAC
-libc_hidden_proto (__libc_alloc_buffer_copy_bytes)
-#endif
+ attribute_hidden __attribute__ ((nonnull (2)));
/* Copy SIZE bytes starting at SRC into the buffer. If there is not
enough room in the buffer, the buffer is marked as failed. No
@@ -374,10 +363,7 @@ alloc_buffer_copy_bytes (struct alloc_buffer *buf, const void *src, size_t size)
/* Internal function. See alloc_buffer_copy_string below. */
struct alloc_buffer __libc_alloc_buffer_copy_string (struct alloc_buffer,
const char *)
- __attribute__ ((nonnull (2)));
-#ifndef _ISOMAC
-libc_hidden_proto (__libc_alloc_buffer_copy_string)
-#endif
+ attribute_hidden __attribute__ ((nonnull (2)));
/* Copy the string at SRC into the buffer, including its null
terminator. If there is not enough room in the buffer, the buffer
diff --git a/malloc/Makefile b/malloc/Makefile
index f6cdf7bf0b..234bb8f6d9 100644
--- a/malloc/Makefile
+++ b/malloc/Makefile
@@ -34,7 +34,6 @@ tests := mallocbug tst-malloc tst-valloc tst-calloc tst-obstack \
tst-mallocfork3 \
tst-interpose-nothread \
tst-interpose-thread \
- tst-alloc_buffer \
tst-free-errno \
tst-malloc-tcache-leak \
tst-malloc_info tst-mallinfo2 \
@@ -57,7 +56,10 @@ ifeq ($(have-GLIBC_2.23)$(build-shared),yesyes)
tests += tst-mallocstate tst-compathooks-off tst-compathooks-on
endif
-tests-internal := tst-scratch_buffer
+tests-internal := \
+ tst-alloc_buffer \
+ tst-scratch_buffer \
+ # tests-internal
# The dynarray framework is only available inside glibc.
tests-internal += \
diff --git a/malloc/Versions b/malloc/Versions
index c763395c6d..011b6a5a85 100644
--- a/malloc/Versions
+++ b/malloc/Versions
@@ -88,13 +88,6 @@ libc {
__libc_dynarray_finalize;
__libc_dynarray_resize;
__libc_dynarray_resize_clear;
-
- # struct alloc_buffer support
- __libc_alloc_buffer_alloc_array;
- __libc_alloc_buffer_allocate;
- __libc_alloc_buffer_copy_bytes;
- __libc_alloc_buffer_copy_string;
- __libc_alloc_buffer_create_failure;
}
}
diff --git a/malloc/alloc_buffer_alloc_array.c b/malloc/alloc_buffer_alloc_array.c
index 018d07061a..739934d9fd 100644
--- a/malloc/alloc_buffer_alloc_array.c
+++ b/malloc/alloc_buffer_alloc_array.c
@@ -43,4 +43,3 @@ __libc_alloc_buffer_alloc_array (struct alloc_buffer *buf, size_t element_size,
return NULL;
}
}
-libc_hidden_def (__libc_alloc_buffer_alloc_array)
diff --git a/malloc/alloc_buffer_allocate.c b/malloc/alloc_buffer_allocate.c
index 7f1e083a7c..8b05f55a3d 100644
--- a/malloc/alloc_buffer_allocate.c
+++ b/malloc/alloc_buffer_allocate.c
@@ -33,4 +33,3 @@ __libc_alloc_buffer_allocate (size_t size, void **pptr)
else
return alloc_buffer_create (*pptr, size);
}
-libc_hidden_def (__libc_alloc_buffer_allocate)
diff --git a/malloc/alloc_buffer_copy_bytes.c b/malloc/alloc_buffer_copy_bytes.c
index 397f6a734f..2e261da02e 100644
--- a/malloc/alloc_buffer_copy_bytes.c
+++ b/malloc/alloc_buffer_copy_bytes.c
@@ -31,4 +31,3 @@ __libc_alloc_buffer_copy_bytes (struct alloc_buffer buf,
memcpy (ptr, src, len);
return buf;
}
-libc_hidden_def (__libc_alloc_buffer_copy_bytes)
diff --git a/malloc/alloc_buffer_copy_string.c b/malloc/alloc_buffer_copy_string.c
index a334bd359d..711fbcf695 100644
--- a/malloc/alloc_buffer_copy_string.c
+++ b/malloc/alloc_buffer_copy_string.c
@@ -27,4 +27,3 @@ __libc_alloc_buffer_copy_string (struct alloc_buffer buf, const char *src)
{
return __libc_alloc_buffer_copy_bytes (buf, src, strlen (src) + 1);
}
-libc_hidden_def (__libc_alloc_buffer_copy_string)
diff --git a/malloc/alloc_buffer_create_failure.c b/malloc/alloc_buffer_create_failure.c
index 77ce9378ce..c9557cae5b 100644
--- a/malloc/alloc_buffer_create_failure.c
+++ b/malloc/alloc_buffer_create_failure.c
@@ -28,4 +28,3 @@ __libc_alloc_buffer_create_failure (void *start, size_t size)
size);
__libc_fatal (buf);
}
-libc_hidden_def (__libc_alloc_buffer_create_failure)
diff --git a/malloc/tst-alloc_buffer.c b/malloc/tst-alloc_buffer.c
index 567cb1b172..5e06b5e91c 100644
--- a/malloc/tst-alloc_buffer.c
+++ b/malloc/tst-alloc_buffer.c
@@ -16,6 +16,10 @@
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
+/* Note: This test exercises the (identical) copy of the
+ <alloc_buffer.h> in libsupport, not libc.so, because the latter has
+ hidden visibility and cannot be tested from the outside. */
+
#include <arpa/inet.h>
#include <alloc_buffer.h>
#include <stdio.h>
diff --git a/nss/Makefile b/nss/Makefile
index 213b68720b..fef617454d 100644
--- a/nss/Makefile
+++ b/nss/Makefile
@@ -475,9 +475,9 @@ libof-nss_test1 = extramodules
libof-nss_test2 = extramodules
libof-nss_test_errno = extramodules
libof-nss_test_gai_hv2_canonname = extramodules
-$(objpfx)/libnss_test1.so: $(objpfx)nss_test1.os $(link-libc-deps)
+$(objpfx)/libnss_test1.so: $(objpfx)nss_test1.os $(libsupport) $(link-libc-deps)
$(build-module)
-$(objpfx)/libnss_test2.so: $(objpfx)nss_test2.os $(link-libc-deps)
+$(objpfx)/libnss_test2.so: $(objpfx)nss_test2.os $(libsupport) $(link-libc-deps)
$(build-module)
$(objpfx)/libnss_test_errno.so: $(objpfx)nss_test_errno.os $(link-libc-deps)
$(build-module)
diff --git a/support/Makefile b/support/Makefile
index 556281121d..2adef004c8 100644
--- a/support/Makefile
+++ b/support/Makefile
@@ -41,6 +41,7 @@ libsupport-routines = \
resolv_response_context_free \
resolv_test \
set_fortify_handler \
+ support-alloc_buffer \
support-open-dev-null-range \
support-xfstat \
support-xfstat-time64 \
diff --git a/support/support-alloc_buffer.c b/support/support-alloc_buffer.c
new file mode 100644
index 0000000000..11f967295c
--- /dev/null
+++ b/support/support-alloc_buffer.c
@@ -0,0 +1,26 @@
+/* Make <alloc_buffer.h> available to tests.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+/* The implementation in libc.so has hidden visibility and is
+ therefore not usable. */
+
+#include <malloc/alloc_buffer_alloc_array.c>
+#include <malloc/alloc_buffer_allocate.c>
+#include <malloc/alloc_buffer_copy_bytes.c>
+#include <malloc/alloc_buffer_copy_string.c>
+#include <malloc/alloc_buffer_create_failure.c>
--
2.43.0
* [PATCH v3 10/32] elf: Make <alloc_buffer.h> usable in ld.so
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (8 preceding siblings ...)
2023-12-07 10:31 ` [PATCH v3 09/32] Do not export <alloc_buffer.h> functions from libc Florian Weimer
@ 2023-12-07 10:31 ` Florian Weimer
2024-02-21 17:19 ` Joseph Myers
2023-12-07 10:31 ` [PATCH v3 11/32] elf: Merge the three implementations of _dl_dst_substitute Florian Weimer
` (22 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:31 UTC (permalink / raw)
To: libc-alpha
__libc_alloc_buffer_create_failure used to call __snprintf, which is
not available in ld.so. The size information in the error message is
probably not that useful, so remove it.
---
malloc/alloc_buffer_create_failure.c | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/malloc/alloc_buffer_create_failure.c b/malloc/alloc_buffer_create_failure.c
index c9557cae5b..7a2c2b7b8f 100644
--- a/malloc/alloc_buffer_create_failure.c
+++ b/malloc/alloc_buffer_create_failure.c
@@ -22,9 +22,5 @@
void
__libc_alloc_buffer_create_failure (void *start, size_t size)
{
- char buf[200];
- __snprintf (buf, sizeof (buf), "Fatal glibc error: "
- "invalid allocation buffer of size %zu\n",
- size);
- __libc_fatal (buf);
+ __libc_fatal ("Fatal glibc error: invalid allocation buffer\n");
}
--
2.43.0
* [PATCH v3 11/32] elf: Merge the three implementations of _dl_dst_substitute
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (9 preceding siblings ...)
2023-12-07 10:31 ` [PATCH v3 10/32] elf: Make <alloc_buffer.h> usable in ld.so Florian Weimer
@ 2023-12-07 10:31 ` Florian Weimer
2024-02-28 17:52 ` Joseph Myers
2023-12-07 10:31 ` [PATCH v3 12/32] elf: Move __rtld_malloc_init_stubs call into _dl_start_final Florian Weimer
` (21 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:31 UTC (permalink / raw)
To: libc-alpha
Use one implementation to perform both the copying and the counting.
The IS_RTLD check for l_origin processing is eliminated because
$ORIGIN cannot be used with the ld.so link map (nor with the vDSO
link map), because there is no user code associated with either map
that might call dlopen with an $ORIGIN path.
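The resulting caller convention is a two-pass one; roughly (a
condensed sketch of the expand_dynamic_string_token change further
down in this patch, not additional code introduced here):

  struct alloc_buffer buf = {};
  /* First pass: empty buffer, only the required size is computed.  */
  size_t size = _dl_dst_substitute (l, input, &buf);
  char *result = malloc (size);
  if (result != NULL)
    {
      /* Second pass: perform the substitution into the sized buffer.  */
      buf = alloc_buffer_create (result, size);
      if (_dl_dst_substitute (l, input, &buf) == 0)
        /* The expansion is invalid and must be ignored.  */
        *result = '\0';
    }

A zero return from the second call marks the expansion as unusable;
otherwise the buffer holds the null-terminated expansion.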
---
elf/dl-deps.c | 70 ++++++-------
elf/dl-load.c | 146 ++++++++++++----------------
elf/dl-open.c | 1 -
sysdeps/generic/ldsodefs.h | 9 +-
sysdeps/unix/sysv/linux/dl-origin.c | 1 -
5 files changed, 94 insertions(+), 133 deletions(-)
diff --git a/elf/dl-deps.c b/elf/dl-deps.c
index 0549b4a4ff..110c8953bd 100644
--- a/elf/dl-deps.c
+++ b/elf/dl-deps.c
@@ -28,8 +28,7 @@
#include <sys/param.h>
#include <ldsodefs.h>
#include <scratch_buffer.h>
-
-#include <dl-dst.h>
+#include <alloc_buffer.h>
/* Whether an shared object references one or more auxiliary objects
is signaled by the AUXTAG entry in l_info. */
@@ -80,47 +79,34 @@ struct list
};
-/* Macro to expand DST. It is an macro since we use `alloca'. */
+/* Macro to expand DST. It is an macro since we use `alloca'.
+ See expand_dynamic_string_token in dl-load.c. */
#define expand_dst(l, str, fatal) \
- ({ \
- const char *__str = (str); \
- const char *__result = __str; \
- size_t __dst_cnt = _dl_dst_count (__str); \
- \
- if (__dst_cnt != 0) \
- { \
- char *__newp; \
- \
- /* DST must not appear in SUID/SGID programs. */ \
- if (__libc_enable_secure) \
- _dl_signal_error (0, __str, NULL, N_("\
-DST not allowed in SUID/SGID programs")); \
- \
- __newp = (char *) alloca (DL_DST_REQUIRED (l, __str, strlen (__str), \
- __dst_cnt)); \
- \
- __result = _dl_dst_substitute (l, __str, __newp); \
- \
- if (*__result == '\0') \
- { \
- /* The replacement for the DST is not known. We can't \
- processed. */ \
- if (fatal) \
- _dl_signal_error (0, __str, NULL, N_("\
-empty dynamic string token substitution")); \
- else \
- { \
- /* This is for DT_AUXILIARY. */ \
- if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_LIBS)) \
- _dl_debug_printf (N_("\
-cannot load auxiliary `%s' because of empty dynamic string token " \
- "substitution\n"), __str); \
- continue; \
- } \
- } \
- } \
- \
- __result; })
+ ({ \
+ struct alloc_buffer __buf = {}; \
+ size_t __size = _dl_dst_substitute ((l), (str), &__buf); \
+ char *__result = alloca (__size); \
+ __buf = alloc_buffer_create (__result, __size); \
+ if (_dl_dst_substitute ((l), (str), &__buf) == 0) \
+ { \
+ /* The replacement for the DST is not known. We can't \
+ processed. */ \
+ if (fatal) \
+ _dl_signal_error (0, str, NULL, N_("\
+empty dynamic string token substitution")); \
+ else \
+ { \
+ /* This is for DT_AUXILIARY. */ \
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_LIBS)) \
+ _dl_debug_printf (N_("\
+cannot load auxiliary `%s' because of empty dynamic string token " \
+ "substitution\n"), str); \
+ continue; \
+ } \
+ } \
+ assert (!alloc_buffer_has_failed (&__buf)); \
+ __result; \
+ }) \
static void
preload (struct list *known, unsigned int *nlist, struct link_map *map)
diff --git a/elf/dl-load.c b/elf/dl-load.c
index d355de036a..2faaa44eaf 100644
--- a/elf/dl-load.c
+++ b/elf/dl-load.c
@@ -32,6 +32,7 @@
#include <sys/stat.h>
#include <sys/types.h>
#include <gnu/lib-names.h>
+#include <alloc_buffer.h>
/* Type for the buffer we put the ELF header and hopefully the program
header. This buffer does not really have to be too large. In most
@@ -67,7 +68,6 @@ struct filebuf
#include <libc-pointer-arith.h>
#include <array_length.h>
-#include <dl-dst.h>
#include <dl-load.h>
#include <dl-map-segments.h>
#include <dl-unmap-segments.h>
@@ -227,61 +227,32 @@ is_dst (const char *input, const char *ref)
return rlen;
}
-/* INPUT should be the start of a path e.g DT_RPATH or name e.g.
- DT_NEEDED. The return value is the number of known DSTs found. We
- count all known DSTs regardless of __libc_enable_secure; the caller
- is responsible for enforcing the security of the substitution rules
- (usually _dl_dst_substitute). */
-size_t
-_dl_dst_count (const char *input)
-{
- size_t cnt = 0;
-
- input = strchr (input, '$');
-
- /* Most likely there is no DST. */
- if (__glibc_likely (input == NULL))
- return 0;
-
- do
- {
- size_t len;
-
- ++input;
- /* All DSTs must follow ELF gABI rules, see is_dst (). */
- if ((len = is_dst (input, "ORIGIN")) != 0
- || (len = is_dst (input, "PLATFORM")) != 0
- || (len = is_dst (input, "LIB")) != 0)
- ++cnt;
-
- /* There may be more than one DST in the input. */
- input = strchr (input + len, '$');
- }
- while (input != NULL);
-
- return cnt;
-}
-
-/* Process INPUT for DSTs and store in RESULT using the information
+/* Process INPUT for DSTs and store in *RESULT using the information
from link map L to resolve the DSTs. This function only handles one
path at a time and does not handle colon-separated path lists (see
- fillin_rpath ()). Lastly the size of result in bytes should be at
- least equal to the value returned by DL_DST_REQUIRED. Note that it
- is possible for a DT_NEEDED, DT_AUXILIARY, and DT_FILTER entries to
- have colons, but we treat those as literal colons here, not as path
- list delimiters. */
-char *
-_dl_dst_substitute (struct link_map *l, const char *input, char *result)
+ fillin_rpath ()).
+
+ A caller is expected to call this function twice, first with an
+ empty *RESULT buffer to obtain the total length (including the
+ terminating null byte) that is returned by this function. The
+ second call should be made with a properly sized buffer, and this
+ function will write the expansion to *RESULT. If that second call
+ returns 0, it means that the expansion is not valid and should be
+ ignored.
+
+ Note that it is possible for a DT_NEEDED,
+ DT_AUXILIARY, and DT_FILTER entries to have colons, but we treat
+ those as literal colons here, not as path list delimiters. */
+size_t
+_dl_dst_substitute (struct link_map *l, const char *input,
+ struct alloc_buffer *result)
{
/* Copy character-by-character from input into the working pointer
- looking for any DSTs. We track the start of input and if we are
- going to check for trusted paths, all of which are part of $ORIGIN
- handling in SUID/SGID cases (see below). In some cases, like when
- a DST cannot be replaced, we may set result to an empty string and
- return. */
- char *wp = result;
+ looking for any DSTs. */
const char *start = input;
+ char *result_start = alloc_buffer_next (result, char);
bool check_for_trusted = false;
+ size_t length = 0;
do
{
@@ -318,7 +289,16 @@ _dl_dst_substitute (struct link_map *l, const char *input, char *result)
&& (input[len] == '\0' || input[len] == '/')))
repl = (const char *) -1;
else
- repl = l->l_origin;
+ {
+ if (l->l_origin == NULL)
+ {
+ /* For loaded DSOs, the l_origin field is set in
+ _dl_new_object. */
+ assert (l->l_name[0] == '\0');
+ l->l_origin = _dl_get_origin ();
+ }
+ repl = l->l_origin;
+ }
check_for_trusted = (__libc_enable_secure
&& l->l_type == lt_executable);
@@ -330,7 +310,9 @@ _dl_dst_substitute (struct link_map *l, const char *input, char *result)
if (repl != NULL && repl != (const char *) -1)
{
- wp = __stpcpy (wp, repl);
+ size_t repl_len = strlen (repl);
+ length += repl_len;
+ alloc_buffer_copy_bytes (result, repl, repl_len);
input += len;
}
else if (len != 0)
@@ -338,16 +320,20 @@ _dl_dst_substitute (struct link_map *l, const char *input, char *result)
/* We found a valid DST that we know about, but we could
not find a replacement value for it, therefore we
cannot use this path and discard it. */
- *result = '\0';
- return result;
+ alloc_buffer_mark_failed (result);
+ return 0;
}
else
- /* No DST we recognize. */
- *wp++ = '$';
+ {
+ /* No DST we recognize. */
+ ++length;
+ alloc_buffer_add_byte (result, '$');
+ }
}
else
{
- *wp++ = *input++;
+ ++length;
+ alloc_buffer_add_byte (result, *input++);
}
}
while (*input != '\0');
@@ -362,15 +348,19 @@ _dl_dst_substitute (struct link_map *l, const char *input, char *result)
this way because it may be manipulated in some ways with hard
links. */
if (__glibc_unlikely (check_for_trusted)
- && !is_trusted_path_normalize (result, wp - result))
+ && !alloc_buffer_has_failed (result)
+ && !is_trusted_path_normalize (result_start,
+ alloc_buffer_next (result, char)
+ - result_start))
{
- *result = '\0';
- return result;
+ alloc_buffer_mark_failed (result);
+ return 0;
}
- *wp = '\0';
+ ++length;
+ alloc_buffer_add_byte (result, 0);
- return result;
+ return length;
}
@@ -382,30 +372,18 @@ _dl_dst_substitute (struct link_map *l, const char *input, char *result)
static char *
expand_dynamic_string_token (struct link_map *l, const char *input)
{
- /* We make two runs over the string. First we determine how large the
- resulting string is and then we copy it over. Since this is no
- frequently executed operation we are looking here not for performance
- but rather for code size. */
- size_t cnt;
- size_t total;
- char *result;
-
- /* Determine the number of DSTs. */
- cnt = _dl_dst_count (input);
-
- /* If we do not have to replace anything simply copy the string. */
- if (__glibc_likely (cnt == 0))
- return __strdup (input);
-
- /* Determine the length of the substituted string. */
- total = DL_DST_REQUIRED (l, input, strlen (input), cnt);
-
- /* Allocate the necessary memory. */
- result = (char *) malloc (total + 1);
+ struct alloc_buffer buf = {};
+ size_t size = _dl_dst_substitute (l, input, &buf);
+ char *result = malloc (size);
if (result == NULL)
return NULL;
-
- return _dl_dst_substitute (l, input, result);
+ buf = alloc_buffer_create (result, size);
+ if (_dl_dst_substitute (l, input, &buf) == 0)
+ /* Mark the expanded string as to be ignored. */
+ *result = '\0';
+ else
+ assert (!alloc_buffer_has_failed (&buf));
+ return result;
}
diff --git a/elf/dl-open.c b/elf/dl-open.c
index b748c278ac..9a16b01838 100644
--- a/elf/dl-open.c
+++ b/elf/dl-open.c
@@ -38,7 +38,6 @@
#include <gnu/lib-names.h>
#include <dl-find_object.h>
-#include <dl-dst.h>
#include <dl-prop.h>
diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
index 80f078b65f..51ee7f2112 100644
--- a/sysdeps/generic/ldsodefs.h
+++ b/sysdeps/generic/ldsodefs.h
@@ -1224,12 +1224,11 @@ extern void _dl_nothread_init_static_tls (struct link_map *) attribute_hidden;
/* Find origin of the executable. */
extern const char *_dl_get_origin (void) attribute_hidden;
-/* Count DSTs. */
-extern size_t _dl_dst_count (const char *name) attribute_hidden;
-
/* Substitute DST values. */
-extern char *_dl_dst_substitute (struct link_map *l, const char *name,
- char *result) attribute_hidden;
+struct alloc_buffer;
+size_t _dl_dst_substitute (struct link_map *l, const char *name,
+ struct alloc_buffer *result)
+ attribute_hidden __nonnull ((1, 2, 3));
/* Open the shared object NAME, relocate it, and run its initializer if it
hasn't already been run. MODE is as for `dlopen' (see <dlfcn.h>). If
diff --git a/sysdeps/unix/sysv/linux/dl-origin.c b/sysdeps/unix/sysv/linux/dl-origin.c
index d87e89335d..82298c28f4 100644
--- a/sysdeps/unix/sysv/linux/dl-origin.c
+++ b/sysdeps/unix/sysv/linux/dl-origin.c
@@ -17,7 +17,6 @@
<https://www.gnu.org/licenses/>. */
#include <assert.h>
-#include <dl-dst.h>
#include <fcntl.h>
#include <ldsodefs.h>
#include <sysdep.h>
--
2.43.0
* [PATCH v3 12/32] elf: Move __rtld_malloc_init_stubs call into _dl_start_final
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (10 preceding siblings ...)
2023-12-07 10:31 ` [PATCH v3 11/32] elf: Merge the three implementations of _dl_dst_substitute Florian Weimer
@ 2023-12-07 10:31 ` Florian Weimer
2024-02-22 22:30 ` Joseph Myers
2024-02-22 23:06 ` Andreas Schwab
2023-12-07 10:31 ` [PATCH v3 13/32] elf: Merge __dl_libc_freemem into __rtld_libc_freeres Florian Weimer
` (20 subsequent siblings)
32 siblings, 2 replies; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:31 UTC (permalink / raw)
To: libc-alpha
Calling an extern function in a different translation unit before
self-relocation is brittle. The compiler may load the function's
address at an earlier point in _dl_start, before self-relocation has
completed. In _dl_start_final, the call is behind a compiler barrier,
so this cannot happen.
---
elf/rtld.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/elf/rtld.c b/elf/rtld.c
index a00a8d9d0f..05cbcee24a 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -452,6 +452,8 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
{
ElfW(Addr) start_addr;
+ __rtld_malloc_init_stubs ();
+
/* Do not use an initializer for these members because it would
interfere with __rtld_static_init. */
GLRO (dl_find_object) = &_dl_find_object;
@@ -574,8 +576,6 @@ _dl_start (void *arg)
function, that way the compiler cannot put accesses to the GOT
before ELF_DYNAMIC_RELOCATE. */
- __rtld_malloc_init_stubs ();
-
#ifdef DONT_USE_BOOTSTRAP_MAP
return _dl_start_final (arg);
#else
--
2.43.0
* [PATCH v3 13/32] elf: Merge __dl_libc_freemem into __rtld_libc_freeres
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (11 preceding siblings ...)
2023-12-07 10:31 ` [PATCH v3 12/32] elf: Move __rtld_malloc_init_stubs call into _dl_start_final Florian Weimer
@ 2023-12-07 10:31 ` Florian Weimer
2024-02-22 23:23 ` Joseph Myers
2023-12-07 10:31 ` [PATCH v3 14/32] elf: Use struct link_map_private for the internal link map Florian Weimer
` (19 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:31 UTC (permalink / raw)
To: libc-alpha
The two functions serve very similar purposes. The advantage of
__rtld_libc_freeres is that it is located within ld.so, so it is more
natural to poke at link map internals there.
This slightly regresses cleanup capabilities for statically linked
binaries. If that becomes a problem, we should start calling
__rtld_libc_freeres from __libc_freeres (perhaps after renaming it).
---
elf/dl-libc.c | 107 ------------------------------------------
elf/dl-libc_freeres.c | 101 +++++++++++++++++++++++++++++++++++++++
include/set-freeres.h | 1 -
malloc/set-freeres.c | 2 -
4 files changed, 101 insertions(+), 110 deletions(-)
diff --git a/elf/dl-libc.c b/elf/dl-libc.c
index c12e52f330..1d0ebd4793 100644
--- a/elf/dl-libc.c
+++ b/elf/dl-libc.c
@@ -226,110 +226,3 @@ __libc_dlclose (void *map)
#endif
return dlerror_run (do_dlclose, map);
}
-
-
-static bool
-free_slotinfo (struct dtv_slotinfo_list **elemp)
-{
- size_t cnt;
-
- if (*elemp == NULL)
- /* Nothing here, all is removed (or there never was anything). */
- return true;
-
- if (!free_slotinfo (&(*elemp)->next))
- /* We cannot free the entry. */
- return false;
-
- /* That cleared our next pointer for us. */
-
- for (cnt = 0; cnt < (*elemp)->len; ++cnt)
- if ((*elemp)->slotinfo[cnt].map != NULL)
- /* Still used. */
- return false;
-
- /* We can remove the list element. */
- free (*elemp);
- *elemp = NULL;
-
- return true;
-}
-
-
-void
-__dl_libc_freemem (void)
-{
- struct link_map *l;
- struct r_search_path_elem *d;
-
- /* Remove all search directories. */
- d = GL(dl_all_dirs);
- while (d != GLRO(dl_init_all_dirs))
- {
- struct r_search_path_elem *old = d;
- d = d->next;
- free (old);
- }
-
- for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
- {
- for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
- {
- struct libname_list *lnp = l->l_libname->next;
-
- l->l_libname->next = NULL;
-
- /* Remove all additional names added to the objects. */
- while (lnp != NULL)
- {
- struct libname_list *old = lnp;
- lnp = lnp->next;
- if (! old->dont_free)
- free (old);
- }
-
- /* Free the initfini dependency list. */
- if (l->l_free_initfini)
- free (l->l_initfini);
- l->l_initfini = NULL;
- }
-
- if (__builtin_expect (GL(dl_ns)[ns]._ns_global_scope_alloc, 0) != 0
- && (GL(dl_ns)[ns]._ns_main_searchlist->r_nlist
- // XXX Check whether we need NS-specific initial_searchlist
- == GLRO(dl_initial_searchlist).r_nlist))
- {
- /* All object dynamically loaded by the program are unloaded. Free
- the memory allocated for the global scope variable. */
- struct link_map **old = GL(dl_ns)[ns]._ns_main_searchlist->r_list;
-
- /* Put the old map in. */
- GL(dl_ns)[ns]._ns_main_searchlist->r_list
- // XXX Check whether we need NS-specific initial_searchlist
- = GLRO(dl_initial_searchlist).r_list;
- /* Signal that the original map is used. */
- GL(dl_ns)[ns]._ns_global_scope_alloc = 0;
-
- /* Now free the old map. */
- free (old);
- }
- }
-
- /* Free the memory allocated for the dtv slotinfo array. We can do
- this only if all modules which used this memory are unloaded. */
-#ifdef SHARED
- if (GL(dl_initial_dtv) == NULL)
- /* There was no initial TLS setup, it was set up later when
- it used the normal malloc. */
- free_slotinfo (&GL(dl_tls_dtv_slotinfo_list));
- else
-#endif
- /* The first element of the list does not have to be deallocated.
- It was allocated in the dynamic linker (i.e., with a different
- malloc), and in the static library it's in .bss space. */
- free_slotinfo (&GL(dl_tls_dtv_slotinfo_list)->next);
-
- void *scope_free_list = GL(dl_scope_free_list);
- GL(dl_scope_free_list) = NULL;
- free (scope_free_list);
-}
diff --git a/elf/dl-libc_freeres.c b/elf/dl-libc_freeres.c
index d6d824acb5..d60c18d359 100644
--- a/elf/dl-libc_freeres.c
+++ b/elf/dl-libc_freeres.c
@@ -19,8 +19,109 @@
#include <ldsodefs.h>
#include <dl-find_object.h>
+static bool
+free_slotinfo (struct dtv_slotinfo_list **elemp)
+{
+ size_t cnt;
+
+ if (*elemp == NULL)
+ /* Nothing here, all is removed (or there never was anything). */
+ return true;
+
+ if (!free_slotinfo (&(*elemp)->next))
+ /* We cannot free the entry. */
+ return false;
+
+ /* That cleared our next pointer for us. */
+
+ for (cnt = 0; cnt < (*elemp)->len; ++cnt)
+ if ((*elemp)->slotinfo[cnt].map != NULL)
+ /* Still used. */
+ return false;
+
+ /* We can remove the list element. */
+ free (*elemp);
+ *elemp = NULL;
+
+ return true;
+}
+
void
__rtld_libc_freeres (void)
{
+ struct link_map *l;
+ struct r_search_path_elem *d;
+
+ /* Remove all search directories. */
+ d = GL(dl_all_dirs);
+ while (d != GLRO(dl_init_all_dirs))
+ {
+ struct r_search_path_elem *old = d;
+ d = d->next;
+ free (old);
+ }
+
+ for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
+ {
+ for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
+ {
+ struct libname_list *lnp = l->l_libname->next;
+
+ l->l_libname->next = NULL;
+
+ /* Remove all additional names added to the objects. */
+ while (lnp != NULL)
+ {
+ struct libname_list *old = lnp;
+ lnp = lnp->next;
+ if (! old->dont_free)
+ free (old);
+ }
+
+ /* Free the initfini dependency list. */
+ if (l->l_free_initfini)
+ free (l->l_initfini);
+ l->l_initfini = NULL;
+ }
+
+ if (__builtin_expect (GL(dl_ns)[ns]._ns_global_scope_alloc, 0) != 0
+ && (GL(dl_ns)[ns]._ns_main_searchlist->r_nlist
+ // XXX Check whether we need NS-specific initial_searchlist
+ == GLRO(dl_initial_searchlist).r_nlist))
+ {
+ /* All object dynamically loaded by the program are unloaded. Free
+ the memory allocated for the global scope variable. */
+ struct link_map **old = GL(dl_ns)[ns]._ns_main_searchlist->r_list;
+
+ /* Put the old map in. */
+ GL(dl_ns)[ns]._ns_main_searchlist->r_list
+ // XXX Check whether we need NS-specific initial_searchlist
+ = GLRO(dl_initial_searchlist).r_list;
+ /* Signal that the original map is used. */
+ GL(dl_ns)[ns]._ns_global_scope_alloc = 0;
+
+ /* Now free the old map. */
+ free (old);
+ }
+ }
+
+ /* Free the memory allocated for the dtv slotinfo array. We can do
+ this only if all modules which used this memory are unloaded. */
+#ifdef SHARED
+ if (GL(dl_initial_dtv) == NULL)
+ /* There was no initial TLS setup, it was set up later when
+ it used the normal malloc. */
+ free_slotinfo (&GL(dl_tls_dtv_slotinfo_list));
+ else
+#endif
+ /* The first element of the list does not have to be deallocated.
+ It was allocated in the dynamic linker (i.e., with a different
+ malloc), and in the static library it's in .bss space. */
+ free_slotinfo (&GL(dl_tls_dtv_slotinfo_list)->next);
+
+ void *scope_free_list = GL(dl_scope_free_list);
+ GL(dl_scope_free_list) = NULL;
+ free (scope_free_list);
+
_dl_find_object_freeres ();
}
diff --git a/include/set-freeres.h b/include/set-freeres.h
index 866d0e3717..4286710df9 100644
--- a/include/set-freeres.h
+++ b/include/set-freeres.h
@@ -38,7 +38,6 @@
Each free routines must be explicit listed below. */
/* From libc.so. */
-extern void __dl_libc_freemem (void) attribute_hidden;
extern void __hdestroy (void) attribute_hidden;
extern void __gconv_cache_freemem (void) attribute_hidden;
extern void __gconv_conf_freemem (void) attribute_hidden;
diff --git a/malloc/set-freeres.c b/malloc/set-freeres.c
index 41ef9c2dd6..5839e08983 100644
--- a/malloc/set-freeres.c
+++ b/malloc/set-freeres.c
@@ -28,7 +28,6 @@
# pragma weak __nss_module_freeres
# pragma weak __nss_action_freeres
# pragma weak __nss_database_freeres
-# pragma weak __dl_libc_freemem
# pragma weak __hdestroy
# pragma weak __gconv_cache_freemem
# pragma weak __gconv_conf_freemem
@@ -136,7 +135,6 @@ __libc_freeres (void)
_IO_cleanup ();
/* We run the resource freeing after IO cleanup. */
- call_function_static_weak (__dl_libc_freemem);
call_function_static_weak (__hdestroy);
call_function_static_weak (__gconv_cache_freemem);
call_function_static_weak (__gconv_conf_freemem);
--
2.43.0
* [PATCH v3 14/32] elf: Use struct link_map_private for the internal link map
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (12 preceding siblings ...)
2023-12-07 10:31 ` [PATCH v3 13/32] elf: Merge __dl_libc_freemem into __rtld_libc_freeres Florian Weimer
@ 2023-12-07 10:31 ` Florian Weimer
2024-02-22 23:36 ` Joseph Myers
2023-12-07 10:32 ` [PATCH v3 15/32] elf: Remove run-time-writable fields from struct link_map_private Florian Weimer
` (18 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:31 UTC (permalink / raw)
To: libc-alpha
And struct link_map for the external link map type. This avoids
having different definitions of struct link_map in different
translation units, and should help with debugging.
Upcasts (from struct link_map_private * to struct link_map *) should
use &L->l_public (no actual cast is needed). Downcasts should use the
l_private helper function.
The churn from moving l_addr, l_name, etc. to l_public.l_addr and
l_public.l_name could be avoided by building glibc with
-fms-extensions, but that does not seem to be a good idea because we
probably do not want the other extensions enabled by that flag.
Introducing access macros for l_addr does not work because some of the
l_* names (including l_addr) are also used as local variables.
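A minimal sketch of the resulting layout and cast helpers (assumed
shape only; the actual definitions in include/link.h may differ, for
example the helper could be a plain cast if l_public remains the
first member):

  #include <stddef.h>   /* offsetof */

  struct link_map_private
  {
    struct link_map l_public;   /* Fields visible to applications.  */
    /* ... internal GLIBC_PRIVATE fields follow ...  */
  };

  /* Downcast: recover the private map from the public part.  */
  static inline struct link_map_private *
  l_private (struct link_map *l)
  {
    return (struct link_map_private *) ((char *) l
                                        - offsetof (struct link_map_private,
                                                    l_public));
  }

  /* Upcast: no cast at all, just take the address of the embedded
     member, as in &map->l_public.  */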
---
csu/libc-start.c | 7 +-
csu/libc-tls.c | 4 +-
debug/backtracesyms.c | 4 +-
debug/backtracesymsfd.c | 6 +-
dlfcn/dladdr1.c | 7 +-
dlfcn/dlinfo.c | 4 +-
dlfcn/tst-dlinfo-phdr.c | 15 +-
elf/circleload1.c | 18 +-
elf/dl-addr-obj.c | 4 +-
elf/dl-addr.c | 13 +-
elf/dl-audit.c | 25 +--
elf/dl-call-libc-early-init.c | 2 +-
elf/dl-call_fini.c | 9 +-
elf/dl-close.c | 53 ++---
elf/dl-debug.c | 12 --
elf/dl-deps.c | 95 ++++-----
elf/dl-find_object.c | 34 ++--
elf/dl-find_object.h | 18 +-
elf/dl-fini.c | 8 +-
elf/dl-fptr.c | 6 +-
elf/dl-init.c | 18 +-
elf/dl-iteratephdr.c | 11 +-
elf/dl-libc.c | 8 +-
elf/dl-libc_freeres.c | 7 +-
elf/dl-load.c | 89 +++++----
elf/dl-load.h | 6 +-
elf/dl-lookup-direct.c | 5 +-
elf/dl-lookup.c | 86 ++++----
elf/dl-machine-reject-phdr.h | 4 +-
elf/dl-map-segments.h | 16 +-
elf/dl-minimal.c | 4 +-
elf/dl-misc.c | 4 +-
elf/dl-object.c | 34 ++--
elf/dl-open.c | 92 ++++-----
elf/dl-profile.c | 4 +-
elf/dl-reloc-static-pie.c | 7 +-
elf/dl-reloc.c | 31 +--
elf/dl-runtime.c | 6 +-
elf/dl-setup_hash.c | 2 +-
elf/dl-sort-maps.c | 30 +--
elf/dl-static-tls.h | 2 +-
elf/dl-support.c | 26 +--
elf/dl-sym-post.h | 6 +-
elf/dl-sym.c | 10 +-
elf/dl-symaddr.c | 2 +-
elf/dl-tls.c | 26 +--
elf/dl-unmap-segments.h | 2 +-
elf/dl-usage.c | 2 +-
elf/dl-version.c | 54 ++---
elf/do-rel.h | 8 +-
elf/dynamic-link.h | 12 +-
elf/get-dynamic-info.h | 10 +-
elf/libc-early-init.h | 6 +-
elf/loadtest.c | 30 +--
elf/neededtest.c | 19 +-
elf/neededtest2.c | 18 +-
elf/neededtest3.c | 18 +-
elf/neededtest4.c | 18 +-
elf/rtld.c | 184 ++++++++++--------
elf/rtld_static_init.c | 2 +-
elf/setup-vdso.h | 33 ++--
elf/sotruss-lib.c | 3 +-
elf/sprof.c | 22 +--
elf/tlsdeschtab.h | 4 +-
elf/tst-_dl_addr_inside_object.c | 13 +-
elf/tst-audit19a.c | 2 +-
elf/tst-dl_find_object-threads.c | 6 +-
elf/tst-dl_find_object.c | 19 +-
elf/tst-tls6.c | 8 +-
elf/tst-tls7.c | 8 +-
elf/tst-tls8.c | 24 +--
elf/unload.c | 9 +-
elf/unload2.c | 9 +-
htl/pt-alloc.c | 2 +-
include/dlfcn.h | 6 +-
include/link.h | 48 ++---
include/rtld-malloc.h | 5 +-
libio/vtables.c | 2 +-
nptl_db/db_info.c | 2 +-
stdlib/cxa_thread_atexit_impl.c | 6 +-
stdlib/tst-tls-atexit.c | 10 +-
sysdeps/aarch64/dl-bti.c | 14 +-
sysdeps/aarch64/dl-lookupcfg.h | 4 +-
sysdeps/aarch64/dl-machine.h | 24 +--
sysdeps/aarch64/dl-prop.h | 12 +-
sysdeps/aarch64/dl-tlsdesc.h | 2 +-
sysdeps/aarch64/tlsdesc.c | 2 +-
sysdeps/alpha/dl-machine.h | 20 +-
sysdeps/arc/dl-machine.h | 18 +-
sysdeps/arm/dl-lookupcfg.h | 4 +-
sysdeps/arm/dl-machine.h | 28 +--
sysdeps/arm/dl-tlsdesc.h | 2 +-
sysdeps/arm/tlsdesc.c | 2 +-
sysdeps/csky/dl-machine.h | 20 +-
sysdeps/generic/dl-debug.h | 2 +-
sysdeps/generic/dl-fptr.h | 4 +-
sysdeps/generic/dl-prop.h | 8 +-
sysdeps/generic/dl-protected.h | 10 +-
sysdeps/generic/ldsodefs.h | 152 ++++++++-------
sysdeps/generic/rtld_static_init.h | 3 +-
sysdeps/hppa/dl-fptr.c | 10 +-
sysdeps/hppa/dl-lookupcfg.h | 6 +-
sysdeps/hppa/dl-machine.h | 26 +--
sysdeps/hppa/dl-runtime.c | 4 +-
sysdeps/hppa/dl-runtime.h | 2 +-
sysdeps/hppa/dl-symaddr.c | 2 +-
sysdeps/htl/pthreadP.h | 2 +-
sysdeps/i386/dl-machine.h | 30 +--
sysdeps/i386/dl-tlsdesc.h | 2 +-
sysdeps/i386/tlsdesc.c | 2 +-
sysdeps/ia64/dl-lookupcfg.h | 6 +-
sysdeps/ia64/dl-machine.h | 29 +--
sysdeps/loongarch/dl-machine.h | 19 +-
sysdeps/m68k/dl-machine.h | 20 +-
sysdeps/microblaze/dl-machine.h | 20 +-
sysdeps/mips/dl-debug.h | 2 +-
sysdeps/mips/dl-machine-reject-phdr.h | 20 +-
sysdeps/mips/dl-machine.h | 58 +++---
sysdeps/mips/dl-trampoline.c | 19 +-
sysdeps/nios2/dl-init.c | 6 +-
sysdeps/nios2/dl-machine.h | 19 +-
sysdeps/nptl/dl-mutex.c | 2 +-
sysdeps/or1k/dl-machine.h | 16 +-
sysdeps/powerpc/powerpc32/dl-machine.c | 19 +-
sysdeps/powerpc/powerpc32/dl-machine.h | 36 ++--
sysdeps/powerpc/powerpc64/dl-machine.c | 8 +-
sysdeps/powerpc/powerpc64/dl-machine.h | 40 ++--
sysdeps/riscv/dl-machine.h | 26 +--
sysdeps/s390/s390-32/dl-machine.h | 24 +--
sysdeps/s390/s390-64/dl-machine.h | 24 +--
sysdeps/sh/dl-machine.h | 22 ++-
sysdeps/sparc/sparc32/dl-machine.h | 20 +-
sysdeps/sparc/sparc64/dl-irel.h | 2 +-
sysdeps/sparc/sparc64/dl-machine.h | 23 +--
sysdeps/sparc/sparc64/dl-plt.h | 4 +-
sysdeps/unix/sysv/linux/dl-vdso.h | 2 +-
sysdeps/unix/sysv/linux/powerpc/libc-start.c | 2 +-
.../sysv/linux/powerpc/powerpc64/ldsodefs.h | 14 +-
.../sysv/linux/powerpc/rtld_static_init.h | 3 +-
sysdeps/x86/dl-lookupcfg.h | 4 +-
sysdeps/x86/dl-prop.h | 25 +--
sysdeps/x86_64/dl-machine.h | 34 ++--
sysdeps/x86_64/dl-tlsdesc.h | 2 +-
sysdeps/x86_64/tlsdesc.c | 2 +-
144 files changed, 1318 insertions(+), 1175 deletions(-)
diff --git a/csu/libc-start.c b/csu/libc-start.c
index c3bb6d09bc..5ec6303c82 100644
--- a/csu/libc-start.c
+++ b/csu/libc-start.c
@@ -125,14 +125,14 @@ static void
call_init (int argc, char **argv, char **env)
{
/* Obtain the main map of the executable. */
- struct link_map *l = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
+ struct link_map_private *l = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
/* DT_PREINIT_ARRAY is not processed here. It is already handled in
_dl_init in elf/dl-init.c. Also see the call_init function in
the same file. */
if (ELF_INITFINI && l->l_info[DT_INIT] != NULL)
- DL_CALL_DT_INIT(l, l->l_addr + l->l_info[DT_INIT]->d_un.d_ptr,
+ DL_CALL_DT_INIT(l, l->l_public.l_addr + l->l_info[DT_INIT]->d_un.d_ptr,
argc, argv, env);
ElfW(Dyn) *init_array = l->l_info[DT_INIT_ARRAY];
@@ -140,7 +140,8 @@ call_init (int argc, char **argv, char **env)
{
unsigned int jm
= l->l_info[DT_INIT_ARRAYSZ]->d_un.d_val / sizeof (ElfW(Addr));
- ElfW(Addr) *addrs = (void *) (init_array->d_un.d_ptr + l->l_addr);
+ ElfW(Addr) *addrs = (void *) (init_array->d_un.d_ptr
+ + l->l_public.l_addr);
for (unsigned int j = 0; j < jm; ++j)
((dl_init_t) addrs[j]) (argc, argv, env);
}
diff --git a/csu/libc-tls.c b/csu/libc-tls.c
index cdf6442c02..7a3238789d 100644
--- a/csu/libc-tls.c
+++ b/csu/libc-tls.c
@@ -114,7 +114,7 @@ __libc_setup_tls (void)
size_t tcb_offset;
const ElfW(Phdr) *phdr;
- struct link_map *main_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
+ struct link_map_private *main_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
__tls_pre_init_tp ();
@@ -125,7 +125,7 @@ __libc_setup_tls (void)
/* Remember the values we need. */
memsz = phdr->p_memsz;
filesz = phdr->p_filesz;
- initimage = (void *) phdr->p_vaddr + main_map->l_addr;
+ initimage = (void *) phdr->p_vaddr + main_map->l_public.l_addr;
align = phdr->p_align;
if (phdr->p_align > max_align)
max_align = phdr->p_align;
diff --git a/debug/backtracesyms.c b/debug/backtracesyms.c
index 079d8d0d22..0633fc0f5a 100644
--- a/debug/backtracesyms.c
+++ b/debug/backtracesyms.c
@@ -44,7 +44,7 @@ __backtrace_symbols (void *const *array, int size)
/* Fill in the information we can get from `dladdr'. */
for (cnt = 0; cnt < size; ++cnt)
{
- struct link_map *map;
+ struct link_map_private *map;
status[cnt] = _dl_addr (array[cnt], &info[cnt], &map, NULL);
if (status[cnt] && info[cnt].dli_fname && info[cnt].dli_fname[0] != '\0')
{
@@ -58,7 +58,7 @@ __backtrace_symbols (void *const *array, int size)
address. The use of these addresses is to calculate an
address in the ELF file, so its prelinked bias is not
something we want to subtract out. */
- info[cnt].dli_fbase = (void *) map->l_addr;
+ info[cnt].dli_fbase = (void *) map->l_public.l_addr;
}
else
total += 5 + WORD_WIDTH;
diff --git a/debug/backtracesymsfd.c b/debug/backtracesymsfd.c
index e78b1c0832..6dab0b38b6 100644
--- a/debug/backtracesymsfd.c
+++ b/debug/backtracesymsfd.c
@@ -42,7 +42,7 @@ __backtrace_symbols_fd (void *const *array, int size, int fd)
char buf[WORD_WIDTH];
char buf2[WORD_WIDTH];
Dl_info info;
- struct link_map *map;
+ struct link_map_private *map;
size_t last = 0;
if (_dl_addr (array[cnt], &info, &map, NULL)
@@ -53,7 +53,7 @@ __backtrace_symbols_fd (void *const *array, int size, int fd)
iov[0].iov_len = strlen (info.dli_fname);
last = 1;
- if (info.dli_sname != NULL || map->l_addr != 0)
+ if (info.dli_sname != NULL || map->l_public.l_addr != 0)
{
size_t diff;
@@ -74,7 +74,7 @@ __backtrace_symbols_fd (void *const *array, int size, int fd)
address. The use of these addresses is to calculate an
address in the ELF file, so its prelinked bias is not
something we want to subtract out. */
- info.dli_saddr = (void *) map->l_addr;
+ info.dli_saddr = (void *) map->l_public.l_addr;
if (array[cnt] >= (void *) info.dli_saddr)
{
diff --git a/dlfcn/dladdr1.c b/dlfcn/dladdr1.c
index 9466ef1b6e..fe67f5c188 100644
--- a/dlfcn/dladdr1.c
+++ b/dlfcn/dladdr1.c
@@ -36,7 +36,12 @@ __dladdr1 (const void *address, Dl_info *info, void **extra, int flags)
case RTLD_DL_SYMENT:
return _dl_addr (address, info, NULL, (const ElfW(Sym) **) extra);
case RTLD_DL_LINKMAP:
- return _dl_addr (address, info, (struct link_map **) extra, NULL);
+ {
+ struct link_map_private *l;
+ int ret = _dl_addr (address, info, &l, NULL);
+ *(struct link_map **)extra = &l->l_public;
+ return ret;
+ }
}
}
versioned_symbol (libc, __dladdr1, dladdr1, GLIBC_2_34);
diff --git a/dlfcn/dlinfo.c b/dlfcn/dlinfo.c
index 1b5dd90ae5..2ac4320b36 100644
--- a/dlfcn/dlinfo.c
+++ b/dlfcn/dlinfo.c
@@ -38,7 +38,7 @@ static void
dlinfo_doit (void *argsblock)
{
struct dlinfo_args *const args = argsblock;
- struct link_map *l = args->handle;
+ struct link_map_private *l = args->handle;
switch (args->request)
{
@@ -53,7 +53,7 @@ dlinfo_doit (void *argsblock)
break;
case RTLD_DI_LINKMAP:
- *(struct link_map **) args->arg = l;
+ *(struct link_map **) args->arg = &l->l_public;
break;
case RTLD_DI_SERINFO:
diff --git a/dlfcn/tst-dlinfo-phdr.c b/dlfcn/tst-dlinfo-phdr.c
index 4448680f51..b8a5e8e5e3 100644
--- a/dlfcn/tst-dlinfo-phdr.c
+++ b/dlfcn/tst-dlinfo-phdr.c
@@ -59,13 +59,13 @@ do_test (void)
{
/* Avoid a copy relocation. */
struct r_debug *debug = xdlsym (RTLD_DEFAULT, "_r_debug");
- struct link_map *l = (struct link_map *) debug->r_map;
+ struct link_map_private *l = l_private (debug->r_map);
TEST_VERIFY_EXIT (l != NULL);
do
{
printf ("info: checking link map %p (%p) for \"%s\"\n",
- l, l->l_phdr, l->l_name);
+ l, l->l_phdr, l->l_public.l_name);
/* Cause dlerror () to return an error message. */
dlsym (RTLD_DEFAULT, "does-not-exist");
@@ -87,7 +87,8 @@ do_test (void)
if (phdr[i].p_type == PT_DYNAMIC)
{
dynamic_found = true;
- TEST_COMPARE ((ElfW(Addr)) l->l_ld, l->l_addr + phdr[i].p_vaddr);
+ TEST_COMPARE ((ElfW(Addr)) l->l_public.l_ld,
+ l->l_public.l_addr + phdr[i].p_vaddr);
}
TEST_VERIFY (dynamic_found);
}
@@ -97,7 +98,7 @@ do_test (void)
{
struct dlip_callback_args args =
{
- .l = l,
+ .l = &l->l_public,
.phdr = phdr,
.phnum = phnum,
.found = false,
@@ -106,16 +107,16 @@ do_test (void)
TEST_VERIFY (args.found);
}
- if (l->l_prev == NULL)
+ if (l->l_public.l_prev == NULL)
{
/* This is the executable, so the information is also
available via getauxval. */
- TEST_COMPARE_STRING (l->l_name, "");
+ TEST_COMPARE_STRING (l->l_public.l_name, "");
TEST_VERIFY (phdr == (const ElfW(Phdr) *) getauxval (AT_PHDR));
TEST_COMPARE (phnum, getauxval (AT_PHNUM));
}
- l = l->l_next;
+ l = l_next (l);
}
while (l != NULL);
diff --git a/elf/circleload1.c b/elf/circleload1.c
index 990ff84a84..dcf04bc25a 100644
--- a/elf/circleload1.c
+++ b/elf/circleload1.c
@@ -5,12 +5,12 @@
#include <stdlib.h>
#include <string.h>
-#define MAPS ((struct link_map *) _r_debug.r_map)
+#define MAPS (l_private (_r_debug.r_map))
static int
check_loaded_objects (const char **loaded)
{
- struct link_map *lm;
+ struct link_map_private *lm;
int n;
int *found = NULL;
int errors = 0;
@@ -26,16 +26,18 @@ check_loaded_objects (const char **loaded)
printf(" Name\n");
printf(" --------------------------------------------------------\n");
- for (lm = MAPS; lm; lm = lm->l_next)
+ for (lm = MAPS; lm != NULL; lm = l_next (lm))
{
- if (lm->l_name && lm->l_name[0])
- printf(" %s, count = %d\n", lm->l_name, (int) lm->l_direct_opencount);
- if (lm->l_type == lt_loaded && lm->l_name)
+ if (lm->l_public.l_name && lm->l_public.l_name[0])
+ printf(" %s, count = %d\n", lm->l_public.l_name,
+ (int) lm->l_direct_opencount);
+ if (lm->l_type == lt_loaded && lm->l_public.l_name)
{
int match = 0;
for (n = 0; loaded[n] != NULL; n++)
{
- if (strcmp (basename (loaded[n]), basename (lm->l_name)) == 0)
+ if (strcmp (basename (loaded[n]),
+ basename (lm->l_public.l_name)) == 0)
{
found[n] = 1;
match = 1;
@@ -46,7 +48,7 @@ check_loaded_objects (const char **loaded)
if (match == 0)
{
++errors;
- printf ("ERRORS: %s is not unloaded\n", lm->l_name);
+ printf ("ERRORS: %s is not unloaded\n", lm->l_public.l_name);
}
}
}
diff --git a/elf/dl-addr-obj.c b/elf/dl-addr-obj.c
index 630710849f..78b8069235 100644
--- a/elf/dl-addr-obj.c
+++ b/elf/dl-addr-obj.c
@@ -61,10 +61,10 @@
*/
int
-_dl_addr_inside_object (struct link_map *l, const ElfW(Addr) addr)
+_dl_addr_inside_object (struct link_map_private *l, const ElfW(Addr) addr)
{
int n = l->l_phnum;
- const ElfW(Addr) reladdr = addr - l->l_addr;
+ const ElfW(Addr) reladdr = addr - l->l_public.l_addr;
while (--n >= 0)
if (l->l_phdr[n].p_type == PT_LOAD
diff --git a/elf/dl-addr.c b/elf/dl-addr.c
index 44f0103687..6fbc1d4d59 100644
--- a/elf/dl-addr.c
+++ b/elf/dl-addr.c
@@ -23,15 +23,16 @@
static inline void
__attribute ((always_inline))
-determine_info (const ElfW(Addr) addr, struct link_map *match, Dl_info *info,
- struct link_map **mapp, const ElfW(Sym) **symbolp)
+determine_info (const ElfW(Addr) addr, struct link_map_private *match,
+ Dl_info *info, struct link_map_private **mapp,
+ const ElfW(Sym) **symbolp)
{
/* Now we know what object the address lies in. */
- info->dli_fname = match->l_name;
+ info->dli_fname = match->l_public.l_name;
info->dli_fbase = (void *) match->l_map_start;
/* If this is the main program the information is incomplete. */
- if (__builtin_expect (match->l_name[0], 'a') == '\0'
+ if (__builtin_expect (match->l_public.l_name[0], 'a') == '\0'
&& match->l_type == lt_executable)
info->dli_fname = _dl_argv[0];
@@ -116,7 +117,7 @@ determine_info (const ElfW(Addr) addr, struct link_map *match, Dl_info *info,
int
_dl_addr (const void *address, Dl_info *info,
- struct link_map **mapp, const ElfW(Sym) **symbolp)
+ struct link_map_private **mapp, const ElfW(Sym) **symbolp)
{
const ElfW(Addr) addr = DL_LOOKUP_ADDRESS (address);
int result = 0;
@@ -124,7 +125,7 @@ _dl_addr (const void *address, Dl_info *info,
/* Protect against concurrent loads and unloads. */
__rtld_lock_lock_recursive (GL(dl_load_lock));
- struct link_map *l = _dl_find_dso_for_object (addr);
+ struct link_map_private *l = _dl_find_dso_for_object (addr);
if (l)
{
diff --git a/elf/dl-audit.c b/elf/dl-audit.c
index 81543f85fe..d7a55123ec 100644
--- a/elf/dl-audit.c
+++ b/elf/dl-audit.c
@@ -25,7 +25,7 @@
#include <sys/param.h>
void
-_dl_audit_activity_map (struct link_map *l, int action)
+_dl_audit_activity_map (struct link_map_private *l, int action)
{
struct audit_ifaces *afct = GLRO(dl_audit);
for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
@@ -42,7 +42,7 @@ _dl_audit_activity_nsid (Lmid_t nsid, int action)
/* If head is NULL, the namespace has become empty, and the audit interface
does not give us a way to signal LA_ACT_CONSISTENT for it because the
first loaded module is used to identify the namespace. */
- struct link_map *head = GL(dl_ns)[nsid]._ns_loaded;
+ struct link_map_private *head = GL(dl_ns)[nsid]._ns_loaded;
if (__glibc_likely (GLRO(dl_naudit) == 0)
|| head == NULL || head->l_auditing)
return;
@@ -51,7 +51,8 @@ _dl_audit_activity_nsid (Lmid_t nsid, int action)
}
const char *
-_dl_audit_objsearch (const char *name, struct link_map *l, unsigned int code)
+_dl_audit_objsearch (const char *name, struct link_map_private *l,
+ unsigned int code)
{
if (l == NULL || l->l_auditing || code == 0)
return name;
@@ -73,7 +74,7 @@ _dl_audit_objsearch (const char *name, struct link_map *l, unsigned int code)
}
void
-_dl_audit_objopen (struct link_map *l, Lmid_t nsid)
+_dl_audit_objopen (struct link_map_private *l, Lmid_t nsid)
{
if (__glibc_likely (GLRO(dl_naudit) == 0))
return;
@@ -93,7 +94,7 @@ _dl_audit_objopen (struct link_map *l, Lmid_t nsid)
}
void
-_dl_audit_objclose (struct link_map *l)
+_dl_audit_objclose (struct link_map_private *l)
{
if (__glibc_likely (GLRO(dl_naudit) == 0)
|| GL(dl_ns)[l->l_ns]._ns_loaded->l_auditing)
@@ -114,7 +115,7 @@ _dl_audit_objclose (struct link_map *l)
}
void
-_dl_audit_preinit (struct link_map *l)
+_dl_audit_preinit (struct link_map_private *l)
{
if (__glibc_likely (GLRO(dl_naudit) == 0))
return;
@@ -129,8 +130,8 @@ _dl_audit_preinit (struct link_map *l)
}
void
-_dl_audit_symbind_alt (struct link_map *l, const ElfW(Sym) *ref, void **value,
- lookup_t result)
+_dl_audit_symbind_alt (struct link_map_private *l, const ElfW(Sym) *ref,
+ void **value, lookup_t result)
{
if ((l->l_audit_any_plt | result->l_audit_any_plt) == 0)
return;
@@ -175,7 +176,8 @@ _dl_audit_symbind_alt (struct link_map *l, const ElfW(Sym) *ref, void **value,
rtld_hidden_def (_dl_audit_symbind_alt)
void
-_dl_audit_symbind (struct link_map *l, struct reloc_result *reloc_result,
+_dl_audit_symbind (struct link_map_private *l,
+ struct reloc_result *reloc_result,
const void *reloc, const ElfW(Sym) *defsym,
DL_FIXUP_VALUE_TYPE *value, lookup_t result, bool lazy)
{
@@ -263,7 +265,8 @@ _dl_audit_symbind (struct link_map *l, struct reloc_result *reloc_result,
}
void
-_dl_audit_pltenter (struct link_map *l, struct reloc_result *reloc_result,
+_dl_audit_pltenter (struct link_map_private *l,
+ struct reloc_result *reloc_result,
DL_FIXUP_VALUE_TYPE *value, void *regs, long int *framesize)
{
/* Don't do anything if no auditor wants to intercept this call. */
@@ -339,7 +342,7 @@ _dl_audit_pltenter (struct link_map *l, struct reloc_result *reloc_result,
void
DL_ARCH_FIXUP_ATTRIBUTE
-_dl_audit_pltexit (struct link_map *l, ElfW(Word) reloc_arg,
+_dl_audit_pltexit (struct link_map_private *l, ElfW(Word) reloc_arg,
const void *inregs, void *outregs)
{
const uintptr_t pltgot = (uintptr_t) D_PTR (l, l_info[DT_PLTGOT]);
diff --git a/elf/dl-call-libc-early-init.c b/elf/dl-call-libc-early-init.c
index 144a744212..c2df5b00c4 100644
--- a/elf/dl-call-libc-early-init.c
+++ b/elf/dl-call-libc-early-init.c
@@ -23,7 +23,7 @@
#include <stddef.h>
void
-_dl_call_libc_early_init (struct link_map *libc_map, _Bool initial)
+_dl_call_libc_early_init (struct link_map_private *libc_map, _Bool initial)
{
/* There is nothing to do if we did not actually load libc.so. */
if (libc_map == NULL)
diff --git a/elf/dl-call_fini.c b/elf/dl-call_fini.c
index 7376546ae0..a9d60e9803 100644
--- a/elf/dl-call_fini.c
+++ b/elf/dl-call_fini.c
@@ -22,11 +22,12 @@
void
_dl_call_fini (void *closure_map)
{
- struct link_map *map = closure_map;
+ struct link_map_private *map = closure_map;
/* When debugging print a message first. */
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS))
- _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n", map->l_name, map->l_ns);
+ _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
+ map->l_public.l_name, map->l_ns);
/* Make sure nothing happens if we are called twice. */
map->l_init_called = 0;
@@ -34,7 +35,7 @@ _dl_call_fini (void *closure_map)
ElfW(Dyn) *fini_array = map->l_info[DT_FINI_ARRAY];
if (fini_array != NULL)
{
- ElfW(Addr) *array = (ElfW(Addr) *) (map->l_addr
+ ElfW(Addr) *array = (ElfW(Addr) *) (map->l_public.l_addr
+ fini_array->d_un.d_ptr);
size_t sz = (map->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
/ sizeof (ElfW(Addr)));
@@ -46,5 +47,5 @@ _dl_call_fini (void *closure_map)
/* Next try the old-style destructor. */
ElfW(Dyn) *fini = map->l_info[DT_FINI];
if (fini != NULL)
- DL_CALL_DT_FINI (map, ((void *) map->l_addr + fini->d_un.d_ptr));
+ DL_CALL_DT_FINI (map, ((void *) map->l_public.l_addr + fini->d_un.d_ptr));
}
diff --git a/elf/dl-close.c b/elf/dl-close.c
index a97a1efa45..7222b21cf0 100644
--- a/elf/dl-close.c
+++ b/elf/dl-close.c
@@ -67,7 +67,7 @@ remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
}
else
{
- struct link_map *old_map = listp->slotinfo[idx - disp].map;
+ struct link_map_private *old_map = listp->slotinfo[idx - disp].map;
/* The entry might still be in its unused state if we are closing an
object that wasn't fully set up. */
@@ -106,7 +106,7 @@ remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
}
void
-_dl_close_worker (struct link_map *map, bool force)
+_dl_close_worker (struct link_map_private *map, bool force)
{
/* One less direct use. */
--map->l_direct_opencount;
@@ -125,7 +125,7 @@ _dl_close_worker (struct link_map *map, bool force)
/* There are still references to this object. Do nothing more. */
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
- map->l_name, map->l_direct_opencount);
+ map->l_public.l_name, map->l_direct_opencount);
return;
}
@@ -138,12 +138,12 @@ _dl_close_worker (struct link_map *map, bool force)
bool any_tls = false;
const unsigned int nloaded = ns->_ns_nloaded;
- struct link_map *maps[nloaded];
+ struct link_map_private *maps[nloaded];
/* Run over the list and assign indexes to the link maps and enter
them into the MAPS array. */
int idx = 0;
- for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
+ for (struct link_map_private *l = ns->_ns_loaded; l != NULL; l = l_next (l))
{
l->l_map_used = 0;
l->l_map_done = 0;
@@ -167,7 +167,7 @@ _dl_close_worker (struct link_map *map, bool force)
int done_index = -1;
while (++done_index < nloaded)
{
- struct link_map *l = maps[done_index];
+ struct link_map_private *l = maps[done_index];
if (l->l_map_done)
/* Already handled. */
@@ -194,7 +194,7 @@ _dl_close_worker (struct link_map *map, bool force)
{
/* We are always the zeroth entry, and since we don't include
ourselves in the dependency analysis start at 1. */
- struct link_map **lp = &l->l_initfini[1];
+ struct link_map_private **lp = &l->l_initfini[1];
while (*lp != NULL)
{
if ((*lp)->l_idx != IDX_STILL_USED)
@@ -220,7 +220,7 @@ _dl_close_worker (struct link_map *map, bool force)
if (l->l_reldeps != NULL)
for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
{
- struct link_map *jmap = l->l_reldeps->list[j];
+ struct link_map_private *jmap = l->l_reldeps->list[j];
if (jmap->l_idx != IDX_STILL_USED)
{
@@ -248,7 +248,7 @@ _dl_close_worker (struct link_map *map, bool force)
unsigned int first_loaded = ~0;
for (unsigned int i = 0; i < nloaded; ++i)
{
- struct link_map *imap = maps[i];
+ struct link_map_private *imap = maps[i];
/* All elements must be in the same namespace. */
assert (imap->l_ns == nsid);
@@ -317,9 +317,9 @@ _dl_close_worker (struct link_map *map, bool force)
l_searchlist address. */
if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
{
- struct link_map *tmap = (struct link_map *)
+ struct link_map_private *tmap = (struct link_map_private *)
((char *) imap->l_scope[cnt]
- - offsetof (struct link_map, l_searchlist));
+ - offsetof (struct link_map_private, l_searchlist));
assert (tmap->l_ns == nsid);
if (tmap->l_idx == IDX_STILL_USED)
++remain;
@@ -363,9 +363,11 @@ _dl_close_worker (struct link_map *map, bool force)
{
if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
{
- struct link_map *tmap = (struct link_map *)
- ((char *) imap->l_scope[cnt]
- - offsetof (struct link_map, l_searchlist));
+ struct link_map_private *tmap
+ = ((struct link_map_private *)
+ ((char *) imap->l_scope[cnt]
+ - offsetof (struct link_map_private,
+ l_searchlist)));
if (tmap->l_idx != IDX_STILL_USED)
{
/* Remove the scope. Or replace with own map's
@@ -489,7 +491,7 @@ _dl_close_worker (struct link_map *map, bool force)
it are gone. */
for (unsigned int i = first_loaded; i < nloaded; ++i)
{
- struct link_map *imap = maps[i];
+ struct link_map_private *imap = maps[i];
if (!imap->l_map_used)
{
assert (imap->l_type == lt_loaded);
@@ -635,12 +637,12 @@ _dl_close_worker (struct link_map *map, bool force)
is tantamount to nsid >= DL_NNS). That should be impossible
in this configuration, so just assert about it instead. */
assert (nsid == LM_ID_BASE);
- assert (imap->l_prev != NULL);
+ assert (imap->l_public.l_prev != NULL);
#else
- if (imap->l_prev == NULL)
+ if (imap->l_public.l_prev == NULL)
{
assert (nsid != LM_ID_BASE);
- ns->_ns_loaded = imap->l_next;
+ ns->_ns_loaded = l_next (imap);
/* Update the pointer to the head of the list
we leave for debuggers to examine. */
@@ -648,11 +650,11 @@ _dl_close_worker (struct link_map *map, bool force)
}
else
#endif
- imap->l_prev->l_next = imap->l_next;
+ imap->l_public.l_prev->l_next = imap->l_public.l_next;
--ns->_ns_nloaded;
- if (imap->l_next != NULL)
- imap->l_next->l_prev = imap->l_prev;
+ if (imap->l_public.l_next != NULL)
+ imap->l_public.l_next->l_prev = imap->l_public.l_prev;
/* Update the data used by _dl_find_object. */
_dl_find_object_dlclose (imap);
@@ -666,10 +668,10 @@ _dl_close_worker (struct link_map *map, bool force)
/* Print debugging message. */
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
- imap->l_name, imap->l_ns);
+ imap->l_public.l_name, imap->l_ns);
/* This name always is allocated. */
- free (imap->l_name);
+ free (imap->l_public.l_name);
/* Remove the list with all the names of the shared object. */
struct libname_list *lnp = imap->l_libname;
@@ -756,7 +758,7 @@ _dl_close_worker (struct link_map *map, bool force)
void
_dl_close (void *_map)
{
- struct link_map *map = _map;
+ struct link_map_private *map = _map;
/* We must take the lock to examine the contents of map and avoid
concurrent dlopens. */
@@ -787,7 +789,8 @@ _dl_close (void *_map)
if (__builtin_expect (map->l_direct_opencount, 1) == 0)
{
__rtld_lock_unlock_recursive (GL(dl_load_lock));
- _dl_signal_error (0, map->l_name, NULL, N_("shared object not open"));
+ _dl_signal_error (0, map->l_public.l_name, NULL,
+ N_("shared object not open"));
}
_dl_close_worker (map, false);
diff --git a/elf/dl-debug.c b/elf/dl-debug.c
index bd7b1cc8f7..6c097e6920 100644
--- a/elf/dl-debug.c
+++ b/elf/dl-debug.c
@@ -18,18 +18,6 @@
#include <ldsodefs.h>
-
-/* These are the members in the public `struct link_map' type.
- Sanity check that the internal type and the public type match. */
-#define VERIFY_MEMBER(name) \
- (offsetof (struct link_map_public, name) == offsetof (struct link_map, name))
-extern const int verify_link_map_members[(VERIFY_MEMBER (l_addr)
- && VERIFY_MEMBER (l_name)
- && VERIFY_MEMBER (l_ld)
- && VERIFY_MEMBER (l_next)
- && VERIFY_MEMBER (l_prev))
- ? 1 : -1];
-
/* Update the `r_map' member and return the address of `struct r_debug'
of the namespace NS. */
diff --git a/elf/dl-deps.c b/elf/dl-deps.c
index 110c8953bd..c730713167 100644
--- a/elf/dl-deps.c
+++ b/elf/dl-deps.c
@@ -45,14 +45,14 @@
struct openaux_args
{
/* The arguments to openaux. */
- struct link_map *map;
+ struct link_map_private *map;
int trace_mode;
int open_mode;
const char *strtab;
const char *name;
/* The return value of openaux. */
- struct link_map *aux;
+ struct link_map_private *aux;
};
static void
@@ -74,7 +74,7 @@ openaux (void *a)
struct list
{
int done; /* Nonzero if this map was processed. */
- struct link_map *map; /* The data. */
+ struct link_map_private *map; /* The data. */
struct list *next; /* Elements for normal list. */
};
@@ -109,7 +109,7 @@ cannot load auxiliary `%s' because of empty dynamic string token " \
}) \
static void
-preload (struct list *known, unsigned int *nlist, struct link_map *map)
+preload (struct list *known, unsigned int *nlist, struct link_map_private *map)
{
known[*nlist].done = 0;
known[*nlist].map = map;
@@ -123,9 +123,9 @@ preload (struct list *known, unsigned int *nlist, struct link_map *map)
}
void
-_dl_map_object_deps (struct link_map *map,
- struct link_map **preloads, unsigned int npreloads,
- int trace_mode, int open_mode)
+_dl_map_object_deps (struct link_map_private *map,
+ struct link_map_private **preloads,
+ unsigned int npreloads, int trace_mode, int open_mode)
{
struct list *known = __alloca (sizeof *known * (1 + npreloads + 1));
struct list *runp, *tail;
@@ -171,8 +171,8 @@ _dl_map_object_deps (struct link_map *map,
name = NULL;
for (runp = known; runp; )
{
- struct link_map *l = runp->map;
- struct link_map **needed = NULL;
+ struct link_map_private *l = runp->map;
+ struct link_map_private **needed = NULL;
unsigned int nneeded = 0;
/* Unless otherwise stated, this object is handled. */
@@ -185,8 +185,8 @@ _dl_map_object_deps (struct link_map *map,
{
/* l->l_ldnum includes space for the terminating NULL. */
if (!scratch_buffer_set_array_size
- (&needed_space, l->l_ldnum, sizeof (struct link_map *)))
- _dl_signal_error (ENOMEM, map->l_name, NULL,
+ (&needed_space, l->l_ldnum, sizeof (struct link_map_private *)))
+ _dl_signal_error (ENOMEM, map->l_public.l_name, NULL,
N_("cannot allocate dependency buffer"));
needed = needed_space.data;
}
@@ -204,11 +204,11 @@ _dl_map_object_deps (struct link_map *map,
args.open_mode = open_mode;
orig = runp;
- for (d = l->l_ld; d->d_tag != DT_NULL; ++d)
+ for (d = l->l_public.l_ld; d->d_tag != DT_NULL; ++d)
if (__builtin_expect (d->d_tag, DT_NEEDED) == DT_NEEDED)
{
/* Map in the needed object. */
- struct link_map *dep;
+ struct link_map_private *dep;
/* Recognize DSTs. */
name = expand_dst (l, strtab + d->d_un.d_val, 0);
@@ -265,7 +265,7 @@ _dl_map_object_deps (struct link_map *map,
_dl_debug_printf ("load auxiliary object=%s"
" requested by file=%s\n",
name,
- DSO_FILENAME (l->l_name));
+ DSO_FILENAME (l->l_public.l_name));
/* We must be prepared that the addressed shared
object is not available. For filter objects the dependency
@@ -347,16 +347,19 @@ _dl_map_object_deps (struct link_map *map,
late->next = late->next->next;
/* We must move the object earlier in the chain. */
- if (args.aux->l_prev != NULL)
- args.aux->l_prev->l_next = args.aux->l_next;
- if (args.aux->l_next != NULL)
- args.aux->l_next->l_prev = args.aux->l_prev;
-
- args.aux->l_prev = newp->map->l_prev;
- newp->map->l_prev = args.aux;
- if (args.aux->l_prev != NULL)
- args.aux->l_prev->l_next = args.aux;
- args.aux->l_next = newp->map;
+ if (args.aux->l_public.l_prev != NULL)
+ args.aux->l_public.l_prev->l_next
+ = args.aux->l_public.l_next;
+ if (args.aux->l_public.l_next != NULL)
+ args.aux->l_public.l_next->l_prev
+ = args.aux->l_public.l_prev;
+
+ args.aux->l_public.l_prev = newp->map->l_public.l_prev;
+ newp->map->l_public.l_prev = &args.aux->l_public;
+ if (args.aux->l_public.l_prev != NULL)
+ args.aux->l_public.l_prev->l_next
+ = &args.aux->l_public;
+ args.aux->l_public.l_next = &newp->map->l_public;
}
else
{
@@ -378,16 +381,18 @@ _dl_map_object_deps (struct link_map *map,
/* The only problem is that in the double linked
list of all objects we don't have this new
object at the correct place. Correct this here. */
- if (args.aux->l_prev)
- args.aux->l_prev->l_next = args.aux->l_next;
- if (args.aux->l_next)
- args.aux->l_next->l_prev = args.aux->l_prev;
-
- args.aux->l_prev = newp->map->l_prev;
- newp->map->l_prev = args.aux;
- if (args.aux->l_prev != NULL)
- args.aux->l_prev->l_next = args.aux;
- args.aux->l_next = newp->map;
+ if (args.aux->l_public.l_prev)
+ args.aux->l_public.l_prev->l_next
+ = args.aux->l_public.l_next;
+ if (args.aux->l_public.l_next)
+ args.aux->l_public.l_next->l_prev
+ = args.aux->l_public.l_prev;
+
+ args.aux->l_public.l_prev = newp->map->l_public.l_prev;
+ newp->map->l_public.l_prev = &args.aux->l_public;
+ if (args.aux->l_public.l_prev != NULL)
+ args.aux->l_public.l_prev->l_next = &args.aux->l_public;
+ args.aux->l_public.l_next = &newp->map->l_public;
}
/* Move the tail pointer if necessary. */
@@ -404,12 +409,12 @@ _dl_map_object_deps (struct link_map *map,
{
needed[nneeded++] = NULL;
- struct link_map **l_initfini = (struct link_map **)
+ struct link_map_private **l_initfini = (struct link_map_private **)
malloc ((2 * nneeded + 1) * sizeof needed[0]);
if (l_initfini == NULL)
{
scratch_buffer_free (&needed_space);
- _dl_signal_error (ENOMEM, map->l_name, NULL,
+ _dl_signal_error (ENOMEM, map->l_public.l_name, NULL,
N_("cannot allocate dependency list"));
}
l_initfini[0] = l;
@@ -434,7 +439,7 @@ _dl_map_object_deps (struct link_map *map,
if (errno == 0 && errno_saved != 0)
__set_errno (errno_saved);
- struct link_map **old_l_initfini = NULL;
+ struct link_map_private **old_l_initfini = NULL;
if (map->l_initfini != NULL && map->l_type == lt_loaded)
{
/* This object was previously loaded as a dependency and we have
@@ -445,11 +450,11 @@ _dl_map_object_deps (struct link_map *map,
/* Store the search list we built in the object. It will be used for
searches in the scope of this object. */
- struct link_map **l_initfini =
- (struct link_map **) malloc ((2 * nlist + 1)
- * sizeof (struct link_map *));
+ struct link_map_private **l_initfini =
+ (struct link_map_private **) malloc ((2 * nlist + 1)
+ * sizeof (struct link_map_private *));
if (l_initfini == NULL)
- _dl_signal_error (ENOMEM, map->l_name, NULL,
+ _dl_signal_error (ENOMEM, map->l_public.l_name, NULL,
N_("cannot allocate symbol search list"));
@@ -485,14 +490,14 @@ _dl_map_object_deps (struct link_map *map,
/* Avoid removing relocation dependencies of the main binary. */
map->l_reserved = 0;
- struct link_map **list = &map->l_reldeps->list[0];
+ struct link_map_private **list = &map->l_reldeps->list[0];
for (i = 0; i < map->l_reldeps->act; ++i)
if (list[i]->l_reserved)
{
/* Need to allocate new array of relocation dependencies. */
l_reldeps = malloc (sizeof (*l_reldeps)
+ map->l_reldepsmax
- * sizeof (struct link_map *));
+ * sizeof (struct link_map_private *));
if (l_reldeps == NULL)
/* Bad luck, keep the reldeps duplicated between
map->l_reldeps->list and map->l_initfini lists. */
@@ -501,7 +506,7 @@ _dl_map_object_deps (struct link_map *map,
{
unsigned int j = i;
memcpy (&l_reldeps->list[0], &list[0],
- i * sizeof (struct link_map *));
+ i * sizeof (struct link_map_private *));
for (i = i + 1; i < map->l_reldeps->act; ++i)
if (!list[i]->l_reserved)
l_reldeps->list[j++] = list[i];
@@ -531,7 +536,7 @@ _dl_map_object_deps (struct link_map *map,
}
else
memcpy (l_initfini, map->l_searchlist.r_list,
- nlist * sizeof (struct link_map *));
+ nlist * sizeof (struct link_map_private *));
/* If libc.so.6 is the main map, it participates in the sort, so
that the relocation order is correct regarding libc.so.6. */
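(As a reading aid only: the two hunks above open-code the same doubly linked list splice twice, unlinking args.aux from the namespace list and reinserting it immediately before newp->map. Under the l_public conventions sketched earlier, the operation is equivalent to a hypothetical helper like this, which is not part of the patch:

static void
move_before (struct link_map_private *aux, struct link_map_private *pos)
{
  /* Unlink AUX from its current position in the namespace list.  */
  if (aux->l_public.l_prev != NULL)
    aux->l_public.l_prev->l_next = aux->l_public.l_next;
  if (aux->l_public.l_next != NULL)
    aux->l_public.l_next->l_prev = aux->l_public.l_prev;

  /* Reinsert AUX immediately before POS.  */
  aux->l_public.l_prev = pos->l_public.l_prev;
  pos->l_public.l_prev = &aux->l_public;
  if (aux->l_public.l_prev != NULL)
    aux->l_public.l_prev->l_next = &aux->l_public;
  aux->l_public.l_next = &pos->l_public;
})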
diff --git a/elf/dl-find_object.c b/elf/dl-find_object.c
index c1390ee10f..5042b0a8c1 100644
--- a/elf/dl-find_object.c
+++ b/elf/dl-find_object.c
@@ -37,8 +37,8 @@ _dl_find_object_slow (void *pc, struct dl_find_object *result)
{
ElfW(Addr) addr = (ElfW(Addr)) pc;
for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
- for (struct link_map *l = GL(dl_ns)[ns]._ns_loaded; l != NULL;
- l = l->l_next)
+ for (struct link_map_private *l = GL(dl_ns)[ns]._ns_loaded; l != NULL;
+ l = l_next (l))
if (addr >= l->l_map_start && addr < l->l_map_end
&& (l->l_contiguous || _dl_addr_inside_object (l, addr)))
{
@@ -168,7 +168,7 @@ _dlfo_mappings_segment_allocate_unpadded (size_t size)
if (size < dlfo_mappings_initial_segment_size)
size = dlfo_mappings_initial_segment_size;
/* No overflow checks here because the size is a mapping count, and
- struct link_map is larger than what we allocate here. */
+ struct link_map_private is larger than what we allocate here. */
enum
{
element_size = sizeof ((struct dlfo_mappings_segment) {}.objects[0])
@@ -206,7 +206,7 @@ _dlfo_mappings_segment_allocate (size_t size,
}
enum { cache_line_size_estimate = 128 };
/* No overflow checks here because the size is a mapping count, and
- struct link_map is larger than what we allocate here. */
+ struct link_map_private is larger than what we allocate here. */
enum
{
element_size = sizeof ((struct dlfo_mappings_segment) {}.objects[0])
@@ -472,7 +472,7 @@ rtld_hidden_def (_dl_find_object)
static size_t
_dlfo_process_initial (void)
{
- struct link_map *main_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
+ struct link_map_private *main_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
size_t nodelete = 0;
if (!main_map->l_contiguous)
@@ -492,7 +492,7 @@ _dlfo_process_initial (void)
/* Second pass only. */
_dlfo_nodelete_mappings[nodelete] = dlfo;
_dlfo_nodelete_mappings[nodelete].map_start
- = ph->p_vaddr + main_map->l_addr;
+ = ph->p_vaddr + main_map->l_public.l_addr;
_dlfo_nodelete_mappings[nodelete].map_end
= _dlfo_nodelete_mappings[nodelete].map_start + ph->p_memsz;
}
@@ -502,8 +502,8 @@ _dlfo_process_initial (void)
size_t loaded = 0;
for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
- for (struct link_map *l = GL(dl_ns)[ns]._ns_loaded; l != NULL;
- l = l->l_next)
+ for (struct link_map_private *l = GL(dl_ns)[ns]._ns_loaded; l != NULL;
+ l = l_next (l))
/* Skip the main map processed above, and proxy maps. */
if (l != main_map && l == l->l_real)
{
@@ -561,7 +561,7 @@ _dl_find_object_init (void)
{
/* Cover the main mapping. */
{
- struct link_map *main_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
+ struct link_map_private *main_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
if (main_map->l_contiguous)
_dl_find_object_from_map (main_map, &_dlfo_main);
@@ -604,7 +604,7 @@ Fatal glibc error: cannot allocate memory for find-object data\n");
}
static void
-_dl_find_object_link_map_sort (struct link_map **loaded, size_t size)
+_dl_find_object_link_map_sort (struct link_map_private **loaded, size_t size)
{
/* Selection sort based on map_start. */
if (size < 2)
@@ -622,7 +622,7 @@ _dl_find_object_link_map_sort (struct link_map **loaded, size_t size)
}
/* Swap into place. */
- struct link_map *tmp = loaded[min_idx];
+ struct link_map_private *tmp = loaded[min_idx];
loaded[min_idx] = loaded[i];
loaded[i] = tmp;
}
@@ -649,7 +649,7 @@ _dlfo_update_init_seg (struct dlfo_mappings_segment *seg,
shared data need to use relaxed MO. But plain loads can be used
because the loader lock prevents concurrent stores. */
static bool
-_dl_find_object_update_1 (struct link_map **loaded, size_t count)
+_dl_find_object_update_1 (struct link_map_private **loaded, size_t count)
{
int active_idx = _dlfo_read_version_locked () & 1;
@@ -781,22 +781,22 @@ _dl_find_object_update_1 (struct link_map **loaded, size_t count)
}
bool
-_dl_find_object_update (struct link_map *new_map)
+_dl_find_object_update (struct link_map_private *new_map)
{
/* Copy the newly-loaded link maps into an array for sorting. */
size_t count = 0;
- for (struct link_map *l = new_map; l != NULL; l = l->l_next)
+ for (struct link_map_private *l = new_map; l != NULL; l = l_next (l))
/* Skip proxy maps and already-processed maps. */
count += l == l->l_real && !l->l_find_object_processed;
if (count == 0)
return true;
- struct link_map **map_array = malloc (count * sizeof (*map_array));
+ struct link_map_private **map_array = malloc (count * sizeof (*map_array));
if (map_array == NULL)
return false;
{
size_t i = 0;
- for (struct link_map *l = new_map; l != NULL; l = l->l_next)
+ for (struct link_map_private *l = new_map; l != NULL; l = l_next (l))
if (l == l->l_real && !l->l_find_object_processed)
map_array[i++] = l;
}
@@ -808,7 +808,7 @@ _dl_find_object_update (struct link_map *new_map)
}
void
-_dl_find_object_dlclose (struct link_map *map)
+_dl_find_object_dlclose (struct link_map_private *map)
{
uint64_t start_version = _dlfo_read_version_locked ();
uintptr_t map_start = map->l_map_start;
diff --git a/elf/dl-find_object.h b/elf/dl-find_object.h
index 87c9460619..edcc0a7755 100644
--- a/elf/dl-find_object.h
+++ b/elf/dl-find_object.h
@@ -28,14 +28,14 @@
/* Internal version of struct dl_find_object. Does not include the
(yet unused) flags member. We need to make a copy of data also in
- struct link_map to support non-contiguous mappings, and to support
- software transactional memory (the link map is not covered by
- transactions). */
+ struct link_map_private to support non-contiguous mappings, and to
+ support software transactional memory (the link map is not covered
+ by transactions). */
struct dl_find_object_internal
{
uintptr_t map_start;
uintptr_t map_end; /* Set to map_start by dlclose. */
- struct link_map *map; /* Set to NULL by dlclose. */
+ struct link_map_private *map; /* Set to NULL by dlclose. */
void *eh_frame;
#if DLFO_STRUCT_HAS_EH_DBASE
void *eh_dbase;
@@ -76,7 +76,7 @@ _dl_find_object_to_external (struct dl_find_object_internal *internal,
external->dlfo_flags = 0;
external->dlfo_map_start = (void *) internal->map_start;
external->dlfo_map_end = (void *) internal->map_end;
- external->dlfo_link_map = internal->map;
+ external->dlfo_link_map = &internal->map->l_public;
external->dlfo_eh_frame = internal->eh_frame;
# if DLFO_STRUCT_HAS_EH_DBASE
external->dlfo_eh_dbase = internal->eh_dbase;
@@ -89,7 +89,7 @@ _dl_find_object_to_external (struct dl_find_object_internal *internal,
/* Extract the object location data from a link map and writes it to
*RESULT using relaxed MO stores. */
static void __attribute__ ((unused))
-_dl_find_object_from_map (struct link_map *l,
+_dl_find_object_from_map (struct link_map_private *l,
struct dl_find_object_internal *result)
{
atomic_store_relaxed (&result->map_start, (uintptr_t) l->l_map_start);
@@ -105,7 +105,7 @@ _dl_find_object_from_map (struct link_map *l,
if (ph->p_type == DLFO_EH_SEGMENT_TYPE)
{
atomic_store_relaxed (&result->eh_frame,
- (void *) (ph->p_vaddr + l->l_addr));
+ (void *) (ph->p_vaddr + l->l_public.l_addr));
#if DLFO_STRUCT_HAS_EH_COUNT
atomic_store_relaxed (&result->eh_count, ph->p_memsz / 8);
#endif
@@ -129,11 +129,11 @@ void _dl_find_object_init (void) attribute_hidden;
the l_next list are added if l_object_processed is 0. Needs to
be protected by loader write lock. Returns true on success, false
on malloc failure. */
-bool _dl_find_object_update (struct link_map *new_map) attribute_hidden;
+bool _dl_find_object_update (struct link_map_private *new_l) attribute_hidden;
/* Called by dlclose to remove the link map from the DWARF EH frame
data structures. Needs to be protected by loader write lock. */
-void _dl_find_object_dlclose (struct link_map *l) attribute_hidden;
+void _dl_find_object_dlclose (struct link_map_private *l) attribute_hidden;
/* Called from __libc_freeres to deallocate malloc'ed memory. */
void _dl_find_object_freeres (void) attribute_hidden;
diff --git a/elf/dl-fini.c b/elf/dl-fini.c
index 9acb64f47c..2e10f1b0b6 100644
--- a/elf/dl-fini.c
+++ b/elf/dl-fini.c
@@ -65,12 +65,12 @@ _dl_fini (void)
/* Now we can allocate an array to hold all the pointers and
copy the pointers in. */
- struct link_map *maps[nloaded];
+ struct link_map_private *maps[nloaded];
unsigned int i;
- struct link_map *l;
+ struct link_map_private *l;
assert (nloaded != 0 || GL(dl_ns)[ns]._ns_loaded == NULL);
- for (l = GL(dl_ns)[ns]._ns_loaded, i = 0; l != NULL; l = l->l_next)
+ for (l = GL(dl_ns)[ns]._ns_loaded, i = 0; l != NULL; l = l_next (l))
/* Do not handle ld.so in secondary namespaces. */
if (l == l->l_real)
{
@@ -107,7 +107,7 @@ _dl_fini (void)
the front. */
for (i = 0; i < nmaps; ++i)
{
- struct link_map *l = maps[i];
+ struct link_map_private *l = maps[i];
if (l->l_init_called)
{
diff --git a/elf/dl-fptr.c b/elf/dl-fptr.c
index 536ad17089..afa626daf7 100644
--- a/elf/dl-fptr.c
+++ b/elf/dl-fptr.c
@@ -164,7 +164,7 @@ make_fdesc (ElfW(Addr) ip, ElfW(Addr) gp)
static inline ElfW(Addr) * __attribute__ ((always_inline))
-make_fptr_table (struct link_map *map)
+make_fptr_table (struct link_map_private *map)
{
const ElfW(Sym) *symtab
= (const void *) D_PTR (map, l_info[DT_SYMTAB]);
@@ -202,7 +202,7 @@ make_fptr_table (struct link_map *map)
ElfW(Addr)
-_dl_make_fptr (struct link_map *map, const ElfW(Sym) *sym,
+_dl_make_fptr (struct link_map_private *map, const ElfW(Sym) *sym,
ElfW(Addr) ip)
{
ElfW(Addr) *ftab = map->l_mach.fptr_table;
@@ -264,7 +264,7 @@ _dl_make_fptr (struct link_map *map, const ElfW(Sym) *sym,
void
-_dl_unmap (struct link_map *map)
+_dl_unmap (struct link_map_private *map)
{
ElfW(Addr) *ftab = map->l_mach.fptr_table;
struct fdesc *head = NULL, *tail = NULL;
diff --git a/elf/dl-init.c b/elf/dl-init.c
index ba4d2fdc85..b220ca9239 100644
--- a/elf/dl-init.c
+++ b/elf/dl-init.c
@@ -23,7 +23,7 @@
static void
-call_init (struct link_map *l, int argc, char **argv, char **env)
+call_init (struct link_map_private *l, int argc, char **argv, char **env)
{
/* Do not run constructors for proxy objects. */
if (l != l->l_real)
@@ -43,21 +43,22 @@ call_init (struct link_map *l, int argc, char **argv, char **env)
l->l_init_called = 1;
/* Check for object which constructors we do not run here. */
- if (__builtin_expect (l->l_name[0], 'a') == '\0'
+ if (__builtin_expect (l->l_public.l_name[0], 'a') == '\0'
&& l->l_type == lt_executable)
return;
/* Print a debug message if wanted. */
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS))
_dl_debug_printf ("\ncalling init: %s\n\n",
- DSO_FILENAME (l->l_name));
+ DSO_FILENAME (l->l_public.l_name));
/* Now run the local constructors. There are two forms of them:
- the one named by DT_INIT
- the others in the DT_INIT_ARRAY.
*/
if (ELF_INITFINI && l->l_info[DT_INIT] != NULL)
- DL_CALL_DT_INIT(l, l->l_addr + l->l_info[DT_INIT]->d_un.d_ptr, argc, argv, env);
+ DL_CALL_DT_INIT(l, l->l_public.l_addr + l->l_info[DT_INIT]->d_un.d_ptr,
+ argc, argv, env);
/* Next see whether there is an array with initialization functions. */
ElfW(Dyn) *init_array = l->l_info[DT_INIT_ARRAY];
@@ -69,7 +70,7 @@ call_init (struct link_map *l, int argc, char **argv, char **env)
jm = l->l_info[DT_INIT_ARRAYSZ]->d_un.d_val / sizeof (ElfW(Addr));
- addrs = (ElfW(Addr) *) (init_array->d_un.d_ptr + l->l_addr);
+ addrs = (ElfW(Addr) *) (init_array->d_un.d_ptr + l->l_public.l_addr);
for (j = 0; j < jm; ++j)
((dl_init_t) addrs[j]) (argc, argv, env);
}
@@ -77,7 +78,7 @@ call_init (struct link_map *l, int argc, char **argv, char **env)
void
-_dl_init (struct link_map *main_map, int argc, char **argv, char **env)
+_dl_init (struct link_map_private *main_map, int argc, char **argv, char **env)
{
ElfW(Dyn) *preinit_array = main_map->l_info[DT_PREINIT_ARRAY];
ElfW(Dyn) *preinit_array_size = main_map->l_info[DT_PREINIT_ARRAYSZ];
@@ -99,9 +100,10 @@ _dl_init (struct link_map *main_map, int argc, char **argv, char **env)
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS))
_dl_debug_printf ("\ncalling preinit: %s\n\n",
- DSO_FILENAME (main_map->l_name));
+ DSO_FILENAME (main_map->l_public.l_name));
- addrs = (ElfW(Addr) *) (preinit_array->d_un.d_ptr + main_map->l_addr);
+ addrs = (ElfW(Addr) *) (preinit_array->d_un.d_ptr
+ + main_map->l_public.l_addr);
for (cnt = 0; cnt < i; ++cnt)
((dl_init_t) addrs[cnt]) (argc, argv, env);
}
diff --git a/elf/dl-iteratephdr.c b/elf/dl-iteratephdr.c
index 2acccc4f83..943f3a74a5 100644
--- a/elf/dl-iteratephdr.c
+++ b/elf/dl-iteratephdr.c
@@ -31,7 +31,7 @@ int
__dl_iterate_phdr (int (*callback) (struct dl_phdr_info *info,
size_t size, void *data), void *data)
{
- struct link_map *l;
+ struct link_map_private *l;
struct dl_phdr_info info;
int ret = 0;
@@ -46,7 +46,8 @@ __dl_iterate_phdr (int (*callback) (struct dl_phdr_info *info,
#ifdef SHARED
const void *caller = RETURN_ADDRESS (0);
for (Lmid_t cnt = GL(dl_nns) - 1; cnt > 0; --cnt)
- for (struct link_map *l = GL(dl_ns)[cnt]._ns_loaded; l; l = l->l_next)
+ for (struct link_map_private *l = GL(dl_ns)[cnt]._ns_loaded; l;
+ l = l_next (l))
{
/* We have to count the total number of loaded objects. */
nloaded += GL(dl_ns)[cnt]._ns_nloaded;
@@ -59,10 +60,10 @@ __dl_iterate_phdr (int (*callback) (struct dl_phdr_info *info,
}
#endif
- for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
+ for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l_next (l))
{
- info.dlpi_addr = l->l_real->l_addr;
- info.dlpi_name = l->l_real->l_name;
+ info.dlpi_addr = l->l_real->l_public.l_addr;
+ info.dlpi_name = l->l_real->l_public.l_name;
info.dlpi_phdr = l->l_real->l_phdr;
info.dlpi_phnum = l->l_real->l_phnum;
info.dlpi_adds = GL(dl_load_adds);
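(Not part of the patch, only for orientation: the fields populated above are exactly what a dl_iterate_phdr callback sees through the documented public API, which is unchanged by this series. A minimal consumer:

#include <link.h>
#include <stdio.h>

static int
print_object (struct dl_phdr_info *info, size_t size, void *data)
{
  /* dlpi_name is "" for the main program.  */
  printf ("%s at %#lx, %u program headers\n",
          info->dlpi_name[0] != '\0' ? info->dlpi_name : "(main program)",
          (unsigned long) info->dlpi_addr,
          (unsigned int) info->dlpi_phnum);
  return 0;
}

/* Call as: dl_iterate_phdr (print_object, NULL);  */)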
diff --git a/elf/dl-libc.c b/elf/dl-libc.c
index 1d0ebd4793..679d95c4ec 100644
--- a/elf/dl-libc.c
+++ b/elf/dl-libc.c
@@ -64,13 +64,13 @@ struct do_dlopen_args
const void *caller_dlopen;
/* Return from do_dlopen. */
- struct link_map *map;
+ struct link_map_private *map;
};
struct do_dlsym_args
{
/* Arguments to do_dlsym. */
- struct link_map *map;
+ struct link_map_private *map;
const char *name;
/* Return values of do_dlsym. */
@@ -122,7 +122,7 @@ do_dlvsym (void *ptr)
static void
do_dlclose (void *ptr)
{
- GLRO(dl_close) ((struct link_map *) ptr);
+ GLRO(dl_close) ((struct link_map_private *) ptr);
}
#ifndef SHARED
@@ -164,7 +164,7 @@ __libc_dlopen_mode (const char *name, int mode)
#ifndef SHARED
void *
-__libc_dlsym_private (struct link_map *map, const char *name)
+__libc_dlsym_private (struct link_map_private *map, const char *name)
{
struct do_dlsym_args sargs;
sargs.map = map;
diff --git a/elf/dl-libc_freeres.c b/elf/dl-libc_freeres.c
index d60c18d359..65fc70837a 100644
--- a/elf/dl-libc_freeres.c
+++ b/elf/dl-libc_freeres.c
@@ -49,7 +49,7 @@ free_slotinfo (struct dtv_slotinfo_list **elemp)
void
__rtld_libc_freeres (void)
{
- struct link_map *l;
+ struct link_map_private *l;
struct r_search_path_elem *d;
/* Remove all search directories. */
@@ -63,7 +63,7 @@ __rtld_libc_freeres (void)
for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
{
- for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
+ for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l_next (l))
{
struct libname_list *lnp = l->l_libname->next;
@@ -91,7 +91,8 @@ __rtld_libc_freeres (void)
{
/* All object dynamically loaded by the program are unloaded. Free
the memory allocated for the global scope variable. */
- struct link_map **old = GL(dl_ns)[ns]._ns_main_searchlist->r_list;
+ struct link_map_private **old
+ = GL(dl_ns)[ns]._ns_main_searchlist->r_list;
/* Put the old map in. */
GL(dl_ns)[ns]._ns_main_searchlist->r_list
diff --git a/elf/dl-load.c b/elf/dl-load.c
index 2faaa44eaf..5c9aaf2eec 100644
--- a/elf/dl-load.c
+++ b/elf/dl-load.c
@@ -244,7 +244,7 @@ is_dst (const char *input, const char *ref)
DT_AUXILIARY, and DT_FILTER entries to have colons, but we treat
those as literal colons here, not as path list delimiters. */
size_t
-_dl_dst_substitute (struct link_map *l, const char *input,
+_dl_dst_substitute (struct link_map_private *l, const char *input,
struct alloc_buffer *result)
{
/* Copy character-by-character from input into the working pointer
@@ -294,7 +294,7 @@ _dl_dst_substitute (struct link_map *l, const char *input,
{
/* For loaded DSOs, the l_origin field is set in
_dl_new_object. */
- assert (l->l_name[0] == '\0');
+ assert (l->l_public.l_name[0] == '\0');
l->l_origin = _dl_get_origin ();
}
repl = l->l_origin;
@@ -370,7 +370,7 @@ _dl_dst_substitute (struct link_map *l, const char *input,
case the path containing the DST is left out. On error NULL
is returned. */
static char *
-expand_dynamic_string_token (struct link_map *l, const char *input)
+expand_dynamic_string_token (struct link_map_private *l, const char *input)
{
struct alloc_buffer buf = {};
size_t size = _dl_dst_substitute (l, input, &buf);
@@ -392,7 +392,7 @@ expand_dynamic_string_token (struct link_map *l, const char *input)
be freed if the shared object already has this name.
Returns false if the object already had this name. */
static void
-add_name_to_object (struct link_map *l, const char *name)
+add_name_to_object (struct link_map_private *l, const char *name)
{
struct libname_list *lnp, *lastp;
struct libname_list *newname;
@@ -427,7 +427,7 @@ static size_t max_dirnamelen;
static struct r_search_path_elem **
fillin_rpath (char *rpath, struct r_search_path_elem **result, const char *sep,
- const char *what, const char *where, struct link_map *l)
+ const char *what, const char *where, struct link_map_private *l)
{
char *cp;
size_t nelems = 0;
@@ -537,10 +537,11 @@ fillin_rpath (char *rpath, struct r_search_path_elem **result, const char *sep,
static bool
decompose_rpath (struct r_search_path_struct *sps,
- const char *rpath, struct link_map *l, const char *what)
+ const char *rpath, struct link_map_private *l,
+ const char *what)
{
/* Make a copy we can work with. */
- const char *where = l->l_name;
+ const char *where = l->l_public.l_name;
char *cp;
struct r_search_path_elem **result;
size_t nelems;
@@ -635,7 +636,7 @@ decompose_rpath (struct r_search_path_struct *sps,
/* Make sure cached path information is stored in *SP
and return true if there are any paths to search there. */
static bool
-cache_rpath (struct link_map *l,
+cache_rpath (struct link_map_private *l,
struct r_search_path_struct *sp,
int tag,
const char *what)
@@ -669,7 +670,7 @@ _dl_init_paths (const char *llp, const char *source,
const char *strp;
struct r_search_path_elem *pelem, **aelem;
size_t round_size;
- struct link_map __attribute__ ((unused)) *l = NULL;
+ struct link_map_private __attribute__ ((unused)) *l = NULL;
/* Initialize to please the compiler. */
const char *errstring = NULL;
@@ -825,9 +826,10 @@ _dl_init_paths (const char *llp, const char *source,
the fd used for loading module L. */
void
-_dl_process_pt_gnu_property (struct link_map *l, int fd, const ElfW(Phdr) *ph)
+_dl_process_pt_gnu_property (struct link_map_private *l, int fd,
+ const ElfW(Phdr) *ph)
{
- const ElfW(Nhdr) *note = (const void *) (ph->p_vaddr + l->l_addr);
+ const ElfW(Nhdr) *note = (const void *) (ph->p_vaddr + l->l_public.l_addr);
const ElfW(Addr) size = ph->p_memsz;
const ElfW(Addr) align = ph->p_align;
@@ -897,13 +899,13 @@ _dl_process_pt_gnu_property (struct link_map *l, int fd, const ElfW(Phdr) *ph)
#ifndef EXTERNAL_MAP_FROM_FD
static
#endif
-struct link_map *
+struct link_map_private *
_dl_map_object_from_fd (const char *name, const char *origname, int fd,
struct filebuf *fbp, char *realname,
- struct link_map *loader, int l_type, int mode,
+ struct link_map_private *loader, int l_type, int mode,
void **stack_endp, Lmid_t nsid)
{
- struct link_map *l = NULL;
+ struct link_map_private *l = NULL;
const ElfW(Ehdr) *header;
const ElfW(Phdr) *phdr;
const ElfW(Phdr) *ph;
@@ -947,7 +949,7 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
}
/* Look again to see if the real name matched another already loaded. */
- for (l = GL(dl_ns)[nsid]._ns_loaded; l != NULL; l = l->l_next)
+ for (l = GL(dl_ns)[nsid]._ns_loaded; l != NULL; l = l_next (l))
if (!l->l_removed && _dl_file_id_match_p (&l->l_file_id, &id))
{
/* The object is already loaded.
@@ -980,8 +982,8 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
l->l_real = &GL(dl_rtld_map);
/* Copy l_addr and l_ld to avoid a GDB warning with dlmopen(). */
- l->l_addr = l->l_real->l_addr;
- l->l_ld = l->l_real->l_ld;
+ l->l_public.l_addr = l->l_real->l_public.l_addr;
+ l->l_public.l_ld = l->l_real->l_public.l_ld;
/* No need to bump the refcount of the real object, ld.so will
never be unloaded. */
@@ -1055,9 +1057,9 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
ElfW(Addr) p_align_max = 0;
/* The struct is initialized to zero so this is not necessary:
- l->l_ld = 0;
+ l->l_public.l_ld = 0;
l->l_phdr = 0;
- l->l_addr = 0; */
+ l->l_public.l_addr = 0; */
for (ph = phdr; ph < &phdr[l->l_phnum]; ++ph)
switch (ph->p_type)
{
@@ -1072,7 +1074,7 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
/* Debuginfo only files from "objcopy --only-keep-debug"
contain a PT_DYNAMIC segment with p_filesz == 0. Skip
such a segment to avoid a crash later. */
- l->l_ld = (void *) ph->p_vaddr;
+ l->l_public.l_ld = (void *) ph->p_vaddr;
l->l_ldnum = ph->p_memsz / sizeof (ElfW(Dyn));
l->l_ld_readonly = (ph->p_flags & PF_W) == 0;
}
@@ -1159,7 +1161,8 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
/* We are loading the executable itself when the dynamic
linker was executed directly. The setup will happen
later. */
- assert (l->l_prev == NULL || (mode & __RTLD_AUDIT) != 0);
+ assert (l->l_public.l_prev == NULL
+ || (mode & __RTLD_AUDIT) != 0);
#else
assert (false && "TLS not initialized in static application");
#endif
@@ -1203,7 +1206,8 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
}
/* This check recognizes most separate debuginfo files. */
- if (__glibc_unlikely ((l->l_ld == 0 && type == ET_DYN) || empty_dynamic))
+ if (__glibc_unlikely ((l->l_public.l_ld == 0 && type == ET_DYN)
+ || empty_dynamic))
{
errstring = N_("object file has no dynamic section");
goto lose;
@@ -1226,8 +1230,9 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
}
}
- if (l->l_ld != 0)
- l->l_ld = (ElfW(Dyn) *) ((ElfW(Addr)) l->l_ld + l->l_addr);
+ if (l->l_public.l_ld != 0)
+ l->l_public.l_ld = (ElfW(Dyn) *) ((ElfW(Addr)) l->l_public.l_ld
+ + l->l_public.l_addr);
elf_get_dynamic_info (l, false, false);
@@ -1265,7 +1270,7 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
}
else
/* Adjust the PT_PHDR value by the runtime load address. */
- l->l_phdr = (ElfW(Phdr) *) ((ElfW(Addr)) l->l_phdr + l->l_addr);
+ l->l_phdr = (ElfW(Phdr) *) ((ElfW(Addr)) l->l_phdr + l->l_public.l_addr);
if (__glibc_unlikely ((stack_flags &~ GL(dl_stack_flags)) & PF_X))
{
@@ -1279,8 +1284,8 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
const uintptr_t p = (uintptr_t) &__stack_prot & -GLRO(dl_pagesize);
const size_t s = (uintptr_t) (&__stack_prot + 1) - p;
- struct link_map *const m = &GL(dl_rtld_map);
- const uintptr_t relro_end = ((m->l_addr + m->l_relro_addr
+ struct link_map_private *const m = &GL(dl_rtld_map);
+ const uintptr_t relro_end = ((m->l_public.l_addr + m->l_relro_addr
+ m->l_relro_size)
& -GLRO(dl_pagesize));
if (__glibc_likely (p + s <= relro_end))
@@ -1320,7 +1325,7 @@ cannot enable executable stack as shared object requires");
/* Adjust the address of the TLS initialization image. */
if (l->l_tls_initimage != NULL)
- l->l_tls_initimage = (char *) l->l_tls_initimage + l->l_addr;
+ l->l_tls_initimage = (char *) l->l_tls_initimage + l->l_public.l_addr;
/* Process program headers again after load segments are mapped in
case processing requires accessing those segments. Scan program
@@ -1353,16 +1358,16 @@ cannot enable executable stack as shared object requires");
/* If this is ET_EXEC, we should have loaded it as lt_executable. */
assert (type != ET_EXEC || l->l_type == lt_executable);
- l->l_entry += l->l_addr;
+ l->l_entry += l->l_public.l_addr;
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("\
dynamic: 0x%0*lx base: 0x%0*lx size: 0x%0*zx\n\
entry: 0x%0*lx phdr: 0x%0*lx phnum: %*u\n\n",
(int) sizeof (void *) * 2,
- (unsigned long int) l->l_ld,
+ (unsigned long int) l->l_public.l_ld,
(int) sizeof (void *) * 2,
- (unsigned long int) l->l_addr,
+ (unsigned long int) l->l_public.l_addr,
(int) sizeof (void *) * 2, maplength,
(int) sizeof (void *) * 2,
(unsigned long int) l->l_entry,
@@ -1531,7 +1536,7 @@ print_search_path (struct r_search_path_elem **list,
In that case, FD is consumed for both successful and error returns. */
static int
open_verify (const char *name, int fd,
- struct filebuf *fbp, struct link_map *loader,
+ struct filebuf *fbp, struct link_map_private *loader,
int whatcode, int mode, bool *found_other_class, bool free_name)
{
/* This is the expected ELF header. */
@@ -1747,7 +1752,7 @@ open_verify (const char *name, int fd,
static int
open_path (const char *name, size_t namelen, int mode,
struct r_search_path_struct *sps, char **realname,
- struct filebuf *fbp, struct link_map *loader, int whatcode,
+ struct filebuf *fbp, struct link_map_private *loader, int whatcode,
bool *found_other_class)
{
struct r_search_path_elem **dirs = sps->dirs;
@@ -1901,22 +1906,22 @@ open_path (const char *name, size_t namelen, int mode,
/* Map in the shared object file NAME. */
-struct link_map *
-_dl_map_object (struct link_map *loader, const char *name,
+struct link_map_private *
+_dl_map_object (struct link_map_private *loader, const char *name,
int type, int trace_mode, int mode, Lmid_t nsid)
{
int fd;
const char *origname = NULL;
char *realname;
char *name_copy;
- struct link_map *l;
+ struct link_map_private *l;
struct filebuf fb;
assert (nsid >= 0);
assert (nsid < GL(dl_nns));
/* Look for this name among those already loaded. */
- for (l = GL(dl_ns)[nsid]._ns_loaded; l; l = l->l_next)
+ for (l = GL(dl_ns)[nsid]._ns_loaded; l; l = l_next (l))
{
/* If the requested name matches the soname of a loaded object,
use that object. Elide this check for names that have not
@@ -1951,7 +1956,8 @@ _dl_map_object (struct link_map *loader, const char *name,
_dl_debug_printf ((mode & __RTLD_CALLMAP) == 0
? "\nfile=%s [%lu]; needed by %s [%lu]\n"
: "\nfile=%s [%lu]; dynamically loaded by %s [%lu]\n",
- name, nsid, DSO_FILENAME (loader->l_name), loader->l_ns);
+ name, nsid, DSO_FILENAME (loader->l_public.l_name),
+ loader->l_ns);
#ifdef SHARED
/* Give the auditing libraries a chance to change the name before we
@@ -1990,7 +1996,7 @@ _dl_map_object (struct link_map *loader, const char *name,
{
/* This is the executable's map (if there is one). Make sure that
we do not look at it twice. */
- struct link_map *main_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
+ struct link_map_private *main_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
bool did_main_map = false;
/* First try the DT_RPATH of the dependent object that caused NAME
@@ -2239,7 +2245,8 @@ add_path (struct add_path_state *p, const struct r_search_path_struct *sps,
}
void
-_dl_rtld_di_serinfo (struct link_map *loader, Dl_serinfo *si, bool counting)
+_dl_rtld_di_serinfo (struct link_map_private *loader, Dl_serinfo *si,
+ bool counting)
{
if (counting)
{
@@ -2263,7 +2270,7 @@ _dl_rtld_di_serinfo (struct link_map *loader, Dl_serinfo *si, bool counting)
/* First try the DT_RPATH of the dependent object that caused NAME
to be loaded. Then that object's dependent, and on up. */
- struct link_map *l = loader;
+ struct link_map_private *l = loader;
do
{
if (cache_rpath (l, &l->l_rpath_dirs, DT_RPATH, "RPATH"))
diff --git a/elf/dl-load.h b/elf/dl-load.h
index 1d5207694b..78d28627a6 100644
--- a/elf/dl-load.h
+++ b/elf/dl-load.h
@@ -85,7 +85,7 @@ struct loadcmd
load command, some time after L->l_addr has been set correctly. It is
responsible for setting the l_phdr fields */
static __always_inline void
-_dl_postprocess_loadcmd (struct link_map *l, const ElfW(Ehdr) *header,
+_dl_postprocess_loadcmd (struct link_map_private *l, const ElfW(Ehdr) *header,
const struct loadcmd *c)
{
if (l->l_phdr == 0
@@ -110,13 +110,13 @@ _dl_postprocess_loadcmd (struct link_map *l, const ElfW(Ehdr) *header,
The file <dl-map-segments.h> defines this function. The canonical
implementation in elf/dl-map-segments.h might be replaced by a sysdeps
version. */
-static const char *_dl_map_segments (struct link_map *l, int fd,
+static const char *_dl_map_segments (struct link_map_private *l, int fd,
const ElfW(Ehdr) *header, int type,
const struct loadcmd loadcmds[],
size_t nloadcmds,
const size_t maplength,
bool has_holes,
- struct link_map *loader);
+ struct link_map_private *loader);
/* All the error message strings _dl_map_segments might return are
listed here so that different implementations in different sysdeps
diff --git a/elf/dl-lookup-direct.c b/elf/dl-lookup-direct.c
index 2c13e92961..35502464c5 100644
--- a/elf/dl-lookup-direct.c
+++ b/elf/dl-lookup-direct.c
@@ -25,7 +25,8 @@
variant here is simplified because it requires symbol
versioning. */
static const ElfW(Sym) *
-check_match (const struct link_map *const map, const char *const undef_name,
+check_match (const struct link_map_private *const map,
+ const char *const undef_name,
const char *version, uint32_t version_hash,
const Elf_Symndx symidx)
{
@@ -68,7 +69,7 @@ check_match (const struct link_map *const map, const char *const undef_name,
variant here is simplified because it does not search object
dependencies. It is optimized for a successful lookup. */
const ElfW(Sym) *
-_dl_lookup_direct (struct link_map *map,
+_dl_lookup_direct (struct link_map_private *map,
const char *undef_name, uint32_t new_hash,
const char *version, uint32_t version_hash)
{
diff --git a/elf/dl-lookup.c b/elf/dl-lookup.c
index f889473378..d3c705811c 100644
--- a/elf/dl-lookup.c
+++ b/elf/dl-lookup.c
@@ -38,7 +38,7 @@
struct sym_val
{
const ElfW(Sym) *s;
- struct link_map *m;
+ struct link_map_private *m;
};
@@ -64,7 +64,7 @@ check_match (const char *const undef_name,
const ElfW(Sym) *const sym,
const Elf_Symndx symidx,
const char *const strtab,
- const struct link_map *const map,
+ const struct link_map_private *const map,
const ElfW(Sym) **const versioned_sym,
int *const num_versions)
{
@@ -152,7 +152,7 @@ check_match (const char *const undef_name,
static void
enter_unique_sym (struct unique_sym *table, size_t size,
unsigned int hash, const char *name,
- const ElfW(Sym) *sym, const struct link_map *map)
+ const ElfW(Sym) *sym, const struct link_map_private *map)
{
size_t idx = hash % size;
size_t hash2 = 1 + hash % (size - 2);
@@ -172,7 +172,7 @@ enter_unique_sym (struct unique_sym *table, size_t size,
/* Mark MAP as NODELETE according to the lookup mode in FLAGS. During
initial relocation, NODELETE state is pending only. */
static void
-mark_nodelete (struct link_map *map, int flags)
+mark_nodelete (struct link_map_private *map, int flags)
{
if (flags & DL_LOOKUP_FOR_RELOCATE)
map->l_nodelete_pending = true;
@@ -183,7 +183,7 @@ mark_nodelete (struct link_map *map, int flags)
/* Return true if MAP is marked as NODELETE according to the lookup
mode in FLAGS> */
static bool
-is_nodelete (struct link_map *map, int flags)
+is_nodelete (struct link_map_private *map, int flags)
{
/* Non-pending NODELETE always counts. Pending NODELETE only counts
during initial relocation processing. */
@@ -196,9 +196,10 @@ is_nodelete (struct link_map *map, int flags)
Return the matching symbol in RESULT. */
static void
do_lookup_unique (const char *undef_name, unsigned int new_hash,
- struct link_map *map, struct sym_val *result,
+ struct link_map_private *map, struct sym_val *result,
int type_class, const ElfW(Sym) *sym, const char *strtab,
- const ElfW(Sym) *ref, const struct link_map *undef_map,
+ const ElfW(Sym) *ref,
+ const struct link_map_private *undef_map,
int flags)
{
/* We have to determine whether we already found a symbol with this
@@ -232,7 +233,7 @@ do_lookup_unique (const char *undef_name, unsigned int new_hash,
else
{
result->s = entries[idx].sym;
- result->m = (struct link_map *) entries[idx].map;
+ result->m = (struct link_map_private *) entries[idx].map;
}
__rtld_lock_unlock_recursive (tab->lock);
return;
@@ -309,7 +310,7 @@ do_lookup_unique (const char *undef_name, unsigned int new_hash,
if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_BINDINGS))
_dl_debug_printf ("\
marking %s [%lu] as NODELETE due to unique symbol\n",
- map->l_name, map->l_ns);
+ map->l_public.l_name, map->l_ns);
mark_nodelete (map, flags);
}
}
@@ -318,7 +319,7 @@ marking %s [%lu] as NODELETE due to unique symbol\n",
__rtld_lock_unlock_recursive (tab->lock);
result->s = sym;
- result->m = (struct link_map *) map;
+ result->m = (struct link_map_private *) map;
}
/* Inner part of the lookup functions. We return a value > 0 if we
@@ -330,7 +331,8 @@ do_lookup_x (const char *undef_name, unsigned int new_hash,
unsigned long int *old_hash, const ElfW(Sym) *ref,
struct sym_val *result, struct r_scope_elem *scope, size_t i,
const struct r_found_version *const version, int flags,
- struct link_map *skip, int type_class, struct link_map *undef_map)
+ struct link_map_private *skip, int type_class,
+ struct link_map_private *undef_map)
{
size_t n = scope->r_nlist;
/* Make sure we read the value before proceeding. Otherwise we
@@ -338,11 +340,11 @@ do_lookup_x (const char *undef_name, unsigned int new_hash,
the value after a resize. That is the only path in dl-open.c not
protected by GSCOPE. A read barrier here might be to expensive. */
__asm volatile ("" : "+r" (n), "+m" (scope->r_list));
- struct link_map **list = scope->r_list;
+ struct link_map_private **list = scope->r_list;
do
{
- const struct link_map *map = list[i]->l_real;
+ const struct link_map_private *map = list[i]->l_real;
/* Here come the extra test needed for `_dl_lookup_symbol_skip'. */
if (map == skip)
@@ -376,7 +378,7 @@ do_lookup_x (const char *undef_name, unsigned int new_hash,
/* Print some debugging info if wanted. */
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SYMBOLS))
_dl_debug_printf ("symbol=%s; lookup in file=%s [%lu]\n",
- undef_name, DSO_FILENAME (map->l_name),
+ undef_name, DSO_FILENAME (map->l_public.l_name),
map->l_ns);
/* If the hash table is empty there is nothing to do here. */
@@ -476,7 +478,7 @@ do_lookup_x (const char *undef_name, unsigned int new_hash,
if (! result->s)
{
result->s = sym;
- result->m = (struct link_map *) map;
+ result->m = (struct link_map_private *) map;
}
break;
}
@@ -484,11 +486,11 @@ do_lookup_x (const char *undef_name, unsigned int new_hash,
case STB_GLOBAL:
/* Global definition. Just what we need. */
result->s = sym;
- result->m = (struct link_map *) map;
+ result->m = (struct link_map_private *) map;
return 1;
case STB_GNU_UNIQUE:;
- do_lookup_unique (undef_name, new_hash, (struct link_map *) map,
+ do_lookup_unique (undef_name, new_hash, (struct link_map_private *) map,
result, type_class, sym, strtab, ref,
undef_map, flags);
return 1;
@@ -511,9 +513,10 @@ skip:
/* Add extra dependency on MAP to UNDEF_MAP. */
static int
-add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
+add_dependency (struct link_map_private *undef_map,
+ struct link_map_private *map, int flags)
{
- struct link_map *runp;
+ struct link_map_private *runp;
unsigned int i;
int result = 0;
@@ -539,7 +542,8 @@ add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
/* Determine whether UNDEF_MAP already has a reference to MAP. First
look in the normal dependencies. */
- struct link_map **l_initfini = atomic_forced_read (undef_map->l_initfini);
+ struct link_map_private **l_initfini
+ = atomic_forced_read (undef_map->l_initfini);
if (l_initfini != NULL)
{
for (i = 0; l_initfini[i] != NULL; ++i)
@@ -552,7 +556,7 @@ add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
unsigned int l_reldepsact = 0;
if (l_reldeps != NULL)
{
- struct link_map **list = &l_reldeps->list[0];
+ struct link_map_private **list = &l_reldeps->list[0];
l_reldepsact = l_reldeps->act;
for (i = 0; i < l_reldepsact; ++i)
if (list[i] == map)
@@ -595,7 +599,7 @@ add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
{
if (undef_map->l_reldeps != l_reldeps)
{
- struct link_map **list = &undef_map->l_reldeps->list[0];
+ struct link_map_private **list = &undef_map->l_reldeps->list[0];
l_reldepsact = undef_map->l_reldeps->act;
for (i = 0; i < l_reldepsact; ++i)
if (list[i] == map)
@@ -603,7 +607,7 @@ add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
}
else if (undef_map->l_reldeps->act > l_reldepsact)
{
- struct link_map **list
+ struct link_map_private **list
= &undef_map->l_reldeps->list[0];
i = l_reldepsact;
l_reldepsact = undef_map->l_reldeps->act;
@@ -623,7 +627,7 @@ add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
definition. */
runp = GL(dl_ns)[undef_map->l_ns]._ns_loaded;
while (runp != NULL && runp != map)
- runp = runp->l_next;
+ runp = l_next (runp);
if (runp != NULL)
{
@@ -647,15 +651,15 @@ add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_BINDINGS)
&& !is_nodelete (map, flags))
{
- if (undef_map->l_name[0] == '\0')
+ if (undef_map->l_public.l_name[0] == '\0')
_dl_debug_printf ("\
marking %s [%lu] as NODELETE due to reference from main program\n",
- map->l_name, map->l_ns);
+ map->l_public.l_name, map->l_ns);
else
_dl_debug_printf ("\
marking %s [%lu] as NODELETE due to reference from %s [%lu]\n",
- map->l_name, map->l_ns,
- undef_map->l_name, undef_map->l_ns);
+ map->l_public.l_name, map->l_ns,
+ undef_map->l_public.l_name, undef_map->l_ns);
}
mark_nodelete (map, flags);
goto out;
@@ -675,7 +679,8 @@ marking %s [%lu] as NODELETE due to reference from %s [%lu]\n",
RTLD_PREPARE_FOREIGN_CALL;
#endif
- newp = malloc (sizeof (*newp) + max * sizeof (struct link_map *));
+ newp = malloc (sizeof (*newp)
+ + max * sizeof (struct link_map_private *));
if (newp == NULL)
{
/* If we didn't manage to allocate memory for the list this is
@@ -686,7 +691,7 @@ marking %s [%lu] as NODELETE due to reference from %s [%lu]\n",
&& !is_nodelete (map, flags))
_dl_debug_printf ("\
marking %s [%lu] as NODELETE due to memory allocation failure\n",
- map->l_name, map->l_ns);
+ map->l_public.l_name, map->l_ns);
/* In case of non-lazy binding, we could actually report
the memory allocation error, but for now, we use the
conservative approximation as well. */
@@ -697,7 +702,7 @@ marking %s [%lu] as NODELETE due to memory allocation failure\n",
{
if (l_reldepsact)
memcpy (&newp->list[0], &undef_map->l_reldeps->list[0],
- l_reldepsact * sizeof (struct link_map *));
+ l_reldepsact * sizeof (struct link_map_private *));
newp->list[l_reldepsact] = map;
newp->act = l_reldepsact + 1;
atomic_write_barrier ();
@@ -719,9 +724,9 @@ marking %s [%lu] as NODELETE due to memory allocation failure\n",
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("\
\nfile=%s [%lu]; needed by %s [%lu] (relocation dependency)\n\n",
- DSO_FILENAME (map->l_name),
+ DSO_FILENAME (map->l_public.l_name),
map->l_ns,
- DSO_FILENAME (undef_map->l_name),
+ DSO_FILENAME (undef_map->l_public.l_name),
undef_map->l_ns);
}
else
@@ -751,11 +756,11 @@ marking %s [%lu] as NODELETE due to memory allocation failure\n",
or in any function which gets called. If this would happen the audit
code might create a thread which can throw off all the scope locking. */
lookup_t
-_dl_lookup_symbol_x (const char *undef_name, struct link_map *undef_map,
- const ElfW(Sym) **ref,
+_dl_lookup_symbol_x (const char *undef_name,
+ struct link_map_private *undef_map, const ElfW(Sym) **ref,
struct r_scope_elem *symbol_scope[],
- const struct r_found_version *version,
- int type_class, int flags, struct link_map *skip_map)
+ const struct r_found_version *version, int type_class,
+ int flags, struct link_map_private *skip_map)
{
const unsigned int new_hash = _dl_new_hash (undef_name);
unsigned long int old_hash = 0xffffffff;
@@ -787,7 +792,8 @@ _dl_lookup_symbol_x (const char *undef_name, struct link_map *undef_map,
&& !(GLRO(dl_debug_mask) & DL_DEBUG_UNUSED))
{
/* We could find no value for a strong reference. */
- const char *reference_name = undef_map ? undef_map->l_name : "";
+ const char *reference_name
+ = undef_map ? undef_map->l_public.l_name : "";
const char *versionstr = version ? ", version " : "";
const char *versionname = (version && version->name
? version->name : "");
@@ -859,12 +865,12 @@ _dl_lookup_symbol_x (const char *undef_name, struct link_map *undef_map,
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_BINDINGS))
{
- const char *reference_name = undef_map->l_name;
+ const char *reference_name = undef_map->l_public.l_name;
_dl_debug_printf ("binding file %s [%lu] to %s [%lu]: %s symbol `%s'",
DSO_FILENAME (reference_name),
undef_map->l_ns,
- DSO_FILENAME (current_value.m->l_name),
+ DSO_FILENAME (current_value.m->l_public.l_name),
current_value.m->l_ns,
protected ? "protected" : "normal", undef_name);
if (version)
diff --git a/elf/dl-machine-reject-phdr.h b/elf/dl-machine-reject-phdr.h
index f9a8e31e4e..391dda0883 100644
--- a/elf/dl-machine-reject-phdr.h
+++ b/elf/dl-machine-reject-phdr.h
@@ -25,8 +25,8 @@
host. */
static inline bool
elf_machine_reject_phdr_p (const ElfW(Phdr) *phdr, unsigned int phnum,
- const char *buf, size_t len, struct link_map *map,
- int fd)
+ const char *buf, size_t len,
+ struct link_map_private *map, int fd)
{
return false;
}
diff --git a/elf/dl-map-segments.h b/elf/dl-map-segments.h
index ac10182d58..4ae4ce0a0a 100644
--- a/elf/dl-map-segments.h
+++ b/elf/dl-map-segments.h
@@ -72,11 +72,11 @@ _dl_map_segment (const struct loadcmd *c, ElfW(Addr) mappref,
other use of those parts of the address space). */
static __always_inline const char *
-_dl_map_segments (struct link_map *l, int fd,
+_dl_map_segments (struct link_map_private *l, int fd,
const ElfW(Ehdr) *header, int type,
const struct loadcmd loadcmds[], size_t nloadcmds,
const size_t maplength, bool has_holes,
- struct link_map *loader)
+ struct link_map_private *loader)
{
const struct loadcmd *c = loadcmds;
@@ -103,7 +103,7 @@ _dl_map_segments (struct link_map *l, int fd,
return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
l->l_map_end = l->l_map_start + maplength;
- l->l_addr = l->l_map_start - c->mapstart;
+ l->l_public.l_addr = l->l_map_start - c->mapstart;
if (has_holes)
{
@@ -116,7 +116,7 @@ _dl_map_segments (struct link_map *l, int fd,
c->mapend))
return N_("ELF load command address/offset not page-aligned");
if (__glibc_unlikely
- (__mprotect ((caddr_t) (l->l_addr + c->mapend),
+ (__mprotect ((caddr_t) (l->l_public.l_addr + c->mapend),
loadcmds[nloadcmds - 1].mapstart - c->mapend,
PROT_NONE) < 0))
return DL_MAP_SEGMENTS_ERROR_MPROTECT;
@@ -128,7 +128,7 @@ _dl_map_segments (struct link_map *l, int fd,
}
/* Remember which part of the address space this object uses. */
- l->l_map_start = c->mapstart + l->l_addr;
+ l->l_map_start = c->mapstart + l->l_public.l_addr;
l->l_map_end = l->l_map_start + maplength;
l->l_contiguous = !has_holes;
@@ -136,7 +136,7 @@ _dl_map_segments (struct link_map *l, int fd,
{
if (c->mapend > c->mapstart
/* Map the segment contents from the file. */
- && (__mmap ((void *) (l->l_addr + c->mapstart),
+ && (__mmap ((void *) (l->l_public.l_addr + c->mapstart),
c->mapend - c->mapstart, c->prot,
MAP_FIXED|MAP_COPY|MAP_FILE,
fd, c->mapoff)
@@ -152,8 +152,8 @@ _dl_map_segments (struct link_map *l, int fd,
after the data mapped from the file. */
ElfW(Addr) zero, zeroend, zeropage;
- zero = l->l_addr + c->dataend;
- zeroend = l->l_addr + c->allocend;
+ zero = l->l_public.l_addr + c->dataend;
+ zeroend = l->l_public.l_addr + c->allocend;
zeropage = ((zero + GLRO(dl_pagesize) - 1)
& ~(GLRO(dl_pagesize) - 1));
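(One orientation note while reading the l_public.l_addr substitutions in this file: l_addr holds the load bias established a few lines earlier as l_map_start - c->mapstart, and every file-relative address in the object is relocated by adding it. A trivial illustrative helper under that reading, with an invented name and not part of the patch:

/* Illustrative only: relocate a file-relative address (a p_vaddr,
   DT_INIT, l_ld, ...) by the object's load bias, which the code above
   stores in l->l_public.l_addr.  */
static inline ElfW(Addr)
relocate_by_load_bias (const struct link_map_private *l, ElfW(Addr) vaddr)
{
  return vaddr + l->l_public.l_addr;
})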
diff --git a/elf/dl-minimal.c b/elf/dl-minimal.c
index 578bf84c20..31c2f7fbb0 100644
--- a/elf/dl-minimal.c
+++ b/elf/dl-minimal.c
@@ -56,7 +56,7 @@ __rtld_malloc_is_complete (void)
/* Lookup NAME at VERSION in the scope of MATCH. */
static void *
-lookup_malloc_symbol (struct link_map *main_map, const char *name,
+lookup_malloc_symbol (struct link_map_private *main_map, const char *name,
struct r_found_version *version)
{
@@ -72,7 +72,7 @@ lookup_malloc_symbol (struct link_map *main_map, const char *name,
}
void
-__rtld_malloc_init_real (struct link_map *main_map)
+__rtld_malloc_init_real (struct link_map_private *main_map)
{
/* We cannot use relocations and initializers for this because the
changes made by __rtld_malloc_init_stubs break REL-style
diff --git a/elf/dl-misc.c b/elf/dl-misc.c
index e998083284..37c8a4dee9 100644
--- a/elf/dl-misc.c
+++ b/elf/dl-misc.c
@@ -64,9 +64,9 @@ _dl_sysdep_read_whole_file (const char *file, size_t *sizep, int prot)
/* Test whether given NAME matches any of the names of the given object. */
int
-_dl_name_match_p (const char *name, const struct link_map *map)
+_dl_name_match_p (const char *name, const struct link_map_private *map)
{
- if (strcmp (name, map->l_name) == 0)
+ if (strcmp (name, map->l_public.l_name) == 0)
return 1;
struct libname_list *runp = map->l_libname;
diff --git a/elf/dl-object.c b/elf/dl-object.c
index f1f2ec956c..3e06e22ab2 100644
--- a/elf/dl-object.c
+++ b/elf/dl-object.c
@@ -27,19 +27,19 @@
/* Add the new link_map NEW to the end of the namespace list. */
void
-_dl_add_to_namespace_list (struct link_map *new, Lmid_t nsid)
+_dl_add_to_namespace_list (struct link_map_private *new, Lmid_t nsid)
{
/* We modify the list of loaded objects. */
__rtld_lock_lock_recursive (GL(dl_load_write_lock));
if (GL(dl_ns)[nsid]._ns_loaded != NULL)
{
- struct link_map *l = GL(dl_ns)[nsid]._ns_loaded;
- while (l->l_next != NULL)
- l = l->l_next;
- new->l_prev = l;
+ struct link_map_private *l = GL(dl_ns)[nsid]._ns_loaded;
+ while (l_next (l) != NULL)
+ l = l_next (l);
+ new->l_public.l_prev = &l->l_public;
/* new->l_next = NULL; Would be necessary but we use calloc. */
- l->l_next = new;
+ l->l_public.l_next = &new->l_public;
}
else
GL(dl_ns)[nsid]._ns_loaded = new;
@@ -51,11 +51,11 @@ _dl_add_to_namespace_list (struct link_map *new, Lmid_t nsid)
}
-/* Allocate a `struct link_map' for a new object being loaded,
+/* Allocate a `struct link_map_private' for a new object being loaded,
and enter it into the _dl_loaded list. */
-struct link_map *
+struct link_map_private *
_dl_new_object (char *realname, const char *libname, int type,
- struct link_map *loader, int mode, Lmid_t nsid)
+ struct link_map_private *loader, int mode, Lmid_t nsid)
{
#ifdef SHARED
unsigned int naudit;
@@ -81,7 +81,7 @@ _dl_new_object (char *realname, const char *libname, int type,
#endif
size_t libname_len = strlen (libname) + 1;
- struct link_map *new;
+ struct link_map_private *new;
struct libname_list *newname;
#ifdef SHARED
size_t audit_space = naudit * sizeof (struct auditstate);
@@ -89,15 +89,15 @@ _dl_new_object (char *realname, const char *libname, int type,
# define audit_space 0
#endif
- new = (struct link_map *) calloc (sizeof (*new) + audit_space
- + sizeof (struct link_map *)
- + sizeof (*newname) + libname_len, 1);
+ new = calloc (sizeof (*new) + audit_space
+ + sizeof (struct link_map_private *)
+ + sizeof (*newname) + libname_len, 1);
if (new == NULL)
return NULL;
new->l_real = new;
- new->l_symbolic_searchlist.r_list = (struct link_map **) ((char *) (new + 1)
- + audit_space);
+ new->l_symbolic_searchlist.r_list
+ = (struct link_map_private **) ((char *) (new + 1) + audit_space);
new->l_libname = newname
= (struct libname_list *) (new->l_symbolic_searchlist.r_list + 1);
@@ -120,9 +120,9 @@ _dl_new_object (char *realname, const char *libname, int type,
#else
if (*realname != '\0')
#endif
- new->l_name = realname;
+ new->l_public.l_name = realname;
else
- new->l_name = (char *) newname->name + libname_len - 1;
+ new->l_public.l_name = (char *) newname->name + libname_len - 1;
new->l_type = type;
/* If we set the bit now since we know it is never used we avoid
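(A note on the single calloc in _dl_new_object above, since the pointer arithmetic is easy to lose track of; this layout is inferred from the code, not taken from the patch text:

/* Sketch of the one allocation made by _dl_new_object:

   new                    struct link_map_private
   (char *) (new + 1)     naudit * struct auditstate (SHARED builds only)
   ... + audit_space      one struct link_map_private *, installed as
                          l_symbolic_searchlist.r_list
   r_list + 1             struct libname_list (newname, also l_libname)
   after that             copy of LIBNAME, NUL-terminated

   When REALNAME is empty, l_public.l_name is pointed at the terminating
   NUL of that embedded copy, so it is always a valid (empty) string
   rather than a null pointer.  */)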
diff --git a/elf/dl-open.c b/elf/dl-open.c
index 9a16b01838..542889a6b8 100644
--- a/elf/dl-open.c
+++ b/elf/dl-open.c
@@ -50,7 +50,7 @@ struct dl_open_args
int mode;
/* This is the caller of the dlopen() function. */
const void *caller_dlopen;
- struct link_map *map;
+ struct link_map_private *map;
/* Namespace ID. */
Lmid_t nsid;
@@ -77,7 +77,7 @@ struct dl_open_args
/* Called in case the global scope cannot be extended. */
static void __attribute__ ((noreturn))
-add_to_global_resize_failure (struct link_map *new)
+add_to_global_resize_failure (struct link_map_private *new)
{
_dl_signal_error (ENOMEM, new->l_libname->name, NULL,
N_ ("cannot extend global scope"));
@@ -88,7 +88,7 @@ add_to_global_resize_failure (struct link_map *new)
risk of memory allocation failure. add_to_global_resize raises
exceptions for memory allocation errors. */
static void
-add_to_global_resize (struct link_map *new)
+add_to_global_resize (struct link_map_private *new)
{
struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];
@@ -145,16 +145,17 @@ add_to_global_resize (struct link_map *new)
if (new_size > 0)
{
size_t allocation_size;
- if (__builtin_mul_overflow (new_size, sizeof (struct link_map *),
+ if (__builtin_mul_overflow (new_size, sizeof (struct link_map_private *),
&allocation_size))
add_to_global_resize_failure (new);
- struct link_map **new_global = malloc (allocation_size);
+ struct link_map_private **new_global = malloc (allocation_size);
if (new_global == NULL)
add_to_global_resize_failure (new);
/* Copy over the old entries. */
memcpy (new_global, ns->_ns_main_searchlist->r_list,
- ns->_ns_main_searchlist->r_nlist * sizeof (struct link_map *));
+ ns->_ns_main_searchlist->r_nlist
+ * sizeof (struct link_map_private *));
ns->_ns_global_scope_alloc = new_size;
ns->_ns_main_searchlist->r_list = new_global;
@@ -169,7 +170,7 @@ add_to_global_resize (struct link_map *new)
/* Actually add the new global objects to the global scope. Must be
called after add_to_global_resize. This function cannot fail. */
static void
-add_to_global_update (struct link_map *new)
+add_to_global_update (struct link_map_private *new)
{
struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];
@@ -177,7 +178,7 @@ add_to_global_update (struct link_map *new)
unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
{
- struct link_map *map = new->l_searchlist.r_list[cnt];
+ struct link_map_private *map = new->l_searchlist.r_list[cnt];
if (map->l_global == 0)
{
@@ -191,7 +192,7 @@ add_to_global_update (struct link_map *new)
/* We modify the global scope. Report this. */
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
_dl_debug_printf ("\nadd %s [%lu] to global scope\n",
- map->l_name, map->l_ns);
+ map->l_public.l_name, map->l_ns);
}
}
@@ -208,14 +209,14 @@ add_to_global_update (struct link_map *new)
/* Search link maps in all namespaces for the DSO that contains the object at
address ADDR. Returns the pointer to the link map of the matching DSO, or
NULL if a match is not found. */
-struct link_map *
+struct link_map_private *
_dl_find_dso_for_object (const ElfW(Addr) addr)
{
- struct link_map *l;
+ struct link_map_private *l;
/* Find the highest-addressed object that ADDR is not below. */
for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
- for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
+ for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l_next (l))
if (addr >= l->l_map_start && addr < l->l_map_end
&& (l->l_contiguous
|| _dl_addr_inside_object (l, (ElfW(Addr)) addr)))
@@ -229,7 +230,7 @@ rtld_hidden_def (_dl_find_dso_for_object);
/* Return true if NEW is found in the scope for MAP. */
static size_t
-scope_has_map (struct link_map *map, struct link_map *new)
+scope_has_map (struct link_map_private *map, struct link_map_private *new)
{
size_t cnt;
for (cnt = 0; map->l_scope[cnt] != NULL; ++cnt)
@@ -240,7 +241,7 @@ scope_has_map (struct link_map *map, struct link_map *new)
/* Return the length of the scope for MAP. */
static size_t
-scope_size (struct link_map *map)
+scope_size (struct link_map_private *map)
{
size_t cnt;
for (cnt = 0; map->l_scope[cnt] != NULL; )
@@ -252,13 +253,13 @@ scope_size (struct link_map *map)
can be added later without further allocation of memory. This
function can raise an exception due to malloc failure. */
static void
-resize_scopes (struct link_map *new)
+resize_scopes (struct link_map_private *new)
{
/* If the file is not loaded now as a dependency, add the search
list of the newly loaded object to the scope. */
for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
{
- struct link_map *imap = new->l_searchlist.r_list[i];
+ struct link_map_private *imap = new->l_searchlist.r_list[i];
/* If the initializer has been called already, the object has
not been loaded here and now. */
@@ -319,11 +320,11 @@ resize_scopes (struct link_map *new)
This function cannot raise an exception because all required memory
has been allocated by a previous call to resize_scopes. */
static void
-update_scopes (struct link_map *new)
+update_scopes (struct link_map_private *new)
{
for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
{
- struct link_map *imap = new->l_searchlist.r_list[i];
+ struct link_map_private *imap = new->l_searchlist.r_list[i];
int from_scope = 0;
if (imap->l_init_called && imap->l_type == lt_loaded)
@@ -358,12 +359,12 @@ update_scopes (struct link_map *new)
exception. The return value is true if any of the new objects use
TLS. */
static bool
-resize_tls_slotinfo (struct link_map *new)
+resize_tls_slotinfo (struct link_map_private *new)
{
bool any_tls = false;
for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
{
- struct link_map *imap = new->l_searchlist.r_list[i];
+ struct link_map_private *imap = new->l_searchlist.r_list[i];
/* Only add TLS memory if this object is loaded now and
therefore is not yet initialized. */
@@ -380,12 +381,12 @@ resize_tls_slotinfo (struct link_map *new)
function does not raise any exception. It should only be called if
resize_tls_slotinfo returned true. */
static void
-update_tls_slotinfo (struct link_map *new)
+update_tls_slotinfo (struct link_map_private *new)
{
unsigned int first_static_tls = new->l_searchlist.r_nlist;
for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
{
- struct link_map *imap = new->l_searchlist.r_list[i];
+ struct link_map_private *imap = new->l_searchlist.r_list[i];
/* Only add TLS memory if this object is loaded now and
therefore is not yet initialized. */
@@ -411,7 +412,7 @@ TLS generation counter wrapped! Please report this."));
_dl_add_to_slotinfo are still pending. */
for (unsigned int i = first_static_tls; i < new->l_searchlist.r_nlist; ++i)
{
- struct link_map *imap = new->l_searchlist.r_list[i];
+ struct link_map_private *imap = new->l_searchlist.r_list[i];
if (imap->l_need_tls_init
&& ! imap->l_init_called
@@ -443,18 +444,18 @@ TLS generation counter wrapped! Please report this."));
after dlopen failure is not possible, so that _dl_close can clean
up objects if necessary. */
static void
-activate_nodelete (struct link_map *new)
+activate_nodelete (struct link_map_private *new)
{
/* It is necessary to traverse the entire namespace. References to
objects in the global scope and unique symbol bindings can force
NODELETE status for objects outside the local scope. */
- for (struct link_map *l = GL (dl_ns)[new->l_ns]._ns_loaded; l != NULL;
- l = l->l_next)
+ for (struct link_map_private *l = GL (dl_ns)[new->l_ns]._ns_loaded;
+ l != NULL; l = l_next (l))
if (l->l_nodelete_pending)
{
if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("activating NODELETE for %s [%lu]\n",
- l->l_name, l->l_ns);
+ l->l_public.l_name, l->l_ns);
/* The flag can already be true at this point, e.g. a signal
handler may have triggered lazy binding and set NODELETE
@@ -471,7 +472,7 @@ activate_nodelete (struct link_map *new)
the debugger is notified of the start of relocation processing. */
static void
_dl_open_relocate_one_object (struct dl_open_args *args, struct r_debug *r,
- struct link_map *l, int reloc_mode,
+ struct link_map_private *l, int reloc_mode,
bool *relocation_in_progress)
{
if (l->l_real->l_relocated)
@@ -492,7 +493,7 @@ _dl_open_relocate_one_object (struct dl_open_args *args, struct r_debug *r,
this is necessary or not by observing the `_dl_profile_map'
variable. If it was NULL but is not NULL afterwards we must
start the profiling. */
- struct link_map *old_profile_map = GL(dl_profile_map);
+ struct link_map_private *old_profile_map = GL(dl_profile_map);
_dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);
@@ -515,7 +516,7 @@ _dl_open_relocate_one_object (struct dl_open_args *args, struct r_debug *r,
exception handling disabled. */
struct dl_init_args
{
- struct link_map *new;
+ struct link_map_private *new;
int argc;
char **argv;
char **env;
@@ -534,7 +535,7 @@ dl_open_worker_begin (void *a)
struct dl_open_args *args = a;
const char *file = args->file;
int mode = args->mode;
- struct link_map *call_map = NULL;
+ struct link_map_private *call_map = NULL;
/* Determine the caller's map if necessary. This is needed in case
we have a DST, when we don't know the namespace ID we have to put
@@ -550,7 +551,7 @@ dl_open_worker_begin (void *a)
By default we assume this is the main application. */
call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
- struct link_map *l = _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen);
+ struct link_map_private *l = _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen);
if (l)
call_map = l;
@@ -573,7 +574,7 @@ dl_open_worker_begin (void *a)
_dl_debug_initialize (0, args->nsid);
/* Load the named object. */
- struct link_map *new;
+ struct link_map_private *new;
args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
mode | __RTLD_CALLMAP, args->nsid);
@@ -598,7 +599,8 @@ dl_open_worker_begin (void *a)
/* Let the user know about the opencount. */
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
- new->l_name, new->l_ns, new->l_direct_opencount);
+ new->l_public.l_name, new->l_ns,
+ new->l_direct_opencount);
/* If the user requested the object to be in the global
namespace but it is not so far, prepare to add it now. This
@@ -613,7 +615,7 @@ dl_open_worker_begin (void *a)
if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES)
&& !new->l_nodelete_active)
_dl_debug_printf ("marking %s [%lu] as NODELETE\n",
- new->l_name, new->l_ns);
+ new->l_public.l_name, new->l_ns);
new->l_nodelete_active = true;
}
@@ -641,7 +643,7 @@ dl_open_worker_begin (void *a)
for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
{
- struct link_map *map = new->l_searchlist.r_list[i]->l_real;
+ struct link_map_private *map = new->l_searchlist.r_list[i]->l_real;
_dl_check_map_versions (map, 0, 0);
#ifndef SHARED
/* During static dlopen, check if ld.so has been loaded.
@@ -684,7 +686,7 @@ dl_open_worker_begin (void *a)
unsigned int first = UINT_MAX;
unsigned int last = 0;
unsigned int j = 0;
- struct link_map *l = new->l_initfini[0];
+ struct link_map_private *l = new->l_initfini[0];
do
{
if (! l->l_real->l_relocated)
@@ -780,7 +782,7 @@ dl_open_worker_begin (void *a)
if (!args->libc_already_loaded)
{
/* dlopen cannot be used to load an initial libc by design. */
- struct link_map *libc_map = GL(dl_ns)[args->nsid].libc_map;
+ struct link_map_private *libc_map = GL(dl_ns)[args->nsid].libc_map;
_dl_call_libc_early_init (libc_map, false);
}
@@ -812,7 +814,7 @@ dl_open_worker (void *a)
return;
int mode = args->mode;
- struct link_map *new = args->map;
+ struct link_map_private *new = args->map;
/* Run the initializer functions of new objects. Temporarily
disable the exception handler, so that lazy binding failures are
@@ -835,7 +837,8 @@ dl_open_worker (void *a)
/* Let the user know about the opencount. */
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
- new->l_name, new->l_ns, new->l_direct_opencount);
+ new->l_public.l_name, new->l_ns,
+ new->l_direct_opencount);
}
void *
@@ -959,19 +962,20 @@ no more namespaces available for dlmopen()"));
void
-_dl_show_scope (struct link_map *l, int from)
+_dl_show_scope (struct link_map_private *l, int from)
{
_dl_debug_printf ("object=%s [%lu]\n",
- DSO_FILENAME (l->l_name), l->l_ns);
+ DSO_FILENAME (l->l_public.l_name), l->l_ns);
if (l->l_scope != NULL)
for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt)
{
_dl_debug_printf (" scope %u:", scope_cnt);
for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist; ++cnt)
- if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name)
+ if (*l->l_scope[scope_cnt]->r_list[cnt]->l_public.l_name)
_dl_debug_printf_c (" %s",
- l->l_scope[scope_cnt]->r_list[cnt]->l_name);
+ l->l_scope[scope_cnt]->r_list[cnt]
+ ->l_public.l_name);
else
_dl_debug_printf_c (" %s", RTLD_PROGNAME);
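_dl_show_scope walks the NULL-terminated l_scope array and, inside each element, the counted r_list; entries with an empty name are printed as RTLD_PROGNAME for the main program. Reduced to a sketch with hypothetical stand-in types:

#include <stdio.h>

struct scope_elem { const char **names; unsigned int count; };

/* Hypothetical: outer NULL-terminated array of scopes, inner counted
   name list, as in _dl_show_scope above.  */
static void
show_scopes (struct scope_elem **scopes)
{
  for (unsigned int s = 0; scopes[s] != NULL; ++s)
    for (unsigned int i = 0; i < scopes[s]->count; ++i)
      puts (scopes[s]->names[i][0] != '\0'
            ? scopes[s]->names[i] : "<main program>");
}

Only the spelling of the name accesses changes here: l_name becomes l_public.l_name because the name remains part of the public ABI view of the link map.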
diff --git a/elf/dl-profile.c b/elf/dl-profile.c
index 8be0065fbd..fdf540c957 100644
--- a/elf/dl-profile.c
+++ b/elf/dl-profile.c
@@ -220,9 +220,9 @@ _dl_start_profile (void)
/* Now we can compute the size of the profiling data. This is done
with the same formulas as in `monstartup' (see gmon.c). */
running = 0;
- lowpc = ROUNDDOWN (mapstart + GL(dl_profile_map)->l_addr,
+ lowpc = ROUNDDOWN (mapstart + GL(dl_profile_map)->l_public.l_addr,
HISTFRACTION * sizeof (HISTCOUNTER));
- highpc = ROUNDUP (mapend + GL(dl_profile_map)->l_addr,
+ highpc = ROUNDUP (mapend + GL(dl_profile_map)->l_public.l_addr,
HISTFRACTION * sizeof (HISTCOUNTER));
textsize = highpc - lowpc;
kcountsize = textsize / HISTFRACTION;
diff --git a/elf/dl-reloc-static-pie.c b/elf/dl-reloc-static-pie.c
index a143ee5aad..ab94462f20 100644
--- a/elf/dl-reloc-static-pie.c
+++ b/elf/dl-reloc-static-pie.c
@@ -35,13 +35,14 @@
void
_dl_relocate_static_pie (void)
{
- struct link_map *main_map = _dl_get_dl_main_map ();
+ struct link_map_private *main_map = _dl_get_dl_main_map ();
/* Figure out the run-time load address of static PIE. */
- main_map->l_addr = elf_machine_load_address ();
+ main_map->l_public.l_addr = elf_machine_load_address ();
/* Read our own dynamic section and fill in the info array. */
- main_map->l_ld = ((void *) main_map->l_addr + elf_machine_dynamic ());
+ main_map->l_public.l_ld = ((void *) main_map->l_public.l_addr
+ + elf_machine_dynamic ());
const ElfW(Phdr) *ph, *phdr = GL(dl_phdr);
size_t phnum = GL(dl_phnum);
diff --git a/elf/dl-reloc.c b/elf/dl-reloc.c
index 72c8586d29..cbe4fcee4c 100644
--- a/elf/dl-reloc.c
+++ b/elf/dl-reloc.c
@@ -49,7 +49,7 @@
TLS runs out. If OPTIONAL is false then the entire surplus TLS area is
considered and the allocation only fails if that runs out. */
int
-_dl_try_allocate_static_tls (struct link_map *map, bool optional)
+_dl_try_allocate_static_tls (struct link_map_private *map, bool optional)
{
/* If we've already used the variable with dynamic access, or if the
alignment requirements are too high, fail. */
@@ -132,12 +132,12 @@ _dl_try_allocate_static_tls (struct link_map *map, bool optional)
not be inlined as much as possible. */
void
__attribute_noinline__
-_dl_allocate_static_tls (struct link_map *map)
+_dl_allocate_static_tls (struct link_map_private *map)
{
if (map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
|| _dl_try_allocate_static_tls (map, false))
{
- _dl_signal_error (0, map->l_name, NULL, N_("\
+ _dl_signal_error (0, map->l_public.l_name, NULL, N_("\
cannot allocate memory in static TLS block"));
}
}
@@ -147,7 +147,7 @@ cannot allocate memory in static TLS block"));
libpthread implementations should provide their own hook
to handle all threads. */
void
-_dl_nothread_init_static_tls (struct link_map *map)
+_dl_nothread_init_static_tls (struct link_map_private *map)
{
#if TLS_TCB_AT_TP
void *dest = (char *) THREAD_SELF - map->l_tls_offset;
@@ -202,7 +202,7 @@ resolve_map (lookup_t l, struct r_scope_elem *scope[], const ElfW(Sym) **ref,
#include "dynamic-link.h"
void
-_dl_relocate_object (struct link_map *l, struct r_scope_elem *scope[],
+_dl_relocate_object (struct link_map_private *l, struct r_scope_elem *scope[],
int reloc_mode, int consider_profiling)
{
if (l->l_relocated)
@@ -254,7 +254,8 @@ _dl_relocate_object (struct link_map *l, struct r_scope_elem *scope[],
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_RELOC))
_dl_debug_printf ("\nrelocation processing: %s%s\n",
- DSO_FILENAME (l->l_name), lazy ? " (lazy)" : "");
+ DSO_FILENAME (l->l_public.l_name), lazy
+ ? " (lazy)" : "");
/* DT_TEXTREL is now in level 2 and might phase out at some time.
But we rewrite the DT_FLAGS entry to a DT_TEXTREL entry to make
@@ -273,7 +274,7 @@ _dl_relocate_object (struct link_map *l, struct r_scope_elem *scope[],
newp->len = ALIGN_UP (ph->p_vaddr + ph->p_memsz, GLRO(dl_pagesize))
- ALIGN_DOWN (ph->p_vaddr, GLRO(dl_pagesize));
newp->start = PTR_ALIGN_DOWN (ph->p_vaddr, GLRO(dl_pagesize))
- + (caddr_t) l->l_addr;
+ + (caddr_t) l->l_public.l_addr;
newp->prot = 0;
if (ph->p_flags & PF_R)
@@ -287,7 +288,7 @@ _dl_relocate_object (struct link_map *l, struct r_scope_elem *scope[],
{
errstring = N_("cannot make segment writable for relocation");
call_error:
- _dl_signal_error (errno, l->l_name, NULL, errstring);
+ _dl_signal_error (errno, l->l_public.l_name, NULL, errstring);
}
newp->next = textrels;
@@ -318,7 +319,7 @@ _dl_relocate_object (struct link_map *l, struct r_scope_elem *scope[],
{
errstring = N_("\
%s: out of memory to store relocation results for %s\n");
- _dl_fatal_printf (errstring, RTLD_PROGNAME, l->l_name);
+ _dl_fatal_printf (errstring, RTLD_PROGNAME, l->l_public.l_name);
}
}
#endif
@@ -351,12 +352,12 @@ _dl_relocate_object (struct link_map *l, struct r_scope_elem *scope[],
void
-_dl_protect_relro (struct link_map *l)
+_dl_protect_relro (struct link_map_private *l)
{
- ElfW(Addr) start = ALIGN_DOWN((l->l_addr
+ ElfW(Addr) start = ALIGN_DOWN((l->l_public.l_addr
+ l->l_relro_addr),
GLRO(dl_pagesize));
- ElfW(Addr) end = ALIGN_DOWN((l->l_addr
+ ElfW(Addr) end = ALIGN_DOWN((l->l_public.l_addr
+ l->l_relro_addr
+ l->l_relro_size),
GLRO(dl_pagesize));
@@ -365,13 +366,13 @@ _dl_protect_relro (struct link_map *l)
{
static const char errstring[] = N_("\
cannot apply additional memory protection after relocation");
- _dl_signal_error (errno, l->l_name, NULL, errstring);
+ _dl_signal_error (errno, l->l_public.l_name, NULL, errstring);
}
}
void
__attribute_noinline__
-_dl_reloc_bad_type (struct link_map *map, unsigned int type, int plt)
+_dl_reloc_bad_type (struct link_map_private *map, unsigned int type, int plt)
{
#define DIGIT(b) _itoa_lower_digits[(b) & 0xf];
@@ -401,5 +402,5 @@ _dl_reloc_bad_type (struct link_map *map, unsigned int type, int plt)
*cp++ = DIGIT (type);
*cp = '\0';
- _dl_signal_error (0, map->l_name, NULL, msgbuf);
+ _dl_signal_error (0, map->l_public.l_name, NULL, msgbuf);
}
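_dl_protect_relro truncates both ends of the PT_GNU_RELRO range to page boundaries and then drops write permission with a single mprotect call. The arithmetic on its own, as a simplified sketch (the l_relro_* inputs match the code above; the helper name, page-size parameter and error handling are placeholders):

#include <sys/mman.h>
#include <stdint.h>

static int
protect_relro (uintptr_t load_addr, uintptr_t relro_vaddr,
               size_t relro_size, size_t pagesize)
{
  uintptr_t start = (load_addr + relro_vaddr) & ~(pagesize - 1);
  uintptr_t end = (load_addr + relro_vaddr + relro_size) & ~(pagesize - 1);
  if (start == end)
    return 0;
  return mprotect ((void *) start, end - start, PROT_READ);
}

The patch only changes where the load address comes from: l_addr moves into the public part of the link map and is now read as l_public.l_addr.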
diff --git a/elf/dl-runtime.c b/elf/dl-runtime.c
index fe7deda32a..8214b862c2 100644
--- a/elf/dl-runtime.c
+++ b/elf/dl-runtime.c
@@ -42,7 +42,7 @@ _dl_fixup (
# ifdef ELF_MACHINE_RUNTIME_FIXUP_ARGS
ELF_MACHINE_RUNTIME_FIXUP_ARGS,
# endif
- struct link_map *l, ElfW(Word) reloc_arg)
+ struct link_map_private *l, ElfW(Word) reloc_arg)
{
const ElfW(Sym) *const symtab
= (const void *) D_PTR (l, l_info[DT_SYMTAB]);
@@ -55,7 +55,7 @@ _dl_fixup (
+ reloc_offset (pltgot, reloc_arg));
const ElfW(Sym) *sym = &symtab[ELFW(R_SYM) (reloc->r_info)];
const ElfW(Sym) *refsym = sym;
- void *const rel_addr = (void *)(l->l_addr + reloc->r_offset);
+ void *const rel_addr = (void *)(l->l_public.l_addr + reloc->r_offset);
lookup_t result;
DL_FIXUP_VALUE_TYPE value;
@@ -170,7 +170,7 @@ _dl_profile_fixup (
# ifdef ELF_MACHINE_RUNTIME_FIXUP_ARGS
ELF_MACHINE_RUNTIME_FIXUP_ARGS,
# endif
- struct link_map *l, ElfW(Word) reloc_arg,
+ struct link_map_private *l, ElfW(Word) reloc_arg,
ElfW(Addr) retaddr, void *regs, long int *framesizep)
{
void (*mcount_fct) (ElfW(Addr), ElfW(Addr)) = _dl_mcount;
diff --git a/elf/dl-setup_hash.c b/elf/dl-setup_hash.c
index e3322dec4b..1b8481b07f 100644
--- a/elf/dl-setup_hash.c
+++ b/elf/dl-setup_hash.c
@@ -21,7 +21,7 @@
#include <ldsodefs.h>
void
-_dl_setup_hash (struct link_map *map)
+_dl_setup_hash (struct link_map_private *map)
{
Elf_Symndx *hash;
diff --git a/elf/dl-sort-maps.c b/elf/dl-sort-maps.c
index 5c846c7c6f..bcc49fa0e9 100644
--- a/elf/dl-sort-maps.c
+++ b/elf/dl-sort-maps.c
@@ -26,7 +26,7 @@
Sort array MAPS according to dependencies of the contained objects.
If FOR_FINI is true, this is called for finishing an object. */
static void
-_dl_sort_maps_original (struct link_map **maps, unsigned int nmaps,
+_dl_sort_maps_original (struct link_map_private **maps, unsigned int nmaps,
bool force_first, bool for_fini)
{
/* Allows caller to do the common optimization of skipping the first map,
@@ -45,7 +45,7 @@ _dl_sort_maps_original (struct link_map **maps, unsigned int nmaps,
{
/* Keep track of which object we looked at this round. */
++seen[i];
- struct link_map *thisp = maps[i];
+ struct link_map_private *thisp = maps[i];
if (__glibc_unlikely (for_fini))
{
@@ -61,7 +61,7 @@ _dl_sort_maps_original (struct link_map **maps, unsigned int nmaps,
unsigned int k = nmaps - 1;
while (k > i)
{
- struct link_map **runp = maps[k]->l_initfini;
+ struct link_map_private **runp = maps[k]->l_initfini;
if (runp != NULL)
/* Look through the dependencies of the object. */
while (*runp != NULL)
@@ -90,7 +90,7 @@ _dl_sort_maps_original (struct link_map **maps, unsigned int nmaps,
if (__glibc_unlikely (for_fini && maps[k]->l_reldeps != NULL))
{
unsigned int m = maps[k]->l_reldeps->act;
- struct link_map **relmaps = &maps[k]->l_reldeps->list[0];
+ struct link_map_private **relmaps = &maps[k]->l_reldeps->list[0];
/* Look through the relocation dependencies of the object. */
while (m-- > 0)
@@ -98,7 +98,7 @@ _dl_sort_maps_original (struct link_map **maps, unsigned int nmaps,
{
/* If a cycle exists with a link time dependency,
preserve the latter. */
- struct link_map **runp = thisp->l_initfini;
+ struct link_map_private **runp = thisp->l_initfini;
if (runp != NULL)
while (*runp != NULL)
if (__glibc_unlikely (*runp++ == maps[k]))
@@ -132,7 +132,7 @@ _dl_sort_maps_original (struct link_map **maps, unsigned int nmaps,
decremented before storing the current map at each level. */
static void
-dfs_traversal (struct link_map ***rpo, struct link_map *map,
+dfs_traversal (struct link_map_private ***rpo, struct link_map_private *map,
bool *do_reldeps)
{
/* _dl_map_object_deps ignores l_faked objects when calculating the
@@ -146,7 +146,7 @@ dfs_traversal (struct link_map ***rpo, struct link_map *map,
{
for (int i = 0; map->l_initfini[i] != NULL; i++)
{
- struct link_map *dep = map->l_initfini[i];
+ struct link_map_private *dep = map->l_initfini[i];
if (dep->l_visited == 0
&& dep->l_main_map == 0)
dfs_traversal (rpo, dep, do_reldeps);
@@ -161,7 +161,7 @@ dfs_traversal (struct link_map ***rpo, struct link_map *map,
for (int m = map->l_reldeps->act - 1; m >= 0; m--)
{
- struct link_map *dep = map->l_reldeps->list[m];
+ struct link_map_private *dep = map->l_reldeps->list[m];
if (dep->l_visited == 0
&& dep->l_main_map == 0)
dfs_traversal (rpo, dep, do_reldeps);
@@ -176,10 +176,10 @@ dfs_traversal (struct link_map ***rpo, struct link_map *map,
objects. */
static void
-_dl_sort_maps_dfs (struct link_map **maps, unsigned int nmaps,
+_dl_sort_maps_dfs (struct link_map_private **maps, unsigned int nmaps,
bool force_first, bool for_fini)
{
- struct link_map *first_map = maps[0];
+ struct link_map_private *first_map = maps[0];
for (int i = nmaps - 1; i >= 0; i--)
maps[i]->l_visited = 0;
@@ -208,12 +208,12 @@ _dl_sort_maps_dfs (struct link_map **maps, unsigned int nmaps,
to front makes things much more straightforward. */
/* Array to hold RPO sorting results, before we copy back to maps[]. */
- struct link_map *rpo[nmaps];
+ struct link_map_private *rpo[nmaps];
/* The 'head' position during each DFS iteration. Note that we start at
one past the last element due to first-decrement-then-store (see the
bottom of the dfs_traversal() routine above). */
- struct link_map **rpo_head = &rpo[nmaps];
+ struct link_map_private **rpo_head = &rpo[nmaps];
bool do_reldeps = false;
bool *do_reldeps_ref = (for_fini ? &do_reldeps : NULL);
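The DFS sort emits a reverse post-order by decrementing the output cursor before every store, which is why rpo_head starts one past the end of rpo[]. The traversal pattern in isolation (struct node and visit are hypothetical; the real dfs_traversal additionally handles l_faked, l_main_map and relocation dependencies):

struct node
{
  int visited;
  struct node **deps;           /* NULL-terminated dependency list.  */
};

static void
visit (struct node ***out, struct node *n)
{
  n->visited = 1;
  if (n->deps != NULL)
    for (struct node **d = n->deps; *d != NULL; ++d)
      if (!(*d)->visited)
        visit (out, *d);
  *--(*out) = n;                /* First decrement, then store.  */
}

Calling visit (&head, root) with head initially pointing one past the end of the output array leaves head at the first element of the reverse post-order, matching how _dl_sort_maps_dfs fills rpo[] and later copies it back into maps[].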
@@ -246,7 +246,7 @@ _dl_sort_maps_dfs (struct link_map **maps, unsigned int nmaps,
for (int i = nmaps - 1; i >= 0; i--)
rpo[i]->l_visited = 0;
- struct link_map **maps_head = &maps[nmaps];
+ struct link_map_private **maps_head = &maps[nmaps];
for (int i = nmaps - 1; i >= 0; i--)
{
dfs_traversal (&maps_head, rpo[i], NULL);
@@ -260,7 +260,7 @@ _dl_sort_maps_dfs (struct link_map **maps, unsigned int nmaps,
assert (maps_head == maps);
}
else
- memcpy (maps, rpo, sizeof (struct link_map *) * nmaps);
+ memcpy (maps, rpo, sizeof (struct link_map_private *) * nmaps);
/* Skipping the first object at maps[0] is not valid in general,
since traversing along object dependency-links may "find" that
@@ -293,7 +293,7 @@ _dl_sort_maps_init (void)
}
void
-_dl_sort_maps (struct link_map **maps, unsigned int nmaps,
+_dl_sort_maps (struct link_map_private **maps, unsigned int nmaps,
bool force_first, bool for_fini)
{
/* It can be tempting to use a static function pointer to store and call
diff --git a/elf/dl-static-tls.h b/elf/dl-static-tls.h
index 5ffbad99f5..d40dd882f6 100644
--- a/elf/dl-static-tls.h
+++ b/elf/dl-static-tls.h
@@ -45,7 +45,7 @@
&& (__builtin_expect ((sym_map)->l_tls_offset != NO_TLS_OFFSET, 1) \
|| _dl_try_allocate_static_tls (sym_map, true) == 0))
-int _dl_try_allocate_static_tls (struct link_map *map, bool optional)
+int _dl_try_allocate_static_tls (struct link_map_private *map, bool optional)
attribute_hidden;
#endif
diff --git a/elf/dl-support.c b/elf/dl-support.c
index 837fa1c836..3648dd4d05 100644
--- a/elf/dl-support.c
+++ b/elf/dl-support.c
@@ -65,7 +65,7 @@ int _dl_verbose;
const char *_dl_inhibit_rpath;
/* The map for the object we will profile. */
-struct link_map *_dl_profile_map;
+struct link_map_private *_dl_profile_map;
/* This is the address of the last stack address ever used. */
void *__libc_stack_end;
@@ -78,18 +78,21 @@ int _dl_bind_not;
/* A dummy link map for the executable, used by dlopen to access the global
scope. We don't export any symbols ourselves, so this can be minimal. */
-static struct link_map _dl_main_map =
+static struct link_map_private _dl_main_map =
{
- .l_name = (char *) "",
+ .l_public = { .l_name = (char *) "", },
.l_real = &_dl_main_map,
.l_ns = LM_ID_BASE,
.l_libname = &(struct libname_list) { .name = "", .dont_free = 1 },
.l_searchlist =
{
- .r_list = &(struct link_map *) { &_dl_main_map },
+ .r_list = &(struct link_map_private *) { &_dl_main_map },
.r_nlist = 1,
},
- .l_symbolic_searchlist = { .r_list = &(struct link_map *) { NULL } },
+ .l_symbolic_searchlist =
+ {
+ .r_list = &(struct link_map_private *) { NULL },
+ },
.l_type = lt_executable,
.l_scope_mem = { &_dl_main_map.l_searchlist },
.l_scope_max = (sizeof (_dl_main_map.l_scope_mem)
@@ -119,7 +122,7 @@ unsigned long long _dl_load_adds = 1;
/* Fake scope of the main application. */
struct r_scope_elem _dl_initial_searchlist =
{
- .r_list = &(struct link_map *) { &_dl_main_map },
+ .r_list = &(struct link_map_private *) { &_dl_main_map },
.r_nlist = 1,
};
@@ -148,7 +151,7 @@ struct r_search_path_elem *_dl_all_dirs;
struct r_search_path_elem *_dl_init_all_dirs;
/* The object to be initialized first. */
-struct link_map *_dl_initfirst;
+struct link_map_private *_dl_initfirst;
/* Descriptor to write debug messages to. */
int _dl_debug_fd = STDERR_FILENO;
@@ -180,7 +183,8 @@ int _dl_stack_cache_lock;
when it was not, we do it by calling this function.
It returns an errno code or zero on success. */
int (*_dl_make_stack_executable_hook) (void **) = _dl_make_stack_executable;
-void (*_dl_init_static_tls) (struct link_map *) = &_dl_nothread_init_static_tls;
+void (*_dl_init_static_tls) (struct link_map_private *)
+ = &_dl_nothread_init_static_tls;
#endif
struct dl_scope_free_list *_dl_scope_free_list;
@@ -193,7 +197,7 @@ uintptr_t _dl_sysinfo;
/* Address of the ELF headers in the vsyscall page. */
const ElfW(Ehdr) *_dl_sysinfo_dso;
-struct link_map *_dl_sysinfo_map;
+struct link_map_private *_dl_sysinfo_map;
# include "get-dynamic-info.h"
#endif
@@ -348,7 +352,7 @@ DL_SYSINFO_IMPLEMENTATION
/* Since relocation to hidden _dl_main_map causes relocation overflow on
aarch64, a function is used to get the address of _dl_main_map. */
-struct link_map *
+struct link_map_private *
_dl_get_dl_main_map (void)
{
return &_dl_main_map;
@@ -358,7 +362,7 @@ _dl_get_dl_main_map (void)
/* This is used by _dl_runtime_profile, not used on static code. */
void
DL_ARCH_FIXUP_ATTRIBUTE
-_dl_audit_pltexit (struct link_map *l, ElfW(Word) reloc_arg,
+_dl_audit_pltexit (struct link_map_private *l, ElfW(Word) reloc_arg,
const void *inregs, void *outregs)
{
}
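The static _dl_main_map initializer relies on a small C99 idiom: &(struct link_map_private *) { &_dl_main_map } creates anonymous static storage holding one pointer and yields its address, which is exactly the one-element r_list the search lists need. A minimal stand-alone example of the idiom (struct T is a placeholder type):

struct T { int dummy; };

static struct T t;

/* Compound literal with static storage duration: a one-element
   array of struct T * whose single entry points at t.  */
static struct T **one_element_list = &(struct T *) { &t };

The patch therefore only has to respell the pointer type inside the compound literals from struct link_map * to struct link_map_private *.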
diff --git a/elf/dl-sym-post.h b/elf/dl-sym-post.h
index 5623d63ac8..c118f81f79 100644
--- a/elf/dl-sym-post.h
+++ b/elf/dl-sym-post.h
@@ -18,10 +18,10 @@
/* Return the link map containing the caller address. */
-static struct link_map *
+static struct link_map_private *
_dl_sym_find_caller_link_map (ElfW(Addr) caller)
{
- struct link_map *l = _dl_find_dso_for_object (caller);
+ struct link_map_private *l = _dl_find_dso_for_object (caller);
if (l != NULL)
return l;
else
@@ -35,7 +35,7 @@ _dl_sym_find_caller_link_map (ElfW(Addr) caller)
necessary. If MATCH is NULL, CALLER is used to determine it. */
static void *
_dl_sym_post (lookup_t result, const ElfW(Sym) *ref, void *value,
- ElfW(Addr) caller, struct link_map *match)
+ ElfW(Addr) caller, struct link_map_private *match)
{
/* Resolve indirect function address. */
if (__glibc_unlikely (ELFW(ST_TYPE) (ref->st_info) == STT_GNU_IFUNC))
diff --git a/elf/dl-sym.c b/elf/dl-sym.c
index b1ed1f2006..5774ea594a 100644
--- a/elf/dl-sym.c
+++ b/elf/dl-sym.c
@@ -42,7 +42,7 @@
/* Return the symbol address given the map of the module it is in and
the symbol record. This is used in dl-sym.c. */
static void *
-_dl_tls_symaddr (struct link_map *map, const ElfW(Sym) *ref)
+_dl_tls_symaddr (struct link_map_private *map, const ElfW(Sym) *ref)
{
# ifndef DONT_USE_TLS_INDEX
tls_index tmp =
@@ -62,7 +62,7 @@ _dl_tls_symaddr (struct link_map *map, const ElfW(Sym) *ref)
struct call_dl_lookup_args
{
/* Arguments to do_dlsym. */
- struct link_map *map;
+ struct link_map_private *map;
const char *name;
struct r_found_version *vers;
int flags;
@@ -90,7 +90,7 @@ do_sym (void *handle, const char *name, void *who,
ElfW(Addr) caller = (ElfW(Addr)) who;
/* Link map of the caller if needed. */
- struct link_map *match = NULL;
+ struct link_map_private *match = NULL;
if (handle == RTLD_DEFAULT)
{
@@ -139,7 +139,7 @@ do_sym (void *handle, const char *name, void *who,
RTLD_NEXT used in code not dynamically loaded"));
}
- struct link_map *l = match;
+ struct link_map_private *l = match;
while (l->l_loader != NULL)
l = l->l_loader;
@@ -149,7 +149,7 @@ RTLD_NEXT used in code not dynamically loaded"));
else
{
/* Search the scope of the given object. */
- struct link_map *map = handle;
+ struct link_map_private *map = handle;
result = GLRO(dl_lookup_symbol_x) (name, map, &ref, map->l_local_scope,
vers, 0, flags, NULL);
}
diff --git a/elf/dl-symaddr.c b/elf/dl-symaddr.c
index 5c4f94943b..6f37f26080 100644
--- a/elf/dl-symaddr.c
+++ b/elf/dl-symaddr.c
@@ -20,7 +20,7 @@
#include <dl-fptr.h>
void *
-_dl_symbol_address (struct link_map *map, const ElfW(Sym) *ref)
+_dl_symbol_address (struct link_map_private *map, const ElfW(Sym) *ref)
{
ElfW(Addr) value = SYMBOL_ADDRESS (map, ref, false);
diff --git a/elf/dl-tls.c b/elf/dl-tls.c
index 70446e71a8..68ed806c8e 100644
--- a/elf/dl-tls.c
+++ b/elf/dl-tls.c
@@ -121,7 +121,7 @@ oom (void)
void
-_dl_assign_tls_modid (struct link_map *l)
+_dl_assign_tls_modid (struct link_map_private *l)
{
size_t result;
@@ -553,7 +553,7 @@ _dl_allocate_tls_init (void *result, bool init_tls)
for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
{
- struct link_map *map;
+ struct link_map_private *map;
void *dest;
/* Check for the total number of used slots. */
@@ -699,7 +699,7 @@ allocate_dtv_entry (size_t alignment, size_t size)
}
static struct dtv_pointer
-allocate_and_init (struct link_map *map)
+allocate_and_init (struct link_map_private *map)
{
struct dtv_pointer result = allocate_dtv_entry
(map->l_tls_align, map->l_tls_blocksize);
@@ -715,10 +715,10 @@ allocate_and_init (struct link_map *map)
}
-struct link_map *
+struct link_map_private *
_dl_update_slotinfo (unsigned long int req_modid, size_t new_gen)
{
- struct link_map *the_map = NULL;
+ struct link_map_private *the_map = NULL;
dtv_t *dtv = THREAD_DTV ();
/* CONCURRENCY NOTES:
@@ -796,7 +796,7 @@ _dl_update_slotinfo (unsigned long int req_modid, size_t new_gen)
/* Case (3) or (1). */
/* If there is no map this means the entry is empty. */
- struct link_map *map
+ struct link_map_private *map
= atomic_load_relaxed (&listp->slotinfo[cnt].map);
/* Check whether the current dtv array is large enough. */
if (dtv[-1].counter < modid)
@@ -850,7 +850,7 @@ _dl_update_slotinfo (unsigned long int req_modid, size_t new_gen)
static void *
__attribute_noinline__
-tls_get_addr_tail (GET_ADDR_ARGS, dtv_t *dtv, struct link_map *the_map)
+tls_get_addr_tail (GET_ADDR_ARGS, dtv_t *dtv, struct link_map_private *the_map)
{
/* The allocation was deferred. Do it now. */
if (the_map == NULL)
@@ -909,11 +909,11 @@ tls_get_addr_tail (GET_ADDR_ARGS, dtv_t *dtv, struct link_map *the_map)
}
-static struct link_map *
+static struct link_map_private *
__attribute_noinline__
update_get_addr (GET_ADDR_ARGS, size_t gen)
{
- struct link_map *the_map = _dl_update_slotinfo (GET_ADDR_MODULE, gen);
+ struct link_map_private *the_map = _dl_update_slotinfo (GET_ADDR_MODULE, gen);
dtv_t *dtv = THREAD_DTV ();
void *p = dtv[GET_ADDR_MODULE].pointer.val;
@@ -968,7 +968,7 @@ __tls_get_addr (GET_ADDR_ARGS)
/* Look up the module's TLS block as for __tls_get_addr,
but never touch anything. Return null if it's not allocated yet. */
void *
-_dl_tls_get_addr_soft (struct link_map *l)
+_dl_tls_get_addr_soft (struct link_map_private *l)
{
if (__glibc_unlikely (l->l_tls_modid == 0))
/* This module has no TLS segment. */
@@ -1013,7 +1013,7 @@ _dl_tls_get_addr_soft (struct link_map *l)
void
-_dl_add_to_slotinfo (struct link_map *l, bool do_add)
+_dl_add_to_slotinfo (struct link_map_private *l, bool do_add)
{
/* Now that we know the object is loaded successfully add
modules containing TLS data to the dtv info table. We
@@ -1073,7 +1073,7 @@ cannot create TLS data structures"));
#if PTHREAD_IN_LIBC
static inline void __attribute__((always_inline))
-init_one_static_tls (struct pthread *curp, struct link_map *map)
+init_one_static_tls (struct pthread *curp, struct link_map_private *map)
{
# if TLS_TCB_AT_TP
void *dest = (char *) curp - map->l_tls_offset;
@@ -1089,7 +1089,7 @@ init_one_static_tls (struct pthread *curp, struct link_map *map)
}
void
-_dl_init_static_tls (struct link_map *map)
+_dl_init_static_tls (struct link_map_private *map)
{
lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);
diff --git a/elf/dl-unmap-segments.h b/elf/dl-unmap-segments.h
index 1ec507e887..f51b320777 100644
--- a/elf/dl-unmap-segments.h
+++ b/elf/dl-unmap-segments.h
@@ -27,7 +27,7 @@
range in one fell swoop. */
static __always_inline void
-_dl_unmap_segments (struct link_map *l)
+_dl_unmap_segments (struct link_map_private *l)
{
__munmap ((void *) l->l_map_start, l->l_map_end - l->l_map_start);
}
diff --git a/elf/dl-usage.c b/elf/dl-usage.c
index 04315451f2..dde0b2be53 100644
--- a/elf/dl-usage.c
+++ b/elf/dl-usage.c
@@ -90,7 +90,7 @@ print_search_path_for_help (struct dl_main_state *state)
/* The print order should reflect the processing in
_dl_map_object. */
- struct link_map *map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
+ struct link_map_private *map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
if (map != NULL)
print_search_path_for_help_1 (map->l_rpath_dirs.dirs);
diff --git a/elf/dl-version.c b/elf/dl-version.c
index b3b2160ac8..faad3fea16 100644
--- a/elf/dl-version.c
+++ b/elf/dl-version.c
@@ -26,20 +26,20 @@
#include <assert.h>
-static inline struct link_map *
+static inline struct link_map_private *
__attribute ((always_inline))
-find_needed (const char *name, struct link_map *map)
+find_needed (const char *name, struct link_map_private *map)
{
- struct link_map *tmap;
+ struct link_map_private *tmap;
for (tmap = GL(dl_ns)[map->l_ns]._ns_loaded; tmap != NULL;
- tmap = tmap->l_next)
+ tmap = l_next (tmap))
if (_dl_name_match_p (name, tmap))
return tmap;
struct dl_exception exception;
_dl_exception_create_format
- (&exception, DSO_FILENAME (map->l_name),
+ (&exception, DSO_FILENAME (map->l_public.l_name),
"missing soname %s in version dependency", name);
_dl_signal_exception (0, &exception, NULL);
}
@@ -47,7 +47,7 @@ find_needed (const char *name, struct link_map *map)
static int
match_symbol (const char *name, Lmid_t ns, ElfW(Word) hash, const char *string,
- struct link_map *map, int verbose, int weak)
+ struct link_map_private *map, int verbose, int weak)
{
const char *strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
ElfW(Addr) def_offset;
@@ -60,7 +60,7 @@ match_symbol (const char *name, Lmid_t ns, ElfW(Word) hash, const char *string,
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_VERSIONS))
_dl_debug_printf ("\
checking for version `%s' in file %s [%lu] required by file %s [%lu]\n",
- string, DSO_FILENAME (map->l_name),
+ string, DSO_FILENAME (map->l_public.l_name),
map->l_ns, name, ns);
if (__glibc_unlikely (map->l_info[VERSYMIDX (DT_VERDEF)] == NULL))
@@ -72,7 +72,7 @@ checking for version `%s' in file %s [%lu] required by file %s [%lu]\n",
{
/* XXX We cannot translate the messages. */
_dl_exception_create_format
- (&exception, DSO_FILENAME (map->l_name),
+ (&exception, DSO_FILENAME (map->l_public.l_name),
"no version information available (required by %s)", name);
goto call_cerror;
}
@@ -82,7 +82,7 @@ checking for version `%s' in file %s [%lu] required by file %s [%lu]\n",
def_offset = map->l_info[VERSYMIDX (DT_VERDEF)]->d_un.d_ptr;
assert (def_offset != 0);
- def = (ElfW(Verdef) *) ((char *) map->l_addr + def_offset);
+ def = (ElfW(Verdef) *) ((char *) map->l_public.l_addr + def_offset);
while (1)
{
/* Currently the version number of the definition entry is 1.
@@ -93,7 +93,7 @@ checking for version `%s' in file %s [%lu] required by file %s [%lu]\n",
buf[sizeof (buf) - 1] = '\0';
/* XXX We cannot translate the message. */
_dl_exception_create_format
- (&exception, DSO_FILENAME (map->l_name),
+ (&exception, DSO_FILENAME (map->l_public.l_name),
"unsupported version %s of Verdef record",
_itoa (def->vd_version, &buf[sizeof (buf) - 1], 10, 0));
result = 1;
@@ -127,7 +127,7 @@ checking for version `%s' in file %s [%lu] required by file %s [%lu]\n",
{
/* XXX We cannot translate the message. */
_dl_exception_create_format
- (&exception, DSO_FILENAME (map->l_name),
+ (&exception, DSO_FILENAME (map->l_public.l_name),
"weak version `%s' not found (required by %s)", string, name);
goto call_cerror;
}
@@ -136,7 +136,7 @@ checking for version `%s' in file %s [%lu] required by file %s [%lu]\n",
/* XXX We cannot translate the message. */
_dl_exception_create_format
- (&exception, DSO_FILENAME (map->l_name),
+ (&exception, DSO_FILENAME (map->l_public.l_name),
"version `%s' not found (required by %s)", string, name);
result = 1;
call_cerror:
@@ -147,7 +147,7 @@ checking for version `%s' in file %s [%lu] required by file %s [%lu]\n",
int
-_dl_check_map_versions (struct link_map *map, int verbose, int trace_mode)
+_dl_check_map_versions (struct link_map_private *map, int verbose, int trace_mode)
{
int result = 0;
const char *strtab;
@@ -173,7 +173,8 @@ _dl_check_map_versions (struct link_map *map, int verbose, int trace_mode)
if (dyn != NULL)
{
/* This file requires special versions from its dependencies. */
- ElfW(Verneed) *ent = (ElfW(Verneed) *) (map->l_addr + dyn->d_un.d_ptr);
+ ElfW(Verneed) *ent = (ElfW(Verneed) *) (map->l_public.l_addr
+ + dyn->d_un.d_ptr);
/* Currently the version number of the needed entry is 1.
Make sure all we see is this version. */
@@ -183,7 +184,7 @@ _dl_check_map_versions (struct link_map *map, int verbose, int trace_mode)
buf[sizeof (buf) - 1] = '\0';
/* XXX We cannot translate the message. */
_dl_exception_create_format
- (&exception, DSO_FILENAME (map->l_name),
+ (&exception, DSO_FILENAME (map->l_public.l_name),
"unsupported version %s of Verneed record",
_itoa (ent->vn_version, &buf[sizeof (buf) - 1], 10, 0));
call_error:
@@ -193,7 +194,7 @@ _dl_check_map_versions (struct link_map *map, int verbose, int trace_mode)
while (1)
{
ElfW(Vernaux) *aux;
- struct link_map *needed = find_needed (strtab + ent->vn_file, map);
+ struct link_map_private *needed = find_needed (strtab + ent->vn_file, map);
/* Make sure this is no stub we created because of a missing
dependency. */
@@ -207,7 +208,7 @@ _dl_check_map_versions (struct link_map *map, int verbose, int trace_mode)
{
/* Match the symbol. */
const char *string = strtab + aux->vna_name;
- result |= match_symbol (DSO_FILENAME (map->l_name),
+ result |= match_symbol (DSO_FILENAME (map->l_public.l_name),
map->l_ns, aux->vna_hash,
string, needed->l_real, verbose,
aux->vna_flags & VER_FLG_WEAK);
@@ -249,7 +250,7 @@ _dl_check_map_versions (struct link_map *map, int verbose, int trace_mode)
if (def != NULL)
{
ElfW(Verdef) *ent;
- ent = (ElfW(Verdef) *) (map->l_addr + def->d_un.d_ptr);
+ ent = (ElfW(Verdef) *) (map->l_public.l_addr + def->d_un.d_ptr);
while (1)
{
if ((unsigned int) (ent->vd_ndx & 0x7fff) > ndx_high)
@@ -273,7 +274,7 @@ _dl_check_map_versions (struct link_map *map, int verbose, int trace_mode)
if (__glibc_unlikely (map->l_versions == NULL))
{
_dl_exception_create
- (&exception, DSO_FILENAME (map->l_name),
+ (&exception, DSO_FILENAME (map->l_public.l_name),
N_("cannot allocate version reference table"));
errval = ENOMEM;
goto call_error;
@@ -288,7 +289,7 @@ _dl_check_map_versions (struct link_map *map, int verbose, int trace_mode)
if (dyn != NULL)
{
ElfW(Verneed) *ent;
- ent = (ElfW(Verneed) *) (map->l_addr + dyn->d_un.d_ptr);
+ ent = (ElfW(Verneed) *) (map->l_public.l_addr + dyn->d_un.d_ptr);
while (1)
{
ElfW(Vernaux) *aux;
@@ -326,7 +327,7 @@ _dl_check_map_versions (struct link_map *map, int verbose, int trace_mode)
if (def != NULL)
{
ElfW(Verdef) *ent;
- ent = (ElfW(Verdef) *) (map->l_addr + def->d_un.d_ptr);
+ ent = (ElfW(Verdef) *) (map->l_public.l_addr + def->d_un.d_ptr);
while (1)
{
ElfW(Verdaux) *aux;
@@ -361,14 +362,14 @@ _dl_check_map_versions (struct link_map *map, int verbose, int trace_mode)
{
const char *strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
const ElfW(Dyn) *d;
- for (d = map->l_ld; d->d_tag != DT_NULL; ++d)
+ for (d = map->l_public.l_ld; d->d_tag != DT_NULL; ++d)
if (d->d_tag == DT_NEEDED)
{
const char *name = strtab + d->d_un.d_val;
if (strncmp (name, "libc.so.", 8) == 0)
{
_dl_exception_create
- (&exception, DSO_FILENAME (map->l_name),
+ (&exception, DSO_FILENAME (map->l_public.l_name),
N_("DT_RELR without GLIBC_ABI_DT_RELR dependency"));
goto call_error;
}
@@ -380,12 +381,13 @@ _dl_check_map_versions (struct link_map *map, int verbose, int trace_mode)
int
-_dl_check_all_versions (struct link_map *map, int verbose, int trace_mode)
+_dl_check_all_versions (struct link_map_private *map, int verbose,
+ int trace_mode)
{
- struct link_map *l;
+ struct link_map_private *l;
int result = 0;
- for (l = map; l != NULL; l = l->l_next)
+ for (l = map; l != NULL; l = l_next (l))
result |= (! l->l_faked
&& _dl_check_map_versions (l, verbose, trace_mode));
diff --git a/elf/do-rel.h b/elf/do-rel.h
index ea973b155a..8083cb4162 100644
--- a/elf/do-rel.h
+++ b/elf/do-rel.h
@@ -40,7 +40,7 @@
than fully resolved now. */
static inline void __attribute__ ((always_inline))
-elf_dynamic_do_Rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_dynamic_do_Rel (struct link_map_private *map, struct r_scope_elem *scope[],
ElfW(Addr) reladdr, ElfW(Addr) relsize,
__typeof (((ElfW(Dyn) *) 0)->d_un.d_val) nrelative,
int lazy, int skip_ifunc)
@@ -48,7 +48,7 @@ elf_dynamic_do_Rel (struct link_map *map, struct r_scope_elem *scope[],
const ElfW(Rel) *relative = (const void *) reladdr;
const ElfW(Rel) *r = relative + nrelative;
const ElfW(Rel) *end = (const void *) (reladdr + relsize);
- ElfW(Addr) l_addr = map->l_addr;
+ ElfW(Addr) l_addr = map->l_public.l_addr;
const ElfW(Sym) *const symtab
= (const void *) D_PTR (map, l_info[DT_SYMTAB]);
@@ -150,7 +150,7 @@ elf_dynamic_do_Rel (struct link_map *map, struct r_scope_elem *scope[],
if (ELFW(R_TYPE) (r->r_info) == ELF_MACHINE_JMP_SLOT
&& GLRO(dl_naudit) > 0)
{
- struct link_map *sym_map
+ struct link_map_private *sym_map
= RESOLVE_MAP (map, scope, &sym, rversion,
ELF_MACHINE_JMP_SLOT);
if (sym != NULL)
@@ -196,7 +196,7 @@ elf_dynamic_do_Rel (struct link_map *map, struct r_scope_elem *scope[],
if (ELFW(R_TYPE) (r->r_info) == ELF_MACHINE_JMP_SLOT
&& GLRO(dl_naudit) > 0)
{
- struct link_map *sym_map
+ struct link_map_private *sym_map
= RESOLVE_MAP (map, scope, &sym,
(struct r_found_version *) NULL,
ELF_MACHINE_JMP_SLOT);
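Everything elf_dynamic_do_Rel computes is expressed relative to the load bias, which now comes from map->l_public.l_addr. For the nrelative relative relocations at the start of the table the whole job is adding that bias; roughly, for the RELA flavour (apply_relative is an illustrative helper, not the code path above):

#include <link.h>

static void
apply_relative (ElfW(Addr) l_addr, const ElfW(Rela) *reloc)
{
  /* A relative relocation stores load bias + addend at bias + offset.  */
  ElfW(Addr) *where = (ElfW(Addr) *) (l_addr + reloc->r_offset);
  *where = l_addr + reloc->r_addend;
}

The REL flavour works the same way, except that the addend is the value already stored at the target location.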
diff --git a/elf/dynamic-link.h b/elf/dynamic-link.h
index e7f755fc75..2f72240b6a 100644
--- a/elf/dynamic-link.h
+++ b/elf/dynamic-link.h
@@ -32,7 +32,7 @@
unaligned cases. */
# if ! ELF_MACHINE_NO_REL
static inline void __attribute__((always_inline))
-elf_machine_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_rel (struct link_map_private *map, struct r_scope_elem *scope[],
const ElfW(Rel) *reloc, const ElfW(Sym) *sym,
const struct r_found_version *version,
void *const reloc_addr, int skip_ifunc);
@@ -42,7 +42,7 @@ elf_machine_rel_relative (ElfW(Addr) l_addr, const ElfW(Rel) *reloc,
# endif
# if ! ELF_MACHINE_NO_RELA
static inline void __attribute__((always_inline))
-elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
const ElfW(Rela) *reloc, const ElfW(Sym) *sym,
const struct r_found_version *version, void *const reloc_addr,
int skip_ifunc);
@@ -52,12 +52,14 @@ elf_machine_rela_relative (ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
# endif
# if ELF_MACHINE_NO_RELA || defined ELF_MACHINE_PLT_REL
static inline void __attribute__((always_inline))
-elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_lazy_rel (struct link_map_private *map,
+ struct r_scope_elem *scope[],
ElfW(Addr) l_addr, const ElfW(Rel) *reloc,
int skip_ifunc);
# else
static inline void __attribute__((always_inline))
-elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_lazy_rel (struct link_map_private *map,
+ struct r_scope_elem *scope[],
ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
int skip_ifunc);
# endif
@@ -152,7 +154,7 @@ elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
# define ELF_DYNAMIC_DO_RELR(map) \
do { \
- ElfW(Addr) l_addr = (map)->l_addr, *where = 0; \
+ ElfW(Addr) l_addr = (map)->l_public.l_addr, *where = 0; \
const ElfW(Relr) *r, *end; \
if ((map)->l_info[DT_RELR] == NULL) \
break; \
diff --git a/elf/get-dynamic-info.h b/elf/get-dynamic-info.h
index 1e458420c0..066395eab5 100644
--- a/elf/get-dynamic-info.h
+++ b/elf/get-dynamic-info.h
@@ -26,7 +26,7 @@
#include <libc-diag.h>
static inline void __attribute__ ((unused, always_inline))
-elf_get_dynamic_info (struct link_map *l, bool bootstrap,
+elf_get_dynamic_info (struct link_map_private *l, bool bootstrap,
bool static_pie_bootstrap)
{
#if __ELF_NATIVE_CLASS == 32
@@ -36,13 +36,13 @@ elf_get_dynamic_info (struct link_map *l, bool bootstrap,
#endif
#ifndef STATIC_PIE_BOOTSTRAP
- if (!bootstrap && l->l_ld == NULL)
+ if (!bootstrap && l->l_public.l_ld == NULL)
return;
#endif
ElfW(Dyn) **info = l->l_info;
- for (ElfW(Dyn) *dyn = l->l_ld; dyn->d_tag != DT_NULL; dyn++)
+ for (ElfW(Dyn) *dyn = l->l_public.l_ld; dyn->d_tag != DT_NULL; dyn++)
{
d_tag_utype i;
@@ -69,9 +69,9 @@ elf_get_dynamic_info (struct link_map *l, bool bootstrap,
}
/* Don't adjust .dynamic unnecessarily. */
- if (l->l_addr != 0 && dl_relocate_ld (l))
+ if (l->l_public.l_addr != 0 && dl_relocate_ld (l))
{
- ElfW(Addr) l_addr = l->l_addr;
+ ElfW(Addr) l_addr = l->l_public.l_addr;
# define ADJUST_DYN_INFO(tag) \
do \
diff --git a/elf/libc-early-init.h b/elf/libc-early-init.h
index e3e9dcd4f5..318630dc1d 100644
--- a/elf/libc-early-init.h
+++ b/elf/libc-early-init.h
@@ -19,12 +19,12 @@
#ifndef _LIBC_EARLY_INIT_H
#define _LIBC_EARLY_INIT_H
-struct link_map;
+struct link_map_private;
/* If LIBC_MAP is not NULL, look up the __libc_early_init symbol in it
and call this function, with INITIAL as the argument. */
-void _dl_call_libc_early_init (struct link_map *libc_map, _Bool initial)
- attribute_hidden;
+void _dl_call_libc_early_init (struct link_map_private *libc_map,
+ _Bool initial) attribute_hidden;
/* In the shared case, this function is defined in libc.so and invoked
from ld.so (or on the first static dlopen) after complete relocation
diff --git a/elf/loadtest.c b/elf/loadtest.c
index b5eab5e93c..ca7b634347 100644
--- a/elf/loadtest.c
+++ b/elf/loadtest.c
@@ -70,15 +70,16 @@ static const struct
#include <include/link.h>
-#define MAPS ((struct link_map *) _r_debug.r_map)
+#define MAPS (l_private (_r_debug.r_map))
#define OUT \
do \
{ \
- for (map = MAPS; map != NULL; map = map->l_next) \
+ for (map = MAPS; map != NULL; map = l_next (map)) \
if (map->l_type == lt_loaded) \
printf ("name = \"%s\", direct_opencount = %d\n", \
- map->l_name, (int) map->l_direct_opencount); \
+ map->l_public.l_name, \
+ (int) map->l_direct_opencount); \
fflush (stdout); \
} \
while (0)
@@ -90,7 +91,7 @@ main (int argc, char *argv[])
int debug = argc > 1 && argv[1][0] != '\0';
int count = TEST_ROUNDS;
int result = 0;
- struct link_map *map;
+ struct link_map_private *map;
mtrace ();
@@ -153,13 +154,15 @@ main (int argc, char *argv[])
{
/* In this case none of the objects above should be
present. */
- for (map = MAPS; map != NULL; map = map->l_next)
+ for (map = MAPS; map != NULL; map = l_next (map))
if (map->l_type == lt_loaded
- && (strstr (map->l_name, testobjs[0].name) != NULL
- || strstr (map->l_name, testobjs[1].name) != NULL
- || strstr (map->l_name, testobjs[5].name) != NULL))
+ && (strstr (map->l_public.l_name, testobjs[0].name) != NULL
+ || strstr (map->l_public.l_name,
+ testobjs[1].name) != NULL
+ || strstr (map->l_public.l_name,
+ testobjs[5].name) != NULL))
{
- printf ("`%s' is still loaded\n", map->l_name);
+ printf ("`%s' is still loaded\n", map->l_public.l_name);
result = 1;
}
}
@@ -173,10 +176,9 @@ main (int argc, char *argv[])
for (count = 0; count < (int) NOBJS; ++count)
if (testobjs[count].handle != NULL)
{
+ struct link_map_private *l = testobjs[count].handle;
printf ("\nclose: %s: l_initfini = %p, l_versions = %p\n",
- testobjs[count].name,
- ((struct link_map *) testobjs[count].handle)->l_initfini,
- ((struct link_map *) testobjs[count].handle)->l_versions);
+ testobjs[count].name, l->l_initfini, l->l_versions);
if (dlclose (testobjs[count].handle) != 0)
{
@@ -186,11 +188,11 @@ main (int argc, char *argv[])
}
/* Check whether all files are unloaded. */
- for (map = MAPS; map != NULL; map = map->l_next)
+ for (map = MAPS; map != NULL; map = l_next (map))
if (map->l_type == lt_loaded)
{
printf ("name = \"%s\", direct_opencount = %d\n",
- map->l_name, (int) map->l_direct_opencount);
+ map->l_public.l_name, (int) map->l_direct_opencount);
result = 1;
}
diff --git a/elf/neededtest.c b/elf/neededtest.c
index 3cea499314..1fce50b81a 100644
--- a/elf/neededtest.c
+++ b/elf/neededtest.c
@@ -5,12 +5,12 @@
#include <stdlib.h>
#include <string.h>
-#define MAPS ((struct link_map *) _r_debug.r_map)
+#define MAPS (l_private (_r_debug.r_map))
static int
check_loaded_objects (const char **loaded)
{
- struct link_map *lm;
+ struct link_map_private *lm;
int n;
int *found = NULL;
int errors = 0;
@@ -26,16 +26,19 @@ check_loaded_objects (const char **loaded)
printf(" Name\n");
printf(" --------------------------------------------------------\n");
- for (lm = MAPS; lm; lm = lm->l_next)
+ for (lm = MAPS; lm; lm = l_next (lm))
{
- if (lm->l_name && lm->l_name[0])
- printf(" %s, count = %d\n", lm->l_name, (int) lm->l_direct_opencount);
- if (lm->l_type == lt_loaded && lm->l_name)
+ if (lm->l_public.l_name && lm->l_public.l_name[0])
+ printf(" %s, count = %d\n",
+ lm->l_public.l_name,
+ (int) lm->l_direct_opencount);
+ if (lm->l_type == lt_loaded && lm->l_public.l_name)
{
int match = 0;
for (n = 0; loaded[n] != NULL; n++)
{
- if (strcmp (basename (loaded[n]), basename (lm->l_name)) == 0)
+ if (strcmp (basename (loaded[n]),
+ basename (lm->l_public.l_name)) == 0)
{
found[n] = 1;
match = 1;
@@ -46,7 +49,7 @@ check_loaded_objects (const char **loaded)
if (match == 0)
{
++errors;
- printf ("ERRORS: %s is not unloaded\n", lm->l_name);
+ printf ("ERRORS: %s is not unloaded\n", lm->l_public.l_name);
}
}
}
diff --git a/elf/neededtest2.c b/elf/neededtest2.c
index 17c75f2ba3..00b5dd0cb1 100644
--- a/elf/neededtest2.c
+++ b/elf/neededtest2.c
@@ -5,12 +5,12 @@
#include <stdlib.h>
#include <string.h>
-#define MAPS ((struct link_map *) _r_debug.r_map)
+#define MAPS (l_private (_r_debug.r_map))
static int
check_loaded_objects (const char **loaded)
{
- struct link_map *lm;
+ struct link_map_private *lm;
int n;
int *found = NULL;
int errors = 0;
@@ -26,16 +26,18 @@ check_loaded_objects (const char **loaded)
printf(" Name\n");
printf(" --------------------------------------------------------\n");
- for (lm = MAPS; lm; lm = lm->l_next)
+ for (lm = MAPS; lm; lm = l_next (lm))
{
- if (lm->l_name && lm->l_name[0])
- printf(" %s, count = %d\n", lm->l_name, (int) lm->l_direct_opencount);
- if (lm->l_type == lt_loaded && lm->l_name)
+ if (lm->l_public.l_name && lm->l_public.l_name[0])
+ printf(" %s, count = %d\n",
+ lm->l_public.l_name, (int) lm->l_direct_opencount);
+ if (lm->l_type == lt_loaded && lm->l_public.l_name)
{
int match = 0;
for (n = 0; loaded[n] != NULL; n++)
{
- if (strcmp (basename (loaded[n]), basename (lm->l_name)) == 0)
+ if (strcmp (basename (loaded[n]),
+ basename (lm->l_public.l_name)) == 0)
{
found[n] = 1;
match = 1;
@@ -46,7 +48,7 @@ check_loaded_objects (const char **loaded)
if (match == 0)
{
++errors;
- printf ("ERRORS: %s is not unloaded\n", lm->l_name);
+ printf ("ERRORS: %s is not unloaded\n", lm->l_public.l_name);
}
}
}
diff --git a/elf/neededtest3.c b/elf/neededtest3.c
index 41970cf2c7..cb625649fa 100644
--- a/elf/neededtest3.c
+++ b/elf/neededtest3.c
@@ -5,12 +5,12 @@
#include <stdlib.h>
#include <string.h>
-#define MAPS ((struct link_map *) _r_debug.r_map)
+#define MAPS (l_private (_r_debug.r_map))
static int
check_loaded_objects (const char **loaded)
{
- struct link_map *lm;
+ struct link_map_private *lm;
int n;
int *found = NULL;
int errors = 0;
@@ -26,16 +26,18 @@ check_loaded_objects (const char **loaded)
printf(" Name\n");
printf(" --------------------------------------------------------\n");
- for (lm = MAPS; lm; lm = lm->l_next)
+ for (lm = MAPS; lm; lm = l_next (lm))
{
- if (lm->l_name && lm->l_name[0])
- printf(" %s, count = %d\n", lm->l_name, (int) lm->l_direct_opencount);
- if (lm->l_type == lt_loaded && lm->l_name)
+ if (lm->l_public.l_name && lm->l_public.l_name[0])
+ printf(" %s, count = %d\n",
+ lm->l_public.l_name, (int) lm->l_direct_opencount);
+ if (lm->l_type == lt_loaded && lm->l_public.l_name)
{
int match = 0;
for (n = 0; loaded[n] != NULL; n++)
{
- if (strcmp (basename (loaded[n]), basename (lm->l_name)) == 0)
+ if (strcmp (basename (loaded[n]),
+ basename (lm->l_public.l_name)) == 0)
{
found[n] = 1;
match = 1;
@@ -46,7 +48,7 @@ check_loaded_objects (const char **loaded)
if (match == 0)
{
++errors;
- printf ("ERRORS: %s is not unloaded\n", lm->l_name);
+ printf ("ERRORS: %s is not unloaded\n", lm->l_public.l_name);
}
}
}
diff --git a/elf/neededtest4.c b/elf/neededtest4.c
index 0ae0b7ff47..9f5d5fcbc3 100644
--- a/elf/neededtest4.c
+++ b/elf/neededtest4.c
@@ -5,12 +5,12 @@
#include <stdlib.h>
#include <string.h>
-#define MAPS ((struct link_map *) _r_debug.r_map)
+#define MAPS (l_private (_r_debug.r_map))
static int
check_loaded_objects (const char **loaded)
{
- struct link_map *lm;
+ struct link_map_private *lm;
int n;
int *found = NULL;
int errors = 0;
@@ -26,16 +26,18 @@ check_loaded_objects (const char **loaded)
printf(" Name\n");
printf(" --------------------------------------------------------\n");
- for (lm = MAPS; lm; lm = lm->l_next)
+ for (lm = MAPS; lm; lm = l_next (lm))
{
- if (lm->l_name && lm->l_name[0])
- printf(" %s, count = %d\n", lm->l_name, (int) lm->l_direct_opencount);
- if (lm->l_type == lt_loaded && lm->l_name)
+ if (lm->l_public.l_name && lm->l_public.l_name[0])
+ printf(" %s, count = %d\n",
+ lm->l_public.l_name, (int) lm->l_direct_opencount);
+ if (lm->l_type == lt_loaded && lm->l_public.l_name)
{
int match = 0;
for (n = 0; loaded[n] != NULL; n++)
{
- if (strcmp (basename (loaded[n]), basename (lm->l_name)) == 0)
+ if (strcmp (basename (loaded[n]),
+ basename (lm->l_public.l_name)) == 0)
{
found[n] = 1;
match = 1;
@@ -46,7 +48,7 @@ check_loaded_objects (const char **loaded)
if (match == 0)
{
++errors;
- printf ("ERRORS: %s is not unloaded\n", lm->l_name);
+ printf ("ERRORS: %s is not unloaded\n", lm->l_public.l_name);
}
}
}
diff --git a/elf/rtld.c b/elf/rtld.c
index 05cbcee24a..92d8fa6fd4 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -139,7 +139,7 @@ static void audit_list_add_string (struct audit_list *, const char *);
segment at TAG (either DT_AUDIT or DT_DEPAUDIT). Must be called
before audit_list_next. */
static void audit_list_add_dynamic_tag (struct audit_list *,
- struct link_map *,
+ struct link_map_private *,
unsigned int tag);
/* Extract the next audit module from the audit list. Only modules
@@ -218,8 +218,8 @@ audit_list_add_string (struct audit_list *list, const char *string)
}
static void
-audit_list_add_dynamic_tag (struct audit_list *list, struct link_map *main_map,
- unsigned int tag)
+audit_list_add_dynamic_tag (struct audit_list *list,
+ struct link_map_private *main_map, unsigned int tag)
{
ElfW(Dyn) *info = main_map->l_info[ADDRIDX (tag)];
const char *strtab = (const char *) D_PTR (main_map, l_info[DT_STRTAB]);
@@ -417,7 +417,7 @@ static ElfW(Addr) _dl_start_final (void *arg);
#else
struct dl_start_final_info
{
- struct link_map l;
+ struct link_map_private l;
RTLD_TIMING_VAR (start_time);
};
static ElfW(Addr) _dl_start_final (void *arg,
@@ -466,8 +466,8 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
/* Transfer data about ourselves to the permanent link_map structure. */
#ifndef DONT_USE_BOOTSTRAP_MAP
- GL(dl_rtld_map).l_addr = info->l.l_addr;
- GL(dl_rtld_map).l_ld = info->l.l_ld;
+ GL(dl_rtld_map).l_public.l_addr = info->l.l_public.l_addr;
+ GL(dl_rtld_map).l_public.l_ld = info->l.l_public.l_ld;
GL(dl_rtld_map).l_ld_readonly = info->l.l_ld_readonly;
memcpy (GL(dl_rtld_map).l_info, info->l.l_info,
sizeof GL(dl_rtld_map).l_info);
@@ -541,10 +541,11 @@ _dl_start (void *arg)
#endif
/* Figure out the run-time load address of the dynamic linker itself. */
- bootstrap_map.l_addr = elf_machine_load_address ();
+ bootstrap_map.l_public.l_addr = elf_machine_load_address ();
/* Read our own dynamic section and fill in the info array. */
- bootstrap_map.l_ld = (void *) bootstrap_map.l_addr + elf_machine_dynamic ();
+ bootstrap_map.l_public.l_ld
+ = (void *) bootstrap_map.l_public.l_addr + elf_machine_dynamic ();
bootstrap_map.l_ld_readonly = DL_RO_DYN_SECTION;
elf_get_dynamic_info (&bootstrap_map, true, false);
@@ -556,7 +557,7 @@ _dl_start (void *arg)
ELF_MACHINE_BEFORE_RTLD_RELOC (&bootstrap_map, bootstrap_map.l_info);
#endif
- if (bootstrap_map.l_addr)
+ if (bootstrap_map.l_public.l_addr)
{
/* Relocate ourselves so we can do normal function calls and
data access using the global offset table. */
@@ -593,7 +594,7 @@ _dl_start (void *arg)
/* Arguments to relocate_doit. */
struct relocate_args
{
- struct link_map *l;
+ struct link_map_private *l;
int reloc_mode;
};
@@ -601,22 +602,22 @@ struct map_args
{
/* Argument to map_doit. */
const char *str;
- struct link_map *loader;
+ struct link_map_private *loader;
int mode;
/* Return value of map_doit. */
- struct link_map *map;
+ struct link_map_private *map;
};
struct dlmopen_args
{
const char *fname;
- struct link_map *map;
+ struct link_map_private *map;
};
struct lookup_args
{
const char *name;
- struct link_map *map;
+ struct link_map_private *map;
void *result;
};
@@ -679,7 +680,7 @@ version_check_doit (void *a)
}
-static inline struct link_map *
+static inline struct link_map_private *
find_needed (const char *name)
{
struct r_scope_elem *scope = &GL(dl_ns)[LM_ID_BASE]._ns_loaded->l_searchlist;
@@ -694,7 +695,7 @@ find_needed (const char *name)
}
static int
-match_version (const char *string, struct link_map *map)
+match_version (const char *string, struct link_map_private *map)
{
const char *strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
ElfW(Verdef) *def;
@@ -704,7 +705,7 @@ match_version (const char *string, struct link_map *map)
/* The file has no symbol versioning. */
return 0;
- def = (ElfW(Verdef) *) ((char *) map->l_addr
+ def = (ElfW(Verdef) *) ((char *) map->l_public.l_addr
+ map->l_info[VERDEFTAG]->d_un.d_ptr);
while (1)
{
@@ -759,8 +760,8 @@ init_tls (size_t naudit)
but the base one can be filled at this time. */
assert (GL(dl_ns)[LM_ID_BASE + 1]._ns_loaded == NULL);
int i = 0;
- for (struct link_map *l = GL(dl_ns)[LM_ID_BASE]._ns_loaded; l != NULL;
- l = l->l_next)
+ for (struct link_map_private *l = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
+ l != NULL; l = l_next (l))
if (l->l_tls_blocksize != 0)
{
/* This is a module with TLS data. Store the map reference.
@@ -799,7 +800,8 @@ cannot allocate TLS data structures for initial thread\n");
}
static unsigned int
-do_preload (const char *fname, struct link_map *main_map, const char *where)
+do_preload (const char *fname, struct link_map_private *main_map,
+ const char *where)
{
const char *objname;
const char *err_str = NULL;
@@ -863,8 +865,8 @@ security_init (void)
ignored since it is insecure.) Return the number of preloads
performed. Ditto for --preload command argument. */
unsigned int
-handle_preload_list (const char *preloadlist, struct link_map *main_map,
- const char *where)
+handle_preload_list (const char *preloadlist,
+ struct link_map_private *main_map, const char *where)
{
unsigned int npreloads = 0;
const char *p = preloadlist;
@@ -897,7 +899,7 @@ handle_preload_list (const char *preloadlist, struct link_map *main_map,
appropriate interfaces, or it expects a more recent library
version than what the dynamic linker provides. */
static void
-unload_audit_module (struct link_map *map, int original_tls_idx)
+unload_audit_module (struct link_map_private *map, int original_tls_idx)
{
#ifndef NDEBUG
Lmid_t ns = map->l_ns;
@@ -970,7 +972,7 @@ load_audit_module (const char *name, struct audit_ifaces **last_audit)
if (GLRO(dl_debug_mask) & DL_DEBUG_FILES)
_dl_debug_printf ("\
file=%s [%lu]; audit interface function la_version returned zero; ignored.\n",
- dlmargs.map->l_name, dlmargs.map->l_ns);
+ dlmargs.map->l_public.l_name, dlmargs.map->l_ns);
unload_audit_module (dlmargs.map, original_tls_idx);
return;
}
@@ -1044,7 +1046,8 @@ ERROR: audit interface '%s' requires version %d (maximum supported version %d);
/* Load all audit modules. */
static void
-load_audit_modules (struct link_map *main_map, struct audit_list *audit_list)
+load_audit_modules (struct link_map_private *main_map,
+ struct audit_list *audit_list)
{
struct audit_ifaces *last_audit = NULL;
@@ -1068,7 +1071,7 @@ load_audit_modules (struct link_map *main_map, struct audit_list *audit_list)
/* Check if the executable is not actually dynamically linked, and
invoke it directly in that case. */
static void
-rtld_chain_load (struct link_map *main_map, char *argv0)
+rtld_chain_load (struct link_map_private *main_map, char *argv0)
{
/* The dynamic loader run against itself. */
const char *rtld_soname
@@ -1107,7 +1110,7 @@ rtld_chain_load (struct link_map *main_map, char *argv0)
/* Called to complete the initialization of the link map for the main
executable. Returns true if there is a PT_INTERP segment. */
static bool
-rtld_setup_main_map (struct link_map *main_map)
+rtld_setup_main_map (struct link_map_private *main_map)
{
/* These have already been filled in right after _dl_new_object, or
as part of _dl_map_object. */
@@ -1144,12 +1147,13 @@ rtld_setup_main_map (struct link_map *main_map)
{
case PT_PHDR:
/* Find out the load address. */
- main_map->l_addr = (ElfW(Addr)) phdr - ph->p_vaddr;
+ main_map->l_public.l_addr = (ElfW(Addr)) phdr - ph->p_vaddr;
break;
case PT_DYNAMIC:
/* This tells us where to find the dynamic section,
which tells us everything we need to do. */
- main_map->l_ld = (void *) main_map->l_addr + ph->p_vaddr;
+ main_map->l_public.l_ld
+ = (void *) main_map->l_public.l_addr + ph->p_vaddr;
main_map->l_ld_readonly = (ph->p_flags & PF_W) == 0;
break;
case PT_INTERP:
@@ -1159,7 +1163,7 @@ rtld_setup_main_map (struct link_map *main_map)
dlopen call or DT_NEEDED entry, for something that wants to link
against the dynamic linker as a shared library, will know that
the shared object is already loaded. */
- _dl_rtld_libname.name = ((const char *) main_map->l_addr
+ _dl_rtld_libname.name = ((const char *) main_map->l_public.l_addr
+ ph->p_vaddr);
/* _dl_rtld_libname.next = NULL; Already zero. */
GL(dl_rtld_map).l_libname = &_dl_rtld_libname;
@@ -1172,7 +1176,7 @@ rtld_setup_main_map (struct link_map *main_map)
ElfW(Addr) allocend;
/* Remember where the main program starts in memory. */
- mapstart = (main_map->l_addr
+ mapstart = (main_map->l_public.l_addr
+ (ph->p_vaddr & ~(GLRO(dl_pagesize) - 1)));
if (main_map->l_map_start > mapstart)
main_map->l_map_start = mapstart;
@@ -1182,7 +1186,7 @@ rtld_setup_main_map (struct link_map *main_map)
main_map->l_contiguous = 0;
/* Also where it ends. */
- allocend = main_map->l_addr + ph->p_vaddr + ph->p_memsz;
+ allocend = main_map->l_public.l_addr + ph->p_vaddr + ph->p_memsz;
if (main_map->l_map_end < allocend)
main_map->l_map_end = allocend;
@@ -1241,14 +1245,14 @@ rtld_setup_main_map (struct link_map *main_map)
the executable is actually an ET_DYN object. */
if (main_map->l_tls_initimage != NULL)
main_map->l_tls_initimage
- = (char *) main_map->l_tls_initimage + main_map->l_addr;
+ = (char *) main_map->l_tls_initimage + main_map->l_public.l_addr;
if (! main_map->l_map_end)
main_map->l_map_end = ~0;
- if (! GL(dl_rtld_map).l_libname && GL(dl_rtld_map).l_name)
+ if (! GL(dl_rtld_map).l_libname && GL(dl_rtld_map).l_public.l_name)
{
/* We were invoked directly, so the program might not have a
PT_INTERP. */
- _dl_rtld_libname.name = GL(dl_rtld_map).l_name;
+ _dl_rtld_libname.name = GL(dl_rtld_map).l_public.l_name;
/* _dl_rtld_libname.next = NULL; Already zero. */
GL(dl_rtld_map).l_libname = &_dl_rtld_libname;
}
@@ -1320,7 +1324,7 @@ dl_main (const ElfW(Phdr) *phdr,
ElfW(Addr) *user_entry,
ElfW(auxv_t) *auxv)
{
- struct link_map *main_map;
+ struct link_map_private *main_map;
size_t file_size;
char *file;
unsigned int i;
@@ -1370,7 +1374,7 @@ dl_main (const ElfW(Phdr) *phdr,
char **orig_argv = _dl_argv;
/* Note the place where the dynamic linker actually came from. */
- GL(dl_rtld_map).l_name = rtld_progname;
+ GL(dl_rtld_map).l_public.l_name = rtld_progname;
while (_dl_argc > 1)
if (! strcmp (_dl_argv[1], "--list"))
@@ -1572,7 +1576,7 @@ dl_main (const ElfW(Phdr) *phdr,
the malloc() implementation used at this point is the dummy
implementation, which has no real free() function, it does not
make sense to free the old string first. */
- main_map->l_name = (char *) "";
+ main_map->l_public.l_name = (char *) "";
*user_entry = main_map->l_entry;
/* Set bit indicating this is the main program map. */
@@ -1687,13 +1691,13 @@ dl_main (const ElfW(Phdr) *phdr,
executable using us as the program interpreter. Exit with an
error if we were not able to load the binary or no interpreter
is specified (i.e., this is not a dynamically linked binary). */
- if (main_map->l_ld == NULL)
+ if (main_map->l_public.l_ld == NULL)
_exit (1);
_exit (has_interp ? 0 : 2);
}
- struct link_map **first_preload = &GL(dl_rtld_map).l_next;
+ struct link_map **first_preload = &GL(dl_rtld_map).l_public.l_next;
/* Set up the data structures for the system-supplied DSO early,
so they can influence _dl_init_paths. */
setup_vdso (main_map, &first_preload);
@@ -1706,20 +1710,20 @@ dl_main (const ElfW(Phdr) *phdr,
call_init_paths (&state);
/* Initialize _r_debug_extended. */
- struct r_debug *r = _dl_debug_initialize (GL(dl_rtld_map).l_addr,
+ struct r_debug *r = _dl_debug_initialize (GL(dl_rtld_map).l_public.l_addr,
LM_ID_BASE);
r->r_state = RT_CONSISTENT;
/* Put the link_map for ourselves on the chain so it can be found by
name. Note that at this point the global chain of link maps contains
exactly one element, which is pointed to by dl_loaded. */
- if (! GL(dl_rtld_map).l_name)
+ if (! GL(dl_rtld_map).l_public.l_name)
/* If not invoked directly, the dynamic linker shared object file was
found by the PT_INTERP name. */
- GL(dl_rtld_map).l_name = (char *) GL(dl_rtld_map).l_libname->name;
+ GL(dl_rtld_map).l_public.l_name = (char *) GL(dl_rtld_map).l_libname->name;
GL(dl_rtld_map).l_type = lt_library;
- main_map->l_next = &GL(dl_rtld_map);
- GL(dl_rtld_map).l_prev = main_map;
+ main_map->l_public.l_next = &GL(dl_rtld_map).l_public;
+ GL(dl_rtld_map).l_public.l_prev = &main_map->l_public;
++GL(dl_ns)[LM_ID_BASE]._ns_nloaded;
++GL(dl_load_adds);
@@ -1808,7 +1812,7 @@ dl_main (const ElfW(Phdr) *phdr,
variable and via the file /etc/ld.so.preload. The latter can also
be used when security is enabled. */
assert (*first_preload == NULL);
- struct link_map **preloads = NULL;
+ struct link_map_private **preloads = NULL;
unsigned int npreloads = 0;
if (__glibc_unlikely (state.preloadlist != NULL))
@@ -1920,7 +1924,7 @@ dl_main (const ElfW(Phdr) *phdr,
i = 0;
do
{
- preloads[i++] = l;
+ preloads[i++] = l_private (l);
l = l->l_next;
} while (l);
assert (i == npreloads);
@@ -1948,9 +1952,9 @@ dl_main (const ElfW(Phdr) *phdr,
main_map->l_searchlist.r_list[--i]->l_global = 1;
/* Remove _dl_rtld_map from the chain. */
- GL(dl_rtld_map).l_prev->l_next = GL(dl_rtld_map).l_next;
- if (GL(dl_rtld_map).l_next != NULL)
- GL(dl_rtld_map).l_next->l_prev = GL(dl_rtld_map).l_prev;
+ GL(dl_rtld_map).l_public.l_prev->l_next = GL(dl_rtld_map).l_public.l_next;
+ if (GL(dl_rtld_map).l_public.l_next != NULL)
+ GL(dl_rtld_map).l_public.l_next->l_prev = GL(dl_rtld_map).l_public.l_prev;
for (i = 1; i < main_map->l_searchlist.r_nlist; ++i)
if (main_map->l_searchlist.r_list[i] == &GL(dl_rtld_map))
@@ -1965,17 +1969,21 @@ dl_main (const ElfW(Phdr) *phdr,
its symbol search order. */
rtld_multiple_ref = true;
- GL(dl_rtld_map).l_prev = main_map->l_searchlist.r_list[i - 1];
+ GL(dl_rtld_map).l_public.l_prev
+ = &main_map->l_searchlist.r_list[i - 1]->l_public;
if (__glibc_likely (state.mode == rtld_mode_normal))
{
- GL(dl_rtld_map).l_next = (i + 1 < main_map->l_searchlist.r_nlist
- ? main_map->l_searchlist.r_list[i + 1]
- : NULL);
+ GL(dl_rtld_map).l_public.l_next
+ = (i + 1 < main_map->l_searchlist.r_nlist
+ ? &main_map->l_searchlist.r_list[i + 1]->l_public
+ : NULL);
#ifdef NEED_DL_SYSINFO_DSO
if (GLRO(dl_sysinfo_map) != NULL
- && GL(dl_rtld_map).l_prev->l_next == GLRO(dl_sysinfo_map)
- && GL(dl_rtld_map).l_next != GLRO(dl_sysinfo_map))
- GL(dl_rtld_map).l_prev = GLRO(dl_sysinfo_map);
+ && (GL(dl_rtld_map).l_public.l_prev->l_next
+ == &GLRO(dl_sysinfo_map)->l_public)
+ && (GL(dl_rtld_map).l_public.l_next
+ != &GLRO(dl_sysinfo_map)->l_public))
+ GL(dl_rtld_map).l_public.l_prev = &GLRO(dl_sysinfo_map)->l_public;
#endif
}
else
@@ -1984,14 +1992,17 @@ dl_main (const ElfW(Phdr) *phdr,
In this case it doesn't matter much where we put the
interpreter object, so we just initialize the list pointer so
that the assertion below holds. */
- GL(dl_rtld_map).l_next = GL(dl_rtld_map).l_prev->l_next;
+ GL(dl_rtld_map).l_public.l_next
+ = GL(dl_rtld_map).l_public.l_prev->l_next;
- assert (GL(dl_rtld_map).l_prev->l_next == GL(dl_rtld_map).l_next);
- GL(dl_rtld_map).l_prev->l_next = &GL(dl_rtld_map);
- if (GL(dl_rtld_map).l_next != NULL)
+ assert (GL(dl_rtld_map).l_public.l_prev->l_next
+ == GL(dl_rtld_map).l_public.l_next);
+ GL(dl_rtld_map).l_public.l_prev->l_next = &GL(dl_rtld_map).l_public;
+ if (GL(dl_rtld_map).l_public.l_next != NULL)
{
- assert (GL(dl_rtld_map).l_next->l_prev == GL(dl_rtld_map).l_prev);
- GL(dl_rtld_map).l_next->l_prev = &GL(dl_rtld_map);
+ assert (GL(dl_rtld_map).l_public.l_next->l_prev
+ == GL(dl_rtld_map).l_public.l_prev);
+ GL(dl_rtld_map).l_public.l_next->l_prev = &GL(dl_rtld_map).l_public;
}
}
@@ -2026,14 +2037,14 @@ dl_main (const ElfW(Phdr) *phdr,
important that we do this before real relocation, because the
functions we call below for output may no longer work properly
after relocation. */
- struct link_map *l;
+ struct link_map_private *l;
if (GLRO(dl_debug_mask) & DL_DEBUG_UNUSED)
{
/* Look through the dependencies of the main executable
and determine which of them is not actually
required. */
- struct link_map *l = main_map;
+ struct link_map_private *l = main_map;
/* Relocate the main executable. */
struct relocate_args args = { .l = l,
@@ -2044,18 +2055,18 @@ dl_main (const ElfW(Phdr) *phdr,
/* This loop depends on the dependencies of the executable to
correspond in number and order to the DT_NEEDED entries. */
- ElfW(Dyn) *dyn = main_map->l_ld;
+ ElfW(Dyn) *dyn = main_map->l_public.l_ld;
bool first = true;
while (dyn->d_tag != DT_NULL)
{
if (dyn->d_tag == DT_NEEDED)
{
- l = l->l_next;
+ l = l_next (l);
#ifdef NEED_DL_SYSINFO_DSO
/* Skip the VDSO since it's not part of the list
of objects we brought in via DT_NEEDED entries. */
if (l == GLRO(dl_sysinfo_map))
- l = l->l_next;
+ l = l_next (l);
#endif
if (!l->l_used)
{
@@ -2065,7 +2076,7 @@ dl_main (const ElfW(Phdr) *phdr,
first = false;
}
- _dl_printf ("\t%s\n", l->l_name);
+ _dl_printf ("\t%s\n", l->l_public.l_name);
}
}
@@ -2078,12 +2089,12 @@ dl_main (const ElfW(Phdr) *phdr,
_dl_printf ("\tstatically linked\n");
else
{
- for (l = state.mode_trace_program ? main_map : main_map->l_next;
- l; l = l->l_next) {
+ for (l = state.mode_trace_program ? main_map : l_next (main_map);
+ l; l = l_next (l)) {
if (l->l_faked)
/* The library was not found. */
_dl_printf ("\t%s => not found\n", l->l_libname->name);
- else if (strcmp (l->l_libname->name, l->l_name) == 0)
+ else if (strcmp (l->l_libname->name, l->l_public.l_name) == 0)
/* Print vDSO-like libraries without a duplicate name. Some
consumers depend on this format. */
_dl_printf ("\t%s (0x%0*zx)\n", l->l_libname->name,
@@ -2092,7 +2103,7 @@ dl_main (const ElfW(Phdr) *phdr,
else
_dl_printf ("\t%s => %s (0x%0*zx)\n",
DSO_FILENAME (l->l_libname->name),
- DSO_FILENAME (l->l_name),
+ DSO_FILENAME (l->l_public.l_name),
(int) sizeof l->l_map_start * 2,
(size_t) l->l_map_start);
}
@@ -2133,7 +2144,7 @@ dl_main (const ElfW(Phdr) *phdr,
i = main_map->l_searchlist.r_nlist;
while (i-- > 0)
{
- struct link_map *l = main_map->l_initfini[i];
+ struct link_map_private *l = main_map->l_initfini[i];
if (l != &GL(dl_rtld_map) && ! l->l_faked)
{
args.l = l;
@@ -2149,9 +2160,9 @@ dl_main (const ElfW(Phdr) *phdr,
/* Print more information. This means here, print information
about the versions needed. */
int first = 1;
- struct link_map *map;
+ struct link_map_private *map;
- for (map = main_map; map != NULL; map = map->l_next)
+ for (map = main_map; map != NULL; map = l_next (map))
{
const char *strtab;
ElfW(Dyn) *dyn = map->l_info[VERNEEDTAG];
@@ -2161,7 +2172,8 @@ dl_main (const ElfW(Phdr) *phdr,
continue;
strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
- ent = (ElfW(Verneed) *) (map->l_addr + dyn->d_un.d_ptr);
+ ent = (ElfW(Verneed) *) (map->l_public.l_addr
+ + dyn->d_un.d_ptr);
if (first)
{
@@ -2169,12 +2181,12 @@ dl_main (const ElfW(Phdr) *phdr,
first = 0;
}
- _dl_printf ("\t%s:\n", DSO_FILENAME (map->l_name));
+ _dl_printf ("\t%s:\n", DSO_FILENAME (map->l_public.l_name));
while (1)
{
ElfW(Vernaux) *aux;
- struct link_map *needed;
+ struct link_map_private *needed;
needed = find_needed (strtab + ent->vn_file);
aux = (ElfW(Vernaux) *) ((char *) ent + ent->vn_aux);
@@ -2186,7 +2198,7 @@ dl_main (const ElfW(Phdr) *phdr,
if (needed != NULL
&& match_version (strtab + aux->vna_name,
needed))
- fname = needed->l_name;
+ fname = needed->l_public.l_name;
_dl_printf ("\t\t%s (%s) %s=> %s\n",
strtab + ent->vn_file,
@@ -2236,7 +2248,7 @@ dl_main (const ElfW(Phdr) *phdr,
{
_dl_debug_printf ("\nInitial object scopes\n");
- for (struct link_map *l = main_map; l != NULL; l = l->l_next)
+ for (struct link_map_private *l = main_map; l != NULL; l = l_next (l))
_dl_show_scope (l, 0);
}
@@ -2268,7 +2280,7 @@ dl_main (const ElfW(Phdr) *phdr,
unsigned i = main_map->l_searchlist.r_nlist;
while (i-- > 0)
{
- struct link_map *l = main_map->l_initfini[i];
+ struct link_map_private *l = main_map->l_initfini[i];
/* While we are at it, help the memory handling a bit. We have to
mark some data structures as allocated with the fake malloc()
@@ -2785,20 +2797,20 @@ print_statistics (const hp_timing_t *rtld_total_timep)
for (unsigned int i = 0; i < scope->r_nlist; i++)
{
- struct link_map *l = scope->r_list [i];
+ struct link_map_private *l = scope->r_list [i];
- if (l->l_addr != 0 && l->l_info[VERSYMIDX (DT_RELCOUNT)])
+ if (l->l_public.l_addr != 0 && l->l_info[VERSYMIDX (DT_RELCOUNT)])
num_relative_relocations
+= l->l_info[VERSYMIDX (DT_RELCOUNT)]->d_un.d_val;
#ifndef ELF_MACHINE_REL_RELATIVE
/* Relative relocations are processed on these architectures if
library is loaded to different address than p_vaddr. */
- if ((l->l_addr != 0)
+ if ((l->l_public.l_addr != 0)
&& l->l_info[VERSYMIDX (DT_RELACOUNT)])
#else
/* On e.g. IA-64 or Alpha, relative relocations are processed
only if library is loaded to different address than p_vaddr. */
- if (l->l_addr != 0 && l->l_info[VERSYMIDX (DT_RELACOUNT)])
+ if (l->l_public.l_addr != 0 && l->l_info[VERSYMIDX (DT_RELACOUNT)])
#endif
num_relative_relocations
+= l->l_info[VERSYMIDX (DT_RELACOUNT)]->d_un.d_val;
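Note (not part of the diff): most of the mechanical churn in rtld.c
above follows from a single rule. The l_next/l_prev chain that debuggers
see links the embedded public headers, not the private structures, so
storing a link takes the address of l_public, and following one goes back
through l_private/l_next. Unlinking ld.so's own map, for example, remains
the usual doubly-linked-list splice, just spelled through l_public; a
sketch of what the corresponding hunk does:

    struct link_map *prev = GL(dl_rtld_map).l_public.l_prev;
    struct link_map *next = GL(dl_rtld_map).l_public.l_next;
    prev->l_next = next;
    if (next != NULL)
      next->l_prev = prev;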
diff --git a/elf/rtld_static_init.c b/elf/rtld_static_init.c
index aec8cc056b..9ce92fa4fc 100644
--- a/elf/rtld_static_init.c
+++ b/elf/rtld_static_init.c
@@ -43,7 +43,7 @@ static const struct dlfcn_hook _dlfcn_hook =
};
void
-__rtld_static_init (struct link_map *map)
+__rtld_static_init (struct link_map_private *map)
{
const ElfW(Sym) *sym
= _dl_lookup_direct (map, "_rtld_global_ro",
diff --git a/elf/setup-vdso.h b/elf/setup-vdso.h
index d92b12a7aa..5e9d2eb820 100644
--- a/elf/setup-vdso.h
+++ b/elf/setup-vdso.h
@@ -17,7 +17,7 @@
<https://www.gnu.org/licenses/>. */
static inline void __attribute__ ((always_inline))
-setup_vdso (struct link_map *main_map __attribute__ ((unused)),
+setup_vdso (struct link_map_private *main_map __attribute__ ((unused)),
struct link_map ***first_preload __attribute__ ((unused)))
{
#ifdef NEED_DL_SYSINFO_DSO
@@ -29,8 +29,8 @@ setup_vdso (struct link_map *main_map __attribute__ ((unused)),
better be, since it's read-only and so we couldn't relocate it).
We just want our data structures to describe it as if we had just
mapped and relocated it normally. */
- struct link_map *l = _dl_new_object ((char *) "", "", lt_library, NULL,
- __RTLD_VDSO, LM_ID_BASE);
+ struct link_map_private *l = _dl_new_object ((char *) "", "", lt_library,
+ NULL, __RTLD_VDSO, LM_ID_BASE);
if (__glibc_likely (l != NULL))
{
l->l_phdr = ((const void *) GLRO(dl_sysinfo_dso)
@@ -41,14 +41,14 @@ setup_vdso (struct link_map *main_map __attribute__ ((unused)),
const ElfW(Phdr) *const ph = &l->l_phdr[i];
if (ph->p_type == PT_DYNAMIC)
{
- l->l_ld = (void *) ph->p_vaddr;
+ l->l_public.l_ld = (void *) ph->p_vaddr;
l->l_ldnum = ph->p_memsz / sizeof (ElfW(Dyn));
l->l_ld_readonly = (ph->p_flags & PF_W) == 0;
}
else if (ph->p_type == PT_LOAD)
{
- if (! l->l_addr)
- l->l_addr = ph->p_vaddr;
+ if (! l->l_public.l_addr)
+ l->l_public.l_addr = ph->p_vaddr;
if (ph->p_vaddr + ph->p_memsz >= l->l_map_end)
l->l_map_end = ph->p_vaddr + ph->p_memsz;
}
@@ -57,9 +57,10 @@ setup_vdso (struct link_map *main_map __attribute__ ((unused)),
assert (ph->p_type != PT_TLS);
}
l->l_map_start = (ElfW(Addr)) GLRO(dl_sysinfo_dso);
- l->l_addr = l->l_map_start - l->l_addr;
- l->l_map_end += l->l_addr;
- l->l_ld = (void *) ((ElfW(Addr)) l->l_ld + l->l_addr);
+ l->l_public.l_addr = l->l_map_start - l->l_public.l_addr;
+ l->l_map_end += l->l_public.l_addr;
+ l->l_public.l_ld = (void *) ((ElfW(Addr)) l->l_public.l_ld
+ + l->l_public.l_addr);
elf_get_dynamic_info (l, false, false);
_dl_setup_hash (l);
l->l_relocated = 1;
@@ -81,7 +82,7 @@ setup_vdso (struct link_map *main_map __attribute__ ((unused)),
char *dsoname = ((char *) D_PTR (l, l_info[DT_STRTAB])
+ l->l_info[DT_SONAME]->d_un.d_val);
l->l_libname->name = dsoname;
- l->l_name = dsoname;
+ l->l_public.l_name = dsoname;
}
/* Add the vDSO to the object list. */
@@ -89,11 +90,11 @@ setup_vdso (struct link_map *main_map __attribute__ ((unused)),
# if IS_IN (rtld)
/* Rearrange the list so this DSO appears after rtld_map. */
- assert (l->l_next == NULL);
- assert (l->l_prev == main_map);
- GL(dl_rtld_map).l_next = l;
- l->l_prev = &GL(dl_rtld_map);
- *first_preload = &l->l_next;
+ assert (l->l_public.l_next == NULL);
+ assert (l->l_public.l_prev == &main_map->l_public);
+ GL(dl_rtld_map).l_public.l_next = &l->l_public;
+ l->l_public.l_prev = &GL(dl_rtld_map).l_public;
+ *first_preload = &l->l_public.l_next;
# else
GL(dl_nns) = 1;
# endif
@@ -102,7 +103,7 @@ setup_vdso (struct link_map *main_map __attribute__ ((unused)),
GLRO(dl_sysinfo_map) = l;
# ifdef NEED_DL_SYSINFO
if (GLRO(dl_sysinfo) == DL_SYSINFO_DEFAULT)
- GLRO(dl_sysinfo) = GLRO(dl_sysinfo_dso)->e_entry + l->l_addr;
+ GLRO(dl_sysinfo) = GLRO(dl_sysinfo_dso)->e_entry + l->l_public.l_addr;
# endif
}
#endif
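Note (not part of the diff): in setup_vdso, l_public.l_addr is a load
bias rather than an absolute address. The hunk above first stashes the
lowest PT_LOAD p_vaddr in l_public.l_addr while scanning the program
headers, then converts it into the bias and rebases the dynamic section
and map end by it. Schematically:

    /* Bias = where the kernel mapped the vDSO minus its link-time
       address; every file-relative address is then bias + vaddr.  */
    l->l_map_start = (ElfW(Addr)) GLRO(dl_sysinfo_dso);
    l->l_public.l_addr = l->l_map_start - l->l_public.l_addr;
    l->l_map_end += l->l_public.l_addr;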
diff --git a/elf/sotruss-lib.c b/elf/sotruss-lib.c
index 59aa6aa1b9..09232b1a4a 100644
--- a/elf/sotruss-lib.c
+++ b/elf/sotruss-lib.c
@@ -173,7 +173,8 @@ la_objopen (struct link_map *map, Lmid_t lmid, uintptr_t *cookie)
int result = 0;
const char *print_name = NULL;
- for (struct libname_list *l = map->l_libname; l != NULL; l = l->next)
+ for (struct libname_list *l = l_private (map)->l_libname; l != NULL;
+ l = l->next)
{
if (print_name == NULL || (print_name[0] == '/' && l->name[0] != '/'))
print_name = l->name;
diff --git a/elf/sprof.c b/elf/sprof.c
index 3eaa4758d6..155da1bd03 100644
--- a/elf/sprof.c
+++ b/elf/sprof.c
@@ -169,7 +169,7 @@ struct shobj
{
const char *name; /* User-provided name. */
- struct link_map *map;
+ struct link_map_private *map;
const char *dynstrtab; /* Dynamic string table of shared object. */
const char *soname; /* Soname of shared object. */
@@ -400,7 +400,7 @@ warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n\
static struct shobj *
load_shobj (const char *name)
{
- struct link_map *map = NULL;
+ struct link_map_private *map = NULL;
struct shobj *result;
ElfW(Addr) mapstart = ~((ElfW(Addr)) 0);
ElfW(Addr) mapend = 0;
@@ -422,11 +422,11 @@ load_shobj (const char *name)
char *load_name = (char *) alloca (strlen (name) + 3);
stpcpy (stpcpy (load_name, "./"), name);
- map = (struct link_map *) dlopen (load_name, RTLD_LAZY | __RTLD_SPROF);
+ map = dlopen (load_name, RTLD_LAZY | __RTLD_SPROF);
}
if (map == NULL)
{
- map = (struct link_map *) dlopen (name, RTLD_LAZY | __RTLD_SPROF);
+ map = dlopen (name, RTLD_LAZY | __RTLD_SPROF);
if (map == NULL)
{
error (0, errno, _("failed to load shared object `%s'"), name);
@@ -460,15 +460,15 @@ load_shobj (const char *name)
mapend = end;
}
- result->lowpc = ROUNDDOWN ((uintptr_t) (mapstart + map->l_addr),
+ result->lowpc = ROUNDDOWN ((uintptr_t) (mapstart + map->l_public.l_addr),
HISTFRACTION * sizeof (HISTCOUNTER));
- result->highpc = ROUNDUP ((uintptr_t) (mapend + map->l_addr),
+ result->highpc = ROUNDUP ((uintptr_t) (mapend + map->l_public.l_addr),
HISTFRACTION * sizeof (HISTCOUNTER));
if (do_test)
printf ("load addr: %0#*" PRIxPTR "\n"
"lower bound PC: %0#*" PRIxPTR "\n"
"upper bound PC: %0#*" PRIxPTR "\n",
- __ELF_NATIVE_CLASS == 32 ? 10 : 18, map->l_addr,
+ __ELF_NATIVE_CLASS == 32 ? 10 : 18, map->l_public.l_addr,
__ELF_NATIVE_CLASS == 32 ? 10 : 18, result->lowpc,
__ELF_NATIVE_CLASS == 32 ? 10 : 18, result->highpc);
@@ -547,11 +547,11 @@ load_shobj (const char *name)
abort ();
/* And we need the shared object file descriptor again. */
- fd = open (map->l_name, O_RDONLY);
+ fd = open (map->l_public.l_name, O_RDONLY);
if (fd == -1)
/* Dooh, this really shouldn't happen. We know the file is available. */
error (EXIT_FAILURE, errno, _("Reopening shared object `%s' failed"),
- map->l_name);
+ map->l_public.l_name);
/* Map the section header. */
size_t size = ehdr->e_shnum * sizeof (ElfW(Shdr));
@@ -879,8 +879,8 @@ load_profdata (const char *name, struct shobj *shobj)
!= offsetof (struct gmon_hist_hdr, dimen_abbrev)))
abort ();
- hist_hdr.low_pc = (char *) shobj->lowpc - shobj->map->l_addr;
- hist_hdr.high_pc = (char *) shobj->highpc - shobj->map->l_addr;
+ hist_hdr.low_pc = (char *) shobj->lowpc - shobj->map->l_public.l_addr;
+ hist_hdr.high_pc = (char *) shobj->highpc - shobj->map->l_public.l_addr;
if (do_test)
printf ("low_pc = %p\nhigh_pc = %p\n", hist_hdr.low_pc, hist_hdr.high_pc);
hist_hdr.hist_size = shobj->kcountsize / sizeof (HISTCOUNTER);
diff --git a/elf/tlsdeschtab.h b/elf/tlsdeschtab.h
index da92834234..a5efddcdf7 100644
--- a/elf/tlsdeschtab.h
+++ b/elf/tlsdeschtab.h
@@ -44,7 +44,7 @@ eq_tlsdesc (void *p, void *q)
}
inline static size_t
-map_generation (struct link_map *map)
+map_generation (struct link_map_private *map)
{
size_t idx = map->l_tls_modid;
struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);
@@ -82,7 +82,7 @@ map_generation (struct link_map *map)
when using dynamic TLS. This requires allocation, returns NULL on
allocation failure. */
void *
-_dl_make_tlsdesc_dynamic (struct link_map *map, size_t ti_offset)
+_dl_make_tlsdesc_dynamic (struct link_map_private *map, size_t ti_offset)
{
struct hashtab *ht;
void **entry;
diff --git a/elf/tst-_dl_addr_inside_object.c b/elf/tst-_dl_addr_inside_object.c
index 363a1990dc..62809c03a4 100644
--- a/elf/tst-_dl_addr_inside_object.c
+++ b/elf/tst-_dl_addr_inside_object.c
@@ -22,18 +22,19 @@
#include <elf.h>
#include <libc-symbols.h>
-extern int _dl_addr_inside_object (struct link_map *l, const ElfW(Addr) addr);
+extern int _dl_addr_inside_object (struct link_map_private *l,
+ const ElfW(Addr) addr);
static int
do_test (void)
{
int ret, err = 0;
ElfW(Addr) addr;
- struct link_map map;
+ struct link_map_private map;
ElfW(Phdr) header;
map.l_phdr = &header;
map.l_phnum = 1;
- map.l_addr = 0x0;
+ map.l_public.l_addr = 0x0;
/* Segment spans 0x2000 -> 0x4000. */
header.p_vaddr = 0x2000;
header.p_memsz = 0x2000;
@@ -191,12 +192,12 @@ do_test (void)
/* Attempt to wrap addr into the segment.
Pick a load address in the middle of the address space.
Place the test address at 0x0 so it wraps to the middle again. */
- map.l_addr = 0x0 - 0x1;
- map.l_addr = map.l_addr / 2;
+ map.l_public.l_addr = 0x0 - 0x1;
+ map.l_public.l_addr = map.l_public.l_addr / 2;
addr = 0;
/* Setup a segment covering 1/2 the address space. */
header.p_vaddr = 0x0;
- header.p_memsz = 0x0 - 0x1 - map.l_addr;
+ header.p_memsz = 0x0 - 0x1 - map.l_public.l_addr;
/* No matter where you place addr everything is shifted modulo l_addr
and even with this underflow you're always 1 byte away from being
in the range. */
diff --git a/elf/tst-audit19a.c b/elf/tst-audit19a.c
index 2b96b3bb53..48f607c780 100644
--- a/elf/tst-audit19a.c
+++ b/elf/tst-audit19a.c
@@ -26,7 +26,7 @@ do_test (void)
{
void *h = xdlopen ("tst-auditmod19a.so", RTLD_NOW);
- struct link_map *lmap;
+ struct link_map_private *lmap;
TEST_VERIFY_EXIT (dlinfo (h, RTLD_DI_LINKMAP, &lmap) == 0);
/* The internal array is only allocated if profiling is enabled. */
diff --git a/elf/tst-dl_find_object-threads.c b/elf/tst-dl_find_object-threads.c
index 0cc5e8920f..d940c73053 100644
--- a/elf/tst-dl_find_object-threads.c
+++ b/elf/tst-dl_find_object-threads.c
@@ -30,7 +30,7 @@
/* Computes the expected _dl_find_object result directly from the
map. */
static void
-from_map (struct link_map *l, struct dl_find_object *expected)
+from_map (struct link_map_private *l, struct dl_find_object *expected)
{
struct dl_find_object_internal internal;
_dl_find_object_from_map (l, &internal);
@@ -169,9 +169,9 @@ static void
start_verify (int number, struct verify_data *data)
{
data->soname = soname (number);
- struct link_map *l = xdlopen (data->soname, RTLD_NOW);
+ struct link_map_private *l = xdlopen (data->soname, RTLD_NOW);
from_map (l, &data->dlfo);
- TEST_VERIFY_EXIT (data->dlfo.dlfo_link_map == l);
+ TEST_VERIFY_EXIT (data->dlfo.dlfo_link_map == &l->l_public);
char *sym = symbol (number);
data->address = xdlsym (data->dlfo.dlfo_link_map, sym);
free (sym);
diff --git a/elf/tst-dl_find_object.c b/elf/tst-dl_find_object.c
index eaaa2cf91f..ce72439cdd 100644
--- a/elf/tst-dl_find_object.c
+++ b/elf/tst-dl_find_object.c
@@ -32,7 +32,7 @@ static char main_program_data;
/* Computes the expected _dl_find_object result directly from the
map. */
static void
-from_map (struct link_map *l, struct dl_find_object *expected)
+from_map (struct link_map_private *l, struct dl_find_object *expected)
{
struct dl_find_object_internal internal;
_dl_find_object_from_map (l, &internal);
@@ -71,7 +71,7 @@ check (void *address,
__FILE__, line, address,
actual.dlfo_flags, expected->dlfo_flags);
}
- if (expected->dlfo_link_map->l_contiguous)
+ if (l_private (expected->dlfo_link_map)->l_contiguous)
{
/* If the mappings are not contiguous, the actual and expected
mappings may differ, so this subtest will not work. */
@@ -136,15 +136,16 @@ check_initial (void)
char **tzname = xdlsym (NULL, "tzname");
/* The main executable has an unnamed link map. */
- struct link_map *main_map = (struct link_map *) debug->r_map;
- TEST_COMPARE_STRING (main_map->l_name, "");
+ struct link_map_private *main_map = l_private (debug->r_map);
+ TEST_COMPARE_STRING (main_map->l_public.l_name, "");
/* The link map of the dynamic linker. */
- struct link_map *rtld_map = xdlopen (LD_SO, RTLD_LAZY | RTLD_NOLOAD);
+ struct link_map_private *rtld_map = xdlopen (LD_SO, RTLD_LAZY | RTLD_NOLOAD);
TEST_VERIFY_EXIT (rtld_map != NULL);
/* The link map of libc.so. */
- struct link_map *libc_map = xdlopen (LIBC_SO, RTLD_LAZY | RTLD_NOLOAD);
+ struct link_map_private *libc_map
+ = xdlopen (LIBC_SO, RTLD_LAZY | RTLD_NOLOAD);
TEST_VERIFY_EXIT (libc_map != NULL);
struct dl_find_object expected;
@@ -185,7 +186,8 @@ do_test (void)
check_initial ();
/* dlopen-based test. First an object that can be dlclosed. */
- struct link_map *mod1 = xdlopen ("tst-dl_find_object-mod1.so", RTLD_NOW);
+ struct link_map_private *mod1
+ = xdlopen ("tst-dl_find_object-mod1.so", RTLD_NOW);
void *mod1_data = xdlsym (mod1, "mod1_data");
void *map_start = (void *) mod1->l_map_start;
void *map_end = (void *) (mod1->l_map_end - 1);
@@ -205,7 +207,8 @@ do_test (void)
check (map_end, NULL, __LINE__);
/* Now try a NODELETE load. */
- struct link_map *mod2 = xdlopen ("tst-dl_find_object-mod2.so", RTLD_NOW);
+ struct link_map_private *mod2
+ = xdlopen ("tst-dl_find_object-mod2.so", RTLD_NOW);
void *mod2_data = xdlsym (mod2, "mod2_data");
map_start = (void *) mod2->l_map_start;
map_end = (void *) (mod2->l_map_end - 1);
diff --git a/elf/tst-tls6.c b/elf/tst-tls6.c
index df81c1f6b4..2da9229482 100644
--- a/elf/tst-tls6.c
+++ b/elf/tst-tls6.c
@@ -13,7 +13,7 @@ do_test (void)
int *foop;
int *foop2;
int (*fp) (int, int *);
- void *h;
+ struct link_map_private *h;
int i;
int modid = -1;
@@ -30,11 +30,11 @@ do_test (void)
We make sure that the module gets assigned the same ID every
time. The value of the first round is used. */
if (modid == -1)
- modid = ((struct link_map *) h)->l_tls_modid;
- else if (((struct link_map *) h)->l_tls_modid != modid)
+ modid = h->l_tls_modid;
+ else if (h->l_tls_modid != modid)
{
printf ("round %d: modid now %zd, initially %d\n",
- i, ((struct link_map *) h)->l_tls_modid, modid);
+ i, h->l_tls_modid, modid);
result = 1;
}
diff --git a/elf/tst-tls7.c b/elf/tst-tls7.c
index fa46709600..6bac842218 100644
--- a/elf/tst-tls7.c
+++ b/elf/tst-tls7.c
@@ -11,7 +11,7 @@ do_test (void)
static const char modname[] = "tst-tlsmod3.so";
int result = 0;
int (*fp) (void);
- void *h;
+ struct link_map_private *h;
int i;
int modid = -1;
@@ -28,11 +28,11 @@ do_test (void)
We make sure that the module gets assigned the same ID every
time. The value of the first round is used. */
if (modid == -1)
- modid = ((struct link_map *) h)->l_tls_modid;
- else if (((struct link_map *) h)->l_tls_modid != (size_t) modid)
+ modid = h->l_tls_modid;
+ else if (h->l_tls_modid != (size_t) modid)
{
printf ("round %d: modid now %zu, initially %d\n",
- i, ((struct link_map *) h)->l_tls_modid, modid);
+ i, h->l_tls_modid, modid);
result = 1;
}
diff --git a/elf/tst-tls8.c b/elf/tst-tls8.c
index c779572617..81576b85c3 100644
--- a/elf/tst-tls8.c
+++ b/elf/tst-tls8.c
@@ -13,8 +13,8 @@ do_test (void)
int result = 0;
int (*fp1) (void);
int (*fp2) (int, int *);
- void *h1;
- void *h2;
+ struct link_map_private *h1;
+ struct link_map_private *h2;
int i;
size_t modid1 = (size_t) -1;
size_t modid2 = (size_t) -1;
@@ -33,11 +33,11 @@ do_test (void)
We make sure that the module gets assigned the same ID every
time. The value of the first round is used. */
if (modid1 == (size_t) -1)
- modid1 = ((struct link_map *) h1)->l_tls_modid;
- else if (((struct link_map *) h1)->l_tls_modid != modid1)
+ modid1 = h1->l_tls_modid;
+ else if (h1->l_tls_modid != modid1)
{
printf ("round %d: modid now %zd, initially %zd\n",
- i, ((struct link_map *) h1)->l_tls_modid, modid1);
+ i, h1->l_tls_modid, modid1);
result = 1;
}
@@ -63,11 +63,11 @@ do_test (void)
We make sure that the module gets assigned the same ID every
time. The value of the first round is used. */
if (modid2 == (size_t) -1)
- modid2 = ((struct link_map *) h1)->l_tls_modid;
- else if (((struct link_map *) h1)->l_tls_modid != modid2)
+ modid2 = h1->l_tls_modid;
+ else if (h1->l_tls_modid != modid2)
{
printf ("round %d: modid now %zd, initially %zd\n",
- i, ((struct link_map *) h1)->l_tls_modid, modid2);
+ i, h1->l_tls_modid, modid2);
result = 1;
}
@@ -103,10 +103,10 @@ do_test (void)
/* Dirty test code here: we peek into a private data structure.
We make sure that the module gets assigned the same ID every
time. The value of the first round is used. */
- if (((struct link_map *) h1)->l_tls_modid != modid1)
+ if (h1->l_tls_modid != modid1)
{
printf ("round %d: modid now %zd, initially %zd\n",
- i, ((struct link_map *) h1)->l_tls_modid, modid1);
+ i, h1->l_tls_modid, modid1);
result = 1;
}
@@ -131,10 +131,10 @@ do_test (void)
/* Dirty test code here: we peek into a private data structure.
We make sure that the module gets assigned the same ID every
time. The value of the first round is used. */
- if (((struct link_map *) h1)->l_tls_modid != modid2)
+ if (h1->l_tls_modid != modid2)
{
printf ("round %d: modid now %zd, initially %zd\n",
- i, ((struct link_map *) h1)->l_tls_modid, modid2);
+ i, h1->l_tls_modid, modid2);
result = 1;
}
diff --git a/elf/unload.c b/elf/unload.c
index 4566f226f8..ab27d9da4a 100644
--- a/elf/unload.c
+++ b/elf/unload.c
@@ -9,13 +9,14 @@
#include <stdio.h>
#include <stdlib.h>
-#define MAPS ((struct link_map *) _r_debug.r_map)
+#define MAPS (l_private (_r_debug.r_map))
#define OUT \
- for (map = MAPS; map != NULL; map = map->l_next) \
+ for (map = MAPS; map != NULL; map = l_next (map)) \
if (map->l_type == lt_loaded) \
printf ("name = \"%s\", direct_opencount = %d\n", \
- map->l_name, (int) map->l_direct_opencount); \
+ map->l_public.l_name, \
+ (int) map->l_direct_opencount); \
fflush (stdout)
typedef struct
@@ -30,7 +31,7 @@ main (void)
strct *testdat;
int ret;
int result = 0;
- struct link_map *map;
+ struct link_map_private *map;
mtrace ();
diff --git a/elf/unload2.c b/elf/unload2.c
index eef2bfd426..3d6b224610 100644
--- a/elf/unload2.c
+++ b/elf/unload2.c
@@ -6,20 +6,21 @@
#include <stdio.h>
#include <stdlib.h>
-#define MAPS ((struct link_map *) _r_debug.r_map)
+#define MAPS (l_private (_r_debug.r_map))
#define OUT \
- for (map = MAPS; map != NULL; map = map->l_next) \
+ for (map = MAPS; map != NULL; map = l_next (map)) \
if (map->l_type == lt_loaded) \
printf ("name = \"%s\", direct_opencount = %d\n", \
- map->l_name, (int) map->l_direct_opencount); \
+ map->l_public.l_name, \
+ (int) map->l_direct_opencount); \
fflush (stdout)
int
main (void)
{
void *h[3];
- struct link_map *map;
+ struct link_map_private *map;
void (*fp) (void);
h[0] = dlopen ("unload2mod.so", RTLD_LAZY);
diff --git a/htl/pt-alloc.c b/htl/pt-alloc.c
index e205752649..c94a4dcad9 100644
--- a/htl/pt-alloc.c
+++ b/htl/pt-alloc.c
@@ -204,7 +204,7 @@ retry:
void
attribute_hidden
-__pthread_init_static_tls (struct link_map *map)
+__pthread_init_static_tls (struct link_map_private *map)
{
int i;
diff --git a/include/dlfcn.h b/include/dlfcn.h
index a44420fa37..af5d75509c 100644
--- a/include/dlfcn.h
+++ b/include/dlfcn.h
@@ -61,17 +61,17 @@ extern int __libc_dlclose (void *__map)
/* Locate shared object containing the given address. */
#ifdef ElfW
extern int _dl_addr (const void *address, Dl_info *info,
- struct link_map **mapp, const ElfW(Sym) **symbolp)
+ struct link_map_private **mapp, const ElfW(Sym) **symbolp)
attribute_hidden;
#endif
-struct link_map;
+struct link_map_private;
/* Close an object previously opened by _dl_open. */
extern void _dl_close (void *map) attribute_hidden;
/* Same as above, but without locking and safety checks for user
provided map arguments. */
-extern void _dl_close_worker (struct link_map *map, bool force)
+extern void _dl_close_worker (struct link_map_private *map, bool force)
attribute_hidden;
/* Look up NAME in shared object HANDLE (which may be RTLD_DEFAULT or
diff --git a/include/link.h b/include/link.h
index adb3886193..ae76a99c30 100644
--- a/include/link.h
+++ b/include/link.h
@@ -5,7 +5,7 @@
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
+ License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
@@ -24,14 +24,6 @@
# error this should be impossible
#endif
-# ifndef _ISOMAC
-/* Get most of the contents from the public header, but we define a
- different `struct link_map' type for private use. The la_objopen
- prototype uses the type, so we have to declare it separately. */
-# define link_map link_map_public
-# define la_objopen la_objopen_wrongproto
-# endif
-
#include <elf/link.h>
# ifndef _ISOMAC
@@ -66,7 +58,7 @@ struct link_map;
struct r_scope_elem
{
/* Array of maps for the scope. */
- struct link_map **r_list;
+ struct link_map_private **r_list;
/* Number of entries in the scope. */
unsigned int r_nlist;
};
@@ -92,16 +84,11 @@ extern struct r_search_path_struct __rtld_env_path_list attribute_hidden;
This data structure might change in future, if necessary. User-level
programs must avoid defining objects of this type. */
-struct link_map
+struct link_map_private
{
/* These first few members are part of the protocol with the debugger.
This is the same format used in SVR4. */
-
- ElfW(Addr) l_addr; /* Difference between the address in the ELF
- file and the addresses in memory. */
- char *l_name; /* Absolute file name object was found in. */
- ElfW(Dyn) *l_ld; /* Dynamic section of the shared object. */
- struct link_map *l_next, *l_prev; /* Chain of loaded objects. */
+ struct link_map l_public;
/* All following members are internal to the dynamic linker.
They may change without notice. */
@@ -109,7 +96,7 @@ struct link_map
/* This is an element which is only ever different from a pointer to
the very same copy of this type for ld.so when it is used in more
than one namespace. */
- struct link_map *l_real;
+ struct link_map_private *l_real;
/* Number of the namespace this link map belongs to. */
Lmid_t l_ns;
@@ -148,7 +135,7 @@ struct link_map
struct r_scope_elem l_symbolic_searchlist;
/* Dependent object that first caused this object to be loaded. */
- struct link_map *l_loader;
+ struct link_map_private *l_loader;
/* Array with version names. */
struct r_found_version *l_versions;
@@ -234,7 +221,7 @@ struct link_map
struct reloc_result
{
DL_FIXUP_VALUE_TYPE addr;
- struct link_map *bound;
+ struct link_map_private *bound;
unsigned int boundndx;
uint32_t enterexit;
unsigned int flags;
@@ -274,13 +261,13 @@ struct link_map
struct r_search_path_struct l_runpath_dirs;
/* List of object in order of the init and fini calls. */
- struct link_map **l_initfini;
+ struct link_map_private **l_initfini;
/* List of the dependencies introduced through symbol binding. */
struct link_map_reldeps
{
unsigned int act;
- struct link_map *list[];
+ struct link_map_private *list[];
} *l_reldeps;
unsigned int l_reldepsmax;
@@ -301,7 +288,7 @@ struct link_map
{
const ElfW(Sym) *sym;
int type_class;
- struct link_map *value;
+ struct link_map_private *value;
const ElfW(Sym) *ret;
} l_lookup_cache;
@@ -347,6 +334,21 @@ struct link_map
unsigned long long int l_serial;
};
+/* Type-safe downcast from struct link_map to the internal structure. */
+static inline struct link_map_private *
+l_private (struct link_map *l)
+{
+ return (struct link_map_private *) l;
+}
+
+/* Return a pointer to the private view of the next link map. Can be
+ used to iterate through the list of link maps. */
+static inline struct link_map_private *
+l_next (struct link_map_private *l)
+{
+ return l_private (l->l_public.l_next);
+}
+
#include <dl-relocate-ld.h>
/* Information used by audit modules. For most link maps, this data
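Note (not part of the diff): the downcast in l_private is well defined
because l_public is the first member of struct link_map_private, so a
pointer to the private structure and a pointer to its public header are
interchangeable at the address level. A sketch of the invariant and the
round trip (assuming <stddef.h> for offsetof):

    _Static_assert (offsetof (struct link_map_private, l_public) == 0,
                    "l_public must remain the first member");

    struct link_map_private *priv = l_private (_r_debug.r_map);
    struct link_map *pub = &priv->l_public;  /* same address, public view */
    /* l_private (pub) == priv again.  */

l_next then simply follows the public l_next pointer and returns the
private view, which is what the loop conversions throughout this patch
rely on.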
diff --git a/include/rtld-malloc.h b/include/rtld-malloc.h
index 02f89bca1c..318ec53055 100644
--- a/include/rtld-malloc.h
+++ b/include/rtld-malloc.h
@@ -73,8 +73,9 @@ _Bool __rtld_malloc_is_complete (void) attribute_hidden;
/* Called shortly before the final self-relocation (when RELRO
variables are still writable) to activate the real malloc
implementation. MAIN_MAP is the link map of the executable. */
-struct link_map;
-void __rtld_malloc_init_real (struct link_map *main_map) attribute_hidden;
+struct link_map_private;
+void __rtld_malloc_init_real (struct link_map_private *main_map)
+ attribute_hidden;
#else /* !IS_IN (rtld) */
diff --git a/libio/vtables.c b/libio/vtables.c
index 34f7e15f1c..d7f4f96d49 100644
--- a/libio/vtables.c
+++ b/libio/vtables.c
@@ -516,7 +516,7 @@ _IO_vtable_check (void)
boundary. */
{
Dl_info di;
- struct link_map *l;
+ struct link_map_private *l;
if (!rtld_active ()
|| (_dl_addr (_IO_vtable_check, &di, &l, NULL) != 0
&& l->l_ns != LM_ID_BASE))
diff --git a/nptl_db/db_info.c b/nptl_db/db_info.c
index adb83769cf..64e5d220ef 100644
--- a/nptl_db/db_info.c
+++ b/nptl_db/db_info.c
@@ -37,7 +37,7 @@ typedef struct
union dtv dtv[UINT32_MAX / 2 / sizeof (union dtv)]; /* No constant bound. */
} dtv;
-typedef struct link_map link_map;
+typedef struct link_map_private link_map;
typedef struct rtld_global rtld_global;
typedef struct dtv_slotinfo_list dtv_slotinfo_list;
typedef struct dtv_slotinfo dtv_slotinfo;
diff --git a/stdlib/cxa_thread_atexit_impl.c b/stdlib/cxa_thread_atexit_impl.c
index 9bd2ad2158..d35002af30 100644
--- a/stdlib/cxa_thread_atexit_impl.c
+++ b/stdlib/cxa_thread_atexit_impl.c
@@ -83,13 +83,13 @@ struct dtor_list
{
dtor_func func;
void *obj;
- struct link_map *map;
+ struct link_map_private *map;
struct dtor_list *next;
};
static __thread struct dtor_list *tls_dtor_list;
static __thread void *dso_symbol_cache;
-static __thread struct link_map *lm_cache;
+static __thread struct link_map_private *lm_cache;
/* Register a destructor for TLS variables declared with the 'thread_local'
keyword. This function is only called from code generated by the C++
@@ -121,7 +121,7 @@ __cxa_thread_atexit_impl (dtor_func func, void *obj, void *dso_symbol)
{
ElfW(Addr) caller = (ElfW(Addr)) dso_symbol;
- struct link_map *l = _dl_find_dso_for_object (caller);
+ struct link_map_private *l = _dl_find_dso_for_object (caller);
/* If the address is not recognized the call comes from the main
program (we hope). */
diff --git a/stdlib/tst-tls-atexit.c b/stdlib/tst-tls-atexit.c
index efbaed1220..4ccd8c1dc8 100644
--- a/stdlib/tst-tls-atexit.c
+++ b/stdlib/tst-tls-atexit.c
@@ -55,13 +55,13 @@
static bool
is_loaded (void)
{
- struct link_map *lm = (struct link_map *) _r_debug.r_map;
+ struct link_map_private *lm = l_private (_r_debug.r_map);
- for (; lm; lm = lm->l_next)
- if (lm->l_type == lt_loaded && lm->l_name
- && strcmp (basename (DSO_NAME), basename (lm->l_name)) == 0)
+ for (; lm; lm = l_next (lm))
+ if (lm->l_type == lt_loaded && lm->l_public.l_name
+ && strcmp (basename (DSO_NAME), basename (lm->l_public.l_name)) == 0)
{
- printf ("%s is still loaded\n", lm->l_name);
+ printf ("%s is still loaded\n", lm->l_public.l_name);
return true;
}
return false;
diff --git a/sysdeps/aarch64/dl-bti.c b/sysdeps/aarch64/dl-bti.c
index b740b1f86d..ed4ea55f4c 100644
--- a/sysdeps/aarch64/dl-bti.c
+++ b/sysdeps/aarch64/dl-bti.c
@@ -29,7 +29,7 @@
/* Enable BTI protection for MAP. */
void
-_dl_bti_protect (struct link_map *map, int fd)
+_dl_bti_protect (struct link_map_private *map, int fd)
{
const size_t pagesz = GLRO(dl_pagesize);
const ElfW(Phdr) *phdr;
@@ -40,7 +40,7 @@ _dl_bti_protect (struct link_map *map, int fd)
size_t vstart = ALIGN_DOWN (phdr->p_vaddr, pagesz);
size_t vend = ALIGN_UP (phdr->p_vaddr + phdr->p_filesz, pagesz);
off_t off = ALIGN_DOWN (phdr->p_offset, pagesz);
- void *start = (void *) (vstart + map->l_addr);
+ void *start = (void *) (vstart + map->l_public.l_addr);
size_t len = vend - vstart;
unsigned prot = PROT_EXEC | PROT_BTI;
@@ -61,14 +61,14 @@ _dl_bti_protect (struct link_map *map, int fd)
static void
-bti_failed (struct link_map *l, const char *program)
+bti_failed (struct link_map_private *l, const char *program)
{
if (program)
_dl_fatal_printf ("%s: %s: failed to turn on BTI protection\n",
- program, l->l_name);
+ program, l->l_public.l_name);
else
/* Note: the errno value is not available any more. */
- _dl_signal_error (0, l->l_name, "dlopen",
+ _dl_signal_error (0, l->l_public.l_name, "dlopen",
N_("failed to turn on BTI protection"));
}
@@ -76,7 +76,7 @@ bti_failed (struct link_map *l, const char *program)
/* Enable BTI for L and its dependencies. */
void
-_dl_bti_check (struct link_map *l, const char *program)
+_dl_bti_check (struct link_map_private *l, const char *program)
{
if (!GLRO(dl_aarch64_cpu_features).bti)
return;
@@ -87,7 +87,7 @@ _dl_bti_check (struct link_map *l, const char *program)
unsigned int i = l->l_searchlist.r_nlist;
while (i-- > 0)
{
- struct link_map *dep = l->l_initfini[i];
+ struct link_map_private *dep = l->l_initfini[i];
if (dep->l_mach.bti_fail)
bti_failed (dep, program);
}
diff --git a/sysdeps/aarch64/dl-lookupcfg.h b/sysdeps/aarch64/dl-lookupcfg.h
index d7fe73636b..22d98485e2 100644
--- a/sysdeps/aarch64/dl-lookupcfg.h
+++ b/sysdeps/aarch64/dl-lookupcfg.h
@@ -20,8 +20,8 @@
#include_next <dl-lookupcfg.h>
-struct link_map;
+struct link_map_private;
-extern void _dl_unmap (struct link_map *map);
+extern void _dl_unmap (struct link_map_private *map);
#define DL_UNMAP(map) _dl_unmap (map)
diff --git a/sysdeps/aarch64/dl-machine.h b/sysdeps/aarch64/dl-machine.h
index a56eb96a79..370bbfceba 100644
--- a/sysdeps/aarch64/dl-machine.h
+++ b/sysdeps/aarch64/dl-machine.h
@@ -61,7 +61,8 @@ elf_machine_dynamic (void)
entries will jump to the on-demand fixup code in dl-runtime.c. */
static inline int __attribute__ ((unused))
-elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
+elf_machine_runtime_setup (struct link_map_private *l,
+ struct r_scope_elem *scope[],
int lazy, int profile)
{
if (l->l_info[DT_JMPREL] && lazy)
@@ -72,7 +73,7 @@ elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
got = (ElfW(Addr) *) D_PTR (l, l_info[DT_PLTGOT]);
if (got[1])
{
- l->l_mach.plt = got[1] + l->l_addr;
+ l->l_mach.plt = got[1] + l->l_public.l_addr;
}
got[1] = (ElfW(Addr)) l;
@@ -138,7 +139,7 @@ dl_platform_init (void)
static inline ElfW(Addr)
-elf_machine_fixup_plt (struct link_map *map, lookup_t t,
+elf_machine_fixup_plt (struct link_map_private *map, lookup_t t,
const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
const ElfW(Rela) *reloc,
ElfW(Addr) *reloc_addr,
@@ -149,7 +150,7 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t t,
/* Return the final value of a plt relocation. */
static inline ElfW(Addr)
-elf_machine_plt_value (struct link_map *map,
+elf_machine_plt_value (struct link_map_private *map,
const ElfW(Rela) *reloc,
ElfW(Addr) value)
{
@@ -166,7 +167,7 @@ elf_machine_plt_value (struct link_map *map,
static inline void
__attribute__ ((always_inline))
-elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
const ElfW(Rela) *reloc, const ElfW(Sym) *sym,
const struct r_found_version *version,
void *const reloc_addr_arg, int skip_ifunc)
@@ -175,7 +176,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
const unsigned int r_type = ELFW (R_TYPE) (reloc->r_info);
if (__builtin_expect (r_type == AARCH64_R(RELATIVE), 0))
- *reloc_addr = map->l_addr + reloc->r_addend;
+ *reloc_addr = map->l_public.l_addr + reloc->r_addend;
else if (__builtin_expect (r_type == R_AARCH64_NONE, 0))
return;
else
@@ -183,8 +184,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
# ifndef RTLD_BOOTSTRAP
const ElfW(Sym) *const refsym = sym;
# endif
- struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version,
- r_type);
+ struct link_map_private *sym_map
+ = RESOLVE_MAP (map, scope, &sym, version, r_type);
ElfW(Addr) value = SYMBOL_ADDRESS (sym_map, sym, true);
if (sym != NULL
@@ -279,7 +280,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
break;
case AARCH64_R(IRELATIVE):
- value = map->l_addr + reloc->r_addend;
+ value = map->l_public.l_addr + reloc->r_addend;
if (__glibc_likely (!skip_ifunc))
value = elf_ifunc_invoke (value);
*reloc_addr = value;
@@ -305,7 +306,8 @@ elf_machine_rela_relative (ElfW(Addr) l_addr,
static inline void
__attribute__ ((always_inline))
-elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_lazy_rel (struct link_map_private *map,
+ struct r_scope_elem *scope[],
ElfW(Addr) l_addr,
const ElfW(Rela) *reloc,
int skip_ifunc)
@@ -364,7 +366,7 @@ elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
}
else if (__glibc_unlikely (r_type == AARCH64_R(IRELATIVE)))
{
- ElfW(Addr) value = map->l_addr + reloc->r_addend;
+ ElfW(Addr) value = map->l_public.l_addr + reloc->r_addend;
if (__glibc_likely (!skip_ifunc))
value = elf_ifunc_invoke (value);
*reloc_addr = value;
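Note (not part of the diff): the machine-dependent relocation code only
ever needs the load bias, which is exactly the field that moved into the
embedded public header, so the arithmetic is unchanged. For a relative
relocation the sketch is:

    /* reloc_addr is l_addr + r_offset, computed by the caller;
       the relocated value is l_addr + r_addend.  */
    ElfW(Addr) *reloc_addr
      = (ElfW(Addr) *) (map->l_public.l_addr + reloc->r_offset);
    *reloc_addr = map->l_public.l_addr + reloc->r_addend;

The IRELATIVE case is the same computation followed by a call through the
ifunc resolver, as in the hunks above.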
diff --git a/sysdeps/aarch64/dl-prop.h b/sysdeps/aarch64/dl-prop.h
index 8fc58791d5..fa5865a9e9 100644
--- a/sysdeps/aarch64/dl-prop.h
+++ b/sysdeps/aarch64/dl-prop.h
@@ -19,30 +19,30 @@
#ifndef _DL_PROP_H
#define _DL_PROP_H
-extern void _dl_bti_protect (struct link_map *, int) attribute_hidden;
+extern void _dl_bti_protect (struct link_map_private *, int) attribute_hidden;
-extern void _dl_bti_check (struct link_map *, const char *)
+extern void _dl_bti_check (struct link_map_private *, const char *)
attribute_hidden;
static inline void __attribute__ ((always_inline))
-_rtld_main_check (struct link_map *m, const char *program)
+_rtld_main_check (struct link_map_private *m, const char *program)
{
_dl_bti_check (m, program);
}
static inline void __attribute__ ((always_inline))
-_dl_open_check (struct link_map *m)
+_dl_open_check (struct link_map_private *m)
{
_dl_bti_check (m, NULL);
}
static inline void __attribute__ ((always_inline))
-_dl_process_pt_note (struct link_map *l, int fd, const ElfW(Phdr) *ph)
+_dl_process_pt_note (struct link_map_private *l, int fd, const ElfW(Phdr) *ph)
{
}
static inline int
-_dl_process_gnu_property (struct link_map *l, int fd, uint32_t type,
+_dl_process_gnu_property (struct link_map_private *l, int fd, uint32_t type,
uint32_t datasz, void *data)
{
if (!GLRO(dl_aarch64_cpu_features).bti)
diff --git a/sysdeps/aarch64/dl-tlsdesc.h b/sysdeps/aarch64/dl-tlsdesc.h
index 0138efcf49..d84890dc86 100644
--- a/sysdeps/aarch64/dl-tlsdesc.h
+++ b/sysdeps/aarch64/dl-tlsdesc.h
@@ -49,7 +49,7 @@ extern ptrdiff_t attribute_hidden
_dl_tlsdesc_undefweak (struct tlsdesc *);
# ifdef SHARED
-extern void *_dl_make_tlsdesc_dynamic (struct link_map *, size_t);
+extern void *_dl_make_tlsdesc_dynamic (struct link_map_private *, size_t);
extern ptrdiff_t attribute_hidden
_dl_tlsdesc_dynamic (struct tlsdesc *);
diff --git a/sysdeps/aarch64/tlsdesc.c b/sysdeps/aarch64/tlsdesc.c
index a357e7619f..109ee9dbc7 100644
--- a/sysdeps/aarch64/tlsdesc.c
+++ b/sysdeps/aarch64/tlsdesc.c
@@ -28,7 +28,7 @@
if there is one. */
void
-_dl_unmap (struct link_map *map)
+_dl_unmap (struct link_map_private *map)
{
_dl_unmap_segments (map);
diff --git a/sysdeps/alpha/dl-machine.h b/sysdeps/alpha/dl-machine.h
index 7fe2afca93..746c7d8189 100644
--- a/sysdeps/alpha/dl-machine.h
+++ b/sysdeps/alpha/dl-machine.h
@@ -71,7 +71,8 @@ elf_machine_load_address (void)
entries will jump to the on-demand fixup code in dl-runtime.c. */
static inline int
-elf_machine_runtime_setup (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_runtime_setup (struct link_map_private *map,
+ struct r_scope_elem *scope[],
int lazy, int profile)
{
extern char _dl_runtime_resolve_new[] attribute_hidden;
@@ -79,7 +80,7 @@ elf_machine_runtime_setup (struct link_map *map, struct r_scope_elem *scope[],
struct pltgot {
char *resolve;
- struct link_map *link;
+ struct link_map_private *link;
};
struct pltgot *pg;
@@ -218,7 +219,7 @@ dl_platform_init (void)
/* Fix up the instructions of a PLT entry to invoke the function
rather than the dynamic linker. */
static inline Elf64_Addr
-elf_machine_fixup_plt (struct link_map *map, lookup_t t,
+elf_machine_fixup_plt (struct link_map_private *map, lookup_t t,
const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
const Elf64_Rela *reloc,
Elf64_Addr *got_addr, Elf64_Addr value)
@@ -304,7 +305,7 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t t,
/* Return the final value of a plt relocation. */
static inline Elf64_Addr
-elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc,
+elf_machine_plt_value (struct link_map_private *map, const Elf64_Rela *reloc,
Elf64_Addr value)
{
return value + reloc->r_addend;
@@ -322,7 +323,7 @@ elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc,
MAP is the object containing the reloc. */
static inline void
__attribute__ ((always_inline))
-elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
const Elf64_Rela *reloc,
const Elf64_Sym *sym,
const struct r_found_version *version,
@@ -344,7 +345,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
/* Load value without causing unaligned trap. */
memcpy (&reloc_addr_val, reloc_addr_arg, 8);
- reloc_addr_val += map->l_addr;
+ reloc_addr_val += map->l_public.l_addr;
/* Store value without causing unaligned trap. */
memcpy (reloc_addr_arg, &reloc_addr_val, 8);
@@ -355,8 +356,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
return;
else
{
- struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version,
- r_type);
+ struct link_map_private *sym_map
+ = RESOLVE_MAP (map, scope, &sym, version, r_type);
Elf64_Addr sym_value;
Elf64_Addr sym_raw_value;
@@ -438,7 +439,8 @@ elf_machine_rela_relative (Elf64_Addr l_addr, const Elf64_Rela *reloc,
static inline void
__attribute__ ((always_inline))
-elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_lazy_rel (struct link_map_private *map,
+ struct r_scope_elem *scope[],
Elf64_Addr l_addr, const Elf64_Rela *reloc,
int skip_ifunc)
{
diff --git a/sysdeps/arc/dl-machine.h b/sysdeps/arc/dl-machine.h
index 4dc652a449..c07ebe0838 100644
--- a/sysdeps/arc/dl-machine.h
+++ b/sysdeps/arc/dl-machine.h
@@ -124,7 +124,8 @@ elf_machine_load_address (void)
static inline int
__attribute__ ((always_inline))
-elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
+elf_machine_runtime_setup (struct link_map_private *l,
+ struct r_scope_elem *scope[],
int lazy, int profile)
{
extern void _dl_runtime_resolve (void);
@@ -134,7 +135,7 @@ elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
/* On ARC DT_PLTGOT point to .plt whose 5th word (after the PLT header)
contains the address of .got. */
ElfW(Addr) *plt_base = (ElfW(Addr) *) D_PTR (l, l_info[DT_PLTGOT]);
- ElfW(Addr) *got = (ElfW(Addr) *) (plt_base[5] + l->l_addr);
+ ElfW(Addr) *got = (ElfW(Addr) *) (plt_base[5] + l->l_public.l_addr);
got[1] = (ElfW(Addr)) l; /* Identify this shared object. */
@@ -195,7 +196,7 @@ __start: \n\
/* Fixup a PLT entry to bounce directly to the function at VALUE. */
static inline ElfW(Addr)
-elf_machine_fixup_plt (struct link_map *map, lookup_t t,
+elf_machine_fixup_plt (struct link_map_private *map, lookup_t t,
const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
const ElfW(Rela) *reloc,
ElfW(Addr) *reloc_addr, ElfW(Addr) value)
@@ -216,7 +217,7 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t t,
static inline void
__attribute__ ((always_inline))
-elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
const ElfW(Rela) *reloc, const ElfW(Sym) *sym,
const struct r_found_version *version,
void *const reloc_addr_arg, int skip_ifunc)
@@ -226,14 +227,14 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
ElfW(Addr) *const reloc_addr = reloc_addr_arg;
if (__glibc_unlikely (r_type == R_ARC_RELATIVE))
- *reloc_addr += map->l_addr;
+ *reloc_addr += map->l_public.l_addr;
else if (__glibc_unlikely (r_type == R_ARC_NONE))
return;
else
{
const ElfW(Sym) *const refsym = sym;
- struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version,
- r_type);
+ struct link_map_private *sym_map
+ = RESOLVE_MAP (map, scope, &sym, version, r_type);
ElfW(Addr) value = SYMBOL_ADDRESS (sym_map, sym, true);
switch (r_type)
@@ -314,7 +315,8 @@ elf_machine_rela_relative (ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
static inline void
__attribute__ ((always_inline))
-elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_lazy_rel (struct link_map_private *map,
+ struct r_scope_elem *scope[],
ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
int skip_ifunc)
{
diff --git a/sysdeps/arm/dl-lookupcfg.h b/sysdeps/arm/dl-lookupcfg.h
index d7fe73636b..22d98485e2 100644
--- a/sysdeps/arm/dl-lookupcfg.h
+++ b/sysdeps/arm/dl-lookupcfg.h
@@ -20,8 +20,8 @@
#include_next <dl-lookupcfg.h>
-struct link_map;
+struct link_map_private;
-extern void _dl_unmap (struct link_map *map);
+extern void _dl_unmap (struct link_map_private *map);
#define DL_UNMAP(map) _dl_unmap (map)
diff --git a/sysdeps/arm/dl-machine.h b/sysdeps/arm/dl-machine.h
index a68679e653..e5fd0cc4d2 100644
--- a/sysdeps/arm/dl-machine.h
+++ b/sysdeps/arm/dl-machine.h
@@ -60,7 +60,8 @@ elf_machine_dynamic (void)
entries will jump to the on-demand fixup code in dl-runtime.c. */
static inline int __attribute__ ((unused))
-elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
+elf_machine_runtime_setup (struct link_map_private *l,
+ struct r_scope_elem *scope[],
int lazy, int profile)
{
Elf32_Addr *got;
@@ -78,7 +79,7 @@ elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
we have to be able to undo the prelinking of .got.plt.
The prelinker saved us here address of .plt. */
if (got[1])
- l->l_mach.plt = got[1] + l->l_addr;
+ l->l_mach.plt = got[1] + l->l_public.l_addr;
got[1] = (Elf32_Addr) l; /* Identify this shared object. */
/* The got[2] entry contains the address of a function which gets
@@ -205,7 +206,7 @@ dl_platform_init (void)
}
static inline Elf32_Addr
-elf_machine_fixup_plt (struct link_map *map, lookup_t t,
+elf_machine_fixup_plt (struct link_map_private *map, lookup_t t,
const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
const Elf32_Rel *reloc,
Elf32_Addr *reloc_addr, Elf32_Addr value)
@@ -215,7 +216,7 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t t,
/* Return the final value of a plt relocation. */
static inline Elf32_Addr
-elf_machine_plt_value (struct link_map *map, const Elf32_Rel *reloc,
+elf_machine_plt_value (struct link_map_private *map, const Elf32_Rel *reloc,
Elf32_Addr value)
{
return value;
@@ -241,7 +242,7 @@ static inline bool set_new_value (Elf32_Addr *new_value, Elf32_Addr v,
/* Handle a PC24 reloc, including the out-of-range case. */
static void
-relocate_pc24 (struct link_map *map, Elf32_Addr value,
+relocate_pc24 (struct link_map_private *map, Elf32_Addr value,
Elf32_Addr *const reloc_addr, Elf32_Sword addend)
{
Elf32_Addr new_value;
@@ -258,7 +259,7 @@ relocate_pc24 (struct link_map *map, Elf32_Addr value,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_PRIVATE | MAP_ANON, -1, 0);
if (new_page == MAP_FAILED)
- _dl_signal_error (0, map->l_name, NULL,
+ _dl_signal_error (0, map->l_public.l_name, NULL,
"could not map page for fixup");
fix_page = new_page;
assert (fix_offset == 0);
@@ -277,7 +278,7 @@ relocate_pc24 (struct link_map *map, Elf32_Addr value,
if (set_new_value (&new_value, (Elf32_Addr) fix_address, reloc_addr,
addend))
- _dl_signal_error (0, map->l_name, NULL,
+ _dl_signal_error (0, map->l_public.l_name, NULL,
"R_ARM_PC24 relocation out of range");
}
@@ -289,7 +290,7 @@ relocate_pc24 (struct link_map *map, Elf32_Addr value,
static inline void
__attribute__ ((always_inline))
-elf_machine_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_rel (struct link_map_private *map, struct r_scope_elem *scope[],
const Elf32_Rel *reloc, const Elf32_Sym *sym,
const struct r_found_version *version,
void *const reloc_addr_arg, int skip_ifunc)
@@ -299,7 +300,7 @@ elf_machine_rel (struct link_map *map, struct r_scope_elem *scope[],
#if !defined RTLD_BOOTSTRAP
if (__builtin_expect (r_type == R_ARM_RELATIVE, 0))
- *reloc_addr += map->l_addr;
+ *reloc_addr += map->l_public.l_addr;
# ifndef RTLD_BOOTSTRAP
else if (__builtin_expect (r_type == R_ARM_NONE, 0))
return;
@@ -308,8 +309,8 @@ elf_machine_rel (struct link_map *map, struct r_scope_elem *scope[],
#endif
{
const Elf32_Sym *const refsym = sym;
- struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version,
- r_type);
+ struct link_map_private *sym_map
+ = RESOLVE_MAP (map, scope, &sym, version, r_type);
Elf32_Addr value = SYMBOL_ADDRESS (sym_map, sym, true);
if (sym != NULL
@@ -439,7 +440,7 @@ elf_machine_rel (struct link_map *map, struct r_scope_elem *scope[],
}
break;
case R_ARM_IRELATIVE:
- value = map->l_addr + *reloc_addr;
+ value = map->l_public.l_addr + *reloc_addr;
if (__glibc_likely (!skip_ifunc))
value = ((Elf32_Addr (*) (int)) value) (GLRO(dl_hwcap));
*reloc_addr = value;
@@ -465,7 +466,8 @@ elf_machine_rel_relative (Elf32_Addr l_addr, const Elf32_Rel *reloc,
static inline void
__attribute__ ((always_inline))
-elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_lazy_rel (struct link_map_private *map,
+ struct r_scope_elem *scope[],
Elf32_Addr l_addr, const Elf32_Rel *reloc,
int skip_ifunc)
{
diff --git a/sysdeps/arm/dl-tlsdesc.h b/sysdeps/arm/dl-tlsdesc.h
index bb647ccc87..7fa5732267 100644
--- a/sysdeps/arm/dl-tlsdesc.h
+++ b/sysdeps/arm/dl-tlsdesc.h
@@ -51,7 +51,7 @@ extern ptrdiff_t attribute_hidden
_dl_tlsdesc_undefweak(struct tlsdesc *);
# ifdef SHARED
-extern void *_dl_make_tlsdesc_dynamic (struct link_map *map, size_t ti_offset);
+void *_dl_make_tlsdesc_dynamic (struct link_map_private *, size_t ti_offset);
extern ptrdiff_t attribute_hidden
_dl_tlsdesc_dynamic(struct tlsdesc *);
diff --git a/sysdeps/arm/tlsdesc.c b/sysdeps/arm/tlsdesc.c
index 54b35f0bac..550d3695ee 100644
--- a/sysdeps/arm/tlsdesc.c
+++ b/sysdeps/arm/tlsdesc.c
@@ -26,7 +26,7 @@
if there is one. */
void
-_dl_unmap (struct link_map *map)
+_dl_unmap (struct link_map_private *map)
{
_dl_unmap_segments (map);
diff --git a/sysdeps/csky/dl-machine.h b/sysdeps/csky/dl-machine.h
index 07a4b678bf..8dcb9b11c8 100644
--- a/sysdeps/csky/dl-machine.h
+++ b/sysdeps/csky/dl-machine.h
@@ -60,7 +60,8 @@ elf_machine_load_address (void)
entries will jump to the on-demand fixup code in dl-runtime.c. */
static inline int __attribute__ ((always_inline))
-elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
+elf_machine_runtime_setup (struct link_map_private *l,
+ struct r_scope_elem *scope[],
int lazy, int profile)
{
Elf32_Addr *got;
@@ -76,7 +77,7 @@ elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
got = (Elf32_Addr *) D_PTR (l, l_info[DT_PLTGOT]);
if (got[1])
- l->l_mach.plt = got[1] + l->l_addr;
+ l->l_mach.plt = got[1] + l->l_public.l_addr;
got[1] = (Elf32_Addr) l; /* Identify this shared object. */
/* The got[2] entry contains the address of a function which gets
@@ -170,7 +171,7 @@ dl_platform_init (void)
}
static inline Elf32_Addr
-elf_machine_fixup_plt (struct link_map *map, lookup_t t,
+elf_machine_fixup_plt (struct link_map_private *map, lookup_t t,
const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
const Elf32_Rela *reloc,
Elf32_Addr *reloc_addr, Elf32_Addr value)
@@ -181,7 +182,7 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t t,
/* Return the final value of a plt relocation. On the csky the JMP_SLOT
relocation ignores the addend. */
static inline Elf32_Addr
-elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
+elf_machine_plt_value (struct link_map_private *map, const Elf32_Rela *reloc,
Elf32_Addr value)
{
return value;
@@ -198,7 +199,7 @@ elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
MAP is the object containing the reloc. */
static inline void __attribute__ ((unused, always_inline))
-elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
const Elf32_Rela *reloc, const Elf32_Sym *sym,
const struct r_found_version *version,
void *const reloc_addr_arg, int skip_ifunc)
@@ -209,12 +210,12 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
Elf32_Addr __attribute__ ((unused)) insn_opcode = 0x0;
if (__builtin_expect (r_type == R_CKCORE_RELATIVE, 0))
- *reloc_addr = map->l_addr + reloc->r_addend;
+ *reloc_addr = map->l_public.l_addr + reloc->r_addend;
else
{
const Elf32_Sym *const refsym = sym;
- struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version,
- r_type);
+ struct link_map_private *sym_map
+ = RESOLVE_MAP (map, scope, &sym, version, r_type);
ElfW(Addr) value = SYMBOL_ADDRESS (sym_map, sym, true);
opcode16_addr = (unsigned short *)reloc_addr;
@@ -324,7 +325,8 @@ elf_machine_rela_relative (Elf32_Addr l_addr, const Elf32_Rela *reloc,
}
static inline void __attribute__ ((unused, always_inline))
-elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_lazy_rel (struct link_map_private *map,
+ struct r_scope_elem *scope[],
Elf32_Addr l_addr, const Elf32_Rela *reloc,
int skip_ifunc)
{
diff --git a/sysdeps/generic/dl-debug.h b/sysdeps/generic/dl-debug.h
index 7ad4a286cc..dbc4c317c9 100644
--- a/sysdeps/generic/dl-debug.h
+++ b/sysdeps/generic/dl-debug.h
@@ -24,7 +24,7 @@
static inline void
__attribute ((always_inline))
-elf_setup_debug_entry (struct link_map *l, struct r_debug *r)
+elf_setup_debug_entry (struct link_map_private *l, struct r_debug *r)
{
if (l->l_info[DT_DEBUG] != NULL)
l->l_info[DT_DEBUG]->d_un.d_ptr = (ElfW(Addr)) r;
diff --git a/sysdeps/generic/dl-fptr.h b/sysdeps/generic/dl-fptr.h
index adf40348bc..59cc9aaf02 100644
--- a/sysdeps/generic/dl-fptr.h
+++ b/sysdeps/generic/dl-fptr.h
@@ -35,11 +35,11 @@ struct fdesc_table
struct fdesc fdesc[0];
};
-struct link_map;
+struct link_map_private;
extern ElfW(Addr) _dl_boot_fptr_table [];
-extern ElfW(Addr) _dl_make_fptr (struct link_map *, const ElfW(Sym) *,
+extern ElfW(Addr) _dl_make_fptr (struct link_map_private *, const ElfW(Sym) *,
ElfW(Addr));
#endif /* !dl_fptr_h */
diff --git a/sysdeps/generic/dl-prop.h b/sysdeps/generic/dl-prop.h
index 654137ebce..13913da970 100644
--- a/sysdeps/generic/dl-prop.h
+++ b/sysdeps/generic/dl-prop.h
@@ -27,24 +27,24 @@
"flag day" ABI transitions. */
static inline void __attribute__ ((always_inline))
-_rtld_main_check (struct link_map *m, const char *program)
+_rtld_main_check (struct link_map_private *m, const char *program)
{
}
static inline void __attribute__ ((always_inline))
-_dl_open_check (struct link_map *m)
+_dl_open_check (struct link_map_private *m)
{
}
static inline void __attribute__ ((always_inline))
-_dl_process_pt_note (struct link_map *l, int fd, const ElfW(Phdr) *ph)
+_dl_process_pt_note (struct link_map_private *l, int fd, const ElfW(Phdr) *ph)
{
}
/* Called for each property in the NT_GNU_PROPERTY_TYPE_0 note of L,
processing of the properties continues until this returns 0. */
static inline int __attribute__ ((always_inline))
-_dl_process_gnu_property (struct link_map *l, int fd, uint32_t type,
+_dl_process_gnu_property (struct link_map_private *l, int fd, uint32_t type,
uint32_t datasz, void *data)
{
/* Continue until GNU_PROPERTY_1_NEEDED is found. */
diff --git a/sysdeps/generic/dl-protected.h b/sysdeps/generic/dl-protected.h
index 36fb8ce7f3..4d2540bea8 100644
--- a/sysdeps/generic/dl-protected.h
+++ b/sysdeps/generic/dl-protected.h
@@ -21,9 +21,9 @@
static inline void __attribute__ ((always_inline))
_dl_check_protected_symbol (const char *undef_name,
- const struct link_map *undef_map,
+ const struct link_map_private *undef_map,
const ElfW(Sym) *ref,
- const struct link_map *map,
+ const struct link_map_private *map,
int type_class)
{
if (undef_map == NULL || undef_map->l_type != lt_executable)
@@ -35,7 +35,7 @@ _dl_check_protected_symbol (const char *undef_name,
access. */
_dl_error_printf ("warning: copy relocation against non-copyable "
"protected symbol `%s' in `%s'\n",
- undef_name, map->l_name);
+ undef_name, map->l_public.l_name);
else if ((type_class & ELF_RTYPE_CLASS_PLT) && ref->st_value != 0
&& ref->st_shndx == SHN_UNDEF)
/* Disallow non-zero symbol values of undefined symbols in
@@ -45,13 +45,13 @@ _dl_check_protected_symbol (const char *undef_name,
_dl_error_printf (
"warning: direct reference to "
"protected function `%s' in `%s' may break pointer equality\n",
- undef_name, map->l_name);
+ undef_name, map->l_public.l_name);
else
return;
if (map->l_1_needed & GNU_PROPERTY_1_NEEDED_INDIRECT_EXTERN_ACCESS)
_dl_signal_error (
- 0, map->l_name, undef_name,
+ 0, map->l_public.l_name, undef_name,
N_ ("error due to GNU_PROPERTY_1_NEEDED_INDIRECT_EXTERN_ACCESS"));
}
diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
index 51ee7f2112..53cc428421 100644
--- a/sysdeps/generic/ldsodefs.h
+++ b/sysdeps/generic/ldsodefs.h
@@ -73,7 +73,7 @@ __BEGIN_DECLS
relocated. */
static inline bool
-dl_relocate_ld (const struct link_map *l)
+dl_relocate_ld (const struct link_map_private *l)
{
/* Don't relocate dynamic section if it is readonly */
return !(l->l_ld_readonly || DL_RO_DYN_SECTION);
@@ -86,12 +86,13 @@ dl_relocate_ld (const struct link_map *l)
most architectures the entry is already relocated - but for some not
and we need to relocate at access time. */
#define D_PTR(map, i) \
- ((map)->i->d_un.d_ptr + (dl_relocate_ld (map) ? 0 : (map)->l_addr))
+ ((map)->i->d_un.d_ptr + (dl_relocate_ld (map) ? 0 : (map)->l_public.l_addr))
/* Result of the lookup functions and how to retrieve the base address. */
-typedef struct link_map *lookup_t;
+typedef struct link_map_private *lookup_t;
#define LOOKUP_VALUE(map) map
-#define LOOKUP_VALUE_ADDRESS(map, set) ((set) || (map) ? (map)->l_addr : 0)
+#define LOOKUP_VALUE_ADDRESS(map, set) \
+ ((set) || (map) ? (map)->l_public.l_addr : 0)
/* Calculate the address of symbol REF using the base address from map MAP,
if non-NULL. Don't check for NULL map if MAP_SET is TRUE. */
@@ -121,10 +122,10 @@ typedef void (*fini_t) (void);
/* On some architectures dladdr can't use st_size of all symbols this way. */
#define DL_ADDR_SYM_MATCH(L, SYM, MATCHSYM, ADDR) \
- ((ADDR) >= (L)->l_addr + (SYM)->st_value \
+ ((ADDR) >= (L)->l_public.l_addr + (SYM)->st_value \
&& ((((SYM)->st_shndx == SHN_UNDEF || (SYM)->st_size == 0) \
- && (ADDR) == (L)->l_addr + (SYM)->st_value) \
- || (ADDR) < (L)->l_addr + (SYM)->st_value + (SYM)->st_size) \
+ && (ADDR) == (L)->l_public.l_addr + (SYM)->st_value) \
+ || (ADDR) < (L)->l_public.l_addr + (SYM)->st_value + (SYM)->st_size) \
&& ((MATCHSYM) == NULL || (MATCHSYM)->st_value < (SYM)->st_value))
/* According to the ELF gABI no STV_HIDDEN or STV_INTERNAL symbols are
@@ -238,7 +239,7 @@ struct audit_ifaces
{
void (*activity) (uintptr_t *, unsigned int);
char *(*objsearch) (const char *, uintptr_t *, unsigned int);
- unsigned int (*objopen) (struct link_map *, Lmid_t, uintptr_t *);
+ unsigned int (*objopen) (struct link_map_private *, Lmid_t, uintptr_t *);
void (*preinit) (uintptr_t *);
union
{
@@ -266,7 +267,8 @@ struct audit_ifaces
/* Test whether given NAME matches any of the names of the given object. */
-extern int _dl_name_match_p (const char *__name, const struct link_map *__map)
+extern int _dl_name_match_p (const char *__name,
+ const struct link_map_private *__map)
attribute_hidden;
/* Compute next higher prime number. */
@@ -315,7 +317,7 @@ struct rtld_global
EXTERN struct link_namespaces
{
/* A pointer to the map for the main map. */
- struct link_map *_ns_loaded;
+ struct link_map_private *_ns_loaded;
/* Number of object in the _dl_loaded list. */
unsigned int _ns_nloaded;
/* Direct pointer to the searchlist of the main object. */
@@ -333,7 +335,7 @@ struct rtld_global
/* Once libc.so has been loaded into the namespace, this points to
its link map. */
- struct link_map *libc_map;
+ struct link_map_private *libc_map;
/* Search table for unique objects. */
struct unique_sym_table
@@ -344,7 +346,7 @@ struct rtld_global
uint32_t hashval;
const char *name;
const ElfW(Sym) *sym;
- const struct link_map *map;
+ const struct link_map_private *map;
} *entries;
size_t size;
size_t n_elements;
@@ -380,10 +382,10 @@ struct rtld_global
EXTERN unsigned long long _dl_load_adds;
/* The object to be initialized first. */
- EXTERN struct link_map *_dl_initfirst;
+ EXTERN struct link_map_private *_dl_initfirst;
/* Map of shared object to be profiled. */
- EXTERN struct link_map *_dl_profile_map;
+ EXTERN struct link_map_private *_dl_profile_map;
/* Counters for the number of relocations performed. */
EXTERN unsigned long int _dl_num_relocations;
@@ -393,7 +395,7 @@ struct rtld_global
EXTERN struct r_search_path_elem *_dl_all_dirs;
/* Structure describing the dynamic linker itself. */
- EXTERN struct link_map _dl_rtld_map;
+ EXTERN struct link_map_private _dl_rtld_map;
#ifdef SHARED
/* Used to store the audit information for the link map of the
dynamic loader. */
@@ -435,7 +437,7 @@ struct rtld_global
struct dtv_slotinfo
{
size_t gen;
- struct link_map *map;
+ struct link_map_private *map;
} slotinfo[];
} *_dl_tls_dtv_slotinfo_list;
/* Number of modules in the static TLS block. */
@@ -460,7 +462,7 @@ struct rtld_global
EXTERN size_t _dl_tls_generation;
#if !PTHREAD_IN_LIBC
- EXTERN void (*_dl_init_static_tls) (struct link_map *);
+ EXTERN void (*_dl_init_static_tls) (struct link_map_private *);
#endif
/* Scopes to free after next THREAD_GSCOPE_WAIT (). */
@@ -634,7 +636,7 @@ struct rtld_global_ro
/* At startup time we set up the normal DSO data structure for it,
and this points to it. */
- EXTERN struct link_map *_dl_sysinfo_map;
+ EXTERN struct link_map_private *_dl_sysinfo_map;
# define PROCINFO_DECL
# ifndef PROCINFO_CLASS
@@ -657,10 +659,10 @@ struct rtld_global_ro
void (*_dl_debug_printf) (const char *, ...)
__attribute__ ((__format__ (__printf__, 1, 2)));
void (*_dl_mcount) (ElfW(Addr) frompc, ElfW(Addr) selfpc);
- lookup_t (*_dl_lookup_symbol_x) (const char *, struct link_map *,
+ lookup_t (*_dl_lookup_symbol_x) (const char *, struct link_map_private *,
const ElfW(Sym) **, struct r_scope_elem *[],
const struct r_found_version *, int, int,
- struct link_map *);
+ struct link_map_private *);
void *(*_dl_open) (const char *file, int mode, const void *caller_dlopen,
Lmid_t nsid, int argc, char *argv[], char *env[]);
void (*_dl_close) (void *map);
@@ -673,7 +675,7 @@ struct rtld_global_ro
/* libdl in a secondary namespace must use free from the base
namespace. */
void (*_dl_error_free) (void *);
- void *(*_dl_tls_get_addr_soft) (struct link_map *);
+ void *(*_dl_tls_get_addr_soft) (struct link_map_private *);
/* Called from __libc_shared to deallocate malloc'ed memory. */
void (*_dl_libc_freeres) (void);
@@ -914,23 +916,23 @@ rtld_hidden_proto (_dl_catch_exception)
/* Open the shared object NAME and map in its segments.
LOADER's DT_RPATH is used in searching for NAME.
If the object is already opened, returns its existing map. */
-extern struct link_map *_dl_map_object (struct link_map *loader,
- const char *name,
- int type, int trace_mode, int mode,
- Lmid_t nsid) attribute_hidden;
+struct link_map_private *_dl_map_object (struct link_map_private *loader,
+ const char *name,
+ int type, int trace_mode, int mode,
+ Lmid_t nsid) attribute_hidden;
/* Call _dl_map_object on the dependencies of MAP, and set up
MAP->l_searchlist. PRELOADS points to a vector of NPRELOADS previously
loaded objects that will be inserted into MAP->l_searchlist after MAP
but before its dependencies. */
-extern void _dl_map_object_deps (struct link_map *map,
- struct link_map **preloads,
+extern void _dl_map_object_deps (struct link_map_private *map,
+ struct link_map_private **preloads,
unsigned int npreloads, int trace_mode,
int open_mode)
attribute_hidden;
/* Cache the locations of MAP's hash table. */
-extern void _dl_setup_hash (struct link_map *map) attribute_hidden;
+extern void _dl_setup_hash (struct link_map_private *map) attribute_hidden;
/* Collect the directories in the search path for LOADER's dependencies.
@@ -938,12 +940,12 @@ extern void _dl_setup_hash (struct link_map *map) attribute_hidden;
SI->dls_cnt and SI->dls_size are set; if false, those must be as set
by a previous call with COUNTING set, and SI must point to SI->dls_size
bytes to be used in filling in the result. */
-extern void _dl_rtld_di_serinfo (struct link_map *loader,
+extern void _dl_rtld_di_serinfo (struct link_map_private *loader,
Dl_serinfo *si, bool counting);
/* Process PT_GNU_PROPERTY program header PH in module L after
PT_LOAD segments are mapped from file FD. */
-void _dl_process_pt_gnu_property (struct link_map *l, int fd,
+void _dl_process_pt_gnu_property (struct link_map_private *l, int fd,
const ElfW(Phdr) *ph);
@@ -972,12 +974,12 @@ enum
/* Lookup versioned symbol. */
extern lookup_t _dl_lookup_symbol_x (const char *undef,
- struct link_map *undef_map,
+ struct link_map_private *undef_map,
const ElfW(Sym) **sym,
struct r_scope_elem *symbol_scope[],
const struct r_found_version *version,
int type_class, int flags,
- struct link_map *skip_map)
+ struct link_map_private *skip_map)
attribute_hidden;
@@ -988,53 +990,54 @@ extern lookup_t _dl_lookup_symbol_x (const char *undef,
the symbol table entry in MAP on success, or NULL on failure. MAP
must have symbol versioning information, or otherwise the result is
undefined. */
-const ElfW(Sym) *_dl_lookup_direct (struct link_map *map,
+const ElfW(Sym) *_dl_lookup_direct (struct link_map_private *map,
const char *undef_name,
uint32_t new_hash,
const char *version,
uint32_t version_hash) attribute_hidden;
/* Add the new link_map NEW to the end of the namespace list. */
-extern void _dl_add_to_namespace_list (struct link_map *new, Lmid_t nsid)
- attribute_hidden;
-
-/* Allocate a `struct link_map' for a new object being loaded. */
-extern struct link_map *_dl_new_object (char *realname, const char *libname,
- int type, struct link_map *loader,
- int mode, Lmid_t nsid)
+extern void _dl_add_to_namespace_list (struct link_map_private *new,
+ Lmid_t nsid) attribute_hidden;
+
+/* Allocate a `struct link_map_private' for a new object being loaded. */
+struct link_map_private *_dl_new_object (char *realname,
+ const char *libname, int type,
+ struct link_map_private *loader,
+ int mode, Lmid_t nsid)
attribute_hidden;
/* Relocate the given object (if it hasn't already been).
SCOPE is passed to _dl_lookup_symbol in symbol lookups.
If RTLD_LAZY is set in RELOC-MODE, don't relocate its PLT. */
-extern void _dl_relocate_object (struct link_map *map,
+extern void _dl_relocate_object (struct link_map_private *map,
struct r_scope_elem *scope[],
int reloc_mode, int consider_profiling)
attribute_hidden;
/* Protect PT_GNU_RELRO area. */
-extern void _dl_protect_relro (struct link_map *map) attribute_hidden;
+extern void _dl_protect_relro (struct link_map_private *map) attribute_hidden;
/* Call _dl_signal_error with a message about an unhandled reloc type.
TYPE is the result of ELFW(R_TYPE) (r_info), i.e. an R_<CPU>_* value.
PLT is nonzero if this was a PLT reloc; it just affects the message. */
-extern void _dl_reloc_bad_type (struct link_map *map,
+extern void _dl_reloc_bad_type (struct link_map_private *map,
unsigned int type, int plt)
attribute_hidden __attribute__ ((__noreturn__));
/* Check the version dependencies of all objects available through
MAP. If VERBOSE print some more diagnostics. */
-extern int _dl_check_all_versions (struct link_map *map, int verbose,
+extern int _dl_check_all_versions (struct link_map_private *map, int verbose,
int trace_mode) attribute_hidden;
/* Check the version dependencies for MAP. If VERBOSE print some more
diagnostics. */
-extern int _dl_check_map_versions (struct link_map *map, int verbose,
+extern int _dl_check_map_versions (struct link_map_private *map, int verbose,
int trace_mode) attribute_hidden;
/* Initialize the object in SCOPE by calling the constructors with
ARGC, ARGV, and ENV as the parameters. */
-extern void _dl_init (struct link_map *main_map, int argc, char **argv,
+extern void _dl_init (struct link_map_private *main_map, int argc, char **argv,
char **env) attribute_hidden;
/* Call the finalizer functions of all shared objects whose
@@ -1042,14 +1045,14 @@ extern void _dl_init (struct link_map *main_map, int argc, char **argv,
extern void _dl_fini (void) attribute_hidden;
/* Invoke the DT_FINI_ARRAY and DT_FINI destructors for MAP, which
- must be a struct link_map *. Can be used as an argument to
+ must be a struct link_map_private *. Can be used as an argument to
_dl_catch_exception. */
void _dl_call_fini (void *map) attribute_hidden;
/* Sort array MAPS according to dependencies of the contained objects.
If FORCE_FIRST, MAPS[0] keeps its place even if the dependencies
say otherwise. */
-extern void _dl_sort_maps (struct link_map **maps, unsigned int nmaps,
+extern void _dl_sort_maps (struct link_map_private **maps, unsigned int nmaps,
bool force_first, bool for_fini) attribute_hidden;
/* The dynamic linker calls this function before and having changing
@@ -1146,7 +1149,7 @@ extern void _dl_sysdep_start_cleanup (void) attribute_hidden;
/* Determine next available module ID and set the L l_tls_modid. */
-extern void _dl_assign_tls_modid (struct link_map *l) attribute_hidden;
+extern void _dl_assign_tls_modid (struct link_map_private *l) attribute_hidden;
/* Count the modules with TLS segments. */
extern size_t _dl_count_modids (void) attribute_hidden;
@@ -1178,7 +1181,7 @@ void __libc_setup_tls (void);
extern void _dl_relocate_static_pie (void) attribute_hidden;
/* Get a pointer to _dl_main_map. */
-extern struct link_map * _dl_get_dl_main_map (void)
+extern struct link_map_private * _dl_get_dl_main_map (void)
__attribute__ ((visibility ("hidden")));
# else
# define _dl_relocate_static_pie()
@@ -1204,7 +1207,7 @@ rtld_hidden_proto (_dl_allocate_tls)
/* Get size and alignment requirements of the static TLS block. */
extern void _dl_get_tls_static_info (size_t *sizep, size_t *alignp);
-extern void _dl_allocate_static_tls (struct link_map *map) attribute_hidden;
+void _dl_allocate_static_tls (struct link_map_private *map) attribute_hidden;
/* These are internal entry points to the two halves of _dl_allocate_tls,
only used within rtld.c itself at startup time. */
@@ -1219,14 +1222,14 @@ extern bool __rtld_tls_init_tp_called attribute_hidden;
extern void _dl_deallocate_tls (void *tcb, bool dealloc_tcb);
rtld_hidden_proto (_dl_deallocate_tls)
-extern void _dl_nothread_init_static_tls (struct link_map *) attribute_hidden;
+void _dl_nothread_init_static_tls (struct link_map_private *) attribute_hidden;
/* Find origin of the executable. */
extern const char *_dl_get_origin (void) attribute_hidden;
/* Substitute DST values. */
struct alloc_buffer;
-size_t _dl_dst_substitute (struct link_map *l, const char *name,
+size_t _dl_dst_substitute (struct link_map_private *l, const char *name,
struct alloc_buffer *result)
attribute_hidden __nonnull ((1, 2, 3));
@@ -1249,27 +1252,26 @@ extern int _dl_scope_free (void *) attribute_hidden;
for the link map L with !do_add, then this function will not raise
an exception, otherwise it is possible that it encounters a memory
allocation failure. */
-extern void _dl_add_to_slotinfo (struct link_map *l, bool do_add)
+extern void _dl_add_to_slotinfo (struct link_map_private *l, bool do_add)
attribute_hidden;
/* Update slot information data for at least the generation of the
module with the given index. */
-extern struct link_map *_dl_update_slotinfo (unsigned long int req_modid,
- size_t gen)
- attribute_hidden;
+struct link_map_private *_dl_update_slotinfo (unsigned long int req_modid,
+ size_t gen) attribute_hidden;
/* Look up the module's TLS block as for __tls_get_addr,
but never touch anything. Return null if it's not allocated yet. */
-extern void *_dl_tls_get_addr_soft (struct link_map *l) attribute_hidden;
+void *_dl_tls_get_addr_soft (struct link_map_private *l) attribute_hidden;
-extern int _dl_addr_inside_object (struct link_map *l, const ElfW(Addr) addr)
+int _dl_addr_inside_object (struct link_map_private *l, const ElfW(Addr) addr)
attribute_hidden;
/* Show show of an object. */
-extern void _dl_show_scope (struct link_map *new, int from)
+extern void _dl_show_scope (struct link_map_private *new, int from)
attribute_hidden;
-extern struct link_map *_dl_find_dso_for_object (const ElfW(Addr) addr);
+struct link_map_private *_dl_find_dso_for_object (const ElfW(Addr) addr);
rtld_hidden_proto (_dl_find_dso_for_object)
/* Initialization which is normally done by the dynamic linker. */
@@ -1283,10 +1285,10 @@ extern void _dl_aux_init (ElfW(auxv_t) *av)
/* Initialize the static TLS space for the link map in all existing
threads. */
#if PTHREAD_IN_LIBC
-void _dl_init_static_tls (struct link_map *map) attribute_hidden;
+void _dl_init_static_tls (struct link_map_private *map) attribute_hidden;
#endif
static inline void
-dl_init_static_tls (struct link_map *map)
+dl_init_static_tls (struct link_map_private *map)
{
#if PTHREAD_IN_LIBC
/* The stack list is available to ld.so, so the initialization can
@@ -1301,7 +1303,7 @@ dl_init_static_tls (struct link_map *map)
/* Called before relocating ld.so during static dlopen. This can be
used to partly initialize the dormant ld.so copy in the static
dlopen namespace. */
-void __rtld_static_init (struct link_map *map) attribute_hidden;
+void __rtld_static_init (struct link_map_private *map) attribute_hidden;
#endif
/* Return true if the ld.so copy in this namespace is actually active
@@ -1319,7 +1321,7 @@ rtld_active (void)
}
static inline struct auditstate *
-link_map_audit_state (struct link_map *l, size_t index)
+link_map_audit_state (struct link_map_private *l, size_t index)
{
if (l == &GL (dl_rtld_map))
/* The auditstate array is stored separately. */
@@ -1335,13 +1337,13 @@ link_map_audit_state (struct link_map *l, size_t index)
/* Call the la_objsearch from the audit modules from the link map L. If
ORIGNAME is non NULL, it is updated with the revious name prior calling
la_objsearch. */
-const char *_dl_audit_objsearch (const char *name, struct link_map *l,
+const char *_dl_audit_objsearch (const char *name, struct link_map_private *l,
unsigned int code)
attribute_hidden;
/* Call the la_activity from the audit modules from the link map L and issues
the ACTION argument. */
-void _dl_audit_activity_map (struct link_map *l, int action)
+void _dl_audit_activity_map (struct link_map_private *l, int action)
attribute_hidden;
/* Call the la_activity from the audit modules from the link map from the
@@ -1351,33 +1353,35 @@ void _dl_audit_activity_nsid (Lmid_t nsid, int action)
/* Call the la_objopen from the audit modules for the link_map L on the
namespace identification NSID. */
-void _dl_audit_objopen (struct link_map *l, Lmid_t nsid)
+void _dl_audit_objopen (struct link_map_private *l, Lmid_t nsid)
attribute_hidden;
/* Call the la_objclose from the audit modules for the link_map L. */
-void _dl_audit_objclose (struct link_map *l)
+void _dl_audit_objclose (struct link_map_private *l)
attribute_hidden;
/* Call the la_preinit from the audit modules for the link_map L. */
-void _dl_audit_preinit (struct link_map *l);
+void _dl_audit_preinit (struct link_map_private *l);
/* Call the la_symbind{32,64} from the audit modules for the link_map L. If
RELOC_RESULT is NULL it assumes the symbol to be bind-now and will set
the flags with LA_SYMB_NOPLTENTER | LA_SYMB_NOPLTEXIT prior calling
la_symbind{32,64}. */
-void _dl_audit_symbind (struct link_map *l, struct reloc_result *reloc_result,
+void _dl_audit_symbind (struct link_map_private *l,
+ struct reloc_result *reloc_result,
const void *reloc, const ElfW(Sym) *defsym,
DL_FIXUP_VALUE_TYPE *value, lookup_t result, bool lazy)
attribute_hidden;
/* Same as _dl_audit_symbind, but also sets LA_SYMB_DLSYM flag. */
-void _dl_audit_symbind_alt (struct link_map *l, const ElfW(Sym) *ref,
+void _dl_audit_symbind_alt (struct link_map_private *l, const ElfW(Sym) *ref,
void **value, lookup_t result);
rtld_hidden_proto (_dl_audit_symbind_alt)
-void _dl_audit_pltenter (struct link_map *l, struct reloc_result *reloc_result,
+void _dl_audit_pltenter (struct link_map_private *l,
+ struct reloc_result *reloc_result,
DL_FIXUP_VALUE_TYPE *value, void *regs,
long int *framesize)
attribute_hidden;
-void DL_ARCH_FIXUP_ATTRIBUTE _dl_audit_pltexit (struct link_map *l,
+void DL_ARCH_FIXUP_ATTRIBUTE _dl_audit_pltexit (struct link_map_private *l,
ElfW(Word) reloc_arg,
const void *inregs,
void *outregs)
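(Aside for reviewers skimming the sysdeps churn: every hunk in this file and the
machine-specific ones is the same mechanical substitution. A minimal sketch of
the layout these accesses assume — illustrative only, the real definition comes
from the earlier "elf: Use struct link_map_private for the internal link map"
patch, and the internal fields listed here are placeholders:

  #include <link.h>	/* public struct link_map: l_addr, l_name, l_ld, ...  */

  /* Illustrative sketch, not the actual definition.  The ABI-visible
     fields stay in the public struct link_map, embedded as the l_public
     member, so former map->l_addr accesses become
     map->l_public.l_addr while internal-only fields keep their names.  */
  struct link_map_private
  {
    struct link_map l_public;	/* Accessed as map->l_public.l_addr etc.  */
    /* Internal, run-time-writable fields follow here.  */
  };
)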
diff --git a/sysdeps/generic/rtld_static_init.h b/sysdeps/generic/rtld_static_init.h
index 92543cd798..a482112760 100644
--- a/sysdeps/generic/rtld_static_init.h
+++ b/sysdeps/generic/rtld_static_init.h
@@ -17,7 +17,8 @@
<https://www.gnu.org/licenses/>. */
static inline void
-__rtld_static_init_arch (struct link_map *map, struct rtld_global_ro *dl)
+__rtld_static_init_arch (struct link_map_private *map,
+ struct rtld_global_ro *dl)
{
/* The generic helper does not perform any additional
initialization. */
diff --git a/sysdeps/hppa/dl-fptr.c b/sysdeps/hppa/dl-fptr.c
index 1c0d51ca9b..9015c40fc9 100644
--- a/sysdeps/hppa/dl-fptr.c
+++ b/sysdeps/hppa/dl-fptr.c
@@ -181,7 +181,7 @@ make_fdesc (ElfW(Addr) ip, ElfW(Addr) gp)
static inline ElfW(Addr) * __attribute__ ((always_inline))
-make_fptr_table (struct link_map *map)
+make_fptr_table (struct link_map_private *map)
{
const ElfW(Sym) *symtab = (const void *) D_PTR (map, l_info[DT_SYMTAB]);
const char *strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
@@ -224,7 +224,7 @@ make_fptr_table (struct link_map *map)
ElfW(Addr)
-_dl_make_fptr (struct link_map *map, const ElfW(Sym) *sym,
+_dl_make_fptr (struct link_map_private *map, const ElfW(Sym) *sym,
ElfW(Addr) ip)
{
ElfW(Addr) *ftab = map->l_mach.fptr_table;
@@ -286,7 +286,7 @@ _dl_make_fptr (struct link_map *map, const ElfW(Sym) *sym,
void
-_dl_unmap (struct link_map *map)
+_dl_unmap (struct link_map_private *map)
{
ElfW(Addr) *ftab = map->l_mach.fptr_table;
struct fdesc *head = NULL, *tail = NULL;
@@ -322,7 +322,7 @@ _dl_unmap (struct link_map *map)
map->l_mach.fptr_table = NULL;
}
-extern ElfW(Addr) _dl_fixup (struct link_map *, ElfW(Word)) attribute_hidden;
+ElfW(Addr) _dl_fixup (struct link_map_private *, ElfW(Word)) attribute_hidden;
static inline Elf32_Addr
elf_machine_resolve (void)
@@ -396,7 +396,7 @@ _dl_lookup_address (const void *address)
&& gptr[1] == 0xd6801c1e /* depwi 0,31,2,r20 */
&& (ElfW(Addr)) gptr[2] == elf_machine_resolve ())
{
- struct link_map *l = (struct link_map *) gptr[5];
+ struct link_map_private *l = (struct link_map_private *) gptr[5];
/* If gp has been resolved, we need to hunt for relocation offset. */
if (!(reloc_arg & PA_GP_RELOC))
diff --git a/sysdeps/hppa/dl-lookupcfg.h b/sysdeps/hppa/dl-lookupcfg.h
index 28daf3f89b..e64dea69b0 100644
--- a/sysdeps/hppa/dl-lookupcfg.h
+++ b/sysdeps/hppa/dl-lookupcfg.h
@@ -22,9 +22,9 @@
#include <dl-fptr.h>
/* Forward declaration. */
-struct link_map;
+struct link_map_private;
-void *_dl_symbol_address (struct link_map *map, const ElfW(Sym) *ref);
+void *_dl_symbol_address (struct link_map_private *map, const ElfW(Sym) *ref);
rtld_hidden_proto (_dl_symbol_address)
#define DL_SYMBOL_ADDRESS(map, ref) _dl_symbol_address(map, ref)
@@ -34,7 +34,7 @@ rtld_hidden_proto (_dl_lookup_address)
#define DL_LOOKUP_ADDRESS(addr) _dl_lookup_address ((const void *) addr)
-void attribute_hidden _dl_unmap (struct link_map *map);
+void attribute_hidden _dl_unmap (struct link_map_private *map);
#define DL_UNMAP(map) _dl_unmap (map)
diff --git a/sysdeps/hppa/dl-machine.h b/sysdeps/hppa/dl-machine.h
index 993593de5d..60ef82c3e7 100644
--- a/sysdeps/hppa/dl-machine.h
+++ b/sysdeps/hppa/dl-machine.h
@@ -59,7 +59,7 @@
/* Initialize the function descriptor table before relocations */
static inline void
-__hppa_init_bootstrap_fdesc_table (struct link_map *map)
+__hppa_init_bootstrap_fdesc_table (struct link_map_private *map)
{
ElfW(Addr) *boot_table;
@@ -118,7 +118,7 @@ elf_machine_load_address (void)
/* Fixup a PLT entry to bounce directly to the function at VALUE. */
static inline struct fdesc __attribute__ ((always_inline))
-elf_machine_fixup_plt (struct link_map *map, lookup_t t,
+elf_machine_fixup_plt (struct link_map_private *map, lookup_t t,
const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
const Elf32_Rela *reloc,
Elf32_Addr *reloc_addr, struct fdesc value)
@@ -153,7 +153,7 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t t,
/* Return the final value of a plt relocation. */
static inline struct fdesc
-elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
+elf_machine_plt_value (struct link_map_private *map, const Elf32_Rela *reloc,
struct fdesc value)
{
/* We are rela only, return a function descriptor as a plt entry. */
@@ -164,7 +164,8 @@ elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
entries will jump to the on-demand fixup code in dl-runtime.c. */
static inline int
-elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
+elf_machine_runtime_setup (struct link_map_private *l,
+ struct r_scope_elem *scope[],
int lazy, int profile)
{
Elf32_Addr *got = NULL;
@@ -190,7 +191,7 @@ elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
return lazy;
/* All paths use these values */
- l_addr = l->l_addr;
+ l_addr = l->l_public.l_addr;
jmprel = D_PTR(l, l_info[DT_JMPREL]);
end_jmprel = jmprel + l->l_info[DT_PLTRELSZ]->d_un.d_val;
@@ -350,8 +351,8 @@ elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
#define RTLD_START \
/* Set up dp for any non-PIC lib constructors that may be called. */ \
-static struct link_map * __attribute__((used)) \
-set_dp (struct link_map *map) \
+static struct link_map_private * __attribute__((used)) \
+set_dp (struct link_map_private *map) \
{ \
register Elf32_Addr dp asm ("%r27"); \
dp = D_PTR (map, l_info[DT_PLTGOT]); \
@@ -552,7 +553,7 @@ dl_platform_init (void)
| (((as14) & 0x2000) >> 13))
static void __attribute__((always_inline))
-elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
const Elf32_Rela *reloc,
const Elf32_Sym *sym,
const struct r_found_version *version,
@@ -562,7 +563,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
Elf32_Addr *const reloc_addr = reloc_addr_arg;
const Elf32_Sym *const refsym = sym;
unsigned long const r_type = ELF32_R_TYPE (reloc->r_info);
- struct link_map *sym_map;
+ struct link_map_private *sym_map;
Elf32_Addr value;
/* RESOLVE_MAP will return a null value for undefined syms, and
@@ -740,7 +741,7 @@ elf_machine_rela_relative (Elf32_Addr l_addr,
unsigned long const r_type = ELF32_R_TYPE (reloc->r_info);
Elf32_Addr *const reloc_addr = reloc_addr_arg;
static char msgbuf[] = { "Unknown" };
- struct link_map map;
+ struct link_map_private map;
Elf32_Addr value;
value = l_addr + reloc->r_addend;
@@ -777,7 +778,7 @@ elf_machine_rela_relative (Elf32_Addr l_addr,
return;
default: /* Bad reloc, map unknown (really it's the current map) */
- map.l_name = msgbuf;
+ map.l_public.l_name = msgbuf;
_dl_reloc_bad_type (&map, r_type, 0);
return;
}
@@ -786,7 +787,8 @@ elf_machine_rela_relative (Elf32_Addr l_addr,
}
static void __attribute__((always_inline))
-elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_lazy_rel (struct link_map_private *map,
+ struct r_scope_elem *scope[],
Elf32_Addr l_addr, const Elf32_Rela *reloc,
int skip_ifunc)
{
diff --git a/sysdeps/hppa/dl-runtime.c b/sysdeps/hppa/dl-runtime.c
index bebf640725..61095bdbcf 100644
--- a/sysdeps/hppa/dl-runtime.c
+++ b/sysdeps/hppa/dl-runtime.c
@@ -26,12 +26,12 @@
_dl_fixup with the relocation offset. */
ElfW(Word) __attribute ((noinline)) DL_ARCH_FIXUP_ATTRIBUTE
-_dl_fix_reloc_arg (struct fdesc *fptr, struct link_map *l)
+_dl_fix_reloc_arg (struct fdesc *fptr, struct link_map_private *l)
{
Elf32_Addr l_addr, iplt, jmprel, end_jmprel, r_type;
const Elf32_Rela *reloc;
- l_addr = l->l_addr;
+ l_addr = l->l_public.l_addr;
jmprel = D_PTR(l, l_info[DT_JMPREL]);
end_jmprel = jmprel + l->l_info[DT_PLTRELSZ]->d_un.d_val;
diff --git a/sysdeps/hppa/dl-runtime.h b/sysdeps/hppa/dl-runtime.h
index 5dbe762fee..7a44279aa7 100644
--- a/sysdeps/hppa/dl-runtime.h
+++ b/sysdeps/hppa/dl-runtime.h
@@ -17,7 +17,7 @@
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
-ElfW(Word) _dl_fix_reloc_arg (struct fdesc *, struct link_map *);
+ElfW(Word) _dl_fix_reloc_arg (struct fdesc *, struct link_map_private *);
rtld_hidden_proto (_dl_fix_reloc_arg)
/* Clear PA_GP_RELOC bit in relocation offset. */
diff --git a/sysdeps/hppa/dl-symaddr.c b/sysdeps/hppa/dl-symaddr.c
index 3b5a000dd8..bfc720ed92 100644
--- a/sysdeps/hppa/dl-symaddr.c
+++ b/sysdeps/hppa/dl-symaddr.c
@@ -20,7 +20,7 @@
#include <dl-machine.h>
void *
-_dl_symbol_address (struct link_map *map, const ElfW(Sym) *ref)
+_dl_symbol_address (struct link_map_private *map, const ElfW(Sym) *ref)
{
/* Find the "ip" from the "map" and symbol "ref" */
Elf32_Addr value = SYMBOL_ADDRESS (map, ref, false);
diff --git a/sysdeps/htl/pthreadP.h b/sysdeps/htl/pthreadP.h
index 3f052f0e53..d178aa63c4 100644
--- a/sysdeps/htl/pthreadP.h
+++ b/sysdeps/htl/pthreadP.h
@@ -27,7 +27,7 @@
/* Attribute to indicate thread creation was issued from C11 thrd_create. */
#define ATTR_C11_THREAD ((void*)(uintptr_t)-1)
-extern void __pthread_init_static_tls (struct link_map *) attribute_hidden;
+void __pthread_init_static_tls (struct link_map_private *) attribute_hidden;
/* These represent the interface used by glibc itself. */
diff --git a/sysdeps/i386/dl-machine.h b/sysdeps/i386/dl-machine.h
index 07469e99b0..b0bba3621f 100644
--- a/sysdeps/i386/dl-machine.h
+++ b/sysdeps/i386/dl-machine.h
@@ -57,7 +57,8 @@ elf_machine_dynamic (void)
entries will jump to the on-demand fixup code in dl-runtime.c. */
static inline int __attribute__ ((unused, always_inline))
-elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
+elf_machine_runtime_setup (struct link_map_private *l,
+ struct r_scope_elem *scope[],
int lazy, int profile)
{
Elf32_Addr *got;
@@ -81,7 +82,7 @@ elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
The prelinker saved us here address of .plt + 0x16. */
if (got[1])
{
- l->l_mach.plt = got[1] + l->l_addr;
+ l->l_mach.plt = got[1] + l->l_public.l_addr;
l->l_mach.gotplt = (Elf32_Addr) &got[3];
}
got[1] = (Elf32_Addr) l; /* Identify this shared object. */
@@ -217,7 +218,7 @@ dl_platform_init (void)
}
static inline Elf32_Addr
-elf_machine_fixup_plt (struct link_map *map, lookup_t t,
+elf_machine_fixup_plt (struct link_map_private *map, lookup_t t,
const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
const Elf32_Rel *reloc,
Elf32_Addr *reloc_addr, Elf32_Addr value)
@@ -227,7 +228,7 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t t,
/* Return the final value of a plt relocation. */
static inline Elf32_Addr
-elf_machine_plt_value (struct link_map *map, const Elf32_Rel *reloc,
+elf_machine_plt_value (struct link_map_private *map, const Elf32_Rel *reloc,
Elf32_Addr value)
{
return value;
@@ -247,7 +248,7 @@ elf_machine_plt_value (struct link_map *map, const Elf32_Rel *reloc,
static inline void
__attribute ((always_inline))
-elf_machine_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_rel (struct link_map_private *map, struct r_scope_elem *scope[],
const Elf32_Rel *reloc,
const Elf32_Sym *sym, const struct r_found_version *version,
void *const reloc_addr_arg, int skip_ifunc)
@@ -257,7 +258,7 @@ elf_machine_rel (struct link_map *map, struct r_scope_elem *scope[],
# if !defined RTLD_BOOTSTRAP
if (__glibc_unlikely (r_type == R_386_RELATIVE))
- *reloc_addr += map->l_addr;
+ *reloc_addr += map->l_public.l_addr;
# ifndef RTLD_BOOTSTRAP
else if (__glibc_unlikely (r_type == R_386_NONE))
return;
@@ -268,8 +269,8 @@ elf_machine_rel (struct link_map *map, struct r_scope_elem *scope[],
# ifndef RTLD_BOOTSTRAP
const Elf32_Sym *const refsym = sym;
# endif
- struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version,
- r_type);
+ struct link_map_private *sym_map
+ = RESOLVE_MAP (map, scope, &sym, version, r_type);
Elf32_Addr value = SYMBOL_ADDRESS (sym_map, sym, true);
if (sym != NULL
@@ -288,12 +289,12 @@ elf_machine_rel (struct link_map *map, struct r_scope_elem *scope[],
%s: IFUNC symbol '%s' referenced in '%s' is defined in the executable \
and creates an unsatisfiable circular dependency.\n",
RTLD_PROGNAME, strtab + refsym->st_name,
- map->l_name);
+ map->l_public.l_name);
else
_dl_error_printf ("\
%s: Relink `%s' with `%s' for IFUNC symbol `%s'\n",
- RTLD_PROGNAME, map->l_name,
- sym_map->l_name,
+ RTLD_PROGNAME, map->l_public.l_name,
+ sym_map->l_public.l_name,
strtab + refsym->st_name);
}
# endif
@@ -426,7 +427,7 @@ and creates an unsatisfiable circular dependency.\n",
MIN (sym->st_size, refsym->st_size));
break;
case R_386_IRELATIVE:
- value = map->l_addr + *reloc_addr;
+ value = map->l_public.l_addr + *reloc_addr;
if (__glibc_likely (!skip_ifunc))
value = ((Elf32_Addr (*) (void)) value) ();
*reloc_addr = value;
@@ -463,7 +464,8 @@ elf_machine_rela_relative (Elf32_Addr l_addr, const Elf32_Rela *reloc,
static inline void
__attribute__ ((always_inline))
-elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_lazy_rel (struct link_map_private *map,
+ struct r_scope_elem *scope[],
Elf32_Addr l_addr, const Elf32_Rel *reloc,
int skip_ifunc)
{
@@ -510,7 +512,7 @@ elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
}
else if (__glibc_unlikely (r_type == R_386_IRELATIVE))
{
- Elf32_Addr value = map->l_addr + *reloc_addr;
+ Elf32_Addr value = map->l_public.l_addr + *reloc_addr;
if (__glibc_likely (!skip_ifunc))
value = ((Elf32_Addr (*) (void)) value) ();
*reloc_addr = value;
diff --git a/sysdeps/i386/dl-tlsdesc.h b/sysdeps/i386/dl-tlsdesc.h
index 312905eb93..0851d833cb 100644
--- a/sysdeps/i386/dl-tlsdesc.h
+++ b/sysdeps/i386/dl-tlsdesc.h
@@ -46,7 +46,7 @@ extern ptrdiff_t attribute_hidden __attribute__ ((regparm (1)))
_dl_tlsdesc_undefweak (struct tlsdesc *);
# ifdef SHARED
-extern void *_dl_make_tlsdesc_dynamic (struct link_map *map,
+extern void *_dl_make_tlsdesc_dynamic (struct link_map_private *map,
size_t ti_offset) attribute_hidden;
extern ptrdiff_t attribute_hidden __attribute__ ((regparm (1)))
diff --git a/sysdeps/i386/tlsdesc.c b/sysdeps/i386/tlsdesc.c
index b5c6a06e13..b2dbd7634a 100644
--- a/sysdeps/i386/tlsdesc.c
+++ b/sysdeps/i386/tlsdesc.c
@@ -26,7 +26,7 @@
if there is one. */
void
-_dl_unmap (struct link_map *map)
+_dl_unmap (struct link_map_private *map)
{
_dl_unmap_segments (map);
diff --git a/sysdeps/ia64/dl-lookupcfg.h b/sysdeps/ia64/dl-lookupcfg.h
index 64218fa7bb..e12da0e149 100644
--- a/sysdeps/ia64/dl-lookupcfg.h
+++ b/sysdeps/ia64/dl-lookupcfg.h
@@ -25,9 +25,9 @@
#define DL_NO_COPY_RELOCS
/* Forward declaration. */
-struct link_map;
+struct link_map_private;
-extern void *_dl_symbol_address (struct link_map *map, const Elf64_Sym *ref);
+void *_dl_symbol_address (struct link_map_private *, const Elf64_Sym *ref);
rtld_hidden_proto (_dl_symbol_address)
#define DL_SYMBOL_ADDRESS(map, ref) _dl_symbol_address(map, ref)
@@ -36,7 +36,7 @@ extern Elf64_Addr _dl_lookup_address (const void *address);
#define DL_LOOKUP_ADDRESS(addr) _dl_lookup_address (addr)
-extern void attribute_hidden _dl_unmap (struct link_map *map);
+extern void attribute_hidden _dl_unmap (struct link_map_private *map);
#define DL_UNMAP(map) _dl_unmap (map)
diff --git a/sysdeps/ia64/dl-machine.h b/sysdeps/ia64/dl-machine.h
index 3ef6b0ef4b..e9c59dc65a 100644
--- a/sysdeps/ia64/dl-machine.h
+++ b/sysdeps/ia64/dl-machine.h
@@ -35,7 +35,7 @@
#define DT_IA_64(x) (DT_IA_64_##x - DT_LOPROC + DT_NUM)
static inline void __attribute__ ((always_inline))
-__ia64_init_bootstrap_fdesc_table (struct link_map *map)
+__ia64_init_bootstrap_fdesc_table (struct link_map_private *map)
{
Elf64_Addr *boot_table;
@@ -100,7 +100,8 @@ elf_machine_load_address (void)
entries will jump to the on-demand fixup code in dl-runtime.c. */
static inline int __attribute__ ((unused, always_inline))
-elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
+elf_machine_runtime_setup (struct link_map_private *l,
+ struct r_scope_elem *scope[],
int lazy, int profile)
{
extern void _dl_runtime_resolve (void);
@@ -112,11 +113,12 @@ elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
Elf64_Addr *reserve, doit;
/*
- * Careful with the typecast here or it will try to add l-l_addr
+ * Careful with the typecast here or it will try to add l_addr
* pointer elements
*/
reserve = ((Elf64_Addr *)
- (l->l_info[DT_IA_64 (PLT_RESERVE)]->d_un.d_ptr + l->l_addr));
+ (l->l_info[DT_IA_64 (PLT_RESERVE)]->d_un.d_ptr
+ + l->l_public.l_addr));
/* Identify this shared object. */
reserve[0] = (Elf64_Addr) l;
@@ -293,7 +295,7 @@ elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
/* Fixup a PLT entry to bounce directly to the function at VALUE. */
static inline struct fdesc __attribute__ ((always_inline))
-elf_machine_fixup_plt (struct link_map *l, lookup_t t,
+elf_machine_fixup_plt (struct link_map_private *l, lookup_t t,
const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
const Elf64_Rela *reloc,
Elf64_Addr *reloc_addr, struct fdesc value)
@@ -310,7 +312,7 @@ elf_machine_fixup_plt (struct link_map *l, lookup_t t,
/* Return the final value of a plt relocation. */
static inline struct fdesc
-elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc,
+elf_machine_plt_value (struct link_map_private *map, const Elf64_Rela *reloc,
struct fdesc value)
{
/* No need to handle rel vs rela since IA64 is rela only */
@@ -334,7 +336,7 @@ elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc,
resolved). MAP is the object containing the reloc. */
static inline void
__attribute ((always_inline))
-elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
const Elf64_Rela *reloc,
const Elf64_Sym *sym,
const struct r_found_version *version,
@@ -353,7 +355,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
0))
{
assert (ELF64_R_TYPE (reloc->r_info) == R_IA64_REL64LSB);
- value = *reloc_addr + map->l_addr;
+ value = *reloc_addr + map->l_public.l_addr;
}
else
#endif
@@ -361,8 +363,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
return;
else
{
- struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version,
- r_type);
+ struct link_map_private *sym_map
+ = RESOLVE_MAP (map, scope, &sym, version, r_type);
/* RESOLVE_MAP() will return NULL if it fail to locate the symbol. */
if (sym_map != NULL)
@@ -390,14 +392,14 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
resolv function. */
value = sym_map->l_tls_modid;
else if (R_IA64_TYPE (r_type) == R_IA64_TYPE (R_IA64_DTPREL64LSB))
- value -= sym_map->l_addr;
+ value -= sym_map->l_public.l_addr;
#endif
else if (R_IA64_TYPE (r_type) == R_IA64_TYPE (R_IA64_TPREL64LSB))
{
#ifndef RTLD_BOOTSTRAP
CHECK_STATIC_TLS (map, sym_map);
#endif
- value += sym_map->l_tls_offset - sym_map->l_addr;
+ value += sym_map->l_tls_offset - sym_map->l_public.l_addr;
}
else
_dl_reloc_bad_type (map, r_type, 0);
@@ -439,7 +441,8 @@ elf_machine_rela_relative (Elf64_Addr l_addr, const Elf64_Rela *reloc,
/* Perform a RELATIVE reloc on the .got entry that transfers to the .plt. */
static inline void
__attribute ((always_inline))
-elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_lazy_rel (struct link_map_private *map,
+ struct r_scope_elem *scope[],
Elf64_Addr l_addr, const Elf64_Rela *reloc,
int skip_ifunc)
{
diff --git a/sysdeps/loongarch/dl-machine.h b/sysdeps/loongarch/dl-machine.h
index 0d17fd21e3..683d5f4e7e 100644
--- a/sysdeps/loongarch/dl-machine.h
+++ b/sysdeps/loongarch/dl-machine.h
@@ -93,7 +93,7 @@ static inline ElfW (Addr) elf_machine_dynamic (void)
or $s0, $a0, $zero \n\
# Load the original argument count. \n\
ld.d $a1, $sp, 0 \n\
- # Call _dl_init (struct link_map *main_map, int argc, \
+ # Call _dl_init (struct link_map_private *main_map, int argc, \
char **argv, char **env) \n\
la $a0, _rtld_local \n\
ld.d $a0, $a0, 0 \n\
@@ -124,7 +124,7 @@ static inline ElfW (Addr) elf_machine_dynamic (void)
#define elf_machine_plt_value(map, reloc, value) (value)
static inline ElfW (Addr)
-elf_machine_fixup_plt (struct link_map *map, lookup_t t,
+elf_machine_fixup_plt (struct link_map_private *map, lookup_t t,
const ElfW (Sym) *refsym, const ElfW (Sym) *sym,
const ElfW (Rela) *reloc, ElfW (Addr) *reloc_addr,
ElfW (Addr) value)
@@ -141,7 +141,7 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t t,
MAP is the object containing the reloc. */
static inline void __attribute__ ((always_inline))
-elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
const ElfW (Rela) *reloc,
const ElfW (Sym) *sym,
const struct r_found_version *version,
@@ -151,7 +151,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
const unsigned long int r_type = ELFW (R_TYPE) (r_info);
ElfW (Addr) *addr_field = (ElfW (Addr) *) reloc_addr;
const ElfW (Sym) *const __attribute__ ((unused)) refsym = sym;
- struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version, r_type);
+ struct link_map_private *sym_map
+ = RESOLVE_MAP (map, scope, &sym, version, r_type);
ElfW (Addr) value = 0;
if (sym_map != NULL)
value = SYMBOL_ADDRESS (sym_map, sym, true) + reloc->r_addend;
@@ -211,11 +212,11 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
}
case R_LARCH_RELATIVE:
- *addr_field = map->l_addr + reloc->r_addend;
+ *addr_field = map->l_public.l_addr + reloc->r_addend;
break;
case R_LARCH_IRELATIVE:
- value = map->l_addr + reloc->r_addend;
+ value = map->l_public.l_addr + reloc->r_addend;
if (__glibc_likely (!skip_ifunc))
value = ((ElfW (Addr) (*) (void)) value) ();
*addr_field = value;
@@ -237,7 +238,8 @@ elf_machine_rela_relative (ElfW (Addr) l_addr, const ElfW (Rela) *reloc,
}
static inline void __attribute__ ((always_inline))
-elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_lazy_rel (struct link_map_private *map,
+ struct r_scope_elem *scope[],
ElfW (Addr) l_addr,
const ElfW (Rela) *reloc, int skip_ifunc)
{
@@ -263,7 +265,8 @@ elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
will jump to the on-demand fixup code __dl_runtime_resolve. */
static inline int __attribute__ ((always_inline))
-elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
+elf_machine_runtime_setup (struct link_map_private *l,
+ struct r_scope_elem *scope[],
int lazy, int profile)
{
#ifndef RTLD_BOOTSTRAP
diff --git a/sysdeps/m68k/dl-machine.h b/sysdeps/m68k/dl-machine.h
index 8d7e733e2a..6bba7315c3 100644
--- a/sysdeps/m68k/dl-machine.h
+++ b/sysdeps/m68k/dl-machine.h
@@ -70,7 +70,8 @@ elf_machine_load_address (void)
entries will jump to the on-demand fixup code in dl-runtime.c. */
static inline int __attribute__ ((always_inline))
-elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
+elf_machine_runtime_setup (struct link_map_private *l,
+ struct r_scope_elem *scope[],
int lazy, int profile)
{
Elf32_Addr *got;
@@ -146,7 +147,7 @@ _dl_start_user:\n\
move.l %d0, %a4\n\
| Load the adjusted argument count.\n\
move.l (%sp), %d1\n\
- # Call _dl_init (struct link_map *main_map, int argc, char **argv, char **env)\n\
+ # Call _dl_init (struct link_map_private *main_map, int argc, char **argv, char **env)\n\
pea 8(%sp, %d1*4)\n\
pea 8(%sp)\n\
move.l %d1, -(%sp)\n\
@@ -179,7 +180,7 @@ _dl_start_user:\n\
#define ELF_MACHINE_JMP_SLOT R_68K_JMP_SLOT
static inline Elf32_Addr
-elf_machine_fixup_plt (struct link_map *map, lookup_t t,
+elf_machine_fixup_plt (struct link_map_private *map, lookup_t t,
const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
const Elf32_Rela *reloc,
Elf32_Addr *reloc_addr, Elf32_Addr value)
@@ -190,7 +191,7 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t t,
/* Return the final value of a plt relocation. On the m68k the JMP_SLOT
relocation ignores the addend. */
static inline Elf32_Addr
-elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
+elf_machine_plt_value (struct link_map_private *map, const Elf32_Rela *reloc,
Elf32_Addr value)
{
return value;
@@ -208,7 +209,7 @@ elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
MAP is the object containing the reloc. */
static inline void __attribute__ ((unused, always_inline))
-elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
const Elf32_Rela *reloc, const Elf32_Sym *sym,
const struct r_found_version *version,
void *const reloc_addr_arg, int skip_ifunc)
@@ -217,12 +218,12 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
const unsigned int r_type = ELF32_R_TYPE (reloc->r_info);
if (__builtin_expect (r_type == R_68K_RELATIVE, 0))
- *reloc_addr = map->l_addr + reloc->r_addend;
+ *reloc_addr = map->l_public.l_addr + reloc->r_addend;
else
{
const Elf32_Sym *const refsym = sym;
- struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version,
- r_type);
+ struct link_map_private *sym_map
+ = RESOLVE_MAP (map, scope, &sym, version, r_type);
Elf32_Addr value = SYMBOL_ADDRESS (sym_map, sym, true);
switch (r_type)
@@ -306,7 +307,8 @@ elf_machine_rela_relative (Elf32_Addr l_addr, const Elf32_Rela *reloc,
}
static inline void __attribute__ ((unused, always_inline))
-elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_lazy_rel (struct link_map_private *map,
+ struct r_scope_elem *scope[],
Elf32_Addr l_addr, const Elf32_Rela *reloc,
int skip_ifunc)
{
diff --git a/sysdeps/microblaze/dl-machine.h b/sysdeps/microblaze/dl-machine.h
index da875b68c5..e8f9cebe2f 100644
--- a/sysdeps/microblaze/dl-machine.h
+++ b/sysdeps/microblaze/dl-machine.h
@@ -71,7 +71,8 @@ elf_machine_load_address (void)
entries will jump to the on-demand fixup code in dl-runtime.c. */
static inline int __attribute__ ((always_inline))
-elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
+elf_machine_runtime_setup (struct link_map_private *l,
+ struct r_scope_elem *scope[],
int lazy, int profile)
{
extern void _dl_runtime_resolve (Elf32_Word);
@@ -167,7 +168,7 @@ _dl_start_user:\n\
#define ELF_MACHINE_JMP_SLOT R_MICROBLAZE_JUMP_SLOT
static inline Elf32_Addr
-elf_machine_fixup_plt (struct link_map *map, lookup_t t,
+elf_machine_fixup_plt (struct link_map_private *map, lookup_t t,
const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
const Elf32_Rela *reloc,
Elf32_Addr *reloc_addr, Elf32_Addr value)
@@ -177,7 +178,7 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t t,
/* Return the final value of a plt relocation. Ignore the addend. */
static inline Elf32_Addr
-elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
+elf_machine_plt_value (struct link_map_private *map, const Elf32_Rela *reloc,
Elf32_Addr value)
{
return value;
@@ -202,7 +203,7 @@ elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
} while (0)
static inline void __attribute__ ((always_inline))
-elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
const Elf32_Rela *reloc, const Elf32_Sym *sym,
const struct r_found_version *version,
void *const reloc_addr_arg, int skip_ifunc)
@@ -211,14 +212,14 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
const int r_type = ELF32_R_TYPE (reloc->r_info);
if (__builtin_expect (r_type == R_MICROBLAZE_64_PCREL, 0))
- PUT_REL_64 (reloc_addr, map->l_addr + reloc->r_addend);
+ PUT_REL_64 (reloc_addr, map->l_public.l_addr + reloc->r_addend);
else if (r_type == R_MICROBLAZE_REL)
- *reloc_addr = map->l_addr + reloc->r_addend;
+ *reloc_addr = map->l_public.l_addr + reloc->r_addend;
else
{
const Elf32_Sym *const refsym = sym;
- struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version,
- r_type);
+ struct link_map_private *sym_map
+ = RESOLVE_MAP (map, scope, &sym, version, r_type);
Elf32_Addr value = SYMBOL_ADDRESS (sym_map, sym, true);
value += reloc->r_addend;
@@ -282,7 +283,8 @@ elf_machine_rela_relative (Elf32_Addr l_addr, const Elf32_Rela *reloc,
}
static inline void
-elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_lazy_rel (struct link_map_private *map,
+ struct r_scope_elem *scope[],
Elf32_Addr l_addr, const Elf32_Rela *reloc,
int skip_ifunc)
{
diff --git a/sysdeps/mips/dl-debug.h b/sysdeps/mips/dl-debug.h
index 584ae0da1c..3966b6a8db 100644
--- a/sysdeps/mips/dl-debug.h
+++ b/sysdeps/mips/dl-debug.h
@@ -25,7 +25,7 @@
static inline void
__attribute ((always_inline))
-elf_setup_debug_entry (struct link_map *l, struct r_debug *r)
+elf_setup_debug_entry (struct link_map_private *l, struct r_debug *r)
{
if (l->l_info[DT_MIPS (RLD_MAP_REL)] != NULL)
{
diff --git a/sysdeps/mips/dl-machine-reject-phdr.h b/sysdeps/mips/dl-machine-reject-phdr.h
index b784697fc1..85cc2beeea 100644
--- a/sysdeps/mips/dl-machine-reject-phdr.h
+++ b/sysdeps/mips/dl-machine-reject-phdr.h
@@ -52,7 +52,7 @@ find_mips_abiflags (const ElfW(Phdr) *phdr, ElfW(Half) phnum)
/* Cache the FP ABI value from the PT_MIPS_ABIFLAGS program header. */
static bool
-cached_fpabi_reject_phdr_p (struct link_map *l)
+cached_fpabi_reject_phdr_p (struct link_map_private *l)
{
if (l->l_mach.fpabi == 0)
{
@@ -62,13 +62,15 @@ cached_fpabi_reject_phdr_p (struct link_map *l)
{
Elf_MIPS_ABIFlags_v0 * mips_abiflags;
if (ph->p_filesz < sizeof (Elf_MIPS_ABIFlags_v0))
- REJECT (" %s: malformed PT_MIPS_ABIFLAGS found\n", l->l_name);
+ REJECT (" %s: malformed PT_MIPS_ABIFLAGS found\n",
+ l->l_public.l_name);
- mips_abiflags = (Elf_MIPS_ABIFlags_v0 *) (l->l_addr + ph->p_vaddr);
+ mips_abiflags
+ = (Elf_MIPS_ABIFlags_v0 *) (l->l_public.l_addr + ph->p_vaddr);
if (__glibc_unlikely (mips_abiflags->flags2 != 0))
- REJECT (" %s: unknown MIPS.abiflags flags2: %u\n", l->l_name,
- mips_abiflags->flags2);
+ REJECT (" %s: unknown MIPS.abiflags flags2: %u\n",
+ l->l_public.l_name, mips_abiflags->flags2);
l->l_mach.fpabi = mips_abiflags->fp_abi;
l->l_mach.odd_spreg = (mips_abiflags->flags1
@@ -153,11 +155,11 @@ static const struct abi_req none_req = { true, true, true, false, true };
static bool __attribute_used__
elf_machine_reject_phdr_p (const ElfW(Phdr) *phdr, unsigned int phnum,
- const char *buf, size_t len, struct link_map *map,
- int fd)
+ const char *buf, size_t len,
+ struct link_map_private *map, int fd)
{
const ElfW(Phdr) *ph = find_mips_abiflags (phdr, phnum);
- struct link_map *l;
+ struct link_map_private *l;
Lmid_t nsid;
int in_abi = -1;
struct abi_req in_req;
@@ -204,7 +206,7 @@ elf_machine_reject_phdr_p (const ElfW(Phdr) *phdr, unsigned int phnum,
/* Check that the new requirement does not conflict with any currently
loaded object. */
for (nsid = 0; nsid < DL_NNS; ++nsid)
- for (l = GL(dl_ns)[nsid]._ns_loaded; l != NULL; l = l->l_next)
+ for (l = GL(dl_ns)[nsid]._ns_loaded; l != NULL; l = l_next (l))
{
struct abi_req existing_req;
diff --git a/sysdeps/mips/dl-machine.h b/sysdeps/mips/dl-machine.h
index f1b3effec8..b7b1705f65 100644
--- a/sysdeps/mips/dl-machine.h
+++ b/sysdeps/mips/dl-machine.h
@@ -168,14 +168,14 @@ elf_machine_load_address (void)
fiddles with global data. */
#define ELF_MACHINE_BEFORE_RTLD_RELOC(bootstrap_map, dynamic_info) \
do { \
- struct link_map *map = bootstrap_map; \
+ struct link_map_private *map = bootstrap_map; \
ElfW(Sym) *sym; \
ElfW(Addr) *got; \
int i, n; \
\
got = (ElfW(Addr) *) D_PTR (map, l_info[DT_PLTGOT]); \
\
- if (__builtin_expect (map->l_addr == 0, 1)) \
+ if (__builtin_expect (map->l_public.l_addr == 0, 1)) \
break; \
\
/* got[0] is reserved. got[1] is also reserved for the dynamic object \
@@ -186,7 +186,7 @@ do { \
\
/* Add the run-time displacement to all local got entries. */ \
while (i < n) \
- got[i++] += map->l_addr; \
+ got[i++] += map->l_public.l_addr; \
\
/* Handle global got entries. */ \
got += n; \
@@ -201,11 +201,11 @@ do { \
*got = SYMBOL_ADDRESS (map, sym, true); \
else if (ELFW(ST_TYPE) (sym->st_info) == STT_FUNC \
&& *got != sym->st_value) \
- *got += map->l_addr; \
+ *got += map->l_public.l_addr; \
else if (ELFW(ST_TYPE) (sym->st_info) == STT_SECTION) \
{ \
if (sym->st_other == 0) \
- *got += map->l_addr; \
+ *got += map->l_public.l_addr; \
} \
else \
*got = SYMBOL_ADDRESS (map, sym, true); \
@@ -278,7 +278,7 @@ do { \
move $16, $28\n\
# Save the user entry point address in a saved register.\n\
move $17, $2\n\
- # Call _dl_init (struct link_map *main_map, int argc, char **argv, char **env) \n\
+ # Call _dl_init (struct link_map_private *main_map, int argc, char **argv, char **env) \n\
" STRINGXP(PTR_L) " $4, _rtld_local\n\
" STRINGXP(PTR_L) /* or lw??? fixme */ " $5, 0($29)\n\
" STRINGXP(PTR_LA) " $6, " STRINGXP (PTRSIZE) "($29)\n\
@@ -339,7 +339,7 @@ do { \
addu $16, $4\n\
move $17, $2\n\
move $28, $16\n\
- # Call _dl_init (struct link_map *main_map, int argc, char **argv, char **env) \n\
+ # Call _dl_init (struct link_map_private *main_map, int argc, char **argv, char **env) \n\
lw $4, %got(_rtld_local)($16)\n\
lw $4, 0($4)\n\
lw $5, 0($sp)\n\
@@ -403,7 +403,7 @@ dl_platform_init (void)
point at the symbol with address VALUE. For a writable PLT, rewrite
the corresponding PLT entry instead. */
static inline ElfW(Addr)
-elf_machine_fixup_plt (struct link_map *map, lookup_t t,
+elf_machine_fixup_plt (struct link_map_private *map, lookup_t t,
const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
const ElfW(Rel) *reloc,
ElfW(Addr) *reloc_addr, ElfW(Addr) value)
@@ -412,7 +412,7 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t t,
}
static inline ElfW(Addr)
-elf_machine_plt_value (struct link_map *map, const ElfW(Rel) *reloc,
+elf_machine_plt_value (struct link_map_private *map, const ElfW(Rel) *reloc,
ElfW(Addr) value)
{
return value;
@@ -428,7 +428,7 @@ elf_machine_plt_value (struct link_map *map, const ElfW(Rel) *reloc,
static inline void
__attribute__ ((always_inline))
-elf_machine_reloc (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_reloc (struct link_map_private *map, struct r_scope_elem *scope[],
ElfW(Addr) r_info, const ElfW(Sym) *sym,
const struct r_found_version *version, void *reloc_addr,
ElfW(Addr) r_addend, int inplace_p)
@@ -459,8 +459,8 @@ elf_machine_reloc (struct link_map *map, struct r_scope_elem *scope[],
case R_MIPS_TLS_TPREL32:
# endif
{
- struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version,
- r_type);
+ struct link_map_private *sym_map
+ = RESOLVE_MAP (map, scope, &sym, version, r_type);
switch (r_type)
{
@@ -555,7 +555,7 @@ elf_machine_reloc (struct link_map *map, struct r_scope_elem *scope[],
#ifndef RTLD_BOOTSTRAP
if (map != &GL(dl_rtld_map))
#endif
- reloc_value += map->l_addr;
+ reloc_value += map->l_public.l_addr;
__builtin_memcpy (reloc_addr, &reloc_value, sizeof (reloc_value));
}
@@ -590,14 +590,14 @@ elf_machine_reloc (struct link_map *map, struct r_scope_elem *scope[],
case R_MIPS_JUMP_SLOT:
{
- struct link_map *sym_map;
+ struct link_map_private *sym_map;
ElfW(Addr) value;
/* The addend for a jump slot relocation must always be zero:
calls via the PLT always branch to the symbol's address and
not to the address plus a non-zero offset. */
if (r_addend != 0)
- _dl_signal_error (0, map->l_name, NULL,
+ _dl_signal_error (0, map->l_public.l_name, NULL,
"found jump slot relocation with non-zero addend");
sym_map = RESOLVE_MAP (map, scope, &sym, version, r_type);
@@ -610,7 +610,7 @@ elf_machine_reloc (struct link_map *map, struct r_scope_elem *scope[],
case R_MIPS_COPY:
{
const ElfW(Sym) *const refsym = sym;
- struct link_map *sym_map;
+ struct link_map_private *sym_map;
ElfW(Addr) value;
/* Calculate the address of the symbol. */
@@ -663,7 +663,7 @@ elf_machine_reloc (struct link_map *map, struct r_scope_elem *scope[],
static inline void
__attribute__ ((always_inline))
-elf_machine_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_rel (struct link_map_private *map, struct r_scope_elem *scope[],
const ElfW(Rel) *reloc, const ElfW(Sym) *sym,
const struct r_found_version *version, void *const reloc_addr,
int skip_ifunc)
@@ -681,7 +681,8 @@ elf_machine_rel_relative (ElfW(Addr) l_addr, const ElfW(Rel) *reloc,
static inline void
__attribute__((always_inline))
-elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_lazy_rel (struct link_map_private *map,
+ struct r_scope_elem *scope[],
ElfW(Addr) l_addr, const ElfW(Rel) *reloc,
int skip_ifunc)
{
@@ -704,7 +705,8 @@ elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
static inline void
__attribute__ ((always_inline))
-elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[], const ElfW(Rela) *reloc,
+elf_machine_rela (struct link_map_private *map,
+ struct r_scope_elem *scope[], const ElfW(Rela) *reloc,
const ElfW(Sym) *sym, const struct r_found_version *version,
void *const reloc_addr, int skip_ifunc)
{
@@ -723,7 +725,8 @@ elf_machine_rela_relative (ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
/* Relocate GOT. */
static inline void
__attribute__((always_inline))
-elf_machine_got_rel (struct link_map *map, struct r_scope_elem *scope[], int lazy)
+elf_machine_got_rel (struct link_map_private *map,
+ struct r_scope_elem *scope[], int lazy)
{
ElfW(Addr) *got;
ElfW(Sym) *sym;
@@ -735,7 +738,7 @@ elf_machine_got_rel (struct link_map *map, struct r_scope_elem *scope[], int laz
const ElfW(Sym) *ref = sym; \
const struct r_found_version *version __attribute__ ((unused)) \
= vernum ? &map->l_versions[vernum[sym_index] & 0x7fff] : NULL; \
- struct link_map *sym_map; \
+ struct link_map_private *sym_map; \
sym_map = RESOLVE_MAP (map, scope, &ref, version, reloc); \
SYMBOL_ADDRESS (sym_map, ref, true); \
})
@@ -757,10 +760,10 @@ elf_machine_got_rel (struct link_map *map, struct r_scope_elem *scope[], int laz
/* Add the run-time displacement to all local got entries if
needed. */
- if (__builtin_expect (map->l_addr != 0, 0))
+ if (__builtin_expect (map->l_public.l_addr != 0, 0))
{
while (i < n)
- got[i++] += map->l_addr;
+ got[i++] += map->l_public.l_addr;
}
}
@@ -796,7 +799,7 @@ elf_machine_got_rel (struct link_map *map, struct r_scope_elem *scope[], int laz
&& *got != sym->st_value)
{
if (lazy)
- *got += map->l_addr;
+ *got += map->l_public.l_addr;
else
/* This is a lazy-binding stub, so we don't need the
canonical address. */
@@ -805,7 +808,7 @@ elf_machine_got_rel (struct link_map *map, struct r_scope_elem *scope[], int laz
else if (ELFW(ST_TYPE) (sym->st_info) == STT_SECTION)
{
if (sym->st_other == 0)
- *got += map->l_addr;
+ *got += map->l_public.l_addr;
}
else
*got = RESOLVE_GOTSYM (sym, vernum, symidx, R_MIPS_32);
@@ -824,7 +827,8 @@ elf_machine_got_rel (struct link_map *map, struct r_scope_elem *scope[], int laz
static inline int
__attribute__((always_inline))
-elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
+elf_machine_runtime_setup (struct link_map_private *l,
+ struct r_scope_elem *scope[],
int lazy, int profile)
{
# ifndef RTLD_BOOTSTRAP
@@ -866,7 +870,7 @@ elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
we have to be able to undo the prelinking of .got.plt.
The prelinker saved the address of .plt for us here. */
if (gotplt[1])
- l->l_mach.plt = gotplt[1] + l->l_addr;
+ l->l_mach.plt = gotplt[1] + l->l_public.l_addr;
gotplt[0] = (ElfW(Addr)) &_dl_runtime_pltresolve;
gotplt[1] = (ElfW(Addr)) l;
}
diff --git a/sysdeps/mips/dl-trampoline.c b/sysdeps/mips/dl-trampoline.c
index 1771f9eb6a..0c9a79a360 100644
--- a/sysdeps/mips/dl-trampoline.c
+++ b/sysdeps/mips/dl-trampoline.c
@@ -26,7 +26,7 @@
#include <sysdep-cancel.h>
/* Get link map for callers object containing STUB_PC. */
-static inline struct link_map *
+static inline struct link_map_private *
elf_machine_runtime_link_map (ElfW(Addr) gpreg, ElfW(Addr) stub_pc)
{
extern int _dl_mips_gnu_objects;
@@ -45,8 +45,8 @@ elf_machine_runtime_link_map (ElfW(Addr) gpreg, ElfW(Addr) stub_pc)
if ((g1 & ELF_MIPS_GNU_GOT1_MASK) != 0)
{
- struct link_map *l =
- (struct link_map *) (g1 & ~ELF_MIPS_GNU_GOT1_MASK);
+ struct link_map_private *l =
+ (struct link_map_private *) (g1 & ~ELF_MIPS_GNU_GOT1_MASK);
ElfW(Addr) base, limit;
const ElfW(Phdr) *p = l->l_phdr;
ElfW(Half) this, nent = l->l_phnum;
@@ -59,7 +59,7 @@ elf_machine_runtime_link_map (ElfW(Addr) gpreg, ElfW(Addr) stub_pc)
{
if (p[this].p_type == PT_LOAD)
{
- base = p[this].p_vaddr + l->l_addr;
+ base = p[this].p_vaddr + l->l_public.l_addr;
limit = base + p[this].p_memsz;
if (stub_pc >= base && stub_pc < limit)
return l;
@@ -68,11 +68,11 @@ elf_machine_runtime_link_map (ElfW(Addr) gpreg, ElfW(Addr) stub_pc)
}
}
- struct link_map *l;
+ struct link_map_private *l;
Lmid_t nsid;
for (nsid = 0; nsid < DL_NNS; ++nsid)
- for (l = GL(dl_ns)[nsid]._ns_loaded; l != NULL; l = l->l_next)
+ for (l = GL(dl_ns)[nsid]._ns_loaded; l != NULL; l = l_next (l))
{
ElfW(Addr) base, limit;
const ElfW(Phdr) *p = l->l_phdr;
@@ -82,7 +82,7 @@ elf_machine_runtime_link_map (ElfW(Addr) gpreg, ElfW(Addr) stub_pc)
{
if (p[this].p_type == PT_LOAD)
{
- base = p[this].p_vaddr + l->l_addr;
+ base = p[this].p_vaddr + l->l_public.l_addr;
limit = base + p[this].p_memsz;
if (stub_pc >= base && stub_pc < limit)
return l;
@@ -121,7 +121,8 @@ __dl_runtime_resolve (ElfW(Word) sym_index,
ElfW(Addr) old_gpreg,
ElfW(Addr) stub_pc)
{
- struct link_map *l = elf_machine_runtime_link_map (old_gpreg, stub_pc);
+ struct link_map_private *l
+ = elf_machine_runtime_link_map (old_gpreg, stub_pc);
const ElfW(Sym) *const symtab
= (const ElfW(Sym) *) D_PTR (l, l_info[DT_SYMTAB]);
const char *strtab = (const void *) D_PTR (l, l_info[DT_STRTAB]);
@@ -132,7 +133,7 @@ __dl_runtime_resolve (ElfW(Word) sym_index,
const ElfW(Word) gotsym
= (const ElfW(Word)) l->l_info[DT_MIPS (GOTSYM)]->d_un.d_val;
const ElfW(Sym) *sym = &symtab[sym_index];
- struct link_map *sym_map;
+ struct link_map_private *sym_map;
ElfW(Addr) value;
/* FIXME: The symbol versioning stuff is not tested yet. */
diff --git a/sysdeps/nios2/dl-init.c b/sysdeps/nios2/dl-init.c
index 5b8407b986..667bf0619b 100644
--- a/sysdeps/nios2/dl-init.c
+++ b/sysdeps/nios2/dl-init.c
@@ -19,10 +19,10 @@
#include <elf/dl-init.c>
unsigned int
-_dl_nios2_get_gp_value (struct link_map *main_map)
+_dl_nios2_get_gp_value (struct link_map_private *main_map)
{
- ElfW(Dyn) *dyn = main_map->l_ld;
- for (dyn = main_map->l_ld; dyn->d_tag != DT_NULL; ++dyn)
+ ElfW(Dyn) *dyn = main_map->l_public.l_ld;
+ for (dyn = main_map->l_public.l_ld; dyn->d_tag != DT_NULL; ++dyn)
if (dyn->d_tag == DT_NIOS2_GP)
return (unsigned int)(dyn->d_un.d_ptr);
return 0;
diff --git a/sysdeps/nios2/dl-machine.h b/sysdeps/nios2/dl-machine.h
index f151d29242..bbfc6f7354 100644
--- a/sysdeps/nios2/dl-machine.h
+++ b/sysdeps/nios2/dl-machine.h
@@ -69,7 +69,8 @@ elf_machine_load_address (void)
entries will jump to the on-demand fixup code in dl-runtime.c. */
static inline int __attribute__ ((always_inline))
-elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
+elf_machine_runtime_setup (struct link_map_private *l,
+ struct r_scope_elem *scope[],
int lazy, int profile)
{
extern void _dl_runtime_resolve (Elf32_Word);
@@ -174,7 +175,7 @@ _start:\n\
/* Fixup a PLT entry to bounce directly to the function at VALUE. */
static inline Elf32_Addr
-elf_machine_fixup_plt (struct link_map *map, lookup_t t,
+elf_machine_fixup_plt (struct link_map_private *map, lookup_t t,
const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
const Elf32_Rela *reloc,
Elf32_Addr *reloc_addr, Elf32_Addr value)
@@ -184,7 +185,7 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t t,
/* Return the final value of a plt relocation. */
static inline Elf32_Addr
-elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
+elf_machine_plt_value (struct link_map_private *map, const Elf32_Rela *reloc,
Elf32_Addr value)
{
return value;
@@ -203,7 +204,8 @@ elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
by DT_* of the .dynamic section info. */
static inline void __attribute__ ((always_inline))
-elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_rela (struct link_map_private *map,
+ struct r_scope_elem *scope[],
const ElfW(Rela) *reloc, const ElfW(Sym) *sym,
const struct r_found_version *version,
void *const reloc_addr_arg, int skip_ifunc)
@@ -212,14 +214,14 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
const unsigned int r_type = ELF32_R_TYPE (reloc->r_info);
if (__glibc_unlikely (r_type == R_NIOS2_RELATIVE))
- *reloc_addr = map->l_addr + reloc->r_addend;
+ *reloc_addr = map->l_public.l_addr + reloc->r_addend;
else if (__glibc_unlikely (r_type == R_NIOS2_NONE))
return;
else
{
const Elf32_Sym *const refsym = sym;
- struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version,
- r_type);
+ struct link_map_private *sym_map
+ = RESOLVE_MAP (map, scope, &sym, version, r_type);
Elf32_Addr value = SYMBOL_ADDRESS (sym_map, sym, true);
switch (r_type)
@@ -293,7 +295,8 @@ elf_machine_rela_relative (ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
}
static inline void __attribute__((always_inline))
-elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_lazy_rel (struct link_map_private *map,
+ struct r_scope_elem *scope[],
ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
int skip_ifunc)
{
diff --git a/sysdeps/nptl/dl-mutex.c b/sysdeps/nptl/dl-mutex.c
index 415df854fe..8c5fed62b3 100644
--- a/sysdeps/nptl/dl-mutex.c
+++ b/sysdeps/nptl/dl-mutex.c
@@ -34,7 +34,7 @@ __rtld_mutex_init (void)
it happens directly in dl_main in elf/rtld.c, and not some ELF
constructor while holding loader locks. */
- struct link_map *libc_map = GL (dl_ns)[LM_ID_BASE].libc_map;
+ struct link_map_private *libc_map = GL (dl_ns)[LM_ID_BASE].libc_map;
const ElfW(Sym) *sym
= _dl_lookup_direct (libc_map, "pthread_mutex_lock",
diff --git a/sysdeps/or1k/dl-machine.h b/sysdeps/or1k/dl-machine.h
index 27dfd7bcdd..d016493be9 100644
--- a/sysdeps/or1k/dl-machine.h
+++ b/sysdeps/or1k/dl-machine.h
@@ -107,7 +107,8 @@ elf_machine_load_address (void)
/* Set up the loaded object described by L so its unrelocated PLT
entries will jump to the on-demand fixup code in dl-runtime.c. */
static inline int __attribute__ ((unused, always_inline))
-elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
+elf_machine_runtime_setup (struct link_map_private *l,
+ struct r_scope_elem *scope[],
int lazy, int profile)
{
ElfW(Addr) *pltgot;
@@ -151,7 +152,7 @@ dl_platform_init (void)
}
static inline ElfW(Addr)
-elf_machine_fixup_plt (struct link_map *map, lookup_t t,
+elf_machine_fixup_plt (struct link_map_private *map, lookup_t t,
const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
const ElfW(Rela) *reloc,
ElfW(Addr) *reloc_addr, ElfW(Addr) value)
@@ -161,7 +162,7 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t t,
/* Return the final value of a plt relocation. */
static inline Elf32_Addr
-elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
+elf_machine_plt_value (struct link_map_private *map, const Elf32_Rela *reloc,
Elf32_Addr value)
{
return value + reloc->r_addend;
@@ -177,7 +178,7 @@ elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
static inline void
__attribute ((always_inline))
-elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
const ElfW(Rela) *reloc, const ElfW(Sym) *sym,
const struct r_found_version *version,
void *const reloc_addr_arg, int skip_ifunc)
@@ -190,8 +191,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
else
{
const Elf32_Sym *const refsym = sym;
- struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version,
- r_type);
+ struct link_map_private *sym_map
+ = RESOLVE_MAP (map, scope, &sym, version, r_type);
Elf32_Addr value = SYMBOL_ADDRESS (sym_map, sym, true);
if (sym != NULL
@@ -278,7 +279,8 @@ elf_machine_rela_relative (Elf32_Addr l_addr, const Elf32_Rela *reloc,
static inline void
__attribute__ ((always_inline))
-elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_lazy_rel (struct link_map_private *map,
+ struct r_scope_elem *scope[],
ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
int skip_ifunc)
{
diff --git a/sysdeps/powerpc/powerpc32/dl-machine.c b/sysdeps/powerpc/powerpc32/dl-machine.c
index e6b603de94..70f2b0de2b 100644
--- a/sysdeps/powerpc/powerpc32/dl-machine.c
+++ b/sysdeps/powerpc/powerpc32/dl-machine.c
@@ -87,11 +87,11 @@
mapped somewhere else. */
ElfW(Addr)
-__elf_preferred_address (struct link_map *loader, size_t maplength,
+__elf_preferred_address (struct link_map_private *loader, size_t maplength,
ElfW(Addr) mapstartpref)
{
ElfW(Addr) low, high;
- struct link_map *l;
+ struct link_map_private *l;
Lmid_t nsid;
/* If the object has a preference, load it there! */
@@ -105,7 +105,7 @@ __elf_preferred_address (struct link_map *loader, size_t maplength,
low = 0x0003FFFF;
high = 0x70000000;
for (nsid = 0; nsid < DL_NNS; ++nsid)
- for (l = GL(dl_ns)[nsid]._ns_loaded; l; l = l->l_next)
+ for (l = GL(dl_ns)[nsid]._ns_loaded; l; l = l_next (l))
{
ElfW(Addr) mapstart, mapend;
mapstart = l->l_map_start & ~(GLRO(dl_pagesize) - 1);
@@ -196,7 +196,8 @@ __elf_preferred_address (struct link_map *loader, size_t maplength,
Once this is done, and is visible to all processors, the `lwzu' can
safely be changed to a `lwz'. */
int
-__elf_machine_runtime_setup (struct link_map *map, int lazy, int profile)
+__elf_machine_runtime_setup (struct link_map_private *map, int lazy,
+ int profile)
{
if (map->l_info[DT_JMPREL])
{
@@ -329,7 +330,7 @@ __elf_machine_runtime_setup (struct link_map *map, int lazy, int profile)
}
Elf32_Addr
-__elf_machine_fixup_plt (struct link_map *map,
+__elf_machine_fixup_plt (struct link_map_private *map,
Elf32_Addr *reloc_addr, Elf32_Addr finaladdr)
{
Elf32_Sword delta = finaladdr - (Elf32_Word) reloc_addr;
@@ -378,7 +379,7 @@ __elf_machine_fixup_plt (struct link_map *map,
}
void
-_dl_reloc_overflow (struct link_map *map,
+_dl_reloc_overflow (struct link_map_private *map,
const char *name,
Elf32_Addr *const reloc_addr,
const Elf32_Sym *refsym)
@@ -398,13 +399,13 @@ _dl_reloc_overflow (struct link_map *map,
t = stpcpy (t, "'");
}
t = stpcpy (t, " out of range");
- _dl_signal_error (0, map->l_name, NULL, buffer);
+ _dl_signal_error (0, map->l_public.l_name, NULL, buffer);
}
void
-__process_machine_rela (struct link_map *map,
+__process_machine_rela (struct link_map_private *map,
const Elf32_Rela *reloc,
- struct link_map *sym_map,
+ struct link_map_private *sym_map,
const Elf32_Sym *sym,
const Elf32_Sym *refsym,
Elf32_Addr *const reloc_addr,
diff --git a/sysdeps/powerpc/powerpc32/dl-machine.h b/sysdeps/powerpc/powerpc32/dl-machine.h
index 1ff46d5f8a..789255e427 100644
--- a/sysdeps/powerpc/powerpc32/dl-machine.h
+++ b/sysdeps/powerpc/powerpc32/dl-machine.h
@@ -124,7 +124,7 @@ elf_machine_load_address (void)
/* Decide where a relocatable object should be loaded. */
extern ElfW(Addr)
-__elf_preferred_address(struct link_map *loader, size_t maplength,
+__elf_preferred_address(struct link_map_private *loader, size_t maplength,
ElfW(Addr) mapstartpref);
#define ELF_PREFERRED_ADDRESS(loader, maplength, mapstartpref) \
__elf_preferred_address (loader, maplength, mapstartpref)
@@ -164,11 +164,12 @@ dl_platform_init (void)
entries will jump to the on-demand fixup code in dl-runtime.c.
Also install a small trampoline to be used by entries that have
been relocated to an address too far away for a single branch. */
-extern int __elf_machine_runtime_setup (struct link_map *map,
+extern int __elf_machine_runtime_setup (struct link_map_private *map,
int lazy, int profile);
static inline int
-elf_machine_runtime_setup (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_runtime_setup (struct link_map_private *map,
+ struct r_scope_elem *scope[],
int lazy, int profile)
{
if (map->l_info[DT_JMPREL] == 0)
@@ -179,7 +180,7 @@ elf_machine_runtime_setup (struct link_map *map, struct r_scope_elem *scope[],
return __elf_machine_runtime_setup (map, lazy, profile);
/* New style non-exec PLT consisting of an array of addresses. */
- map->l_info[DT_PPC(GOT)]->d_un.d_ptr += map->l_addr;
+ map->l_info[DT_PPC(GOT)]->d_un.d_ptr += map->l_public.l_addr;
if (lazy)
{
Elf32_Addr *plt, *got, glink;
@@ -217,24 +218,24 @@ elf_machine_runtime_setup (struct link_map *map, struct r_scope_elem *scope[],
section's start. */
if (glink)
{
- glink += map->l_addr;
+ glink += map->l_public.l_addr;
while (num_plt_entries-- != 0)
*plt++ = glink, glink += 4;
}
else
while (num_plt_entries-- != 0)
- *plt++ += map->l_addr;
+ *plt++ += map->l_public.l_addr;
}
return lazy;
}
/* Change the PLT entry whose reloc is 'reloc' to call the actual routine. */
-extern Elf32_Addr __elf_machine_fixup_plt (struct link_map *map,
+extern Elf32_Addr __elf_machine_fixup_plt (struct link_map_private *map,
Elf32_Addr *reloc_addr,
Elf32_Addr finaladdr);
static inline Elf32_Addr
-elf_machine_fixup_plt (struct link_map *map, lookup_t t,
+elf_machine_fixup_plt (struct link_map_private *map, lookup_t t,
const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
const Elf32_Rela *reloc,
Elf32_Addr *reloc_addr, Elf64_Addr finaladdr)
@@ -249,7 +250,7 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t t,
/* Return the final value of a plt relocation. */
static inline Elf32_Addr
-elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
+elf_machine_plt_value (struct link_map_private *map, const Elf32_Rela *reloc,
Elf32_Addr value)
{
return value + reloc->r_addend;
@@ -266,9 +267,9 @@ elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
/* Do the actual processing of a reloc, once its target address
has been determined. */
-extern void __process_machine_rela (struct link_map *map,
+extern void __process_machine_rela (struct link_map_private *map,
const Elf32_Rela *reloc,
- struct link_map *sym_map,
+ struct link_map_private *sym_map,
const Elf32_Sym *sym,
const Elf32_Sym *refsym,
Elf32_Addr *const reloc_addr,
@@ -277,7 +278,7 @@ extern void __process_machine_rela (struct link_map *map,
attribute_hidden;
/* Call _dl_signal_error when a resolved value overflows a relocated area. */
-extern void _dl_reloc_overflow (struct link_map *map,
+extern void _dl_reloc_overflow (struct link_map_private *map,
const char *name,
Elf32_Addr *const reloc_addr,
const Elf32_Sym *refsym) attribute_hidden;
@@ -287,7 +288,7 @@ extern void _dl_reloc_overflow (struct link_map *map,
by DT_* of the .dynamic section info. */
static inline void __attribute__ ((always_inline))
-elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
const Elf32_Rela *reloc, const Elf32_Sym *sym,
const struct r_found_version *version,
void *const reloc_addr_arg, int skip_ifunc)
@@ -296,11 +297,11 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
const Elf32_Sym *const refsym = sym;
Elf32_Addr value;
const int r_type = ELF32_R_TYPE (reloc->r_info);
- struct link_map *sym_map = NULL;
+ struct link_map_private *sym_map = NULL;
if (r_type == R_PPC_RELATIVE)
{
- *reloc_addr = map->l_addr + reloc->r_addend;
+ *reloc_addr = map->l_public.l_addr + reloc->r_addend;
return;
}
@@ -313,7 +314,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
&& sym->st_shndx != SHN_UNDEF)
{
sym_map = map;
- value = map->l_addr;
+ value = map->l_public.l_addr;
}
else
{
@@ -442,7 +443,8 @@ elf_machine_rela_relative (Elf32_Addr l_addr, const Elf32_Rela *reloc,
}
static inline void __attribute__ ((always_inline))
-elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_lazy_rel (struct link_map_private *map,
+ struct r_scope_elem *scope[],
Elf32_Addr l_addr, const Elf32_Rela *reloc,
int skip_ifunc)
{
diff --git a/sysdeps/powerpc/powerpc64/dl-machine.c b/sysdeps/powerpc/powerpc64/dl-machine.c
index 3b4b08f51f..7db68eb0e4 100644
--- a/sysdeps/powerpc/powerpc64/dl-machine.c
+++ b/sysdeps/powerpc/powerpc64/dl-machine.c
@@ -23,7 +23,7 @@
#include <dl-machine.h>
void
-_dl_reloc_overflow (struct link_map *map,
+_dl_reloc_overflow (struct link_map_private *map,
const char *name,
Elf64_Addr *const reloc_addr,
const Elf64_Sym *refsym)
@@ -46,12 +46,12 @@ _dl_reloc_overflow (struct link_map *map,
t = stpcpy (t, "'");
}
t = stpcpy (t, " out of range");
- _dl_signal_error (0, map->l_name, NULL, buffer);
+ _dl_signal_error (0, map->l_public.l_name, NULL, buffer);
}
#if _CALL_ELF == 2
void
-_dl_error_localentry (struct link_map *map, const Elf64_Sym *refsym)
+_dl_error_localentry (struct link_map_private *map, const Elf64_Sym *refsym)
{
char buffer[1024];
char *t;
@@ -61,6 +61,6 @@ _dl_error_localentry (struct link_map *map, const Elf64_Sym *refsym)
t = stpcpy (buffer, "expected localentry:0 `");
t = stpcpy (t, strtab + refsym->st_name);
t = stpcpy (t, "'");
- _dl_signal_error (0, map->l_name, NULL, buffer);
+ _dl_signal_error (0, map->l_public.l_name, NULL, buffer);
}
#endif
diff --git a/sysdeps/powerpc/powerpc64/dl-machine.h b/sysdeps/powerpc/powerpc64/dl-machine.h
index 601c3cba9d..9db1838f44 100644
--- a/sysdeps/powerpc/powerpc64/dl-machine.h
+++ b/sysdeps/powerpc/powerpc64/dl-machine.h
@@ -332,7 +332,8 @@ dl_platform_init (void)
/* Set up the loaded object described by MAP so its unrelocated PLT
entries will jump to the on-demand fixup code in dl-runtime.c. */
static inline int __attribute__ ((always_inline))
-elf_machine_runtime_setup (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_runtime_setup (struct link_map_private *map,
+ struct r_scope_elem *scope[],
int lazy, int profile)
{
if (map->l_info[DT_JMPREL])
@@ -342,7 +343,7 @@ elf_machine_runtime_setup (struct link_map *map, struct r_scope_elem *scope[],
Elf64_Xword *plt = (Elf64_Xword *) D_PTR (map, l_info[DT_PLTGOT]);
Elf64_Word num_plt_entries = (map->l_info[DT_PLTRELSZ]->d_un.d_val
/ sizeof (Elf64_Rela));
- Elf64_Addr l_addr = map->l_addr;
+ Elf64_Addr l_addr = map->l_public.l_addr;
Elf64_Dyn **info = map->l_info;
char *p;
@@ -436,13 +437,13 @@ elf_machine_runtime_setup (struct link_map *map, struct r_scope_elem *scope[],
}
#if _CALL_ELF == 2
-extern void attribute_hidden _dl_error_localentry (struct link_map *map,
- const Elf64_Sym *refsym);
+void attribute_hidden _dl_error_localentry (struct link_map_private *map,
+ const Elf64_Sym *refsym);
/* If the PLT entry resolves to a function in the same object, return
the target function's local entry point offset if usable. */
static inline Elf64_Addr __attribute__ ((always_inline))
-ppc64_local_entry_offset (struct link_map *map, lookup_t sym_map,
+ppc64_local_entry_offset (struct link_map_private *map, lookup_t sym_map,
const ElfW(Sym) *refsym, const ElfW(Sym) *sym)
{
/* If the target function is in a different object, we cannot
@@ -481,7 +482,7 @@ ppc64_local_entry_offset (struct link_map *map, lookup_t sym_map,
/* Change the PLT entry whose reloc is 'reloc' to call the actual
routine. */
static inline Elf64_Addr __attribute__ ((always_inline))
-elf_machine_fixup_plt (struct link_map *map, lookup_t sym_map,
+elf_machine_fixup_plt (struct link_map_private *map, lookup_t sym_map,
const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
const Elf64_Rela *reloc,
Elf64_Addr *reloc_addr, Elf64_Addr finaladdr)
@@ -521,7 +522,7 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t sym_map,
&& sym_map != &GL(dl_rtld_map)
#endif
)
- offset = sym_map->l_addr;
+ offset = sym_map->l_public.l_addr;
/* For PPC64, fixup_plt copies the function descriptor from opd
over the corresponding PLT entry.
@@ -547,7 +548,7 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t sym_map,
/* Return the final value of a plt relocation. */
static inline Elf64_Addr
-elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc,
+elf_machine_plt_value (struct link_map_private *map, const Elf64_Rela *reloc,
Elf64_Addr value)
{
return value + reloc->r_addend;
@@ -600,7 +601,7 @@ ppc_init_fake_thread_pointer (void)
#define dont_expect(X) __builtin_expect ((X), 0)
-extern void attribute_hidden _dl_reloc_overflow (struct link_map *map,
+extern void attribute_hidden _dl_reloc_overflow (struct link_map_private *map,
const char *name,
Elf64_Addr *const reloc_addr,
const Elf64_Sym *refsym);
@@ -615,8 +616,8 @@ elf_machine_rela_relative (Elf64_Addr l_addr, const Elf64_Rela *reloc,
/* This computes the value used by TPREL* relocs. */
static inline Elf64_Addr __attribute__ ((always_inline, const))
-elf_machine_tprel (struct link_map *map,
- struct link_map *sym_map,
+elf_machine_tprel (struct link_map_private *map,
+ struct link_map_private *sym_map,
const Elf64_Sym *sym,
const Elf64_Rela *reloc)
{
@@ -635,7 +636,8 @@ elf_machine_tprel (struct link_map *map,
/* Call function at address VALUE (an OPD entry) to resolve ifunc relocs. */
static inline Elf64_Addr __attribute__ ((always_inline))
resolve_ifunc (Elf64_Addr value,
- const struct link_map *map, const struct link_map *sym_map)
+ const struct link_map_private *map,
+ const struct link_map_private *sym_map)
{
#if _CALL_ELF != 2
/* The function we are calling may not yet have its opd entry relocated. */
@@ -648,8 +650,8 @@ resolve_ifunc (Elf64_Addr value,
&& !sym_map->l_relocated)
{
Elf64_FuncDesc *func = (Elf64_FuncDesc *) value;
- opd.fd_func = func->fd_func + sym_map->l_addr;
- opd.fd_toc = func->fd_toc + sym_map->l_addr;
+ opd.fd_func = func->fd_func + sym_map->l_public.l_addr;
+ opd.fd_toc = func->fd_toc + sym_map->l_public.l_addr;
opd.fd_aux = func->fd_aux;
/* GCC 4.9+ eliminates the branch as dead code, force the odp set
dependency. */
@@ -662,7 +664,7 @@ resolve_ifunc (Elf64_Addr value,
/* Perform the relocation specified by RELOC and SYM (which is fully
resolved). MAP is the object containing the reloc. */
static inline void __attribute__ ((always_inline))
-elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
const Elf64_Rela *reloc,
const Elf64_Sym *sym,
const struct r_found_version *version,
@@ -681,7 +683,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
if (r_type == R_PPC64_RELATIVE)
{
- *reloc_addr = map->l_addr + reloc->r_addend;
+ *reloc_addr = map->l_public.l_addr + reloc->r_addend;
return;
}
@@ -690,7 +692,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
/* We need SYM_MAP even in the absence of TLS, for elf_machine_fixup_plt
and STT_GNU_IFUNC. */
- struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version, r_type);
+ struct link_map_private *sym_map
+ = RESOLVE_MAP (map, scope, &sym, version, r_type);
Elf64_Addr value = SYMBOL_ADDRESS (sym_map, sym, true) + reloc->r_addend;
if (sym != NULL
@@ -1016,7 +1019,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
}
static inline void __attribute__ ((always_inline))
-elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_lazy_rel (struct link_map_private *map,
+ struct r_scope_elem *scope[],
Elf64_Addr l_addr, const Elf64_Rela *reloc,
int skip_ifunc)
{
diff --git a/sysdeps/riscv/dl-machine.h b/sysdeps/riscv/dl-machine.h
index c0c9bd93ad..a05a166514 100644
--- a/sysdeps/riscv/dl-machine.h
+++ b/sysdeps/riscv/dl-machine.h
@@ -109,7 +109,7 @@ elf_machine_dynamic (void)
mv s0, a0\n\
# Load the adjusted argument count.\n\
" STRINGXP (REG_L) " a1, 0(sp)\n\
- # Call _dl_init (struct link_map *main_map, int argc, char **argv, char **env) \n\
+ # Call _dl_init (struct link_map_private *main_map, int argc, char **argv, char **env) \n\
" STRINGXP (REG_L) " a0, _rtld_local\n\
add a2, sp, " STRINGXP (SZREG) "\n\
sll a3, a1, " STRINGXP (PTRLOG) "\n\
@@ -140,7 +140,7 @@ elf_machine_dynamic (void)
#define elf_machine_plt_value(map, reloc, value) (value)
static inline ElfW(Addr)
-elf_machine_fixup_plt (struct link_map *map, lookup_t t,
+elf_machine_fixup_plt (struct link_map_private *map, lookup_t t,
const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
const ElfW(Rela) *reloc,
ElfW(Addr) *reloc_addr, ElfW(Addr) value)
@@ -169,7 +169,7 @@ elf_machine_rela_relative (ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
static inline void
__attribute__ ((always_inline))
-elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
const ElfW(Rela) *reloc, const ElfW(Sym) *sym,
const struct r_found_version *version,
void *const reloc_addr, int skip_ifunc)
@@ -178,7 +178,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
const unsigned long int r_type = ELFW (R_TYPE) (r_info);
ElfW(Addr) *addr_field = (ElfW(Addr) *) reloc_addr;
const ElfW(Sym) *const __attribute__ ((unused)) refsym = sym;
- struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version, r_type);
+ struct link_map_private *sym_map
+ = RESOLVE_MAP (map, scope, &sym, version, r_type);
ElfW(Addr) value = 0;
if (sym_map != NULL)
value = SYMBOL_ADDRESS (sym_map, sym, true) + reloc->r_addend;
@@ -193,7 +194,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
switch (r_type)
{
case R_RISCV_RELATIVE:
- elf_machine_rela_relative (map->l_addr, reloc, addr_field);
+ elf_machine_rela_relative (map->l_public.l_addr, reloc, addr_field);
break;
case R_RISCV_JUMP_SLOT:
case __WORDSIZE == 64 ? R_RISCV_64 : R_RISCV_32:
@@ -232,7 +233,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
/* There's nothing to do if the symbol is in .tbss. */
if (__glibc_likely (sym->st_value >= sym_map->l_tls_initimage_size))
break;
- value += (ElfW(Addr)) sym_map->l_tls_initimage - sym_map->l_addr;
+ value += ((ElfW(Addr)) sym_map->l_tls_initimage
+ - sym_map->l_public.l_addr);
}
size_t size = sym->st_size;
@@ -253,7 +255,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
}
case R_RISCV_IRELATIVE:
- value = map->l_addr + reloc->r_addend;
+ value = map->l_public.l_addr + reloc->r_addend;
if (__glibc_likely (!skip_ifunc))
value = elf_ifunc_invoke (value);
*addr_field = value;
@@ -271,7 +273,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
static inline void
__attribute__ ((always_inline))
-elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_lazy_rel (struct link_map_private *map,
+ struct r_scope_elem *scope[],
ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
int skip_ifunc)
{
@@ -291,7 +294,7 @@ elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
}
else if (__glibc_unlikely (r_type == R_RISCV_IRELATIVE))
{
- ElfW(Addr) value = map->l_addr + reloc->r_addend;
+ ElfW(Addr) value = map->l_public.l_addr + reloc->r_addend;
if (__glibc_likely (!skip_ifunc))
value = elf_ifunc_invoke (value);
*reloc_addr = value;
@@ -305,7 +308,8 @@ elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
static inline int
__attribute__ ((always_inline))
-elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
+elf_machine_runtime_setup (struct link_map_private *l,
+ struct r_scope_elem *scope[],
int lazy, int profile)
{
#ifndef RTLD_BOOTSTRAP
@@ -318,7 +322,7 @@ elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
we have to be able to undo the prelinking of .got.plt.
The prelinker saved the address of .plt for us here. */
if (gotplt[1])
- l->l_mach.plt = gotplt[1] + l->l_addr;
+ l->l_mach.plt = gotplt[1] + l->l_public.l_addr;
gotplt[0] = (ElfW(Addr)) &_dl_runtime_resolve;
gotplt[1] = (ElfW(Addr)) l;
}
diff --git a/sysdeps/s390/s390-32/dl-machine.h b/sysdeps/s390/s390-32/dl-machine.h
index b8bf2796c7..a92248d2ed 100644
--- a/sysdeps/s390/s390-32/dl-machine.h
+++ b/sysdeps/s390/s390-32/dl-machine.h
@@ -89,7 +89,8 @@ elf_machine_load_address (void)
entries will jump to the on-demand fixup code in dl-runtime.c. */
static inline int __attribute__ ((unused))
-elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
+elf_machine_runtime_setup (struct link_map_private *l,
+ struct r_scope_elem *scope[],
int lazy, int profile)
{
extern void _dl_runtime_resolve (Elf32_Word);
@@ -113,7 +114,7 @@ elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
The prelinker saved us here address of .plt + 0x2c. */
if (got[1])
{
- l->l_mach.plt = got[1] + l->l_addr;
+ l->l_mach.plt = got[1] + l->l_public.l_addr;
l->l_mach.jmprel = (const Elf32_Rela *) D_PTR (l, l_info[DT_JMPREL]);
}
got[1] = (Elf32_Addr) l; /* Identify this shared object. */
@@ -256,7 +257,7 @@ dl_platform_init (void)
}
static inline Elf32_Addr
-elf_machine_fixup_plt (struct link_map *map, lookup_t t,
+elf_machine_fixup_plt (struct link_map_private *map, lookup_t t,
const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
const Elf32_Rela *reloc,
Elf32_Addr *reloc_addr, Elf32_Addr value)
@@ -266,7 +267,7 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t t,
/* Return the final value of a plt relocation. */
static inline Elf32_Addr
-elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
+elf_machine_plt_value (struct link_map_private *map, const Elf32_Rela *reloc,
Elf32_Addr value)
{
return value;
@@ -286,7 +287,7 @@ elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
static inline void
__attribute__ ((always_inline))
-elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
const Elf32_Rela *reloc, const Elf32_Sym *sym,
const struct r_found_version *version,
void *const reloc_addr_arg, int skip_ifunc)
@@ -296,7 +297,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
#if !defined RTLD_BOOTSTRAP
if (__glibc_unlikely (r_type == R_390_RELATIVE))
- *reloc_addr = map->l_addr + reloc->r_addend;
+ *reloc_addr = map->l_public.l_addr + reloc->r_addend;
else
#endif
if (__glibc_unlikely (r_type == R_390_NONE))
@@ -307,8 +308,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
/* Only needed for R_390_COPY below. */
const Elf32_Sym *const refsym = sym;
#endif
- struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version,
- r_type);
+ struct link_map_private *sym_map
+ = RESOLVE_MAP (map, scope, &sym, version, r_type);
Elf32_Addr value = SYMBOL_ADDRESS (sym_map, sym, true);
if (sym != NULL
@@ -320,7 +321,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
switch (r_type)
{
case R_390_IRELATIVE:
- value = map->l_addr + reloc->r_addend;
+ value = map->l_public.l_addr + reloc->r_addend;
if (__glibc_likely (!skip_ifunc))
value = elf_ifunc_invoke (value);
*reloc_addr = value;
@@ -440,7 +441,8 @@ elf_machine_rela_relative (Elf32_Addr l_addr, const Elf32_Rela *reloc,
static inline void
__attribute__ ((always_inline))
-elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_lazy_rel (struct link_map_private *map,
+ struct r_scope_elem *scope[],
Elf32_Addr l_addr, const Elf32_Rela *reloc,
int skip_ifunc)
{
@@ -456,7 +458,7 @@ elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
}
else if (__glibc_likely (r_type == R_390_IRELATIVE))
{
- Elf32_Addr value = map->l_addr + reloc->r_addend;
+ Elf32_Addr value = map->l_public.l_addr + reloc->r_addend;
if (__glibc_likely (!skip_ifunc))
value = elf_ifunc_invoke (value);
*reloc_addr = value;
diff --git a/sysdeps/s390/s390-64/dl-machine.h b/sysdeps/s390/s390-64/dl-machine.h
index 82259dad64..f21df5232c 100644
--- a/sysdeps/s390/s390-64/dl-machine.h
+++ b/sysdeps/s390/s390-64/dl-machine.h
@@ -77,7 +77,8 @@ elf_machine_load_address (void)
entries will jump to the on-demand fixup code in dl-runtime.c. */
static inline int __attribute__ ((unused))
-elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
+elf_machine_runtime_setup (struct link_map_private *l,
+ struct r_scope_elem *scope[],
int lazy, int profile)
{
extern void _dl_runtime_resolve (Elf64_Word);
@@ -100,7 +101,7 @@ elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
The prelinker saved us here address of .plt + 0x2e. */
if (got[1])
{
- l->l_mach.plt = got[1] + l->l_addr;
+ l->l_mach.plt = got[1] + l->l_public.l_addr;
l->l_mach.jmprel = (const Elf64_Rela *) D_PTR (l, l_info[DT_JMPREL]);
}
got[1] = (Elf64_Addr) l; /* Identify this shared object. */
@@ -233,7 +234,7 @@ dl_platform_init (void)
}
static inline Elf64_Addr
-elf_machine_fixup_plt (struct link_map *map, lookup_t t,
+elf_machine_fixup_plt (struct link_map_private *map, lookup_t t,
const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
const Elf64_Rela *reloc,
Elf64_Addr *reloc_addr, Elf64_Addr value)
@@ -243,7 +244,7 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t t,
/* Return the final value of a plt relocation. */
static inline Elf64_Addr
-elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc,
+elf_machine_plt_value (struct link_map_private *map, const Elf64_Rela *reloc,
Elf64_Addr value)
{
return value;
@@ -262,7 +263,7 @@ elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc,
static inline void
__attribute__ ((always_inline))
-elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
const Elf64_Rela *reloc, const Elf64_Sym *sym,
const struct r_found_version *version,
void *const reloc_addr_arg, int skip_ifunc)
@@ -272,7 +273,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
#if !defined RTLD_BOOTSTRAP
if (__glibc_unlikely (r_type == R_390_RELATIVE))
- *reloc_addr = map->l_addr + reloc->r_addend;
+ *reloc_addr = map->l_public.l_addr + reloc->r_addend;
else
#endif
if (__glibc_unlikely (r_type == R_390_NONE))
@@ -283,8 +284,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
/* Only needed for R_390_COPY below. */
const Elf64_Sym *const refsym = sym;
#endif
- struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version,
- r_type);
+ struct link_map_private *sym_map
+ = RESOLVE_MAP (map, scope, &sym, version, r_type);
Elf64_Addr value = SYMBOL_ADDRESS (sym_map, sym, true);
if (sym != NULL
@@ -297,7 +298,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
switch (r_type)
{
case R_390_IRELATIVE:
- value = map->l_addr + reloc->r_addend;
+ value = map->l_public.l_addr + reloc->r_addend;
if (__glibc_likely (!skip_ifunc))
value = elf_ifunc_invoke (value);
*reloc_addr = value;
@@ -423,7 +424,8 @@ elf_machine_rela_relative (Elf64_Addr l_addr, const Elf64_Rela *reloc,
static inline void
__attribute__ ((always_inline))
-elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_lazy_rel (struct link_map_private *map,
+ struct r_scope_elem *scope[],
Elf64_Addr l_addr, const Elf64_Rela *reloc,
int skip_ifunc)
{
@@ -439,7 +441,7 @@ elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
}
else if (__glibc_likely (r_type == R_390_IRELATIVE))
{
- Elf64_Addr value = map->l_addr + reloc->r_addend;
+ Elf64_Addr value = map->l_public.l_addr + reloc->r_addend;
if (__glibc_likely (!skip_ifunc))
value = elf_ifunc_invoke (value);
*reloc_addr = value;
diff --git a/sysdeps/sh/dl-machine.h b/sysdeps/sh/dl-machine.h
index e0480eae5a..6841671ae6 100644
--- a/sysdeps/sh/dl-machine.h
+++ b/sysdeps/sh/dl-machine.h
@@ -71,7 +71,8 @@ elf_machine_load_address (void)
entries will jump to the on-demand fixup code in dl-runtime.c. */
static inline int __attribute__ ((unused, always_inline))
-elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
+elf_machine_runtime_setup (struct link_map_private *l,
+ struct r_scope_elem *scope[],
int lazy, int profile)
{
Elf32_Addr *got;
@@ -90,7 +91,7 @@ elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
The prelinker saved us here address of .plt + 36. */
if (got[1])
{
- l->l_mach.plt = got[1] + l->l_addr;
+ l->l_mach.plt = got[1] + l->l_public.l_addr;
l->l_mach.gotplt = (Elf32_Addr) &got[3];
}
got[1] = (Elf32_Addr) l; /* Identify this shared object. */
@@ -220,7 +221,7 @@ dl_platform_init (void)
}
static inline Elf32_Addr
-elf_machine_fixup_plt (struct link_map *map, lookup_t t,
+elf_machine_fixup_plt (struct link_map_private *map, lookup_t t,
const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
const Elf32_Rela *reloc,
Elf32_Addr *reloc_addr, Elf32_Addr value)
@@ -230,7 +231,7 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t t,
/* Return the final value of a plt relocation. */
static inline Elf32_Addr
-elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
+elf_machine_plt_value (struct link_map_private *map, const Elf32_Rela *reloc,
Elf32_Addr value)
{
return value + reloc->r_addend;
@@ -248,7 +249,7 @@ elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
static inline void
__attribute ((always_inline))
-elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
const Elf32_Rela *reloc, const Elf32_Sym *sym,
const struct r_found_version *version,
void *const reloc_addr_arg, int skip_ifunc)
@@ -288,12 +289,12 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
#endif
{
if (reloc->r_addend)
- value = map->l_addr + reloc->r_addend;
+ value = map->l_public.l_addr + reloc->r_addend;
else
{
COPY_UNALIGNED_WORD (reloc_addr_arg, &value,
(int) reloc_addr_arg & 3);
- value += map->l_addr;
+ value += map->l_public.l_addr;
}
COPY_UNALIGNED_WORD (&value, reloc_addr_arg,
(int) reloc_addr_arg & 3);
@@ -306,8 +307,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
else
{
const Elf32_Sym *const refsym = sym;
- struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version,
- r_type);
+ struct link_map_private *sym_map
+ = RESOLVE_MAP (map, scope, &sym, version, r_type);
value = SYMBOL_ADDRESS (sym_map, sym, true);
value += reloc->r_addend;
@@ -434,7 +435,8 @@ elf_machine_rela_relative (Elf32_Addr l_addr, const Elf32_Rela *reloc,
static inline void
__attribute__ ((always_inline))
-elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_lazy_rel (struct link_map_private *map,
+ struct r_scope_elem *scope[],
Elf32_Addr l_addr, const Elf32_Rela *reloc,
int skip_ifunc)
{
diff --git a/sysdeps/sparc/sparc32/dl-machine.h b/sysdeps/sparc/sparc32/dl-machine.h
index b10e541810..2e9c84e771 100644
--- a/sysdeps/sparc/sparc32/dl-machine.h
+++ b/sysdeps/sparc/sparc32/dl-machine.h
@@ -100,7 +100,8 @@ elf_machine_load_address (void)
entries will jump to the on-demand fixup code in dl-runtime.c. */
static inline int
-elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
+elf_machine_runtime_setup (struct link_map_private *l,
+ struct r_scope_elem *scope[],
int lazy, int profile)
{
Elf32_Addr *plt;
@@ -218,7 +219,7 @@ _dl_start_user:\n\
.previous");
static inline Elf32_Addr
-elf_machine_fixup_plt (struct link_map *map, lookup_t t,
+elf_machine_fixup_plt (struct link_map_private *map, lookup_t t,
const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
const Elf32_Rela *reloc,
Elf32_Addr *reloc_addr, Elf32_Addr value)
@@ -236,7 +237,7 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t t,
/* Return the final value of a plt relocation. */
static inline Elf32_Addr
-elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
+elf_machine_plt_value (struct link_map_private *map, const Elf32_Rela *reloc,
Elf32_Addr value)
{
return value + reloc->r_addend;
@@ -254,7 +255,7 @@ elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
static inline void
__attribute__ ((always_inline))
-elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
const Elf32_Rela *reloc, const Elf32_Sym *sym,
const struct r_found_version *version,
void *const reloc_addr_arg, int skip_ifunc)
@@ -265,7 +266,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
#endif
Elf32_Addr value;
const unsigned int r_type = ELF32_R_TYPE (reloc->r_info);
- struct link_map *sym_map = NULL;
+ struct link_map_private *sym_map = NULL;
if (__glibc_unlikely (r_type == R_SPARC_NONE))
return;
@@ -279,7 +280,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
#if !defined RTLD_BOOTSTRAP
if (__glibc_unlikely (r_type == R_SPARC_RELATIVE))
{
- *reloc_addr += map->l_addr + reloc->r_addend;
+ *reloc_addr += map->l_public.l_addr + reloc->r_addend;
return;
}
#endif
@@ -288,7 +289,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
&& sym->st_shndx != SHN_UNDEF)
{
sym_map = map;
- value = map->l_addr;
+ value = map->l_public.l_addr;
}
else
{
@@ -453,7 +454,8 @@ elf_machine_rela_relative (Elf32_Addr l_addr, const Elf32_Rela *reloc,
static inline void
__attribute__ ((always_inline))
-elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_lazy_rel (struct link_map_private *map,
+ struct r_scope_elem *scope[],
Elf32_Addr l_addr, const Elf32_Rela *reloc,
int skip_ifunc)
{
@@ -464,7 +466,7 @@ elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
;
else if (r_type == R_SPARC_JMP_IREL)
{
- Elf32_Addr value = map->l_addr + reloc->r_addend;
+ Elf32_Addr value = map->l_public.l_addr + reloc->r_addend;
if (__glibc_likely (!skip_ifunc))
value = ((Elf32_Addr (*) (int)) value) (GLRO(dl_hwcap));
sparc_fixup_plt (reloc, reloc_addr, value, 1, 1);
diff --git a/sysdeps/sparc/sparc64/dl-irel.h b/sysdeps/sparc/sparc64/dl-irel.h
index 4010e1d2c6..84f0389b07 100644
--- a/sysdeps/sparc/sparc64/dl-irel.h
+++ b/sysdeps/sparc/sparc64/dl-irel.h
@@ -50,7 +50,7 @@ elf_irela (const Elf64_Rela *reloc)
{
Elf64_Addr *const reloc_addr = (void *) reloc->r_offset;
Elf64_Addr value = elf_ifunc_invoke(reloc->r_addend);
- struct link_map map = { .l_addr = 0 };
+ struct link_map_private map = { };
/* 'high' is always zero, for large PLT entries the linker
emits an R_SPARC_IRELATIVE. */
diff --git a/sysdeps/sparc/sparc64/dl-machine.h b/sysdeps/sparc/sparc64/dl-machine.h
index 98469e7604..c06f568a45 100644
--- a/sysdeps/sparc/sparc64/dl-machine.h
+++ b/sysdeps/sparc/sparc64/dl-machine.h
@@ -86,7 +86,7 @@ elf_machine_load_address (void)
}
static inline Elf64_Addr __attribute__ ((always_inline))
-elf_machine_fixup_plt (struct link_map *map, lookup_t t,
+elf_machine_fixup_plt (struct link_map_private *map, lookup_t t,
const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
const Elf64_Rela *reloc,
Elf64_Addr *reloc_addr, Elf64_Addr value)
@@ -98,7 +98,7 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t t,
/* Return the final value of a plt relocation. */
static inline Elf64_Addr
-elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc,
+elf_machine_plt_value (struct link_map_private *map, const Elf64_Rela *reloc,
Elf64_Addr value)
{
/* Don't add addend here, but in elf_machine_fixup_plt instead.
@@ -124,8 +124,8 @@ elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc,
entries will jump to the on-demand fixup code in dl-runtime.c. */
static inline int
-elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
- int lazy, int profile)
+elf_machine_runtime_setup (struct link_map_private *l,
+ struct r_scope_elem *scope[], int lazy, int profile)
{
if (l->l_info[DT_JMPREL] && lazy)
{
@@ -197,7 +197,7 @@ elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
/* Now put the magic cookie at the beginning of .PLT2
Entry .PLT3 is unused by this implementation. */
- *((struct link_map **)(&plt[16])) = l;
+ *((struct link_map_private **)(&plt[16])) = l;
}
return lazy;
@@ -277,7 +277,7 @@ elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
static inline void
__attribute__ ((always_inline))
-elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
const Elf64_Rela *reloc, const Elf64_Sym *sym,
const struct r_found_version *version,
void *const reloc_addr_arg, int skip_ifunc)
@@ -288,7 +288,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
#endif
Elf64_Addr value;
const unsigned long int r_type = ELF64_R_TYPE_ID (reloc->r_info);
- struct link_map *sym_map = NULL;
+ struct link_map_private *sym_map = NULL;
if (__glibc_unlikely (r_type == R_SPARC_NONE))
return;
@@ -302,7 +302,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
#if !defined RTLD_BOOTSTRAP
if (__glibc_unlikely (r_type == R_SPARC_RELATIVE))
{
- *reloc_addr += map->l_addr + reloc->r_addend;
+ *reloc_addr += map->l_public.l_addr + reloc->r_addend;
return;
}
#endif
@@ -311,7 +311,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
&& sym->st_shndx != SHN_UNDEF)
{
sym_map = map;
- value = map->l_addr;
+ value = map->l_public.l_addr;
}
else
{
@@ -545,7 +545,8 @@ elf_machine_rela_relative (Elf64_Addr l_addr, const Elf64_Rela *reloc,
static inline void
__attribute__ ((always_inline))
-elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_lazy_rel (struct link_map_private *map,
+ struct r_scope_elem *scope[],
Elf64_Addr l_addr, const Elf64_Rela *reloc,
int skip_ifunc)
{
@@ -557,7 +558,7 @@ elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
else if (r_type == R_SPARC_JMP_IREL
|| r_type == R_SPARC_IRELATIVE)
{
- Elf64_Addr value = map->l_addr + reloc->r_addend;
+ Elf64_Addr value = map->l_public.l_addr + reloc->r_addend;
if (__glibc_likely (!skip_ifunc))
value = ((Elf64_Addr (*) (int)) value) (GLRO(dl_hwcap));
if (r_type == R_SPARC_JMP_IREL)
diff --git a/sysdeps/sparc/sparc64/dl-plt.h b/sysdeps/sparc/sparc64/dl-plt.h
index 900563bb87..e2350ab968 100644
--- a/sysdeps/sparc/sparc64/dl-plt.h
+++ b/sysdeps/sparc/sparc64/dl-plt.h
@@ -22,7 +22,7 @@
/* We have 4 cases to handle. And we code different code sequences
for each one. I love V9 code models... */
static inline void __attribute__ ((always_inline))
-sparc64_fixup_plt (struct link_map *map, const Elf64_Rela *reloc,
+sparc64_fixup_plt (struct link_map_private *map, const Elf64_Rela *reloc,
Elf64_Addr *reloc_addr, Elf64_Addr value,
Elf64_Addr high, int t)
{
@@ -43,7 +43,7 @@ sparc64_fixup_plt (struct link_map *map, const Elf64_Rela *reloc,
/* PLT entries .PLT32768 and above look always the same. */
if (__builtin_expect (high, 0) != 0)
{
- *reloc_addr = value - map->l_addr;
+ *reloc_addr = value - map->l_public.l_addr;
}
/* Near destination. */
else if (disp >= -0x800000 && disp < 0x800000)
diff --git a/sysdeps/unix/sysv/linux/dl-vdso.h b/sysdeps/unix/sysv/linux/dl-vdso.h
index 64002cd075..bfe6b82f10 100644
--- a/sysdeps/unix/sysv/linux/dl-vdso.h
+++ b/sysdeps/unix/sysv/linux/dl-vdso.h
@@ -37,7 +37,7 @@
static inline void *
dl_vdso_vsym (const char *name)
{
- struct link_map *map = GLRO (dl_sysinfo_map);
+ struct link_map_private *map = GLRO (dl_sysinfo_map);
if (map == NULL)
return NULL;
diff --git a/sysdeps/unix/sysv/linux/powerpc/libc-start.c b/sysdeps/unix/sysv/linux/powerpc/libc-start.c
index b6aec4615d..4781d2bcdd 100644
--- a/sysdeps/unix/sysv/linux/powerpc/libc-start.c
+++ b/sysdeps/unix/sysv/linux/powerpc/libc-start.c
@@ -101,7 +101,7 @@ __libc_start_main_impl (int argc, char **argv,
void *stmain = stinfo->main;
#if ENABLE_STATIC_PIE && !defined SHARED
- struct link_map *map = _dl_get_dl_main_map ();
+ struct link_map_private *map = _dl_get_dl_main_map ();
if (!map->l_relocated)
stmain = (char *) stmain + elf_machine_load_address ();
#endif
diff --git a/sysdeps/unix/sysv/linux/powerpc/powerpc64/ldsodefs.h b/sysdeps/unix/sysv/linux/powerpc/powerpc64/ldsodefs.h
index d9f2e0b8cb..5ed1a30535 100644
--- a/sysdeps/unix/sysv/linux/powerpc/powerpc64/ldsodefs.h
+++ b/sysdeps/unix/sysv/linux/powerpc/powerpc64/ldsodefs.h
@@ -26,19 +26,21 @@
#if _CALL_ELF != 2
static __always_inline bool
-_dl_ppc64_is_opd_sym (const struct link_map *l, const ElfW(Sym) *sym)
+_dl_ppc64_is_opd_sym (const struct link_map_private *l, const ElfW(Sym) *sym)
{
return (ELFW(ST_TYPE) (sym->st_info) == STT_FUNC
- && l->l_addr + sym->st_value >= (ElfW(Addr)) l->l_ld
- && l->l_addr + sym->st_value < l->l_map_end
+ && (l->l_public.l_addr + sym->st_value
+ >= (ElfW(Addr)) l->l_public.l_ld)
+ && l->l_public.l_addr + sym->st_value < l->l_map_end
&& sym->st_size != 0);
}
static __always_inline bool
-_dl_ppc64_addr_sym_match (const struct link_map *l, const ElfW(Sym) *sym,
+_dl_ppc64_addr_sym_match (const struct link_map_private *l,
+ const ElfW(Sym) *sym,
const ElfW(Sym) *matchsym, ElfW(Addr) addr)
{
- ElfW(Addr) value = l->l_addr + sym->st_value;
+ ElfW(Addr) value = l->l_public.l_addr + sym->st_value;
if (_dl_ppc64_is_opd_sym (l, sym))
{
if (addr < value || addr >= value + 24)
@@ -59,7 +61,7 @@ _dl_ppc64_addr_sym_match (const struct link_map *l, const ElfW(Sym) *sym,
if (matchsym == NULL)
return true;
- ElfW(Addr) matchvalue = l->l_addr + matchsym->st_value;
+ ElfW(Addr) matchvalue = l->l_public.l_addr + matchsym->st_value;
if (_dl_ppc64_is_opd_sym (l, matchsym)
&& (addr < matchvalue || addr > matchvalue + 24))
matchvalue = *(ElfW(Addr) *) matchvalue;
diff --git a/sysdeps/unix/sysv/linux/powerpc/rtld_static_init.h b/sysdeps/unix/sysv/linux/powerpc/rtld_static_init.h
index 53f7d3c2d9..e301b34b81 100644
--- a/sysdeps/unix/sysv/linux/powerpc/rtld_static_init.h
+++ b/sysdeps/unix/sysv/linux/powerpc/rtld_static_init.h
@@ -17,7 +17,8 @@
<https://www.gnu.org/licenses/>. */
static inline void
-__rtld_static_init_arch (struct link_map *map, struct rtld_global_ro *dl)
+__rtld_static_init_arch (struct link_map_private *map,
+ struct rtld_global_ro *dl)
{
/* This field does not exist in the generic _rtld_global_ro version. */
diff --git a/sysdeps/x86/dl-lookupcfg.h b/sysdeps/x86/dl-lookupcfg.h
index 1f6be346c1..4a8904b4c4 100644
--- a/sysdeps/x86/dl-lookupcfg.h
+++ b/sysdeps/x86/dl-lookupcfg.h
@@ -20,8 +20,8 @@
#include_next <dl-lookupcfg.h>
-struct link_map;
+struct link_map_private;
-extern void _dl_unmap (struct link_map *map) attribute_hidden;
+extern void _dl_unmap (struct link_map_private *map) attribute_hidden;
#define DL_UNMAP(map) _dl_unmap (map)
diff --git a/sysdeps/x86/dl-prop.h b/sysdeps/x86/dl-prop.h
index b2836f3009..ba70b06c3a 100644
--- a/sysdeps/x86/dl-prop.h
+++ b/sysdeps/x86/dl-prop.h
@@ -21,17 +21,17 @@
#include <libintl.h>
-extern void _dl_cet_check (struct link_map *, const char *)
+extern void _dl_cet_check (struct link_map_private *, const char *)
attribute_hidden;
-extern void _dl_cet_open_check (struct link_map *)
+extern void _dl_cet_open_check (struct link_map_private *)
attribute_hidden;
static void
-dl_isa_level_check (struct link_map *m, const char *program)
+dl_isa_level_check (struct link_map_private *m, const char *program)
{
const struct cpu_features *cpu_features = __get_cpu_features ();
unsigned int i;
- struct link_map *l;
+ struct link_map_private *l;
i = m->l_searchlist.r_nlist;
while (i-- > 0)
@@ -55,16 +55,17 @@ dl_isa_level_check (struct link_map *m, const char *program)
{
if (program)
_dl_fatal_printf ("%s: CPU ISA level is lower than required\n",
- *l->l_name != '\0' ? l->l_name : program);
+ *l->l_public.l_name != '\0'
+ ? l->l_public.l_name : program);
else
- _dl_signal_error (0, l->l_name, "dlopen",
+ _dl_signal_error (0, l->l_public.l_name, "dlopen",
N_("CPU ISA level is lower than required"));
}
}
}
static inline void __attribute__ ((always_inline))
-_rtld_main_check (struct link_map *m, const char *program)
+_rtld_main_check (struct link_map_private *m, const char *program)
{
dl_isa_level_check (m, program);
#if CET_ENABLED
@@ -73,7 +74,7 @@ _rtld_main_check (struct link_map *m, const char *program)
}
static inline void __attribute__ ((always_inline))
-_dl_open_check (struct link_map *m)
+_dl_open_check (struct link_map_private *m)
{
dl_isa_level_check (m, NULL);
#if CET_ENABLED
@@ -82,7 +83,7 @@ _dl_open_check (struct link_map *m)
}
static inline void __attribute__ ((unused))
-_dl_process_property_note (struct link_map *l, const ElfW(Nhdr) *note,
+_dl_process_property_note (struct link_map_private *l, const ElfW(Nhdr) *note,
const ElfW(Addr) size, const ElfW(Addr) align)
{
/* Skip if we have seen a NT_GNU_PROPERTY_TYPE_0 note before. */
@@ -207,14 +208,14 @@ _dl_process_property_note (struct link_map *l, const ElfW(Nhdr) *note,
}
static inline void __attribute__ ((unused))
-_dl_process_pt_note (struct link_map *l, int fd, const ElfW(Phdr) *ph)
+_dl_process_pt_note (struct link_map_private *l, int fd, const ElfW(Phdr) *ph)
{
- const ElfW(Nhdr) *note = (const void *) (ph->p_vaddr + l->l_addr);
+ const ElfW(Nhdr) *note = (const void *) (ph->p_vaddr + l->l_public.l_addr);
_dl_process_property_note (l, note, ph->p_memsz, ph->p_align);
}
static inline int __attribute__ ((always_inline))
-_dl_process_gnu_property (struct link_map *l, int fd, uint32_t type,
+_dl_process_gnu_property (struct link_map_private *l, int fd, uint32_t type,
uint32_t datasz, void *data)
{
return 0;
diff --git a/sysdeps/x86_64/dl-machine.h b/sysdeps/x86_64/dl-machine.h
index 581a2f1a9e..915ac77dd8 100644
--- a/sysdeps/x86_64/dl-machine.h
+++ b/sysdeps/x86_64/dl-machine.h
@@ -58,8 +58,8 @@ elf_machine_dynamic (void)
entries will jump to the on-demand fixup code in dl-runtime.c. */
static inline int __attribute__ ((unused, always_inline))
-elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
- int lazy, int profile)
+elf_machine_runtime_setup (struct link_map_private *l,
+ struct r_scope_elem *scope[], int lazy, int profile)
{
Elf64_Addr *got;
extern void _dl_runtime_resolve_fxsave (ElfW(Word)) attribute_hidden;
@@ -81,7 +81,7 @@ elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
The prelinker saved us here address of .plt + 0x16. */
if (got[1])
{
- l->l_mach.plt = got[1] + l->l_addr;
+ l->l_mach.plt = got[1] + l->l_public.l_addr;
l->l_mach.gotplt = (ElfW(Addr)) &got[3];
}
/* Identify this shared object. */
@@ -148,7 +148,7 @@ _dl_start_user:\n\
movq %rax, %r12\n\
# Read the original argument count.\n\
movq (%rsp), %rdx\n\
- # Call _dl_init (struct link_map *main_map, int argc, char **argv, char **env)\n\
+ # Call _dl_init (struct link_map_private *main_map, int argc, char **argv, char **env)\n\
# argc -> rsi\n\
movq %rdx, %rsi\n\
# Save %rsp value in %r13.\n\
@@ -214,7 +214,7 @@ dl_platform_init (void)
}
static inline ElfW(Addr)
-elf_machine_fixup_plt (struct link_map *map, lookup_t t,
+elf_machine_fixup_plt (struct link_map_private *map, lookup_t t,
const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
const ElfW(Rela) *reloc,
ElfW(Addr) *reloc_addr, ElfW(Addr) value)
@@ -225,7 +225,7 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t t,
/* Return the final value of a PLT relocation. On x86-64 the
JUMP_SLOT relocation ignores the addend. */
static inline ElfW(Addr)
-elf_machine_plt_value (struct link_map *map, const ElfW(Rela) *reloc,
+elf_machine_plt_value (struct link_map_private *map, const ElfW(Rela) *reloc,
ElfW(Addr) value)
{
return value;
@@ -244,7 +244,7 @@ elf_machine_plt_value (struct link_map *map, const ElfW(Rela) *reloc,
MAP is the object containing the reloc. */
static inline void __attribute__((always_inline))
-elf_machine_rela(struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_rela(struct link_map_private *map, struct r_scope_elem *scope[],
const ElfW(Rela) *reloc, const ElfW(Sym) *sym,
const struct r_found_version *version,
void *const reloc_addr_arg, int skip_ifunc) {
@@ -253,14 +253,15 @@ elf_machine_rela(struct link_map *map, struct r_scope_elem *scope[],
# if !defined RTLD_BOOTSTRAP
if (__glibc_unlikely (r_type == R_X86_64_RELATIVE))
- *reloc_addr = map->l_addr + reloc->r_addend;
+ *reloc_addr = map->l_public.l_addr + reloc->r_addend;
else
# endif
# if !defined RTLD_BOOTSTRAP
/* l_addr + r_addend may be > 0xffffffff and R_X86_64_RELATIVE64
relocation updates the whole 64-bit entry. */
if (__glibc_unlikely (r_type == R_X86_64_RELATIVE64))
- *(Elf64_Addr *) reloc_addr = (Elf64_Addr) map->l_addr + reloc->r_addend;
+ *(Elf64_Addr *) reloc_addr
+ = (Elf64_Addr) map->l_public.l_addr + reloc->r_addend;
else
# endif
if (__glibc_unlikely (r_type == R_X86_64_NONE))
@@ -270,7 +271,7 @@ elf_machine_rela(struct link_map *map, struct r_scope_elem *scope[],
# ifndef RTLD_BOOTSTRAP
const ElfW(Sym) *const refsym = sym;
# endif
- struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version,
+ struct link_map_private *sym_map = RESOLVE_MAP (map, scope, &sym, version,
r_type);
ElfW(Addr) value = SYMBOL_ADDRESS (sym_map, sym, true);
@@ -290,12 +291,12 @@ elf_machine_rela(struct link_map *map, struct r_scope_elem *scope[],
%s: IFUNC symbol '%s' referenced in '%s' is defined in the executable \
and creates an unsatisfiable circular dependency.\n",
RTLD_PROGNAME, strtab + refsym->st_name,
- map->l_name);
+ map->l_public.l_name);
else
_dl_error_printf ("\
%s: Relink `%s' with `%s' for IFUNC symbol `%s'\n",
- RTLD_PROGNAME, map->l_name,
- sym_map->l_name,
+ RTLD_PROGNAME, map->l_public.l_name,
+ sym_map->l_public.l_name,
strtab + refsym->st_name);
}
# endif
@@ -459,7 +460,7 @@ and creates an unsatisfiable circular dependency.\n",
}
break;
case R_X86_64_IRELATIVE:
- value = map->l_addr + reloc->r_addend;
+ value = map->l_public.l_addr + reloc->r_addend;
if (__glibc_likely (!skip_ifunc))
value = ((ElfW(Addr) (*) (void)) value) ();
*reloc_addr = value;
@@ -493,7 +494,8 @@ elf_machine_rela_relative (ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
static inline void
__attribute ((always_inline))
-elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
+elf_machine_lazy_rel (struct link_map_private *map,
+ struct r_scope_elem *scope[],
ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
int skip_ifunc)
{
@@ -531,7 +533,7 @@ elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
}
else if (__glibc_unlikely (r_type == R_X86_64_IRELATIVE))
{
- ElfW(Addr) value = map->l_addr + reloc->r_addend;
+ ElfW(Addr) value = map->l_public.l_addr + reloc->r_addend;
if (__glibc_likely (!skip_ifunc))
value = ((ElfW(Addr) (*) (void)) value) ();
*reloc_addr = value;
diff --git a/sysdeps/x86_64/dl-tlsdesc.h b/sysdeps/x86_64/dl-tlsdesc.h
index 4931ccbe33..ebff5e819d 100644
--- a/sysdeps/x86_64/dl-tlsdesc.h
+++ b/sysdeps/x86_64/dl-tlsdesc.h
@@ -58,7 +58,7 @@ extern ptrdiff_t attribute_hidden
_dl_tlsdesc_undefweak(struct tlsdesc *on_rax);
# ifdef SHARED
-extern void *_dl_make_tlsdesc_dynamic (struct link_map *map,
+extern void *_dl_make_tlsdesc_dynamic (struct link_map_private *map,
size_t ti_offset)
attribute_hidden;
diff --git a/sysdeps/x86_64/tlsdesc.c b/sysdeps/x86_64/tlsdesc.c
index 30aa8eb9ae..2222622c8e 100644
--- a/sysdeps/x86_64/tlsdesc.c
+++ b/sysdeps/x86_64/tlsdesc.c
@@ -26,7 +26,7 @@
if there is one. */
void
-_dl_unmap (struct link_map *map)
+_dl_unmap (struct link_map_private *map)
{
_dl_unmap_segments (map);
--
2.43.0
^ permalink raw reply [flat|nested] 77+ messages in thread
* [PATCH v3 15/32] elf: Remove run-time-writable fields from struct link_map_private
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (13 preceding siblings ...)
2023-12-07 10:31 ` [PATCH v3 14/32] elf: Use struct link_map_private for the internal link map Florian Weimer
@ 2023-12-07 10:32 ` Florian Weimer
2024-02-23 0:09 ` Joseph Myers
2023-12-07 10:32 ` [PATCH v3 16/32] elf: Move l_tls_offset into read-write part of link map Florian Weimer
` (17 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:32 UTC (permalink / raw)
To: libc-alpha
And introduce struct link_map_rw.
These fields are written during run-time relocation (for lazy binding)
or during dlopen, so they are difficult to handle efficiently with
otherwise read-only link maps. Moving them into a separate allocation
makes it possible to keep them read-write while the rest of the link
map is read-only.
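For readers skimming the diff, the allocation pattern looks roughly like
the self-contained sketch below. The *_sketch types and
sketch_new_object are illustrative stand-ins, not glibc identifiers; the
actual change is in _dl_new_object and include/link.h further down.

#include <stdlib.h>

/* Stand-in for the always-writable part of the link map.  */
struct link_map_rw_sketch
{
  unsigned int l_direct_opencount;   /* dlopen/dlclose reference count.  */
};

/* Stand-in for the (eventually read-only) link map itself.  */
struct link_map_private_sketch
{
  const char *l_name;                /* Fixed once the object is set up.  */
  struct link_map_rw_sketch *l_rw;   /* Separate read-write allocation.  */
};

static struct link_map_private_sketch *
sketch_new_object (const char *name)
{
  struct link_map_private_sketch *new_map = calloc (1, sizeof (*new_map));
  if (new_map == NULL)
    return NULL;
  /* Mutable counters and flags live in a second allocation, so the
     main structure can later be made read-only.  */
  new_map->l_rw = calloc (1, sizeof (*new_map->l_rw));
  if (new_map->l_rw == NULL)
    {
      free (new_map);
      return NULL;
    }
  new_map->l_name = name;
  ++new_map->l_rw->l_direct_opencount;
  return new_map;
}

Every run-time update touched by this patch (l_direct_opencount,
l_init_called, l_reldeps and friends) is redirected through the l_rw
pointer in exactly this way.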
---
elf/circleload1.c | 4 +-
elf/dl-call_fini.c | 2 +-
elf/dl-close.c | 33 ++++++++--------
elf/dl-deps.c | 14 +++----
elf/dl-find_object.c | 2 +-
elf/dl-fini.c | 6 +--
elf/dl-init.c | 4 +-
elf/dl-lookup.c | 47 ++++++++++++-----------
elf/dl-object.c | 6 +++
elf/dl-open.c | 30 +++++++--------
elf/dl-sort-maps.c | 13 ++++---
elf/dl-support.c | 1 +
elf/get-dynamic-info.h | 2 +-
elf/loadtest.c | 8 ++--
elf/neededtest.c | 5 +--
elf/neededtest2.c | 4 +-
elf/neededtest3.c | 4 +-
elf/neededtest4.c | 4 +-
elf/rtld.c | 5 ++-
elf/unload.c | 5 +--
elf/unload2.c | 5 +--
include/link.h | 67 ++++++++++++++++++++-------------
stdlib/cxa_thread_atexit_impl.c | 4 +-
sysdeps/x86/dl-prop.h | 2 +-
24 files changed, 151 insertions(+), 126 deletions(-)
diff --git a/elf/circleload1.c b/elf/circleload1.c
index dcf04bc25a..6252bd2961 100644
--- a/elf/circleload1.c
+++ b/elf/circleload1.c
@@ -29,8 +29,8 @@ check_loaded_objects (const char **loaded)
for (lm = MAPS; lm != NULL; lm = l_next (lm))
{
if (lm->l_public.l_name && lm->l_public.l_name[0])
- printf(" %s, count = %d\n", lm->l_public.l_name,
- (int) lm->l_direct_opencount);
+ printf(" %s, count = %u\n",
+ lm->l_public.l_name, lm->l_rw->l_direct_opencount);
if (lm->l_type == lt_loaded && lm->l_public.l_name)
{
int match = 0;
diff --git a/elf/dl-call_fini.c b/elf/dl-call_fini.c
index a9d60e9803..7c5d7e02c9 100644
--- a/elf/dl-call_fini.c
+++ b/elf/dl-call_fini.c
@@ -30,7 +30,7 @@ _dl_call_fini (void *closure_map)
map->l_public.l_name, map->l_ns);
/* Make sure nothing happens if we are called twice. */
- map->l_init_called = 0;
+ map->l_rw->l_init_called = 0;
ElfW(Dyn) *fini_array = map->l_info[DT_FINI_ARRAY];
if (fini_array != NULL)
diff --git a/elf/dl-close.c b/elf/dl-close.c
index 7222b21cf0..1af60845f5 100644
--- a/elf/dl-close.c
+++ b/elf/dl-close.c
@@ -109,23 +109,23 @@ void
_dl_close_worker (struct link_map_private *map, bool force)
{
/* One less direct use. */
- --map->l_direct_opencount;
+ --map->l_rw->l_direct_opencount;
/* If _dl_close is called recursively (some destructor call dlclose),
just record that the parent _dl_close will need to do garbage collection
again and return. */
static enum { not_pending, pending, rerun } dl_close_state;
- if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
+ if (map->l_rw->l_direct_opencount > 0 || map->l_type != lt_loaded
|| dl_close_state != not_pending)
{
- if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
+ if (map->l_rw->l_direct_opencount == 0 && map->l_type == lt_loaded)
dl_close_state = rerun;
/* There are still references to this object. Do nothing more. */
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
- map->l_public.l_name, map->l_direct_opencount);
+ map->l_public.l_name, map->l_rw->l_direct_opencount);
return;
}
@@ -175,11 +175,11 @@ _dl_close_worker (struct link_map_private *map, bool force)
/* Check whether this object is still used. */
if (l->l_type == lt_loaded
- && l->l_direct_opencount == 0
- && !l->l_nodelete_active
+ && l->l_rw->l_direct_opencount == 0
+ && !l->l_rw->l_nodelete_active
/* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
acquire is sufficient and correct. */
- && atomic_load_acquire (&l->l_tls_dtor_count) == 0
+ && atomic_load_acquire (&l->l_rw->l_tls_dtor_count) == 0
&& !l->l_map_used)
continue;
@@ -217,10 +217,10 @@ _dl_close_worker (struct link_map_private *map, bool force)
}
}
/* And the same for relocation dependencies. */
- if (l->l_reldeps != NULL)
- for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
+ if (l->l_rw->l_reldeps != NULL)
+ for (unsigned int j = 0; j < l->l_rw->l_reldeps->act; ++j)
{
- struct link_map_private *jmap = l->l_reldeps->list[j];
+ struct link_map_private *jmap = l->l_rw->l_reldeps->list[j];
if (jmap->l_idx != IDX_STILL_USED)
{
@@ -255,12 +255,12 @@ _dl_close_worker (struct link_map_private *map, bool force)
if (!imap->l_map_used)
{
- assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);
+ assert (imap->l_type == lt_loaded && !imap->l_rw->l_nodelete_active);
/* Call its termination function. Do not do it for
half-cooked objects. Temporarily disable exception
handling, so that errors are fatal. */
- if (imap->l_init_called)
+ if (imap->l_rw->l_init_called)
_dl_catch_exception (NULL, _dl_call_fini, imap);
#ifdef SHARED
@@ -507,7 +507,7 @@ _dl_close_worker (struct link_map_private *map, bool force)
if (GL(dl_tls_dtv_slotinfo_list) != NULL
&& ! remove_slotinfo (imap->l_tls_modid,
GL(dl_tls_dtv_slotinfo_list), 0,
- imap->l_init_called))
+ imap->l_rw->l_init_called))
/* All dynamically loaded modules with TLS are unloaded. */
/* Can be read concurrently. */
atomic_store_relaxed (&GL(dl_tls_max_dtv_idx),
@@ -663,7 +663,8 @@ _dl_close_worker (struct link_map_private *map, bool force)
if (imap->l_origin != (char *) -1)
free ((char *) imap->l_origin);
- free (imap->l_reldeps);
+ free (imap->l_rw->l_reldeps);
+ free (imap->l_rw);
/* Print debugging message. */
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
@@ -769,7 +770,7 @@ _dl_close (void *_map)
before we took the lock. There is no way to detect this (see below)
so we proceed assuming this isn't the case. First see whether we
can remove the object at all. */
- if (__glibc_unlikely (map->l_nodelete_active))
+ if (__glibc_unlikely (map->l_rw->l_nodelete_active))
{
/* Nope. Do nothing. */
__rtld_lock_unlock_recursive (GL(dl_load_lock));
@@ -786,7 +787,7 @@ _dl_close (void *_map)
should be a detectable case and given that dlclose should be threadsafe
we need this to be a reliable detection.
This is bug 20990. */
- if (__builtin_expect (map->l_direct_opencount, 1) == 0)
+ if (__builtin_expect (map->l_rw->l_direct_opencount, 1) == 0)
{
__rtld_lock_unlock_recursive (GL(dl_load_lock));
_dl_signal_error (0, map->l_public.l_name, NULL,
diff --git a/elf/dl-deps.c b/elf/dl-deps.c
index c730713167..1e759fd895 100644
--- a/elf/dl-deps.c
+++ b/elf/dl-deps.c
@@ -483,20 +483,20 @@ _dl_map_object_deps (struct link_map_private *map,
/* Maybe we can remove some relocation dependencies now. */
struct link_map_reldeps *l_reldeps = NULL;
- if (map->l_reldeps != NULL)
+ if (map->l_rw->l_reldeps != NULL)
{
for (i = 0; i < nlist; ++i)
map->l_searchlist.r_list[i]->l_reserved = 1;
/* Avoid removing relocation dependencies of the main binary. */
map->l_reserved = 0;
- struct link_map_private **list = &map->l_reldeps->list[0];
- for (i = 0; i < map->l_reldeps->act; ++i)
+ struct link_map_private **list = &map->l_rw->l_reldeps->list[0];
+ for (i = 0; i < map->l_rw->l_reldeps->act; ++i)
if (list[i]->l_reserved)
{
/* Need to allocate new array of relocation dependencies. */
l_reldeps = malloc (sizeof (*l_reldeps)
- + map->l_reldepsmax
+ + map->l_rw->l_reldepsmax
* sizeof (struct link_map_private *));
if (l_reldeps == NULL)
/* Bad luck, keep the reldeps duplicated between
@@ -507,7 +507,7 @@ _dl_map_object_deps (struct link_map_private *map,
unsigned int j = i;
memcpy (&l_reldeps->list[0], &list[0],
i * sizeof (struct link_map_private *));
- for (i = i + 1; i < map->l_reldeps->act; ++i)
+ for (i = i + 1; i < map->l_rw->l_reldeps->act; ++i)
if (!list[i]->l_reserved)
l_reldeps->list[j++] = list[i];
l_reldeps->act = j;
@@ -552,8 +552,8 @@ _dl_map_object_deps (struct link_map_private *map,
if (l_reldeps != NULL)
{
atomic_write_barrier ();
- void *old_l_reldeps = map->l_reldeps;
- map->l_reldeps = l_reldeps;
+ void *old_l_reldeps = map->l_rw->l_reldeps;
+ map->l_rw->l_reldeps = l_reldeps;
_dl_scope_free (old_l_reldeps);
}
if (old_l_initfini != NULL)
diff --git a/elf/dl-find_object.c b/elf/dl-find_object.c
index 5042b0a8c1..f81351b0ef 100644
--- a/elf/dl-find_object.c
+++ b/elf/dl-find_object.c
@@ -508,7 +508,7 @@ _dlfo_process_initial (void)
if (l != main_map && l == l->l_real)
{
/* lt_library link maps are implicitly NODELETE. */
- if (l->l_type == lt_library || l->l_nodelete_active)
+ if (l->l_type == lt_library || l->l_rw->l_nodelete_active)
{
if (_dlfo_nodelete_mappings != NULL)
/* Second pass only. */
diff --git a/elf/dl-fini.c b/elf/dl-fini.c
index 2e10f1b0b6..5c78159fee 100644
--- a/elf/dl-fini.c
+++ b/elf/dl-fini.c
@@ -82,7 +82,7 @@ _dl_fini (void)
/* Bump l_direct_opencount of all objects so that they
are not dlclose()ed from underneath us. */
- ++l->l_direct_opencount;
+ ++l->l_rw->l_direct_opencount;
}
assert (ns != LM_ID_BASE || i == nloaded);
assert (ns == LM_ID_BASE || i == nloaded || i == nloaded - 1);
@@ -109,7 +109,7 @@ _dl_fini (void)
{
struct link_map_private *l = maps[i];
- if (l->l_init_called)
+ if (l->l_rw->l_init_called)
{
_dl_call_fini (l);
#ifdef SHARED
@@ -119,7 +119,7 @@ _dl_fini (void)
}
/* Correct the previous increment. */
- --l->l_direct_opencount;
+ --l->l_rw->l_direct_opencount;
}
#ifdef SHARED
diff --git a/elf/dl-init.c b/elf/dl-init.c
index b220ca9239..0e44e6c24a 100644
--- a/elf/dl-init.c
+++ b/elf/dl-init.c
@@ -34,13 +34,13 @@ call_init (struct link_map_private *l, int argc, char **argv, char **env)
need relocation.) */
assert (l->l_relocated || l->l_type == lt_executable);
- if (l->l_init_called)
+ if (l->l_rw->l_init_called)
/* This object is all done. */
return;
/* Avoid handling this constructor again in case we have a circular
dependency. */
- l->l_init_called = 1;
+ l->l_rw->l_init_called = 1;
/* Check for object which constructors we do not run here. */
if (__builtin_expect (l->l_public.l_name[0], 'a') == '\0'
diff --git a/elf/dl-lookup.c b/elf/dl-lookup.c
index d3c705811c..1cfaedbd4e 100644
--- a/elf/dl-lookup.c
+++ b/elf/dl-lookup.c
@@ -175,9 +175,9 @@ static void
mark_nodelete (struct link_map_private *map, int flags)
{
if (flags & DL_LOOKUP_FOR_RELOCATE)
- map->l_nodelete_pending = true;
+ map->l_rw->l_nodelete_pending = true;
else
- map->l_nodelete_active = true;
+ map->l_rw->l_nodelete_active = true;
}
/* Return true if MAP is marked as NODELETE according to the lookup
@@ -187,8 +187,8 @@ is_nodelete (struct link_map_private *map, int flags)
{
/* Non-pending NODELETE always counts. Pending NODELETE only counts
during initial relocation processing. */
- return map->l_nodelete_active
- || ((flags & DL_LOOKUP_FOR_RELOCATE) && map->l_nodelete_pending);
+ return map->l_rw->l_nodelete_active
+ || ((flags & DL_LOOKUP_FOR_RELOCATE) && map->l_rw->l_nodelete_pending);
}
/* Utility function for do_lookup_x. Lookup an STB_GNU_UNIQUE symbol
@@ -535,7 +535,7 @@ add_dependency (struct link_map_private *undef_map,
return 0;
struct link_map_reldeps *l_reldeps
- = atomic_forced_read (undef_map->l_reldeps);
+ = atomic_forced_read (undef_map->l_rw->l_reldeps);
/* Make sure l_reldeps is read before l_initfini. */
atomic_read_barrier ();
@@ -595,22 +595,23 @@ add_dependency (struct link_map_private *undef_map,
/* Redo the l_reldeps check if undef_map's l_reldeps changed in
the mean time. */
- if (undef_map->l_reldeps != NULL)
+ if (undef_map->l_rw->l_reldeps != NULL)
{
- if (undef_map->l_reldeps != l_reldeps)
+ if (undef_map->l_rw->l_reldeps != l_reldeps)
{
- struct link_map_private **list = &undef_map->l_reldeps->list[0];
- l_reldepsact = undef_map->l_reldeps->act;
+ struct link_map_private **list
+ = &undef_map->l_rw->l_reldeps->list[0];
+ l_reldepsact = undef_map->l_rw->l_reldeps->act;
for (i = 0; i < l_reldepsact; ++i)
if (list[i] == map)
goto out_check;
}
- else if (undef_map->l_reldeps->act > l_reldepsact)
+ else if (undef_map->l_rw->l_reldeps->act > l_reldepsact)
{
struct link_map_private **list
- = &undef_map->l_reldeps->list[0];
+ = &undef_map->l_rw->l_reldeps->list[0];
i = l_reldepsact;
- l_reldepsact = undef_map->l_reldeps->act;
+ l_reldepsact = undef_map->l_rw->l_reldeps->act;
for (; i < l_reldepsact; ++i)
if (list[i] == map)
goto out_check;
@@ -666,14 +667,17 @@ marking %s [%lu] as NODELETE due to reference from %s [%lu]\n",
}
/* Add the reference now. */
- if (__glibc_unlikely (l_reldepsact >= undef_map->l_reldepsmax))
+ if (__glibc_unlikely (l_reldepsact >= undef_map->l_rw->l_reldepsmax))
{
/* Allocate more memory for the dependency list. Since this
can never happen during the startup phase we can use
`realloc'. */
struct link_map_reldeps *newp;
- unsigned int max
- = undef_map->l_reldepsmax ? undef_map->l_reldepsmax * 2 : 10;
+ unsigned int max;
+ if (undef_map->l_rw->l_reldepsmax > 0)
+ max = undef_map->l_rw->l_reldepsmax * 2;
+ else
+ max = 10;
#ifdef RTLD_PREPARE_FOREIGN_CALL
RTLD_PREPARE_FOREIGN_CALL;
@@ -701,23 +705,24 @@ marking %s [%lu] as NODELETE due to memory allocation failure\n",
else
{
if (l_reldepsact)
- memcpy (&newp->list[0], &undef_map->l_reldeps->list[0],
+ memcpy (&newp->list[0],
+ &undef_map->l_rw->l_reldeps->list[0],
l_reldepsact * sizeof (struct link_map_private *));
newp->list[l_reldepsact] = map;
newp->act = l_reldepsact + 1;
atomic_write_barrier ();
- void *old = undef_map->l_reldeps;
- undef_map->l_reldeps = newp;
- undef_map->l_reldepsmax = max;
+ void *old = undef_map->l_rw->l_reldeps;
+ undef_map->l_rw->l_reldeps = newp;
+ undef_map->l_rw->l_reldepsmax = max;
if (old)
_dl_scope_free (old);
}
}
else
{
- undef_map->l_reldeps->list[l_reldepsact] = map;
+ undef_map->l_rw->l_reldeps->list[l_reldepsact] = map;
atomic_write_barrier ();
- undef_map->l_reldeps->act = l_reldepsact + 1;
+ undef_map->l_rw->l_reldeps->act = l_reldepsact + 1;
}
/* Display information if we are debugging. */
diff --git a/elf/dl-object.c b/elf/dl-object.c
index 3e06e22ab2..c6c0f7824b 100644
--- a/elf/dl-object.c
+++ b/elf/dl-object.c
@@ -94,6 +94,12 @@ _dl_new_object (char *realname, const char *libname, int type,
+ sizeof (*newname) + libname_len, 1);
if (new == NULL)
return NULL;
+ new->l_rw = calloc (1, sizeof (*new->l_rw));
+ if (new->l_rw == NULL)
+ {
+ free (new);
+ return NULL;
+ }
new->l_real = new;
new->l_symbolic_searchlist.r_list
diff --git a/elf/dl-open.c b/elf/dl-open.c
index 542889a6b8..306cdcc6ac 100644
--- a/elf/dl-open.c
+++ b/elf/dl-open.c
@@ -263,7 +263,7 @@ resize_scopes (struct link_map_private *new)
/* If the initializer has been called already, the object has
not been loaded here and now. */
- if (imap->l_init_called && imap->l_type == lt_loaded)
+ if (imap->l_rw->l_init_called && imap->l_type == lt_loaded)
{
if (scope_has_map (imap, new))
/* Avoid duplicates. */
@@ -327,7 +327,7 @@ update_scopes (struct link_map_private *new)
struct link_map_private *imap = new->l_searchlist.r_list[i];
int from_scope = 0;
- if (imap->l_init_called && imap->l_type == lt_loaded)
+ if (imap->l_rw->l_init_called && imap->l_type == lt_loaded)
{
if (scope_has_map (imap, new))
/* Avoid duplicates. */
@@ -368,7 +368,7 @@ resize_tls_slotinfo (struct link_map_private *new)
/* Only add TLS memory if this object is loaded now and
therefore is not yet initialized. */
- if (! imap->l_init_called && imap->l_tls_blocksize > 0)
+ if (! imap->l_rw->l_init_called && imap->l_tls_blocksize > 0)
{
_dl_add_to_slotinfo (imap, false);
any_tls = true;
@@ -390,7 +390,7 @@ update_tls_slotinfo (struct link_map_private *new)
/* Only add TLS memory if this object is loaded now and
therefore is not yet initialized. */
- if (! imap->l_init_called && imap->l_tls_blocksize > 0)
+ if (! imap->l_rw->l_init_called && imap->l_tls_blocksize > 0)
{
_dl_add_to_slotinfo (imap, true);
@@ -415,7 +415,7 @@ TLS generation counter wrapped! Please report this."));
struct link_map_private *imap = new->l_searchlist.r_list[i];
if (imap->l_need_tls_init
- && ! imap->l_init_called
+ && ! imap->l_rw->l_init_called
&& imap->l_tls_blocksize > 0)
{
/* For static TLS we have to allocate the memory here and
@@ -451,7 +451,7 @@ activate_nodelete (struct link_map_private *new)
NODELETE status for objects outside the local scope. */
for (struct link_map_private *l = GL (dl_ns)[new->l_ns]._ns_loaded;
l != NULL; l = l_next (l))
- if (l->l_nodelete_pending)
+ if (l->l_rw->l_nodelete_pending)
{
if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("activating NODELETE for %s [%lu]\n",
@@ -460,11 +460,11 @@ activate_nodelete (struct link_map_private *new)
/* The flag can already be true at this point, e.g. a signal
handler may have triggered lazy binding and set NODELETE
status immediately. */
- l->l_nodelete_active = true;
+ l->l_rw->l_nodelete_active = true;
/* This is just a debugging aid, to indicate that
activate_nodelete has run for this map. */
- l->l_nodelete_pending = false;
+ l->l_rw->l_nodelete_pending = false;
}
}
@@ -503,7 +503,7 @@ _dl_open_relocate_one_object (struct dl_open_args *args, struct r_debug *r,
_dl_start_profile ();
/* Prevent unloading the object. */
- GL(dl_profile_map)->l_nodelete_active = true;
+ GL(dl_profile_map)->l_rw->l_nodelete_active = true;
}
}
else
@@ -591,7 +591,7 @@ dl_open_worker_begin (void *a)
return;
/* This object is directly loaded. */
- ++new->l_direct_opencount;
+ ++new->l_rw->l_direct_opencount;
/* It was already open. */
if (__glibc_unlikely (new->l_searchlist.r_list != NULL))
@@ -600,7 +600,7 @@ dl_open_worker_begin (void *a)
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
new->l_public.l_name, new->l_ns,
- new->l_direct_opencount);
+ new->l_rw->l_direct_opencount);
/* If the user requested the object to be in the global
namespace but it is not so far, prepare to add it now. This
@@ -613,10 +613,10 @@ dl_open_worker_begin (void *a)
if (__glibc_unlikely (mode & RTLD_NODELETE))
{
if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES)
- && !new->l_nodelete_active)
+ && !new->l_rw->l_nodelete_active)
_dl_debug_printf ("marking %s [%lu] as NODELETE\n",
new->l_public.l_name, new->l_ns);
- new->l_nodelete_active = true;
+ new->l_rw->l_nodelete_active = true;
}
/* Finalize the addition to the global scope. */
@@ -633,7 +633,7 @@ dl_open_worker_begin (void *a)
/* Schedule NODELETE marking for the directly loaded object if
requested. */
if (__glibc_unlikely (mode & RTLD_NODELETE))
- new->l_nodelete_pending = true;
+ new->l_rw->l_nodelete_pending = true;
/* Load that object's dependencies. */
_dl_map_object_deps (new, NULL, 0, 0,
@@ -838,7 +838,7 @@ dl_open_worker (void *a)
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
new->l_public.l_name, new->l_ns,
- new->l_direct_opencount);
+ new->l_rw->l_direct_opencount);
}
void *
diff --git a/elf/dl-sort-maps.c b/elf/dl-sort-maps.c
index bcc49fa0e9..e3a547e4da 100644
--- a/elf/dl-sort-maps.c
+++ b/elf/dl-sort-maps.c
@@ -87,10 +87,11 @@ _dl_sort_maps_original (struct link_map_private **maps, unsigned int nmaps,
goto next;
}
- if (__glibc_unlikely (for_fini && maps[k]->l_reldeps != NULL))
+ if (__glibc_unlikely (for_fini && maps[k]->l_rw->l_reldeps != NULL))
{
- unsigned int m = maps[k]->l_reldeps->act;
- struct link_map_private **relmaps = &maps[k]->l_reldeps->list[0];
+ unsigned int m = maps[k]->l_rw->l_reldeps->act;
+ struct link_map_private **relmaps
+ = &maps[k]->l_rw->l_reldeps->list[0];
/* Look through the relocation dependencies of the object. */
while (m-- > 0)
@@ -153,15 +154,15 @@ dfs_traversal (struct link_map_private ***rpo, struct link_map_private *map,
}
}
- if (__glibc_unlikely (do_reldeps != NULL && map->l_reldeps != NULL))
+ if (__glibc_unlikely (do_reldeps != NULL && map->l_rw->l_reldeps != NULL))
{
/* Indicate that we encountered relocation dependencies during
traversal. */
*do_reldeps = true;
- for (int m = map->l_reldeps->act - 1; m >= 0; m--)
+ for (int m = map->l_rw->l_reldeps->act - 1; m >= 0; m--)
{
- struct link_map_private *dep = map->l_reldeps->list[m];
+ struct link_map_private *dep = map->l_rw->l_reldeps->list[m];
if (dep->l_visited == 0
&& dep->l_main_map == 0)
dfs_traversal (rpo, dep, do_reldeps);
diff --git a/elf/dl-support.c b/elf/dl-support.c
index 3648dd4d05..9c422baa9b 100644
--- a/elf/dl-support.c
+++ b/elf/dl-support.c
@@ -81,6 +81,7 @@ int _dl_bind_not;
static struct link_map_private _dl_main_map =
{
.l_public = { .l_name = (char *) "", },
+ .l_rw = &(struct link_map_rw) { },
.l_real = &_dl_main_map,
.l_ns = LM_ID_BASE,
.l_libname = &(struct libname_list) { .name = "", .dont_free = 1 },
diff --git a/elf/get-dynamic-info.h b/elf/get-dynamic-info.h
index 066395eab5..ce5f40834b 100644
--- a/elf/get-dynamic-info.h
+++ b/elf/get-dynamic-info.h
@@ -163,7 +163,7 @@ elf_get_dynamic_info (struct link_map_private *l, bool bootstrap,
{
l->l_flags_1 = info[VERSYMIDX (DT_FLAGS_1)]->d_un.d_val;
if (l->l_flags_1 & DF_1_NODELETE)
- l->l_nodelete_pending = true;
+ l->l_rw->l_nodelete_pending = true;
/* Only DT_1_SUPPORTED_MASK bits are supported, and we would like
to assert this, but we can't. Users have been setting
diff --git a/elf/loadtest.c b/elf/loadtest.c
index ca7b634347..15355dd8f9 100644
--- a/elf/loadtest.c
+++ b/elf/loadtest.c
@@ -77,9 +77,9 @@ static const struct
{ \
for (map = MAPS; map != NULL; map = l_next (map)) \
if (map->l_type == lt_loaded) \
- printf ("name = \"%s\", direct_opencount = %d\n", \
+ printf ("name = \"%s\", direct_opencount = %u\n", \
map->l_public.l_name, \
- (int) map->l_direct_opencount); \
+ map->l_rw->l_direct_opencount); \
fflush (stdout); \
} \
while (0)
@@ -191,8 +191,8 @@ main (int argc, char *argv[])
for (map = MAPS; map != NULL; map = l_next (map))
if (map->l_type == lt_loaded)
{
- printf ("name = \"%s\", direct_opencount = %d\n",
- map->l_public.l_name, (int) map->l_direct_opencount);
+ printf ("name = \"%s\", direct_opencount = %u\n",
+ map->l_public.l_name, map->l_rw->l_direct_opencount);
result = 1;
}
diff --git a/elf/neededtest.c b/elf/neededtest.c
index 1fce50b81a..7a555f7780 100644
--- a/elf/neededtest.c
+++ b/elf/neededtest.c
@@ -29,9 +29,8 @@ check_loaded_objects (const char **loaded)
for (lm = MAPS; lm; lm = l_next (lm))
{
if (lm->l_public.l_name && lm->l_public.l_name[0])
- printf(" %s, count = %d\n",
- lm->l_public.l_name,
- (int) lm->l_direct_opencount);
+ printf(" %s, count = %u\n",
+ lm->l_public.l_name, lm->l_rw->l_direct_opencount);
if (lm->l_type == lt_loaded && lm->l_public.l_name)
{
int match = 0;
diff --git a/elf/neededtest2.c b/elf/neededtest2.c
index 00b5dd0cb1..c5ae7dbe71 100644
--- a/elf/neededtest2.c
+++ b/elf/neededtest2.c
@@ -29,8 +29,8 @@ check_loaded_objects (const char **loaded)
for (lm = MAPS; lm; lm = l_next (lm))
{
if (lm->l_public.l_name && lm->l_public.l_name[0])
- printf(" %s, count = %d\n",
- lm->l_public.l_name, (int) lm->l_direct_opencount);
+ printf(" %s, count = %u\n",
+ lm->l_public.l_name, lm->l_rw->l_direct_opencount);
if (lm->l_type == lt_loaded && lm->l_public.l_name)
{
int match = 0;
diff --git a/elf/neededtest3.c b/elf/neededtest3.c
index cb625649fa..a32547a646 100644
--- a/elf/neededtest3.c
+++ b/elf/neededtest3.c
@@ -29,8 +29,8 @@ check_loaded_objects (const char **loaded)
for (lm = MAPS; lm; lm = l_next (lm))
{
if (lm->l_public.l_name && lm->l_public.l_name[0])
- printf(" %s, count = %d\n",
- lm->l_public.l_name, (int) lm->l_direct_opencount);
+ printf(" %s, count = %u\n",
+ lm->l_public.l_name, lm->l_rw->l_direct_opencount);
if (lm->l_type == lt_loaded && lm->l_public.l_name)
{
int match = 0;
diff --git a/elf/neededtest4.c b/elf/neededtest4.c
index 9f5d5fcbc3..58dc13f015 100644
--- a/elf/neededtest4.c
+++ b/elf/neededtest4.c
@@ -29,8 +29,8 @@ check_loaded_objects (const char **loaded)
for (lm = MAPS; lm; lm = l_next (lm))
{
if (lm->l_public.l_name && lm->l_public.l_name[0])
- printf(" %s, count = %d\n",
- lm->l_public.l_name, (int) lm->l_direct_opencount);
+ printf(" %s, count = %u\n",
+ lm->l_public.l_name, lm->l_rw->l_direct_opencount);
if (lm->l_type == lt_loaded && lm->l_public.l_name)
{
int match = 0;
diff --git a/elf/rtld.c b/elf/rtld.c
index 92d8fa6fd4..8e1cc38800 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -458,6 +458,9 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
interfere with __rtld_static_init. */
GLRO (dl_find_object) = &_dl_find_object;
+ static struct link_map_rw rtld_map_rw;
+ GL (dl_rtld_map).l_rw = &rtld_map_rw;
+
/* If it hasn't happen yet record the startup time. */
rtld_timer_start (&start_time);
#if !defined DONT_USE_BOOTSTRAP_MAP
@@ -1122,8 +1125,6 @@ rtld_setup_main_map (struct link_map_private *main_map)
main_map->l_map_end = 0;
/* Perhaps the executable has no PT_LOAD header entries at all. */
main_map->l_map_start = ~0;
- /* And it was opened directly. */
- ++main_map->l_direct_opencount;
main_map->l_contiguous = 1;
/* A PT_LOAD segment at an unexpected address will clear the
diff --git a/elf/unload.c b/elf/unload.c
index ab27d9da4a..b86aee4702 100644
--- a/elf/unload.c
+++ b/elf/unload.c
@@ -14,9 +14,8 @@
#define OUT \
for (map = MAPS; map != NULL; map = l_next (map)) \
if (map->l_type == lt_loaded) \
- printf ("name = \"%s\", direct_opencount = %d\n", \
- map->l_public.l_name, \
- (int) map->l_direct_opencount); \
+ printf ("name = \"%s\", direct_opencount = %u\n", \
+ map->l_public.l_name, map->l_rw->l_direct_opencount); \
fflush (stdout)
typedef struct
diff --git a/elf/unload2.c b/elf/unload2.c
index 3d6b224610..66fde61343 100644
--- a/elf/unload2.c
+++ b/elf/unload2.c
@@ -11,9 +11,8 @@
#define OUT \
for (map = MAPS; map != NULL; map = l_next (map)) \
if (map->l_type == lt_loaded) \
- printf ("name = \"%s\", direct_opencount = %d\n", \
- map->l_public.l_name, \
- (int) map->l_direct_opencount); \
+ printf ("name = \"%s\", direct_opencount = %u\n", \
+ map->l_public.l_name, map->l_rw->l_direct_opencount); \
fflush (stdout)
int
diff --git a/include/link.h b/include/link.h
index ae76a99c30..03194c0db2 100644
--- a/include/link.h
+++ b/include/link.h
@@ -75,6 +75,43 @@ struct r_search_path_struct
extern struct r_search_path_struct __rtld_search_dirs attribute_hidden;
extern struct r_search_path_struct __rtld_env_path_list attribute_hidden;
+
+/* Link map attributes that are always readable and writable. */
+struct link_map_rw
+{
+ /* List of the dependencies introduced through symbol binding. */
+ struct link_map_reldeps
+ {
+ unsigned int act;
+ struct link_map_private *list[];
+ } *l_reldeps;
+ unsigned int l_reldepsmax;
+
+ /* Reference count for dlopen/dlclose. See the l_direct_opencount
+ accessor function below. */
+ unsigned int l_direct_opencount;
+
+ /* Number of thread_local objects constructed by this DSO. This is
+ atomically accessed and modified and is not always protected by the load
+ lock. See also: CONCURRENCY NOTES in cxa_thread_atexit_impl.c. */
+ size_t l_tls_dtor_count;
+
+ /* True if ELF constructors have been called. */
+ bool l_init_called;
+
+ /* NODELETE status of the map. Only valid for maps of type
+ lt_loaded. Lazy binding sets l_nodelete_active directly,
+ potentially from signal handlers. Initial loading of an
+ DF_1_NODELETE object set l_nodelete_pending. Relocation may
+ set l_nodelete_pending as well. l_nodelete_pending maps are
+ promoted to l_nodelete_active status in the final stages of
+ dlopen, prior to calling ELF constructors. dlclose only
+ refuses to unload l_nodelete_active maps, the pending status is
+ ignored. */
+ bool l_nodelete_active;
+ bool l_nodelete_pending;
+};
+
/* Structure describing a loaded shared object. The `l_next' and `l_prev'
members form a chain of all the shared objects loaded at startup.
@@ -98,6 +135,9 @@ struct link_map_private
than one namespace. */
struct link_map_private *l_real;
+ /* Run-time writable fields. */
+ struct link_map_rw *l_rw;
+
/* Number of the namespace this link map belongs to. */
Lmid_t l_ns;
@@ -157,7 +197,6 @@ struct link_map_private
const Elf_Symndx *l_buckets;
};
- unsigned int l_direct_opencount; /* Reference count for dlopen/dlclose. */
enum /* Where this object came from. */
{
lt_executable, /* The main executable program. */
@@ -167,7 +206,6 @@ struct link_map_private
unsigned int l_dt_relr_ref:1; /* Nonzero if GLIBC_ABI_DT_RELR is
referenced. */
unsigned int l_relocated:1; /* Nonzero if object's relocations done. */
- unsigned int l_init_called:1; /* Nonzero if DT_INIT function called. */
unsigned int l_global:1; /* Nonzero if object in _dl_global_scope. */
unsigned int l_reserved:2; /* Reserved for internal use. */
unsigned int l_main_map:1; /* Nonzero for the map of the main program. */
@@ -200,18 +238,6 @@ struct link_map_private
needs to process this
lt_library map. */
- /* NODELETE status of the map. Only valid for maps of type
- lt_loaded. Lazy binding sets l_nodelete_active directly,
- potentially from signal handlers. Initial loading of an
- DF_1_NODELETE object set l_nodelete_pending. Relocation may
- set l_nodelete_pending as well. l_nodelete_pending maps are
- promoted to l_nodelete_active status in the final stages of
- dlopen, prior to calling ELF constructors. dlclose only
- refuses to unload l_nodelete_active maps, the pending status is
- ignored. */
- bool l_nodelete_active;
- bool l_nodelete_pending;
-
#include <link_map.h>
/* Collected information about own RPATH directories. */
@@ -263,14 +289,6 @@ struct link_map_private
/* List of object in order of the init and fini calls. */
struct link_map_private **l_initfini;
- /* List of the dependencies introduced through symbol binding. */
- struct link_map_reldeps
- {
- unsigned int act;
- struct link_map_private *list[];
- } *l_reldeps;
- unsigned int l_reldepsmax;
-
/* Nonzero if the DSO is used. */
unsigned int l_used;
@@ -321,11 +339,6 @@ struct link_map_private
/* Index of the module in the dtv array. */
size_t l_tls_modid;
- /* Number of thread_local objects constructed by this DSO. This is
- atomically accessed and modified and is not always protected by the load
- lock. See also: CONCURRENCY NOTES in cxa_thread_atexit_impl.c. */
- size_t l_tls_dtor_count;
-
/* Information used to change permission after the relocations are
done. */
ElfW(Addr) l_relro_addr;
diff --git a/stdlib/cxa_thread_atexit_impl.c b/stdlib/cxa_thread_atexit_impl.c
index d35002af30..360cde46a2 100644
--- a/stdlib/cxa_thread_atexit_impl.c
+++ b/stdlib/cxa_thread_atexit_impl.c
@@ -133,7 +133,7 @@ __cxa_thread_atexit_impl (dtor_func func, void *obj, void *dso_symbol)
_dl_close_worker is protected by the dl_load_lock. The execution in
__call_tls_dtors does not really depend on this value beyond the fact that
it should be atomic, so Relaxed MO should be sufficient. */
- atomic_fetch_add_relaxed (&lm_cache->l_tls_dtor_count, 1);
+ atomic_fetch_add_relaxed (&lm_cache->l_rw->l_tls_dtor_count, 1);
__rtld_lock_unlock_recursive (GL(dl_load_lock));
new->map = lm_cache;
@@ -159,7 +159,7 @@ __call_tls_dtors (void)
l_tls_dtor_count decrement. That way, we protect this access from a
potential DSO unload in _dl_close_worker, which happens when
l_tls_dtor_count is 0. See CONCURRENCY NOTES for more detail. */
- atomic_fetch_add_release (&cur->map->l_tls_dtor_count, -1);
+ atomic_fetch_add_release (&cur->map->l_rw->l_tls_dtor_count, -1);
free (cur);
}
}
diff --git a/sysdeps/x86/dl-prop.h b/sysdeps/x86/dl-prop.h
index ba70b06c3a..f24fc1b028 100644
--- a/sysdeps/x86/dl-prop.h
+++ b/sysdeps/x86/dl-prop.h
@@ -40,7 +40,7 @@ dl_isa_level_check (struct link_map_private *m, const char *program)
l = m->l_initfini[i];
/* Skip ISA level check if functions have been executed. */
- if (l->l_init_called)
+ if (l->l_rw->l_init_called)
continue;
#ifdef SHARED
--
2.43.0
^ permalink raw reply [flat|nested] 77+ messages in thread
* [PATCH v3 16/32] elf: Move l_tls_offset into read-write part of link map
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (14 preceding siblings ...)
2023-12-07 10:32 ` [PATCH v3 15/32] elf: Remove run-time-writable fields from struct link_map_private Florian Weimer
@ 2023-12-07 10:32 ` Florian Weimer
2024-02-26 21:57 ` Joseph Myers
2023-12-07 10:32 ` [PATCH v3 17/32] elf: Allocate auditor state after read-write " Florian Weimer
` (16 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:32 UTC (permalink / raw)
To: libc-alpha
This field is written concurrently from the dynamic TLS lazy
allocation code, which runs outside the main loader lock. The
protected memory allocator relies on that lock for synchronization.
The early initialization of l_tls_offset in _dl_start is not needed.
The dynamic loader does not have ELF TLS.
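As a rough, self-contained illustration of the concurrency situation
(the _sketch identifiers below are stand-ins, not glibc code): a lazy
TLS access path may pin a module to dynamic TLS without holding
GL(dl_load_lock), so the field has to live in the plain-writable l_rw
block instead of the lock-synchronized protected area. The
compare-and-exchange is a loose analogue of the
NO_TLS_OFFSET/FORCED_DYNAMIC_TLS_OFFSET checks visible in the hunks
below, written with standard C11 atomics.

#include <stdatomic.h>
#include <stddef.h>

#define SKETCH_NO_TLS_OFFSET      ((ptrdiff_t) 0)
#define SKETCH_FORCED_DYNAMIC_TLS ((ptrdiff_t) -1)

struct link_map_rw_sketch
{
  _Atomic ptrdiff_t l_tls_offset;   /* Writable without the loader lock.  */
};

/* May be reached from a TLS access fast path and race with dlopen.
   The exchange only succeeds if no static TLS offset has been
   assigned yet.  */
static void
sketch_force_dynamic_tls (struct link_map_rw_sketch *rw)
{
  ptrdiff_t expected = SKETCH_NO_TLS_OFFSET;
  atomic_compare_exchange_strong (&rw->l_tls_offset, &expected,
                                  SKETCH_FORCED_DYNAMIC_TLS);
}

Keeping such fields out of the protected area avoids having to take the
loader lock (and flip page protections) on these fast paths.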
---
csu/libc-tls.c | 4 +--
elf/dl-close.c | 40 ++++++++++++++------------
elf/dl-object.c | 2 +-
elf/dl-reloc.c | 15 +++++-----
elf/dl-static-tls.h | 8 +++---
elf/dl-support.c | 6 ++--
elf/dl-tls.c | 35 +++++++++++-----------
elf/rtld.c | 13 ++-------
htl/pt-alloc.c | 5 ++--
include/link.h | 36 ++++++++++++++---------
nptl/Versions | 3 +-
nptl_db/db_info.c | 1 +
nptl_db/structs.def | 3 +-
nptl_db/td_thr_tlsbase.c | 12 ++++++--
sysdeps/aarch64/dl-machine.h | 5 ++--
sysdeps/alpha/dl-machine.h | 4 +--
sysdeps/arc/dl-machine.h | 3 +-
sysdeps/arm/dl-machine.h | 4 +--
sysdeps/csky/dl-machine.h | 2 +-
sysdeps/hppa/dl-machine.h | 3 +-
sysdeps/i386/dl-machine.h | 11 +++----
sysdeps/ia64/dl-machine.h | 2 +-
sysdeps/loongarch/dl-tls.h | 2 +-
sysdeps/m68k/dl-tls.h | 2 +-
sysdeps/microblaze/dl-machine.h | 3 +-
sysdeps/mips/dl-tls.h | 2 +-
sysdeps/nios2/dl-tls.h | 2 +-
sysdeps/or1k/dl-machine.h | 4 +--
sysdeps/powerpc/dl-tls.h | 2 +-
sysdeps/powerpc/powerpc32/dl-machine.h | 4 +--
sysdeps/powerpc/powerpc64/dl-machine.h | 4 +--
sysdeps/riscv/dl-tls.h | 2 +-
sysdeps/s390/s390-32/dl-machine.h | 5 ++--
sysdeps/s390/s390-64/dl-machine.h | 5 ++--
sysdeps/sh/dl-machine.h | 7 +++--
sysdeps/sparc/sparc32/dl-machine.h | 4 +--
sysdeps/sparc/sparc64/dl-machine.h | 4 +--
sysdeps/x86_64/dl-machine.h | 5 ++--
38 files changed, 151 insertions(+), 123 deletions(-)
diff --git a/csu/libc-tls.c b/csu/libc-tls.c
index 7a3238789d..2a502874a7 100644
--- a/csu/libc-tls.c
+++ b/csu/libc-tls.c
@@ -174,10 +174,10 @@ __libc_setup_tls (void)
#if TLS_TCB_AT_TP
_dl_static_dtv[2].pointer.val = ((char *) tlsblock + tcb_offset
- roundup (memsz, align ?: 1));
- main_map->l_tls_offset = roundup (memsz, align ?: 1);
+ main_map->l_rw->l_tls_offset = roundup (memsz, align ?: 1);
#elif TLS_DTV_AT_TP
_dl_static_dtv[2].pointer.val = (char *) tlsblock + tcb_offset;
- main_map->l_tls_offset = tcb_offset;
+ main_map->l_rw->l_tls_offset = tcb_offset;
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
diff --git a/elf/dl-close.c b/elf/dl-close.c
index 1af60845f5..f242dcee9e 100644
--- a/elf/dl-close.c
+++ b/elf/dl-close.c
@@ -513,8 +513,8 @@ _dl_close_worker (struct link_map_private *map, bool force)
atomic_store_relaxed (&GL(dl_tls_max_dtv_idx),
GL(dl_tls_static_nelem));
- if (imap->l_tls_offset != NO_TLS_OFFSET
- && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
+ if (imap->l_rw->l_tls_offset != NO_TLS_OFFSET
+ && imap->l_rw->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
{
/* Collect a contiguous chunk built from the objects in
this search list, going in either direction. When the
@@ -522,19 +522,19 @@ _dl_close_worker (struct link_map_private *map, bool force)
reclaim it. */
#if TLS_TCB_AT_TP
if (tls_free_start == NO_TLS_OFFSET
- || (size_t) imap->l_tls_offset == tls_free_start)
+ || (size_t) imap->l_rw->l_tls_offset == tls_free_start)
{
/* Extend the contiguous chunk being reclaimed. */
tls_free_start
- = imap->l_tls_offset - imap->l_tls_blocksize;
+ = imap->l_rw->l_tls_offset - imap->l_tls_blocksize;
if (tls_free_end == NO_TLS_OFFSET)
- tls_free_end = imap->l_tls_offset;
+ tls_free_end = imap->l_rw->l_tls_offset;
}
- else if (imap->l_tls_offset - imap->l_tls_blocksize
+ else if (imap->l_rw->l_tls_offset - imap->l_tls_blocksize
== tls_free_end)
/* Extend the chunk backwards. */
- tls_free_end = imap->l_tls_offset;
+ tls_free_end = imap->l_rw->l_tls_offset;
else
{
/* This isn't contiguous with the last chunk freed.
@@ -543,19 +543,20 @@ _dl_close_worker (struct link_map_private *map, bool force)
if (tls_free_end == GL(dl_tls_static_used))
{
GL(dl_tls_static_used) = tls_free_start;
- tls_free_end = imap->l_tls_offset;
+ tls_free_end = imap->l_rw->l_tls_offset;
tls_free_start
= tls_free_end - imap->l_tls_blocksize;
}
- else if ((size_t) imap->l_tls_offset
+ else if ((size_t) imap->l_rw->l_tls_offset
== GL(dl_tls_static_used))
GL(dl_tls_static_used)
- = imap->l_tls_offset - imap->l_tls_blocksize;
- else if (tls_free_end < (size_t) imap->l_tls_offset)
+ = imap->l_rw->l_tls_offset - imap->l_tls_blocksize;
+ else if (tls_free_end
+ < (size_t) imap->l_rw->l_tls_offset)
{
/* We pick the later block. It has a chance to
be freed. */
- tls_free_end = imap->l_tls_offset;
+ tls_free_end = imap->l_rw->l_tls_offset;
tls_free_start
= tls_free_end - imap->l_tls_blocksize;
}
@@ -564,34 +565,37 @@ _dl_close_worker (struct link_map_private *map, bool force)
if (tls_free_start == NO_TLS_OFFSET)
{
tls_free_start = imap->l_tls_firstbyte_offset;
- tls_free_end = (imap->l_tls_offset
+ tls_free_end = (imap->l_rw->l_tls_offset
+ imap->l_tls_blocksize);
}
else if (imap->l_tls_firstbyte_offset == tls_free_end)
/* Extend the contiguous chunk being reclaimed. */
- tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
- else if (imap->l_tls_offset + imap->l_tls_blocksize
+ tls_free_end = (imap->l_rw->l_tls_offset
+ + imap->l_tls_blocksize);
+ else if (imap->l_rw->l_tls_offset + imap->l_tls_blocksize
== tls_free_start)
/* Extend the chunk backwards. */
tls_free_start = imap->l_tls_firstbyte_offset;
/* This isn't contiguous with the last chunk freed.
One of them will be leaked unless we can free
one block right away. */
- else if (imap->l_tls_offset + imap->l_tls_blocksize
+ else if (imap->l_rw->l_tls_offset + imap->l_tls_blocksize
== GL(dl_tls_static_used))
GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
else if (tls_free_end == GL(dl_tls_static_used))
{
GL(dl_tls_static_used) = tls_free_start;
tls_free_start = imap->l_tls_firstbyte_offset;
- tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
+ tls_free_end = (imap->l_rw->l_tls_offset
+ + imap->l_tls_blocksize);
}
else if (tls_free_end < imap->l_tls_firstbyte_offset)
{
/* We pick the later block. It has a chance to
be freed. */
tls_free_start = imap->l_tls_firstbyte_offset;
- tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
+ tls_free_end = (imap->l_rw->l_tls_offset
+ + imap->l_tls_blocksize);
}
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
diff --git a/elf/dl-object.c b/elf/dl-object.c
index c6c0f7824b..1a9b04dd3c 100644
--- a/elf/dl-object.c
+++ b/elf/dl-object.c
@@ -137,7 +137,7 @@ _dl_new_object (char *realname, const char *libname, int type,
new->l_used = 1;
new->l_loader = loader;
#if NO_TLS_OFFSET != 0
- new->l_tls_offset = NO_TLS_OFFSET;
+ new->l_rw->l_tls_offset = NO_TLS_OFFSET;
#endif
new->l_ns = nsid;
diff --git a/elf/dl-reloc.c b/elf/dl-reloc.c
index cbe4fcee4c..183efadfa2 100644
--- a/elf/dl-reloc.c
+++ b/elf/dl-reloc.c
@@ -41,7 +41,7 @@
dynamically loaded. This can only work if there is enough surplus in
the static TLS area already allocated for each running thread. If this
object's TLS segment is too big to fit, we fail with -1. If it fits,
- we set MAP->l_tls_offset and return 0.
+ we set MAP->l_rw->l_tls_offset and return 0.
A portion of the surplus static TLS can be optionally used to optimize
dynamic TLS access (with TLSDESC or powerpc TLS optimizations).
If OPTIONAL is true then TLS is allocated for such optimization and
@@ -53,7 +53,7 @@ _dl_try_allocate_static_tls (struct link_map_private *map, bool optional)
{
/* If we've already used the variable with dynamic access, or if the
alignment requirements are too high, fail. */
- if (map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
+ if (map->l_rw->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
|| map->l_tls_align > GLRO (dl_tls_static_align))
{
fail:
@@ -81,7 +81,7 @@ _dl_try_allocate_static_tls (struct link_map_private *map, bool optional)
size_t offset = GL(dl_tls_static_used) + use;
- map->l_tls_offset = GL(dl_tls_static_used) = offset;
+ map->l_rw->l_tls_offset = GL(dl_tls_static_used) = offset;
#elif TLS_DTV_AT_TP
/* dl_tls_static_used includes the TCB at the beginning. */
size_t offset = (ALIGN_UP(GL(dl_tls_static_used)
@@ -100,7 +100,7 @@ _dl_try_allocate_static_tls (struct link_map_private *map, bool optional)
else if (optional)
GL(dl_tls_static_optional) -= use;
- map->l_tls_offset = offset;
+ map->l_rw->l_tls_offset = offset;
map->l_tls_firstbyte_offset = GL(dl_tls_static_used);
GL(dl_tls_static_used) = used;
#else
@@ -134,7 +134,7 @@ void
__attribute_noinline__
_dl_allocate_static_tls (struct link_map_private *map)
{
- if (map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
+ if (map->l_rw->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
|| _dl_try_allocate_static_tls (map, false))
{
_dl_signal_error (0, map->l_public.l_name, NULL, N_("\
@@ -150,9 +150,10 @@ void
_dl_nothread_init_static_tls (struct link_map_private *map)
{
#if TLS_TCB_AT_TP
- void *dest = (char *) THREAD_SELF - map->l_tls_offset;
+ void *dest = (char *) THREAD_SELF - map->l_rw->l_tls_offset;
#elif TLS_DTV_AT_TP
- void *dest = (char *) THREAD_SELF + map->l_tls_offset + TLS_PRE_TCB_SIZE;
+ void *dest = ((char *) THREAD_SELF + map->l_rw->l_tls_offset
+ + TLS_PRE_TCB_SIZE);
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
diff --git a/elf/dl-static-tls.h b/elf/dl-static-tls.h
index d40dd882f6..b330100beb 100644
--- a/elf/dl-static-tls.h
+++ b/elf/dl-static-tls.h
@@ -29,8 +29,8 @@
can't be done, we fall back to the error that DF_STATIC_TLS is
intended to produce. */
#define HAVE_STATIC_TLS(map, sym_map) \
- (__builtin_expect ((sym_map)->l_tls_offset != NO_TLS_OFFSET \
- && ((sym_map)->l_tls_offset \
+ (__builtin_expect ((sym_map)->l_rw->l_tls_offset != NO_TLS_OFFSET \
+ && ((sym_map)->l_rw->l_tls_offset \
!= FORCED_DYNAMIC_TLS_OFFSET), 1))
#define CHECK_STATIC_TLS(map, sym_map) \
@@ -40,9 +40,9 @@
} while (0)
#define TRY_STATIC_TLS(map, sym_map) \
- (__builtin_expect ((sym_map)->l_tls_offset \
+ (__builtin_expect ((sym_map)->l_rw->l_tls_offset \
!= FORCED_DYNAMIC_TLS_OFFSET, 1) \
- && (__builtin_expect ((sym_map)->l_tls_offset != NO_TLS_OFFSET, 1) \
+ && (__builtin_expect ((sym_map)->l_rw->l_tls_offset != NO_TLS_OFFSET, 1)\
|| _dl_try_allocate_static_tls (sym_map, true) == 0))
int _dl_try_allocate_static_tls (struct link_map_private *map, bool optional)
diff --git a/elf/dl-support.c b/elf/dl-support.c
index 9c422baa9b..3e3eae7edc 100644
--- a/elf/dl-support.c
+++ b/elf/dl-support.c
@@ -81,7 +81,10 @@ int _dl_bind_not;
static struct link_map_private _dl_main_map =
{
.l_public = { .l_name = (char *) "", },
- .l_rw = &(struct link_map_rw) { },
+ .l_rw = &(struct link_map_rw)
+ {
+ .l_tls_offset = NO_TLS_OFFSET,
+ },
.l_real = &_dl_main_map,
.l_ns = LM_ID_BASE,
.l_libname = &(struct libname_list) { .name = "", .dont_free = 1 },
@@ -101,7 +104,6 @@ static struct link_map_private _dl_main_map =
.l_scope = _dl_main_map.l_scope_mem,
.l_local_scope = { &_dl_main_map.l_searchlist },
.l_used = 1,
- .l_tls_offset = NO_TLS_OFFSET,
.l_serial = 1,
};
diff --git a/elf/dl-tls.c b/elf/dl-tls.c
index 68ed806c8e..da8ba37df8 100644
--- a/elf/dl-tls.c
+++ b/elf/dl-tls.c
@@ -278,7 +278,7 @@ _dl_determine_tlsoffset (void)
/* XXX For some architectures we perhaps should store the
negative offset. */
- slotinfo[cnt].map->l_tls_offset = off;
+ slotinfo[cnt].map->l_rw->l_tls_offset = off;
continue;
}
}
@@ -295,7 +295,7 @@ _dl_determine_tlsoffset (void)
/* XXX For some architectures we perhaps should store the
negative offset. */
- slotinfo[cnt].map->l_tls_offset = off;
+ slotinfo[cnt].map->l_rw->l_tls_offset = off;
}
GL(dl_tls_static_used) = offset;
@@ -322,7 +322,7 @@ _dl_determine_tlsoffset (void)
off += slotinfo[cnt].map->l_tls_align;
if (off + slotinfo[cnt].map->l_tls_blocksize - firstbyte <= freetop)
{
- slotinfo[cnt].map->l_tls_offset = off - firstbyte;
+ slotinfo[cnt].map->l_rw->l_tls_offset = off - firstbyte;
freebottom = (off + slotinfo[cnt].map->l_tls_blocksize
- firstbyte);
continue;
@@ -333,7 +333,7 @@ _dl_determine_tlsoffset (void)
if (off - offset < firstbyte)
off += slotinfo[cnt].map->l_tls_align;
- slotinfo[cnt].map->l_tls_offset = off - firstbyte;
+ slotinfo[cnt].map->l_rw->l_tls_offset = off - firstbyte;
if (off - firstbyte - offset > freetop - freebottom)
{
freebottom = offset;
@@ -573,17 +573,17 @@ _dl_allocate_tls_init (void *result, bool init_tls)
dtv[map->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED;
dtv[map->l_tls_modid].pointer.to_free = NULL;
- if (map->l_tls_offset == NO_TLS_OFFSET
- || map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET)
+ if (map->l_rw->l_tls_offset == NO_TLS_OFFSET
+ || map->l_rw->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET)
continue;
assert (map->l_tls_modid == total + cnt);
assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
#if TLS_TCB_AT_TP
- assert ((size_t) map->l_tls_offset >= map->l_tls_blocksize);
- dest = (char *) result - map->l_tls_offset;
+ assert ((size_t) map->l_rw->l_tls_offset >= map->l_tls_blocksize);
+ dest = (char *) result - map->l_rw->l_tls_offset;
#elif TLS_DTV_AT_TP
- dest = (char *) result + map->l_tls_offset;
+ dest = (char *) result + map->l_rw->l_tls_offset;
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
@@ -872,22 +872,23 @@ tls_get_addr_tail (GET_ADDR_ARGS, dtv_t *dtv, struct link_map_private *the_map)
variable into static storage, we'll wait until the address in the
static TLS block is set up, and use that. If we're undecided
yet, make sure we make the decision holding the lock as well. */
- if (__glibc_unlikely (the_map->l_tls_offset
+ if (__glibc_unlikely (the_map->l_rw->l_tls_offset
!= FORCED_DYNAMIC_TLS_OFFSET))
{
__rtld_lock_lock_recursive (GL(dl_load_tls_lock));
- if (__glibc_likely (the_map->l_tls_offset == NO_TLS_OFFSET))
+ if (__glibc_likely (the_map->l_rw->l_tls_offset == NO_TLS_OFFSET))
{
- the_map->l_tls_offset = FORCED_DYNAMIC_TLS_OFFSET;
+ the_map->l_rw->l_tls_offset = FORCED_DYNAMIC_TLS_OFFSET;
__rtld_lock_unlock_recursive (GL(dl_load_tls_lock));
}
- else if (__glibc_likely (the_map->l_tls_offset
+ else if (__glibc_likely (the_map->l_rw->l_tls_offset
!= FORCED_DYNAMIC_TLS_OFFSET))
{
#if TLS_TCB_AT_TP
- void *p = (char *) THREAD_SELF - the_map->l_tls_offset;
+ void *p = (char *) THREAD_SELF - the_map->l_rw->l_tls_offset;
#elif TLS_DTV_AT_TP
- void *p = (char *) THREAD_SELF + the_map->l_tls_offset + TLS_PRE_TCB_SIZE;
+ void *p = ((char *) THREAD_SELF + the_map->l_rw->l_tls_offset
+ + TLS_PRE_TCB_SIZE);
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
@@ -1076,9 +1077,9 @@ static inline void __attribute__((always_inline))
init_one_static_tls (struct pthread *curp, struct link_map_private *map)
{
# if TLS_TCB_AT_TP
- void *dest = (char *) curp - map->l_tls_offset;
+ void *dest = (char *) curp - map->l_rw->l_tls_offset;
# elif TLS_DTV_AT_TP
- void *dest = (char *) curp + map->l_tls_offset + TLS_PRE_TCB_SIZE;
+ void *dest = (char *) curp + map->l_rw->l_tls_offset + TLS_PRE_TCB_SIZE;
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif
diff --git a/elf/rtld.c b/elf/rtld.c
index 8e1cc38800..25a9c8aa58 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -460,6 +460,9 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
static struct link_map_rw rtld_map_rw;
GL (dl_rtld_map).l_rw = &rtld_map_rw;
+#if NO_TLS_OFFSET != 0
+ GL (dl_rtld_map).l_rw->l_tls_offset = NO_TLS_OFFSET;
+#endif
/* If it hasn't happen yet record the startup time. */
rtld_timer_start (&start_time);
@@ -481,12 +484,6 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
GL(dl_rtld_map).l_real = &GL(dl_rtld_map);
GL(dl_rtld_map).l_map_start = (ElfW(Addr)) &__ehdr_start;
GL(dl_rtld_map).l_map_end = (ElfW(Addr)) _end;
- /* Copy the TLS related data if necessary. */
-#ifndef DONT_USE_BOOTSTRAP_MAP
-# if NO_TLS_OFFSET != 0
- GL(dl_rtld_map).l_tls_offset = NO_TLS_OFFSET;
-# endif
-#endif
/* Initialize the stack end variable. */
__libc_stack_end = __builtin_frame_address (0);
@@ -552,10 +549,6 @@ _dl_start (void *arg)
bootstrap_map.l_ld_readonly = DL_RO_DYN_SECTION;
elf_get_dynamic_info (&bootstrap_map, true, false);
-#if NO_TLS_OFFSET != 0
- bootstrap_map.l_tls_offset = NO_TLS_OFFSET;
-#endif
-
#ifdef ELF_MACHINE_BEFORE_RTLD_RELOC
ELF_MACHINE_BEFORE_RTLD_RELOC (&bootstrap_map, bootstrap_map.l_info);
#endif
diff --git a/htl/pt-alloc.c b/htl/pt-alloc.c
index c94a4dcad9..ec1b66c53d 100644
--- a/htl/pt-alloc.c
+++ b/htl/pt-alloc.c
@@ -217,9 +217,10 @@ __pthread_init_static_tls (struct link_map_private *map)
continue;
# if TLS_TCB_AT_TP
- void *dest = (char *) t->tcb - map->l_tls_offset;
+ void *dest = (char *) t->tcb - map->l_rw->l_tls_offset;
# elif TLS_DTV_AT_TP
- void *dest = (char *) t->tcb + map->l_tls_offset + TLS_PRE_TCB_SIZE;
+ void *dest = ((char *) t->tcb + map->l_rw->l_tls_offset
+ + TLS_PRE_TCB_SIZE);
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif
diff --git a/include/link.h b/include/link.h
index 03194c0db2..220926248c 100644
--- a/include/link.h
+++ b/include/link.h
@@ -91,6 +91,28 @@ struct link_map_rw
accessor function below. */
unsigned int l_direct_opencount;
+ /* For objects present at startup time: offset in the static TLS
+ block. For loaded objects, it can be NO_TLS_OFFSET (not yet
+ initialized), FORCED_DYNAMIC_TLS_OFFSET (if fully dynamic TLS is
+ used), or an actual TLS offset (if the static TLS allocation has
+ been re-used to satisfy dynamic TLS needs).
+
+ This field is written outside the general loader lock, so it has
+ to reside in the read-write portion of the link map. */
+#ifndef NO_TLS_OFFSET
+# define NO_TLS_OFFSET 0
+#endif
+#ifndef FORCED_DYNAMIC_TLS_OFFSET
+# if NO_TLS_OFFSET == 0
+# define FORCED_DYNAMIC_TLS_OFFSET -1
+# elif NO_TLS_OFFSET == -1
+# define FORCED_DYNAMIC_TLS_OFFSET -2
+# else
+# error "FORCED_DYNAMIC_TLS_OFFSET is not defined"
+# endif
+#endif
+ ptrdiff_t l_tls_offset;
+
/* Number of thread_local objects constructed by this DSO. This is
atomically accessed and modified and is not always protected by the load
lock. See also: CONCURRENCY NOTES in cxa_thread_atexit_impl.c. */
@@ -322,20 +344,6 @@ struct link_map_private
size_t l_tls_align;
/* Offset of first byte module alignment. */
size_t l_tls_firstbyte_offset;
-#ifndef NO_TLS_OFFSET
-# define NO_TLS_OFFSET 0
-#endif
-#ifndef FORCED_DYNAMIC_TLS_OFFSET
-# if NO_TLS_OFFSET == 0
-# define FORCED_DYNAMIC_TLS_OFFSET -1
-# elif NO_TLS_OFFSET == -1
-# define FORCED_DYNAMIC_TLS_OFFSET -2
-# else
-# error "FORCED_DYNAMIC_TLS_OFFSET is not defined"
-# endif
-#endif
- /* For objects present at startup time: offset in the static TLS block. */
- ptrdiff_t l_tls_offset;
/* Index of the module in the dtv array. */
size_t l_tls_modid;
diff --git a/nptl/Versions b/nptl/Versions
index 3221de89d1..ea1ab9e5a8 100644
--- a/nptl/Versions
+++ b/nptl/Versions
@@ -404,8 +404,9 @@ libc {
_thread_db_dtv_slotinfo_map;
_thread_db_dtv_t_counter;
_thread_db_dtv_t_pointer_val;
+ _thread_db_link_map_l_rw;
_thread_db_link_map_l_tls_modid;
- _thread_db_link_map_l_tls_offset;
+ _thread_db_link_map_rw_l_tls_offset;
_thread_db_list_t_next;
_thread_db_list_t_prev;
_thread_db_pthread_cancelhandling;
diff --git a/nptl_db/db_info.c b/nptl_db/db_info.c
index 64e5d220ef..20ccb1dda0 100644
--- a/nptl_db/db_info.c
+++ b/nptl_db/db_info.c
@@ -38,6 +38,7 @@ typedef struct
} dtv;
typedef struct link_map_private link_map;
+typedef struct link_map_rw link_map_rw;
typedef struct rtld_global rtld_global;
typedef struct dtv_slotinfo_list dtv_slotinfo_list;
typedef struct dtv_slotinfo dtv_slotinfo;
diff --git a/nptl_db/structs.def b/nptl_db/structs.def
index c105f0925c..25013c7c09 100644
--- a/nptl_db/structs.def
+++ b/nptl_db/structs.def
@@ -93,7 +93,8 @@ DB_STRUCT (pthread_key_data_level2)
DB_STRUCT_ARRAY_FIELD (pthread_key_data_level2, data)
DB_STRUCT_FIELD (link_map, l_tls_modid)
-DB_STRUCT_FIELD (link_map, l_tls_offset)
+DB_STRUCT_FIELD (link_map, l_rw)
+DB_STRUCT_FIELD (link_map_rw, l_tls_offset)
DB_STRUCT_ARRAY_FIELD (dtv, dtv)
#define pointer_val pointer.val /* Field of anonymous struct in dtv_t. */
diff --git a/nptl_db/td_thr_tlsbase.c b/nptl_db/td_thr_tlsbase.c
index d05abf6213..1755129cf4 100644
--- a/nptl_db/td_thr_tlsbase.c
+++ b/nptl_db/td_thr_tlsbase.c
@@ -191,9 +191,15 @@ td_thr_tlsbase (const td_thrhandle_t *th,
/* Is the DTV current enough? */
if (dtvgen < modgen)
{
- try_static_tls:
- /* If the module uses Static TLS, we're still good. */
- err = DB_GET_FIELD (temp, th->th_ta_p, map, link_map, l_tls_offset, 0);
+ try_static_tls:;
+ /* If the module uses Static TLS, we're still good. Follow the
+ l_rw pointer to l_tls_offset. */
+ psaddr_t l_rw;
+ err = DB_GET_FIELD (l_rw, th->th_ta_p, map, link_map, l_rw, 0);
+ if (err != TD_OK)
+ return err;
+ err = DB_GET_FIELD (temp, th->th_ta_p, l_rw, link_map_rw,
+ l_tls_offset, 0);
if (err != TD_OK)
return err;
ptrdiff_t tlsoff = (uintptr_t)temp;
diff --git a/sysdeps/aarch64/dl-machine.h b/sysdeps/aarch64/dl-machine.h
index 370bbfceba..63a48b1452 100644
--- a/sysdeps/aarch64/dl-machine.h
+++ b/sysdeps/aarch64/dl-machine.h
@@ -250,7 +250,8 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
else
# endif
{
- td->arg = (void*)(sym->st_value + sym_map->l_tls_offset
+ td->arg = (void*)(sym->st_value
+ + sym_map->l_rw->l_tls_offset
+ reloc->r_addend);
td->entry = _dl_tlsdesc_return;
}
@@ -275,7 +276,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
{
CHECK_STATIC_TLS (map, sym_map);
*reloc_addr =
- sym->st_value + reloc->r_addend + sym_map->l_tls_offset;
+ sym->st_value + reloc->r_addend + sym_map->l_rw->l_tls_offset;
}
break;
diff --git a/sysdeps/alpha/dl-machine.h b/sysdeps/alpha/dl-machine.h
index 746c7d8189..4553f4fbca 100644
--- a/sysdeps/alpha/dl-machine.h
+++ b/sysdeps/alpha/dl-machine.h
@@ -402,12 +402,12 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
else if (r_type == R_ALPHA_TPREL64)
{
# ifdef RTLD_BOOTSTRAP
- *reloc_addr = sym_raw_value + map->l_tls_offset;
+ *reloc_addr = sym_raw_value + map->l_rw->l_tls_offset;
# else
if (sym_map)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = sym_raw_value + sym_map->l_tls_offset;
+ *reloc_addr = sym_raw_value + sym_map->l_rw->l_tls_offset;
}
# endif
}
diff --git a/sysdeps/arc/dl-machine.h b/sysdeps/arc/dl-machine.h
index c07ebe0838..ade3646d47 100644
--- a/sysdeps/arc/dl-machine.h
+++ b/sysdeps/arc/dl-machine.h
@@ -285,7 +285,8 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = sym_map->l_tls_offset + sym->st_value + reloc->r_addend;
+ *reloc_addr = (sym_map->l_rw->l_tls_offset + sym->st_value
+ + reloc->r_addend);
}
break;
diff --git a/sysdeps/arm/dl-machine.h b/sysdeps/arm/dl-machine.h
index e5fd0cc4d2..b138596252 100644
--- a/sysdeps/arm/dl-machine.h
+++ b/sysdeps/arm/dl-machine.h
@@ -406,7 +406,7 @@ elf_machine_rel (struct link_map_private *map, struct r_scope_elem *scope[],
# endif
# endif
{
- td->argument.value = value + sym_map->l_tls_offset;
+ td->argument.value = value + sym_map->l_rw->l_tls_offset;
td->entry = _dl_tlsdesc_return;
}
}
@@ -436,7 +436,7 @@ elf_machine_rel (struct link_map_private *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr += sym->st_value + sym_map->l_tls_offset;
+ *reloc_addr += sym->st_value + sym_map->l_rw->l_tls_offset;
}
break;
case R_ARM_IRELATIVE:
diff --git a/sysdeps/csky/dl-machine.h b/sysdeps/csky/dl-machine.h
index 8dcb9b11c8..e01b993391 100644
--- a/sysdeps/csky/dl-machine.h
+++ b/sysdeps/csky/dl-machine.h
@@ -303,7 +303,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = (sym->st_value + sym_map->l_tls_offset
+ *reloc_addr = (sym->st_value + sym_map->l_rw->l_tls_offset
+ reloc->r_addend);
}
break;
diff --git a/sysdeps/hppa/dl-machine.h b/sysdeps/hppa/dl-machine.h
index 60ef82c3e7..a37e834df8 100644
--- a/sysdeps/hppa/dl-machine.h
+++ b/sysdeps/hppa/dl-machine.h
@@ -716,7 +716,8 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- value = sym_map->l_tls_offset + sym->st_value + reloc->r_addend;
+ value = (sym_map->l_rw->l_tls_offset + sym->st_value
+ + reloc->r_addend);
}
break;
#endif /* use TLS */
diff --git a/sysdeps/i386/dl-machine.h b/sysdeps/i386/dl-machine.h
index b0bba3621f..b0bcb3bb53 100644
--- a/sysdeps/i386/dl-machine.h
+++ b/sysdeps/i386/dl-machine.h
@@ -361,7 +361,8 @@ and creates an unsatisfiable circular dependency.\n",
# endif
# endif
{
- td->arg = (void*)(sym->st_value - sym_map->l_tls_offset
+ td->arg = (void*)(sym->st_value
+ - sym_map->l_rw->l_tls_offset
+ (ElfW(Word))td->arg);
td->entry = _dl_tlsdesc_return;
}
@@ -371,7 +372,7 @@ and creates an unsatisfiable circular dependency.\n",
case R_386_TLS_TPOFF32:
/* The offset is positive, backward from the thread pointer. */
# ifdef RTLD_BOOTSTRAP
- *reloc_addr += map->l_tls_offset - sym->st_value;
+ *reloc_addr += map->l_rw->l_tls_offset - sym->st_value;
# else
/* We know the offset of object the symbol is contained in.
It is a positive value which will be subtracted from the
@@ -380,14 +381,14 @@ and creates an unsatisfiable circular dependency.\n",
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr += sym_map->l_tls_offset - sym->st_value;
+ *reloc_addr += sym_map->l_rw->l_tls_offset - sym->st_value;
}
# endif
break;
case R_386_TLS_TPOFF:
/* The offset is negative, forward from the thread pointer. */
# ifdef RTLD_BOOTSTRAP
- *reloc_addr += sym->st_value - map->l_tls_offset;
+ *reloc_addr += sym->st_value - map->l_rw->l_tls_offset;
# else
/* We know the offset of object the symbol is contained in.
It is a negative value which will be added to the
@@ -395,7 +396,7 @@ and creates an unsatisfiable circular dependency.\n",
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr += sym->st_value - sym_map->l_tls_offset;
+ *reloc_addr += sym->st_value - sym_map->l_rw->l_tls_offset;
}
# endif
break;
diff --git a/sysdeps/ia64/dl-machine.h b/sysdeps/ia64/dl-machine.h
index e9c59dc65a..0379322b99 100644
--- a/sysdeps/ia64/dl-machine.h
+++ b/sysdeps/ia64/dl-machine.h
@@ -399,7 +399,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
#ifndef RTLD_BOOTSTRAP
CHECK_STATIC_TLS (map, sym_map);
#endif
- value += sym_map->l_tls_offset - sym_map->l_public.l_addr;
+ value += sym_map->l_rw->l_tls_offset - sym_map->l_public.l_addr;
}
else
_dl_reloc_bad_type (map, r_type, 0);
diff --git a/sysdeps/loongarch/dl-tls.h b/sysdeps/loongarch/dl-tls.h
index a551594b64..aef696606d 100644
--- a/sysdeps/loongarch/dl-tls.h
+++ b/sysdeps/loongarch/dl-tls.h
@@ -32,7 +32,7 @@ typedef struct
/* Compute the value for a GOTTPREL reloc. */
#define TLS_TPREL_VALUE(sym_map, sym) \
- ((sym_map)->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
+ ((sym_map)->l_rw->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
/* Compute the value for a DTPREL reloc. */
#define TLS_DTPREL_VALUE(sym) ((sym)->st_value - TLS_DTV_OFFSET)
diff --git a/sysdeps/m68k/dl-tls.h b/sysdeps/m68k/dl-tls.h
index a98948dbd5..c67d441808 100644
--- a/sysdeps/m68k/dl-tls.h
+++ b/sysdeps/m68k/dl-tls.h
@@ -33,7 +33,7 @@ typedef struct
/* Compute the value for a TPREL reloc. */
#define TLS_TPREL_VALUE(sym_map, sym, reloc) \
- ((sym_map)->l_tls_offset + (sym)->st_value + (reloc)->r_addend \
+ ((sym_map)->l_rw->l_tls_offset + (sym)->st_value + (reloc)->r_addend \
- TLS_TP_OFFSET)
/* Compute the value for a DTPREL reloc. */
diff --git a/sysdeps/microblaze/dl-machine.h b/sysdeps/microblaze/dl-machine.h
index e8f9cebe2f..213048d593 100644
--- a/sysdeps/microblaze/dl-machine.h
+++ b/sysdeps/microblaze/dl-machine.h
@@ -263,7 +263,8 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = sym->st_value + sym_map->l_tls_offset + reloc->r_addend;
+ *reloc_addr = (sym->st_value + sym_map->l_rw->l_tls_offset
+ + reloc->r_addend);
}
}
#endif
diff --git a/sysdeps/mips/dl-tls.h b/sysdeps/mips/dl-tls.h
index b4dca8291e..6f516c820b 100644
--- a/sysdeps/mips/dl-tls.h
+++ b/sysdeps/mips/dl-tls.h
@@ -33,7 +33,7 @@ typedef struct
/* Compute the value for a GOTTPREL reloc. */
#define TLS_TPREL_VALUE(sym_map, sym) \
- ((sym_map)->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
+ ((sym_map)->l_rw->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
/* Compute the value for a DTPREL reloc. */
#define TLS_DTPREL_VALUE(sym) \
diff --git a/sysdeps/nios2/dl-tls.h b/sysdeps/nios2/dl-tls.h
index 4059b7e10d..df33af6492 100644
--- a/sysdeps/nios2/dl-tls.h
+++ b/sysdeps/nios2/dl-tls.h
@@ -33,7 +33,7 @@ typedef struct
/* Compute the value for a GOTTPREL reloc. */
#define TLS_TPREL_VALUE(sym_map, sym) \
- ((sym_map)->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
+ ((sym_map)->l_rw->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
/* Compute the value for a DTPREL reloc. */
#define TLS_DTPREL_VALUE(sym) \
diff --git a/sysdeps/or1k/dl-machine.h b/sysdeps/or1k/dl-machine.h
index d016493be9..41abaab4da 100644
--- a/sysdeps/or1k/dl-machine.h
+++ b/sysdeps/or1k/dl-machine.h
@@ -251,13 +251,13 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
case R_OR1K_TLS_TPOFF:
# ifdef RTLD_BOOTSTRAP
*reloc_addr = sym->st_value + reloc->r_addend +
- map->l_tls_offset - TLS_TCB_SIZE;
+ map->l_rw->l_tls_offset - TLS_TCB_SIZE;
# else
if (sym_map != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
*reloc_addr = sym->st_value + reloc->r_addend +
- sym_map->l_tls_offset - TLS_TCB_SIZE;
+ sym_map->l_rw->l_tls_offset - TLS_TCB_SIZE;
}
# endif
break;
diff --git a/sysdeps/powerpc/dl-tls.h b/sysdeps/powerpc/dl-tls.h
index 3324713e40..a444d63d9b 100644
--- a/sysdeps/powerpc/dl-tls.h
+++ b/sysdeps/powerpc/dl-tls.h
@@ -35,7 +35,7 @@ typedef struct
/* Compute the value for a @tprel reloc. */
#define TLS_TPREL_VALUE(sym_map, sym, reloc) \
- ((sym_map)->l_tls_offset + (sym)->st_value + (reloc)->r_addend \
+ ((sym_map)->l_rw->l_tls_offset + (sym)->st_value + (reloc)->r_addend \
- TLS_TP_OFFSET)
/* Compute the value for a @dtprel reloc. */
diff --git a/sysdeps/powerpc/powerpc32/dl-machine.h b/sysdeps/powerpc/powerpc32/dl-machine.h
index 789255e427..8fa1cc0d71 100644
--- a/sysdeps/powerpc/powerpc32/dl-machine.h
+++ b/sysdeps/powerpc/powerpc32/dl-machine.h
@@ -355,7 +355,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
if (!NOT_BOOTSTRAP)
{
reloc_addr[0] = 0;
- reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
+ reloc_addr[1] = (sym_map->l_rw->l_tls_offset - TLS_TP_OFFSET
+ TLS_DTV_OFFSET);
break;
}
@@ -369,7 +369,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
{
reloc_addr[0] = 0;
/* Set up for local dynamic. */
- reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
+ reloc_addr[1] = (sym_map->l_rw->l_tls_offset - TLS_TP_OFFSET
+ TLS_DTV_OFFSET);
break;
}
diff --git a/sysdeps/powerpc/powerpc64/dl-machine.h b/sysdeps/powerpc/powerpc64/dl-machine.h
index 9db1838f44..6104d6ae9c 100644
--- a/sysdeps/powerpc/powerpc64/dl-machine.h
+++ b/sysdeps/powerpc/powerpc64/dl-machine.h
@@ -732,7 +732,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
{
#ifdef RTLD_BOOTSTRAP
reloc_addr[0] = 0;
- reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
+ reloc_addr[1] = (sym_map->l_rw->l_tls_offset - TLS_TP_OFFSET
+ TLS_DTV_OFFSET);
return;
#else
@@ -746,7 +746,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
{
reloc_addr[0] = 0;
/* Set up for local dynamic. */
- reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
+ reloc_addr[1] = (sym_map->l_rw->l_tls_offset - TLS_TP_OFFSET
+ TLS_DTV_OFFSET);
return;
}
diff --git a/sysdeps/riscv/dl-tls.h b/sysdeps/riscv/dl-tls.h
index 67c8ae639c..8ede95f014 100644
--- a/sysdeps/riscv/dl-tls.h
+++ b/sysdeps/riscv/dl-tls.h
@@ -33,7 +33,7 @@ typedef struct
/* Compute the value for a GOTTPREL reloc. */
#define TLS_TPREL_VALUE(sym_map, sym) \
- ((sym_map)->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
+ ((sym_map)->l_rw->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
/* Compute the value for a DTPREL reloc. */
#define TLS_DTPREL_VALUE(sym) \
diff --git a/sysdeps/s390/s390-32/dl-machine.h b/sysdeps/s390/s390-32/dl-machine.h
index a92248d2ed..ea83216155 100644
--- a/sysdeps/s390/s390-32/dl-machine.h
+++ b/sysdeps/s390/s390-32/dl-machine.h
@@ -357,7 +357,8 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
case R_390_TLS_TPOFF:
/* The offset is negative, forward from the thread pointer. */
#ifdef RTLD_BOOTSTRAP
- *reloc_addr = sym->st_value + reloc->r_addend - map->l_tls_offset;
+ *reloc_addr = (sym->st_value + reloc->r_addend
+ - map->l_rw->l_tls_offset);
#else
/* We know the offset of the object the symbol is contained in.
It is a negative value which will be added to the
@@ -366,7 +367,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
{
CHECK_STATIC_TLS (map, sym_map);
*reloc_addr = (sym->st_value + reloc->r_addend
- - sym_map->l_tls_offset);
+ - sym_map->l_rw->l_tls_offset);
}
#endif
break;
diff --git a/sysdeps/s390/s390-64/dl-machine.h b/sysdeps/s390/s390-64/dl-machine.h
index f21df5232c..a4be61d61c 100644
--- a/sysdeps/s390/s390-64/dl-machine.h
+++ b/sysdeps/s390/s390-64/dl-machine.h
@@ -333,7 +333,8 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
case R_390_TLS_TPOFF:
/* The offset is negative, forward from the thread pointer. */
#ifdef RTLD_BOOTSTRAP
- *reloc_addr = sym->st_value + reloc->r_addend - map->l_tls_offset;
+ *reloc_addr = (sym->st_value + reloc->r_addend
+ - map->l_rw->l_tls_offset);
#else
/* We know the offset of the object the symbol is contained in.
It is a negative value which will be added to the
@@ -342,7 +343,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
{
CHECK_STATIC_TLS (map, sym_map);
*reloc_addr = (sym->st_value + reloc->r_addend
- - sym_map->l_tls_offset);
+ - sym_map->l_rw->l_tls_offset);
}
#endif
break;
diff --git a/sysdeps/sh/dl-machine.h b/sysdeps/sh/dl-machine.h
index 6841671ae6..786cd00cab 100644
--- a/sysdeps/sh/dl-machine.h
+++ b/sysdeps/sh/dl-machine.h
@@ -364,7 +364,8 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
case R_SH_TLS_TPOFF32:
/* The offset is positive, afterward from the thread pointer. */
#ifdef RTLD_BOOTSTRAP
- *reloc_addr = map->l_tls_offset + sym->st_value + reloc->r_addend;
+ *reloc_addr = (map->l_rw->l_tls_offset + sym->st_value
+ + reloc->r_addend);
#else
/* We know the offset of object the symbol is contained in.
It is a positive value which will be added to the thread
@@ -373,8 +374,8 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = sym_map->l_tls_offset + sym->st_value
- + reloc->r_addend;
+ *reloc_addr = (sym_map->l_rw->l_tls_offset + sym->st_value
+ + reloc->r_addend);
}
#endif
break;
diff --git a/sysdeps/sparc/sparc32/dl-machine.h b/sysdeps/sparc/sparc32/dl-machine.h
index 2e9c84e771..450e17295a 100644
--- a/sysdeps/sparc/sparc32/dl-machine.h
+++ b/sysdeps/sparc/sparc32/dl-machine.h
@@ -378,7 +378,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = sym->st_value - sym_map->l_tls_offset
+ *reloc_addr = sym->st_value - sym_map->l_rw->l_tls_offset
+ reloc->r_addend;
}
break;
@@ -388,7 +388,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- value = sym->st_value - sym_map->l_tls_offset
+ value = sym->st_value - sym_map->l_rw->l_tls_offset
+ reloc->r_addend;
if (r_type == R_SPARC_TLS_LE_HIX22)
*reloc_addr = (*reloc_addr & 0xffc00000) | ((~value) >> 10);
diff --git a/sysdeps/sparc/sparc64/dl-machine.h b/sysdeps/sparc/sparc64/dl-machine.h
index c06f568a45..21ab5572a4 100644
--- a/sysdeps/sparc/sparc64/dl-machine.h
+++ b/sysdeps/sparc/sparc64/dl-machine.h
@@ -387,7 +387,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = sym->st_value - sym_map->l_tls_offset
+ *reloc_addr = sym->st_value - sym_map->l_rw->l_tls_offset
+ reloc->r_addend;
}
break;
@@ -397,7 +397,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- value = sym->st_value - sym_map->l_tls_offset
+ value = sym->st_value - sym_map->l_rw->l_tls_offset
+ reloc->r_addend;
if (r_type == R_SPARC_TLS_LE_HIX22)
*(unsigned int *)reloc_addr =
diff --git a/sysdeps/x86_64/dl-machine.h b/sysdeps/x86_64/dl-machine.h
index 915ac77dd8..3136a0335b 100644
--- a/sysdeps/x86_64/dl-machine.h
+++ b/sysdeps/x86_64/dl-machine.h
@@ -375,7 +375,8 @@ and creates an unsatisfiable circular dependency.\n",
else
# endif
{
- td->arg = (void*)(sym->st_value - sym_map->l_tls_offset
+ td->arg = (void*)(sym->st_value
+ - sym_map->l_rw->l_tls_offset
+ reloc->r_addend);
td->entry = _dl_tlsdesc_return;
}
@@ -391,7 +392,7 @@ and creates an unsatisfiable circular dependency.\n",
It is a negative value which will be added to the
thread pointer. */
value = (sym->st_value + reloc->r_addend
- - sym_map->l_tls_offset);
+ - sym_map->l_rw->l_tls_offset);
# ifdef __ILP32__
/* The symbol and addend values are 32 bits but the GOT
entry is 64 bits wide and the whole 64-bit entry is used
--
2.43.0
^ permalink raw reply [flat|nested] 77+ messages in thread
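For reference, the recurring pattern in the hunks above can be summarized in a
short sketch. The helper name static_tls_dest is purely illustrative and is not
added by the patch; the arithmetic mirrors _dl_nothread_init_static_tls and
tls_get_addr_tail, with the offset now read through the l_rw indirection:

/* Illustrative sketch: compute the static TLS block address of MAP
   relative to the thread pointer TP, reading the offset from the
   read-write part of the link map.  */
static void *
static_tls_dest (struct link_map_private *map, void *tp)
{
#if TLS_TCB_AT_TP
  /* TLS blocks live below the TCB; offsets count downwards.  */
  return (char *) tp - map->l_rw->l_tls_offset;
#elif TLS_DTV_AT_TP
  /* TLS blocks live above the TCB, past the pre-TCB area.  */
  return (char *) tp + map->l_rw->l_tls_offset + TLS_PRE_TCB_SIZE;
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
}

The value read here is either an actual static TLS offset or one of the
NO_TLS_OFFSET/FORCED_DYNAMIC_TLS_OFFSET markers documented next to the field in
struct link_map_rw.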
* [PATCH v3 17/32] elf: Allocate auditor state after read-write link map
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (15 preceding siblings ...)
2023-12-07 10:32 ` [PATCH v3 16/32] elf: Move l_tls_offset into read-write part of link map Florian Weimer
@ 2023-12-07 10:32 ` Florian Weimer
2024-02-26 22:01 ` Joseph Myers
2023-12-07 10:32 ` [PATCH v3 18/32] elf: Move link map fields used by dependency sorting to writable part Florian Weimer
` (15 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:32 UTC (permalink / raw)
To: libc-alpha
Auditors can write to the cookie member, so it has to remain
read-write even if other parts of the link map are write-protected.
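After this change the lookup is plain pointer arithmetic past the l_rw
allocation. A rough equivalent of the new link_map_audit_state (the name
audit_state_sketch is illustrative only):

/* Rough sketch: the auditstate array sits immediately behind the
   read-write link map data, so it stays writable even once the rest
   of the link map is write-protected.  */
static inline struct auditstate *
audit_state_sketch (struct link_map_private *l, size_t index)
{
  struct auditstate *base = (struct auditstate *) (l->l_rw + 1);
  return &base[index];
}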
---
elf/dl-object.c | 6 +++---
elf/rtld.c | 9 +++++++--
include/link.h | 9 +++++----
sysdeps/generic/ldsodefs.h | 17 +++--------------
4 files changed, 18 insertions(+), 23 deletions(-)
diff --git a/elf/dl-object.c b/elf/dl-object.c
index 1a9b04dd3c..0741371b80 100644
--- a/elf/dl-object.c
+++ b/elf/dl-object.c
@@ -89,12 +89,12 @@ _dl_new_object (char *realname, const char *libname, int type,
# define audit_space 0
#endif
- new = calloc (sizeof (*new) + audit_space
+ new = calloc (sizeof (*new)
+ sizeof (struct link_map_private *)
+ sizeof (*newname) + libname_len, 1);
if (new == NULL)
return NULL;
- new->l_rw = calloc (1, sizeof (*new->l_rw));
+ new->l_rw = calloc (1, sizeof (*new->l_rw) + audit_space);
if (new->l_rw == NULL)
{
free (new);
@@ -103,7 +103,7 @@ _dl_new_object (char *realname, const char *libname, int type,
new->l_real = new;
new->l_symbolic_searchlist.r_list
- = (struct link_map_private **) ((char *) (new + 1) + audit_space);
+ = (struct link_map_private **) ((char *) (new + 1));
new->l_libname = newname
= (struct libname_list *) (new->l_symbolic_searchlist.r_list + 1);
diff --git a/elf/rtld.c b/elf/rtld.c
index 25a9c8aa58..b2f0b478bb 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -458,8 +458,13 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
interfere with __rtld_static_init. */
GLRO (dl_find_object) = &_dl_find_object;
- static struct link_map_rw rtld_map_rw;
- GL (dl_rtld_map).l_rw = &rtld_map_rw;
+ /* Pre-allocated read-write status of the ld.so link map. */
+ static struct
+ {
+ struct link_map_rw l;
+ struct auditstate _dl_rtld_auditstate[DL_NNS];
+ } rtld_map_rw;
+ GL (dl_rtld_map).l_rw = &rtld_map_rw.l;
#if NO_TLS_OFFSET != 0
GL (dl_rtld_map).l_rw->l_tls_offset = NO_TLS_OFFSET;
#endif
diff --git a/include/link.h b/include/link.h
index 220926248c..c752bc2cb7 100644
--- a/include/link.h
+++ b/include/link.h
@@ -372,15 +372,16 @@ l_next (struct link_map_private *l)
#include <dl-relocate-ld.h>
-/* Information used by audit modules. For most link maps, this data
- immediate follows the link map in memory. For the dynamic linker,
- it is allocated separately. See link_map_audit_state in
- <ldsodefs.h>. */
+/* Information used by audit modules. An array of size GLRO (naudit)
+ elements follows the l_rw link map data in memory (in some cases
+ conservatively extended to DL_NNS). */
struct auditstate
{
uintptr_t cookie;
unsigned int bindflags;
};
+_Static_assert (__alignof (struct auditstate) <= __alignof (struct link_map_rw),
+ "auditstate alignment compatible with link_map_rw alignment");
/* This is the hidden instance of struct r_debug_extended used by the
diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
index 53cc428421..d6d45f8c69 100644
--- a/sysdeps/generic/ldsodefs.h
+++ b/sysdeps/generic/ldsodefs.h
@@ -396,11 +396,6 @@ struct rtld_global
/* Structure describing the dynamic linker itself. */
EXTERN struct link_map_private _dl_rtld_map;
-#ifdef SHARED
- /* Used to store the audit information for the link map of the
- dynamic loader. */
- struct auditstate _dl_rtld_auditstate[DL_NNS];
-#endif
#if !PTHREAD_IN_LIBC && defined SHARED \
&& defined __rtld_lock_default_lock_recursive
@@ -1323,15 +1318,9 @@ rtld_active (void)
static inline struct auditstate *
link_map_audit_state (struct link_map_private *l, size_t index)
{
- if (l == &GL (dl_rtld_map))
- /* The auditstate array is stored separately. */
- return &GL (dl_rtld_auditstate) [index];
- else
- {
- /* The auditstate array follows the link map in memory. */
- struct auditstate *base = (struct auditstate *) (l + 1);
- return &base[index];
- }
+ /* The auditstate array follows the read-write link map part in memory. */
+ struct auditstate *base = (struct auditstate *) (l->l_rw + 1);
+ return &base[index];
}
/* Call the la_objsearch from the audit modules from the link map L. If
--
2.43.0
^ permalink raw reply [flat|nested] 77+ messages in thread
* [PATCH v3 18/32] elf: Move link map fields used by dependency sorting to writable part
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (16 preceding siblings ...)
2023-12-07 10:32 ` [PATCH v3 17/32] elf: Allocate auditor state after read-write " Florian Weimer
@ 2023-12-07 10:32 ` Florian Weimer
2024-02-27 17:51 ` Joseph Myers
2023-12-07 10:32 ` [PATCH v3 19/32] elf: Split _dl_lookup_map, _dl_map_new_object from _dl_map_object Florian Weimer
` (14 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:32 UTC (permalink / raw)
To: libc-alpha
Currently, ld.so re-runs dependency sorting during process shutdown
in _dl_fini, instead of simply using the reverse initialization order.
This means that the l_idx and l_visited fields are written to, and
there is no way to report errors during shutdown. Keeping these fields
permanently writable avoids having to make link maps writable during
_dl_fini and thus sidesteps the error reporting issue.
This commit can be reverted once we stop re-sorting dependencies
in _dl_fini.
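A simplified sketch of the marking step in elf/dl-sort-maps.c after this change
(the name mark_visited is illustrative; reldeps handling and the output
ordering are omitted), showing that sorting only writes through l_rw:

/* Simplified from dfs_traversal: only the read-write part of the link
   map is modified during the shutdown-time sort.  */
static void
mark_visited (struct link_map_private *map)
{
  if (map->l_rw->l_visited || map->l_faked)
    return;
  map->l_rw->l_visited = 1;
  if (map->l_initfini != NULL)
    for (int i = 0; map->l_initfini[i] != NULL; i++)
      mark_visited (map->l_initfini[i]);
}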
---
elf/dl-close.c | 33 +++++++++++++++++----------------
elf/dl-fini.c | 2 +-
elf/dl-sort-maps.c | 14 +++++++-------
include/link.h | 12 +++++++-----
4 files changed, 32 insertions(+), 29 deletions(-)
diff --git a/elf/dl-close.c b/elf/dl-close.c
index f242dcee9e..8f9d57df39 100644
--- a/elf/dl-close.c
+++ b/elf/dl-close.c
@@ -147,7 +147,7 @@ _dl_close_worker (struct link_map_private *map, bool force)
{
l->l_map_used = 0;
l->l_map_done = 0;
- l->l_idx = idx;
+ l->l_rw->l_idx = idx;
maps[idx] = l;
++idx;
}
@@ -157,10 +157,10 @@ _dl_close_worker (struct link_map_private *map, bool force)
The map variable is NULL after a retry. */
if (map != NULL)
{
- maps[map->l_idx] = maps[0];
- maps[map->l_idx]->l_idx = map->l_idx;
+ maps[map->l_rw->l_idx] = maps[0];
+ maps[map->l_rw->l_idx]->l_rw->l_idx = map->l_rw->l_idx;
maps[0] = map;
- maps[0]->l_idx = 0;
+ maps[0]->l_rw->l_idx = 0;
}
/* Keep track of the lowest index link map we have covered already. */
@@ -187,7 +187,7 @@ _dl_close_worker (struct link_map_private *map, bool force)
l->l_map_used = 1;
l->l_map_done = 1;
/* Signal the object is still needed. */
- l->l_idx = IDX_STILL_USED;
+ l->l_rw->l_idx = IDX_STILL_USED;
/* Mark all dependencies as used. */
if (l->l_initfini != NULL)
@@ -197,9 +197,10 @@ _dl_close_worker (struct link_map_private *map, bool force)
struct link_map_private **lp = &l->l_initfini[1];
while (*lp != NULL)
{
- if ((*lp)->l_idx != IDX_STILL_USED)
+ if ((*lp)->l_rw->l_idx != IDX_STILL_USED)
{
- assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);
+ assert ((*lp)->l_rw->l_idx >= 0
+ && (*lp)->l_rw->l_idx < nloaded);
if (!(*lp)->l_map_used)
{
@@ -208,8 +209,8 @@ _dl_close_worker (struct link_map_private *map, bool force)
already processed it, then we need to go back
and process again from that point forward to
ensure we keep all of its dependencies also. */
- if ((*lp)->l_idx - 1 < done_index)
- done_index = (*lp)->l_idx - 1;
+ if ((*lp)->l_rw->l_idx - 1 < done_index)
+ done_index = (*lp)->l_rw->l_idx - 1;
}
}
@@ -222,15 +223,15 @@ _dl_close_worker (struct link_map_private *map, bool force)
{
struct link_map_private *jmap = l->l_rw->l_reldeps->list[j];
- if (jmap->l_idx != IDX_STILL_USED)
+ if (jmap->l_rw->l_idx != IDX_STILL_USED)
{
- assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);
+ assert (jmap->l_rw->l_idx >= 0 && jmap->l_rw->l_idx < nloaded);
if (!jmap->l_map_used)
{
jmap->l_map_used = 1;
- if (jmap->l_idx - 1 < done_index)
- done_index = jmap->l_idx - 1;
+ if (jmap->l_rw->l_idx - 1 < done_index)
+ done_index = jmap->l_rw->l_idx - 1;
}
}
}
@@ -321,7 +322,7 @@ _dl_close_worker (struct link_map_private *map, bool force)
((char *) imap->l_scope[cnt]
- offsetof (struct link_map_private, l_searchlist));
assert (tmap->l_ns == nsid);
- if (tmap->l_idx == IDX_STILL_USED)
+ if (tmap->l_rw->l_idx == IDX_STILL_USED)
++remain;
else
removed_any = true;
@@ -368,7 +369,7 @@ _dl_close_worker (struct link_map_private *map, bool force)
((char *) imap->l_scope[cnt]
- offsetof (struct link_map_private,
l_searchlist)));
- if (tmap->l_idx != IDX_STILL_USED)
+ if (tmap->l_rw->l_idx != IDX_STILL_USED)
{
/* Remove the scope. Or replace with own map's
scope. */
@@ -413,7 +414,7 @@ _dl_close_worker (struct link_map_private *map, bool force)
/* The loader is gone, so mark the object as not having one.
Note: l_idx != IDX_STILL_USED -> object will be removed. */
if (imap->l_loader != NULL
- && imap->l_loader->l_idx != IDX_STILL_USED)
+ && imap->l_loader->l_rw->l_idx != IDX_STILL_USED)
imap->l_loader = NULL;
/* Remember where the first dynamically loaded object is. */
diff --git a/elf/dl-fini.c b/elf/dl-fini.c
index 5c78159fee..2abd63cb08 100644
--- a/elf/dl-fini.c
+++ b/elf/dl-fini.c
@@ -77,7 +77,7 @@ _dl_fini (void)
assert (i < nloaded);
maps[i] = l;
- l->l_idx = i;
+ l->l_rw->l_idx = i;
++i;
/* Bump l_direct_opencount of all objects so that they
diff --git a/elf/dl-sort-maps.c b/elf/dl-sort-maps.c
index e3a547e4da..ae8e7bb528 100644
--- a/elf/dl-sort-maps.c
+++ b/elf/dl-sort-maps.c
@@ -51,7 +51,7 @@ _dl_sort_maps_original (struct link_map_private **maps, unsigned int nmaps,
{
/* Do not handle ld.so in secondary namespaces and objects which
are not removed. */
- if (thisp != thisp->l_real || thisp->l_idx == -1)
+ if (thisp != thisp->l_real || thisp->l_rw->l_idx == -1)
goto skip;
}
@@ -138,17 +138,17 @@ dfs_traversal (struct link_map_private ***rpo, struct link_map_private *map,
{
/* _dl_map_object_deps ignores l_faked objects when calculating the
number of maps before calling _dl_sort_maps, ignore them as well. */
- if (map->l_visited || map->l_faked)
+ if (map->l_rw->l_visited || map->l_faked)
return;
- map->l_visited = 1;
+ map->l_rw->l_visited = 1;
if (map->l_initfini)
{
for (int i = 0; map->l_initfini[i] != NULL; i++)
{
struct link_map_private *dep = map->l_initfini[i];
- if (dep->l_visited == 0
+ if (dep->l_rw->l_visited == 0
&& dep->l_main_map == 0)
dfs_traversal (rpo, dep, do_reldeps);
}
@@ -163,7 +163,7 @@ dfs_traversal (struct link_map_private ***rpo, struct link_map_private *map,
for (int m = map->l_rw->l_reldeps->act - 1; m >= 0; m--)
{
struct link_map_private *dep = map->l_rw->l_reldeps->list[m];
- if (dep->l_visited == 0
+ if (dep->l_rw->l_visited == 0
&& dep->l_main_map == 0)
dfs_traversal (rpo, dep, do_reldeps);
}
@@ -182,7 +182,7 @@ _dl_sort_maps_dfs (struct link_map_private **maps, unsigned int nmaps,
{
struct link_map_private *first_map = maps[0];
for (int i = nmaps - 1; i >= 0; i--)
- maps[i]->l_visited = 0;
+ maps[i]->l_rw->l_visited = 0;
/* We apply DFS traversal for each of maps[i] until the whole total order
is found and we're at the start of the Reverse-Postorder (RPO) sequence,
@@ -245,7 +245,7 @@ _dl_sort_maps_dfs (struct link_map_private **maps, unsigned int nmaps,
if (do_reldeps)
{
for (int i = nmaps - 1; i >= 0; i--)
- rpo[i]->l_visited = 0;
+ rpo[i]->l_rw->l_visited = 0;
struct link_map_private **maps_head = &maps[nmaps];
for (int i = nmaps - 1; i >= 0; i--)
diff --git a/include/link.h b/include/link.h
index c752bc2cb7..2632337e29 100644
--- a/include/link.h
+++ b/include/link.h
@@ -132,6 +132,13 @@ struct link_map_rw
ignored. */
bool l_nodelete_active;
bool l_nodelete_pending;
+
+ /* Used for dependency sorting in dlclose/_dl_fini. These need to
+ be writable all the time because there is no way to report an
+ error in _dl_fini. These flags can be moved into struct
+ link_map_private once _dl_fini no longer re-sorts link maps. */
+ bool l_visited;
+ int l_idx;
};
/* Structure describing a loaded shared object. The `l_next' and `l_prev'
@@ -231,8 +238,6 @@ struct link_map_private
unsigned int l_global:1; /* Nonzero if object in _dl_global_scope. */
unsigned int l_reserved:2; /* Reserved for internal use. */
unsigned int l_main_map:1; /* Nonzero for the map of the main program. */
- unsigned int l_visited:1; /* Used internally for map dependency
- graph traversal. */
unsigned int l_map_used:1; /* These two bits are used during traversal */
unsigned int l_map_done:1; /* of maps in _dl_close_worker. */
unsigned int l_phdr_allocated:1; /* Nonzero if the data structure pointed
@@ -319,9 +324,6 @@ struct link_map_private
ElfW(Word) l_flags_1;
ElfW(Word) l_flags;
- /* Temporarily used in `dl_close'. */
- int l_idx;
-
struct link_map_machine l_mach;
struct
--
2.43.0
^ permalink raw reply [flat|nested] 77+ messages in thread
* [PATCH v3 19/32] elf: Split _dl_lookup_map, _dl_map_new_object from _dl_map_object
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (17 preceding siblings ...)
2023-12-07 10:32 ` [PATCH v3 18/32] elf: Move link map fields used by dependency sorting to writable part Florian Weimer
@ 2023-12-07 10:32 ` Florian Weimer
2024-02-27 17:56 ` Joseph Myers
2023-12-07 10:32 ` [PATCH v3 20/32] elf: Add l_soname accessor function for DT_SONAME values Florian Weimer
` (13 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:32 UTC (permalink / raw)
To: libc-alpha
So that they can eventually be called separately from dlopen.
---
elf/dl-load.c | 43 ++++++++++++++++++++++++++------------
sysdeps/generic/ldsodefs.h | 13 ++++++++++++
2 files changed, 43 insertions(+), 13 deletions(-)
diff --git a/elf/dl-load.c b/elf/dl-load.c
index 5c9aaf2eec..d45ae48354 100644
--- a/elf/dl-load.c
+++ b/elf/dl-load.c
@@ -1904,24 +1904,14 @@ open_path (const char *name, size_t namelen, int mode,
return -1;
}
-/* Map in the shared object file NAME. */
-
struct link_map_private *
-_dl_map_object (struct link_map_private *loader, const char *name,
- int type, int trace_mode, int mode, Lmid_t nsid)
+_dl_lookup_map (Lmid_t nsid, const char *name)
{
- int fd;
- const char *origname = NULL;
- char *realname;
- char *name_copy;
- struct link_map_private *l;
- struct filebuf fb;
-
assert (nsid >= 0);
assert (nsid < GL(dl_nns));
- /* Look for this name among those already loaded. */
- for (l = GL(dl_ns)[nsid]._ns_loaded; l; l = l_next (l))
+ for (struct link_map_private *l = GL(dl_ns)[nsid]._ns_loaded;
+ l != NULL; l = l_next (l))
{
/* If the requested name matches the soname of a loaded object,
use that object. Elide this check for names that have not
@@ -1950,6 +1940,22 @@ _dl_map_object (struct link_map_private *loader, const char *name,
return l;
}
+ return NULL;
+}
+
+/* Map in the shared object file NAME. */
+
+struct link_map_private *
+_dl_map_new_object (struct link_map_private *loader, const char *name,
+ int type, int trace_mode, int mode, Lmid_t nsid)
+{
+ int fd;
+ const char *origname = NULL;
+ char *realname;
+ char *name_copy;
+ struct link_map_private *l;
+ struct filebuf fb;
+
/* Display information if we are debugging. */
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES)
&& loader != NULL)
@@ -2204,6 +2210,17 @@ _dl_map_object (struct link_map_private *loader, const char *name,
type, mode, &stack_end, nsid);
}
+struct link_map_private *
+_dl_map_object (struct link_map_private *loader, const char *name,
+ int type, int trace_mode, int mode, Lmid_t nsid)
+{
+ struct link_map_private *l = _dl_lookup_map (nsid, name);
+ if (l != NULL)
+ return l;
+ return _dl_map_new_object (loader, name, type, trace_mode, mode, nsid);
+}
+
+
struct add_path_state
{
bool counting;
diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
index d6d45f8c69..01f99a57b7 100644
--- a/sysdeps/generic/ldsodefs.h
+++ b/sysdeps/generic/ldsodefs.h
@@ -908,6 +908,11 @@ int _dl_catch_exception (struct dl_exception *exception,
void (*operate) (void *), void *args);
rtld_hidden_proto (_dl_catch_exception)
+/* Search NSID for a map with NAME. If no such map is already loaded,
+ return NULL. */
+struct link_map_private *_dl_lookup_map (Lmid_t nsid, const char *name)
+ attribute_hidden;
+
/* Open the shared object NAME and map in its segments.
LOADER's DT_RPATH is used in searching for NAME.
If the object is already opened, returns its existing map. */
@@ -916,6 +921,14 @@ struct link_map_private *_dl_map_object (struct link_map_private * loader,
int type, int trace_mode, int mode,
Lmid_t nsid) attribute_hidden;
+/* Like _dl_map_object, but assumes that NAME has not been loaded yet
+ (_dl_lookup_map returned NULL). */
+struct link_map_private *_dl_map_new_object (struct link_map_private * loader,
+ const char *name,
+ int type, int trace_mode, int mode,
+ Lmid_t nsid) attribute_hidden;
+
+
/* Call _dl_map_object on the dependencies of MAP, and set up
MAP->l_searchlist. PRELOADS points to a vector of NPRELOADS previously
loaded objects that will be inserted into MAP->l_searchlist after MAP
--
2.43.0
^ permalink raw reply [flat|nested] 77+ messages in thread
* [PATCH v3 20/32] elf: Add l_soname accessor function for DT_SONAME values
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (18 preceding siblings ...)
2023-12-07 10:32 ` [PATCH v3 19/32] elf: Split _dl_lookup_map, _dl_map_new_object from _dl_map_object Florian Weimer
@ 2023-12-07 10:32 ` Florian Weimer
2024-02-27 22:14 ` Joseph Myers
2023-12-07 10:32 ` [PATCH v3 21/32] elf: _dl_rtld_map should not exist in static builds Florian Weimer
` (12 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:32 UTC (permalink / raw)
To: libc-alpha
It's not necessary to introduce temporaries because the compiler
is able to evaluate l_soname just once in constructs like:
l_soname (l) != NULL && strcmp (l_soname (l), LIBC_SO) != 0
---
elf/dl-load.c | 20 +++++------------
elf/dl-open.c | 4 +---
elf/rtld.c | 44 ++++++++++++++++----------------------
elf/setup-vdso.h | 15 +++++++------
elf/sprof.c | 5 +----
sysdeps/generic/ldsodefs.h | 12 +++++++++++
6 files changed, 46 insertions(+), 54 deletions(-)
diff --git a/elf/dl-load.c b/elf/dl-load.c
index d45ae48354..9e64992c36 100644
--- a/elf/dl-load.c
+++ b/elf/dl-load.c
@@ -1414,8 +1414,7 @@ cannot enable executable stack as shared object requires");
loading. Add it right away. */
if (__glibc_unlikely (GLRO(dl_profile) != NULL)
&& l->l_info[DT_SONAME] != NULL)
- add_name_to_object (l, ((const char *) D_PTR (l, l_info[DT_STRTAB])
- + l->l_info[DT_SONAME]->d_un.d_val));
+ add_name_to_object (l, l_soname (l));
#else
/* Audit modules only exist when linking is dynamic so ORIGNAME
cannot be non-NULL. */
@@ -1425,9 +1424,7 @@ cannot enable executable stack as shared object requires");
/* If we have newly loaded libc.so, update the namespace
description. */
if (GL(dl_ns)[nsid].libc_map == NULL
- && l->l_info[DT_SONAME] != NULL
- && strcmp (((const char *) D_PTR (l, l_info[DT_STRTAB])
- + l->l_info[DT_SONAME]->d_un.d_val), LIBC_SO) == 0)
+ && l_soname (l) != NULL && strcmp (l_soname (l), LIBC_SO) == 0)
GL(dl_ns)[nsid].libc_map = l;
/* _dl_close can only eventually undo the module ID assignment (via
@@ -1920,19 +1917,12 @@ _dl_lookup_map (Lmid_t nsid, const char *name)
continue;
if (!_dl_name_match_p (name, l))
{
- const char *soname;
-
- if (__glibc_likely (l->l_soname_added)
- || l->l_info[DT_SONAME] == NULL)
- continue;
-
- soname = ((const char *) D_PTR (l, l_info[DT_STRTAB])
- + l->l_info[DT_SONAME]->d_un.d_val);
- if (strcmp (name, soname) != 0)
+ if (__glibc_likely (l->l_soname_added) || l_soname (l) == NULL
+ || strcmp (name, l_soname (l)) != 0)
continue;
/* We have a match on a new name -- cache it. */
- add_name_to_object (l, soname);
+ add_name_to_object (l, l_soname (l));
l->l_soname_added = 1;
}
diff --git a/elf/dl-open.c b/elf/dl-open.c
index 306cdcc6ac..d270672c1f 100644
--- a/elf/dl-open.c
+++ b/elf/dl-open.c
@@ -650,9 +650,7 @@ dl_open_worker_begin (void *a)
Perform partial initialization in this case. This must
come after the symbol versioning initialization in
_dl_check_map_versions. */
- if (map->l_info[DT_SONAME] != NULL
- && strcmp (((const char *) D_PTR (map, l_info[DT_STRTAB])
- + map->l_info[DT_SONAME]->d_un.d_val), LD_SO) == 0)
+ if (l_soname (map) != NULL && strcmp (l_soname (map), LD_SO) == 0)
__rtld_static_init (map);
#endif
}
diff --git a/elf/rtld.c b/elf/rtld.c
index b2f0b478bb..c92e99927b 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -1075,13 +1075,9 @@ static void
rtld_chain_load (struct link_map_private *main_map, char *argv0)
{
/* The dynamic loader run against itself. */
- const char *rtld_soname
- = ((const char *) D_PTR (&GL(dl_rtld_map), l_info[DT_STRTAB])
- + GL(dl_rtld_map).l_info[DT_SONAME]->d_un.d_val);
- if (main_map->l_info[DT_SONAME] != NULL
- && strcmp (rtld_soname,
- ((const char *) D_PTR (main_map, l_info[DT_STRTAB])
- + main_map->l_info[DT_SONAME]->d_un.d_val)) == 0)
+ const char *rtld_soname = l_soname (&GL(dl_rtld_map));
+ if (l_soname (main_map) != NULL
+ && strcmp (rtld_soname, l_soname (main_map)) == 0)
_dl_fatal_printf ("%s: loader cannot load itself\n", rtld_soname);
/* With DT_NEEDED dependencies, the executable is dynamically
@@ -1648,20 +1644,20 @@ dl_main (const ElfW(Phdr) *phdr,
/* If the current libname is different from the SONAME, add the
latter as well. */
- if (GL(dl_rtld_map).l_info[DT_SONAME] != NULL
- && strcmp (GL(dl_rtld_map).l_libname->name,
- (const char *) D_PTR (&GL(dl_rtld_map), l_info[DT_STRTAB])
- + GL(dl_rtld_map).l_info[DT_SONAME]->d_un.d_val) != 0)
- {
- static struct libname_list newname;
- newname.name = ((char *) D_PTR (&GL(dl_rtld_map), l_info[DT_STRTAB])
- + GL(dl_rtld_map).l_info[DT_SONAME]->d_un.d_ptr);
- newname.next = NULL;
- newname.dont_free = 1;
-
- assert (GL(dl_rtld_map).l_libname->next == NULL);
- GL(dl_rtld_map).l_libname->next = &newname;
- }
+ {
+ const char *soname = l_soname (&GL(dl_rtld_map));
+ if (soname != NULL
+ && strcmp (GL(dl_rtld_map).l_libname->name, soname) != 0)
+ {
+ static struct libname_list newname;
+ newname.name = soname;
+ newname.next = NULL;
+ newname.dont_free = 1;
+
+ assert (GL(dl_rtld_map).l_libname->next == NULL);
+ GL(dl_rtld_map).l_libname->next = &newname;
+ }
+ }
/* The ld.so must be relocated since otherwise loading audit modules
will fail since they reuse the very same ld.so. */
assert (GL(dl_rtld_map).l_relocated);
@@ -1674,10 +1670,8 @@ dl_main (const ElfW(Phdr) *phdr,
/* If the main map is libc.so, update the base namespace to
refer to this map. If libc.so is loaded later, this happens
in _dl_map_object_from_fd. */
- if (main_map->l_info[DT_SONAME] != NULL
- && (strcmp (((const char *) D_PTR (main_map, l_info[DT_STRTAB])
- + main_map->l_info[DT_SONAME]->d_un.d_val), LIBC_SO)
- == 0))
+ if (l_soname (main_map) != NULL
+ && strcmp (l_soname (main_map), LIBC_SO) == 0)
GL(dl_ns)[LM_ID_BASE].libc_map = main_map;
/* Set up our cache of pointers into the hash table. */
diff --git a/elf/setup-vdso.h b/elf/setup-vdso.h
index 5e9d2eb820..8aaa44ca26 100644
--- a/elf/setup-vdso.h
+++ b/elf/setup-vdso.h
@@ -77,13 +77,14 @@ setup_vdso (struct link_map_private *main_map __attribute__ ((unused)),
/* Now that we have the info handy, use the DSO image's soname
so this object can be looked up by name. */
- if (l->l_info[DT_SONAME] != NULL)
- {
- char *dsoname = ((char *) D_PTR (l, l_info[DT_STRTAB])
- + l->l_info[DT_SONAME]->d_un.d_val);
- l->l_libname->name = dsoname;
- l->l_public.l_name = dsoname;
- }
+ {
+ const char *dsoname = l_soname (l);
+ if (dsoname != NULL)
+ {
+ l->l_libname->name = dsoname;
+ l->l_public.l_name = (char *) dsoname;
+ }
+ }
/* Add the vDSO to the object list. */
_dl_add_to_namespace_list (l, LM_ID_BASE);
diff --git a/elf/sprof.c b/elf/sprof.c
index 155da1bd03..81f51a7632 100644
--- a/elf/sprof.c
+++ b/elf/sprof.c
@@ -530,10 +530,7 @@ load_shobj (const char *name)
printf ("string table: %p\n", result->dynstrtab);
/* Determine the soname. */
- if (map->l_info[DT_SONAME] == NULL)
- result->soname = NULL;
- else
- result->soname = result->dynstrtab + map->l_info[DT_SONAME]->d_un.d_val;
+ result->soname = l_soname (map);
if (do_test && result->soname != NULL)
printf ("soname: %s\n", result->soname);
diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
index 01f99a57b7..9f2022d43e 100644
--- a/sysdeps/generic/ldsodefs.h
+++ b/sysdeps/generic/ldsodefs.h
@@ -88,6 +88,18 @@ dl_relocate_ld (const struct link_map_private *l)
#define D_PTR(map, i) \
((map)->i->d_un.d_ptr + (dl_relocate_ld (map) ? 0 : (map)->l_public.l_addr))
+/* Returns the soname string if the link map has a DT_SONAME tag, or
+ NULL if it does not. */
+static inline const char *
+l_soname (const struct link_map_private *l)
+{
+ if (l->l_info[DT_SONAME] == NULL)
+ return NULL;
+ else
+ return ((const char *) D_PTR (l, l_info[DT_STRTAB])
+ + l->l_info[DT_SONAME]->d_un.d_val);
+}
+
/* Result of the lookup functions and how to retrieve the base address. */
typedef struct link_map_private *lookup_t;
#define LOOKUP_VALUE(map) map
--
2.43.0
* [PATCH v3 21/32] elf: _dl_rtld_map should not exist in static builds
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (19 preceding siblings ...)
2023-12-07 10:32 ` [PATCH v3 20/32] elf: Add l_soname accessor function for DT_SONAME values Florian Weimer
@ 2023-12-07 10:32 ` Florian Weimer
2024-02-28 12:38 ` Joseph Myers
2023-12-07 10:32 ` [PATCH v3 22/32] elf: Introduce GLPM accessor for the protected memory area Florian Weimer
` (11 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:32 UTC (permalink / raw)
To: libc-alpha
We have separate objects for SHARED and !SHARED builds, so there is no
need to make the rtld link map equality check somehow work in static
builds. Instead, hide the _dl_rtld_map reference in the new
is_rtld_link_map function, and simply define it to return false for
!SHARED because a statically linked program has no loader link map.
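
For reference, the accessor added in the sysdeps/generic/ldsodefs.h
hunk below boils down to:

  static inline bool
  is_rtld_link_map (const struct link_map_private *map)
  {
  #ifdef SHARED
    return map == &GL (dl_rtld_map);
  #else
    /* Static builds have no loader link map, so nothing can match.  */
    return false;
  #endif
  }

Call sites then use !is_rtld_link_map (map) instead of the open-coded
map != &GL(dl_rtld_map) comparison, and the weak_extern declarations
for static builds can go away.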
---
elf/do-rel.h | 11 +----------
elf/dynamic-link.h | 2 +-
sysdeps/arm/dl-machine.h | 11 +----------
sysdeps/generic/ldsodefs.h | 14 ++++++++++++++
sysdeps/mips/dl-machine.h | 16 +++-------------
sysdeps/powerpc/powerpc64/dl-machine.h | 4 ++--
sysdeps/sh/dl-machine.h | 7 ++-----
7 files changed, 24 insertions(+), 41 deletions(-)
diff --git a/elf/do-rel.h b/elf/do-rel.h
index 8083cb4162..78b5ddfe87 100644
--- a/elf/do-rel.h
+++ b/elf/do-rel.h
@@ -102,16 +102,7 @@ elf_dynamic_do_Rel (struct link_map_private *map, struct r_scope_elem *scope[],
else
#endif
{
- /* This is defined in rtld.c, but nowhere in the static libc.a; make
- the reference weak so static programs can still link. This
- declaration cannot be done when compiling rtld.c (i.e. #ifdef
- RTLD_BOOTSTRAP) because rtld.c contains the common defn for
- _dl_rtld_map, which is incompatible with a weak decl in the same
- file. */
-# ifndef SHARED
- weak_extern (GL(dl_rtld_map));
-# endif
- if (map != &GL(dl_rtld_map)) /* Already done in rtld itself. */
+ if (!is_rtld_link_map (map)) /* Already done in rtld itself. */
# if !defined DO_RELA || defined ELF_MACHINE_REL_RELATIVE
/* Rela platforms get the offset from r_addend and this must
be copied in the relocation address. Therefore we can skip
diff --git a/elf/dynamic-link.h b/elf/dynamic-link.h
index 2f72240b6a..8f4d096182 100644
--- a/elf/dynamic-link.h
+++ b/elf/dynamic-link.h
@@ -190,7 +190,7 @@ elf_machine_lazy_rel (struct link_map_private *map,
do { \
int edr_lazy = elf_machine_runtime_setup ((map), (scope), (lazy), \
(consider_profile)); \
- if (((map) != &GL(dl_rtld_map) || DO_RTLD_BOOTSTRAP)) \
+ if ((!is_rtld_link_map (map) || DO_RTLD_BOOTSTRAP)) \
ELF_DYNAMIC_DO_RELR (map); \
ELF_DYNAMIC_DO_REL ((map), (scope), edr_lazy, skip_ifunc); \
ELF_DYNAMIC_DO_RELA ((map), (scope), edr_lazy, skip_ifunc); \
diff --git a/sysdeps/arm/dl-machine.h b/sysdeps/arm/dl-machine.h
index b138596252..99bf9656b2 100644
--- a/sysdeps/arm/dl-machine.h
+++ b/sysdeps/arm/dl-machine.h
@@ -356,16 +356,7 @@ elf_machine_rel (struct link_map_private *map, struct r_scope_elem *scope[],
Elf32_Addr x;
} __attribute__ ((packed, may_alias));
# ifndef RTLD_BOOTSTRAP
- /* This is defined in rtld.c, but nowhere in the static
- libc.a; make the reference weak so static programs can
- still link. This declaration cannot be done when
- compiling rtld.c (i.e. #ifdef RTLD_BOOTSTRAP) because
- rtld.c contains the common defn for _dl_rtld_map, which
- is incompatible with a weak decl in the same file. */
-# ifndef SHARED
- weak_extern (_dl_rtld_map);
-# endif
- if (map == &GL(dl_rtld_map))
+ if (is_rtld_link_map (map))
/* Undo the relocation done here during bootstrapping.
Now we will relocate it anew, possibly using a
binding found in the user program or a loaded library
diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
index 9f2022d43e..c5d49cae62 100644
--- a/sysdeps/generic/ldsodefs.h
+++ b/sysdeps/generic/ldsodefs.h
@@ -406,8 +406,10 @@ struct rtld_global
/* List of search directories. */
EXTERN struct r_search_path_elem *_dl_all_dirs;
+#ifdef SHARED
/* Structure describing the dynamic linker itself. */
EXTERN struct link_map_private _dl_rtld_map;
+#endif
#if !PTHREAD_IN_LIBC && defined SHARED \
&& defined __rtld_lock_default_lock_recursive
@@ -1326,6 +1328,18 @@ dl_init_static_tls (struct link_map_private *map)
void __rtld_static_init (struct link_map_private *map) attribute_hidden;
#endif
+/* Returns true if MAP is the dynamic loader itself. Statically
+ linked binaries do not have a dynamic loader, so return false. */
+static inline bool
+is_rtld_link_map (const struct link_map_private *map)
+{
+#ifdef SHARED
+ return map == &GL (dl_rtld_map);
+#else
+ return false;
+#endif
+}
+
/* Return true if the ld.so copy in this namespace is actually active
and working. If false, the dl_open/dlfcn hooks have to be used to
call into the outer dynamic linker (which happens after static
diff --git a/sysdeps/mips/dl-machine.h b/sysdeps/mips/dl-machine.h
index b7b1705f65..2eb01ca7cb 100644
--- a/sysdeps/mips/dl-machine.h
+++ b/sysdeps/mips/dl-machine.h
@@ -436,16 +436,6 @@ elf_machine_reloc (struct link_map_private *map, struct r_scope_elem *scope[],
const unsigned long int r_type = ELFW(R_TYPE) (r_info);
ElfW(Addr) *addr_field = (ElfW(Addr) *) reloc_addr;
-#if !defined RTLD_BOOTSTRAP && !defined SHARED
- /* This is defined in rtld.c, but nowhere in the static libc.a;
- make the reference weak so static programs can still link. This
- declaration cannot be done when compiling rtld.c (i.e. #ifdef
- RTLD_BOOTSTRAP) because rtld.c contains the common defn for
- _dl_rtld_map, which is incompatible with a weak decl in the same
- file. */
- weak_extern (GL(dl_rtld_map));
-#endif
-
switch (r_type)
{
#if !defined (RTLD_BOOTSTRAP)
@@ -534,7 +524,7 @@ elf_machine_reloc (struct link_map_private *map, struct r_scope_elem *scope[],
though it's not ABI compliant. Some day we should
bite the bullet and stop doing this. */
#ifndef RTLD_BOOTSTRAP
- if (map != &GL(dl_rtld_map))
+ if (!is_rtld_link_map (map))
#endif
reloc_value += SYMBOL_ADDRESS (map, sym, true);
}
@@ -553,7 +543,7 @@ elf_machine_reloc (struct link_map_private *map, struct r_scope_elem *scope[],
}
else
#ifndef RTLD_BOOTSTRAP
- if (map != &GL(dl_rtld_map))
+ if (!is_rtld_link_map (map))
#endif
reloc_value += map->l_public.l_addr;
@@ -752,7 +742,7 @@ elf_machine_got_rel (struct link_map_private *map,
n = map->l_info[DT_MIPS (LOCAL_GOTNO)]->d_un.d_val;
/* The dynamic linker's local got entries have already been relocated. */
- if (map != &GL(dl_rtld_map))
+ if (!is_rtld_link_map (map))
{
/* got[0] is reserved. got[1] is also reserved for the dynamic object
generated by gnu ld. Skip these reserved entries from relocation. */
diff --git a/sysdeps/powerpc/powerpc64/dl-machine.h b/sysdeps/powerpc/powerpc64/dl-machine.h
index 6104d6ae9c..80754546b4 100644
--- a/sysdeps/powerpc/powerpc64/dl-machine.h
+++ b/sysdeps/powerpc/powerpc64/dl-machine.h
@@ -519,7 +519,7 @@ elf_machine_fixup_plt (struct link_map_private *map, lookup_t sym_map,
if (finaladdr != 0 && map != sym_map && !sym_map->l_relocated
#if !defined RTLD_BOOTSTRAP && defined SHARED
/* Bootstrap map doesn't have l_relocated set for it. */
- && sym_map != &GL(dl_rtld_map)
+ && !is_rtld_link_map (sym_map)
#endif
)
offset = sym_map->l_public.l_addr;
@@ -645,7 +645,7 @@ resolve_ifunc (Elf64_Addr value,
if (map != sym_map
# if !defined RTLD_BOOTSTRAP && defined SHARED
/* Bootstrap map doesn't have l_relocated set for it. */
- && sym_map != &GL(dl_rtld_map)
+ && !is_rtld_link_map (sym_map)
# endif
&& !sym_map->l_relocated)
{
diff --git a/sysdeps/sh/dl-machine.h b/sysdeps/sh/dl-machine.h
index 786cd00cab..f76254cca8 100644
--- a/sysdeps/sh/dl-machine.h
+++ b/sysdeps/sh/dl-machine.h
@@ -285,7 +285,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
if (__glibc_unlikely (r_type == R_SH_RELATIVE))
{
#ifndef RTLD_BOOTSTRAP
- if (map != &GL(dl_rtld_map)) /* Already done in rtld itself. */
+ if (!is_rtld_link_map (map)) /* Already done in rtld itself. */
#endif
{
if (reloc->r_addend)
@@ -388,10 +388,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
compiling rtld.c (i.e. #ifdef RTLD_BOOTSTRAP) because
rtld.c contains the common defn for _dl_rtld_map, which
is incompatible with a weak decl in the same file. */
-# ifndef SHARED
- weak_extern (_dl_rtld_map);
-# endif
- if (map == &GL(dl_rtld_map))
+ if (is_rtld_link_map (map))
/* Undo the relocation done here during bootstrapping.
Now we will relocate it anew, possibly using a
binding found in the user program or a loaded library
--
2.43.0
* [PATCH v3 22/32] elf: Introduce GLPM accessor for the protected memory area
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (20 preceding siblings ...)
2023-12-07 10:32 ` [PATCH v3 21/32] elf: _dl_rtld_map should not exist in static builds Florian Weimer
@ 2023-12-07 10:32 ` Florian Weimer
2024-02-28 12:44 ` Joseph Myers
2023-12-07 10:32 ` [PATCH v3 23/32] elf: Bootstrap allocation for future protected memory allocator Florian Weimer
` (10 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:32 UTC (permalink / raw)
To: libc-alpha
The memory area is still allocated in the data segment, so this change
is preparatory only.
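
Concretely, the accessor introduced in the ldsodefs.h hunk below is
only a renaming layer at this stage:

  #ifdef SHARED
  /* Implementation structure for the protected memory area.  */
  struct rtld_protmem
  {
    /* Structure describing the dynamic linker itself.  */
    EXTERN struct link_map_private _dl_rtld_map;
  };
  extern struct rtld_protmem _rtld_protmem attribute_hidden;
  # define GLPM(name) _rtld_protmem._##name
  #else
  # define GLPM(name) _##name
  #endif

so GLPM (dl_rtld_map) expands to _rtld_protmem._dl_rtld_map in shared
builds and to plain _dl_rtld_map in static builds.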
---
elf/dl-load.c | 12 ++--
elf/rtld.c | 129 +++++++++++++++++++------------------
elf/setup-vdso.h | 4 +-
sysdeps/generic/ldsodefs.h | 24 +++++--
sysdeps/x86/dl-cet.c | 4 +-
sysdeps/x86/dl-prop.h | 2 +-
6 files changed, 94 insertions(+), 81 deletions(-)
diff --git a/elf/dl-load.c b/elf/dl-load.c
index 9e64992c36..30727afddb 100644
--- a/elf/dl-load.c
+++ b/elf/dl-load.c
@@ -744,7 +744,7 @@ _dl_init_paths (const char *llp, const char *source,
l = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
#ifdef SHARED
if (l == NULL)
- l = &GL (dl_rtld_map);
+ l = &GLPM (dl_rtld_map);
#endif
assert (l->l_type != lt_loaded);
@@ -969,8 +969,8 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
/* When loading into a namespace other than the base one we must
avoid loading ld.so since there can only be one copy. Ever. */
if (__glibc_unlikely (nsid != LM_ID_BASE)
- && (_dl_file_id_match_p (&id, &GL(dl_rtld_map).l_file_id)
- || _dl_name_match_p (name, &GL(dl_rtld_map))))
+ && (_dl_file_id_match_p (&id, &GLPM(dl_rtld_map).l_file_id)
+ || _dl_name_match_p (name, &GLPM(dl_rtld_map))))
{
/* This is indeed ld.so. Create a new link_map which refers to
the real one for almost everything. */
@@ -979,7 +979,7 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
goto fail_new;
/* Refer to the real descriptor. */
- l->l_real = &GL(dl_rtld_map);
+ l->l_real = &GLPM(dl_rtld_map);
/* Copy l_addr and l_ld to avoid a GDB warning with dlmopen(). */
l->l_public.l_addr = l->l_real->l_public.l_addr;
@@ -1284,7 +1284,7 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
const uintptr_t p = (uintptr_t) &__stack_prot & -GLRO(dl_pagesize);
const size_t s = (uintptr_t) (&__stack_prot + 1) - p;
- struct link_map_private *const m = &GL(dl_rtld_map);
+ struct link_map_private *const m = &GLPM(dl_rtld_map);
const uintptr_t relro_end = ((m->l_public.l_addr + m->l_relro_addr
+ m->l_relro_size)
& -GLRO(dl_pagesize));
@@ -2071,7 +2071,7 @@ _dl_map_new_object (struct link_map_private *loader, const char *name,
l = (loader
?: GL(dl_ns)[LM_ID_BASE]._ns_loaded
# ifdef SHARED
- ?: &GL(dl_rtld_map)
+ ?: &GLPM(dl_rtld_map)
# endif
);
diff --git a/elf/rtld.c b/elf/rtld.c
index c92e99927b..733dbf46a3 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -345,6 +345,7 @@ struct rtld_global _rtld_global =
extern struct rtld_global _rtld_local
__attribute__ ((alias ("_rtld_global"), visibility ("hidden")));
+struct rtld_protmem _rtld_protmem;
/* This variable is similar to _rtld_local, but all values are
read-only after relocation. */
@@ -464,9 +465,9 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
struct link_map_rw l;
struct auditstate _dl_rtld_auditstate[DL_NNS];
} rtld_map_rw;
- GL (dl_rtld_map).l_rw = &rtld_map_rw.l;
+ GLPM (dl_rtld_map).l_rw = &rtld_map_rw.l;
#if NO_TLS_OFFSET != 0
- GL (dl_rtld_map).l_rw->l_tls_offset = NO_TLS_OFFSET;
+ GLPM (dl_rtld_map).l_rw->l_tls_offset = NO_TLS_OFFSET;
#endif
/* If it hasn't happen yet record the startup time. */
@@ -477,18 +478,18 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
/* Transfer data about ourselves to the permanent link_map structure. */
#ifndef DONT_USE_BOOTSTRAP_MAP
- GL(dl_rtld_map).l_public.l_addr = info->l.l_public.l_addr;
- GL(dl_rtld_map).l_public.l_ld = info->l.l_public.l_ld;
- GL(dl_rtld_map).l_ld_readonly = info->l.l_ld_readonly;
- memcpy (GL(dl_rtld_map).l_info, info->l.l_info,
- sizeof GL(dl_rtld_map).l_info);
- GL(dl_rtld_map).l_mach = info->l.l_mach;
- GL(dl_rtld_map).l_relocated = 1;
+ GLPM(dl_rtld_map).l_public.l_addr = info->l.l_public.l_addr;
+ GLPM(dl_rtld_map).l_public.l_ld = info->l.l_public.l_ld;
+ GLPM(dl_rtld_map).l_ld_readonly = info->l.l_ld_readonly;
+ memcpy (GLPM(dl_rtld_map).l_info, info->l.l_info,
+ sizeof GLPM(dl_rtld_map).l_info);
+ GLPM(dl_rtld_map).l_mach = info->l.l_mach;
+ GLPM(dl_rtld_map).l_relocated = 1;
#endif
- _dl_setup_hash (&GL(dl_rtld_map));
- GL(dl_rtld_map).l_real = &GL(dl_rtld_map);
- GL(dl_rtld_map).l_map_start = (ElfW(Addr)) &__ehdr_start;
- GL(dl_rtld_map).l_map_end = (ElfW(Addr)) _end;
+ _dl_setup_hash (&GLPM(dl_rtld_map));
+ GLPM(dl_rtld_map).l_real = &GLPM(dl_rtld_map);
+ GLPM(dl_rtld_map).l_map_start = (ElfW(Addr)) &__ehdr_start;
+ GLPM(dl_rtld_map).l_map_end = (ElfW(Addr)) _end;
/* Initialize the stack end variable. */
__libc_stack_end = __builtin_frame_address (0);
@@ -513,7 +514,7 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
}
#ifdef DONT_USE_BOOTSTRAP_MAP
-# define bootstrap_map GL(dl_rtld_map)
+# define bootstrap_map GLPM(dl_rtld_map)
#else
# define bootstrap_map info.l
#endif
@@ -1036,8 +1037,8 @@ ERROR: audit interface '%s' requires version %d (maximum supported version %d);
/* The dynamic linker link map is statically allocated, so the
cookie in _dl_new_object has not happened. */
- link_map_audit_state (&GL (dl_rtld_map), GLRO (dl_naudit))->cookie
- = (intptr_t) &GL (dl_rtld_map);
+ link_map_audit_state (&GLPM (dl_rtld_map), GLRO (dl_naudit))->cookie
+ = (intptr_t) &GLPM (dl_rtld_map);
++GLRO(dl_naudit);
@@ -1065,7 +1066,7 @@ load_audit_modules (struct link_map_private *main_map,
if (GLRO(dl_naudit) > 0)
{
_dl_audit_objopen (main_map, LM_ID_BASE);
- _dl_audit_objopen (&GL(dl_rtld_map), LM_ID_BASE);
+ _dl_audit_objopen (&GLPM(dl_rtld_map), LM_ID_BASE);
}
}
@@ -1075,7 +1076,7 @@ static void
rtld_chain_load (struct link_map_private *main_map, char *argv0)
{
/* The dynamic loader run against itself. */
- const char *rtld_soname = l_soname (&GL(dl_rtld_map));
+ const char *rtld_soname = l_soname (&GLPM(dl_rtld_map));
if (l_soname (main_map) != NULL
&& strcmp (rtld_soname, l_soname (main_map)) == 0)
_dl_fatal_printf ("%s: loader cannot load itself\n", rtld_soname);
@@ -1161,7 +1162,7 @@ rtld_setup_main_map (struct link_map_private *main_map)
_dl_rtld_libname.name = ((const char *) main_map->l_public.l_addr
+ ph->p_vaddr);
/* _dl_rtld_libname.next = NULL; Already zero. */
- GL(dl_rtld_map).l_libname = &_dl_rtld_libname;
+ GLPM(dl_rtld_map).l_libname = &_dl_rtld_libname;
has_interp = true;
break;
@@ -1243,16 +1244,16 @@ rtld_setup_main_map (struct link_map_private *main_map)
= (char *) main_map->l_tls_initimage + main_map->l_public.l_addr;
if (! main_map->l_map_end)
main_map->l_map_end = ~0;
- if (! GL(dl_rtld_map).l_libname && GL(dl_rtld_map).l_public.l_name)
+ if (! GLPM(dl_rtld_map).l_libname && GLPM(dl_rtld_map).l_public.l_name)
{
/* We were invoked directly, so the program might not have a
PT_INTERP. */
- _dl_rtld_libname.name = GL(dl_rtld_map).l_public.l_name;
+ _dl_rtld_libname.name = GLPM(dl_rtld_map).l_public.l_name;
/* _dl_rtld_libname.next = NULL; Already zero. */
- GL(dl_rtld_map).l_libname = &_dl_rtld_libname;
+ GLPM(dl_rtld_map).l_libname = &_dl_rtld_libname;
}
else
- assert (GL(dl_rtld_map).l_libname); /* How else did we get here? */
+ assert (GLPM(dl_rtld_map).l_libname); /* How else did we get here? */
return has_interp;
}
@@ -1369,7 +1370,7 @@ dl_main (const ElfW(Phdr) *phdr,
char **orig_argv = _dl_argv;
/* Note the place where the dynamic linker actually came from. */
- GL(dl_rtld_map).l_public.l_name = rtld_progname;
+ GLPM(dl_rtld_map).l_public.l_name = rtld_progname;
while (_dl_argc > 1)
if (! strcmp (_dl_argv[1], "--list"))
@@ -1645,22 +1646,22 @@ dl_main (const ElfW(Phdr) *phdr,
/* If the current libname is different from the SONAME, add the
latter as well. */
{
- const char *soname = l_soname (&GL(dl_rtld_map));
+ const char *soname = l_soname (&GLPM(dl_rtld_map));
if (soname != NULL
- && strcmp (GL(dl_rtld_map).l_libname->name, soname) != 0)
+ && strcmp (GLPM(dl_rtld_map).l_libname->name, soname) != 0)
{
static struct libname_list newname;
newname.name = soname;
newname.next = NULL;
newname.dont_free = 1;
- assert (GL(dl_rtld_map).l_libname->next == NULL);
- GL(dl_rtld_map).l_libname->next = &newname;
+ assert (GLPM(dl_rtld_map).l_libname->next == NULL);
+ GLPM(dl_rtld_map).l_libname->next = &newname;
}
}
/* The ld.so must be relocated since otherwise loading audit modules
will fail since they reuse the very same ld.so. */
- assert (GL(dl_rtld_map).l_relocated);
+ assert (GLPM(dl_rtld_map).l_relocated);
if (! rtld_is_main)
{
@@ -1690,7 +1691,7 @@ dl_main (const ElfW(Phdr) *phdr,
_exit (has_interp ? 0 : 2);
}
- struct link_map **first_preload = &GL(dl_rtld_map).l_public.l_next;
+ struct link_map **first_preload = &GLPM(dl_rtld_map).l_public.l_next;
/* Set up the data structures for the system-supplied DSO early,
so they can influence _dl_init_paths. */
setup_vdso (main_map, &first_preload);
@@ -1703,20 +1704,20 @@ dl_main (const ElfW(Phdr) *phdr,
call_init_paths (&state);
/* Initialize _r_debug_extended. */
- struct r_debug *r = _dl_debug_initialize (GL(dl_rtld_map).l_public.l_addr,
+ struct r_debug *r = _dl_debug_initialize (GLPM(dl_rtld_map).l_public.l_addr,
LM_ID_BASE);
r->r_state = RT_CONSISTENT;
/* Put the link_map for ourselves on the chain so it can be found by
name. Note that at this point the global chain of link maps contains
exactly one element, which is pointed to by dl_loaded. */
- if (! GL(dl_rtld_map).l_public.l_name)
+ if (! GLPM(dl_rtld_map).l_public.l_name)
/* If not invoked directly, the dynamic linker shared object file was
found by the PT_INTERP name. */
- GL(dl_rtld_map).l_public.l_name = (char *) GL(dl_rtld_map).l_libname->name;
- GL(dl_rtld_map).l_type = lt_library;
- main_map->l_public.l_next = &GL(dl_rtld_map).l_public;
- GL(dl_rtld_map).l_public.l_prev = &main_map->l_public;
+ GLPM(dl_rtld_map).l_public.l_name = (char *) GLPM(dl_rtld_map).l_libname->name;
+ GLPM(dl_rtld_map).l_type = lt_library;
+ main_map->l_public.l_next = &GLPM(dl_rtld_map).l_public;
+ GLPM(dl_rtld_map).l_public.l_prev = &main_map->l_public;
++GL(dl_ns)[LM_ID_BASE]._ns_nloaded;
++GL(dl_load_adds);
@@ -1734,8 +1735,8 @@ dl_main (const ElfW(Phdr) *phdr,
const ElfW(Phdr) *rtld_phdr = (const void *) rtld_ehdr + rtld_ehdr->e_phoff;
- GL(dl_rtld_map).l_phdr = rtld_phdr;
- GL(dl_rtld_map).l_phnum = rtld_ehdr->e_phnum;
+ GLPM(dl_rtld_map).l_phdr = rtld_phdr;
+ GLPM(dl_rtld_map).l_phnum = rtld_ehdr->e_phnum;
/* PT_GNU_RELRO is usually the last phdr. */
@@ -1743,15 +1744,15 @@ dl_main (const ElfW(Phdr) *phdr,
while (cnt-- > 0)
if (rtld_phdr[cnt].p_type == PT_GNU_RELRO)
{
- GL(dl_rtld_map).l_relro_addr = rtld_phdr[cnt].p_vaddr;
- GL(dl_rtld_map).l_relro_size = rtld_phdr[cnt].p_memsz;
+ GLPM(dl_rtld_map).l_relro_addr = rtld_phdr[cnt].p_vaddr;
+ GLPM(dl_rtld_map).l_relro_size = rtld_phdr[cnt].p_memsz;
break;
}
/* Add the dynamic linker to the TLS list if it also uses TLS. */
- if (GL(dl_rtld_map).l_tls_blocksize != 0)
+ if (GLPM(dl_rtld_map).l_tls_blocksize != 0)
/* Assign a module ID. Do this before loading any audit modules. */
- _dl_assign_tls_modid (&GL(dl_rtld_map));
+ _dl_assign_tls_modid (&GLPM(dl_rtld_map));
audit_list_add_dynamic_tag (&state.audit_list, main_map, DT_AUDIT);
audit_list_add_dynamic_tag (&state.audit_list, main_map, DT_DEPAUDIT);
@@ -1945,12 +1946,12 @@ dl_main (const ElfW(Phdr) *phdr,
main_map->l_searchlist.r_list[--i]->l_global = 1;
/* Remove _dl_rtld_map from the chain. */
- GL(dl_rtld_map).l_public.l_prev->l_next = GL(dl_rtld_map).l_public.l_next;
- if (GL(dl_rtld_map).l_public.l_next != NULL)
- GL(dl_rtld_map).l_public.l_next->l_prev = GL(dl_rtld_map).l_public.l_prev;
+ GLPM(dl_rtld_map).l_public.l_prev->l_next = GLPM(dl_rtld_map).l_public.l_next;
+ if (GLPM(dl_rtld_map).l_public.l_next != NULL)
+ GLPM(dl_rtld_map).l_public.l_next->l_prev = GLPM(dl_rtld_map).l_public.l_prev;
for (i = 1; i < main_map->l_searchlist.r_nlist; ++i)
- if (main_map->l_searchlist.r_list[i] == &GL(dl_rtld_map))
+ if (main_map->l_searchlist.r_list[i] == &GLPM(dl_rtld_map))
break;
bool rtld_multiple_ref = false;
@@ -1962,21 +1963,21 @@ dl_main (const ElfW(Phdr) *phdr,
its symbol search order. */
rtld_multiple_ref = true;
- GL(dl_rtld_map).l_public.l_prev
+ GLPM(dl_rtld_map).l_public.l_prev
= &main_map->l_searchlist.r_list[i - 1]->l_public;
if (__glibc_likely (state.mode == rtld_mode_normal))
{
- GL(dl_rtld_map).l_public.l_next
+ GLPM(dl_rtld_map).l_public.l_next
= (i + 1 < main_map->l_searchlist.r_nlist
? &main_map->l_searchlist.r_list[i + 1]->l_public
: NULL);
#ifdef NEED_DL_SYSINFO_DSO
if (GLRO(dl_sysinfo_map) != NULL
- && (GL(dl_rtld_map).l_public.l_prev->l_next
+ && (GLPM(dl_rtld_map).l_public.l_prev->l_next
== &GLRO(dl_sysinfo_map)->l_public)
- && (GL(dl_rtld_map).l_public.l_next
+ && (GLPM(dl_rtld_map).l_public.l_next
!= &GLRO(dl_sysinfo_map)->l_public))
- GL(dl_rtld_map).l_public.l_prev = &GLRO(dl_sysinfo_map)->l_public;
+ GLPM(dl_rtld_map).l_public.l_prev = &GLRO(dl_sysinfo_map)->l_public;
#endif
}
else
@@ -1985,17 +1986,17 @@ dl_main (const ElfW(Phdr) *phdr,
In this case it doesn't matter much where we put the
interpreter object, so we just initialize the list pointer so
that the assertion below holds. */
- GL(dl_rtld_map).l_public.l_next
- = GL(dl_rtld_map).l_public.l_prev->l_next;
+ GLPM(dl_rtld_map).l_public.l_next
+ = GLPM(dl_rtld_map).l_public.l_prev->l_next;
- assert (GL(dl_rtld_map).l_public.l_prev->l_next
- == GL(dl_rtld_map).l_public.l_next);
- GL(dl_rtld_map).l_public.l_prev->l_next = &GL(dl_rtld_map).l_public;
- if (GL(dl_rtld_map).l_public.l_next != NULL)
+ assert (GLPM(dl_rtld_map).l_public.l_prev->l_next
+ == GLPM(dl_rtld_map).l_public.l_next);
+ GLPM(dl_rtld_map).l_public.l_prev->l_next = &GLPM(dl_rtld_map).l_public;
+ if (GLPM(dl_rtld_map).l_public.l_next != NULL)
{
- assert (GL(dl_rtld_map).l_public.l_next->l_prev
- == GL(dl_rtld_map).l_public.l_prev);
- GL(dl_rtld_map).l_public.l_next->l_prev = &GL(dl_rtld_map).l_public;
+ assert (GLPM(dl_rtld_map).l_public.l_next->l_prev
+ == GLPM(dl_rtld_map).l_public.l_prev);
+ GLPM(dl_rtld_map).l_public.l_next->l_prev = &GLPM(dl_rtld_map).l_public;
}
}
@@ -2138,7 +2139,7 @@ dl_main (const ElfW(Phdr) *phdr,
while (i-- > 0)
{
struct link_map_private *l = main_map->l_initfini[i];
- if (l != &GL(dl_rtld_map) && ! l->l_faked)
+ if (l != &GLPM(dl_rtld_map) && ! l->l_faked)
{
args.l = l;
_dl_receive_error (print_unresolved, relocate_doit,
@@ -2288,7 +2289,7 @@ dl_main (const ElfW(Phdr) *phdr,
/* Also allocated with the fake malloc(). */
l->l_free_initfini = 0;
- if (l != &GL(dl_rtld_map))
+ if (l != &GLPM(dl_rtld_map))
_dl_relocate_object (l, l->l_scope, GLRO(dl_lazy) ? RTLD_LAZY : 0,
consider_profiling);
@@ -2349,8 +2350,8 @@ dl_main (const ElfW(Phdr) *phdr,
rtld_timer_start (&start);
/* Mark the link map as not yet relocated again. */
- GL(dl_rtld_map).l_relocated = 0;
- _dl_relocate_object (&GL(dl_rtld_map), main_map->l_scope, 0, 0);
+ GLPM(dl_rtld_map).l_relocated = 0;
+ _dl_relocate_object (&GLPM(dl_rtld_map), main_map->l_scope, 0, 0);
rtld_timer_accum (&relocate_time, start);
}
diff --git a/elf/setup-vdso.h b/elf/setup-vdso.h
index 8aaa44ca26..2934ed187a 100644
--- a/elf/setup-vdso.h
+++ b/elf/setup-vdso.h
@@ -93,8 +93,8 @@ setup_vdso (struct link_map_private *main_map __attribute__ ((unused)),
/* Rearrange the list so this DSO appears after rtld_map. */
assert (l->l_public.l_next == NULL);
assert (l->l_public.l_prev == &main_map->l_public);
- GL(dl_rtld_map).l_public.l_next = &l->l_public;
- l->l_public.l_prev = &GL(dl_rtld_map).l_public;
+ GLPM(dl_rtld_map).l_public.l_next = &l->l_public;
+ l->l_public.l_prev = &GLPM(dl_rtld_map).l_public;
*first_preload = &l->l_public.l_next;
# else
GL(dl_nns) = 1;
diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
index c5d49cae62..0015fcf993 100644
--- a/sysdeps/generic/ldsodefs.h
+++ b/sysdeps/generic/ldsodefs.h
@@ -406,11 +406,6 @@ struct rtld_global
/* List of search directories. */
EXTERN struct r_search_path_elem *_dl_all_dirs;
-#ifdef SHARED
- /* Structure describing the dynamic linker itself. */
- EXTERN struct link_map_private _dl_rtld_map;
-#endif
-
#if !PTHREAD_IN_LIBC && defined SHARED \
&& defined __rtld_lock_default_lock_recursive
EXTERN void (*_dl_rtld_lock_recursive) (void *);
@@ -528,6 +523,23 @@ extern struct rtld_global _rtld_global __rtld_global_attribute__;
# undef __rtld_global_attribute__
#endif
+#ifdef SHARED
+/* Implementation structure for the protected memory area. */
+struct rtld_protmem
+{
+ /* Structure describing the dynamic linker itself. */
+ EXTERN struct link_map_private _dl_rtld_map;
+};
+extern struct rtld_protmem _rtld_protmem attribute_hidden;
+#endif /* SHARED */
+
+/* GLPM(FIELD) denotes the FIELD in the protected memory area. */
+#ifdef SHARED
+# define GLPM(name) _rtld_protmem._##name
+#else
+# define GLPM(name) _##name
+#endif
+
#ifndef SHARED
# define GLRO(name) _##name
#else
@@ -1334,7 +1346,7 @@ static inline bool
is_rtld_link_map (const struct link_map_private *map)
{
#ifdef SHARED
- return map == &GL (dl_rtld_map);
+ return map == &GLPM (dl_rtld_map);
#else
return false;
#endif
diff --git a/sysdeps/x86/dl-cet.c b/sysdeps/x86/dl-cet.c
index 60ea1cb558..c47b2c8f4e 100644
--- a/sysdeps/x86/dl-cet.c
+++ b/sysdeps/x86/dl-cet.c
@@ -107,8 +107,8 @@ dl_cet_check (struct link_map *m, const char *program)
/* Skip CET check for ld.so since ld.so is CET-enabled.
CET will be disabled later if CET isn't enabled in
executable. */
- if (l == &GL(dl_rtld_map)
- || l->l_real == &GL(dl_rtld_map)
+ if (l == &GLPM(dl_rtld_map)
+ || l->l_real == &GLPM(dl_rtld_map)
|| (program && l == m))
continue;
#endif
diff --git a/sysdeps/x86/dl-prop.h b/sysdeps/x86/dl-prop.h
index f24fc1b028..64710b76cc 100644
--- a/sysdeps/x86/dl-prop.h
+++ b/sysdeps/x86/dl-prop.h
@@ -46,7 +46,7 @@ dl_isa_level_check (struct link_map_private *m, const char *program)
#ifdef SHARED
/* Skip ISA level check for ld.so since ld.so won't run if its ISA
level is higher than CPU. */
- if (l == &GL(dl_rtld_map) || l->l_real == &GL(dl_rtld_map))
+ if (l == &GLPM(dl_rtld_map) || l->l_real == &GLPM(dl_rtld_map))
continue;
#endif
--
2.43.0
* [PATCH v3 23/32] elf: Bootstrap allocation for future protected memory allocator
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (21 preceding siblings ...)
2023-12-07 10:32 ` [PATCH v3 22/32] elf: Introduce GLPM accessor for the protected memory area Florian Weimer
@ 2023-12-07 10:32 ` Florian Weimer
2024-02-28 15:04 ` Joseph Myers
2023-12-07 10:32 ` [PATCH v3 24/32] elf: Implement a basic " Florian Weimer
` (9 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:32 UTC (permalink / raw)
To: libc-alpha
A subsequent change will place link maps into memory which is
read-only most of the time. This means that the link map for
ld.so itself (GLPM (dl_rtld_map)) needs to be put there as well,
which requires allocating it dynamically.
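
Roughly, the bootstrap path added below comes down to an early
anonymous mmap that must be inlinable before self-relocation (the
Linux version uses MMAP_CALL_INTERNAL directly for this reason):

  /* sysdeps/generic/dl-early_mmap.h (generic version).  */
  static inline void *
  _dl_early_mmap (size_t size)
  {
    void *ret = __mmap (NULL, size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    return ret == MAP_FAILED ? NULL : ret;
  }

  /* elf/dl-protmem_bootstrap.h.  */
  static inline __attribute__ ((always_inline)) struct rtld_protmem *
  _dl_protmem_bootstrap (void)
  {
    return _dl_early_mmap (sizeof (struct rtld_protmem));
  }

_dl_start records the result (or NULL, using a stack stub in the
failure case) and _dl_start_final reports the fatal error only after
self-relocation, once _dl_fatal_printf can be called.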
---
elf/Makefile | 1 +
elf/dl-protmem_bootstrap.h | 29 ++++
elf/rtld.c | 88 ++++++----
elf/tst-rtld-nomem.c | 177 ++++++++++++++++++++
sysdeps/generic/dl-early_mmap.h | 35 ++++
sysdeps/generic/ldsodefs.h | 6 +-
sysdeps/mips/Makefile | 6 +
sysdeps/unix/sysv/linux/dl-early_allocate.c | 17 +-
sysdeps/unix/sysv/linux/dl-early_mmap.h | 41 +++++
9 files changed, 346 insertions(+), 54 deletions(-)
create mode 100644 elf/dl-protmem_bootstrap.h
create mode 100644 elf/tst-rtld-nomem.c
create mode 100644 sysdeps/generic/dl-early_mmap.h
create mode 100644 sysdeps/unix/sysv/linux/dl-early_mmap.h
diff --git a/elf/Makefile b/elf/Makefile
index afec7be084..feeaffe533 100644
--- a/elf/Makefile
+++ b/elf/Makefile
@@ -443,6 +443,7 @@ tests += \
tst-p_align3 \
tst-relsort1 \
tst-ro-dynamic \
+ tst-rtld-nomem \
tst-rtld-run-static \
tst-single_threaded \
tst-single_threaded-pthread \
diff --git a/elf/dl-protmem_bootstrap.h b/elf/dl-protmem_bootstrap.h
new file mode 100644
index 0000000000..2ba0973d07
--- /dev/null
+++ b/elf/dl-protmem_bootstrap.h
@@ -0,0 +1,29 @@
+/* Bootstrap allocation for the protected memory area.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <dl-early_mmap.h>
+
+/* Return a pointer to the protected memory area, or NULL if
+ allocation fails. This function is called before self-relocation,
+ and the system call needs to be inlined for (most)
+ HIDDEN_VAR_NEEDS_DYNAMIC_RELOC targets. */
+static inline __attribute__ ((always_inline)) struct rtld_protmem *
+_dl_protmem_bootstrap (void)
+{
+ return _dl_early_mmap (sizeof (struct rtld_protmem));
+}
diff --git a/elf/rtld.c b/elf/rtld.c
index 733dbf46a3..4abede1bab 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -53,6 +53,7 @@
#include <dl-find_object.h>
#include <dl-audit-check.h>
#include <dl-call_tls_init_tp.h>
+#include <dl-protmem_bootstrap.h>
#include <assert.h>
@@ -345,8 +346,6 @@ struct rtld_global _rtld_global =
extern struct rtld_global _rtld_local
__attribute__ ((alias ("_rtld_global"), visibility ("hidden")));
-struct rtld_protmem _rtld_protmem;
-
/* This variable is similar to _rtld_local, but all values are
read-only after relocation. */
struct rtld_global_ro _rtld_global_ro attribute_relro =
@@ -418,7 +417,7 @@ static ElfW(Addr) _dl_start_final (void *arg);
#else
struct dl_start_final_info
{
- struct link_map_private l;
+ struct rtld_protmem *protmem;
RTLD_TIMING_VAR (start_time);
};
static ElfW(Addr) _dl_start_final (void *arg,
@@ -453,6 +452,14 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
{
ElfW(Addr) start_addr;
+#ifndef DONT_USE_BOOTSTRAP_MAP
+ GLRO (dl_protmem) = info->protmem;
+#endif
+
+ /* Delayed error reporting after relocation processing. */
+ if (GLRO (dl_protmem) == NULL)
+ _dl_fatal_printf ("Fatal glibc error: Cannot allocate link map\n");
+
__rtld_malloc_init_stubs ();
/* Do not use an initializer for these members because it would
@@ -477,15 +484,6 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
#endif
/* Transfer data about ourselves to the permanent link_map structure. */
-#ifndef DONT_USE_BOOTSTRAP_MAP
- GLPM(dl_rtld_map).l_public.l_addr = info->l.l_public.l_addr;
- GLPM(dl_rtld_map).l_public.l_ld = info->l.l_public.l_ld;
- GLPM(dl_rtld_map).l_ld_readonly = info->l.l_ld_readonly;
- memcpy (GLPM(dl_rtld_map).l_info, info->l.l_info,
- sizeof GLPM(dl_rtld_map).l_info);
- GLPM(dl_rtld_map).l_mach = info->l.l_mach;
- GLPM(dl_rtld_map).l_relocated = 1;
-#endif
_dl_setup_hash (&GLPM(dl_rtld_map));
GLPM(dl_rtld_map).l_real = &GLPM(dl_rtld_map);
GLPM(dl_rtld_map).l_map_start = (ElfW(Addr)) &__ehdr_start;
@@ -529,44 +527,60 @@ _dl_start (void *arg)
rtld_timer_start (&info.start_time);
#endif
- /* Partly clean the `bootstrap_map' structure up. Don't use
- `memset' since it might not be built in or inlined and we cannot
- make function calls at this point. Use '__builtin_memset' if we
- know it is available. We do not have to clear the memory if we
- do not have to use the temporary bootstrap_map. Global variables
- are initialized to zero by default. */
-#ifndef DONT_USE_BOOTSTRAP_MAP
-# ifdef HAVE_BUILTIN_MEMSET
- __builtin_memset (bootstrap_map.l_info, '\0', sizeof (bootstrap_map.l_info));
-# else
- for (size_t cnt = 0;
- cnt < sizeof (bootstrap_map.l_info) / sizeof (bootstrap_map.l_info[0]);
- ++cnt)
- bootstrap_map.l_info[cnt] = 0;
-# endif
+ struct rtld_protmem *protmem = _dl_protmem_bootstrap ();
+ bool protmem_failed = protmem == NULL;
+ if (protmem_failed)
+ {
+ /* Allocate some space for a stub protected memory area on the
+ stack, to get to the point when we can report the error. */
+ protmem = alloca (sizeof (*protmem));
+
+ /* Partly clean the `bootstrap_map' structure up. Don't use
+ `memset' since it might not be built in or inlined and we
+ cannot make function calls at this point. Use
+ '__builtin_memset' if we know it is available. */
+#ifdef HAVE_BUILTIN_MEMSET
+ __builtin_memset (protmem->_dl_rtld_map.l_info,
+ '\0', sizeof (protmem->_dl_rtld_map.l_info));
+#else
+ for (size_t i = 0; i < array_length (protmem->_dl_rtld_map.l_info); ++i)
+ protmem->_dl_rtld_map.l_info[i] = NULL;
#endif
+ }
/* Figure out the run-time load address of the dynamic linker itself. */
- bootstrap_map.l_public.l_addr = elf_machine_load_address ();
+ protmem->_dl_rtld_map.l_public.l_addr = elf_machine_load_address ();
/* Read our own dynamic section and fill in the info array. */
- bootstrap_map.l_public.l_ld
- = (void *) bootstrap_map.l_public.l_addr + elf_machine_dynamic ();
- bootstrap_map.l_ld_readonly = DL_RO_DYN_SECTION;
- elf_get_dynamic_info (&bootstrap_map, true, false);
+ protmem->_dl_rtld_map.l_public.l_ld
+ = ((void *) protmem->_dl_rtld_map.l_public.l_addr
+ + elf_machine_dynamic ());
+ protmem->_dl_rtld_map.l_ld_readonly = DL_RO_DYN_SECTION;
+ elf_get_dynamic_info (&protmem->_dl_rtld_map, true, false);
#ifdef ELF_MACHINE_BEFORE_RTLD_RELOC
- ELF_MACHINE_BEFORE_RTLD_RELOC (&bootstrap_map, bootstrap_map.l_info);
+ ELF_MACHINE_BEFORE_RTLD_RELOC (&protmem->_dl_rtld_map,
+ protmem->_dl_rtld_map.l_info);
#endif
- if (bootstrap_map.l_public.l_addr)
+ if (protmem->_dl_rtld_map.l_public.l_addr)
{
/* Relocate ourselves so we can do normal function calls and
data access using the global offset table. */
- ELF_DYNAMIC_RELOCATE (&bootstrap_map, NULL, 0, 0, 0);
+ ELF_DYNAMIC_RELOCATE (&protmem->_dl_rtld_map, NULL, 0, 0, 0);
}
- bootstrap_map.l_relocated = 1;
+ protmem->_dl_rtld_map.l_relocated = 1;
+
+ /* Communicate the original mmap failure to _dl_start_final. */
+ if (protmem_failed)
+ protmem = NULL;
+
+#ifdef DONT_USE_BOOTSTRAP_MAP
+ GLRO (dl_protmem) = protmem;
+#else
+ info.protmem = protmem;
+#endif
/* Please note that we don't allow profiling of this object and
therefore need not test whether we have to allocate the array
@@ -1035,7 +1049,7 @@ ERROR: audit interface '%s' requires version %d (maximum supported version %d);
else
*last_audit = (*last_audit)->next = &newp->ifaces;
- /* The dynamic linker link map is statically allocated, so the
+ /* The dynamic linker link map is allocated separately, so the
cookie in _dl_new_object has not happened. */
link_map_audit_state (&GLPM (dl_rtld_map), GLRO (dl_naudit))->cookie
= (intptr_t) &GLPM (dl_rtld_map);
diff --git a/elf/tst-rtld-nomem.c b/elf/tst-rtld-nomem.c
new file mode 100644
index 0000000000..37dbfe2903
--- /dev/null
+++ b/elf/tst-rtld-nomem.c
@@ -0,0 +1,177 @@
+/* Test that out-of-memory during early ld.so startup reports an error.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+/* This test invokes execve with increasing RLIMIT_AS limits, to
+ trigger the early _dl_protmem_bootstrap memory allocation failure
+ and check that a proper error is reported for it. */
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+#include <support/check.h>
+#include <support/support.h>
+#include <support/xunistd.h>
+#include <sys/resource.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+static int
+do_test (void)
+{
+ long int page_size = sysconf (_SC_PAGE_SIZE);
+ TEST_VERIFY (page_size > 0);
+
+ struct rlimit rlim;
+ TEST_COMPARE (getrlimit (RLIMIT_AS, &rlim), 0);
+
+ /* Reduced once we encounter success. */
+ int kb_limit = 2048;
+
+ /* Exit status in case of test error. */
+ enum { unexpected_error = 17 };
+
+ /* Used to verify that at least one execve crash is encountered.
+ This is how execve reports late memory allocation failures due
+ to rlimit. */
+ bool crash_seen = false;
+
+ /* Set to true if the early out-of-memory error message is
+ encountered. */
+ bool oom_error_seen = false;
+
+ /* Set to true once success (the usage message) is encountered.
+ This is expected to happen only after oom_error_seen turns true,
+ otherwise the rlimit does not work. */
+ bool success_seen = false;
+
+ /* Try increasing rlimits. The kernel rounds down to page sizes, so
+ try only page size increments. */
+ for (int kb = 128; kb <= kb_limit; kb += page_size / 1024)
+ {
+ printf ("info: trying %d KiB\n", kb);
+
+ int pipe_stdout[2];
+ xpipe (pipe_stdout);
+ int pipe_stderr[2];
+ xpipe (pipe_stderr);
+
+ pid_t pid = xfork ();
+ if (pid == 0)
+ {
+ /* Restrict address space for the ld.so invocation. */
+ rlim.rlim_cur = kb * 1024;
+ int ret = setrlimit (RLIMIT_AS, &rlim);
+ TEST_COMPARE (ret, 0);
+ if (ret != 0)
+ _exit (unexpected_error);
+
+ /* Redirect output for capture. */
+ TEST_COMPARE (dup2 (pipe_stdout[1], STDOUT_FILENO),
+ STDOUT_FILENO);
+ TEST_COMPARE (dup2 (pipe_stderr[1], STDERR_FILENO),
+ STDERR_FILENO);
+
+ /* Try to invoke ld.so with the resource limit in place. */
+ char ldso[] = "ld.so";
+ char *const argv[] = { ldso, NULL };
+ execve (support_objdir_elf_ldso, argv, &argv[1]);
+ TEST_COMPARE (errno, ENOMEM);
+ _exit (unexpected_error);
+ }
+
+ int status;
+ xwaitpid (pid, &status, 0);
+
+ xclose (pipe_stdout[1]);
+ xclose (pipe_stderr[1]);
+
+ /* No output on stdout. */
+ char actual[1024];
+ ssize_t count = read (pipe_stdout[0], actual, sizeof (actual));
+ if (count < 0)
+ FAIL_EXIT1 ("read stdout: %m");
+ TEST_COMPARE_BLOB ("", 0, actual, count);
+
+ /* Read the standard error output. */
+ count = read (pipe_stderr[0], actual, sizeof (actual));
+ if (count < 0)
+ FAIL_EXIT1 ("read stderr: %m");
+
+ if (WIFEXITED (status) && WEXITSTATUS (status) == 1)
+ {
+ TEST_VERIFY (oom_error_seen);
+ static const char expected[] = "\
+ld.so: missing program name\n\
+Try 'ld.so --help' for more information.\n\
+";
+ TEST_COMPARE_BLOB (expected, strlen (expected), actual, count);
+ if (!success_seen)
+ {
+ puts ("info: first success");
+ /* Four more tries with increasing rlimit, to catch
+ potential secondary crashes. */
+ kb_limit = kb + page_size / 1024 * 4;
+ }
+ success_seen = true;
+ continue;
+ }
+ if (WIFEXITED (status) && WEXITSTATUS (status) == 127)
+ {
+ TEST_VERIFY (crash_seen);
+ TEST_VERIFY (!success_seen);
+ static const char expected[] =
+ "Fatal glibc error: Cannot allocate link map\n";
+ TEST_COMPARE_BLOB (expected, strlen (expected), actual, count);
+ if (!oom_error_seen)
+ puts ("info: first memory allocation error");
+ oom_error_seen = true;
+ continue;
+ }
+
+ TEST_VERIFY (!success_seen);
+ TEST_VERIFY (!oom_error_seen);
+
+ if (WIFEXITED (status))
+ {
+ /* Unexpected regular exit status. */
+ TEST_COMPARE (WIFEXITED (status), 1);
+ TEST_COMPARE_BLOB ("", 0, actual, count);
+ }
+ else if (WIFSIGNALED (status) && WTERMSIG (status) == SIGSEGV)
+ {
+ /* Very early out of memory. No output expected. */
+ TEST_COMPARE_BLOB ("", 0, actual, count);
+ if (!crash_seen)
+ puts ("info: first expected crash observed");
+ crash_seen = true;
+ }
+ else
+ {
+ /* Unexpected status. */
+ printf ("error: unexpected exit status %d\n", status);
+ support_record_failure ();
+ TEST_COMPARE_BLOB ("", 0, actual, count);
+ }
+ }
+
+ return 0;
+}
+
+#include <support/test-driver.c>
diff --git a/sysdeps/generic/dl-early_mmap.h b/sysdeps/generic/dl-early_mmap.h
new file mode 100644
index 0000000000..33dd8c2f68
--- /dev/null
+++ b/sysdeps/generic/dl-early_mmap.h
@@ -0,0 +1,35 @@
+/* Early anonymous mmap for ld.so, before self-relocation. Generic version.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#ifndef DL_EARLY_MMAP_H
+#define DL_EARLY_MMAP_H
+
+/* The generic version assumes that regular mmap works. It returns
+ NULL on failure. */
+static inline void *
+_dl_early_mmap (size_t size)
+{
+ void *ret = __mmap (NULL, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (ret == MAP_FAILED)
+ return NULL;
+ else
+ return ret;
+}
+
+#endif /* DL_EARLY_MMAP_H */
diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
index 0015fcf993..e8f7c8b70b 100644
--- a/sysdeps/generic/ldsodefs.h
+++ b/sysdeps/generic/ldsodefs.h
@@ -530,12 +530,11 @@ struct rtld_protmem
/* Structure describing the dynamic linker itself. */
EXTERN struct link_map_private _dl_rtld_map;
};
-extern struct rtld_protmem _rtld_protmem attribute_hidden;
#endif /* SHARED */
/* GLPM(FIELD) denotes the FIELD in the protected memory area. */
#ifdef SHARED
-# define GLPM(name) _rtld_protmem._##name
+# define GLPM(name) GLRO (dl_protmem)->_##name
#else
# define GLPM(name) _##name
#endif
@@ -673,6 +672,9 @@ struct rtld_global_ro
EXTERN enum dso_sort_algorithm _dl_dso_sort_algo;
#ifdef SHARED
+ /* Pointer to the protected memory area. */
+ EXTERN struct rtld_protmem *_dl_protmem;
+
/* We add a function table to _rtld_global which is then used to
call the function instead of going through the PLT. The result
is that we can avoid exporting the functions and we do not jump
diff --git a/sysdeps/mips/Makefile b/sysdeps/mips/Makefile
index d770e59fc9..551612cb1e 100644
--- a/sysdeps/mips/Makefile
+++ b/sysdeps/mips/Makefile
@@ -23,6 +23,12 @@ ASFLAGS-.o += $(pie-default)
ASFLAGS-.op += $(pie-default)
ifeq ($(subdir),elf)
+# _dl_start performs a system call before self-relocation, to allocate
+# the link map for ld.so itself. This involves a direct function
+# call. Build rtld.c in MIPS32 mode, so that this function call does
+# not require a run-time relocation.
+CFLAGS-rtld.c += -mno-mips16
+
ifneq ($(o32-fpabi),)
tests += tst-abi-interlink
diff --git a/sysdeps/unix/sysv/linux/dl-early_allocate.c b/sysdeps/unix/sysv/linux/dl-early_allocate.c
index 9d5976a3b7..c688097d6f 100644
--- a/sysdeps/unix/sysv/linux/dl-early_allocate.c
+++ b/sysdeps/unix/sysv/linux/dl-early_allocate.c
@@ -29,7 +29,7 @@
#include <unistd.h>
#include <brk_call.h>
-#include <mmap_call.h>
+#include <dl-early_mmap.h>
/* Defined in brk.c. */
extern void *__curbrk;
@@ -63,20 +63,7 @@ _dl_early_allocate (size_t size)
unfortunate ASLR layout decisions and kernel bugs, particularly
for static PIE. */
if (result == NULL)
- {
- long int ret;
- int prot = PROT_READ | PROT_WRITE;
- int flags = MAP_PRIVATE | MAP_ANONYMOUS;
-#ifdef __NR_mmap2
- ret = MMAP_CALL_INTERNAL (mmap2, 0, size, prot, flags, -1, 0);
-#else
- ret = MMAP_CALL_INTERNAL (mmap, 0, size, prot, flags, -1, 0);
-#endif
- if (INTERNAL_SYSCALL_ERROR_P (ret))
- result = NULL;
- else
- result = (void *) ret;
- }
+ result = _dl_early_mmap (size);
return result;
}
diff --git a/sysdeps/unix/sysv/linux/dl-early_mmap.h b/sysdeps/unix/sysv/linux/dl-early_mmap.h
new file mode 100644
index 0000000000..1d83daa6a6
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/dl-early_mmap.h
@@ -0,0 +1,41 @@
+/* Early anonymous mmap for ld.so, before self-relocation. Linux version.
+ Copyright (C) 2022-2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#ifndef DL_EARLY_MMAP_H
+#define DL_EARLY_MMAP_H
+
+#include <mmap_call.h>
+
+static inline __attribute__ ((always_inline)) void *
+_dl_early_mmap (size_t size)
+{
+ long int ret;
+ int prot = PROT_READ | PROT_WRITE;
+ int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+#ifdef __NR_mmap2
+ ret = MMAP_CALL_INTERNAL (mmap2, 0, size, prot, flags, -1, 0);
+#else
+ ret = MMAP_CALL_INTERNAL (mmap, 0, size, prot, flags, -1, 0);
+#endif
+ if (INTERNAL_SYSCALL_ERROR_P (ret))
+ return NULL;
+ else
+ return (void *) ret;
+}
+
+#endif /* DL_EARLY_MMAP_H */
--
2.43.0
* [PATCH v3 24/32] elf: Implement a basic protected memory allocator
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (22 preceding siblings ...)
2023-12-07 10:32 ` [PATCH v3 23/32] elf: Bootstrap allocation for future protected memory allocator Florian Weimer
@ 2023-12-07 10:32 ` Florian Weimer
2024-02-28 18:46 ` Joseph Myers
2023-12-07 10:32 ` [PATCH v3 25/32] elf: Move most of the _dl_find_object data to the protected heap Florian Weimer
` (8 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:32 UTC (permalink / raw)
To: libc-alpha
Use it to keep the link maps read-only most of the time. The path to
the link maps is not yet protected (they still come from GL (dl_nns)).
However, direct overwrites of l_info (l_info[DT_FINI] in particular)
are blocked.
In _dl_new_object, do not assume that the allocator provides
zeroed memory.
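
The discipline for writers is a begin/end bracket around any link map
mutation; a rough sketch of the pattern used in dl-open.c, dl-close.c
and dl-object.c below (size, name and errstring are placeholders here,
not variables from the patch):

  _dl_protmem_begin ();          /* Make the protected area writable.  */
  struct link_map_private *l = _dl_protmem_allocate (size);
  if (l == NULL)
    {
      /* Re-protect before signalling the error to the caller.  */
      _dl_protmem_end ();
      _dl_signal_error (ENOMEM, name, NULL, errstring);
    }
  memset (l, 0, sizeof (*l));    /* The allocator does not zero memory.  */
  /* ... fill in the link map ... */
  _dl_protmem_end ();            /* Re-protect before running user code.  */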
---
elf/Makefile | 11 +++
elf/dl-close.c | 20 ++++--
elf/dl-libc_freeres.c | 5 ++
elf/dl-load.c | 33 +++++++--
elf/dl-object.c | 24 ++++---
elf/dl-open.c | 18 +++++
elf/dl-protmem-internal.h | 39 +++++++++++
elf/dl-protmem.c | 132 +++++++++++++++++++++++++++++++++++
elf/dl-protmem.h | 93 ++++++++++++++++++++++++
elf/dl-protmem_bootstrap.h | 8 ++-
elf/rtld.c | 10 +++
elf/tst-relro-linkmap-mod1.c | 42 +++++++++++
elf/tst-relro-linkmap-mod2.c | 2 +
elf/tst-relro-linkmap-mod3.c | 2 +
elf/tst-relro-linkmap.c | 112 +++++++++++++++++++++++++++++
include/link.h | 3 +
sysdeps/generic/ldsodefs.h | 8 ++-
17 files changed, 544 insertions(+), 18 deletions(-)
create mode 100644 elf/dl-protmem-internal.h
create mode 100644 elf/dl-protmem.c
create mode 100644 elf/dl-protmem.h
create mode 100644 elf/tst-relro-linkmap-mod1.c
create mode 100644 elf/tst-relro-linkmap-mod2.c
create mode 100644 elf/tst-relro-linkmap-mod3.c
create mode 100644 elf/tst-relro-linkmap.c
diff --git a/elf/Makefile b/elf/Makefile
index feeaffe533..7ababc0fc4 100644
--- a/elf/Makefile
+++ b/elf/Makefile
@@ -72,6 +72,7 @@ dl-routines = \
dl-open \
dl-origin \
dl-printf \
+ dl-protmem \
dl-reloc \
dl-runtime \
dl-scope \
@@ -117,6 +118,7 @@ elide-routines.os = \
# These object files are only included in the dynamically-linked libc.
shared-only-routines = \
+ dl-protmem \
libc-dl-profile \
libc-dl-profstub \
libc-dl_find_object \
@@ -505,6 +507,7 @@ tests-internal += \
tst-dl_find_object-threads \
tst-dlmopen2 \
tst-ptrguard1 \
+ tst-relro-linkmap \
tst-stackguard1 \
tst-tls-surplus \
tst-tls3 \
@@ -872,6 +875,9 @@ modules-names += \
tst-null-argv-lib \
tst-p_alignmod-base \
tst-p_alignmod3 \
+ tst-relro-linkmap-mod1 \
+ tst-relro-linkmap-mod2 \
+ tst-relro-linkmap-mod3 \
tst-relsort1mod1 \
tst-relsort1mod2 \
tst-ro-dynamic-mod \
@@ -3031,3 +3037,8 @@ $(objpfx)tst-nodeps2-mod.so: $(common-objpfx)libc.so \
$(LINK.o) -Wl,--no-as-needed -nostartfiles -nostdlib -shared -o $@ $^
$(objpfx)tst-nodeps2.out: \
$(objpfx)tst-nodeps1-mod.so $(objpfx)tst-nodeps2-mod.so
+
+LDFLAGS-tst-relro-linkmap = -Wl,-E
+$(objpfx)tst-relro-linkmap: $(objpfx)tst-relro-linkmap-mod1.so
+$(objpfx)tst-relro-linkmap.out: $(objpfx)tst-dlopenfailmod1.so \
+ $(objpfx)tst-relro-linkmap-mod2.so $(objpfx)tst-relro-linkmap-mod3.so
diff --git a/elf/dl-close.c b/elf/dl-close.c
index 8f9d57df39..8391abe2d7 100644
--- a/elf/dl-close.c
+++ b/elf/dl-close.c
@@ -33,6 +33,7 @@
#include <tls.h>
#include <stap-probe.h>
#include <dl-find_object.h>
+#include <dl-protmem.h>
#include <dl-unmap-segments.h>
@@ -130,6 +131,9 @@ _dl_close_worker (struct link_map_private *map, bool force)
return;
}
+ /* Actual changes are about to happen. */
+ _dl_protmem_begin ();
+
Lmid_t nsid = map->l_ns;
struct link_namespaces *ns = &GL(dl_ns)[nsid];
@@ -260,7 +264,10 @@ _dl_close_worker (struct link_map_private *map, bool force)
/* Call its termination function. Do not do it for
half-cooked objects. Temporarily disable exception
- handling, so that errors are fatal. */
+ handling, so that errors are fatal.
+
+ Link maps are writable during this call, but avoiding
+ that is probably too costly. */
if (imap->l_rw->l_init_called)
_dl_catch_exception (NULL, _dl_call_fini, imap);
@@ -354,8 +361,11 @@ _dl_close_worker (struct link_map_private *map, bool force)
newp = (struct r_scope_elem **)
malloc (new_size * sizeof (struct r_scope_elem *));
if (newp == NULL)
- _dl_signal_error (ENOMEM, "dlclose", NULL,
- N_("cannot create scope list"));
+ {
+ _dl_protmem_end ();
+ _dl_signal_error (ENOMEM, "dlclose", NULL,
+ N_("cannot create scope list"));
+ }
}
/* Copy over the remaining scope elements. */
@@ -709,7 +719,7 @@ _dl_close_worker (struct link_map_private *map, bool force)
if (imap == GL(dl_initfirst))
GL(dl_initfirst) = NULL;
- free (imap);
+ _dl_free_object (imap);
}
}
@@ -758,6 +768,8 @@ _dl_close_worker (struct link_map_private *map, bool force)
}
dl_close_state = not_pending;
+
+ _dl_protmem_end ();
}
diff --git a/elf/dl-libc_freeres.c b/elf/dl-libc_freeres.c
index 65fc70837a..88c0e444b8 100644
--- a/elf/dl-libc_freeres.c
+++ b/elf/dl-libc_freeres.c
@@ -18,6 +18,7 @@
#include <ldsodefs.h>
#include <dl-find_object.h>
+#include <dl-protmem.h>
static bool
free_slotinfo (struct dtv_slotinfo_list **elemp)
@@ -52,6 +53,10 @@ __rtld_libc_freeres (void)
struct link_map_private *l;
struct r_search_path_elem *d;
+ /* We are about to write to link maps. This is not paired with
+ _dl_protmem_end because the process is going away anyway. */
+ _dl_protmem_begin ();
+
/* Remove all search directories. */
d = GL(dl_all_dirs);
while (d != GLRO(dl_init_all_dirs))
diff --git a/elf/dl-load.c b/elf/dl-load.c
index 30727afddb..560a83ea60 100644
--- a/elf/dl-load.c
+++ b/elf/dl-load.c
@@ -33,6 +33,7 @@
#include <sys/types.h>
#include <gnu/lib-names.h>
#include <alloc_buffer.h>
+#include <dl-protmem.h>
/* Type for the buffer we put the ELF header and hopefully the program
header. This buffer does not really have to be too large. In most
@@ -943,7 +944,8 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
free (l->l_libname);
if (l != NULL && l->l_phdr_allocated)
free ((void *) l->l_phdr);
- free (l);
+ if (l != NULL)
+ _dl_free_object (l);
free (realname);
_dl_signal_error (errval, name, NULL, errstring);
}
@@ -2251,6 +2253,22 @@ add_path (struct add_path_state *p, const struct r_search_path_struct *sps,
}
}
+/* Wrap cache_rpath to unprotect memory first if necessary. */
+static bool
+cache_rpath_unprotect (struct link_map_private *l,
+ struct r_search_path_struct *sp,
+ int tag,
+ const char *what,
+ bool *unprotected)
+{
+ if (sp->dirs == NULL && !*unprotected)
+ {
+ _dl_protmem_begin ();
+ *unprotected = true;
+ }
+ return cache_rpath (l, sp, tag, what);
+}
+
void
_dl_rtld_di_serinfo (struct link_map_private *loader, Dl_serinfo *si,
bool counting)
@@ -2268,6 +2286,7 @@ _dl_rtld_di_serinfo (struct link_map_private *loader, Dl_serinfo *si,
.si = si,
.allocptr = (char *) &si->dls_serpath[si->dls_cnt]
};
+ bool unprotected = false;
# define add_path(p, sps, flags) add_path(p, sps, 0) /* XXX */
@@ -2280,7 +2299,8 @@ _dl_rtld_di_serinfo (struct link_map_private *loader, Dl_serinfo *si,
struct link_map_private *l = loader;
do
{
- if (cache_rpath (l, &l->l_rpath_dirs, DT_RPATH, "RPATH"))
+ if (cache_rpath_unprotect (l, &l->l_rpath_dirs, DT_RPATH,
+ "RPATH", &unprotected))
add_path (&p, &l->l_rpath_dirs, XXX_RPATH);
l = l->l_loader;
}
@@ -2291,7 +2311,8 @@ _dl_rtld_di_serinfo (struct link_map_private *loader, Dl_serinfo *si,
{
l = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
if (l != NULL && l->l_type != lt_loaded && l != loader)
- if (cache_rpath (l, &l->l_rpath_dirs, DT_RPATH, "RPATH"))
+ if (cache_rpath_unprotect (l, &l->l_rpath_dirs, DT_RPATH,
+ "RPATH", &unprotected))
add_path (&p, &l->l_rpath_dirs, XXX_RPATH);
}
}
@@ -2300,7 +2321,8 @@ _dl_rtld_di_serinfo (struct link_map_private *loader, Dl_serinfo *si,
add_path (&p, &__rtld_env_path_list, XXX_ENV);
/* Look at the RUNPATH information for this binary. */
- if (cache_rpath (loader, &loader->l_runpath_dirs, DT_RUNPATH, "RUNPATH"))
+ if (cache_rpath_unprotect (loader, &loader->l_runpath_dirs, DT_RUNPATH,
+ "RUNPATH", &unprotected))
add_path (&p, &loader->l_runpath_dirs, XXX_RUNPATH);
/* XXX
@@ -2315,4 +2337,7 @@ _dl_rtld_di_serinfo (struct link_map_private *loader, Dl_serinfo *si,
/* Count the struct size before the string area, which we didn't
know before we completed dls_cnt. */
si->dls_size += (char *) &si->dls_serpath[si->dls_cnt] - (char *) si;
+
+ if (unprotected)
+ _dl_protmem_end ();
}
diff --git a/elf/dl-object.c b/elf/dl-object.c
index 0741371b80..0ea3f6e2da 100644
--- a/elf/dl-object.c
+++ b/elf/dl-object.c
@@ -21,6 +21,7 @@
#include <stdlib.h>
#include <unistd.h>
#include <ldsodefs.h>
+#include <dl-protmem.h>
#include <assert.h>
@@ -89,15 +90,19 @@ _dl_new_object (char *realname, const char *libname, int type,
# define audit_space 0
#endif
- new = calloc (sizeof (*new)
- + sizeof (struct link_map_private *)
- + sizeof (*newname) + libname_len, 1);
+ size_t l_size = (sizeof (*new)
+ + sizeof (struct link_map_private *)
+ + sizeof (*newname) + libname_len);
+
+ new = _dl_protmem_allocate (l_size);
if (new == NULL)
return NULL;
+ memset (new, 0, sizeof (*new));
+ new->l_size = l_size;
new->l_rw = calloc (1, sizeof (*new->l_rw) + audit_space);
if (new->l_rw == NULL)
{
- free (new);
+ _dl_protmem_free (new, l_size);
return NULL;
}
@@ -108,7 +113,7 @@ _dl_new_object (char *realname, const char *libname, int type,
new->l_libname = newname
= (struct libname_list *) (new->l_symbolic_searchlist.r_list + 1);
newname->name = (char *) memcpy (newname + 1, libname, libname_len);
- /* newname->next = NULL; We use calloc therefore not necessary. */
+ newname->next = NULL;
newname->dont_free = 1;
/* When we create the executable link map, or a VDSO link map, we start
@@ -143,12 +148,9 @@ _dl_new_object (char *realname, const char *libname, int type,
#ifdef SHARED
for (unsigned int cnt = 0; cnt < naudit; ++cnt)
- /* No need to initialize bindflags due to calloc. */
link_map_audit_state (new, cnt)->cookie = (uintptr_t) new;
#endif
- /* new->l_global = 0; We use calloc therefore not necessary. */
-
/* Use the 'l_scope_mem' array by default for the 'l_scope'
information. If we need more entries we will allocate a large
array dynamically. */
@@ -267,3 +269,9 @@ _dl_new_object (char *realname, const char *libname, int type,
return new;
}
+
+void
+_dl_free_object (struct link_map_private *l)
+{
+ _dl_protmem_free (l, l->l_size);
+}
diff --git a/elf/dl-open.c b/elf/dl-open.c
index d270672c1f..afac8498be 100644
--- a/elf/dl-open.c
+++ b/elf/dl-open.c
@@ -37,6 +37,7 @@
#include <libc-early-init.h>
#include <gnu/lib-names.h>
#include <dl-find_object.h>
+#include <dl-protmem.h>
#include <dl-prop.h>
@@ -174,6 +175,8 @@ add_to_global_update (struct link_map_private *new)
{
struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];
+ _dl_protmem_begin ();
+
/* Now add the new entries. */
unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
@@ -204,6 +207,8 @@ add_to_global_update (struct link_map_private *new)
atomic_write_barrier ();
ns->_ns_main_searchlist->r_nlist = new_nlist;
+
+ _dl_protmem_end ();
}
/* Search link maps in all namespaces for the DSO that contains the object at
@@ -560,6 +565,11 @@ dl_open_worker_begin (void *a)
args->nsid = call_map->l_ns;
}
+ /* Prepare for link map updates. If dl_open_worker below returns
+ normally, a matching _dl_protmem_end call is performed there. On
+ an exception, the handler in the caller has to perform it. */
+ _dl_protmem_begin ();
+
/* The namespace ID is now known. Keep track of whether libc.so was
already loaded, to determine whether it is necessary to call the
early initialization routine (or clear libc_map on error). */
@@ -808,6 +818,10 @@ dl_open_worker (void *a)
_dl_signal_exception (err, &ex, NULL);
}
+ /* Make state read-only before running user code in ELF
+ constructors. */
+ _dl_protmem_end ();
+
if (!args->worker_continue)
return;
@@ -941,6 +955,10 @@ no more namespaces available for dlmopen()"));
the flag here. */
}
+ /* Due to the exception, we did not end the protmem transaction
+ before. */
+ _dl_protmem_end ();
+
/* Release the lock. */
__rtld_lock_unlock_recursive (GL(dl_load_lock));
diff --git a/elf/dl-protmem-internal.h b/elf/dl-protmem-internal.h
new file mode 100644
index 0000000000..ce50d174a6
--- /dev/null
+++ b/elf/dl-protmem-internal.h
@@ -0,0 +1,39 @@
+/* Protected memory allocator for ld.so. Internal interfaces.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+/* These declarations are needed by <dl-protmem_bootstrap.h>, which
+ has to be inlined into _dl_start. */
+
+/* Header before all protected memory allocations. */
+struct dl_protmem_header
+{
+ struct dl_protmem_header *next;
+ unsigned int size;
+};
+
+/* Singleton allocator state. It also serves as the bootstrap
+ allocation. */
+struct dl_protmem_state
+{
+ struct dl_protmem_header hdr; /* For consistency with other allocations. */
+ struct rtld_protmem protmem; /* GLRO (dl_protmem) points to this field. */
+
+ /* Allocator state: Linked list of allocations. Initially points to
+ this structure. */
+ struct dl_protmem_header *root;
+};
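For illustration only (not part of the patch): GLRO (dl_protmem) points at the
protmem member nested inside struct dl_protmem_state, and the allocator later
recovers its state from that pointer with offsetof. A minimal standalone sketch
of that container-of step, using placeholder field types and GCC-style
arithmetic on void * as in the patch:

/* Standalone sketch: recover the enclosing allocator state from a
   pointer to its nested protmem member.  Placeholder types only.  */
#include <assert.h>
#include <stddef.h>

struct rtld_protmem { int placeholder; };          /* stand-in */

struct dl_protmem_state
{
  struct { void *next; unsigned int size; } hdr;   /* bootstrap header */
  struct rtld_protmem protmem;                     /* GLRO (dl_protmem) */
  void *root;
};

int
main (void)
{
  static struct dl_protmem_state state;
  struct rtld_protmem *exported = &state.protmem;  /* what GLRO holds */
  struct dl_protmem_state *recovered
    = (void *) exported - offsetof (struct dl_protmem_state, protmem);
  assert (recovered == &state);
  return 0;
}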
diff --git a/elf/dl-protmem.c b/elf/dl-protmem.c
new file mode 100644
index 0000000000..f5a66868e6
--- /dev/null
+++ b/elf/dl-protmem.c
@@ -0,0 +1,132 @@
+/* Protected memory allocator for ld.so.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <ldsodefs.h>
+
+#include <dl-protmem.h>
+#include <dl-protmem-internal.h>
+
+#include <assert.h>
+#include <sys/mman.h>
+
+/* Nesting counter for _dl_protmem_begin/_dl_protmem_end. This is
+ primarily required because we may have a call sequence dlopen,
+ malloc, dlopen. Without the counter, _dl_protmem_end in the inner
+ dlopen would make a link map that is still being initialized
+ read-only. */
+static unsigned int _dl_protmem_begin_count;
+
+static inline struct dl_protmem_state *
+_dl_protmem_state (void)
+{
+ return ((void *) GLRO (dl_protmem)
+ - offsetof (struct dl_protmem_state, protmem));
+}
+
+void
+_dl_protmem_init (void)
+{
+ /* Go back from the start of the protected memory area to the
+ wrapping bootstrap allocation. */
+ struct dl_protmem_state *state = _dl_protmem_state ();
+ state->hdr.size = sizeof (struct dl_protmem_state);
+ state->root = &state->hdr;
+ _dl_protmem_begin_count = 1;
+}
+
+void *
+_dl_protmem_allocate (size_t size)
+{
+ assert (_dl_protmem_begin_count > 0);
+ assert (size > 0);
+
+ struct dl_protmem_header *hdr;
+
+ /* Add the header. */
+ unsigned int total_size;
+ if (__builtin_add_overflow (size, sizeof (*hdr), &total_size))
+ return NULL;
+
+ hdr = __mmap (NULL, total_size, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ if (hdr == MAP_FAILED)
+ return NULL;
+ hdr->size = total_size;
+
+ /* Put the allocation on the list of allocations. */
+ struct dl_protmem_state *state = _dl_protmem_state ();
+ hdr->next = state->root;
+ state->root = hdr;
+
+ /* Return a pointer to the user data. */
+ return (char *) hdr + sizeof (*hdr);
+}
+
+void
+_dl_protmem_free (void *ptr, size_t size)
+{
+ assert (_dl_protmem_begin_count > 0);
+
+ struct dl_protmem_header *hdr = ptr - sizeof (*hdr);
+ assert (hdr->size == size + sizeof (*hdr));
+
+ struct dl_protmem_state *state = _dl_protmem_state ();
+ if (hdr == state->root)
+ {
+ state->root = hdr->next;
+ (void) __munmap (hdr, hdr->size);
+ return;
+ }
+
+ for (struct dl_protmem_header *p = state->root; p != NULL; p = p->next)
+ if (p->next == hdr)
+ {
+ p->next = hdr->next;
+ (void) __munmap (hdr, hdr->size);
+ return;
+ }
+ _dl_fatal_printf ("\
+Fatal glibc error: Protected memory allocation not found during free\n");
+}
+
+void
+_dl_protmem_begin (void)
+{
+ if (_dl_protmem_begin_count++ > 0)
+ return;
+
+ struct dl_protmem_state *state = _dl_protmem_state ();
+ for (struct dl_protmem_header *hdr = state->root;
+ hdr != NULL; hdr = hdr->next)
+ if (__mprotect (hdr, hdr->size, PROT_READ | PROT_WRITE) != 0)
+ _dl_signal_error (ENOMEM, NULL, NULL,
+ "Cannot make protected memory writable");
+}
+
+void
+_dl_protmem_end (void)
+{
+ if (--_dl_protmem_begin_count > 0)
+ return;
+
+ struct dl_protmem_state *state = _dl_protmem_state ();
+ for (struct dl_protmem_header *hdr = state->root;
+ hdr != NULL; hdr = hdr->next)
+ /* If the mapping is left read-write, this is not fatal. */
+ (void) __mprotect (hdr, hdr->size, PROT_READ);
+}
diff --git a/elf/dl-protmem.h b/elf/dl-protmem.h
new file mode 100644
index 0000000000..59aeaf630d
--- /dev/null
+++ b/elf/dl-protmem.h
@@ -0,0 +1,93 @@
+/* Protected memory allocator for ld.so.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+/* The protected memory allocation manages the memory for the GLPM
+ variables (in shared builds), and for additional memory managed by
+ _dl_protmem_allocate and _dl_protmem_free.
+
+ After a call to _dl_protmem_begin and until the matching call to
+ _dl_protmem_end, the GLPM variables and memory allocated using
+ _dl_protmem_allocate is writable. _dl_protmem_begin and
+ _dl_protmem_end calls can be nested. In this case, only the
+ outermost _dl_protmem_end call makes memory read-only. */
+
+#ifndef DL_PROTMEM_H
+#define DL_PROTMEM_H
+
+#include <stddef.h>
+
+#ifdef SHARED
+/* Must be called after _dl_allocate_rtld_map and before any of the
+ functions below. Implies the first _dl_protmem_begin call. */
+void _dl_protmem_init (void) attribute_hidden;
+
+/* Frees memory allocated using _dl_protmem_allocate. The passed size
+ must be the same that was passed to _dl_protmem_allocate.
+ Protected memory must be writable when this function is called. */
+void _dl_protmem_free (void *ptr, size_t size) attribute_hidden;
+
+/* Allocate protected memory of SIZE bytes. Returns NULL on
+ allocation failure. Protected memory must be writable when this
+ function is called. The allocation will be writable and contains
+ unspecified bytes (similar to malloc). */
+void *_dl_protmem_allocate (size_t size) attribute_hidden
+ __attribute_malloc__ __attribute_alloc_size__ ((1))
+ __attr_dealloc (_dl_protmem_free, 1);
+
+/* _dl_protmem_begin makes protected memory writable, and
+ _dl_protmem_end makes it read-only again. Calls to these functions
+ must be paired. Within this region, protected memory is writable.
+ See the initial description above.
+
+ Failure to make memory writable in _dl_protmem_end is communicated
+ via an ld.so exception, typically resulting in a dlopen failure.
+ This can happen after a call to fork if memory overcommitment is
+ disabled. */
+void _dl_protmem_begin (void) attribute_hidden;
+void _dl_protmem_end (void) attribute_hidden;
+
+#else /*!SHARED */
+/* The protected memory allocator does not exist for static builds.
+ Use malloc directly. */
+
+#include <stdlib.h>
+
+static inline void *
+_dl_protmem_allocate (size_t size)
+{
+ return calloc (size, 1);
+}
+
+static inline void
+_dl_protmem_free (void *ptr, size_t size)
+{
+ free (ptr);
+}
+
+static inline void
+_dl_protmem_begin (void)
+{
+}
+
+static inline void
+_dl_protmem_end (void)
+{
+}
+#endif /* !SHARED */
+
+#endif /* DL_PROTMEM_H */
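To illustrate the begin/allocate/end discipline described in the header comment
above, here is a small standalone model, not glibc code: one mmap'ed region
that is writable only between a begin call and the matching end call, the way
ld.so keeps the protected heap PROT_READ outside
_dl_protmem_begin/_dl_protmem_end.

/* Standalone model of the protection toggle; real ld.so uses
   _dl_protmem_begin, _dl_protmem_allocate and _dl_protmem_end.  */
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

static void *region;
static const size_t region_size = 1 << 16;

static void
protmem_begin (void)
{
  /* Make the whole region writable for the duration of the update.  */
  if (mprotect (region, region_size, PROT_READ | PROT_WRITE) != 0)
    abort ();
}

static void
protmem_end (void)
{
  /* Back to read-only; reads keep working, stray writes fault.  */
  if (mprotect (region, region_size, PROT_READ) != 0)
    abort ();
}

int
main (void)
{
  region = mmap (NULL, region_size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (region == MAP_FAILED)
    abort ();

  protmem_begin ();
  strcpy (region, "link map data");   /* updates only inside the window */
  protmem_end ();

  return strcmp (region, "link map data") != 0;
}

The nesting counter in dl-protmem.c is what lets an inner dlopen run while an
outer one still has the window open: only the outermost end re-protects.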
diff --git a/elf/dl-protmem_bootstrap.h b/elf/dl-protmem_bootstrap.h
index 2ba0973d07..a9d763bc7b 100644
--- a/elf/dl-protmem_bootstrap.h
+++ b/elf/dl-protmem_bootstrap.h
@@ -17,6 +17,7 @@
<https://www.gnu.org/licenses/>. */
#include <dl-early_mmap.h>
+#include <dl-protmem-internal.h>
/* Return a pointer to the protected memory area, or NULL if
allocation fails. This function is called before self-relocation,
@@ -25,5 +26,10 @@
static inline __attribute__ ((always_inline)) struct rtld_protmem *
_dl_protmem_bootstrap (void)
{
- return _dl_early_mmap (sizeof (struct rtld_protmem));
+ /* The protected memory area is nested within the bootstrap
+ allocation. */
+ struct dl_protmem_state *ptr = _dl_early_mmap (sizeof (*ptr));
+ if (ptr == NULL)
+ return NULL;
+ return &ptr->protmem;
}
diff --git a/elf/rtld.c b/elf/rtld.c
index 4abede1bab..fb752e0dfd 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -54,6 +54,7 @@
#include <dl-audit-check.h>
#include <dl-call_tls_init_tp.h>
#include <dl-protmem_bootstrap.h>
+#include <dl-protmem.h>
#include <assert.h>
@@ -460,6 +461,10 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
if (GLRO (dl_protmem) == NULL)
_dl_fatal_printf ("Fatal glibc error: Cannot allocate link map\n");
+ /* Set up the protected memory allocator, transferring the rtld link
+ map allocation in GLRO (dl_rtld_map). */
+ _dl_protmem_init ();
+
__rtld_malloc_init_stubs ();
/* Do not use an initializer for these members because it would
@@ -2385,6 +2390,11 @@ dl_main (const ElfW(Phdr) *phdr,
/* Auditing checkpoint: we have added all objects. */
_dl_audit_activity_nsid (LM_ID_BASE, LA_ACT_CONSISTENT);
+ /* Most of the initialization work has happened by this point, and
+ it should not be necessary to make the link maps read-write after
+ this point. */
+ _dl_protmem_end ();
+
/* Notify the debugger all new objects are now ready to go. We must re-get
the address since by now the variable might be in another object. */
r = _dl_debug_update (LM_ID_BASE);
diff --git a/elf/tst-relro-linkmap-mod1.c b/elf/tst-relro-linkmap-mod1.c
new file mode 100644
index 0000000000..dd73d26936
--- /dev/null
+++ b/elf/tst-relro-linkmap-mod1.c
@@ -0,0 +1,42 @@
+/* Module with the checking function for read-only link maps.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <link.h>
+#include <stdio.h>
+#include <unistd.h>
+
+/* Export for use by the main program, to avoid copy relocations on
+ _r_debug. */
+struct r_debug_extended *const r_debug_extended_address
+ = (struct r_debug_extended *) &_r_debug;
+
+/* The real definition is in the main program. */
+void
+check_relro_link_maps (const char *context)
+{
+ puts ("error: check_relro_link_maps not interposed");
+ _exit (1);
+}
+
+static void __attribute__ ((constructor))
+init (void)
+{
+ check_relro_link_maps ("ELF constructor (DSO)");
+}
+
+/* NB: destructors are not checked. Memory is writable when they run. */
diff --git a/elf/tst-relro-linkmap-mod2.c b/elf/tst-relro-linkmap-mod2.c
new file mode 100644
index 0000000000..f022264ffd
--- /dev/null
+++ b/elf/tst-relro-linkmap-mod2.c
@@ -0,0 +1,2 @@
+/* Same checking as the first module, but loaded via dlopen. */
+#include "tst-relro-linkmap-mod1.c"
diff --git a/elf/tst-relro-linkmap-mod3.c b/elf/tst-relro-linkmap-mod3.c
new file mode 100644
index 0000000000..b2b7349200
--- /dev/null
+++ b/elf/tst-relro-linkmap-mod3.c
@@ -0,0 +1,2 @@
+/* No checking possible because the check_relro_link_maps function
+ from the main program is inaccessible after dlopen. */
diff --git a/elf/tst-relro-linkmap.c b/elf/tst-relro-linkmap.c
new file mode 100644
index 0000000000..08cfd32c52
--- /dev/null
+++ b/elf/tst-relro-linkmap.c
@@ -0,0 +1,112 @@
+/* Verify that link maps are read-only most of the time.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <support/memprobe.h>
+#include <support/check.h>
+#include <support/xdlfcn.h>
+#include <support/xunistd.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <support/support.h>
+
+static int do_test (void);
+#include <support/test-driver.c>
+
+/* This hack results in a definition of struct rtld_global_ro and
+ related data structures. Do this after all the other header
+ inclusions, to minimize the impact. This only works from the main
+ program due to tests-internal. */
+#define SHARED
+#include <ldsodefs.h>
+
+/* Defined in tst-relro-linkmap-mod1.so. */
+extern struct r_debug_extended *const r_debug_extended_address;
+
+/* Check that link maps are read-only in all namespaces. */
+void
+check_relro_link_maps (const char *context)
+{
+ for (struct r_debug_extended *r = r_debug_extended_address;
+ r != NULL; r = r->r_next)
+ for (struct link_map *l = r->base.r_map; l != NULL; l = l->l_next)
+ {
+ char *ctx;
+
+ ctx = xasprintf ("%s: link map for %s", context, l->l_name);
+ support_memprobe_readonly (ctx, l_private (l),
+ sizeof (*l_private (l)));
+ free (ctx);
+ if (false) /* Link map names are currently writable. */
+ {
+ ctx = xasprintf ("%s: link map name for %s", context, l->l_name);
+ support_memprobe_readonly (ctx, l->l_name, strlen (l->l_name) + 1);
+ free (ctx);
+ }
+ }
+}
+
+static void __attribute__ ((constructor))
+init (void)
+{
+ check_relro_link_maps ("ELF constructor (main)");
+}
+
+static void __attribute__ ((destructor))
+deinit (void)
+{
+ /* _dl_fini does not make link maps writable. */
+ check_relro_link_maps ("ELF destructor (main)");
+}
+
+static int
+do_test (void)
+{
+ check_relro_link_maps ("initial do_test");
+
+ /* Avoid copy relocations. Do this from the main program because we
+ need access to internal headers. */
+ {
+ struct rtld_global_ro *ro = xdlsym (RTLD_DEFAULT, "_rtld_global_ro");
+ check_relro_link_maps ("after _rtld_global_ro");
+ support_memprobe_readonly ("_rtld_global_ro", ro, sizeof (*ro));
+ support_memprobe_readonly ("GLPM", ro->_dl_protmem,
+ sizeof (*ro->_dl_protmem));
+ }
+ support_memprobe_readwrite ("_rtld_global",
+ xdlsym (RTLD_DEFAULT, "_rtld_global"),
+ sizeof (struct rtld_global_ro));
+ check_relro_link_maps ("after _rtld_global");
+
+ /* This is supposed to fail. */
+ TEST_VERIFY (dlopen ("tst-dlopenfailmod1.so", RTLD_LAZY) == NULL);
+ check_relro_link_maps ("after failed dlopen");
+
+ /* This should succeed. */
+ void *handle = xdlopen ("tst-relro-linkmap-mod2.so", RTLD_LAZY);
+ check_relro_link_maps ("after successful dlopen");
+ xdlclose (handle);
+ check_relro_link_maps ("after dlclose 1");
+
+ handle = xdlmopen (LM_ID_NEWLM, "tst-relro-linkmap-mod3.so", RTLD_LAZY);
+ check_relro_link_maps ("after dlmopen");
+ xdlclose (handle);
+ check_relro_link_maps ("after dlclose 2");
+
+ return 0;
+}
diff --git a/include/link.h b/include/link.h
index 2632337e29..1651a9b118 100644
--- a/include/link.h
+++ b/include/link.h
@@ -164,6 +164,9 @@ struct link_map_private
than one namespace. */
struct link_map_private *l_real;
+ /* Allocated size of this link map. */
+ size_t l_size;
+
/* Run-time writable fields. */
struct link_map_rw *l_rw;
diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
index e8f7c8b70b..b2bb42e8c6 100644
--- a/sysdeps/generic/ldsodefs.h
+++ b/sysdeps/generic/ldsodefs.h
@@ -524,7 +524,10 @@ extern struct rtld_global _rtld_global __rtld_global_attribute__;
#endif
#ifdef SHARED
-/* Implementation structure for the protected memory area. */
+/* Implementation structure for the protected memory area. In static
+ builds, the protected memory area is just regular (.data) memory,
+ as there is no RELRO support anyway. Some fields are only needed
+ for SHARED builds and are not included for static builds. */
struct rtld_protmem
{
/* Structure describing the dynamic linker itself. */
@@ -1043,6 +1046,9 @@ struct link_map_private *_dl_new_object (char *realname,
int mode, Lmid_t nsid)
attribute_hidden;
+/* Deallocates the specified link map (only the link map itself). */
+void _dl_free_object (struct link_map_private *) attribute_hidden;
+
/* Relocate the given object (if it hasn't already been).
SCOPE is passed to _dl_lookup_symbol in symbol lookups.
If RTLD_LAZY is set in RELOC-MODE, don't relocate its PLT. */
--
2.43.0
^ permalink raw reply [flat|nested] 77+ messages in thread
* [PATCH v3 25/32] elf: Move most of the _dl_find_object data to the protected heap
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (23 preceding siblings ...)
2023-12-07 10:32 ` [PATCH v3 24/32] elf: Implement a basic " Florian Weimer
@ 2023-12-07 10:32 ` Florian Weimer
2024-02-28 19:06 ` Joseph Myers
2023-12-07 10:32 ` [PATCH v3 26/32] elf: Switch to a region-based protected memory allocator Florian Weimer
` (7 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:32 UTC (permalink / raw)
To: libc-alpha
The heap is mostly read-only by design, so allocation padding is
no longer required. The protected heap is not visible to malloc,
so it's not necessary to deallocate the allocations during
__libc_freeres anymore.
---
elf/dl-find_object.c | 94 ++++++++-----------------------------------
elf/dl-find_object.h | 3 --
elf/dl-libc_freeres.c | 2 -
3 files changed, 16 insertions(+), 83 deletions(-)
diff --git a/elf/dl-find_object.c b/elf/dl-find_object.c
index f81351b0ef..82f493d817 100644
--- a/elf/dl-find_object.c
+++ b/elf/dl-find_object.c
@@ -20,6 +20,7 @@
#include <atomic.h>
#include <atomic_wide_counter.h>
#include <dl-find_object.h>
+#include <dl-protmem.h>
#include <dlfcn.h>
#include <ldsodefs.h>
#include <link.h>
@@ -91,8 +92,9 @@ static struct dl_find_object_internal *_dlfo_nodelete_mappings
to avoid data races.
The memory allocations are never deallocated, but slots used for
- objects that have been dlclose'd can be reused by dlopen. The
- memory can live in the regular C malloc heap.
+ objects that have been dlclose'd can be reused by dlopen.
+ Allocations come from the protected memory heap. This makes it
+ harder to inject DWARF data.
The segments are populated from the start of the list, with the
mappings with the highest address. Only if this segment is full,
@@ -111,9 +113,6 @@ struct dlfo_mappings_segment
initialization; read in the TM region. */
struct dlfo_mappings_segment *previous;
- /* Used by __libc_freeres to deallocate malloc'ed memory. */
- void *to_free;
-
/* Count of array elements in use and allocated. */
size_t size; /* Read in the TM region. */
size_t allocated;
@@ -154,44 +153,15 @@ _dlfo_mappings_segment_count_allocated (struct dlfo_mappings_segment *seg)
/* This is essentially an arbitrary value. dlopen allocates plenty of
memory anyway, so over-allocated a bit does not hurt. Not having
- many small-ish segments helps to avoid many small binary searches.
- Not using a power of 2 means that we do not waste an extra page
- just for the malloc header if a mapped allocation is used in the
- glibc allocator. */
-enum { dlfo_mappings_initial_segment_size = 63 };
-
-/* Allocate an empty segment. This used for the first ever
- allocation. */
-static struct dlfo_mappings_segment *
-_dlfo_mappings_segment_allocate_unpadded (size_t size)
-{
- if (size < dlfo_mappings_initial_segment_size)
- size = dlfo_mappings_initial_segment_size;
- /* No overflow checks here because the size is a mapping count, and
- struct link_map_private is larger than what we allocate here. */
- enum
- {
- element_size = sizeof ((struct dlfo_mappings_segment) {}.objects[0])
- };
- size_t to_allocate = (sizeof (struct dlfo_mappings_segment)
- + size * element_size);
- struct dlfo_mappings_segment *result = malloc (to_allocate);
- if (result != NULL)
- {
- result->previous = NULL;
- result->to_free = NULL; /* Minimal malloc memory cannot be freed. */
- result->size = 0;
- result->allocated = size;
- }
- return result;
-}
+ many small-ish segments helps to avoid many small binary searches. */
+enum { dlfo_mappings_initial_segment_size = 64 };
/* Allocate an empty segment that is at least SIZE large. PREVIOUS
points to the chain of previously allocated segments and can be
NULL. */
static struct dlfo_mappings_segment *
_dlfo_mappings_segment_allocate (size_t size,
- struct dlfo_mappings_segment * previous)
+ struct dlfo_mappings_segment *previous)
{
/* Exponential sizing policies, so that lookup approximates a binary
search. */
@@ -200,11 +170,10 @@ _dlfo_mappings_segment_allocate (size_t size,
if (previous == NULL)
minimum_growth = dlfo_mappings_initial_segment_size;
else
- minimum_growth = 2* previous->allocated;
+ minimum_growth = 2 * previous->allocated;
if (size < minimum_growth)
size = minimum_growth;
}
- enum { cache_line_size_estimate = 128 };
/* No overflow checks here because the size is a mapping count, and
struct link_map_private is larger than what we allocate here. */
enum
@@ -212,28 +181,13 @@ _dlfo_mappings_segment_allocate (size_t size,
element_size = sizeof ((struct dlfo_mappings_segment) {}.objects[0])
};
size_t to_allocate = (sizeof (struct dlfo_mappings_segment)
- + size * element_size
- + 2 * cache_line_size_estimate);
- char *ptr = malloc (to_allocate);
- if (ptr == NULL)
+ + size * element_size);
+ struct dlfo_mappings_segment *result = _dl_protmem_allocate (to_allocate);
+ if (result == NULL)
return NULL;
- char *original_ptr = ptr;
- /* Start and end at a (conservative) 128-byte cache line boundary.
- Do not use memalign for compatibility with partially interposing
- malloc implementations. */
- char *end = PTR_ALIGN_DOWN (ptr + to_allocate, cache_line_size_estimate);
- ptr = PTR_ALIGN_UP (ptr, cache_line_size_estimate);
- struct dlfo_mappings_segment *result
- = (struct dlfo_mappings_segment *) ptr;
result->previous = previous;
- result->to_free = original_ptr;
result->size = 0;
- /* We may have obtained slightly more space if malloc happened
- to provide an over-aligned pointer. */
- result->allocated = (((uintptr_t) (end - ptr)
- - sizeof (struct dlfo_mappings_segment))
- / element_size);
- assert (result->allocated >= size);
+ result->allocated = size;
return result;
}
@@ -577,11 +531,12 @@ _dl_find_object_init (void)
/* Allocate the data structures. */
size_t loaded_size = _dlfo_process_initial ();
- _dlfo_nodelete_mappings = malloc (_dlfo_nodelete_mappings_size
- * sizeof (*_dlfo_nodelete_mappings));
+ _dlfo_nodelete_mappings
+ = _dl_protmem_allocate (_dlfo_nodelete_mappings_size
+ * sizeof (*_dlfo_nodelete_mappings));
if (loaded_size > 0)
_dlfo_loaded_mappings[0]
- = _dlfo_mappings_segment_allocate_unpadded (loaded_size);
+ = _dlfo_mappings_segment_allocate (loaded_size, NULL);
if (_dlfo_nodelete_mappings == NULL
|| (loaded_size > 0 && _dlfo_loaded_mappings[0] == NULL))
_dl_fatal_printf ("\
@@ -838,20 +793,3 @@ _dl_find_object_dlclose (struct link_map_private *map)
return;
}
}
-
-void
-_dl_find_object_freeres (void)
-{
- for (int idx = 0; idx < 2; ++idx)
- {
- for (struct dlfo_mappings_segment *seg = _dlfo_loaded_mappings[idx];
- seg != NULL; )
- {
- struct dlfo_mappings_segment *previous = seg->previous;
- free (seg->to_free);
- seg = previous;
- }
- /* Stop searching in shared objects. */
- _dlfo_loaded_mappings[idx] = 0;
- }
-}
diff --git a/elf/dl-find_object.h b/elf/dl-find_object.h
index edcc0a7755..54601e7d00 100644
--- a/elf/dl-find_object.h
+++ b/elf/dl-find_object.h
@@ -135,7 +135,4 @@ bool _dl_find_object_update (struct link_map_private *new_l) attribute_hidden;
data structures. Needs to be protected by loader write lock. */
void _dl_find_object_dlclose (struct link_map_private *l) attribute_hidden;
-/* Called from __libc_freeres to deallocate malloc'ed memory. */
-void _dl_find_object_freeres (void) attribute_hidden;
-
#endif /* _DL_FIND_OBJECT_H */
diff --git a/elf/dl-libc_freeres.c b/elf/dl-libc_freeres.c
index 88c0e444b8..066629639c 100644
--- a/elf/dl-libc_freeres.c
+++ b/elf/dl-libc_freeres.c
@@ -128,6 +128,4 @@ __rtld_libc_freeres (void)
void *scope_free_list = GL(dl_scope_free_list);
GL(dl_scope_free_list) = NULL;
free (scope_free_list);
-
- _dl_find_object_freeres ();
}
--
2.43.0
^ permalink raw reply [flat|nested] 77+ messages in thread
* [PATCH v3 26/32] elf: Switch to a region-based protected memory allocator
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (24 preceding siblings ...)
2023-12-07 10:32 ` [PATCH v3 25/32] elf: Move most of the _dl_find_object data to the protected heap Florian Weimer
@ 2023-12-07 10:32 ` Florian Weimer
2024-03-05 23:36 ` Joseph Myers
2023-12-07 10:32 ` [PATCH v3 27/32] elf: Determine the caller link map in _dl_open Florian Weimer
` (6 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:32 UTC (permalink / raw)
To: libc-alpha
The old allocator is retained for debugging purposes.
---
elf/Makefile | 1 +
elf/dl-protmem-internal.h | 65 +++++-
elf/dl-protmem.c | 392 +++++++++++++++++++++++++++++++++++++
elf/dl-protmem_bootstrap.h | 3 +-
elf/tst-dl-protmem.c | 354 +++++++++++++++++++++++++++++++++
5 files changed, 812 insertions(+), 3 deletions(-)
create mode 100644 elf/tst-dl-protmem.c
diff --git a/elf/Makefile b/elf/Makefile
index 7ababc0fc4..2ebf5d2702 100644
--- a/elf/Makefile
+++ b/elf/Makefile
@@ -503,6 +503,7 @@ tests-internal += \
tst-audit19a \
tst-create_format1 \
tst-dl-hwcaps_split \
+ tst-dl-protmem \
tst-dl_find_object \
tst-dl_find_object-threads \
tst-dlmopen2 \
diff --git a/elf/dl-protmem-internal.h b/elf/dl-protmem-internal.h
index ce50d174a6..00f4639b60 100644
--- a/elf/dl-protmem-internal.h
+++ b/elf/dl-protmem-internal.h
@@ -19,10 +19,24 @@
/* These declarations are needed by <dl-protmem_bootstrap.h>, which
has to be inlined into _dl_start. */
-/* Header before all protected memory allocations. */
+#ifndef DL_PROTMEM_INTERNAL_H
+#define DL_PROTMEM_INTERNAL_H
+
+/* Define this to 1 to switch to the debugging allocator. */
+#ifndef DL_PROTMEM_DEBUG
+# define DL_PROTMEM_DEBUG 0
+#endif
+
+/* Minimum chunk size. Used to preserve alignment. */
+enum { _dlpm_chunk_minimal_size = 8 };
+
+#if DL_PROTMEM_DEBUG
+/* The debugging allocator uses mmap directly and offers full size
+ checking. */
struct dl_protmem_header
{
- struct dl_protmem_header *next;
+ struct dl_protmem_header *next
+ __attribute__ ((__aligned__ (_dlpm_chunk_minimal_size)));
unsigned int size;
};
@@ -37,3 +51,50 @@ struct dl_protmem_state
this structure. */
struct dl_protmem_header *root;
};
+
+/* The initial allocation contains just the state singleton. */
+# define DL_PROTMEM_INITIAL_REGION_SIZE (sizeof (struct dl_protmem_state))
+
+#else /* Non-debugging allocator. */
+
+/* The initial allocation covers about 150 link maps, which should be
+ enough for most programs. */
+# if __WORDSIZE == 32
+# define DL_PROTMEM_INITIAL_REGION_SIZE 131072
+# else
+# define DL_PROTMEM_INITIAL_REGION_SIZE 262144
+# endif
+
+# define DL_PROTMEM_REGION_COUNT 12
+
+/* Struct tag denoting freelist entries. */
+struct dl_protmem_freelist_chunk;
+
+/* Global state for the protected memory allocator. */
+struct dl_protmem_state
+{
+ /* GLRO (dl_protmem) points to this field. */
+ struct rtld_protmem protmem
+ __attribute__ ((__aligned__ (_dlpm_chunk_minimal_size)));
+
+ /* Pointers to mmap-allocated regions. For index i, the size of the
+ allocation is DL_PROTMEM_INITIAL_REGION_SIZE << i. The space of
+ the combined regions is sufficient for hundreds of thousands of
+ link maps, so the dynamic linker runs into scalability issues
+ well before it is exhausted. */
+ void *regions[DL_PROTMEM_REGION_COUNT];
+
+ /* List of unused allocations for each region, in increasing address
+ order. See _dlpm_chunk_size for how the freed chunk size is
+ encoded. */
+ struct dl_protmem_freelist_chunk *freelist[DL_PROTMEM_REGION_COUNT];
+
+ /* Pending free chunk that can be merged with other deallocations.
+ One pending chunk per region avoids accidental merging across
+ regions. */
+ struct dl_protmem_freelist_chunk *pending_free[DL_PROTMEM_REGION_COUNT];
+};
+
+#endif /* Non-debugging allocator. */
+
+#endif /* DL_PROTMEM_INTERNAL_H */
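As a quick sanity check on the constants above (not text from the patch):
region i is DL_PROTMEM_INITIAL_REGION_SIZE << i, so with twelve regions the
64-bit configuration spans 256 KiB, 512 KiB, ..., up to 256 KiB << 11 = 512 MiB,
a combined (2^12 - 1) * 256 KiB, slightly under 1 GiB of protected heap; the
32-bit configuration starts at 128 KiB and tops out just under 512 MiB in
total.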
diff --git a/elf/dl-protmem.c b/elf/dl-protmem.c
index f5a66868e6..cd416e33a5 100644
--- a/elf/dl-protmem.c
+++ b/elf/dl-protmem.c
@@ -21,6 +21,7 @@
#include <dl-protmem.h>
#include <dl-protmem-internal.h>
+#include <array_length.h>
#include <assert.h>
#include <sys/mman.h>
@@ -38,6 +39,8 @@ _dl_protmem_state (void)
- offsetof (struct dl_protmem_state, protmem));
}
+/* Debugging allocator. The real allocator is below. */
+#if DL_PROTMEM_DEBUG
void
_dl_protmem_init (void)
{
@@ -130,3 +133,392 @@ _dl_protmem_end (void)
/* If the mapping is left read-write, this is not fatal. */
(void) __mprotect (hdr, hdr->size, PROT_READ);
}
+
+#else /* The non-debugging allocator follows. */
+
+/* Address of a chunk on the free list. This is an abstract pointer,
+ never to be dereferenced explicitly. Use the accessor functions
+ below instead.
+
+ Metadata layout: The first word is the pointer to the next chunk,
+ except that the lowest bit (unused due to alignment) is used as
+ a flag. If it is 1, the chunk size is the minimal size, and the
+ size is not stored separately. If the flag is 0, the size is
+ stored in the second metadata word. */
+typedef struct dl_protmem_freelist_chunk *chunk;
+
+/* Returns the size of a chunk on the free list whose start address is
+ FREEPTR. The size includes the metadata. */
+static inline size_t
+_dlpm_chunk_size (chunk ptr)
+{
+ uintptr_t *p = (uintptr_t *)ptr;
+ if (*p & 1)
+ return _dlpm_chunk_minimal_size;
+ else
+ return p[1];
+}
+
+/* Returns the address of the next free list element. */
+static inline chunk
+_dlpm_chunk_next (chunk ptr)
+{
+ uintptr_t *p = (uintptr_t *)ptr;
+ /* Mask away the size bit. */
+ return (chunk) (*p & -2);
+}
+
+static inline void
+_dlpm_chunk_set_next (chunk ptr, chunk newnext)
+{
+ /* Preserve the value of the size bit. */
+ uintptr_t *p = (uintptr_t *)ptr;
+ *p = (uintptr_t) newnext | (*p & 1);
+}
+
+/* Creates a new freelist chunk at PTR, with NEXT as the next chunk,
+ and SIZE as the size of this chunk (which includes the
+ metadata). Returns PTR. */
+static inline chunk
+_dlpm_chunk_make (chunk ptr, chunk next, size_t size)
+{
+ uintptr_t *p = (uintptr_t *)ptr;
+ if (size <= _dlpm_chunk_minimal_size)
+ /* Compressed size. */
+ *p = (uintptr_t) next | 1;
+ else
+ {
+ p[0] = (uintptr_t) next;
+ p[1] = size;
+ }
+ return ptr;
+}
+
+/* Return true if PTR2 comes immediately after PTR1 in memory. PTR2
+ can be NULL. */
+static inline bool
+_dlpm_chunk_adjancent (chunk ptr1, chunk ptr2)
+{
+ return (uintptr_t) ptr2 == (uintptr_t) ptr1 + _dlpm_chunk_size (ptr1);
+}
+
+/* Put the pending allocation on the free list. */
+static void
+_dlpm_free_pending (struct dl_protmem_state *state, unsigned int region)
+{
+ chunk pending = state->pending_free[region];
+ state->pending_free[region] = NULL;
+
+ /* The current chunk pointer. In the while loop below, coalescing
+ potentially happens at the end of this chunk, so that the chunk
+ address does not change. */
+ chunk current = state->freelist[region];
+
+ /* Special cases before loop start. */
+
+ if (current == NULL)
+ {
+ /* The freelist is empty. Nothing to coalesce. */
+ state->freelist[region] = pending;
+ return;
+ }
+
+ /* During the loop below, this merge is handled as part of the next
+ chunk processing. */
+ if (pending < current)
+ {
+ /* The new chunk will be first on the freelist. */
+ state->freelist[region] = pending;
+
+ /* See if we can coalesce. */
+ if (_dlpm_chunk_adjancent (pending, current))
+ {
+ chunk new_next = _dlpm_chunk_next (current);
+ size_t new_size = (_dlpm_chunk_size (pending)
+ + _dlpm_chunk_size (current));
+ _dlpm_chunk_make (pending, new_next, new_size);
+ }
+ else
+ _dlpm_chunk_set_next (pending, current);
+ return;
+ }
+
+ while (true)
+ {
+ chunk next = _dlpm_chunk_next (current);
+ if (_dlpm_chunk_adjancent (current, pending))
+ {
+ /* We can coalesce. See if this completely fills a gap. */
+ if (_dlpm_chunk_adjancent (pending, next))
+ {
+ /* Merge three chunks. */
+ chunk new_next = _dlpm_chunk_next (next);
+ size_t new_size = (_dlpm_chunk_size (current)
+ + _dlpm_chunk_size (pending)
+ + _dlpm_chunk_size (next));
+ /* The address of the current chunk does not change, so
+ the next pointer leading to it remains valid. */
+ _dlpm_chunk_make (current, new_next, new_size);
+ }
+ else
+ {
+ /* Merge two chunks. */
+ size_t new_size = (_dlpm_chunk_size (current)
+ + _dlpm_chunk_size (pending));
+ /* The current chunk pointer remains unchanged. */
+ _dlpm_chunk_make (current, next, new_size);
+ }
+ break;
+ }
+ if (next == NULL)
+ {
+ /* New last chunk on freelist. */
+ _dlpm_chunk_set_next (current, pending);
+ break;
+ }
+ if (pending < next)
+ {
+ /* This is the right spot on the freelist. */
+ _dlpm_chunk_set_next (current, pending);
+
+ /* See if we can coalesce with the next chunk. */
+ if (_dlpm_chunk_adjancent (pending, next))
+ {
+ chunk new_next = _dlpm_chunk_next (next);
+ size_t new_size = (_dlpm_chunk_size (pending)
+ + _dlpm_chunk_size (next));
+ _dlpm_chunk_make (pending, new_next, new_size);
+ }
+ else
+ _dlpm_chunk_set_next (pending, next);
+ break;
+ }
+ current = next;
+ }
+}
+
+/* Returns the region index for the pointer. Terminates the process
+ if PTR is not on the heap. */
+static unsigned int
+_dlpm_find_region (struct dl_protmem_state *state, void *ptr)
+{
+ /* Find the region in which the pointer is located. */
+ size_t region_size = DL_PROTMEM_INITIAL_REGION_SIZE;
+ for (unsigned int i = 0; i < array_length (state->regions); ++i)
+ {
+ if (ptr >= state->regions[i] && ptr < state->regions[i] + region_size)
+ return i;
+ region_size *= 2;
+ }
+
+ _dl_fatal_printf ("\
+Fatal glibc error: Protected memory allocation not found\n");
+}
+
+void
+_dl_protmem_init (void)
+{
+ struct dl_protmem_state *state = _dl_protmem_state ();
+ state->regions[0] = state;
+ /* The part of the region after the allocator state (with the
+ embedded protected memory area) is unused. */
+ state->freelist[0] = (chunk) (state + 1);
+ void *initial_region_end = (void *) state + DL_PROTMEM_INITIAL_REGION_SIZE;
+ _dlpm_chunk_make (state->freelist[0], NULL,
+ initial_region_end - (void *) state->freelist[0]);
+ _dl_protmem_begin_count = 1;
+}
+
+void
+_dl_protmem_begin (void)
+{
+ if (_dl_protmem_begin_count++ != 0)
+ /* Already unprotected. */
+ return;
+
+ struct dl_protmem_state *state = _dl_protmem_state ();
+ size_t region_size = DL_PROTMEM_INITIAL_REGION_SIZE;
+ for (unsigned int i = 0; i < array_length (state->regions); ++i)
+ if (state->regions[i] != NULL)
+ {
+ if (__mprotect (state->regions[i], region_size,
+ PROT_READ | PROT_WRITE) != 0)
+ _dl_signal_error (ENOMEM, NULL, NULL,
+ "Cannot make protected memory writable");
+ region_size *= 2;
+ }
+}
+
+void
+_dl_protmem_end (void)
+{
+ if (--_dl_protmem_begin_count > 0)
+ return;
+
+ struct dl_protmem_state *state = _dl_protmem_state ();
+ size_t region_size = DL_PROTMEM_INITIAL_REGION_SIZE;
+ for (unsigned int i = 0; i < array_length (state->regions); ++i)
+ if (state->regions[i] != NULL)
+ /* Ignore errors here because we can continue running with
+ read-write memory, with reduced hardening. */
+ (void) __mprotect (state->regions[i], region_size, PROT_READ);
+}
+
+void *
+_dl_protmem_allocate (size_t requested_size)
+{
+ /* Round up the size to the next multiple of 8, to preserve chunk
+ alignment. */
+ {
+ size_t adjusted_size = roundup (requested_size, _dlpm_chunk_minimal_size);
+ if (adjusted_size < requested_size)
+ return NULL; /* Overflow. */
+ requested_size = adjusted_size;
+ }
+
+ struct dl_protmem_state *state = _dl_protmem_state ();
+
+ /* Try to find an exact match among the pending chunks. */
+ for (unsigned int i = 0; i < array_length (state->regions); ++i)
+ {
+ chunk pending = state->pending_free[i];
+ if (pending == NULL)
+ continue;
+ size_t pending_size = _dlpm_chunk_size (pending);
+ if (pending_size == requested_size)
+ {
+ state->pending_free[i] = NULL;
+ return pending;
+ }
+ }
+
+ /* Remove all pending allocations. */
+ for (unsigned int i = 0; i < array_length (state->regions); ++i)
+ if (state->pending_free[i] != NULL)
+ _dlpm_free_pending (state, i);
+
+ /* This points to the previous chunk of the best chunk found so far,
+ or the root of the freelist. This place needs to be updated to
+ remove the best chunk from the freelist. */
+ chunk best_previous_p = NULL;
+ size_t best_p_size = -1;
+
+ /* Best-fit search along the free lists. */
+ for (unsigned int i = 0; i < array_length (state->regions); ++i)
+ if (state->freelist[i] != NULL)
+ {
+ /* Use the head pointer of the list as the next pointer.
+ The missing size field is not updated below. */
+ chunk last_p = (chunk) &state->freelist[i];
+ chunk p = state->freelist[i];
+ while (true)
+ {
+ size_t candidate_size = _dlpm_chunk_size (p);
+ chunk next_p = _dlpm_chunk_next (p);
+ if (candidate_size == requested_size)
+ {
+ /* Perfect fit. No further search needed.
+ Remove this chunk from the free list. */
+ _dlpm_chunk_set_next (last_p, next_p);
+ return p;
+ }
+ if (candidate_size > requested_size
+ && candidate_size < best_p_size)
+ /* Chunk with a better usable size. */
+ {
+ best_previous_p = last_p;
+ best_p_size = candidate_size;
+ }
+ if (next_p == NULL)
+ break;
+ last_p = p;
+ p = next_p;
+ }
+ }
+
+ if (best_previous_p == NULL)
+ {
+ /* No usable chunk found. Grow the heap. */
+ size_t region_size = DL_PROTMEM_INITIAL_REGION_SIZE;
+ for (unsigned int i = 0; i < array_length (state->regions); ++i)
+ {
+ if (state->regions[i] == NULL && region_size >= requested_size)
+ {
+ void *ptr = __mmap (NULL, region_size, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ if (ptr == MAP_FAILED)
+ return NULL;
+ state->regions[i] = ptr;
+ if (region_size == requested_size)
+ /* Perfect fit: the entire region serves as the allocation. */
+ return ptr;
+
+ /* Create a free list with one entry for the entire region. */
+ state->freelist[i] = _dlpm_chunk_make (ptr, NULL, region_size);
+ best_previous_p = (chunk) &state->freelist[i];
+ best_p_size = region_size;
+
+ /* Chunk is split below. */
+ break;
+ }
+ region_size *= 2;
+ }
+
+ /* All regions have been exhausted. */
+ if (best_previous_p == NULL)
+ return NULL;
+ }
+
+ /* Split the chunk. */
+ chunk p = _dlpm_chunk_next (best_previous_p);
+ void *p_end = (void *) p + best_p_size; /* Memory after this chunk. */
+ chunk p_next = _dlpm_chunk_next (p); /* Following chunk on freelist. */
+ void *remaining = (void *) p + requested_size; /* Place of the new chunk. */
+ /* Replace the chunk on the free list with its remainder. */
+ _dlpm_chunk_set_next (best_previous_p,
+ _dlpm_chunk_make (remaining,
+ p_next, p_end - remaining));
+ return p;
+}
+
+void
+_dl_protmem_free (void *ptr, size_t requested_size)
+{
+ requested_size = roundup (requested_size, _dlpm_chunk_minimal_size);
+
+ struct dl_protmem_state *state = _dl_protmem_state ();
+ unsigned int region = _dlpm_find_region (state, ptr);
+
+ {
+ chunk pending = state->pending_free[region];
+ if (pending != NULL)
+ {
+ /* First try merging with the old allocation. */
+ if (_dlpm_chunk_adjancent (pending, ptr))
+ {
+ /* Extend the existing pending chunk. The start address does
+ not change. */
+ _dlpm_chunk_make (pending, NULL,
+ _dlpm_chunk_size (pending) + requested_size);
+ return;
+ }
+ if (_dlpm_chunk_adjancent (ptr, pending))
+ {
+ /* Create a new chunk that has the existing chunk at the end. */
+ state->pending_free[region]
+ = _dlpm_chunk_make (ptr, NULL,
+ requested_size + _dlpm_chunk_size (pending));
+ return;
+ }
+
+ /* Merging did not work out. Get rid of the old pending
+ allocation. */
+ _dlpm_free_pending (state, region);
+ }
+ }
+
+ /* No pending allocation at this point. Create new free chunk. */
+ state->pending_free[region] = _dlpm_chunk_make (ptr, NULL, requested_size);
+}
+
+#endif /* Non-debugging allocator. */
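A standalone sketch of the chunk encoding used by the _dlpm_chunk_* helpers
above (not the glibc code itself): the low bit of the first word, free because
chunks are at least 8-byte aligned, marks a minimal-size chunk, and larger
chunks keep their size in a second word.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

enum { minimal_size = 8 };

/* Mirror of _dlpm_chunk_make: fold the minimal size into the tag bit,
   store larger sizes in the second word.  */
static void
chunk_make (uintptr_t *p, void *next, size_t size)
{
  if (size <= minimal_size)
    *p = (uintptr_t) next | 1;
  else
    {
      p[0] = (uintptr_t) next;
      p[1] = size;
    }
}

static size_t
chunk_size (const uintptr_t *p)
{
  return (*p & 1) ? minimal_size : p[1];
}

static void *
chunk_next (const uintptr_t *p)
{
  return (void *) (*p & ~(uintptr_t) 1);  /* mask away the size bit */
}

int
main (void)
{
  uintptr_t small[1], large[4];
  chunk_make (large, NULL, 32);   /* 32-byte free chunk, end of list */
  chunk_make (small, large, 8);   /* 8-byte free chunk pointing at it */
  assert (chunk_size (small) == 8 && chunk_next (small) == large);
  assert (chunk_size (large) == 32 && chunk_next (large) == NULL);
  return 0;
}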
diff --git a/elf/dl-protmem_bootstrap.h b/elf/dl-protmem_bootstrap.h
index a9d763bc7b..b5b44ca7a9 100644
--- a/elf/dl-protmem_bootstrap.h
+++ b/elf/dl-protmem_bootstrap.h
@@ -28,7 +28,8 @@ _dl_protmem_bootstrap (void)
{
/* The protected memory area is nested within the bootstrap
allocation. */
- struct dl_protmem_state *ptr = _dl_early_mmap (sizeof (*ptr));
+ struct dl_protmem_state *ptr
+ = _dl_early_mmap (DL_PROTMEM_INITIAL_REGION_SIZE);
if (ptr == NULL)
return NULL;
return &ptr->protmem;
diff --git a/elf/tst-dl-protmem.c b/elf/tst-dl-protmem.c
new file mode 100644
index 0000000000..6061845ca7
--- /dev/null
+++ b/elf/tst-dl-protmem.c
@@ -0,0 +1,354 @@
+/* Internal test for the protected memory allocator.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <array_length.h>
+#include <libc-diag.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <support/check.h>
+#include <support/xunistd.h>
+#include <sys/mman.h>
+
+static int do_test (void);
+#include <support/test-driver.c>
+
+/* Tracking allocated memory. Allocation granularity is assumed to be
+ 8 bytes. */
+
+/* Lowest level. Covers 65536 * 32 * 8 bytes (24 bits of address space). */
+struct level3
+{
+ uint32_t bits[1 << 16];
+};
+
+/* Mid-level. Covers 20 bits of address space. */
+struct level2
+{
+ struct level3 *level2[1 << 20];
+};
+
+/* Top level. 20 bits of address space. */
+static struct level2 *level1[1 << 20];
+
+/* Byte address to index in level1. */
+static inline unsigned int
+level1_index (uintptr_t u)
+{
+#if UINTPTR_WIDTH > 44
+ return u >> 44;
+#else
+ return 0;
+#endif
+}
+
+/* Byte address to index in level1[N]->level2. */
+static inline unsigned int
+level2_index (uintptr_t u)
+{
+ return (u >> 24) & ((1 << 20) - 1);
+}
+
+/* Byte address to index in level1[N]->level2[M]->level3. */
+static inline unsigned int
+level3_index (uintptr_t u)
+{
+ unsigned int a = u >> 3; /* Every 8th byte tracked. */
+ return (a >> 5) & ((1 << 16) - 1);
+}
+
+/* Mask for the bit in level3_index. */
+static inline uint32_t
+level3_mask (uintptr_t u)
+{
+ return (uint32_t) 1U << ((u >> 3) & 31);
+}
+
+/* Flip a bit from unset to set. Return false if the bit was already set. */
+static bool
+set_unset_bit_at (void *p)
+{
+ uintptr_t u = (uintptr_t) p;
+ struct level2 *l2 = level1[level1_index (u)];
+ if (l2 == NULL)
+ {
+ l2 = xmmap (NULL, sizeof (*l2), PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1);
+ level1[level1_index (u)] = l2;
+ }
+ struct level3 *l3 = l2->level2[level2_index (u)];
+ if (l3 == NULL)
+ {
+ l3 = xmmap (NULL, sizeof (*l3), PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1);
+ l2->level2[level2_index (u)] = l3;
+ }
+ unsigned int idx = level3_index (u);
+ uint32_t mask = level3_mask (u);
+ if (l3->bits[idx] & mask)
+ return false;
+ l3->bits[idx] |= mask;
+ return true;
+}
+
+/* Flip a bit from set to unset. Return false if the bit was already
+ cleared. */
+static bool
+clear_set_bit_at (void *p)
+{
+ uintptr_t u = (uintptr_t) p;
+ struct level2 *l2 = level1[level1_index (u)];
+ if (l2 == NULL)
+ return false;
+ struct level3 *l3 = l2->level2[level2_index (u)];
+ if (l3 == NULL)
+ return false;
+ unsigned int idx = level3_index (u);
+ uint32_t mask = level3_mask (u);
+ if (!(l3->bits[idx] & mask))
+ return false;
+ l3->bits[idx] &= ~mask;
+ return true;
+}
+
+/* Record an allocation in the bitmap. Errors if the covered bytes
+ are already allocated. */
+static void
+record_allocate (void *p, size_t size)
+{
+ TEST_VERIFY_EXIT (p != NULL);
+ TEST_VERIFY_EXIT (size > 0);
+ if (((uintptr_t) p & 7) != 0)
+ FAIL_EXIT1 ("unaligned allocation: %p of %zu bytes", p, size);
+ for (size_t i = 0; i < size; i += 8)
+ if (!set_unset_bit_at (p + i))
+ FAIL_EXIT1 ("already allocated byte %p in %zu-byte allocation at %p"
+ " (offset %zu)", p + i, size, p, i);
+}
+
+/* Record a deallocation in the bitmap. Errors if the covered bytes
+ are not allcoated. */
+static void
+record_free (void *p, size_t size)
+{
+ TEST_VERIFY_EXIT (p != NULL);
+ TEST_VERIFY_EXIT (size > 0);
+ if (((uintptr_t) p & 7) != 0)
+ FAIL_EXIT1 ("unaligned free: %p of %zu bytes", p, size);
+ for (size_t i = 0; i < size; i += 8)
+ if (!clear_set_bit_at (p + i))
+ FAIL_EXIT1 ("already deallocated byte %p in %zu-byte deallocation at %p"
+ " (offset %zu)", p + i, size, p, i);
+}
+
+/* This hack results in a definition of struct rtld_global_ro and
+ related data structures. Do this after all the other header
+ inclusions, to minimize the impact. */
+#define SHARED
+#include <ldsodefs.h>
+
+/* Create our own version of GLRO (dl_protmem). */
+static struct rtld_protmem *dl_protmem;
+#undef GLRO
+#define GLRO(x) x
+
+#define SHARED
+#include <dl-protmem.h>
+#include <dl-protmem.c>
+#include <sysdeps/generic/dl-early_mmap.h> /* Avoid direct system call. */
+#include <dl-protmem_bootstrap.h>
+
+#if !DL_PROTMEM_DEBUG
+/* Return the allocation bit for an address. */
+static bool
+bit_at (void *p)
+{
+ uintptr_t u = (uintptr_t) p;
+ struct level2 *l2 = level1[level1_index (u)];
+ if (l2 == NULL)
+ return false;
+ struct level3 *l3 = l2->level2[level2_index (u)];
+ if (l3 == NULL)
+ return false;
+ unsigned int idx = level3_index (u);
+ uint32_t mask = level3_mask (u);
+ return l3->bits[idx] & mask;
+}
+
+/* Assert that SIZE bytes at P are unallocated. */
+static void
+check_free_chunk (void *p, size_t size)
+{
+ if (((uintptr_t) p & 7) != 0)
+ FAIL_EXIT1 ("unaligned free chunk: %p of %zu bytes", p, size);
+ for (size_t i = 0; i < size; i += 8)
+ if (bit_at (p + i))
+ FAIL_EXIT1 ("allocated byte %p in free chunk at %p (%zu bytes,"
+ " offset %zu)", p + i, p, size, i);
+}
+#endif
+
+/* Dump statistics for the allocator regions (freelist length, maximum
+ free allocation size). If VERBOSE, log the entire freelist. */
+static void
+dump_regions (bool verbose)
+{
+#if !DL_PROTMEM_DEBUG
+ struct dl_protmem_state *state = _dl_protmem_state ();
+ for (unsigned int i = 0; i < array_length (state->regions); ++i)
+ {
+ if (verbose && state->regions[i] != NULL)
+ printf (" region %u at %p\n", i, state->regions[i]);
+
+ chunk pending = state->pending_free[i];
+ unsigned int count;
+ unsigned int max_size;
+ if (pending == NULL)
+ {
+ count = 0;
+ max_size = 0;
+ }
+ else
+ {
+ count = 1;
+ max_size = _dlpm_chunk_size (pending);
+ check_free_chunk (pending, max_size);
+ if (verbose)
+ printf (" pending free chunk %p, %u\n", pending, max_size);
+ }
+
+ uintptr_t last = 0;
+ for (chunk c = state->freelist[i]; c != NULL; c = _dlpm_chunk_next (c))
+ {
+ ++count;
+ size_t sz = _dlpm_chunk_size (c);
+ if (verbose)
+ printf (" free chunk %p, %zu\n", c, sz);
+ check_free_chunk (c, sz);
+ if (sz > max_size)
+ max_size = sz;
+ TEST_VERIFY ((uintptr_t) c > last);
+ last = (uintptr_t) c;
+ }
+
+ if (count > 0)
+ {
+ if (verbose)
+ printf (" ");
+ else
+ printf (" region %u at %p: ", i, state->regions[i]);
+ printf ("freelist length %u, maximum size %u\n", count, max_size);
+ }
+ }
+#endif
+}
+
+
+static int
+do_test (void)
+{
+ dl_protmem = _dl_protmem_bootstrap ();
+ _dl_protmem_init ();
+
+ /* Perform a random allocations in a loop. */
+ srand (1);
+ {
+ struct allocation
+ {
+ void *ptr;
+ size_t size;
+ } allocations[10007] = {};
+ for (unsigned int i = 0; i < 20 * 1000; ++i)
+ {
+ struct allocation *a
+ = &allocations[rand () % array_length (allocations)];
+ if (a->ptr == NULL)
+ {
+ a->size = 8 * ((rand() % 37) + 1);
+ a->ptr = _dl_protmem_allocate (a->size);
+ record_allocate (a->ptr, a->size);
+ /* Clobber the new allocation, in case some metadata still
+ references it. */
+ memset (a->ptr, 0xcc, a->size);
+ }
+ else
+ {
+ record_free (a->ptr, a->size);
+ _dl_protmem_free (a->ptr, a->size);
+ a->ptr = NULL;
+ a->size = 0;
+ }
+ }
+
+ puts ("info: after running test loop");
+ dump_regions (false);
+
+ for (unsigned int i = 0; i < array_length (allocations); ++i)
+ if (allocations[i].ptr != NULL)
+ {
+ record_free (allocations[i].ptr, allocations[i].size);
+ _dl_protmem_free (allocations[i].ptr, allocations[i].size);
+ }
+ puts ("info: after post-loop deallocations");
+ dump_regions (true);
+ }
+
+ /* Do a few larger allocations to show that coalescing works. Note
+ that the first allocation has some metadata in it, so the free
+ chunk is not an integral power of two. */
+ {
+ void *ptrs[50];
+ for (unsigned int i = 0; i < array_length (ptrs); ++i)
+ {
+ ptrs[i] = _dl_protmem_allocate (65536);
+ record_allocate (ptrs[i], 65536);
+ }
+ puts ("info: after large allocations");
+ dump_regions (true);
+ for (unsigned int i = 0; i < array_length (ptrs); ++i)
+ {
+ record_free (ptrs[i], 65536);
+ _dl_protmem_free (ptrs[i], 65536);
+ }
+ puts ("info: after freeing allocations");
+ dump_regions (true);
+
+ ptrs[0] = _dl_protmem_allocate (8);
+ record_allocate (ptrs[0], 8);
+ puts ("info: after dummy allocation");
+ dump_regions (true);
+
+ record_free (ptrs[0], 8);
+#if __GNUC_PREREQ (11, 0)
+ /* Suppress invalid GCC warning with -O3 (GCC PR 110546):
+ error: '_dl_protmem_free' called on pointer returned from a
+ mismatched allocation function [-Werror=mismatched-dealloc]
+ note: returned from '_dl_protmem_allocate.constprop */
+ DIAG_IGNORE_NEEDS_COMMENT (11, "-Wmismatched-dealloc");
+#endif
+ _dl_protmem_free (ptrs[0], 8);
+#if __GNUC_PREREQ (11, 0) && __OPTIMIZE__ >= 3
+ DIAG_POP_NEEDS_COMMENT;
+#endif
+ puts ("info: after dummy deallocation");
+ dump_regions (true);
+ }
+
+ return 0;
+}
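A worked example of the bitmap indexing above, assuming the shift constants
shown: the tracked byte at address 0x1000 has level1_index 0, level2_index
(0x1000 >> 24) & 0xfffff = 0, level3_index ((0x1000 >> 3) >> 5) & 0xffff = 16
and mask 1 << ((0x1000 >> 3) & 31) = 1 << 0; the next tracked byte, 0x1008,
lands in the same level-3 word with mask 1 << 1.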
--
2.43.0
^ permalink raw reply [flat|nested] 77+ messages in thread
* [PATCH v3 27/32] elf: Determine the caller link map in _dl_open
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (25 preceding siblings ...)
2023-12-07 10:32 ` [PATCH v3 26/32] elf: Switch to a region-based protected memory allocator Florian Weimer
@ 2023-12-07 10:32 ` Florian Weimer
2024-02-28 19:23 ` Joseph Myers
2023-12-07 10:32 ` [PATCH v3 28/32] elf: Add fast path to dlopen for fully-opened maps Florian Weimer
` (5 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:32 UTC (permalink / raw)
To: libc-alpha
No functional change expected.
This is in preparation for adding a fast path to dlopen in case
no link map changes are required.
---
elf/dl-open.c | 47 +++++++++++++++++++----------------------------
1 file changed, 19 insertions(+), 28 deletions(-)
diff --git a/elf/dl-open.c b/elf/dl-open.c
index afac8498be..52e44804cf 100644
--- a/elf/dl-open.c
+++ b/elf/dl-open.c
@@ -49,8 +49,7 @@ struct dl_open_args
{
const char *file;
int mode;
- /* This is the caller of the dlopen() function. */
- const void *caller_dlopen;
+ struct link_map_private *caller_map; /* Derived from the caller address. */
struct link_map_private *map;
/* Namespace ID. */
Lmid_t nsid;
@@ -540,30 +539,6 @@ dl_open_worker_begin (void *a)
struct dl_open_args *args = a;
const char *file = args->file;
int mode = args->mode;
- struct link_map_private *call_map = NULL;
-
- /* Determine the caller's map if necessary. This is needed in case
- we have a DST, when we don't know the namespace ID we have to put
- the new object in, or when the file name has no path in which
- case we need to look along the RUNPATH/RPATH of the caller. */
- const char *dst = strchr (file, '$');
- if (dst != NULL || args->nsid == __LM_ID_CALLER
- || strchr (file, '/') == NULL)
- {
- const void *caller_dlopen = args->caller_dlopen;
-
- /* We have to find out from which object the caller is calling.
- By default we assume this is the main application. */
- call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
-
- struct link_map_private *l = _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen);
-
- if (l)
- call_map = l;
-
- if (args->nsid == __LM_ID_CALLER)
- args->nsid = call_map->l_ns;
- }
/* Prepare for link map updates. If dl_open_worker below returns
normally, a matching _dl_protmem_end call is performed there. On
@@ -585,7 +560,7 @@ dl_open_worker_begin (void *a)
/* Load the named object. */
struct link_map_private *new;
- args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
+ args->map = new = _dl_map_object (args->caller_map, file, lt_loaded, 0,
mode | __RTLD_CALLMAP, args->nsid);
/* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
@@ -906,7 +881,6 @@ no more namespaces available for dlmopen()"));
struct dl_open_args args;
args.file = file;
args.mode = mode;
- args.caller_dlopen = caller_dlopen;
args.map = NULL;
args.nsid = nsid;
/* args.libc_already_loaded is always assigned by dl_open_worker
@@ -915,6 +889,23 @@ no more namespaces available for dlmopen()"));
args.argv = argv;
args.env = env;
+ /* Determine the caller's map if necessary. This is needed when we
+ don't know the namespace ID in which we have to put the new object,
+ in case we have a DST, or when the file name has no path, in
+ which case we need to look along the RUNPATH/RPATH of the caller. */
+ if (nsid == __LM_ID_CALLER || strchr (file, '$') != NULL
+ || strchr (file, '/') == NULL)
+ {
+ args.caller_map = _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen);
+ if (args.caller_map == NULL)
+ /* By default we assume this is the main application. */
+ args.caller_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
+ if (args.nsid == __LM_ID_CALLER)
+ args.nsid = args.caller_map->l_ns;
+ }
+ else
+ args.caller_map = NULL;
+
struct dl_exception exception;
int errcode = _dl_catch_exception (&exception, dl_open_worker, &args);
--
2.43.0
^ permalink raw reply [flat|nested] 77+ messages in thread
* [PATCH v3 28/32] elf: Add fast path to dlopen for fully-opened maps
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (26 preceding siblings ...)
2023-12-07 10:32 ` [PATCH v3 27/32] elf: Determine the caller link map in _dl_open Florian Weimer
@ 2023-12-07 10:32 ` Florian Weimer
2024-02-28 19:26 ` Joseph Myers
2023-12-07 10:32 ` [PATCH v3 29/32] elf: Use _dl_find_object instead of _dl_find_dso_for_object in dlopen Florian Weimer
` (4 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:32 UTC (permalink / raw)
To: libc-alpha
---
elf/dl-open.c | 36 +++++++++++++++++++++++++++++++-----
1 file changed, 31 insertions(+), 5 deletions(-)
diff --git a/elf/dl-open.c b/elf/dl-open.c
index 52e44804cf..a8c9945f2b 100644
--- a/elf/dl-open.c
+++ b/elf/dl-open.c
@@ -533,6 +533,23 @@ call_dl_init (void *closure)
_dl_init (args->new, args->argc, args->argv, args->env);
}
+/* Return true if the object does not need any processing beyond the
+ l_direct_opencount update. Needs to be kept in sync with the logic
+ in dl_open_worker_begin after the l->l_searchlist.r_list != NULL check.
+ MODE is the dlopen mode argument. */
+static bool
+is_already_fully_open (struct link_map_private *map, int mode)
+{
+ return (map != NULL /* An existing map was found. */
+ /* dlopen completed initialization of this map. Maps with
+ l_type == lt_library start out as partially initialized. */
+ && map->l_searchlist.r_list != NULL
+ /* The object is already in the global scope if requested. */
+ && (!(mode & RTLD_GLOBAL) || map->l_global)
+ /* The object is already NODELETE if requested. */
+ && (!(mode & RTLD_NODELETE) || map->l_rw->l_nodelete_active));
+}
+
static void
dl_open_worker_begin (void *a)
{
@@ -559,9 +576,10 @@ dl_open_worker_begin (void *a)
_dl_debug_initialize (0, args->nsid);
/* Load the named object. */
- struct link_map_private *new;
- args->map = new = _dl_map_object (args->caller_map, file, lt_loaded, 0,
- mode | __RTLD_CALLMAP, args->nsid);
+ struct link_map_private *new = args->map;
+ if (new == NULL)
+ args->map = new = _dl_map_new_object (args->caller_map, file, lt_loaded, 0,
+ mode | __RTLD_CALLMAP, args->nsid);
/* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
set and the object is not already loaded. */
@@ -578,7 +596,7 @@ dl_open_worker_begin (void *a)
/* This object is directly loaded. */
++new->l_rw->l_direct_opencount;
- /* It was already open. */
+ /* It was already open. See is_already_fully_open above. */
if (__glibc_unlikely (new->l_searchlist.r_list != NULL))
{
/* Let the user know about the opencount. */
@@ -881,7 +899,6 @@ no more namespaces available for dlmopen()"));
struct dl_open_args args;
args.file = file;
args.mode = mode;
- args.map = NULL;
args.nsid = nsid;
/* args.libc_already_loaded is always assigned by dl_open_worker
(before any explicit/non-local returns). */
@@ -906,6 +923,15 @@ no more namespaces available for dlmopen()"));
else
args.caller_map = NULL;
+ args.map = _dl_lookup_map (args.nsid, file);
+ if (is_already_fully_open (args.map, mode))
+ {
+ /* We can use the fast path. */
+ ++args.map->l_rw->l_direct_opencount;
+ __rtld_lock_unlock_recursive (GL(dl_load_lock));
+ return args.map;
+ }
+
struct dl_exception exception;
int errcode = _dl_catch_exception (&exception, dl_open_worker, &args);
--
2.43.0
^ permalink raw reply [flat|nested] 77+ messages in thread
* [PATCH v3 29/32] elf: Use _dl_find_object instead of _dl_find_dso_for_object in dlopen
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (27 preceding siblings ...)
2023-12-07 10:32 ` [PATCH v3 28/32] elf: Add fast path to dlopen for fully-opened maps Florian Weimer
@ 2023-12-07 10:32 ` Florian Weimer
2024-02-28 19:27 ` Joseph Myers
2023-12-07 10:32 ` [PATCH v3 30/32] elf: Put critical _dl_find_object pointers into protected memory area Florian Weimer
` (3 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:32 UTC (permalink / raw)
To: libc-alpha
_dl_find_object uses a binary search and is faster when there are many
objects.
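As a reminder of the interface involved, here is a minimal user-space
sketch (independent of this patch) of looking up the link map that covers
a code address via the public _dl_find_object function; dlopen now performs
the equivalent lookup internally for the caller address:
  #define _GNU_SOURCE
  #include <dlfcn.h>
  #include <stdio.h>

  int
  main (void)
  {
    struct dl_find_object dlfo;
    /* Use the address of main itself as the lookup key.  */
    if (_dl_find_object ((void *) &main, &dlfo) == 0)
      printf ("mapping %p-%p, link map %p\n",
              dlfo.dlfo_map_start, dlfo.dlfo_map_end,
              (void *) dlfo.dlfo_link_map);
    else
      puts ("address not covered by any loaded object");
    return 0;
  }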
---
elf/dl-open.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/elf/dl-open.c b/elf/dl-open.c
index a8c9945f2b..47638128dc 100644
--- a/elf/dl-open.c
+++ b/elf/dl-open.c
@@ -913,8 +913,10 @@ no more namespaces available for dlmopen()"));
if (nsid == __LM_ID_CALLER || strchr (file, '$') != NULL
|| strchr (file, '/') == NULL)
{
- args.caller_map = _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen);
- if (args.caller_map == NULL)
+ struct dl_find_object dlfo;
+ if (_dl_find_object ((void *) caller_dlopen, &dlfo) == 0)
+ args.caller_map = l_private (dlfo.dlfo_link_map);
+ else
/* By default we assume this is the main application. */
args.caller_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
if (args.nsid == __LM_ID_CALLER)
--
2.43.0
^ permalink raw reply [flat|nested] 77+ messages in thread
* [PATCH v3 30/32] elf: Put critical _dl_find_object pointers into protected memory area
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (28 preceding siblings ...)
2023-12-07 10:32 ` [PATCH v3 29/32] elf: Use _dl_find_object instead of _dl_find_dso_for_object in dlopen Florian Weimer
@ 2023-12-07 10:32 ` Florian Weimer
2024-03-04 21:39 ` Joseph Myers
2023-12-07 10:32 ` [PATCH v3 31/32] elf: Add hash tables to speed up DT_NEEDED, dlopen lookups Florian Weimer
` (2 subsequent siblings)
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:32 UTC (permalink / raw)
To: libc-alpha
With this change, all control data for _dl_find_object is either
RELRO data, or in the protected area, or tightly constrained
(the version counter is always masked using & 1 before array
indexing).
This commit can serve as an example of how to extend the protected
memory area.
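To illustrate why the & 1 masking is safe, here is a simplified,
self-contained sketch of the double-buffered read protocol (not the actual
dl-find_object.c code, which uses __atomic_wide_counter and explicit
acquire/release ordering):
  #include <stdatomic.h>

  struct data { int value; };

  /* Two copies of the data; the low bit of the version counter selects
     the active copy, so indexing with & 1 can never go out of bounds,
     even from a signal handler.  */
  static struct data copies[2];
  static _Atomic unsigned long long version;

  /* Reader: retry if an update happened while the copy was being read.  */
  static int
  read_value (void)
  {
    for (;;)
      {
        unsigned long long v1 = atomic_load (&version);
        int result = copies[v1 & 1].value;
        if (atomic_load (&version) == v1)
          return result;
      }
  }

  /* Writer (serialized externally): fill the inactive copy, then
     publish it by bumping the version counter.  */
  static void
  write_value (int value)
  {
    unsigned long long v = atomic_load (&version);
    copies[!(v & 1)].value = value;
    atomic_store (&version, v + 1);
  }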
---
elf/dl-find_object.c | 39 +++++++++++++++++++-------------------
sysdeps/generic/ldsodefs.h | 9 +++++++++
2 files changed, 29 insertions(+), 19 deletions(-)
diff --git a/elf/dl-find_object.c b/elf/dl-find_object.c
index 82f493d817..baab80fdb7 100644
--- a/elf/dl-find_object.c
+++ b/elf/dl-find_object.c
@@ -120,13 +120,6 @@ struct dlfo_mappings_segment
struct dl_find_object_internal objects[]; /* Read in the TM region. */
};
-/* To achieve async-signal-safety, two copies of the data structure
- are used, so that a signal handler can still use this data even if
- dlopen or dlclose modify the other copy. The the least significant
- bit in _dlfo_loaded_mappings_version determines which array element
- is the currently active region. */
-static struct dlfo_mappings_segment *_dlfo_loaded_mappings[2];
-
/* Returns the number of actually used elements in all segments
starting at SEG. */
static inline size_t
@@ -192,10 +185,17 @@ _dlfo_mappings_segment_allocate (size_t size,
}
/* Monotonic counter for software transactional memory. The lowest
- bit indicates which element of the _dlfo_loaded_mappings contains
- up-to-date data. */
+ bit indicates which element of the GLPM (dlfo_loaded_mappings)
+ contains up-to-date data. This achieves async-signal-safety for
+ _dl_find_object: a signal handler can still use the
+ GLPM (dlfo_loaded_mappings) data even if dlopen or dlclose
+ modify the other copy. */
static __atomic_wide_counter _dlfo_loaded_mappings_version;
+#ifndef SHARED
+struct dlfo_mappings_segment *_dlfo_loaded_mappings[2];
+#endif
+
/* TM version at the start of the read operation. */
static inline uint64_t
_dlfo_read_start_version (void)
@@ -263,7 +263,7 @@ _dlfo_read_success (uint64_t start_version)
static struct dlfo_mappings_segment *
_dlfo_mappings_active_segment (uint64_t start_version)
{
- return _dlfo_loaded_mappings[start_version & 1];
+ return GLPM (dlfo_loaded_mappings)[start_version & 1];
}
/* Searches PC among the address-sorted array [FIRST1, FIRST1 +
@@ -472,10 +472,10 @@ _dlfo_process_initial (void)
}
else if (l->l_type == lt_loaded)
{
- if (_dlfo_loaded_mappings[0] != NULL)
+ if (GLPM (dlfo_loaded_mappings)[0] != NULL)
/* Second pass only. */
_dl_find_object_from_map
- (l, &_dlfo_loaded_mappings[0]->objects[loaded]);
+ (l, &GLPM (dlfo_loaded_mappings)[0]->objects[loaded]);
++loaded;
}
}
@@ -535,10 +535,10 @@ _dl_find_object_init (void)
= _dl_protmem_allocate (_dlfo_nodelete_mappings_size
* sizeof (*_dlfo_nodelete_mappings));
if (loaded_size > 0)
- _dlfo_loaded_mappings[0]
+ GLPM (dlfo_loaded_mappings)[0]
= _dlfo_mappings_segment_allocate (loaded_size, NULL);
if (_dlfo_nodelete_mappings == NULL
- || (loaded_size > 0 && _dlfo_loaded_mappings[0] == NULL))
+ || (loaded_size > 0 && GLPM (dlfo_loaded_mappings)[0] == NULL))
_dl_fatal_printf ("\
Fatal glibc error: cannot allocate memory for find-object data\n");
/* Fill in the data with the second call. */
@@ -554,8 +554,8 @@ Fatal glibc error: cannot allocate memory for find-object data\n");
_dlfo_nodelete_mappings_end = _dlfo_nodelete_mappings[last_idx].map_end;
}
if (loaded_size > 0)
- _dlfo_sort_mappings (_dlfo_loaded_mappings[0]->objects,
- _dlfo_loaded_mappings[0]->size);
+ _dlfo_sort_mappings (GLPM (dlfo_loaded_mappings)[0]->objects,
+ GLPM (dlfo_loaded_mappings)[0]->size);
}
static void
@@ -609,11 +609,11 @@ _dl_find_object_update_1 (struct link_map_private **loaded, size_t count)
int active_idx = _dlfo_read_version_locked () & 1;
struct dlfo_mappings_segment *current_seg
- = _dlfo_loaded_mappings[active_idx];
+ = GLPM (dlfo_loaded_mappings)[active_idx];
size_t current_used = _dlfo_mappings_segment_count_used (current_seg);
struct dlfo_mappings_segment *target_seg
- = _dlfo_loaded_mappings[!active_idx];
+ = GLPM (dlfo_loaded_mappings)[!active_idx];
size_t remaining_to_add = current_used + count;
/* Ensure that the new segment chain has enough space. */
@@ -634,7 +634,8 @@ _dl_find_object_update_1 (struct link_map_private **loaded, size_t count)
/* The barrier ensures that a concurrent TM read or fork does
not see a partially initialized segment. */
- atomic_store_release (&_dlfo_loaded_mappings[!active_idx], target_seg);
+ atomic_store_release (&GLPM (dlfo_loaded_mappings)[!active_idx],
+ target_seg);
}
else
/* Start update cycle without allocation. */
diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
index b2bb42e8c6..eaa144cc4e 100644
--- a/sysdeps/generic/ldsodefs.h
+++ b/sysdeps/generic/ldsodefs.h
@@ -523,6 +523,8 @@ extern struct rtld_global _rtld_global __rtld_global_attribute__;
# undef __rtld_global_attribute__
#endif
+struct dlfo_mappings_segment;
+
#ifdef SHARED
/* Implementation structure for the protected memory area. In static
builds, the protected memory area is just regular (.data) memory,
@@ -532,6 +534,13 @@ struct rtld_protmem
{
/* Structure describing the dynamic linker itself. */
EXTERN struct link_map_private _dl_rtld_map;
+#endif /* SHARED */
+
+ /* Two copies of the data structures for _dl_find_object. See
+ _dlfo_loaded_mappings_version in dl-find_object.c. */
+ EXTERN struct dlfo_mappings_segment *_dlfo_loaded_mappings[2];
+
+#ifdef SHARED
};
#endif /* SHARED */
--
2.43.0
^ permalink raw reply [flat|nested] 77+ messages in thread
* [PATCH v3 31/32] elf: Add hash tables to speed up DT_NEEDED, dlopen lookups
2023-12-07 10:30 [PATCH v3 00/32] RELRO linkmaps Florian Weimer
` (29 preceding siblings ...)
2023-12-07 10:32 ` [PATCH v3 30/32] elf: Put critical _dl_find_object pointers into protected memory area Florian Weimer
@ 2023-12-07 10:32 ` Florian Weimer
2024-03-06 0:04 ` Joseph Myers
2023-12-07 10:33 ` [PATCH v3 32/32] elf: Use memory protection keys for the protected memory allocator Florian Weimer
2023-12-07 10:53 ` [PATCH v3 00/32] RELRO linkmaps Andreas Schwab
32 siblings, 1 reply; 77+ messages in thread
From: Florian Weimer @ 2023-12-07 10:32 UTC (permalink / raw)
To: libc-alpha
Each hash table is specific to one dlopen namespace. For convenience,
it uses the GNU symbol hash function, but that choice is arbitrary.
The hash tables use the protected memory allocator.
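For reference, the hash function in question is the well-known GNU symbol
hash; a stand-alone sketch equivalent to the usual definition (the in-tree
version is _dl_new_hash from <dl-new-hash.h>):
  #include <stdint.h>

  /* GNU symbol hash: h = h * 33 + c, seeded with 5381.  */
  static uint32_t
  gnu_hash (const char *s)
  {
    uint32_t h = 5381;
    for (unsigned char c = *s; c != '\0'; c = *++s)
      h = h * 33 + c;
    return h;
  }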
The associated aliases are linked from the link map using the dual-use
l_name field. See the new l_libname accessor function. This could
be changed back to a dedicated field in the private link map if
it is necessary to enable applications which write to l_name in a
limited fashion.
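Assuming l_name points into the name member of the alias record (the dual
use mentioned above), an l_libname-style accessor can recover the enclosing
struct libname with offsetof arithmetic; this is a hypothetical sketch, not
the actual definition in dl-libname.h:
  #include <stddef.h>
  #include <stdint.h>

  struct link_map_private;          /* Opaque here.  */

  struct libname
  {
    struct link_map_private *map;
    struct libname *next_link_map;
    struct libname *next_hash;
    uint32_t hash;
    char name[];                    /* l_name would point here.  */
  };

  /* Hypothetical accessor: recover the alias record from the name
     pointer stored in the link map's l_name field.  */
  static inline struct libname *
  libname_from_l_name (char *l_name)
  {
    return (struct libname *) (l_name - offsetof (struct libname, name));
  }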
The alloca copy in _dl_load_cache_lookup is no longer needed
because _dl_libname_allocate does not use the interposable malloc,
and so cannot call back into the dynamic linker.
In _dl_map_new_object, check for memory allocation failure and
empty tokens during DST expansion. This was handled implicitly
before, by falling through to the fd == -1 path (memory allocation
failure), or by trying to open "" (empty DST expansion).
The rewritten logic in _dl_new_object avoids adding an alias that is
identical to the object's file name already stored in l_name.
It also special-cases the vDSO, which is initially created
without a name (the soname becomes known only afterwards).
The l_soname_added field in the link map is no longer needed because
duplicated additions are avoided in _dl_libname_add_alias.
---
elf/Makefile | 1 +
elf/dl-cache.c | 13 +-
elf/dl-close.c | 14 --
elf/dl-libc_freeres.c | 13 --
elf/dl-libname.c | 281 +++++++++++++++++++++++++++++++++++++
elf/dl-libname.h | 121 ++++++++++++++++
elf/dl-load.c | 168 ++++++++--------------
elf/dl-misc.c | 18 ---
elf/dl-object.c | 131 ++++++++++-------
elf/dl-open.c | 11 +-
elf/dl-support.c | 15 +-
elf/dl-version.c | 9 +-
elf/pldd-xx.c | 19 +--
elf/pldd.c | 1 +
elf/rtld.c | 92 ++++--------
elf/setup-vdso.h | 18 +--
elf/sotruss-lib.c | 6 +-
include/link.h | 5 +-
sysdeps/generic/ldsodefs.h | 23 ++-
19 files changed, 631 insertions(+), 328 deletions(-)
create mode 100644 elf/dl-libname.c
create mode 100644 elf/dl-libname.h
diff --git a/elf/Makefile b/elf/Makefile
index 2ebf5d2702..d13a959fdb 100644
--- a/elf/Makefile
+++ b/elf/Makefile
@@ -63,6 +63,7 @@ dl-routines = \
dl-find_object \
dl-fini \
dl-init \
+ dl-libname \
dl-load \
dl-lookup \
dl-lookup-direct \
diff --git a/elf/dl-cache.c b/elf/dl-cache.c
index a3eb960dac..2818f9a8f4 100644
--- a/elf/dl-cache.c
+++ b/elf/dl-cache.c
@@ -26,6 +26,7 @@
#include <_itoa.h>
#include <dl-hwcaps.h>
#include <dl-isa-level.h>
+#include <dl-libname.h>
#ifndef _DL_PLATFORMS_COUNT
# define _DL_PLATFORMS_COUNT 0
@@ -399,7 +400,7 @@ _dl_cache_libcmp (const char *p1, const char *p2)
this function must take care that it does not return references to
any data in the mapping. */
bool
-_dl_load_cache_lookup (const char *name, char **realname)
+_dl_load_cache_lookup (const char *name, struct libname **realname)
{
/* Print a message if the loading of libs is traced. */
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_LIBS))
@@ -510,15 +511,7 @@ _dl_load_cache_lookup (const char *name, char **realname)
return true;
}
- /* The double copy is *required* since malloc may be interposed
- and call dlopen itself whose completion would unmap the data
- we are accessing. Therefore we must make the copy of the
- mapping data without using malloc. */
- char *temp;
- size_t best_len = strlen (best) + 1;
- temp = alloca (best_len);
- memcpy (temp, best, best_len);
- char *copy = __strdup (temp);
+ struct libname *copy = _dl_libname_allocate (best);
if (copy == NULL)
return false;
*realname = copy;
diff --git a/elf/dl-close.c b/elf/dl-close.c
index 8391abe2d7..3bdfd15bd7 100644
--- a/elf/dl-close.c
+++ b/elf/dl-close.c
@@ -686,20 +686,6 @@ _dl_close_worker (struct link_map_private *map, bool force)
_dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
imap->l_public.l_name, imap->l_ns);
- /* This name always is allocated. */
- free (imap->l_public.l_name);
- /* Remove the list with all the names of the shared object. */
-
- struct libname_list *lnp = imap->l_libname;
- do
- {
- struct libname_list *this = lnp;
- lnp = lnp->next;
- if (!this->dont_free)
- free (this);
- }
- while (lnp != NULL);
-
/* Remove the searchlists. */
free (imap->l_initfini);
diff --git a/elf/dl-libc_freeres.c b/elf/dl-libc_freeres.c
index 066629639c..f72d9df8d6 100644
--- a/elf/dl-libc_freeres.c
+++ b/elf/dl-libc_freeres.c
@@ -70,19 +70,6 @@ __rtld_libc_freeres (void)
{
for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l_next (l))
{
- struct libname_list *lnp = l->l_libname->next;
-
- l->l_libname->next = NULL;
-
- /* Remove all additional names added to the objects. */
- while (lnp != NULL)
- {
- struct libname_list *old = lnp;
- lnp = lnp->next;
- if (! old->dont_free)
- free (old);
- }
-
/* Free the initfini dependency list. */
if (l->l_free_initfini)
free (l->l_initfini);
diff --git a/elf/dl-libname.c b/elf/dl-libname.c
new file mode 100644
index 0000000000..28fc262343
--- /dev/null
+++ b/elf/dl-libname.c
@@ -0,0 +1,281 @@
+/* Managing alias names for link names, and link map lookup by name.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <dl-libname.h>
+
+#include <assert.h>
+#include <dl-new-hash.h>
+#include <dl-protmem.h>
+#include <ldsodefs.h>
+#include <libintl.h>
+
+/* Per-namespace hash table of library names. Uses linked lists via
+ next_hash for collision resolution. Resized once half-full. */
+struct libname_table
+{
+ uint32_t count; /* Number of entries in the hash table. */
+ uint32_t mask; /* Bucket count minus 1. */
+ struct libname **buckets; /* Hash buckets. */
+};
+
+#ifndef SHARED
+struct libname_table *_dl_libnames[DL_NNS];
+#endif
+
+struct libname *
+_dl_libname_allocate_hash (const char *name, uint32_t hash)
+{
+ size_t name_len = strlen (name) + 1;
+ struct libname *result
+ = _dl_protmem_allocate (offsetof (struct libname, name) + name_len);
+ if (result == NULL)
+ return NULL;
+ result->map = NULL;
+ result->next_link_map = NULL;
+ result->next_hash = NULL;
+ result->hash = hash;
+ memcpy (result->name, name, name_len);
+ return result;
+}
+
+struct libname *
+_dl_libname_allocate (const char *name)
+{
+ return _dl_libname_allocate_hash (name, _dl_libname_hash (name));
+}
+
+void
+_dl_libname_free (struct libname *ln)
+{
+ _dl_protmem_free (ln,
+ offsetof (struct libname, name) + strlen (ln->name) + 1);
+}
+
+uint32_t
+_dl_libname_hash (const char *name)
+{
+ return _dl_new_hash (name);
+}
+
+/* Returns the appropriate hash chain for the name's HASH in
+ namespace NSID. */
+static struct libname *
+_dl_libname_chain (Lmid_t nsid, uint32_t hash)
+{
+ struct libname_table *lt = GLPM (dl_libnames)[nsid];
+ if (lt == NULL)
+ return NULL;
+ return lt->buckets[hash & lt->mask];
+}
+
+struct link_map_private *
+_dl_libname_lookup_hash (Lmid_t nsid, const char *name, uint32_t hash)
+{
+
+ /* Checking l_prev and l_next verifies that the discovered alias has
+ been added to a namespace list. It is necessary to add aliases
+ to the hash table early, before updating the namespace list, so
+ that _dl_libname_add_alias can avoid adding duplicates. However,
+ during early startup, the ld.so link map is not added to the list
+ when the main program is loaded as part of an explicit loader
+ invocation. If the main program is again ld.so (a user error),
+ it is not loaded again, violating some core assumptions in
+ rtld_chain_load and setup_vdso. For static builds, the l_prev
+ and l_next checks need to be disabled because the main program is
+ the only map if there is no vDSO (and the hash table is
+ initialized after the namespace list anyway). */
+ for (struct libname *ln = _dl_libname_chain (nsid, hash);
+ ln != NULL; ln = ln->next_hash)
+ if (ln->hash == hash && strcmp (name, ln->name) == 0
+ && (ln->map->l_faked | ln->map->l_removed) == 0
+#ifdef SHARED
+ && (ln->map->l_public.l_prev != NULL
+ || ln->map->l_public.l_next != NULL)
+#endif
+ )
+ return ln->map;
+ return NULL;
+}
+
+struct link_map_private *
+_dl_lookup_map (Lmid_t nsid, const char *name)
+{
+ return _dl_libname_lookup_hash (nsid, name, _dl_libname_hash (name));
+}
+
+struct link_map_private *
+_dl_lookup_map_unfiltered (Lmid_t nsid, const char *name)
+{
+ /* This is only used in dl-version.c, which may rely on l_faked
+ objects. The l_prev/l_next filter is not needed there because
+ the namespace list update has completed. */
+ uint32_t hash = _dl_libname_hash (name);
+ for (struct libname *ln = _dl_libname_chain (nsid, hash);
+ ln != NULL; ln = ln->next_hash)
+ if (ln->hash == hash && strcmp (name, ln->name) == 0
+ && (ln->map->l_removed == 0))
+ return ln->map;
+ return NULL;
+}
+
+int
+_dl_name_match_p (const char *name, const struct link_map_private *map)
+{
+ /* An alternative implementation could use the list of names
+ starting at l_libname (map), but this implementation is fast even
+ with many aliases. */
+ uint32_t hash = _dl_libname_hash (name);
+ for (struct libname *ln = _dl_libname_chain (map->l_ns, hash);
+ ln != NULL; ln = ln->next_hash)
+ if (ln->hash == hash && ln->map == map && strcmp (name, ln->name) == 0)
+ return true;
+ return false;
+}
+
+bool
+_dl_libname_table_init (Lmid_t nsid)
+{
+ struct libname_table *lt = GLPM (dl_libnames)[nsid];
+ if (lt != NULL)
+ return true;
+ lt = _dl_protmem_allocate (sizeof (*lt));
+ if (lt == NULL)
+ return false;
+ lt->count = 0;
+ lt->mask = 15;
+ size_t buckets_size = (lt->mask + 1) * sizeof (*lt->buckets);
+ lt->buckets = _dl_protmem_allocate (buckets_size);
+ if (lt->buckets == NULL)
+ {
+ _dl_protmem_free (lt, sizeof (*lt));
+ return false;
+ }
+ memset (lt->buckets, 0, buckets_size);
+ GLPM (dl_libnames)[nsid] = lt;
+#ifndef SHARED
+ /* _dl_libname_table_init is called from dlopen in the !SHARED case
+ to set up the hash table. The code in _dl_non_dynamic_init avoids
+ these allocations in case dlopen is never called. */
+ _dl_libname_link_hash (l_libname (GL (dl_ns)[0]._ns_loaded));
+#endif
+ return true;
+}
+
+void
+_dl_libname_add_link_map (struct link_map_private *l, struct libname *ln)
+{
+ assert (ln->map == NULL);
+ ln->map = l;
+