From mboxrd@z Thu Jan  1 00:00:00 1970
Received: (qmail 129044 invoked by alias); 28 Oct 2019 18:38:48 -0000
Mailing-List: contact glibc-cvs-help@sourceware.org; run by ezmlm
Precedence: bulk
Sender: glibc-cvs-owner@sourceware.org
Received: (qmail 129016 invoked by uid 9299); 28 Oct 2019 18:38:47 -0000
Date: Mon, 28 Oct 2019 18:38:00 -0000
Message-ID: <20191028183847.129015.qmail@sourceware.org>
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
From: Florian Weimer
To: glibc-cvs@sourceware.org
Subject: [glibc/fw/bug25112] Remove all loaded objects if dlopen fails [BZ #20839]
X-Act-Checkin: glibc
X-Git-Author: Florian Weimer
X-Git-Refname: refs/heads/fw/bug25112
X-Git-Oldrev: 93e07b2f9ef6825c7165f4587d2204464c83959a
X-Git-Newrev: d0ef9b5c8f42cb8a5c7052b1d27966dc493d9005
X-SW-Source: 2019-q4/txt/msg00183.txt.bz2

https://sourceware.org/git/gitweb.cgi?p=glibc.git;h=d0ef9b5c8f42cb8a5c7052b1d27966dc493d9005

commit d0ef9b5c8f42cb8a5c7052b1d27966dc493d9005
Author: Florian Weimer
Date:   Mon Oct 28 19:26:48 2019 +0100

    Remove all loaded objects if dlopen fails [BZ #20839]

Diff:
---
 elf/dl-close.c             | 14 +++---------
 elf/dl-lookup.c            | 56 ++++++++++++++++++++++++++++++----------------
 elf/dl-open.c              | 47 ++++++++++++++++++++++++++++++--------
 elf/dl-reloc.c             |  3 ++-
 elf/get-dynamic-info.h     |  2 ++
 include/link.h             | 19 ++++++++++++++++
 sysdeps/generic/ldsodefs.h |  4 ++++
 7 files changed, 105 insertions(+), 40 deletions(-)

diff --git a/elf/dl-close.c b/elf/dl-close.c
index 2e38943..b919a03 100644
--- a/elf/dl-close.c
+++ b/elf/dl-close.c
@@ -168,14 +168,6 @@ _dl_close_worker (struct link_map *map, bool force)
   char done[nloaded];
   struct link_map *maps[nloaded];
 
-  /* Clear DF_1_NODELETE to force object deletion.  We don't need to touch
-     l_tls_dtor_count because forced object deletion only happens when an
-     error occurs during object load.  Destructor registration for TLS
-     non-POD objects should not have happened till then for this
-     object.  */
-  if (force)
-    map->l_flags_1 &= ~DF_1_NODELETE;
-
   /* Run over the list and assign indexes to the link maps and enter
      them into the MAPS array.  */
   int idx = 0;
@@ -205,7 +197,7 @@ _dl_close_worker (struct link_map *map, bool force)
       /* Check whether this object is still used.  */
       if (l->l_type == lt_loaded
           && l->l_direct_opencount == 0
-          && (l->l_flags_1 & DF_1_NODELETE) == 0
+          && l->l_nodelete != link_map_nodelete_active
          /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
             acquire is sufficient and correct.  */
          && atomic_load_acquire (&l->l_tls_dtor_count) == 0
@@ -288,7 +280,7 @@ _dl_close_worker (struct link_map *map, bool force)
           if (!used[i])
             {
               assert (imap->l_type == lt_loaded
-                      && (imap->l_flags_1 & DF_1_NODELETE) == 0);
+                      && imap->l_nodelete != link_map_nodelete_active);
 
               /* Call its termination function.  Do not do it for
                  half-cooked objects.  Temporarily disable exception
@@ -828,7 +820,7 @@ _dl_close (void *_map)
      before we took the lock.  There is no way to detect this (see below)
      so we proceed assuming this isn't the case.  First see whether we
      can remove the object at all.  */
-  if (__glibc_unlikely (map->l_flags_1 & DF_1_NODELETE))
+  if (__glibc_unlikely (map->l_nodelete == link_map_nodelete_active))
     {
       /* Nope.  Do nothing.  */
       __rtld_lock_unlock_recursive (GL(dl_load_lock));
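
The tri-state byte that replaces the DF_1_NODELETE bit tests above can
be sketched with stand-in types (illustrative only, not glibc code;
the stub struct and helper name are hypothetical).  Only the "active"
state pins a mapping, so objects still in the "pending" state can be
unwound when dlopen fails:

#include <stdbool.h>

enum link_map_nodelete
{
  link_map_nodelete_inactive,   /* May be deallocated.  */
  link_map_nodelete_active,     /* Never deallocated.  */
  link_map_nodelete_pending     /* Becomes active once dlopen succeeds.  */
};

struct link_map_stub
{
  unsigned int l_direct_opencount;
  unsigned char l_nodelete;     /* enum link_map_nodelete, one byte.  */
};

/* Mirrors the new check in _dl_close_worker: "pending" does not pin
   the object, which is what lets a failed dlopen clean it up.  */
static bool
map_is_deletable (const struct link_map_stub *l)
{
  return l->l_direct_opencount == 0
         && l->l_nodelete != link_map_nodelete_active;
}
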
diff --git a/elf/dl-lookup.c b/elf/dl-lookup.c
index fd44cd4..138f758 100644
--- a/elf/dl-lookup.c
+++ b/elf/dl-lookup.c
@@ -192,9 +192,10 @@ enter_unique_sym (struct unique_sym *table, size_t size,
    Return the matching symbol in RESULT.  */
 static void
 do_lookup_unique (const char *undef_name, uint_fast32_t new_hash,
-                  const struct link_map *map, struct sym_val *result,
+                  struct link_map *map, struct sym_val *result,
                   int type_class, const ElfW(Sym) *sym, const char *strtab,
-                  const ElfW(Sym) *ref, const struct link_map *undef_map)
+                  const ElfW(Sym) *ref, const struct link_map *undef_map,
+                  int flags)
 {
   /* We have to determine whether we already found a symbol with this
      name before.  If not then we have to add it to the search table.
@@ -222,7 +223,7 @@ do_lookup_unique (const char *undef_name, uint_fast32_t new_hash,
                      copy from the copy addressed through the
                      relocation.  */
                   result->s = sym;
-                  result->m = (struct link_map *) map;
+                  result->m = map;
                 }
               else
                 {
@@ -311,9 +312,14 @@ do_lookup_unique (const char *undef_name, uint_fast32_t new_hash,
                             new_hash, strtab + sym->st_name, sym, map);
 
           if (map->l_type == lt_loaded)
-            /* Make sure we don't unload this object by
-               setting the appropriate flag.  */
-            ((struct link_map *) map)->l_flags_1 |= DF_1_NODELETE;
+            {
+              /* Make sure we don't unload this object by
+                 setting the appropriate flag.  */
+              if (flags & DL_LOOKUP_INSIDE_DLOPEN)
+                map->l_nodelete = link_map_nodelete_pending;
+              else
+                map->l_nodelete = link_map_nodelete_active;
+            }
         }
       ++tab->n_elements;
 
@@ -525,8 +531,9 @@ do_lookup_x (const char *undef_name, uint_fast32_t new_hash,
               return 1;
 
             case STB_GNU_UNIQUE:;
-              do_lookup_unique (undef_name, new_hash, map, result, type_class,
-                                sym, strtab, ref, undef_map);
+              do_lookup_unique (undef_name, new_hash, (struct link_map *) map,
+                                result, type_class, sym, strtab, ref,
+                                undef_map, flags);
               return 1;
 
             default:
@@ -568,9 +575,13 @@ add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
   if (undef_map == map)
     return 0;
 
-  /* Avoid references to objects which cannot be unloaded anyway.  */
+  /* Avoid references to objects which cannot be unloaded anyway.  We
+     do not need to record dependencies if this object goes away
+     during dlopen failure, either.  IFUNC resolvers with relocation
+     dependencies may pick a dependency which can be dlclose'd, but
+     such IFUNC resolvers are undefined anyway.  */
   assert (map->l_type == lt_loaded);
-  if ((map->l_flags_1 & DF_1_NODELETE) != 0)
+  if (map->l_nodelete != link_map_nodelete_inactive)
     return 0;
 
   struct link_map_reldeps *l_reldeps
@@ -678,16 +689,19 @@ add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
 
       /* Redo the NODELETE check, as when dl_load_lock wasn't held
          yet this could have changed.  */
-      if ((map->l_flags_1 & DF_1_NODELETE) != 0)
+      if (map->l_nodelete != link_map_nodelete_inactive)
        goto out;
 
       /* If the object with the undefined reference cannot be removed ever
          just make sure the same is true for the object which contains the
          definition.  */
       if (undef_map->l_type != lt_loaded
-          || (undef_map->l_flags_1 & DF_1_NODELETE) != 0)
+          || (undef_map->l_nodelete != link_map_nodelete_inactive))
         {
-          map->l_flags_1 |= DF_1_NODELETE;
+          if (flags & DL_LOOKUP_INSIDE_DLOPEN)
+            map->l_nodelete = link_map_nodelete_pending;
+          else
+            map->l_nodelete = link_map_nodelete_active;
           goto out;
         }
 
@@ -712,7 +726,13 @@ add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
                  no fatal problem.  We simply make sure the referenced object
                  cannot be unloaded.  This is semantically the correct
                  behavior.  */
-              map->l_flags_1 |= DF_1_NODELETE;
+              if (flags & DL_LOOKUP_INSIDE_DLOPEN)
+                /* In case of non-lazy binding, we could actually
+                   report the memory allocation error, but for now, we
+                   use the conservative approximation as well.  */
+                map->l_nodelete = link_map_nodelete_pending;
+              else
+                map->l_nodelete = link_map_nodelete_active;
               goto out;
             }
           else
@@ -792,11 +812,9 @@ _dl_lookup_symbol_x (const char *undef_name, struct link_map *undef_map,
 
   bump_num_relocations ();
 
-  /* No other flag than DL_LOOKUP_ADD_DEPENDENCY or DL_LOOKUP_GSCOPE_LOCK
-     is allowed if we look up a versioned symbol.  */
-  assert (version == NULL
-          || (flags & ~(DL_LOOKUP_ADD_DEPENDENCY | DL_LOOKUP_GSCOPE_LOCK))
-             == 0);
+  /* DL_LOOKUP_RETURN_NEWEST does not make sense for versioned
+     lookups.  */
+  assert (version == NULL || !(flags & DL_LOOKUP_RETURN_NEWEST));
 
   size_t i = 0;
   if (__glibc_unlikely (skip_map != NULL))
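
The three marking sites in dl-lookup.c above share one pattern; a
minimal sketch of it as a helper (hypothetical -- the commit
open-codes it at each site), reusing the enum and stub struct from
the earlier sketch, with the flag value mirroring the new ldsodefs.h
constant:

#define DL_LOOKUP_INSIDE_DLOPEN 8

static void
mark_nodelete (struct link_map_stub *map, int flags)
{
  if (flags & DL_LOOKUP_INSIDE_DLOPEN)
    /* Inside dlopen: record the intent only; activate_nodelete
       promotes it once failure is no longer possible.  */
    map->l_nodelete = link_map_nodelete_pending;
  else
    /* Lazy binding after a successful dlopen: pin immediately.  */
    map->l_nodelete = link_map_nodelete_active;
}
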
diff --git a/elf/dl-open.c b/elf/dl-open.c
index 94d29d1..a40ab00 100644
--- a/elf/dl-open.c
+++ b/elf/dl-open.c
@@ -419,6 +419,25 @@ TLS generation counter wrapped!  Please report this."));
     }
 }
 
+/* Mark the objects as NODELETE if required.  This is delayed until
+   after dlopen failure is not possible, so that _dl_close can clean
+   up objects if necessary.  */
+static void
+activate_nodelete (struct link_map *new, int mode)
+{
+  if (mode & RTLD_NODELETE || new->l_nodelete == link_map_nodelete_pending)
+    new->l_nodelete = link_map_nodelete_active;
+
+  /* Update all objects in our scope.  Any of them could have been
+     promoted to NODELETE status due to new relocations.  */
+  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
+    {
+      struct link_map *imap = new->l_searchlist.r_list[i];
+      if (imap->l_nodelete == link_map_nodelete_pending)
+        imap->l_nodelete = link_map_nodelete_active;
+    }
+}
+
 /* struct dl_init_args and call_dl_init are used to call _dl_init with
    exception handling disabled.  */
 struct dl_init_args
@@ -488,12 +507,6 @@ dl_open_worker (void *a)
           return;
         }
 
-      /* Mark the object as not deletable if the RTLD_NODELETE flags was
-         passed.  Do this early so that we don't skip marking the object if
-         it was already loaded.  */
-      if (__glibc_unlikely (mode & RTLD_NODELETE))
-        new->l_flags_1 |= DF_1_NODELETE;
-
       if (__glibc_unlikely (mode & __RTLD_SPROF))
         /* This happens only if we load a DSO for 'sprof'.  */
         return;
@@ -509,11 +522,17 @@ dl_open_worker (void *a)
         _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
                           new->l_name, new->l_ns, new->l_direct_opencount);
 
-      /* If the user requested the object to be in the global namespace
-         but it is not so far, add it now.  */
+      /* If the user requested the object to be in the global
+         namespace but it is not so far, add it now.  This can raise
+         an exception due to a malloc failure.  */
       if ((mode & RTLD_GLOBAL) && new->l_global == 0)
         add_to_global (new);
 
+      /* Mark the object as not deletable if the RTLD_NODELETE flag
+         was passed.  */
+      if (__glibc_unlikely (mode & RTLD_NODELETE))
+        new->l_nodelete = link_map_nodelete_active;
+
       assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);
 
       return;
@@ -590,6 +609,14 @@ dl_open_worker (void *a)
 
   int relocation_in_progress = 0;
 
+  /* Perform relocation.  This can trigger lazy binding in IFUNC
+     resolvers.  For NODELETE mappings, these dependencies are not
+     recorded because the flag has not been applied to the newly
+     loaded objects.  This means that upon dlopen failure, these
+     NODELETE objects can be unloaded despite existing references to
+     them.  However, such relocation dependencies in IFUNC resolvers
+     are undefined anyway, so this is not a problem.  */
+
   for (unsigned int i = nmaps; i-- > 0; )
     {
       l = maps[i];
@@ -619,7 +646,7 @@ dl_open_worker (void *a)
               _dl_start_profile ();
 
               /* Prevent unloading the object.  */
-              GL(dl_profile_map)->l_flags_1 |= DF_1_NODELETE;
+              GL(dl_profile_map)->l_nodelete = link_map_nodelete_active;
             }
         }
       else
@@ -650,6 +677,8 @@ dl_open_worker (void *a)
      All memory allocations for new objects must have happened
      before.  */
 
+  activate_nodelete (new, mode);
+
   /* Second stage after resize_scopes: Actually perform the scope
      update.  After this, dlsym and lazy binding can bind to new
      objects.  */
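
A small user-level test in the spirit of BZ #20839 can observe the
fix.  The DSO names are placeholders: assume libbroken.so has an
unresolvable symbol reference and depends on libdep.so.  Build with
"gcc test.c -ldl":

#define _GNU_SOURCE 1
#include <dlfcn.h>
#include <stdio.h>

int
main (void)
{
  void *h = dlopen ("libbroken.so", RTLD_NOW);
  if (h == NULL)
    printf ("dlopen failed as expected: %s\n", dlerror ());

  /* RTLD_NOLOAD succeeds only if the object is already mapped, so a
     NULL result shows the dependency was unloaded as well.  */
  void *dep = dlopen ("libdep.so", RTLD_LAZY | RTLD_NOLOAD);
  printf ("libdep.so still mapped: %s\n", dep != NULL ? "yes" : "no");
  if (dep != NULL)
    dlclose (dep);
  return 0;
}
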
diff --git a/elf/dl-reloc.c b/elf/dl-reloc.c
index 725a074..edb9d74 100644
--- a/elf/dl-reloc.c
+++ b/elf/dl-reloc.c
@@ -244,7 +244,8 @@ _dl_relocate_object (struct link_map *l, struct r_scope_elem *scope[],
          v = (version);                                                      \
          _lr = _dl_lookup_symbol_x (strtab + (*ref)->st_name, l, (ref),      \
                                     scope, v, _tc,                           \
-                                    DL_LOOKUP_ADD_DEPENDENCY, NULL);         \
+                                    DL_LOOKUP_ADD_DEPENDENCY                 \
+                                    | DL_LOOKUP_INSIDE_DLOPEN, NULL);        \
          l->l_lookup_cache.ret = (*ref);                                     \
          l->l_lookup_cache.value = _lr; }))                                  \
      : l)
diff --git a/elf/get-dynamic-info.h b/elf/get-dynamic-info.h
index af9dd57..075683d 100644
--- a/elf/get-dynamic-info.h
+++ b/elf/get-dynamic-info.h
@@ -163,6 +163,8 @@ elf_get_dynamic_info (struct link_map *l, ElfW(Dyn) *temp)
   if (info[VERSYMIDX (DT_FLAGS_1)] != NULL)
     {
       l->l_flags_1 = info[VERSYMIDX (DT_FLAGS_1)]->d_un.d_val;
+      if (l->l_flags_1 & DF_1_NODELETE)
+        l->l_nodelete = link_map_nodelete_pending;
 
       /* Only DT_1_SUPPORTED_MASK bits are supported, and we would like
          to assert this, but we can't.  Users have been setting
diff --git a/include/link.h b/include/link.h
index 1184201..8a74399 100644
--- a/include/link.h
+++ b/include/link.h
@@ -79,6 +79,21 @@ struct r_search_path_struct
     int malloced;
   };
 
+/* Type used by the l_nodelete member.  */
+enum link_map_nodelete
+{
+  /* This link map can be deallocated.  */
+  link_map_nodelete_inactive,
+
+  /* This link map cannot be deallocated.  */
+  link_map_nodelete_active,
+
+  /* This link map cannot be deallocated after dlopen has succeeded.
+     dlopen turns this into link_map_nodelete_active.  dlclose treats
+     this intermediate state as link_map_nodelete_active.  */
+  link_map_nodelete_pending
+};
+
 /* Structure describing a loaded shared object.  The `l_next' and
    `l_prev' members form a chain of all the shared objects loaded at
    startup.
@@ -203,6 +218,10 @@ struct link_map
                                        freed, ie. not allocated with
                                        the dummy malloc in ld.so.  */
 
+    /* Actually of type enum link_map_nodelete.  Separate byte due to
+       concurrent access.  */
+    unsigned char l_nodelete;
+
 #include <link_map.h>
 
     /* Collected information about own RPATH directories.  */
diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
index c546757..874be96 100644
--- a/sysdeps/generic/ldsodefs.h
+++ b/sysdeps/generic/ldsodefs.h
@@ -923,6 +923,10 @@ enum
     DL_LOOKUP_RETURN_NEWEST = 2,
     /* Set if dl_lookup* called with GSCOPE lock held.  */
     DL_LOOKUP_GSCOPE_LOCK = 4,
+
+    /* Set if dl_lookup* is called from within dlopen, for non-lazy
+       binding.  */
+    DL_LOOKUP_INSIDE_DLOPEN = 8,
   };
 
 /* Lookup versioned symbol.  */
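
For comparison, the RTLD_NODELETE promotion that activate_nodelete
now performs only once failure has become impossible is observable
from user code (libfoo.so is a placeholder; build with "gcc demo.c
-ldl"):

#define _GNU_SOURCE 1
#include <dlfcn.h>
#include <stdio.h>

int
main (void)
{
  void *h = dlopen ("libfoo.so", RTLD_NOW | RTLD_NODELETE);
  if (h == NULL)
    return 1;
  dlclose (h);

  /* The link map reached link_map_nodelete_active, so dlclose did
     not unmap the object and RTLD_NOLOAD still finds it.  */
  void *again = dlopen ("libfoo.so", RTLD_LAZY | RTLD_NOLOAD);
  printf ("still mapped after dlclose: %s\n", again != NULL ? "yes" : "no");
  return 0;
}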