From: Martin Liska
To: gcc-cvs@gcc.gnu.org
Subject: [gcc r13-103] libsanitizer: merge from upstream (0a1bcab9f3bf75c4c5d3e53bafb3eeb80320af46).
X-Act-Checkin: gcc
X-Git-Author: Martin Liska
X-Git-Refname: refs/heads/master
X-Git-Oldrev: e2285af309000b74da0f7dc756a0b55e5f0b1b56
X-Git-Newrev: f732bf6a603721f61102a08ad2d023c7c2670870
Message-Id: <20220504090244.258423858418@sourceware.org>
Date: Wed, 4 May 2022 09:02:44 +0000 (GMT)

https://gcc.gnu.org/g:f732bf6a603721f61102a08ad2d023c7c2670870

commit r13-103-gf732bf6a603721f61102a08ad2d023c7c2670870
Author: Martin Liska
Date:   Tue May 3 12:56:26 2022 +0200

    libsanitizer: merge from upstream (0a1bcab9f3bf75c4c5d3e53bafb3eeb80320af46).

Diff:
---
 libsanitizer/MERGE | 2 +-
 libsanitizer/asan/asan_activation.cpp | 2 +-
 libsanitizer/asan/asan_allocator.cpp | 56 +-
 libsanitizer/asan/asan_debugging.cpp | 4 +-
 libsanitizer/asan/asan_errors.cpp | 10 +-
 libsanitizer/asan/asan_errors.h | 6 +-
 libsanitizer/asan/asan_fake_stack.cpp | 15 +-
 libsanitizer/asan/asan_flags.cpp | 6 +-
 libsanitizer/asan/asan_flags.inc | 5 +-
 libsanitizer/asan/asan_fuchsia.cpp | 32 +-
 libsanitizer/asan/asan_globals.cpp | 26 +-
 libsanitizer/asan/asan_interceptors.cpp | 35 +-
 libsanitizer/asan/asan_interceptors.h | 7 +-
 libsanitizer/asan/asan_interface.inc | 1 +
 libsanitizer/asan/asan_internal.h | 38 +-
 libsanitizer/asan/asan_linux.cpp | 32 +-
 libsanitizer/asan/asan_mac.cpp | 2 +-
 libsanitizer/asan/asan_mapping.h | 194 ++---
 libsanitizer/asan/asan_mapping_sparc64.h | 9 +-
 libsanitizer/asan/asan_poisoning.cpp | 30 +-
 libsanitizer/asan/asan_poisoning.h | 11 +-
 libsanitizer/asan/asan_posix.cpp | 47 +-
 libsanitizer/asan/asan_premap_shadow.cpp | 2 +-
 libsanitizer/asan/asan_report.cpp | 10 +-
 libsanitizer/asan/asan_rtl.cpp | 73 +-
 libsanitizer/asan/asan_rtl_static.cpp | 36 +
 libsanitizer/asan/asan_rtl_x86_64.S | 146 ++++
 libsanitizer/asan/asan_thread.cpp | 21 +-
 libsanitizer/asan/asan_win.cpp | 40 +-
 libsanitizer/asan/asan_win_dll_thunk.cpp | 11 +
 libsanitizer/builtins/assembly.h | 6 +
 libsanitizer/hwasan/hwasan.cpp | 1 +
 libsanitizer/hwasan/hwasan.h | 15 -
 libsanitizer/hwasan/hwasan_allocator.cpp | 9 +-
 libsanitizer/hwasan/hwasan_interceptors.cpp | 9 +-
 libsanitizer/hwasan/hwasan_memintrinsics.cpp | 2 +-
 libsanitizer/hwasan/hwasan_new_delete.cpp | 63 +-
 libsanitizer/hwasan/hwasan_preinit.cpp | 23 +
 .../include/sanitizer/common_interface_defs.h | 9 +
 libsanitizer/include/sanitizer/dfsan_interface.h | 28 +
 libsanitizer/interception/interception_win.cpp | 3 +
 libsanitizer/lsan/lsan.cpp | 9 +-
 libsanitizer/lsan/lsan.h | 26 +-
 libsanitizer/lsan/lsan_allocator.cpp | 25 +-
 libsanitizer/lsan/lsan_allocator.h | 5 +-
 libsanitizer/lsan/lsan_common.cpp | 481 +++++------
 libsanitizer/lsan/lsan_common.h | 48 +-
 libsanitizer/lsan/lsan_common_fuchsia.cpp | 15 +-
 libsanitizer/lsan/lsan_common_linux.cpp | 5 +-
 libsanitizer/lsan/lsan_common_mac.cpp | 11 +-
 libsanitizer/lsan/lsan_fuchsia.cpp | 12 +-
 libsanitizer/lsan/lsan_interceptors.cpp | 22 +-
 libsanitizer/lsan/lsan_mac.cpp | 2 +-
 libsanitizer/lsan/lsan_posix.cpp | 7 +-
 libsanitizer/lsan/lsan_thread.cpp | 26 +-
 libsanitizer/lsan/lsan_thread.h | 5 +-
 .../sanitizer_common/sanitizer_addrhashmap.h | 6 +-
 .../sanitizer_common/sanitizer_allocator.cpp | 83 +-
 .../sanitizer_common/sanitizer_allocator.h | 3 +
 .../sanitizer_allocator_combined.h | 4 +-
 .../sanitizer_allocator_internal.h | 2 +
 .../sanitizer_allocator_primary32.h | 4 +-
 .../sanitizer_allocator_primary64.h | 4 +-
 .../sanitizer_allocator_secondary.h | 4 +-
 .../sanitizer_common/sanitizer_atomic_clang.h | 13 +-
 .../sanitizer_chained_origin_depot.cpp | 1 -
 libsanitizer/sanitizer_common/sanitizer_common.cpp | 39 +-
 libsanitizer/sanitizer_common/sanitizer_common.h | 50 +-
 .../sanitizer_common_interceptors.inc | 159 +++-
 .../sanitizer_common_interceptors_ioctl.inc | 10 +-
 .../sanitizer_common_interface_posix.inc | 2 +
 .../sanitizer_common/sanitizer_common_libcdep.cpp | 77 +-
 .../sanitizer_coverage_fuchsia.cpp | 1 +
 .../sanitizer_coverage_libcdep_new.cpp | 20 +-
 .../sanitizer_common/sanitizer_dense_map.h | 705 ++++++++++++++++
 .../sanitizer_common/sanitizer_dense_map_info.h | 282 +++++++
 libsanitizer/sanitizer_common/sanitizer_file.cpp | 9 +-
 libsanitizer/sanitizer_common/sanitizer_file.h | 2 +-
 libsanitizer/sanitizer_common/sanitizer_flags.inc | 3 +
 .../sanitizer_common/sanitizer_fuchsia.cpp | 62 +-
 .../sanitizer_interface_internal.h | 178 ++--
 .../sanitizer_common/sanitizer_internal_defs.h | 8 +-
 libsanitizer/sanitizer_common/sanitizer_leb128.h | 87 ++
 libsanitizer/sanitizer_common/sanitizer_linux.cpp | 215 ++---
 libsanitizer/sanitizer_common/sanitizer_linux.h | 3 +
 .../sanitizer_common/sanitizer_linux_libcdep.cpp | 46 +-
 .../sanitizer_common/sanitizer_linux_s390.cpp | 14 +-
 libsanitizer/sanitizer_common/sanitizer_lzw.h | 159 ++++
 libsanitizer/sanitizer_common/sanitizer_mac.cpp | 95 ++-
 libsanitizer/sanitizer_common/sanitizer_mac.h | 20 -
 libsanitizer/sanitizer_common/sanitizer_mutex.h | 63 +-
 .../sanitizer_persistent_allocator.h | 110 ---
 libsanitizer/sanitizer_common/sanitizer_platform.h | 331 ++++----
 .../sanitizer_platform_interceptors.h | 2 +
 .../sanitizer_platform_limits_freebsd.cpp | 2 +-
 .../sanitizer_platform_limits_freebsd.h | 2 +-
 .../sanitizer_platform_limits_linux.cpp | 5 +-
 .../sanitizer_platform_limits_netbsd.cpp | 2 +-
 .../sanitizer_platform_limits_netbsd.h | 2 +-
 .../sanitizer_platform_limits_posix.cpp | 33 +-
 .../sanitizer_platform_limits_posix.h | 24 +-
 .../sanitizer_platform_limits_solaris.cpp | 2 +-
 .../sanitizer_platform_limits_solaris.h | 2 +-
 libsanitizer/sanitizer_common/sanitizer_posix.cpp | 1 +
 .../sanitizer_common/sanitizer_posix_libcdep.cpp | 2 +-
 libsanitizer/sanitizer_common/sanitizer_printf.cpp | 10 +-
 .../sanitizer_common/sanitizer_procmaps_mac.cpp | 12 +-
 .../sanitizer_common/sanitizer_quarantine.h | 4 +-
 .../sanitizer_common/sanitizer_ring_buffer.h | 13 +-
 .../sanitizer_common/sanitizer_stack_store.cpp | 379 +++++++++
 .../sanitizer_common/sanitizer_stack_store.h | 121 +++
 .../sanitizer_common/sanitizer_stackdepot.cpp | 157 +++-
 .../sanitizer_common/sanitizer_stackdepot.h | 1 +
 .../sanitizer_common/sanitizer_stacktrace.cpp | 28 +-
 .../sanitizer_common/sanitizer_stacktrace.h | 9 +-
 .../sanitizer_stacktrace_libcdep.cpp | 13 +-
 .../sanitizer_stacktrace_printer.cpp | 19 +
 .../sanitizer_stoptheworld_win.cpp | 175 ++++
 .../sanitizer_common/sanitizer_symbolizer.cpp | 20 +-
 .../sanitizer_common/sanitizer_symbolizer.h | 7 +-
 .../sanitizer_symbolizer_internal.h | 7 +-
 .../sanitizer_symbolizer_libcdep.cpp | 18 +-
 .../sanitizer_common/sanitizer_symbolizer_mac.cpp | 57 +-
 .../sanitizer_common/sanitizer_symbolizer_mac.h | 1 -
 .../sanitizer_symbolizer_markup.cpp | 4 +-
 .../sanitizer_symbolizer_posix_libcdep.cpp | 58 +-
 .../sanitizer_symbolizer_report.cpp | 4 +-
 .../sanitizer_common/sanitizer_symbolizer_win.cpp | 2 +-
 .../sanitizer_common/sanitizer_syscalls_netbsd.inc | 4 +-
 .../sanitizer_common/sanitizer_thread_registry.cpp | 56 +-
 .../sanitizer_common/sanitizer_thread_registry.h | 18 +-
 .../sanitizer_common/sanitizer_thread_safety.h | 45 +-
 .../sanitizer_common/sanitizer_type_traits.h | 79 ++
 .../sanitizer_common/sanitizer_unwind_win.cpp | 27 +-
 libsanitizer/sanitizer_common/sanitizer_win.cpp | 38 +-
 libsanitizer/tsan/tsan_clock.cpp | 625 --------------
 libsanitizer/tsan/tsan_clock.h | 293 -------
 libsanitizer/tsan/tsan_debugging.cpp | 2 +-
 libsanitizer/tsan/tsan_defs.h | 54 +-
 libsanitizer/tsan/tsan_dense_alloc.h | 9 +
 libsanitizer/tsan/tsan_fd.cpp | 91 ++-
 libsanitizer/tsan/tsan_fd.h | 1 +
 libsanitizer/tsan/tsan_flags.cpp | 8 +-
 libsanitizer/tsan/tsan_flags.inc | 16 +-
 libsanitizer/tsan/tsan_interceptors.h | 20 +-
 libsanitizer/tsan/tsan_interceptors_posix.cpp | 200 +++--
 libsanitizer/tsan/tsan_interface.cpp | 14 -
 libsanitizer/tsan/tsan_interface.inc | 8 +
 libsanitizer/tsan/tsan_interface_atomic.cpp | 87 +-
 libsanitizer/tsan/tsan_interface_java.cpp | 4 +-
 libsanitizer/tsan/tsan_mman.cpp | 82 +-
 libsanitizer/tsan/tsan_mman.h | 4 +
 libsanitizer/tsan/tsan_mutexset.cpp | 54 +-
 libsanitizer/tsan/tsan_mutexset.h | 11 +-
 libsanitizer/tsan/tsan_platform.h | 285 ++-----
 libsanitizer/tsan/tsan_platform_linux.cpp | 48 +-
 libsanitizer/tsan/tsan_platform_mac.cpp | 152 ++--
 libsanitizer/tsan/tsan_platform_posix.cpp | 18 +-
 libsanitizer/tsan/tsan_platform_windows.cpp | 3 -
 libsanitizer/tsan/tsan_report.cpp | 29 +-
 libsanitizer/tsan/tsan_report.h | 6 +-
 libsanitizer/tsan/tsan_rtl.cpp | 724 ++++++++++++-----
 libsanitizer/tsan/tsan_rtl.h | 341 ++++----
 libsanitizer/tsan/tsan_rtl_access.cpp | 895 ++++++++++++---------
 libsanitizer/tsan/tsan_rtl_amd64.S | 236 ------
 libsanitizer/tsan/tsan_rtl_mutex.cpp | 645 ++++++++-------
 libsanitizer/tsan/tsan_rtl_ppc64.S | 1 -
 libsanitizer/tsan/tsan_rtl_proc.cpp | 1 -
 libsanitizer/tsan/tsan_rtl_report.cpp | 369 +++------
 libsanitizer/tsan/tsan_rtl_thread.cpp | 228 +++---
 libsanitizer/tsan/tsan_shadow.h | 315 +++-----
 libsanitizer/tsan/tsan_sync.cpp | 82 +-
 libsanitizer/tsan/tsan_sync.h | 47 +-
 libsanitizer/tsan/tsan_trace.h | 113 ++-
 libsanitizer/tsan/tsan_update_shadow_word.inc | 59 --
 libsanitizer/ubsan/ubsan_diag.cpp | 12 +-
 libsanitizer/ubsan/ubsan_flags.cpp | 1 -
 libsanitizer/ubsan/ubsan_handlers.cpp | 15 -
 libsanitizer/ubsan/ubsan_handlers.h | 8 -
 libsanitizer/ubsan/ubsan_handlers_cxx.h | 2 +-
 libsanitizer/ubsan/ubsan_init.cpp | 7 +-
 libsanitizer/ubsan/ubsan_platform.h | 2 -
 182 files changed, 6955 insertions(+), 5387 deletions(-)

diff --git a/libsanitizer/MERGE b/libsanitizer/MERGE
index 01913de5d66..b92d082f9ae 100644
--- a/libsanitizer/MERGE
+++ b/libsanitizer/MERGE
@@ -1,4 +1,4 @@
-82bc6a094e85014f1891ef9407496f44af8fe442
+0a1bcab9f3bf75c4c5d3e53bafb3eeb80320af46
 
 The first line of this file holds the git revision number of the last
 merge done from the master library sources.
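[Editor's note] The bulk of the ASan changes that follow are a mechanical rename: SHADOW_SCALE, SHADOW_GRANULARITY, and SHADOW_OFFSET become ASAN_SHADOW_SCALE, ASAN_SHADOW_GRANULARITY, and ASAN_SHADOW_OFFSET, with the per-platform offsets folded into asan_mapping.h as plain ASAN_SHADOW_OFFSET_CONST values. The mapping itself is unchanged: one shadow byte describes one 8-byte granule of application memory. A minimal standalone sketch of that arithmetic, assuming the x86-64 Linux constants from this patch (mem_to_shadow is a stand-in for the runtime's MEM_TO_SHADOW macro, not part of the patch):

#include <cstdint>
#include <cstdio>

// Mirrors ASAN_SHADOW_SCALE = 3 and the x86-64 Linux
// ASAN_SHADOW_OFFSET_CONST = 0x000000007fff8000 from this patch.
constexpr uint64_t kShadowScale = 3;
constexpr uint64_t kShadowGranularity = 1ULL << kShadowScale;  // 8 bytes
constexpr uint64_t kShadowOffset = 0x000000007fff8000ULL;

// Stand-in for MEM_TO_SHADOW: the shadow byte for an application
// address lives at (mem >> scale) + offset.
constexpr uint64_t mem_to_shadow(uint64_t mem) {
  return (mem >> kShadowScale) + kShadowOffset;
}

int main() {
  const uint64_t addr = 0x602000000010ULL;  // a typical heap address
  std::printf("shadow byte for %#llx (one per %llu-byte granule) is at %#llx\n",
              (unsigned long long)addr,
              (unsigned long long)kShadowGranularity,
              (unsigned long long)mem_to_shadow(addr));
  return 0;
}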
diff --git a/libsanitizer/asan/asan_activation.cpp b/libsanitizer/asan/asan_activation.cpp index 795df95a541..1757838600c 100644 --- a/libsanitizer/asan/asan_activation.cpp +++ b/libsanitizer/asan/asan_activation.cpp @@ -112,7 +112,7 @@ void AsanDeactivate() { disabled.quarantine_size_mb = 0; disabled.thread_local_quarantine_size_kb = 0; // Redzone must be at least Max(16, granularity) bytes long. - disabled.min_redzone = Max(16, (int)SHADOW_GRANULARITY); + disabled.min_redzone = Max(16, (int)ASAN_SHADOW_GRANULARITY); disabled.max_redzone = disabled.min_redzone; disabled.alloc_dealloc_mismatch = false; disabled.may_return_null = true; diff --git a/libsanitizer/asan/asan_allocator.cpp b/libsanitizer/asan/asan_allocator.cpp index 3fa36742060..7b7a289c2d2 100644 --- a/libsanitizer/asan/asan_allocator.cpp +++ b/libsanitizer/asan/asan_allocator.cpp @@ -210,8 +210,7 @@ struct QuarantineCallback { CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE); } - PoisonShadow(m->Beg(), - RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY), + PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY), kAsanHeapLeftRedzoneMagic); // Statistics. @@ -305,7 +304,6 @@ struct Allocator { QuarantineCache fallback_quarantine_cache; uptr max_user_defined_malloc_size; - atomic_uint8_t rss_limit_exceeded; // ------------------- Options -------------------------- atomic_uint16_t min_redzone; @@ -345,14 +343,6 @@ struct Allocator { : kMaxAllowedMallocSize; } - bool RssLimitExceeded() { - return atomic_load(&rss_limit_exceeded, memory_order_relaxed); - } - - void SetRssLimitExceeded(bool limit_exceeded) { - atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed); - } - void RePoisonChunk(uptr chunk) { // This could be a user-facing chunk (with redzones), or some internal // housekeeping chunk, like TransferBatch. Start by assuming the former. @@ -366,7 +356,7 @@ struct Allocator { if (chunk < beg && beg < end && end <= chunk_end) { // Looks like a valid AsanChunk in use, poison redzones only. PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic); - uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY); + uptr end_aligned_down = RoundDownTo(end, ASAN_SHADOW_GRANULARITY); FastPoisonShadowPartialRightRedzone( end_aligned_down, end - end_aligned_down, chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic); @@ -484,14 +474,14 @@ struct Allocator { AllocType alloc_type, bool can_fill) { if (UNLIKELY(!asan_inited)) AsanInitFromRtl(); - if (RssLimitExceeded()) { + if (UNLIKELY(IsRssLimitExceeded())) { if (AllocatorMayReturnNull()) return nullptr; ReportRssLimitExceeded(stack); } Flags &fl = *flags(); CHECK(stack); - const uptr min_alignment = SHADOW_GRANULARITY; + const uptr min_alignment = ASAN_SHADOW_GRANULARITY; const uptr user_requested_alignment_log = ComputeUserRequestedAlignmentLog(alignment); if (alignment < min_alignment) @@ -572,7 +562,7 @@ struct Allocator { m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack)); uptr size_rounded_down_to_granularity = - RoundDownTo(size, SHADOW_GRANULARITY); + RoundDownTo(size, ASAN_SHADOW_GRANULARITY); // Unpoison the bulk of the memory region. if (size_rounded_down_to_granularity) PoisonShadow(user_beg, size_rounded_down_to_granularity, 0); @@ -580,7 +570,7 @@ struct Allocator { if (size != size_rounded_down_to_granularity && CanPoisonMemory()) { u8 *shadow = (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity); - *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0; + *shadow = fl.poison_partial ? 
(size & (ASAN_SHADOW_GRANULARITY - 1)) : 0; } AsanStats &thread_stats = GetCurrentThreadStats(); @@ -607,7 +597,7 @@ struct Allocator { CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg); reinterpret_cast(alloc_beg)->Set(m); } - ASAN_MALLOC_HOOK(res, size); + RunMallocHooks(res, size); return res; } @@ -650,8 +640,7 @@ struct Allocator { } // Poison the region. - PoisonShadow(m->Beg(), - RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY), + PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY), kAsanHeapFreeMagic); AsanStats &thread_stats = GetCurrentThreadStats(); @@ -689,7 +678,7 @@ struct Allocator { return; } - ASAN_FREE_HOOK(ptr); + RunFreeHooks(ptr); // Must mark the chunk as quarantined before any changes to its metadata. // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag. @@ -851,12 +840,12 @@ struct Allocator { quarantine.PrintStats(); } - void ForceLock() ACQUIRE(fallback_mutex) { + void ForceLock() SANITIZER_ACQUIRE(fallback_mutex) { allocator.ForceLock(); fallback_mutex.Lock(); } - void ForceUnlock() RELEASE(fallback_mutex) { + void ForceUnlock() SANITIZER_RELEASE(fallback_mutex) { fallback_mutex.Unlock(); allocator.ForceUnlock(); } @@ -1065,14 +1054,12 @@ uptr asan_mz_size(const void *ptr) { return instance.AllocationSize(reinterpret_cast(ptr)); } -void asan_mz_force_lock() NO_THREAD_SAFETY_ANALYSIS { instance.ForceLock(); } - -void asan_mz_force_unlock() NO_THREAD_SAFETY_ANALYSIS { - instance.ForceUnlock(); +void asan_mz_force_lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { + instance.ForceLock(); } -void AsanSoftRssLimitExceededCallback(bool limit_exceeded) { - instance.SetRssLimitExceeded(limit_exceeded); +void asan_mz_force_unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { + instance.ForceUnlock(); } } // namespace __asan @@ -1230,16 +1217,3 @@ int __asan_update_allocation_context(void* addr) { GET_STACK_TRACE_MALLOC; return instance.UpdateAllocationStack((uptr)addr, &stack); } - -#if !SANITIZER_SUPPORTS_WEAK_HOOKS -// Provide default (no-op) implementation of malloc hooks. 
-SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook, - void *ptr, uptr size) { - (void)ptr; - (void)size; -} - -SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) { - (void)ptr; -} -#endif diff --git a/libsanitizer/asan/asan_debugging.cpp b/libsanitizer/asan/asan_debugging.cpp index 0b4bf52f249..f078f1041a8 100644 --- a/libsanitizer/asan/asan_debugging.cpp +++ b/libsanitizer/asan/asan_debugging.cpp @@ -141,7 +141,7 @@ uptr __asan_get_free_stack(uptr addr, uptr *trace, uptr size, u32 *thread_id) { SANITIZER_INTERFACE_ATTRIBUTE void __asan_get_shadow_mapping(uptr *shadow_scale, uptr *shadow_offset) { if (shadow_scale) - *shadow_scale = SHADOW_SCALE; + *shadow_scale = ASAN_SHADOW_SCALE; if (shadow_offset) - *shadow_offset = SHADOW_OFFSET; + *shadow_offset = ASAN_SHADOW_OFFSET; } diff --git a/libsanitizer/asan/asan_errors.cpp b/libsanitizer/asan/asan_errors.cpp index 7cd9fe911af..a22bf130d82 100644 --- a/libsanitizer/asan/asan_errors.cpp +++ b/libsanitizer/asan/asan_errors.cpp @@ -329,7 +329,7 @@ void ErrorBadParamsToAnnotateContiguousContainer::Print() { " old_mid : %p\n" " new_mid : %p\n", (void *)beg, (void *)end, (void *)old_mid, (void *)new_mid); - uptr granularity = SHADOW_GRANULARITY; + uptr granularity = ASAN_SHADOW_GRANULARITY; if (!IsAligned(beg, granularity)) Report("ERROR: beg is not aligned by %zu\n", granularity); stack->Print(); @@ -410,7 +410,8 @@ ErrorGeneric::ErrorGeneric(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr addr, if (AddrIsInMem(addr)) { u8 *shadow_addr = (u8 *)MemToShadow(addr); // If we are accessing 16 bytes, look at the second shadow byte. - if (*shadow_addr == 0 && access_size > SHADOW_GRANULARITY) shadow_addr++; + if (*shadow_addr == 0 && access_size > ASAN_SHADOW_GRANULARITY) + shadow_addr++; // If we are in the partial right redzone, look at the next shadow byte. 
if (*shadow_addr > 0 && *shadow_addr < 128) shadow_addr++; bool far_from_bounds = false; @@ -501,10 +502,11 @@ static void PrintLegend(InternalScopedString *str) { str->append( "Shadow byte legend (one shadow byte represents %d " "application bytes):\n", - (int)SHADOW_GRANULARITY); + (int)ASAN_SHADOW_GRANULARITY); PrintShadowByte(str, " Addressable: ", 0); str->append(" Partially addressable: "); - for (u8 i = 1; i < SHADOW_GRANULARITY; i++) PrintShadowByte(str, "", i, " "); + for (u8 i = 1; i < ASAN_SHADOW_GRANULARITY; i++) + PrintShadowByte(str, "", i, " "); str->append("\n"); PrintShadowByte(str, " Heap left redzone: ", kAsanHeapLeftRedzoneMagic); diff --git a/libsanitizer/asan/asan_errors.h b/libsanitizer/asan/asan_errors.h index a7fda2fd9f5..c6ac88f6dc2 100644 --- a/libsanitizer/asan/asan_errors.h +++ b/libsanitizer/asan/asan_errors.h @@ -53,9 +53,9 @@ struct ErrorDeadlySignal : ErrorBase { scariness.Scare(10, "null-deref"); } else if (signal.addr == signal.pc) { scariness.Scare(60, "wild-jump"); - } else if (signal.write_flag == SignalContext::WRITE) { + } else if (signal.write_flag == SignalContext::Write) { scariness.Scare(30, "wild-addr-write"); - } else if (signal.write_flag == SignalContext::READ) { + } else if (signal.write_flag == SignalContext::Read) { scariness.Scare(20, "wild-addr-read"); } else { scariness.Scare(25, "wild-addr"); @@ -372,7 +372,7 @@ struct ErrorGeneric : ErrorBase { u8 shadow_val; ErrorGeneric() = default; // (*) - ErrorGeneric(u32 tid, uptr addr, uptr pc_, uptr bp_, uptr sp_, bool is_write_, + ErrorGeneric(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr addr, bool is_write_, uptr access_size_); void Print(); }; diff --git a/libsanitizer/asan/asan_fake_stack.cpp b/libsanitizer/asan/asan_fake_stack.cpp index 07681c10de9..74a039b6579 100644 --- a/libsanitizer/asan/asan_fake_stack.cpp +++ b/libsanitizer/asan/asan_fake_stack.cpp @@ -28,8 +28,8 @@ static const u64 kAllocaRedzoneMask = 31UL; // For small size classes inline PoisonShadow for better performance. ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) { u64 *shadow = reinterpret_cast(MemToShadow(ptr)); - if (SHADOW_SCALE == 3 && class_id <= 6) { - // This code expects SHADOW_SCALE=3. + if (ASAN_SHADOW_SCALE == 3 && class_id <= 6) { + // This code expects ASAN_SHADOW_SCALE=3. for (uptr i = 0; i < (((uptr)1) << class_id); i++) { shadow[i] = magic; // Make sure this does not become memset. @@ -140,7 +140,6 @@ void FakeStack::HandleNoReturn() { // We do it based on their 'real_stack' values -- everything that is lower // than the current real_stack is garbage. 
NOINLINE void FakeStack::GC(uptr real_stack) { - uptr collected = 0; for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) { u8 *flags = GetFlags(stack_size_log(), class_id); for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n; @@ -150,7 +149,6 @@ NOINLINE void FakeStack::GC(uptr real_stack) { GetFrame(stack_size_log(), class_id, i)); if (ff->real_stack < real_stack) { flags[i] = 0; - collected++; } } } @@ -294,10 +292,10 @@ void __asan_alloca_poison(uptr addr, uptr size) { uptr LeftRedzoneAddr = addr - kAllocaRedzoneSize; uptr PartialRzAddr = addr + size; uptr RightRzAddr = (PartialRzAddr + kAllocaRedzoneMask) & ~kAllocaRedzoneMask; - uptr PartialRzAligned = PartialRzAddr & ~(SHADOW_GRANULARITY - 1); + uptr PartialRzAligned = PartialRzAddr & ~(ASAN_SHADOW_GRANULARITY - 1); FastPoisonShadow(LeftRedzoneAddr, kAllocaRedzoneSize, kAsanAllocaLeftMagic); FastPoisonShadowPartialRightRedzone( - PartialRzAligned, PartialRzAddr % SHADOW_GRANULARITY, + PartialRzAligned, PartialRzAddr % ASAN_SHADOW_GRANULARITY, RightRzAddr - PartialRzAligned, kAsanAllocaRightMagic); FastPoisonShadow(RightRzAddr, kAllocaRedzoneSize, kAsanAllocaRightMagic); } @@ -305,7 +303,8 @@ void __asan_alloca_poison(uptr addr, uptr size) { SANITIZER_INTERFACE_ATTRIBUTE void __asan_allocas_unpoison(uptr top, uptr bottom) { if ((!top) || (top > bottom)) return; - REAL(memset)(reinterpret_cast(MemToShadow(top)), 0, - (bottom - top) / SHADOW_GRANULARITY); + REAL(memset) + (reinterpret_cast(MemToShadow(top)), 0, + (bottom - top) / ASAN_SHADOW_GRANULARITY); } } // extern "C" diff --git a/libsanitizer/asan/asan_flags.cpp b/libsanitizer/asan/asan_flags.cpp index c64e4647028..9ea899f84b4 100644 --- a/libsanitizer/asan/asan_flags.cpp +++ b/libsanitizer/asan/asan_flags.cpp @@ -140,9 +140,9 @@ void InitializeFlags() { SanitizerToolName); Die(); } - // Ensure that redzone is at least SHADOW_GRANULARITY. - if (f->redzone < (int)SHADOW_GRANULARITY) - f->redzone = SHADOW_GRANULARITY; + // Ensure that redzone is at least ASAN_SHADOW_GRANULARITY. + if (f->redzone < (int)ASAN_SHADOW_GRANULARITY) + f->redzone = ASAN_SHADOW_GRANULARITY; // Make "strict_init_order" imply "check_initialization_order". // TODO(samsonov): Use a single runtime flag for an init-order checker. if (f->strict_init_order) { diff --git a/libsanitizer/asan/asan_flags.inc b/libsanitizer/asan/asan_flags.inc index 514b225c407..314ed193535 100644 --- a/libsanitizer/asan/asan_flags.inc +++ b/libsanitizer/asan/asan_flags.inc @@ -49,9 +49,10 @@ ASAN_FLAG( "to find more errors.") ASAN_FLAG(bool, replace_intrin, true, "If set, uses custom wrappers for memset/memcpy/memmove intrinsics.") -ASAN_FLAG(bool, detect_stack_use_after_return, false, +ASAN_FLAG(bool, detect_stack_use_after_return, + SANITIZER_LINUX && !SANITIZER_ANDROID, "Enables stack-use-after-return checking at run-time.") -ASAN_FLAG(int, min_uar_stack_size_log, 16, // We can't do smaller anyway. +ASAN_FLAG(int, min_uar_stack_size_log, 16, // We can't do smaller anyway. "Minimum fake stack size log.") ASAN_FLAG(int, max_uar_stack_size_log, 20, // 1Mb per size class, i.e. 
~11Mb per thread diff --git a/libsanitizer/asan/asan_fuchsia.cpp b/libsanitizer/asan/asan_fuchsia.cpp index b419019d137..2b15504123b 100644 --- a/libsanitizer/asan/asan_fuchsia.cpp +++ b/libsanitizer/asan/asan_fuchsia.cpp @@ -14,16 +14,17 @@ #include "sanitizer_common/sanitizer_fuchsia.h" #if SANITIZER_FUCHSIA -#include "asan_interceptors.h" -#include "asan_internal.h" -#include "asan_stack.h" -#include "asan_thread.h" - #include #include #include #include +# include "asan_interceptors.h" +# include "asan_internal.h" +# include "asan_stack.h" +# include "asan_thread.h" +# include "lsan/lsan_common.h" + namespace __asan { // The system already set up the shadow memory for us. @@ -118,14 +119,12 @@ struct AsanThread::InitOptions { // Shared setup between thread creation and startup for the initial thread. static AsanThread *CreateAsanThread(StackTrace *stack, u32 parent_tid, - uptr user_id, bool detached, - const char *name) { + bool detached, const char *name) { // In lieu of AsanThread::Create. AsanThread *thread = (AsanThread *)MmapOrDie(AsanThreadMmapSize(), __func__); AsanThreadContext::CreateThreadContextArgs args = {thread, stack}; - u32 tid = - asanThreadRegistry().CreateThread(user_id, detached, parent_tid, &args); + u32 tid = asanThreadRegistry().CreateThread(0, detached, parent_tid, &args); asanThreadRegistry().SetThreadName(tid, name); return thread; @@ -152,7 +151,7 @@ AsanThread *CreateMainThread() { CHECK_NE(__sanitizer::MainThreadStackBase, 0); CHECK_GT(__sanitizer::MainThreadStackSize, 0); AsanThread *t = CreateAsanThread( - nullptr, 0, reinterpret_cast(self), true, + nullptr, 0, true, _zx_object_get_property(thrd_get_zx_handle(self), ZX_PROP_NAME, name, sizeof(name)) == ZX_OK ? name @@ -182,8 +181,7 @@ static void *BeforeThreadCreateHook(uptr user_id, bool detached, GET_STACK_TRACE_THREAD; u32 parent_tid = GetCurrentTidOrInvalid(); - AsanThread *thread = - CreateAsanThread(&stack, parent_tid, user_id, detached, name); + AsanThread *thread = CreateAsanThread(&stack, parent_tid, detached, name); // On other systems, AsanThread::Init() is called from the new // thread itself. But on Fuchsia we already know the stack address @@ -238,8 +236,18 @@ void FlushUnneededASanShadowMemory(uptr p, uptr size) { __sanitizer_fill_shadow(p, size, 0, 0); } +// On Fuchsia, leak detection is done by a special hook after atexit hooks. +// So this doesn't install any atexit hook like on other platforms. +void InstallAtExitCheckLeaks() {} + } // namespace __asan +namespace __lsan { + +bool UseExitcodeOnLeak() { return __asan::flags()->halt_on_error; } + +} // namespace __lsan + // These are declared (in extern "C") by . // The system runtime will call our definitions directly. 
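[Editor's note] This merge splits leak-check registration by platform: the Fuchsia file above stubs out InstallAtExitCheckLeaks() because leak detection there runs from a system-provided hook after atexit handlers, while the POSIX implementation (see the asan_posix.cpp hunk further down) registers an atexit callback chosen by halt_on_error. A rough standalone illustration of that POSIX registration shape; the flag variables here are stand-ins for the runtime's flag lookups, not its API:

#include <cstdio>
#include <cstdlib>

// Stand-ins for the runtime's flag lookups; hypothetical for this sketch.
static bool detect_leaks = true;
static bool halt_on_error = true;

static void DoLeakCheck() { std::puts("fatal leak check at exit"); }
static void DoRecoverableLeakCheckVoid() {
  std::puts("recoverable leak check at exit");
}

// Same shape as the patch's POSIX InstallAtExitCheckLeaks(): decide once,
// at init time, whether the exit-time checker is fatal or recoverable.
static void InstallAtExitCheckLeaks() {
  if (!detect_leaks)
    return;
  std::atexit(halt_on_error ? DoLeakCheck : DoRecoverableLeakCheckVoid);
}

int main() {
  InstallAtExitCheckLeaks();
  return 0;  // the registered checker runs from the atexit hook
}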
diff --git a/libsanitizer/asan/asan_globals.cpp b/libsanitizer/asan/asan_globals.cpp index 94004877227..ecc2600f039 100644 --- a/libsanitizer/asan/asan_globals.cpp +++ b/libsanitizer/asan/asan_globals.cpp @@ -61,14 +61,13 @@ ALWAYS_INLINE void PoisonShadowForGlobal(const Global *g, u8 value) { } ALWAYS_INLINE void PoisonRedZones(const Global &g) { - uptr aligned_size = RoundUpTo(g.size, SHADOW_GRANULARITY); + uptr aligned_size = RoundUpTo(g.size, ASAN_SHADOW_GRANULARITY); FastPoisonShadow(g.beg + aligned_size, g.size_with_redzone - aligned_size, kAsanGlobalRedzoneMagic); if (g.size != aligned_size) { FastPoisonShadowPartialRightRedzone( - g.beg + RoundDownTo(g.size, SHADOW_GRANULARITY), - g.size % SHADOW_GRANULARITY, - SHADOW_GRANULARITY, + g.beg + RoundDownTo(g.size, ASAN_SHADOW_GRANULARITY), + g.size % ASAN_SHADOW_GRANULARITY, ASAN_SHADOW_GRANULARITY, kAsanGlobalRedzoneMagic); } } @@ -154,6 +153,23 @@ static void CheckODRViolationViaIndicator(const Global *g) { } } +// Check ODR violation for given global G by checking if it's already poisoned. +// We use this method in case compiler doesn't use private aliases for global +// variables. +static void CheckODRViolationViaPoisoning(const Global *g) { + if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) { + // This check may not be enough: if the first global is much larger + // the entire redzone of the second global may be within the first global. + for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) { + if (g->beg == l->g->beg && + (flags()->detect_odr_violation >= 2 || g->size != l->g->size) && + !IsODRViolationSuppressed(g->name)) + ReportODRViolation(g, FindRegistrationSite(g), + l->g, FindRegistrationSite(l->g)); + } + } +} + // Clang provides two different ways for global variables protection: // it can poison the global itself or its private alias. In former // case we may poison same symbol multiple times, that can help us to @@ -199,6 +215,8 @@ static void RegisterGlobal(const Global *g) { // where two globals with the same name are defined in different modules. 
if (UseODRIndicator(g)) CheckODRViolationViaIndicator(g); + else + CheckODRViolationViaPoisoning(g); } if (CanPoisonMemory()) PoisonRedZones(*g); diff --git a/libsanitizer/asan/asan_interceptors.cpp b/libsanitizer/asan/asan_interceptors.cpp index b28909152e2..2ff314a5a9c 100644 --- a/libsanitizer/asan/asan_interceptors.cpp +++ b/libsanitizer/asan/asan_interceptors.cpp @@ -130,23 +130,24 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *) #define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name) // Strict init-order checking is dlopen-hostile: // https://github.com/google/sanitizers/issues/178 -#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \ - do { \ - if (flags()->strict_init_order) \ - StopInitOrderChecking(); \ - CheckNoDeepBind(filename, flag); \ - } while (false) -#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit() -#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) -#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() -#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited) -#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \ - if (AsanThread *t = GetCurrentThread()) { \ - *begin = t->tls_begin(); \ - *end = t->tls_end(); \ - } else { \ - *begin = *end = 0; \ - } +# define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \ + ({ \ + if (flags()->strict_init_order) \ + StopInitOrderChecking(); \ + CheckNoDeepBind(filename, flag); \ + REAL(dlopen)(filename, flag); \ + }) +# define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit() +# define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) +# define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() +# define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited) +# define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \ + if (AsanThread *t = GetCurrentThread()) { \ + *begin = t->tls_begin(); \ + *end = t->tls_end(); \ + } else { \ + *begin = *end = 0; \ + } #define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \ do { \ diff --git a/libsanitizer/asan/asan_interceptors.h b/libsanitizer/asan/asan_interceptors.h index 105c672cc24..047b044c8bf 100644 --- a/libsanitizer/asan/asan_interceptors.h +++ b/libsanitizer/asan/asan_interceptors.h @@ -81,12 +81,7 @@ void InitializePlatformInterceptors(); #if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && !SANITIZER_SOLARIS && \ !SANITIZER_NETBSD # define ASAN_INTERCEPT___CXA_THROW 1 -# if ! defined(ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION) \ - || ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION -# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1 -# else -# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 0 -# endif +# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1 # if defined(_GLIBCXX_SJLJ_EXCEPTIONS) || (SANITIZER_IOS && defined(__arm__)) # define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 1 # else diff --git a/libsanitizer/asan/asan_interface.inc b/libsanitizer/asan/asan_interface.inc index ea28fc8ae87..89ef552b711 100644 --- a/libsanitizer/asan/asan_interface.inc +++ b/libsanitizer/asan/asan_interface.inc @@ -7,6 +7,7 @@ //===----------------------------------------------------------------------===// // Asan interface list. 
//===----------------------------------------------------------------------===// + INTERFACE_FUNCTION(__asan_addr_is_in_fake_stack) INTERFACE_FUNCTION(__asan_address_is_poisoned) INTERFACE_FUNCTION(__asan_after_dynamic_init) diff --git a/libsanitizer/asan/asan_internal.h b/libsanitizer/asan/asan_internal.h index ad3320304d0..7468f126d37 100644 --- a/libsanitizer/asan/asan_internal.h +++ b/libsanitizer/asan/asan_internal.h @@ -17,19 +17,19 @@ #include "asan_interface_internal.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_internal_defs.h" -#include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_stacktrace.h" #if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__) -# error "The AddressSanitizer run-time should not be" - " instrumented by AddressSanitizer" +# error \ + "The AddressSanitizer run-time should not be instrumented by AddressSanitizer" #endif // Build-time configuration options. // If set, asan will intercept C++ exception api call(s). #ifndef ASAN_HAS_EXCEPTIONS -# define ASAN_HAS_EXCEPTIONS 1 +# define ASAN_HAS_EXCEPTIONS 1 #endif // If set, values like allocator chunk size, as well as defaults for some flags @@ -43,11 +43,11 @@ #endif #ifndef ASAN_DYNAMIC -# ifdef PIC -# define ASAN_DYNAMIC 1 -# else -# define ASAN_DYNAMIC 0 -# endif +# ifdef PIC +# define ASAN_DYNAMIC 1 +# else +# define ASAN_DYNAMIC 0 +# endif #endif // All internal functions in asan reside inside the __asan namespace @@ -123,26 +123,18 @@ void *AsanDlSymNext(const char *sym); // `dlopen()` specific initialization inside this function. bool HandleDlopenInit(); -// Add convenient macro for interface functions that may be represented as -// weak hooks. -#define ASAN_MALLOC_HOOK(ptr, size) \ - do { \ - if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(ptr, size); \ - RunMallocHooks(ptr, size); \ - } while (false) -#define ASAN_FREE_HOOK(ptr) \ - do { \ - if (&__sanitizer_free_hook) __sanitizer_free_hook(ptr); \ - RunFreeHooks(ptr); \ - } while (false) +void InstallAtExitCheckLeaks(); + #define ASAN_ON_ERROR() \ - if (&__asan_on_error) __asan_on_error() + if (&__asan_on_error) \ + __asan_on_error() extern int asan_inited; // Used to avoid infinite recursion in __asan_init(). extern bool asan_init_is_running; extern void (*death_callback)(void); -// These magic values are written to shadow for better error reporting. +// These magic values are written to shadow for better error +// reporting. 
const int kAsanHeapLeftRedzoneMagic = 0xfa; const int kAsanHeapFreeMagic = 0xfd; const int kAsanStackLeftRedzoneMagic = 0xf1; diff --git a/libsanitizer/asan/asan_linux.cpp b/libsanitizer/asan/asan_linux.cpp index ad3693d5e6a..defd81bc19e 100644 --- a/libsanitizer/asan/asan_linux.cpp +++ b/libsanitizer/asan/asan_linux.cpp @@ -107,7 +107,7 @@ uptr FindDynamicShadowStart() { return FindPremappedShadowStart(shadow_size_bytes); #endif - return MapDynamicShadow(shadow_size_bytes, SHADOW_SCALE, + return MapDynamicShadow(shadow_size_bytes, ASAN_SHADOW_SCALE, /*min_shadow_base_alignment*/ 0, kHighMemEnd); } @@ -131,30 +131,24 @@ static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size, VReport(2, "info->dlpi_name = %s\tinfo->dlpi_addr = %p\n", info->dlpi_name, (void *)info->dlpi_addr); - // Continue until the first dynamic library is found - if (!info->dlpi_name || info->dlpi_name[0] == 0) - return 0; - - // Ignore vDSO - if (internal_strncmp(info->dlpi_name, "linux-", sizeof("linux-") - 1) == 0) - return 0; + const char **name = (const char **)data; -#if SANITIZER_FREEBSD || SANITIZER_NETBSD // Ignore first entry (the main program) - char **p = (char **)data; - if (!(*p)) { - *p = (char *)-1; + if (!*name) { + *name = ""; return 0; } -#endif -#if SANITIZER_SOLARIS - // Ignore executable on Solaris - if (info->dlpi_addr == 0) +# if SANITIZER_LINUX + // Ignore vDSO. glibc versions earlier than 2.15 (and some patched + // by distributors) return an empty name for the vDSO entry, so + // detect this as well. + if (!info->dlpi_name[0] || + internal_strncmp(info->dlpi_name, "linux-", sizeof("linux-") - 1) == 0) return 0; -#endif +# endif - *(const char **)data = info->dlpi_name; + *name = info->dlpi_name; return 1; } @@ -175,7 +169,7 @@ void AsanCheckDynamicRTPrereqs() { // Ensure that dynamic RT is the first DSO in the list const char *first_dso_name = nullptr; dl_iterate_phdr(FindFirstDSOCallback, &first_dso_name); - if (first_dso_name && !IsDynamicRTName(first_dso_name)) { + if (first_dso_name && first_dso_name[0] && !IsDynamicRTName(first_dso_name)) { Report("ASan runtime does not come first in initial library list; " "you should either link runtime to your application or " "manually preload it with LD_PRELOAD.\n"); diff --git a/libsanitizer/asan/asan_mac.cpp b/libsanitizer/asan/asan_mac.cpp index c6950547f08..9161f728d44 100644 --- a/libsanitizer/asan/asan_mac.cpp +++ b/libsanitizer/asan/asan_mac.cpp @@ -55,7 +55,7 @@ void *AsanDoesNotSupportStaticLinkage() { } uptr FindDynamicShadowStart() { - return MapDynamicShadow(MemToShadowSize(kHighMemEnd), SHADOW_SCALE, + return MapDynamicShadow(MemToShadowSize(kHighMemEnd), ASAN_SHADOW_SCALE, /*min_shadow_base_alignment*/ 0, kHighMemEnd); } diff --git a/libsanitizer/asan/asan_mapping.h b/libsanitizer/asan/asan_mapping.h index 4b0037fced3..4ff09b103d5 100644 --- a/libsanitizer/asan/asan_mapping.h +++ b/libsanitizer/asan/asan_mapping.h @@ -13,7 +13,7 @@ #ifndef ASAN_MAPPING_H #define ASAN_MAPPING_H -#include "asan_internal.h" +#include "sanitizer_common/sanitizer_platform.h" // The full explanation of the memory mapping could be found here: // https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm @@ -151,149 +151,145 @@ // || `[0x30000000, 0x35ffffff]` || LowShadow || // || `[0x00000000, 0x2fffffff]` || LowMem || -#if defined(ASAN_SHADOW_SCALE) -static const u64 kDefaultShadowScale = ASAN_SHADOW_SCALE; -#else -static const u64 kDefaultShadowScale = 3; -#endif -static const u64 kDefaultShadowSentinel = ~(uptr)0; -static const u64 
kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000 -static const u64 kDefaultShadowOffset64 = 1ULL << 44; -static const u64 kDefaultShort64bitShadowOffset = - 0x7FFFFFFF & (~0xFFFULL << kDefaultShadowScale); // < 2G. -static const u64 kAArch64_ShadowOffset64 = 1ULL << 36; -static const u64 kRiscv64_ShadowOffset64 = 0xd55550000; -static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000; -static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37; -static const u64 kPPC64_ShadowOffset64 = 1ULL << 41; -static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52; -static const u64 kSPARC64_ShadowOffset64 = 1ULL << 43; // 0x80000000000 -static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000 -static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000 -static const u64 kNetBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000 -static const u64 kNetBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000 -static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000 - -#define SHADOW_SCALE kDefaultShadowScale +#define ASAN_SHADOW_SCALE 3 #if SANITIZER_FUCHSIA -# define SHADOW_OFFSET (0) +# define ASAN_SHADOW_OFFSET_CONST (0) #elif SANITIZER_WORDSIZE == 32 # if SANITIZER_ANDROID -# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address +# define ASAN_SHADOW_OFFSET_DYNAMIC # elif defined(__mips__) -# define SHADOW_OFFSET kMIPS32_ShadowOffset32 +# define ASAN_SHADOW_OFFSET_CONST 0x0aaa0000 # elif SANITIZER_FREEBSD -# define SHADOW_OFFSET kFreeBSD_ShadowOffset32 +# define ASAN_SHADOW_OFFSET_CONST 0x40000000 # elif SANITIZER_NETBSD -# define SHADOW_OFFSET kNetBSD_ShadowOffset32 +# define ASAN_SHADOW_OFFSET_CONST 0x40000000 # elif SANITIZER_WINDOWS -# define SHADOW_OFFSET kWindowsShadowOffset32 +# define ASAN_SHADOW_OFFSET_CONST 0x30000000 # elif SANITIZER_IOS -# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address +# define ASAN_SHADOW_OFFSET_DYNAMIC # else -# define SHADOW_OFFSET kDefaultShadowOffset32 +# define ASAN_SHADOW_OFFSET_CONST 0x20000000 # endif #else # if SANITIZER_IOS -# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address +# define ASAN_SHADOW_OFFSET_DYNAMIC # elif SANITIZER_MAC && defined(__aarch64__) -# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address -#elif SANITIZER_RISCV64 -#define SHADOW_OFFSET kRiscv64_ShadowOffset64 +# define ASAN_SHADOW_OFFSET_DYNAMIC +# elif SANITIZER_RISCV64 +# define ASAN_SHADOW_OFFSET_CONST 0x0000000d55550000 # elif defined(__aarch64__) -# define SHADOW_OFFSET kAArch64_ShadowOffset64 +# define ASAN_SHADOW_OFFSET_CONST 0x0000001000000000 # elif defined(__powerpc64__) -# define SHADOW_OFFSET kPPC64_ShadowOffset64 +# define ASAN_SHADOW_OFFSET_CONST 0x0000100000000000 # elif defined(__s390x__) -# define SHADOW_OFFSET kSystemZ_ShadowOffset64 +# define ASAN_SHADOW_OFFSET_CONST 0x0010000000000000 # elif SANITIZER_FREEBSD -# define SHADOW_OFFSET kFreeBSD_ShadowOffset64 +# define ASAN_SHADOW_OFFSET_CONST 0x0000400000000000 # elif SANITIZER_NETBSD -# define SHADOW_OFFSET kNetBSD_ShadowOffset64 +# define ASAN_SHADOW_OFFSET_CONST 0x0000400000000000 # elif SANITIZER_MAC -# define SHADOW_OFFSET kDefaultShadowOffset64 +# define ASAN_SHADOW_OFFSET_CONST 0x0000100000000000 # elif defined(__mips64) -# define SHADOW_OFFSET kMIPS64_ShadowOffset64 -#elif defined(__sparc__) -#define SHADOW_OFFSET kSPARC64_ShadowOffset64 +# define ASAN_SHADOW_OFFSET_CONST 0x0000002000000000 +# elif defined(__sparc__) +# define ASAN_SHADOW_OFFSET_CONST 0x0000080000000000 # elif SANITIZER_WINDOWS64 -# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address +# define 
ASAN_SHADOW_OFFSET_DYNAMIC # else -# define SHADOW_OFFSET kDefaultShort64bitShadowOffset +# if ASAN_SHADOW_SCALE != 3 +# error "Value below is based on shadow scale = 3." +# error "Original formula was: 0x7FFFFFFF & (~0xFFFULL << SHADOW_SCALE)." +# endif +# define ASAN_SHADOW_OFFSET_CONST 0x000000007fff8000 # endif #endif -#if SANITIZER_ANDROID && defined(__arm__) -# define ASAN_PREMAP_SHADOW 1 -#else -# define ASAN_PREMAP_SHADOW 0 -#endif +#if defined(__cplusplus) +# include "asan_internal.h" + +static const u64 kDefaultShadowSentinel = ~(uptr)0; -#define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE) +# if defined(ASAN_SHADOW_OFFSET_CONST) +static const u64 kConstShadowOffset = ASAN_SHADOW_OFFSET_CONST; +# define ASAN_SHADOW_OFFSET kConstShadowOffset +# elif defined(ASAN_SHADOW_OFFSET_DYNAMIC) +# define ASAN_SHADOW_OFFSET __asan_shadow_memory_dynamic_address +# else +# error "ASAN_SHADOW_OFFSET can't be determined." +# endif -#define DO_ASAN_MAPPING_PROFILE 0 // Set to 1 to profile the functions below. +# if SANITIZER_ANDROID && defined(__arm__) +# define ASAN_PREMAP_SHADOW 1 +# else +# define ASAN_PREMAP_SHADOW 0 +# endif -#if DO_ASAN_MAPPING_PROFILE -# define PROFILE_ASAN_MAPPING() AsanMappingProfile[__LINE__]++; -#else -# define PROFILE_ASAN_MAPPING() -#endif +# define ASAN_SHADOW_GRANULARITY (1ULL << ASAN_SHADOW_SCALE) + +# define DO_ASAN_MAPPING_PROFILE 0 // Set to 1 to profile the functions below. + +# if DO_ASAN_MAPPING_PROFILE +# define PROFILE_ASAN_MAPPING() AsanMappingProfile[__LINE__]++; +# else +# define PROFILE_ASAN_MAPPING() +# endif // If 1, all shadow boundaries are constants. // Don't set to 1 other than for testing. -#define ASAN_FIXED_MAPPING 0 +# define ASAN_FIXED_MAPPING 0 namespace __asan { extern uptr AsanMappingProfile[]; -#if ASAN_FIXED_MAPPING +# if ASAN_FIXED_MAPPING // Fixed mapping for 64-bit Linux. Mostly used for performance comparison // with non-fixed mapping. As of r175253 (Feb 2013) the performance // difference between fixed and non-fixed mapping is below the noise level. static uptr kHighMemEnd = 0x7fffffffffffULL; -static uptr kMidMemBeg = 0x3000000000ULL; -static uptr kMidMemEnd = 0x4fffffffffULL; -#else +static uptr kMidMemBeg = 0x3000000000ULL; +static uptr kMidMemEnd = 0x4fffffffffULL; +# else extern uptr kHighMemEnd, kMidMemBeg, kMidMemEnd; // Initialized in __asan_init. -#endif +# endif } // namespace __asan -#if defined(__sparc__) && SANITIZER_WORDSIZE == 64 -# include "asan_mapping_sparc64.h" -#else -#define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) + (SHADOW_OFFSET)) +# if defined(__sparc__) && SANITIZER_WORDSIZE == 64 +# include "asan_mapping_sparc64.h" +# else +# define MEM_TO_SHADOW(mem) \ + (((mem) >> ASAN_SHADOW_SCALE) + (ASAN_SHADOW_OFFSET)) -#define kLowMemBeg 0 -#define kLowMemEnd (SHADOW_OFFSET ? SHADOW_OFFSET - 1 : 0) +# define kLowMemBeg 0 +# define kLowMemEnd (ASAN_SHADOW_OFFSET ? 
ASAN_SHADOW_OFFSET - 1 : 0) -#define kLowShadowBeg SHADOW_OFFSET -#define kLowShadowEnd MEM_TO_SHADOW(kLowMemEnd) +# define kLowShadowBeg ASAN_SHADOW_OFFSET +# define kLowShadowEnd MEM_TO_SHADOW(kLowMemEnd) -#define kHighMemBeg (MEM_TO_SHADOW(kHighMemEnd) + 1) +# define kHighMemBeg (MEM_TO_SHADOW(kHighMemEnd) + 1) -#define kHighShadowBeg MEM_TO_SHADOW(kHighMemBeg) -#define kHighShadowEnd MEM_TO_SHADOW(kHighMemEnd) +# define kHighShadowBeg MEM_TO_SHADOW(kHighMemBeg) +# define kHighShadowEnd MEM_TO_SHADOW(kHighMemEnd) -# define kMidShadowBeg MEM_TO_SHADOW(kMidMemBeg) -# define kMidShadowEnd MEM_TO_SHADOW(kMidMemEnd) +# define kMidShadowBeg MEM_TO_SHADOW(kMidMemBeg) +# define kMidShadowEnd MEM_TO_SHADOW(kMidMemEnd) // With the zero shadow base we can not actually map pages starting from 0. // This constant is somewhat arbitrary. -#define kZeroBaseShadowStart 0 -#define kZeroBaseMaxShadowStart (1 << 18) +# define kZeroBaseShadowStart 0 +# define kZeroBaseMaxShadowStart (1 << 18) -#define kShadowGapBeg (kLowShadowEnd ? kLowShadowEnd + 1 \ - : kZeroBaseShadowStart) -#define kShadowGapEnd ((kMidMemBeg ? kMidShadowBeg : kHighShadowBeg) - 1) +# define kShadowGapBeg \ + (kLowShadowEnd ? kLowShadowEnd + 1 : kZeroBaseShadowStart) +# define kShadowGapEnd ((kMidMemBeg ? kMidShadowBeg : kHighShadowBeg) - 1) -#define kShadowGap2Beg (kMidMemBeg ? kMidShadowEnd + 1 : 0) -#define kShadowGap2End (kMidMemBeg ? kMidMemBeg - 1 : 0) +# define kShadowGap2Beg (kMidMemBeg ? kMidShadowEnd + 1 : 0) +# define kShadowGap2End (kMidMemBeg ? kMidMemBeg - 1 : 0) -#define kShadowGap3Beg (kMidMemBeg ? kMidMemEnd + 1 : 0) -#define kShadowGap3End (kMidMemBeg ? kHighShadowBeg - 1 : 0) +# define kShadowGap3Beg (kMidMemBeg ? kMidMemEnd + 1 : 0) +# define kShadowGap3End (kMidMemBeg ? kHighShadowBeg - 1 : 0) namespace __asan { @@ -331,29 +327,31 @@ static inline bool AddrIsInShadowGap(uptr a) { PROFILE_ASAN_MAPPING(); if (kMidMemBeg) { if (a <= kShadowGapEnd) - return SHADOW_OFFSET == 0 || a >= kShadowGapBeg; + return ASAN_SHADOW_OFFSET == 0 || a >= kShadowGapBeg; return (a >= kShadowGap2Beg && a <= kShadowGap2End) || (a >= kShadowGap3Beg && a <= kShadowGap3End); } // In zero-based shadow mode we treat addresses near zero as addresses // in shadow gap as well. 
- if (SHADOW_OFFSET == 0) + if (ASAN_SHADOW_OFFSET == 0) return a <= kShadowGapEnd; return a >= kShadowGapBeg && a <= kShadowGapEnd; } } // namespace __asan -#endif +# endif namespace __asan { -static inline uptr MemToShadowSize(uptr size) { return size >> SHADOW_SCALE; } +static inline uptr MemToShadowSize(uptr size) { + return size >> ASAN_SHADOW_SCALE; +} static inline bool AddrIsInMem(uptr a) { PROFILE_ASAN_MAPPING(); return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a) || - (flags()->protect_shadow_gap == 0 && AddrIsInShadowGap(a)); + (flags()->protect_shadow_gap == 0 && AddrIsInShadowGap(a)); } static inline uptr MemToShadow(uptr p) { @@ -369,17 +367,17 @@ static inline bool AddrIsInShadow(uptr a) { static inline bool AddrIsAlignedByGranularity(uptr a) { PROFILE_ASAN_MAPPING(); - return (a & (SHADOW_GRANULARITY - 1)) == 0; + return (a & (ASAN_SHADOW_GRANULARITY - 1)) == 0; } static inline bool AddressIsPoisoned(uptr a) { PROFILE_ASAN_MAPPING(); const uptr kAccessSize = 1; - u8 *shadow_address = (u8*)MEM_TO_SHADOW(a); + u8 *shadow_address = (u8 *)MEM_TO_SHADOW(a); s8 shadow_value = *shadow_address; if (shadow_value) { - u8 last_accessed_byte = (a & (SHADOW_GRANULARITY - 1)) - + kAccessSize - 1; + u8 last_accessed_byte = + (a & (ASAN_SHADOW_GRANULARITY - 1)) + kAccessSize - 1; return (last_accessed_byte >= shadow_value); } return false; @@ -390,4 +388,6 @@ static const uptr kAsanMappingProfileSize = __LINE__; } // namespace __asan +#endif // __cplusplus + #endif // ASAN_MAPPING_H diff --git a/libsanitizer/asan/asan_mapping_sparc64.h b/libsanitizer/asan/asan_mapping_sparc64.h index 432a1816f79..90261d301f7 100644 --- a/libsanitizer/asan/asan_mapping_sparc64.h +++ b/libsanitizer/asan/asan_mapping_sparc64.h @@ -25,13 +25,14 @@ // The idea is to chop the high bits before doing the scaling, so the two // parts become contiguous again and the usual scheme can be applied. 
-#define MEM_TO_SHADOW(mem) \ - ((((mem) << HIGH_BITS) >> (HIGH_BITS + (SHADOW_SCALE))) + (SHADOW_OFFSET)) +#define MEM_TO_SHADOW(mem) \ + ((((mem) << HIGH_BITS) >> (HIGH_BITS + (ASAN_SHADOW_SCALE))) + \ + (ASAN_SHADOW_OFFSET)) #define kLowMemBeg 0 -#define kLowMemEnd (SHADOW_OFFSET - 1) +#define kLowMemEnd (ASAN_SHADOW_OFFSET - 1) -#define kLowShadowBeg SHADOW_OFFSET +#define kLowShadowBeg ASAN_SHADOW_OFFSET #define kLowShadowEnd MEM_TO_SHADOW(kLowMemEnd) // But of course there is the huge hole between the high shadow memory, diff --git a/libsanitizer/asan/asan_poisoning.cpp b/libsanitizer/asan/asan_poisoning.cpp index d97af91e692..3b7c9d1312d 100644 --- a/libsanitizer/asan/asan_poisoning.cpp +++ b/libsanitizer/asan/asan_poisoning.cpp @@ -12,11 +12,13 @@ //===----------------------------------------------------------------------===// #include "asan_poisoning.h" + #include "asan_report.h" #include "asan_stack.h" #include "sanitizer_common/sanitizer_atomic.h" -#include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_interface_internal.h" +#include "sanitizer_common/sanitizer_libc.h" namespace __asan { @@ -35,7 +37,7 @@ void PoisonShadow(uptr addr, uptr size, u8 value) { CHECK(AddrIsAlignedByGranularity(addr)); CHECK(AddrIsInMem(addr)); CHECK(AddrIsAlignedByGranularity(addr + size)); - CHECK(AddrIsInMem(addr + size - SHADOW_GRANULARITY)); + CHECK(AddrIsInMem(addr + size - ASAN_SHADOW_GRANULARITY)); CHECK(REAL(memset)); FastPoisonShadow(addr, size, value); } @@ -52,12 +54,12 @@ void PoisonShadowPartialRightRedzone(uptr addr, struct ShadowSegmentEndpoint { u8 *chunk; - s8 offset; // in [0, SHADOW_GRANULARITY) + s8 offset; // in [0, ASAN_SHADOW_GRANULARITY) s8 value; // = *chunk; explicit ShadowSegmentEndpoint(uptr address) { chunk = (u8*)MemToShadow(address); - offset = address & (SHADOW_GRANULARITY - 1); + offset = address & (ASAN_SHADOW_GRANULARITY - 1); value = *chunk; } }; @@ -72,14 +74,14 @@ void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) { } CHECK(size); CHECK_LE(size, 4096); - CHECK(IsAligned(end, SHADOW_GRANULARITY)); - if (!IsAligned(ptr, SHADOW_GRANULARITY)) { + CHECK(IsAligned(end, ASAN_SHADOW_GRANULARITY)); + if (!IsAligned(ptr, ASAN_SHADOW_GRANULARITY)) { *(u8 *)MemToShadow(ptr) = - poison ? static_cast(ptr % SHADOW_GRANULARITY) : 0; - ptr |= SHADOW_GRANULARITY - 1; + poison ? static_cast(ptr % ASAN_SHADOW_GRANULARITY) : 0; + ptr |= ASAN_SHADOW_GRANULARITY - 1; ptr++; } - for (; ptr < end; ptr += SHADOW_GRANULARITY) + for (; ptr < end; ptr += ASAN_SHADOW_GRANULARITY) *(u8*)MemToShadow(ptr) = poison ? kAsanIntraObjectRedzone : 0; } @@ -181,12 +183,12 @@ uptr __asan_region_is_poisoned(uptr beg, uptr size) { if (!AddrIsInMem(end)) return end; CHECK_LT(beg, end); - uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY); - uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY); + uptr aligned_b = RoundUpTo(beg, ASAN_SHADOW_GRANULARITY); + uptr aligned_e = RoundDownTo(end, ASAN_SHADOW_GRANULARITY); uptr shadow_beg = MemToShadow(aligned_b); uptr shadow_end = MemToShadow(aligned_e); // First check the first and the last application bytes, - // then check the SHADOW_GRANULARITY-aligned region by calling + // then check the ASAN_SHADOW_GRANULARITY-aligned region by calling // mem_is_zero on the corresponding shadow. 
if (!__asan::AddressIsPoisoned(beg) && !__asan::AddressIsPoisoned(end - 1) && (shadow_end <= shadow_beg || @@ -285,7 +287,7 @@ uptr __asan_load_cxx_array_cookie(uptr *p) { // assumes that left border of region to be poisoned is properly aligned. static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) { if (size == 0) return; - uptr aligned_size = size & ~(SHADOW_GRANULARITY - 1); + uptr aligned_size = size & ~(ASAN_SHADOW_GRANULARITY - 1); PoisonShadow(addr, aligned_size, do_poison ? kAsanStackUseAfterScopeMagic : 0); if (size == aligned_size) @@ -351,7 +353,7 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p, uptr end = reinterpret_cast(end_p); uptr old_mid = reinterpret_cast(old_mid_p); uptr new_mid = reinterpret_cast(new_mid_p); - uptr granularity = SHADOW_GRANULARITY; + uptr granularity = ASAN_SHADOW_GRANULARITY; if (!(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end && IsAligned(beg, granularity))) { GET_STACK_TRACE_FATAL_HERE; diff --git a/libsanitizer/asan/asan_poisoning.h b/libsanitizer/asan/asan_poisoning.h index 3d536f2d309..600bd011f30 100644 --- a/libsanitizer/asan/asan_poisoning.h +++ b/libsanitizer/asan/asan_poisoning.h @@ -44,8 +44,8 @@ ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size, common_flags()->clear_shadow_mmap_threshold); #else uptr shadow_beg = MEM_TO_SHADOW(aligned_beg); - uptr shadow_end = MEM_TO_SHADOW( - aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1; + uptr shadow_end = + MEM_TO_SHADOW(aligned_beg + aligned_size - ASAN_SHADOW_GRANULARITY) + 1; // FIXME: Page states are different on Windows, so using the same interface // for mapping shadow and zeroing out pages doesn't "just work", so we should // probably provide higher-level interface for these operations. @@ -78,11 +78,12 @@ ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone( DCHECK(CanPoisonMemory()); bool poison_partial = flags()->poison_partial; u8 *shadow = (u8*)MEM_TO_SHADOW(aligned_addr); - for (uptr i = 0; i < redzone_size; i += SHADOW_GRANULARITY, shadow++) { - if (i + SHADOW_GRANULARITY <= size) { + for (uptr i = 0; i < redzone_size; i += ASAN_SHADOW_GRANULARITY, shadow++) { + if (i + ASAN_SHADOW_GRANULARITY <= size) { *shadow = 0; // fully addressable } else if (i >= size) { - *shadow = (SHADOW_GRANULARITY == 128) ? 0xff : value; // unaddressable + *shadow = + (ASAN_SHADOW_GRANULARITY == 128) ? 0xff : value; // unaddressable } else { // first size-i bytes are addressable *shadow = poison_partial ? 
static_cast(size - i) : 0; diff --git a/libsanitizer/asan/asan_posix.cpp b/libsanitizer/asan/asan_posix.cpp index 63ad735f8bb..765f4a26cd7 100644 --- a/libsanitizer/asan/asan_posix.cpp +++ b/libsanitizer/asan/asan_posix.cpp @@ -14,22 +14,23 @@ #include "sanitizer_common/sanitizer_platform.h" #if SANITIZER_POSIX -#include "asan_internal.h" -#include "asan_interceptors.h" -#include "asan_mapping.h" -#include "asan_poisoning.h" -#include "asan_report.h" -#include "asan_stack.h" -#include "sanitizer_common/sanitizer_libc.h" -#include "sanitizer_common/sanitizer_posix.h" -#include "sanitizer_common/sanitizer_procmaps.h" - -#include -#include -#include -#include -#include -#include +# include +# include +# include +# include +# include +# include + +# include "asan_interceptors.h" +# include "asan_internal.h" +# include "asan_mapping.h" +# include "asan_poisoning.h" +# include "asan_report.h" +# include "asan_stack.h" +# include "lsan/lsan_common.h" +# include "sanitizer_common/sanitizer_libc.h" +# include "sanitizer_common/sanitizer_posix.h" +# include "sanitizer_common/sanitizer_procmaps.h" namespace __asan { @@ -131,7 +132,7 @@ void AsanTSDSet(void *tsd) { } void PlatformTSDDtor(void *tsd) { - AsanThreadContext *context = (AsanThreadContext*)tsd; + AsanThreadContext *context = (AsanThreadContext *)tsd; if (context->destructor_iterations > 1) { context->destructor_iterations--; CHECK_EQ(0, pthread_setspecific(tsd_key, tsd)); @@ -140,6 +141,18 @@ void PlatformTSDDtor(void *tsd) { AsanThread::TSDDtor(tsd); } #endif + +void InstallAtExitCheckLeaks() { + if (CAN_SANITIZE_LEAKS) { + if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) { + if (flags()->halt_on_error) + Atexit(__lsan::DoLeakCheck); + else + Atexit(__lsan::DoRecoverableLeakCheckVoid); + } + } +} + } // namespace __asan #endif // SANITIZER_POSIX diff --git a/libsanitizer/asan/asan_premap_shadow.cpp b/libsanitizer/asan/asan_premap_shadow.cpp index 666bb9b34bd..bed2f62a225 100644 --- a/libsanitizer/asan/asan_premap_shadow.cpp +++ b/libsanitizer/asan/asan_premap_shadow.cpp @@ -26,7 +26,7 @@ namespace __asan { // Conservative upper limit. uptr PremapShadowSize() { uptr granularity = GetMmapGranularity(); - return RoundUpTo(GetMaxVirtualAddress() >> SHADOW_SCALE, granularity); + return RoundUpTo(GetMaxVirtualAddress() >> ASAN_SHADOW_SCALE, granularity); } // Returns an address aligned to 8 pages, such that one page on the left and diff --git a/libsanitizer/asan/asan_report.cpp b/libsanitizer/asan/asan_report.cpp index 1f266334b31..2a55d6c0978 100644 --- a/libsanitizer/asan/asan_report.cpp +++ b/libsanitizer/asan/asan_report.cpp @@ -11,17 +11,19 @@ // This file contains error reporting code. 
//===----------------------------------------------------------------------===// +#include "asan_report.h" + +#include "asan_descriptions.h" #include "asan_errors.h" #include "asan_flags.h" -#include "asan_descriptions.h" #include "asan_internal.h" #include "asan_mapping.h" -#include "asan_report.h" #include "asan_scariness_score.h" #include "asan_stack.h" #include "asan_thread.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_interface_internal.h" #include "sanitizer_common/sanitizer_report_decorator.h" #include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_symbolizer.h" @@ -460,6 +462,10 @@ static bool SuppressErrorReport(uptr pc) { void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write, uptr access_size, u32 exp, bool fatal) { + if (__asan_test_only_reported_buggy_pointer) { + *__asan_test_only_reported_buggy_pointer = addr; + return; + } if (!fatal && SuppressErrorReport(pc)) return; ENABLE_FRAME_POINTER; diff --git a/libsanitizer/asan/asan_rtl.cpp b/libsanitizer/asan/asan_rtl.cpp index 1b150b393cf..3a5261474b2 100644 --- a/libsanitizer/asan/asan_rtl.cpp +++ b/libsanitizer/asan/asan_rtl.cpp @@ -27,6 +27,7 @@ #include "lsan/lsan_common.h" #include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_interface_internal.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_symbolizer.h" #include "ubsan/ubsan_init.h" @@ -44,7 +45,9 @@ static void AsanDie() { static atomic_uint32_t num_calls; if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) != 0) { // Don't die twice - run a busy loop. - while (1) { } + while (1) { + internal_sched_yield(); + } } if (common_flags()->print_module_map >= 1) DumpProcessMap(); @@ -85,12 +88,8 @@ void ShowStatsAndAbort() { NOINLINE static void ReportGenericErrorWrapper(uptr addr, bool is_write, int size, int exp_arg, bool fatal) { - if (__asan_test_only_reported_buggy_pointer) { - *__asan_test_only_reported_buggy_pointer = addr; - } else { - GET_CALLER_PC_BP_SP; - ReportGenericError(pc, bp, sp, addr, is_write, size, exp_arg, fatal); - } + GET_CALLER_PC_BP_SP; + ReportGenericError(pc, bp, sp, addr, is_write, size, exp_arg, fatal); } // --------------- LowLevelAllocateCallbac ---------- {{{1 @@ -150,11 +149,11 @@ ASAN_REPORT_ERROR_N(store, true) #define ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp_arg, fatal) \ uptr sp = MEM_TO_SHADOW(addr); \ - uptr s = size <= SHADOW_GRANULARITY ? *reinterpret_cast(sp) \ - : *reinterpret_cast(sp); \ + uptr s = size <= ASAN_SHADOW_GRANULARITY ? 
*reinterpret_cast(sp) \ + : *reinterpret_cast(sp); \ if (UNLIKELY(s)) { \ - if (UNLIKELY(size >= SHADOW_GRANULARITY || \ - ((s8)((addr & (SHADOW_GRANULARITY - 1)) + size - 1)) >= \ + if (UNLIKELY(size >= ASAN_SHADOW_GRANULARITY || \ + ((s8)((addr & (ASAN_SHADOW_GRANULARITY - 1)) + size - 1)) >= \ (s8)s)) { \ ReportGenericErrorWrapper(addr, is_write, size, exp_arg, fatal); \ } \ @@ -188,7 +187,7 @@ ASAN_MEMORY_ACCESS_CALLBACK(store, true, 16) extern "C" NOINLINE INTERFACE_ATTRIBUTE void __asan_loadN(uptr addr, uptr size) { - if (__asan_region_is_poisoned(addr, size)) { + if ((addr = __asan_region_is_poisoned(addr, size))) { GET_CALLER_PC_BP_SP; ReportGenericError(pc, bp, sp, addr, false, size, 0, true); } @@ -197,7 +196,7 @@ void __asan_loadN(uptr addr, uptr size) { extern "C" NOINLINE INTERFACE_ATTRIBUTE void __asan_exp_loadN(uptr addr, uptr size, u32 exp) { - if (__asan_region_is_poisoned(addr, size)) { + if ((addr = __asan_region_is_poisoned(addr, size))) { GET_CALLER_PC_BP_SP; ReportGenericError(pc, bp, sp, addr, false, size, exp, true); } @@ -206,7 +205,7 @@ void __asan_exp_loadN(uptr addr, uptr size, u32 exp) { extern "C" NOINLINE INTERFACE_ATTRIBUTE void __asan_loadN_noabort(uptr addr, uptr size) { - if (__asan_region_is_poisoned(addr, size)) { + if ((addr = __asan_region_is_poisoned(addr, size))) { GET_CALLER_PC_BP_SP; ReportGenericError(pc, bp, sp, addr, false, size, 0, false); } @@ -215,7 +214,7 @@ void __asan_loadN_noabort(uptr addr, uptr size) { extern "C" NOINLINE INTERFACE_ATTRIBUTE void __asan_storeN(uptr addr, uptr size) { - if (__asan_region_is_poisoned(addr, size)) { + if ((addr = __asan_region_is_poisoned(addr, size))) { GET_CALLER_PC_BP_SP; ReportGenericError(pc, bp, sp, addr, true, size, 0, true); } @@ -224,7 +223,7 @@ void __asan_storeN(uptr addr, uptr size) { extern "C" NOINLINE INTERFACE_ATTRIBUTE void __asan_exp_storeN(uptr addr, uptr size, u32 exp) { - if (__asan_region_is_poisoned(addr, size)) { + if ((addr = __asan_region_is_poisoned(addr, size))) { GET_CALLER_PC_BP_SP; ReportGenericError(pc, bp, sp, addr, true, size, exp, true); } @@ -233,7 +232,7 @@ void __asan_exp_storeN(uptr addr, uptr size, u32 exp) { extern "C" NOINLINE INTERFACE_ATTRIBUTE void __asan_storeN_noabort(uptr addr, uptr size) { - if (__asan_region_is_poisoned(addr, size)) { + if ((addr = __asan_region_is_poisoned(addr, size))) { GET_CALLER_PC_BP_SP; ReportGenericError(pc, bp, sp, addr, true, size, 0, false); } @@ -313,7 +312,7 @@ static void InitializeHighMemEnd() { kHighMemEnd = GetMaxUserVirtualAddress(); // Increase kHighMemEnd to make sure it's properly // aligned together with kHighMemBeg: - kHighMemEnd |= (GetMmapGranularity() << SHADOW_SCALE) - 1; + kHighMemEnd |= (GetMmapGranularity() << ASAN_SHADOW_SCALE) - 1; #endif // !ASAN_FIXED_MAPPING CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0); } @@ -365,29 +364,16 @@ void PrintAddressSpaceLayout() { Printf("malloc_context_size=%zu\n", (uptr)common_flags()->malloc_context_size); - Printf("SHADOW_SCALE: %d\n", (int)SHADOW_SCALE); - Printf("SHADOW_GRANULARITY: %d\n", (int)SHADOW_GRANULARITY); - Printf("SHADOW_OFFSET: 0x%zx\n", (uptr)SHADOW_OFFSET); - CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7); + Printf("SHADOW_SCALE: %d\n", (int)ASAN_SHADOW_SCALE); + Printf("SHADOW_GRANULARITY: %d\n", (int)ASAN_SHADOW_GRANULARITY); + Printf("SHADOW_OFFSET: 0x%zx\n", (uptr)ASAN_SHADOW_OFFSET); + CHECK(ASAN_SHADOW_SCALE >= 3 && ASAN_SHADOW_SCALE <= 7); if (kMidMemBeg) CHECK(kMidShadowBeg > kLowShadowEnd && kMidMemBeg > kMidShadowEnd && kHighShadowBeg > 
kMidMemEnd); } -#if defined(__thumb__) && defined(__linux__) -#define START_BACKGROUND_THREAD_IN_ASAN_INTERNAL -#endif - -#ifndef START_BACKGROUND_THREAD_IN_ASAN_INTERNAL -static bool UNUSED __local_asan_dyninit = [] { - MaybeStartBackgroudThread(); - SetSoftRssLimitExceededCallback(AsanSoftRssLimitExceededCallback); - - return false; -}(); -#endif - static void AsanInitInternal() { if (LIKELY(asan_inited)) return; SanitizerToolName = "AddressSanitizer"; @@ -438,7 +424,7 @@ static void AsanInitInternal() { MaybeReexec(); // Setup internal allocator callback. - SetLowLevelAllocateMinAlignment(SHADOW_GRANULARITY); + SetLowLevelAllocateMinAlignment(ASAN_SHADOW_GRANULARITY); SetLowLevelAllocateCallback(OnLowLevelAllocate); InitializeAsanInterceptors(); @@ -462,10 +448,8 @@ static void AsanInitInternal() { allocator_options.SetFrom(flags(), common_flags()); InitializeAllocator(allocator_options); -#ifdef START_BACKGROUND_THREAD_IN_ASAN_INTERNAL - MaybeStartBackgroudThread(); - SetSoftRssLimitExceededCallback(AsanSoftRssLimitExceededCallback); -#endif + if (SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL) + MaybeStartBackgroudThread(); // On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited // should be set to 1 prior to initializing the threads. @@ -493,12 +477,7 @@ static void AsanInitInternal() { if (CAN_SANITIZE_LEAKS) { __lsan::InitCommonLsan(); - if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) { - if (flags()->halt_on_error) - Atexit(__lsan::DoLeakCheck); - else - Atexit(__lsan::DoRecoverableLeakCheckVoid); - } + InstallAtExitCheckLeaks(); } #if CAN_SANITIZE_UB @@ -561,7 +540,7 @@ void UnpoisonStack(uptr bottom, uptr top, const char *type) { top - bottom); return; } - PoisonShadow(bottom, RoundUpTo(top - bottom, SHADOW_GRANULARITY), 0); + PoisonShadow(bottom, RoundUpTo(top - bottom, ASAN_SHADOW_GRANULARITY), 0); } static void UnpoisonDefaultStack() { diff --git a/libsanitizer/asan/asan_rtl_static.cpp b/libsanitizer/asan/asan_rtl_static.cpp new file mode 100644 index 00000000000..a6f812bb891 --- /dev/null +++ b/libsanitizer/asan/asan_rtl_static.cpp @@ -0,0 +1,36 @@ +//===-- asan_static_rtl.cpp -----------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Main file of the ASan run-time library. +//===----------------------------------------------------------------------===// + +// This file is empty for now. Main reason to have it is workaround for Windows +// build, which complains because no files are part of the asan_static lib. 
+ +#include "sanitizer_common/sanitizer_common.h" + +#define REPORT_FUNCTION(Name) \ + extern "C" SANITIZER_WEAK_ATTRIBUTE void Name(__asan::uptr addr); \ + extern "C" void Name##_asm(uptr addr) { Name(addr); } + +namespace __asan { + +REPORT_FUNCTION(__asan_report_load1) +REPORT_FUNCTION(__asan_report_load2) +REPORT_FUNCTION(__asan_report_load4) +REPORT_FUNCTION(__asan_report_load8) +REPORT_FUNCTION(__asan_report_load16) +REPORT_FUNCTION(__asan_report_store1) +REPORT_FUNCTION(__asan_report_store2) +REPORT_FUNCTION(__asan_report_store4) +REPORT_FUNCTION(__asan_report_store8) +REPORT_FUNCTION(__asan_report_store16) + +} // namespace __asan diff --git a/libsanitizer/asan/asan_rtl_x86_64.S b/libsanitizer/asan/asan_rtl_x86_64.S new file mode 100644 index 00000000000..d93b5ed2a7f --- /dev/null +++ b/libsanitizer/asan/asan_rtl_x86_64.S @@ -0,0 +1,146 @@ +#include "asan_mapping.h" +#include "sanitizer_common/sanitizer_asm.h" + +#if defined(__x86_64__) +#include "sanitizer_common/sanitizer_platform.h" + +.file "asan_rtl_x86_64.S" + +#define NAME(n, reg, op, s, i) n##_##op##_##i##_##s##_##reg + +#define FNAME(reg, op, s, i) NAME(__asan_check, reg, op, s, i) +#define RLABEL(reg, op, s, i) NAME(.return, reg, op, s, i) +#define CLABEL(reg, op, s, i) NAME(.check, reg, op, s, i) +#define FLABEL(reg, op, s, i) NAME(.fail, reg, op, s, i) + +#define BEGINF(reg, op, s, i) \ +.section .text.FNAME(reg, op, s, i),"ax",@progbits ;\ +.globl FNAME(reg, op, s, i) ;\ +.hidden FNAME(reg, op, s, i) ;\ +ASM_TYPE_FUNCTION(FNAME(reg, op, s, i)) ;\ +.cfi_startproc ;\ +FNAME(reg, op, s, i): ;\ + +#define ENDF .cfi_endproc ;\ + +// Access check functions for 1,2 and 4 byte types, which require extra checks. +#define ASAN_MEMORY_ACCESS_INITIAL_CHECK_ADD(reg, op, s) \ + mov %##reg,%r10 ;\ + shr $0x3,%r10 ;\ + movsbl ASAN_SHADOW_OFFSET_CONST(%r10),%r10d ;\ + test %r10d,%r10d ;\ + jne CLABEL(reg, op, s, add) ;\ +RLABEL(reg, op, s, add): ;\ + retq ;\ + +#define ASAN_MEMORY_ACCESS_EXTRA_CHECK_1(reg, op, i) \ +CLABEL(reg, op, 1, i): ;\ + push %rcx ;\ + mov %##reg,%rcx ;\ + and $0x7,%ecx ;\ + cmp %r10d,%ecx ;\ + pop %rcx ;\ + jl RLABEL(reg, op, 1, i);\ + mov %##reg,%rdi ;\ + jmp __asan_report_##op##1_asm ;\ + +#define ASAN_MEMORY_ACCESS_EXTRA_CHECK_2(reg, op, i) \ +CLABEL(reg, op, 2, i): ;\ + push %rcx ;\ + mov %##reg,%rcx ;\ + and $0x7,%ecx ;\ + add $0x1,%ecx ;\ + cmp %r10d,%ecx ;\ + pop %rcx ;\ + jl RLABEL(reg, op, 2, i);\ + mov %##reg,%rdi ;\ + jmp __asan_report_##op##2_asm ;\ + +#define ASAN_MEMORY_ACCESS_EXTRA_CHECK_4(reg, op, i) \ +CLABEL(reg, op, 4, i): ;\ + push %rcx ;\ + mov %##reg,%rcx ;\ + and $0x7,%ecx ;\ + add $0x3,%ecx ;\ + cmp %r10d,%ecx ;\ + pop %rcx ;\ + jl RLABEL(reg, op, 4, i);\ + mov %##reg,%rdi ;\ + jmp __asan_report_##op##4_asm ;\ + +#define ASAN_MEMORY_ACCESS_CALLBACK_ADD_1(reg, op) \ +BEGINF(reg, op, 1, add) ;\ + ASAN_MEMORY_ACCESS_INITIAL_CHECK_ADD(reg, op, 1) ;\ + ASAN_MEMORY_ACCESS_EXTRA_CHECK_1(reg, op, add) ;\ +ENDF + +#define ASAN_MEMORY_ACCESS_CALLBACK_ADD_2(reg, op) \ +BEGINF(reg, op, 2, add) ;\ + ASAN_MEMORY_ACCESS_INITIAL_CHECK_ADD(reg, op, 2) ;\ + ASAN_MEMORY_ACCESS_EXTRA_CHECK_2(reg, op, add) ;\ +ENDF + +#define ASAN_MEMORY_ACCESS_CALLBACK_ADD_4(reg, op) \ +BEGINF(reg, op, 4, add) ;\ + ASAN_MEMORY_ACCESS_INITIAL_CHECK_ADD(reg, op, 4) ;\ + ASAN_MEMORY_ACCESS_EXTRA_CHECK_4(reg, op, add) ;\ +ENDF + +// Access check functions for 8 and 16 byte types: no extra checks required. 
+#define ASAN_MEMORY_ACCESS_CHECK_ADD(reg, op, s, c) \ + mov %##reg,%r10 ;\ + shr $0x3,%r10 ;\ + ##c $0x0,ASAN_SHADOW_OFFSET_CONST(%r10) ;\ + jne FLABEL(reg, op, s, add) ;\ + retq ;\ + +#define ASAN_MEMORY_ACCESS_FAIL(reg, op, s, i) \ +FLABEL(reg, op, s, i): ;\ + mov %##reg,%rdi ;\ + jmp __asan_report_##op##s##_asm;\ + +#define ASAN_MEMORY_ACCESS_CALLBACK_ADD_8(reg, op) \ +BEGINF(reg, op, 8, add) ;\ + ASAN_MEMORY_ACCESS_CHECK_ADD(reg, op, 8, cmpb) ;\ + ASAN_MEMORY_ACCESS_FAIL(reg, op, 8, add) ;\ +ENDF + +#define ASAN_MEMORY_ACCESS_CALLBACK_ADD_16(reg, op) \ +BEGINF(reg, op, 16, add) ;\ + ASAN_MEMORY_ACCESS_CHECK_ADD(reg, op, 16, cmpw) ;\ + ASAN_MEMORY_ACCESS_FAIL(reg, op, 16, add) ;\ +ENDF + +#define ASAN_MEMORY_ACCESS_CALLBACKS_ADD(reg) \ +ASAN_MEMORY_ACCESS_CALLBACK_ADD_1(reg, load) \ +ASAN_MEMORY_ACCESS_CALLBACK_ADD_1(reg, store) \ +ASAN_MEMORY_ACCESS_CALLBACK_ADD_2(reg, load) \ +ASAN_MEMORY_ACCESS_CALLBACK_ADD_2(reg, store) \ +ASAN_MEMORY_ACCESS_CALLBACK_ADD_4(reg, load) \ +ASAN_MEMORY_ACCESS_CALLBACK_ADD_4(reg, store) \ +ASAN_MEMORY_ACCESS_CALLBACK_ADD_8(reg, load) \ +ASAN_MEMORY_ACCESS_CALLBACK_ADD_8(reg, store) \ +ASAN_MEMORY_ACCESS_CALLBACK_ADD_16(reg, load) \ +ASAN_MEMORY_ACCESS_CALLBACK_ADD_16(reg, store) \ + + +// Instantiate all but R10 and R11 callbacks. We are using PLTSafe class with +// the intrinsic, which guarantees that the code generation will never emit +// R10 or R11 callback. +ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RAX) +ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RBX) +ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RCX) +ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RDX) +ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RSI) +ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RDI) +ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RBP) +ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R8) +ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R9) +ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R12) +ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R13) +ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R14) +ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R15) + +#endif + +NO_EXEC_STACK_DIRECTIVE diff --git a/libsanitizer/asan/asan_thread.cpp b/libsanitizer/asan/asan_thread.cpp index 8af74254cdc..c15963e1418 100644 --- a/libsanitizer/asan/asan_thread.cpp +++ b/libsanitizer/asan/asan_thread.cpp @@ -83,8 +83,7 @@ AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg, thread->start_routine_ = start_routine; thread->arg_ = arg; AsanThreadContext::CreateThreadContextArgs args = {thread, stack}; - asanThreadRegistry().CreateThread(*reinterpret_cast(thread), detached, - parent_tid, &args); + asanThreadRegistry().CreateThread(0, detached, parent_tid, &args); return thread; } @@ -306,7 +305,7 @@ void AsanThread::SetThreadStackAndTls(const InitOptions *options) { uptr stack_size = 0; GetThreadStackAndTls(tid() == kMainTid, &stack_bottom_, &stack_size, &tls_begin_, &tls_size); - stack_top_ = RoundDownTo(stack_bottom_ + stack_size, SHADOW_GRANULARITY); + stack_top_ = RoundDownTo(stack_bottom_ + stack_size, ASAN_SHADOW_GRANULARITY); tls_end_ = tls_begin_ + tls_size; dtls_ = DTLS_Get(); @@ -322,11 +321,9 @@ void AsanThread::ClearShadowForThreadStackAndTLS() { if (stack_top_ != stack_bottom_) PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0); if (tls_begin_ != tls_end_) { - uptr tls_begin_aligned = RoundDownTo(tls_begin_, SHADOW_GRANULARITY); - uptr tls_end_aligned = RoundUpTo(tls_end_, SHADOW_GRANULARITY); - FastPoisonShadowPartialRightRedzone(tls_begin_aligned, - tls_end_ - tls_begin_aligned, - tls_end_aligned - tls_end_, 0); + uptr tls_begin_aligned = RoundDownTo(tls_begin_, ASAN_SHADOW_GRANULARITY); + uptr tls_end_aligned = 
RoundUpTo(tls_end_, ASAN_SHADOW_GRANULARITY); + FastPoisonShadow(tls_begin_aligned, tls_end_aligned - tls_begin_aligned, 0); } } @@ -347,27 +344,27 @@ bool AsanThread::GetStackFrameAccessByAddr(uptr addr, return true; } uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8); // align addr. - uptr mem_ptr = RoundDownTo(aligned_addr, SHADOW_GRANULARITY); + uptr mem_ptr = RoundDownTo(aligned_addr, ASAN_SHADOW_GRANULARITY); u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr); u8 *shadow_bottom = (u8*)MemToShadow(bottom); while (shadow_ptr >= shadow_bottom && *shadow_ptr != kAsanStackLeftRedzoneMagic) { shadow_ptr--; - mem_ptr -= SHADOW_GRANULARITY; + mem_ptr -= ASAN_SHADOW_GRANULARITY; } while (shadow_ptr >= shadow_bottom && *shadow_ptr == kAsanStackLeftRedzoneMagic) { shadow_ptr--; - mem_ptr -= SHADOW_GRANULARITY; + mem_ptr -= ASAN_SHADOW_GRANULARITY; } if (shadow_ptr < shadow_bottom) { return false; } - uptr* ptr = (uptr*)(mem_ptr + SHADOW_GRANULARITY); + uptr *ptr = (uptr *)(mem_ptr + ASAN_SHADOW_GRANULARITY); CHECK(ptr[0] == kCurrentStackFrameMagic); access->offset = addr - (uptr)ptr; access->frame_pc = ptr[2]; diff --git a/libsanitizer/asan/asan_win.cpp b/libsanitizer/asan/asan_win.cpp index 1577c83cf99..81958038fb1 100644 --- a/libsanitizer/asan/asan_win.cpp +++ b/libsanitizer/asan/asan_win.cpp @@ -1,4 +1,5 @@ -//===-- asan_win.cpp ------------------------------------------------------===// +//===-- asan_win.cpp +//------------------------------------------------------===//> // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. @@ -13,21 +14,20 @@ #include "sanitizer_common/sanitizer_platform.h" #if SANITIZER_WINDOWS -#define WIN32_LEAN_AND_MEAN -#include - -#include - -#include "asan_interceptors.h" -#include "asan_internal.h" -#include "asan_mapping.h" -#include "asan_report.h" -#include "asan_stack.h" -#include "asan_thread.h" -#include "sanitizer_common/sanitizer_libc.h" -#include "sanitizer_common/sanitizer_mutex.h" -#include "sanitizer_common/sanitizer_win.h" -#include "sanitizer_common/sanitizer_win_defs.h" +# define WIN32_LEAN_AND_MEAN +# include +# include + +# include "asan_interceptors.h" +# include "asan_internal.h" +# include "asan_mapping.h" +# include "asan_report.h" +# include "asan_stack.h" +# include "asan_thread.h" +# include "sanitizer_common/sanitizer_libc.h" +# include "sanitizer_common/sanitizer_mutex.h" +# include "sanitizer_common/sanitizer_win.h" +# include "sanitizer_common/sanitizer_win_defs.h" using namespace __asan; @@ -49,8 +49,8 @@ uptr __asan_get_shadow_memory_dynamic_address() { static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler; static LPTOP_LEVEL_EXCEPTION_FILTER user_seh_handler; -extern "C" SANITIZER_INTERFACE_ATTRIBUTE -long __asan_unhandled_exception_filter(EXCEPTION_POINTERS *info) { +extern "C" SANITIZER_INTERFACE_ATTRIBUTE long __asan_unhandled_exception_filter( + EXCEPTION_POINTERS *info) { EXCEPTION_RECORD *exception_record = info->ExceptionRecord; CONTEXT *context = info->ContextRecord; @@ -187,6 +187,8 @@ void InitializePlatformInterceptors() { } } +void InstallAtExitCheckLeaks() {} + void AsanApplyToGlobals(globals_op_fptr op, const void *needle) { UNIMPLEMENTED(); } @@ -253,7 +255,7 @@ void *AsanDoesNotSupportStaticLinkage() { } uptr FindDynamicShadowStart() { - return MapDynamicShadow(MemToShadowSize(kHighMemEnd), SHADOW_SCALE, + return MapDynamicShadow(MemToShadowSize(kHighMemEnd), ASAN_SHADOW_SCALE, /*min_shadow_base_alignment*/ 0, 
kHighMemEnd); } diff --git a/libsanitizer/asan/asan_win_dll_thunk.cpp b/libsanitizer/asan/asan_win_dll_thunk.cpp index a5671cc9dff..e3a90f18ed8 100644 --- a/libsanitizer/asan/asan_win_dll_thunk.cpp +++ b/libsanitizer/asan/asan_win_dll_thunk.cpp @@ -56,6 +56,13 @@ INTERCEPT_WRAP_W_W(_expand_dbg) // TODO(timurrrr): Do we need to add _Crt* stuff here? (see asan_malloc_win.cpp) +# if defined(_MSC_VER) && !defined(__clang__) +// Disable warnings such as: 'void memchr(void)': incorrect number of arguments +// for intrinsic function, expected '3' arguments. +# pragma warning(push) +# pragma warning(disable : 4392) +# endif + INTERCEPT_LIBRARY_FUNCTION(atoi); INTERCEPT_LIBRARY_FUNCTION(atol); INTERCEPT_LIBRARY_FUNCTION(frexp); @@ -87,6 +94,10 @@ INTERCEPT_LIBRARY_FUNCTION(strtol); INTERCEPT_LIBRARY_FUNCTION(wcslen); INTERCEPT_LIBRARY_FUNCTION(wcsnlen); +# if defined(_MSC_VER) && !defined(__clang__) +# pragma warning(pop) +# endif + #ifdef _WIN64 INTERCEPT_LIBRARY_FUNCTION(__C_specific_handler); #else diff --git a/libsanitizer/builtins/assembly.h b/libsanitizer/builtins/assembly.h index 9c015059af5..69a3d8620f9 100644 --- a/libsanitizer/builtins/assembly.h +++ b/libsanitizer/builtins/assembly.h @@ -14,6 +14,12 @@ #ifndef COMPILERRT_ASSEMBLY_H #define COMPILERRT_ASSEMBLY_H +#if defined(__linux__) && defined(__CET__) +#if __has_include() +#include +#endif +#endif + #if defined(__APPLE__) && defined(__aarch64__) #define SEPARATOR %% #else diff --git a/libsanitizer/hwasan/hwasan.cpp b/libsanitizer/hwasan/hwasan.cpp index 6f0ea64472c..f8725a17343 100644 --- a/libsanitizer/hwasan/hwasan.cpp +++ b/libsanitizer/hwasan/hwasan.cpp @@ -25,6 +25,7 @@ #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_flag_parser.h" #include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_interface_internal.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_procmaps.h" #include "sanitizer_common/sanitizer_stackdepot.h" diff --git a/libsanitizer/hwasan/hwasan.h b/libsanitizer/hwasan/hwasan.h index 371c43f3cbd..3cc2fc40b5f 100644 --- a/libsanitizer/hwasan/hwasan.h +++ b/libsanitizer/hwasan/hwasan.h @@ -172,21 +172,6 @@ void HwasanTagMismatch(uptr addr, uptr access_info, uptr *registers_frame, } // namespace __hwasan -#define HWASAN_MALLOC_HOOK(ptr, size) \ - do { \ - if (&__sanitizer_malloc_hook) { \ - __sanitizer_malloc_hook(ptr, size); \ - } \ - RunMallocHooks(ptr, size); \ - } while (false) -#define HWASAN_FREE_HOOK(ptr) \ - do { \ - if (&__sanitizer_free_hook) { \ - __sanitizer_free_hook(ptr); \ - } \ - RunFreeHooks(ptr); \ - } while (false) - #if HWASAN_WITH_INTERCEPTORS // For both bionic and glibc __sigset_t is an unsigned long. 
typedef unsigned long __hw_sigset_t; diff --git a/libsanitizer/hwasan/hwasan_allocator.cpp b/libsanitizer/hwasan/hwasan_allocator.cpp index 9e1729964e2..842455150c7 100644 --- a/libsanitizer/hwasan/hwasan_allocator.cpp +++ b/libsanitizer/hwasan/hwasan_allocator.cpp @@ -132,6 +132,11 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment, } ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack); } + if (UNLIKELY(IsRssLimitExceeded())) { + if (AllocatorMayReturnNull()) + return nullptr; + ReportRssLimitExceeded(stack); + } alignment = Max(alignment, kShadowAlignment); uptr size = TaggedSize(orig_size); @@ -194,7 +199,7 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment, } } - HWASAN_MALLOC_HOOK(user_ptr, size); + RunMallocHooks(user_ptr, size); return user_ptr; } @@ -221,7 +226,7 @@ static bool CheckInvalidFree(StackTrace *stack, void *untagged_ptr, static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) { CHECK(tagged_ptr); - HWASAN_FREE_HOOK(tagged_ptr); + RunFreeHooks(tagged_ptr); bool in_taggable_region = InTaggableRegion(reinterpret_cast(tagged_ptr)); diff --git a/libsanitizer/hwasan/hwasan_interceptors.cpp b/libsanitizer/hwasan/hwasan_interceptors.cpp index f96ed880410..8dc886e587e 100644 --- a/libsanitizer/hwasan/hwasan_interceptors.cpp +++ b/libsanitizer/hwasan/hwasan_interceptors.cpp @@ -47,6 +47,12 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr, void *(*callback)(void*), return res; } +INTERCEPTOR(int, pthread_join, void *t, void **arg) { + return REAL(pthread_join)(t, arg); +} + +DEFINE_REAL_PTHREAD_FUNCTIONS + DEFINE_REAL(int, vfork) DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(int, vfork) @@ -189,7 +195,8 @@ void InitializeInterceptors() { INTERCEPT_FUNCTION(vfork); #endif // __linux__ INTERCEPT_FUNCTION(pthread_create); -#endif + INTERCEPT_FUNCTION(pthread_join); +# endif inited = 1; } diff --git a/libsanitizer/hwasan/hwasan_memintrinsics.cpp b/libsanitizer/hwasan/hwasan_memintrinsics.cpp index fab017aae60..ea7f5ce40b0 100644 --- a/libsanitizer/hwasan/hwasan_memintrinsics.cpp +++ b/libsanitizer/hwasan/hwasan_memintrinsics.cpp @@ -40,5 +40,5 @@ void *__hwasan_memmove(void *to, const void *from, uptr size) { reinterpret_cast(to), size); CheckAddressSized( reinterpret_cast(from), size); - return memmove(UntagPtr(to), UntagPtr(from), size); + return memmove(to, from, size); } diff --git a/libsanitizer/hwasan/hwasan_new_delete.cpp b/libsanitizer/hwasan/hwasan_new_delete.cpp index 4e057a651e1..495046a754f 100644 --- a/libsanitizer/hwasan/hwasan_new_delete.cpp +++ b/libsanitizer/hwasan/hwasan_new_delete.cpp @@ -22,21 +22,23 @@ #if HWASAN_REPLACE_OPERATORS_NEW_AND_DELETE // TODO(alekseys): throw std::bad_alloc instead of dying on OOM. 
-#define OPERATOR_NEW_BODY(nothrow) \ - GET_MALLOC_STACK_TRACE; \ - void *res = hwasan_malloc(size, &stack);\ - if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\ - return res -#define OPERATOR_NEW_ALIGN_BODY(nothrow) \ - GET_MALLOC_STACK_TRACE; \ - void *res = hwasan_aligned_alloc(static_cast(align), size, &stack); \ - if (!nothrow && UNLIKELY(!res)) \ - ReportOutOfMemory(size, &stack); \ - return res - -#define OPERATOR_DELETE_BODY \ - GET_MALLOC_STACK_TRACE; \ - if (ptr) hwasan_free(ptr, &stack) +# define OPERATOR_NEW_BODY(nothrow) \ + GET_MALLOC_STACK_TRACE; \ + void *res = hwasan_malloc(size, &stack); \ + if (!nothrow && UNLIKELY(!res)) \ + ReportOutOfMemory(size, &stack); \ + return res +# define OPERATOR_NEW_ALIGN_BODY(nothrow) \ + GET_MALLOC_STACK_TRACE; \ + void *res = hwasan_memalign(static_cast(align), size, &stack); \ + if (!nothrow && UNLIKELY(!res)) \ + ReportOutOfMemory(size, &stack); \ + return res + +# define OPERATOR_DELETE_BODY \ + GET_MALLOC_STACK_TRACE; \ + if (ptr) \ + hwasan_free(ptr, &stack) #elif defined(__ANDROID__) @@ -44,8 +46,8 @@ // since we previously released a runtime that intercepted these functions, // removing the interceptors would break ABI. Therefore we simply forward to // malloc and free. -#define OPERATOR_NEW_BODY(nothrow) return malloc(size) -#define OPERATOR_DELETE_BODY free(ptr) +# define OPERATOR_NEW_BODY(nothrow) return malloc(size) +# define OPERATOR_DELETE_BODY free(ptr) #endif @@ -55,26 +57,27 @@ using namespace __hwasan; // Fake std::nothrow_t to avoid including . namespace std { - struct nothrow_t {}; +struct nothrow_t {}; } // namespace std - - -INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE -void *operator new(size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); } -INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE -void *operator new[](size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); } -INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE -void *operator new(size_t size, std::nothrow_t const&) { +INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new(size_t size) { + OPERATOR_NEW_BODY(false /*nothrow*/); +} +INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new[]( + size_t size) { + OPERATOR_NEW_BODY(false /*nothrow*/); +} +INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new( + size_t size, std::nothrow_t const &) { OPERATOR_NEW_BODY(true /*nothrow*/); } -INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE -void *operator new[](size_t size, std::nothrow_t const&) { +INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new[]( + size_t size, std::nothrow_t const &) { OPERATOR_NEW_BODY(true /*nothrow*/); } -INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(void *ptr) - NOEXCEPT { +INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete( + void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; } INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[]( diff --git a/libsanitizer/hwasan/hwasan_preinit.cpp b/libsanitizer/hwasan/hwasan_preinit.cpp new file mode 100644 index 00000000000..8c9c95f413b --- /dev/null +++ b/libsanitizer/hwasan/hwasan_preinit.cpp @@ -0,0 +1,23 @@ +//===-- hwasan_preinit.cpp ------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of HWAddressSanitizer, an address sanity checker. +// +// Call __hwasan_init at the very early stage of process startup. +//===----------------------------------------------------------------------===// +#include "hwasan_interface_internal.h" +#include "sanitizer_common/sanitizer_internal_defs.h" + +#if SANITIZER_CAN_USE_PREINIT_ARRAY +// The symbol is called __local_hwasan_preinit, because it's not intended to +// be exported. +// This code linked into the main executable when -fsanitize=hwaddress is in +// the link flags. It can only use exported interface functions. +__attribute__((section(".preinit_array"), used)) static void ( + *__local_hwasan_preinit)(void) = __hwasan_init; +#endif diff --git a/libsanitizer/include/sanitizer/common_interface_defs.h b/libsanitizer/include/sanitizer/common_interface_defs.h index 692b8f70c96..ba58ad46f32 100644 --- a/libsanitizer/include/sanitizer/common_interface_defs.h +++ b/libsanitizer/include/sanitizer/common_interface_defs.h @@ -211,6 +211,15 @@ void __sanitizer_symbolize_pc(void *pc, const char *fmt, char *out_buf, // Same as __sanitizer_symbolize_pc, but for data section (i.e. globals). void __sanitizer_symbolize_global(void *data_ptr, const char *fmt, char *out_buf, size_t out_buf_size); +// Determine the return address. +#if !defined(_MSC_VER) || defined(__clang__) +#define __sanitizer_return_address() \ + __builtin_extract_return_addr(__builtin_return_address(0)) +#else +extern "C" void *_ReturnAddress(void); +#pragma intrinsic(_ReturnAddress) +#define __sanitizer_return_address() _ReturnAddress() +#endif /// Sets the callback to be called immediately before death on error. /// diff --git a/libsanitizer/include/sanitizer/dfsan_interface.h b/libsanitizer/include/sanitizer/dfsan_interface.h index d6209a3ea2b..8e581a67572 100644 --- a/libsanitizer/include/sanitizer/dfsan_interface.h +++ b/libsanitizer/include/sanitizer/dfsan_interface.h @@ -27,6 +27,10 @@ typedef uint32_t dfsan_origin; /// Signature of the callback argument to dfsan_set_write_callback(). typedef void (*dfsan_write_callback_t)(int fd, const void *buf, size_t count); +/// Signature of the callback argument to dfsan_set_conditional_callback(). +typedef void (*dfsan_conditional_callback_t)(dfsan_label label, + dfsan_origin origin); + /// Computes the union of \c l1 and \c l2, resulting in a union label. dfsan_label dfsan_union(dfsan_label l1, dfsan_label l2); @@ -54,6 +58,10 @@ dfsan_origin dfsan_get_origin(long data); /// Retrieves the label associated with the data at the given address. dfsan_label dfsan_read_label(const void *addr, size_t size); +/// Return the origin associated with the first taint byte in the size bytes +/// from the address addr. +dfsan_origin dfsan_read_origin_of_first_taint(const void *addr, size_t size); + /// Returns whether the given label label contains the label elem. int dfsan_has_label(dfsan_label label, dfsan_label elem); @@ -70,6 +78,19 @@ void dfsan_flush(void); /// callback executes. Pass in NULL to remove any callback. void dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback); +/// Sets a callback to be invoked on any conditional expressions which have a +/// taint label set. This can be used to find where tainted data influences +/// the behavior of the program. +/// These callbacks will only be added when -dfsan-conditional-callbacks=true. 
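(Aside: a minimal usage sketch for the conditional-callback interface being added here, using the setter declared just below. The callback name and the logging are invented for illustration; per the comment above, instrumentation emits these callbacks only under -dfsan-conditional-callbacks=true.)

  #include <sanitizer/dfsan_interface.h>
  #include <stdio.h>

  // Invoked on each conditional whose condition carries a non-zero label.
  static void my_taint_logger(dfsan_label label, dfsan_origin origin) {
    fprintf(stderr, "tainted condition: label=%u origin=%u\n",
            (unsigned)label, (unsigned)origin);
  }

  int main(void) {
    dfsan_set_conditional_callback(my_taint_logger);
    // ... exercise instrumented code; labels observed inside signal
    // handlers accumulate and can be read afterwards:
    dfsan_label in_signals = dfsan_get_labels_in_signal_conditional();
    (void)in_signals;
    return 0;
  }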
+void dfsan_set_conditional_callback(dfsan_conditional_callback_t callback); + +/// Conditional expressions occur during signal handlers. +/// Making callbacks that handle signals well is tricky, so when +/// -dfsan-conditional-callbacks=true, conditional expressions used in signal +/// handlers will add the labels they see into a global (bitwise-or together). +/// This function returns all label bits seen in signal handler conditions. +dfsan_label dfsan_get_labels_in_signal_conditional(); + /// Interceptor hooks. /// Whenever a dfsan's custom function is called the corresponding /// hook is called it non-zero. The hooks should be defined by the user. @@ -87,6 +108,9 @@ void dfsan_weak_hook_strncmp(void *caller_pc, const char *s1, const char *s2, /// prints description at the beginning of the trace. If origin tracking is not /// on, or the address is not labeled, it prints nothing. void dfsan_print_origin_trace(const void *addr, const char *description); +/// As above, but use an origin id from dfsan_get_origin() instead of address. +/// Does not include header line with taint label and address information. +void dfsan_print_origin_id_trace(dfsan_origin origin); /// Prints the origin trace of the label at the address \p addr to a /// pre-allocated output buffer. If origin tracking is not on, or the address is @@ -124,6 +148,10 @@ void dfsan_print_origin_trace(const void *addr, const char *description); /// return value is not less than \p out_buf_size. size_t dfsan_sprint_origin_trace(const void *addr, const char *description, char *out_buf, size_t out_buf_size); +/// As above, but use an origin id from dfsan_get_origin() instead of address. +/// Does not include header line with taint label and address information. +size_t dfsan_sprint_origin_id_trace(dfsan_origin origin, char *out_buf, + size_t out_buf_size); /// Prints the stack trace leading to this call to a pre-allocated output /// buffer. diff --git a/libsanitizer/interception/interception_win.cpp b/libsanitizer/interception/interception_win.cpp index 38b8c058246..10b893391f4 100644 --- a/libsanitizer/interception/interception_win.cpp +++ b/libsanitizer/interception/interception_win.cpp @@ -401,6 +401,7 @@ static uptr AllocateMemoryForTrampoline(uptr image_address, size_t size) { // The following prologues cannot be patched because of the short jump // jumping to the patching region. +#if SANITIZER_WINDOWS64 // ntdll!wcslen in Win11 // 488bc1 mov rax,rcx // 0fb710 movzx edx,word ptr [rax] @@ -422,6 +423,7 @@ static const u8 kPrologueWithShortJump2[] = { 0x4c, 0x8b, 0xc1, 0x8a, 0x01, 0x48, 0xff, 0xc1, 0x84, 0xc0, 0x75, 0xf7, }; +#endif // Returns 0 on error. 
static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) { @@ -602,6 +604,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) { case 0x246c8948: // 48 89 6C 24 XX : mov QWORD ptr [rsp + XX], rbp case 0x245c8948: // 48 89 5c 24 XX : mov QWORD PTR [rsp + XX], rbx case 0x24748948: // 48 89 74 24 XX : mov QWORD PTR [rsp + XX], rsi + case 0x247c8948: // 48 89 7c 24 XX : mov QWORD PTR [rsp + XX], rdi case 0x244C8948: // 48 89 4C 24 XX : mov QWORD PTR [rsp + XX], rcx case 0x24548948: // 48 89 54 24 XX : mov QWORD PTR [rsp + XX], rdx case 0x244c894c: // 4c 89 4c 24 XX : mov QWORD PTR [rsp + XX], r9 diff --git a/libsanitizer/lsan/lsan.cpp b/libsanitizer/lsan/lsan.cpp index b6adc248157..489c5ca01fe 100644 --- a/libsanitizer/lsan/lsan.cpp +++ b/libsanitizer/lsan/lsan.cpp @@ -13,11 +13,12 @@ #include "lsan.h" -#include "sanitizer_common/sanitizer_flags.h" -#include "sanitizer_common/sanitizer_flag_parser.h" #include "lsan_allocator.h" #include "lsan_common.h" #include "lsan_thread.h" +#include "sanitizer_common/sanitizer_flag_parser.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_interface_internal.h" bool lsan_inited; bool lsan_init_is_running; @@ -99,9 +100,7 @@ extern "C" void __lsan_init() { InitializeThreadRegistry(); InstallDeadlySignalHandlers(LsanOnDeadlySignal); InitializeMainThread(); - - if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) - Atexit(DoLeakCheck); + InstallAtExitCheckLeaks(); InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir); diff --git a/libsanitizer/lsan/lsan.h b/libsanitizer/lsan/lsan.h index 1e82ad72f00..757edec8e10 100644 --- a/libsanitizer/lsan/lsan.h +++ b/libsanitizer/lsan/lsan.h @@ -13,17 +13,17 @@ #include "lsan_thread.h" #if SANITIZER_POSIX -#include "lsan_posix.h" +# include "lsan_posix.h" #elif SANITIZER_FUCHSIA -#include "lsan_fuchsia.h" +# include "lsan_fuchsia.h" #endif #include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_stacktrace.h" -#define GET_STACK_TRACE(max_size, fast) \ - __sanitizer::BufferedStackTrace stack; \ - stack.Unwind(StackTrace::GetCurrentPc(), \ - GET_CURRENT_FRAME(), nullptr, fast, max_size); +#define GET_STACK_TRACE(max_size, fast) \ + __sanitizer::BufferedStackTrace stack; \ + stack.Unwind(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), nullptr, fast, \ + max_size); #define GET_STACK_TRACE_FATAL \ GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal) @@ -39,12 +39,14 @@ namespace __lsan { void InitializeInterceptors(); void ReplaceSystemMalloc(); void LsanOnDeadlySignal(int signo, void *siginfo, void *context); - -#define ENSURE_LSAN_INITED do { \ - CHECK(!lsan_init_is_running); \ - if (!lsan_inited) \ - __lsan_init(); \ -} while (0) +void InstallAtExitCheckLeaks(); + +#define ENSURE_LSAN_INITED \ + do { \ + CHECK(!lsan_init_is_running); \ + if (!lsan_inited) \ + __lsan_init(); \ + } while (0) } // namespace __lsan diff --git a/libsanitizer/lsan/lsan_allocator.cpp b/libsanitizer/lsan/lsan_allocator.cpp index 91e34ebb321..b4fd7e904be 100644 --- a/libsanitizer/lsan/lsan_allocator.cpp +++ b/libsanitizer/lsan/lsan_allocator.cpp @@ -27,11 +27,11 @@ extern "C" void *memset(void *ptr, int value, uptr num); namespace __lsan { #if defined(__i386__) || defined(__arm__) -static const uptr kMaxAllowedMallocSize = 1UL << 30; +static const uptr kMaxAllowedMallocSize = 1ULL << 30; #elif defined(__mips64) || defined(__aarch64__) -static const uptr kMaxAllowedMallocSize = 4UL 
<< 30; +static const uptr kMaxAllowedMallocSize = 4ULL << 30; #else -static const uptr kMaxAllowedMallocSize = 8UL << 30; +static const uptr kMaxAllowedMallocSize = 8ULL << 30; #endif static Allocator allocator; @@ -88,6 +88,11 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment, size = 1; if (size > max_malloc_size) return ReportAllocationSizeTooBig(size, stack); + if (UNLIKELY(IsRssLimitExceeded())) { + if (AllocatorMayReturnNull()) + return nullptr; + ReportRssLimitExceeded(&stack); + } void *p = allocator.Allocate(GetAllocatorCache(), size, alignment); if (UNLIKELY(!p)) { SetAllocatorOutOfMemory(); @@ -99,7 +104,6 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment, if (cleared && allocator.FromPrimary(p)) memset(p, 0, size); RegisterAllocation(stack, p, size); - if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size); RunMallocHooks(p, size); return p; } @@ -115,7 +119,6 @@ static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) { } void Deallocate(void *p) { - if (&__sanitizer_free_hook) __sanitizer_free_hook(p); RunFreeHooks(p); RegisterDeallocation(p); allocator.Deallocate(GetAllocatorCache(), p); @@ -359,16 +362,4 @@ uptr __sanitizer_get_allocated_size(const void *p) { return GetMallocUsableSize(p); } -#if !SANITIZER_SUPPORTS_WEAK_HOOKS -// Provide default (no-op) implementation of malloc hooks. -SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE -void __sanitizer_malloc_hook(void *ptr, uptr size) { - (void)ptr; - (void)size; -} -SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE -void __sanitizer_free_hook(void *ptr) { - (void)ptr; -} -#endif } // extern "C" diff --git a/libsanitizer/lsan/lsan_allocator.h b/libsanitizer/lsan/lsan_allocator.h index 45c6ac406f8..539330491b0 100644 --- a/libsanitizer/lsan/lsan_allocator.h +++ b/libsanitizer/lsan/lsan_allocator.h @@ -66,12 +66,9 @@ template using PrimaryAllocatorASVT = SizeClassAllocator32>; using PrimaryAllocator = PrimaryAllocatorASVT; #elif defined(__x86_64__) || defined(__powerpc64__) || defined(__s390x__) -# if SANITIZER_FUCHSIA +# if SANITIZER_FUCHSIA || defined(__powerpc64__) const uptr kAllocatorSpace = ~(uptr)0; const uptr kAllocatorSize = 0x40000000000ULL; // 4T. -# elif defined(__powerpc64__) -const uptr kAllocatorSpace = 0xa0000000000ULL; -const uptr kAllocatorSize = 0x20000000000ULL; // 2T. #elif defined(__s390x__) const uptr kAllocatorSpace = 0x40000000000ULL; const uptr kAllocatorSize = 0x40000000000ULL; // 4T. 
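(Aside: the hwasan_allocator.cpp hunk earlier and the lsan_allocator.cpp hunk just above both add the same early-out; condensed here for reference, with the rest of the allocation path elided.)

  void *Allocate(const StackTrace &stack, uptr size /* ... */) {
    if (UNLIKELY(IsRssLimitExceeded())) {  // RSS cap (hard_rss_limit_mb) hit
      if (AllocatorMayReturnNull())
        return nullptr;                    // allocator_may_return_null=1
      ReportRssLimitExceeded(&stack);      // otherwise report and die
    }
    // ... normal allocation path continues as before ...
  }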
diff --git a/libsanitizer/lsan/lsan_common.cpp b/libsanitizer/lsan/lsan_common.cpp index 308dbb3e41d..8d1bf11fdab 100644 --- a/libsanitizer/lsan/lsan_common.cpp +++ b/libsanitizer/lsan/lsan_common.cpp @@ -34,7 +34,6 @@ Mutex global_mutex; Flags lsan_flags; - void DisableCounterUnderflow() { if (common_flags()->detect_leaks) { Report("Unmatched call to __lsan_enable().\n"); @@ -43,44 +42,48 @@ void DisableCounterUnderflow() { } void Flags::SetDefaults() { -#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue; -#include "lsan_flags.inc" -#undef LSAN_FLAG +# define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue; +# include "lsan_flags.inc" +# undef LSAN_FLAG } void RegisterLsanFlags(FlagParser *parser, Flags *f) { -#define LSAN_FLAG(Type, Name, DefaultValue, Description) \ - RegisterFlag(parser, #Name, Description, &f->Name); -#include "lsan_flags.inc" -#undef LSAN_FLAG +# define LSAN_FLAG(Type, Name, DefaultValue, Description) \ + RegisterFlag(parser, #Name, Description, &f->Name); +# include "lsan_flags.inc" +# undef LSAN_FLAG } -#define LOG_POINTERS(...) \ - do { \ - if (flags()->log_pointers) Report(__VA_ARGS__); \ - } while (0) +# define LOG_POINTERS(...) \ + do { \ + if (flags()->log_pointers) \ + Report(__VA_ARGS__); \ + } while (0) -#define LOG_THREADS(...) \ - do { \ - if (flags()->log_threads) Report(__VA_ARGS__); \ - } while (0) +# define LOG_THREADS(...) \ + do { \ + if (flags()->log_threads) \ + Report(__VA_ARGS__); \ + } while (0) class LeakSuppressionContext { bool parsed = false; SuppressionContext context; bool suppressed_stacks_sorted = true; InternalMmapVector suppressed_stacks; + const LoadedModule *suppress_module = nullptr; - Suppression *GetSuppressionForAddr(uptr addr); void LazyInit(); + Suppression *GetSuppressionForAddr(uptr addr); + bool SuppressInvalid(const StackTrace &stack); + bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size); public: LeakSuppressionContext(const char *supprression_types[], int suppression_types_num) : context(supprression_types, suppression_types_num) {} - Suppression *GetSuppressionForStack(u32 stack_trace_id, - const StackTrace &stack); + bool Suppress(u32 stack_trace_id, uptr hit_count, uptr total_size); const InternalMmapVector &GetSortedSuppressedStacks() { if (!suppressed_stacks_sorted) { @@ -95,17 +98,17 @@ class LeakSuppressionContext { ALIGNED(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)]; static LeakSuppressionContext *suppression_ctx = nullptr; static const char kSuppressionLeak[] = "leak"; -static const char *kSuppressionTypes[] = { kSuppressionLeak }; +static const char *kSuppressionTypes[] = {kSuppressionLeak}; static const char kStdSuppressions[] = -#if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT +# if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT // definition. "leak:*pthread_exit*\n" -#endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT -#if SANITIZER_MAC +# endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT +# if SANITIZER_MAC // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173 "leak:*_os_trace*\n" -#endif +# endif // TLS leak in some glibc versions, described in // https://sourceware.org/bugzilla/show_bug.cgi?id=12650. 
"leak:*tls_get_addr*\n"; @@ -123,9 +126,93 @@ void LeakSuppressionContext::LazyInit() { if (&__lsan_default_suppressions) context.Parse(__lsan_default_suppressions()); context.Parse(kStdSuppressions); + if (flags()->use_tls && flags()->use_ld_allocations) + suppress_module = GetLinker(); } } +Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) { + Suppression *s = nullptr; + + // Suppress by module name. + const char *module_name = Symbolizer::GetOrInit()->GetModuleNameForPc(addr); + if (!module_name) + module_name = ""; + if (context.Match(module_name, kSuppressionLeak, &s)) + return s; + + // Suppress by file or function name. + SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr); + for (SymbolizedStack *cur = frames; cur; cur = cur->next) { + if (context.Match(cur->info.function, kSuppressionLeak, &s) || + context.Match(cur->info.file, kSuppressionLeak, &s)) { + break; + } + } + frames->ClearAll(); + return s; +} + +static uptr GetCallerPC(const StackTrace &stack) { + // The top frame is our malloc/calloc/etc. The next frame is the caller. + if (stack.size >= 2) + return stack.trace[1]; + return 0; +} + +// On Linux, treats all chunks allocated from ld-linux.so as reachable, which +// covers dynamically allocated TLS blocks, internal dynamic loader's loaded +// modules accounting etc. +// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules. +// They are allocated with a __libc_memalign() call in allocate_and_init() +// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those +// blocks, but we can make sure they come from our own allocator by intercepting +// __libc_memalign(). On top of that, there is no easy way to reach them. Their +// addresses are stored in a dynamically allocated array (the DTV) which is +// referenced from the static TLS. Unfortunately, we can't just rely on the DTV +// being reachable from the static TLS, and the dynamic TLS being reachable from +// the DTV. This is because the initial DTV is allocated before our interception +// mechanism kicks in, and thus we don't recognize it as allocated memory. We +// can't special-case it either, since we don't know its size. +// Our solution is to include in the root set all allocations made from +// ld-linux.so (which is where allocate_and_init() is implemented). This is +// guaranteed to include all dynamic TLS blocks (and possibly other allocations +// which we don't care about). +// On all other platforms, this simply checks to ensure that the caller pc is +// valid before reporting chunks as leaked. +bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) { + uptr caller_pc = GetCallerPC(stack); + // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark + // it as reachable, as we can't properly report its allocation stack anyway. 
+ return !caller_pc || + (suppress_module && suppress_module->containsAddress(caller_pc)); +} + +bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack, + uptr hit_count, uptr total_size) { + for (uptr i = 0; i < stack.size; i++) { + Suppression *s = GetSuppressionForAddr( + StackTrace::GetPreviousInstructionPc(stack.trace[i])); + if (s) { + s->weight += total_size; + atomic_fetch_add(&s->hit_count, hit_count, memory_order_relaxed); + return true; + } + } + return false; +} + +bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count, + uptr total_size) { + LazyInit(); + StackTrace stack = StackDepotGet(stack_trace_id); + if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size)) + return false; + suppressed_stacks_sorted = false; + suppressed_stacks.push_back(stack_trace_id); + return true; +} + static LeakSuppressionContext *GetSuppressionContext() { CHECK(suppression_ctx); return suppression_ctx; @@ -146,9 +233,9 @@ void InitCommonLsan() { } } -class Decorator: public __sanitizer::SanitizerCommonDecorator { +class Decorator : public __sanitizer::SanitizerCommonDecorator { public: - Decorator() : SanitizerCommonDecorator() { } + Decorator() : SanitizerCommonDecorator() {} const char *Error() { return Red(); } const char *Leak() { return Blue(); } }; @@ -157,19 +244,19 @@ static inline bool CanBeAHeapPointer(uptr p) { // Since our heap is located in mmap-ed memory, we can assume a sensible lower // bound on heap addresses. const uptr kMinAddress = 4 * 4096; - if (p < kMinAddress) return false; -#if defined(__x86_64__) + if (p < kMinAddress) + return false; +# if defined(__x86_64__) // Accept only canonical form user-space addresses. return ((p >> 47) == 0); -#elif defined(__mips64) +# elif defined(__mips64) return ((p >> 40) == 0); -#elif defined(__aarch64__) - unsigned runtimeVMA = - (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1); +# elif defined(__aarch64__) + unsigned runtimeVMA = (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1); return ((p >> runtimeVMA) == 0); -#else +# else return true; -#endif +# endif } // Scans the memory range, looking for byte patterns that point into allocator @@ -178,8 +265,7 @@ static inline bool CanBeAHeapPointer(uptr p) { // (|tag| = kReachable) and finding indirectly leaked chunks // (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill, // so |frontier| = 0. -void ScanRangeForPointers(uptr begin, uptr end, - Frontier *frontier, +void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier, const char *region_type, ChunkTag tag) { CHECK(tag == kReachable || tag == kIndirectlyLeaked); const uptr alignment = flags()->pointer_alignment(); @@ -190,13 +276,17 @@ void ScanRangeForPointers(uptr begin, uptr end, pp = pp + alignment - pp % alignment; for (; pp + sizeof(void *) <= end; pp += alignment) { void *p = *reinterpret_cast(pp); - if (!CanBeAHeapPointer(reinterpret_cast(p))) continue; + if (!CanBeAHeapPointer(reinterpret_cast(p))) + continue; uptr chunk = PointsIntoChunk(p); - if (!chunk) continue; + if (!chunk) + continue; // Pointers to self don't count. This matters when tag == kIndirectlyLeaked. - if (chunk == begin) continue; + if (chunk == begin) + continue; LsanMetadata m(chunk); - if (m.tag() == kReachable || m.tag() == kIgnored) continue; + if (m.tag() == kReachable || m.tag() == kIgnored) + continue; // Do this check relatively late so we can log only the interesting cases. 
if (!flags()->use_poisoned && WordIsPoisoned(pp)) { @@ -234,23 +324,23 @@ void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) { } } -void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) { +void ForEachExtraStackRangeCb(uptr begin, uptr end, void *arg) { Frontier *frontier = reinterpret_cast(arg); ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable); } -#if SANITIZER_FUCHSIA +# if SANITIZER_FUCHSIA // Fuchsia handles all threads together with its own callback. static void ProcessThreads(SuspendedThreadsList const &, Frontier *) {} -#else +# else -#if SANITIZER_ANDROID +# if SANITIZER_ANDROID // FIXME: Move this out into *libcdep.cpp extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls( pid_t, void (*cb)(void *, void *, uptr, void *), void *); -#endif +# endif static void ProcessThreadRegistry(Frontier *frontier) { InternalMmapVector ptrs; @@ -282,9 +372,9 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads, LOG_THREADS("Processing thread %llu.\n", os_id); uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end; DTLS *dtls; - bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end, - &tls_begin, &tls_end, - &cache_begin, &cache_end, &dtls); + bool thread_found = + GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin, + &tls_end, &cache_begin, &cache_end, &dtls); if (!thread_found) { // If a thread can't be found in the thread registry, it's probably in the // process of destruction. Log this event and move on. @@ -298,7 +388,8 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads, Report("Unable to get registers from thread %llu.\n", os_id); // If unable to get SP, consider the entire stack to be reachable unless // GetRegistersAndSP failed with ESRCH. - if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue; + if (have_registers == REGISTERS_UNAVAILABLE_FATAL) + continue; sp = stack_begin; } @@ -353,7 +444,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads, kReachable); } } -#if SANITIZER_ANDROID +# if SANITIZER_ANDROID auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_idd*/, void *arg) -> void { ScanRangeForPointers(reinterpret_cast(dtls_begin), @@ -366,7 +457,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads, // thread is suspended in the middle of updating its DTLS. IOWs, we // could scan already freed memory. (probably fine for now) __libc_iterate_dynamic_tls(os_id, cb, frontier); -#else +# else if (dtls && !DTLSInDestruction(dtls)) { ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) { uptr dtls_beg = dtv.beg; @@ -383,7 +474,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads, // this and continue. 
LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id); } -#endif +# endif } } @@ -391,13 +482,14 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads, ProcessThreadRegistry(frontier); } -#endif // SANITIZER_FUCHSIA +# endif // SANITIZER_FUCHSIA void ScanRootRegion(Frontier *frontier, const RootRegion &root_region, uptr region_begin, uptr region_end, bool is_readable) { uptr intersection_begin = Max(root_region.begin, region_begin); uptr intersection_end = Min(region_end, root_region.begin + root_region.size); - if (intersection_begin >= intersection_end) return; + if (intersection_begin >= intersection_end) + return; LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n", (void *)root_region.begin, (void *)(root_region.begin + root_region.size), @@ -420,7 +512,8 @@ static void ProcessRootRegion(Frontier *frontier, // Scans root regions for heap pointers. static void ProcessRootRegions(Frontier *frontier) { - if (!flags()->use_root_regions) return; + if (!flags()->use_root_regions) + return; for (uptr i = 0; i < root_regions.size(); i++) ProcessRootRegion(frontier, root_regions[i]); } @@ -477,68 +570,6 @@ static void CollectIgnoredCb(uptr chunk, void *arg) { } } -static uptr GetCallerPC(const StackTrace &stack) { - // The top frame is our malloc/calloc/etc. The next frame is the caller. - if (stack.size >= 2) - return stack.trace[1]; - return 0; -} - -struct InvalidPCParam { - Frontier *frontier; - bool skip_linker_allocations; -}; - -// ForEachChunk callback. If the caller pc is invalid or is within the linker, -// mark as reachable. Called by ProcessPlatformSpecificAllocations. -static void MarkInvalidPCCb(uptr chunk, void *arg) { - CHECK(arg); - InvalidPCParam *param = reinterpret_cast(arg); - chunk = GetUserBegin(chunk); - LsanMetadata m(chunk); - if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) { - u32 stack_id = m.stack_trace_id(); - uptr caller_pc = 0; - if (stack_id > 0) - caller_pc = GetCallerPC(StackDepotGet(stack_id)); - // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark - // it as reachable, as we can't properly report its allocation stack anyway. - if (caller_pc == 0 || (param->skip_linker_allocations && - GetLinker()->containsAddress(caller_pc))) { - m.set_tag(kReachable); - param->frontier->push_back(chunk); - } - } -} - -// On Linux, treats all chunks allocated from ld-linux.so as reachable, which -// covers dynamically allocated TLS blocks, internal dynamic loader's loaded -// modules accounting etc. -// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules. -// They are allocated with a __libc_memalign() call in allocate_and_init() -// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those -// blocks, but we can make sure they come from our own allocator by intercepting -// __libc_memalign(). On top of that, there is no easy way to reach them. Their -// addresses are stored in a dynamically allocated array (the DTV) which is -// referenced from the static TLS. Unfortunately, we can't just rely on the DTV -// being reachable from the static TLS, and the dynamic TLS being reachable from -// the DTV. This is because the initial DTV is allocated before our interception -// mechanism kicks in, and thus we don't recognize it as allocated memory. We -// can't special-case it either, since we don't know its size. 
-// Our solution is to include in the root set all allocations made from -// ld-linux.so (which is where allocate_and_init() is implemented). This is -// guaranteed to include all dynamic TLS blocks (and possibly other allocations -// which we don't care about). -// On all other platforms, this simply checks to ensure that the caller pc is -// valid before reporting chunks as leaked. -static void ProcessPC(Frontier *frontier) { - InvalidPCParam arg; - arg.frontier = frontier; - arg.skip_linker_allocations = - flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr; - ForEachChunk(MarkInvalidPCCb, &arg); -} - // Sets the appropriate tag on each chunk. static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads, Frontier *frontier) { @@ -554,9 +585,6 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads, ProcessRootRegions(frontier); FloodFillTag(frontier, kReachable); - CHECK_EQ(0, frontier->size()); - ProcessPC(frontier); - // The check here is relatively expensive, so we do this in a separate flood // fill. That way we can skip the check for chunks that are reachable // otherwise. @@ -583,14 +611,13 @@ static void ResetTagsCb(uptr chunk, void *arg) { // a LeakReport. static void CollectLeaksCb(uptr chunk, void *arg) { CHECK(arg); - LeakReport *leak_report = reinterpret_cast(arg); + LeakedChunks *leaks = reinterpret_cast(arg); chunk = GetUserBegin(chunk); LsanMetadata m(chunk); - if (!m.allocated()) return; - if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) { - leak_report->AddLeakedChunk(chunk, m.stack_trace_id(), m.requested_size(), - m.tag()); - } + if (!m.allocated()) + return; + if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) + leaks->push_back({chunk, m.stack_trace_id(), m.requested_size(), m.tag()}); } void LeakSuppressionContext::PrintMatchedSuppressions() { @@ -622,13 +649,13 @@ static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) { } } -#if SANITIZER_FUCHSIA +# if SANITIZER_FUCHSIA // Fuchsia provides a libc interface that guarantees all threads are // covered, and SuspendedThreadList is never really used. static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {} -#else // !SANITIZER_FUCHSIA +# else // !SANITIZER_FUCHSIA static void ReportUnsuspendedThreads( const SuspendedThreadsList &suspended_threads) { @@ -642,7 +669,7 @@ static void ReportUnsuspendedThreads( &ReportIfNotSuspended, &threads); } -#endif // !SANITIZER_FUCHSIA +# endif // !SANITIZER_FUCHSIA static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads, void *arg) { @@ -651,7 +678,7 @@ static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads, CHECK(!param->success); ReportUnsuspendedThreads(suspended_threads); ClassifyAllChunks(suspended_threads, ¶m->frontier); - ForEachChunk(CollectLeaksCb, ¶m->leak_report); + ForEachChunk(CollectLeaksCb, ¶m->leaks); // Clean up for subsequent leak checks. This assumes we did not overwrite any // kIgnored tags. ForEachChunk(ResetTagsCb, nullptr); @@ -700,17 +727,20 @@ static bool CheckForLeaks() { "etc)\n"); Die(); } + LeakReport leak_report; + leak_report.AddLeakedChunks(param.leaks); + // No new suppressions stacks, so rerun will not help and we can report. - if (!param.leak_report.ApplySuppressions()) - return PrintResults(param.leak_report); + if (!leak_report.ApplySuppressions()) + return PrintResults(leak_report); // No indirect leaks to report, so we are done here. 
- if (!param.leak_report.IndirectUnsuppressedLeakCount()) - return PrintResults(param.leak_report); + if (!leak_report.IndirectUnsuppressedLeakCount()) + return PrintResults(leak_report); if (i >= 8) { Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n"); - return PrintResults(param.leak_report); + return PrintResults(leak_report); } // We found a new previously unseen suppressed call stack. Rerun to make @@ -726,10 +756,12 @@ bool HasReportedLeaks() { return has_reported_leaks; } void DoLeakCheck() { Lock l(&global_mutex); static bool already_done; - if (already_done) return; + if (already_done) + return; already_done = true; has_reported_leaks = CheckForLeaks(); - if (has_reported_leaks) HandleLeaks(); + if (has_reported_leaks) + HandleLeaks(); } static int DoRecoverableLeakCheck() { @@ -740,80 +772,50 @@ static int DoRecoverableLeakCheck() { void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); } -Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) { - Suppression *s = nullptr; - - // Suppress by module name. - if (const char *module_name = - Symbolizer::GetOrInit()->GetModuleNameForPc(addr)) - if (context.Match(module_name, kSuppressionLeak, &s)) - return s; - - // Suppress by file or function name. - SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr); - for (SymbolizedStack *cur = frames; cur; cur = cur->next) { - if (context.Match(cur->info.function, kSuppressionLeak, &s) || - context.Match(cur->info.file, kSuppressionLeak, &s)) { - break; - } - } - frames->ClearAll(); - return s; -} - -Suppression *LeakSuppressionContext::GetSuppressionForStack( - u32 stack_trace_id, const StackTrace &stack) { - LazyInit(); - for (uptr i = 0; i < stack.size; i++) { - Suppression *s = GetSuppressionForAddr( - StackTrace::GetPreviousInstructionPc(stack.trace[i])); - if (s) { - suppressed_stacks_sorted = false; - suppressed_stacks.push_back(stack_trace_id); - return s; - } - } - return nullptr; -} - ///// LeakReport implementation. ///// // A hard limit on the number of distinct leaks, to avoid quadratic complexity // in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks // in real-world applications. -// FIXME: Get rid of this limit by changing the implementation of LeakReport to -// use a hash table. +// FIXME: Get rid of this limit by moving logic into DedupLeaks. 
const uptr kMaxLeaksConsidered = 5000; -void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id, - uptr leaked_size, ChunkTag tag) { - CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked); - - if (u32 resolution = flags()->resolution) { - StackTrace stack = StackDepotGet(stack_trace_id); - stack.size = Min(stack.size, resolution); - stack_trace_id = StackDepotPut(stack); - } +void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) { + for (const LeakedChunk &leak : chunks) { + uptr chunk = leak.chunk; + u32 stack_trace_id = leak.stack_trace_id; + uptr leaked_size = leak.leaked_size; + ChunkTag tag = leak.tag; + CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked); + + if (u32 resolution = flags()->resolution) { + StackTrace stack = StackDepotGet(stack_trace_id); + stack.size = Min(stack.size, resolution); + stack_trace_id = StackDepotPut(stack); + } - bool is_directly_leaked = (tag == kDirectlyLeaked); - uptr i; - for (i = 0; i < leaks_.size(); i++) { - if (leaks_[i].stack_trace_id == stack_trace_id && - leaks_[i].is_directly_leaked == is_directly_leaked) { - leaks_[i].hit_count++; - leaks_[i].total_size += leaked_size; - break; + bool is_directly_leaked = (tag == kDirectlyLeaked); + uptr i; + for (i = 0; i < leaks_.size(); i++) { + if (leaks_[i].stack_trace_id == stack_trace_id && + leaks_[i].is_directly_leaked == is_directly_leaked) { + leaks_[i].hit_count++; + leaks_[i].total_size += leaked_size; + break; + } + } + if (i == leaks_.size()) { + if (leaks_.size() == kMaxLeaksConsidered) + return; + Leak leak = {next_id_++, /* hit_count */ 1, + leaked_size, stack_trace_id, + is_directly_leaked, /* is_suppressed */ false}; + leaks_.push_back(leak); + } + if (flags()->report_objects) { + LeakedObject obj = {leaks_[i].id, chunk, leaked_size}; + leaked_objects_.push_back(obj); } - } - if (i == leaks_.size()) { - if (leaks_.size() == kMaxLeaksConsidered) return; - Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id, - is_directly_leaked, /* is_suppressed */ false }; - leaks_.push_back(leak); - } - if (flags()->report_objects) { - LeakedObject obj = {leaks_[i].id, chunk, leaked_size}; - leaked_objects_.push_back(obj); } } @@ -828,9 +830,10 @@ void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) { CHECK(leaks_.size() <= kMaxLeaksConsidered); Printf("\n"); if (leaks_.size() == kMaxLeaksConsidered) - Printf("Too many leaks! Only the first %zu leaks encountered will be " - "reported.\n", - kMaxLeaksConsidered); + Printf( + "Too many leaks! 
Only the first %zu leaks encountered will be " + "reported.\n", + kMaxLeaksConsidered); uptr unsuppressed_count = UnsuppressedLeakCount(); if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count) @@ -838,10 +841,12 @@ void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) { Sort(leaks_.data(), leaks_.size(), &LeakComparator); uptr leaks_reported = 0; for (uptr i = 0; i < leaks_.size(); i++) { - if (leaks_[i].is_suppressed) continue; + if (leaks_[i].is_suppressed) + continue; PrintReportForLeak(i); leaks_reported++; - if (leaks_reported == num_leaks_to_report) break; + if (leaks_reported == num_leaks_to_report) + break; } if (leaks_reported < unsuppressed_count) { uptr remaining = unsuppressed_count - leaks_reported; @@ -880,9 +885,10 @@ void LeakReport::PrintSummary() { CHECK(leaks_.size() <= kMaxLeaksConsidered); uptr bytes = 0, allocations = 0; for (uptr i = 0; i < leaks_.size(); i++) { - if (leaks_[i].is_suppressed) continue; - bytes += leaks_[i].total_size; - allocations += leaks_[i].hit_count; + if (leaks_[i].is_suppressed) + continue; + bytes += leaks_[i].total_size; + allocations += leaks_[i].hit_count; } InternalScopedString summary; summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes, @@ -894,12 +900,8 @@ uptr LeakReport::ApplySuppressions() { LeakSuppressionContext *suppressions = GetSuppressionContext(); uptr new_suppressions = false; for (uptr i = 0; i < leaks_.size(); i++) { - Suppression *s = suppressions->GetSuppressionForStack( - leaks_[i].stack_trace_id, StackDepotGet(leaks_[i].stack_trace_id)); - if (s) { - s->weight += leaks_[i].total_size; - atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) + - leaks_[i].hit_count); + if (suppressions->Suppress(leaks_[i].stack_trace_id, leaks_[i].hit_count, + leaks_[i].total_size)) { leaks_[i].is_suppressed = true; ++new_suppressions; } @@ -910,7 +912,8 @@ uptr LeakReport::ApplySuppressions() { uptr LeakReport::UnsuppressedLeakCount() { uptr result = 0; for (uptr i = 0; i < leaks_.size(); i++) - if (!leaks_[i].is_suppressed) result++; + if (!leaks_[i].is_suppressed) + result++; return result; } @@ -922,16 +925,16 @@ uptr LeakReport::IndirectUnsuppressedLeakCount() { return result; } -} // namespace __lsan -#else // CAN_SANITIZE_LEAKS +} // namespace __lsan +#else // CAN_SANITIZE_LEAKS namespace __lsan { -void InitCommonLsan() { } -void DoLeakCheck() { } -void DoRecoverableLeakCheckVoid() { } -void DisableInThisThread() { } -void EnableInThisThread() { } -} -#endif // CAN_SANITIZE_LEAKS +void InitCommonLsan() {} +void DoLeakCheck() {} +void DoRecoverableLeakCheckVoid() {} +void DisableInThisThread() {} +void EnableInThisThread() {} +} // namespace __lsan +#endif // CAN_SANITIZE_LEAKS using namespace __lsan; @@ -948,11 +951,13 @@ void __lsan_ignore_object(const void *p) { if (res == kIgnoreObjectInvalid) VReport(1, "__lsan_ignore_object(): no heap object found at %p", p); if (res == kIgnoreObjectAlreadyIgnored) - VReport(1, "__lsan_ignore_object(): " - "heap object at %p is already being ignored\n", p); + VReport(1, + "__lsan_ignore_object(): " + "heap object at %p is already being ignored\n", + p); if (res == kIgnoreObjectSuccess) VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p); -#endif // CAN_SANITIZE_LEAKS +#endif // CAN_SANITIZE_LEAKS } SANITIZER_INTERFACE_ATTRIBUTE @@ -962,7 +967,7 @@ void __lsan_register_root_region(const void *begin, uptr size) { RootRegion region = {reinterpret_cast(begin), size}; root_regions.push_back(region); VReport(1, "Registered root 
region at %p of size %zu\n", begin, size); -#endif // CAN_SANITIZE_LEAKS +#endif // CAN_SANITIZE_LEAKS } SANITIZER_INTERFACE_ATTRIBUTE @@ -988,7 +993,7 @@ void __lsan_unregister_root_region(const void *begin, uptr size) { begin, size); Die(); } -#endif // CAN_SANITIZE_LEAKS +#endif // CAN_SANITIZE_LEAKS } SANITIZER_INTERFACE_ATTRIBUTE @@ -1010,7 +1015,7 @@ void __lsan_do_leak_check() { #if CAN_SANITIZE_LEAKS if (common_flags()->detect_leaks) __lsan::DoLeakCheck(); -#endif // CAN_SANITIZE_LEAKS +#endif // CAN_SANITIZE_LEAKS } SANITIZER_INTERFACE_ATTRIBUTE @@ -1018,7 +1023,7 @@ int __lsan_do_recoverable_leak_check() { #if CAN_SANITIZE_LEAKS if (common_flags()->detect_leaks) return __lsan::DoRecoverableLeakCheck(); -#endif // CAN_SANITIZE_LEAKS +#endif // CAN_SANITIZE_LEAKS return 0; } @@ -1027,14 +1032,14 @@ SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) { } #if !SANITIZER_SUPPORTS_WEAK_HOOKS -SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE -int __lsan_is_turned_off() { +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE int +__lsan_is_turned_off() { return 0; } -SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE -const char *__lsan_default_suppressions() { +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char * +__lsan_default_suppressions() { return ""; } #endif -} // extern "C" +} // extern "C" diff --git a/libsanitizer/lsan/lsan_common.h b/libsanitizer/lsan/lsan_common.h index f9b55e4e800..6b06c4517cd 100644 --- a/libsanitizer/lsan/lsan_common.h +++ b/libsanitizer/lsan/lsan_common.h @@ -33,21 +33,21 @@ // Exclude leak-detection on arm32 for Android because `__aeabi_read_tp` // is missing. This caused a link error. #if SANITIZER_ANDROID && (__ANDROID_API__ < 28 || defined(__arm__)) -#define CAN_SANITIZE_LEAKS 0 +# define CAN_SANITIZE_LEAKS 0 #elif (SANITIZER_LINUX || SANITIZER_MAC) && (SANITIZER_WORDSIZE == 64) && \ (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \ defined(__powerpc64__) || defined(__s390x__)) -#define CAN_SANITIZE_LEAKS 1 +# define CAN_SANITIZE_LEAKS 1 #elif defined(__i386__) && (SANITIZER_LINUX || SANITIZER_MAC) -#define CAN_SANITIZE_LEAKS 1 +# define CAN_SANITIZE_LEAKS 1 #elif defined(__arm__) && SANITIZER_LINUX -#define CAN_SANITIZE_LEAKS 1 +# define CAN_SANITIZE_LEAKS 1 #elif SANITIZER_RISCV64 && SANITIZER_LINUX -#define CAN_SANITIZE_LEAKS 1 +# define CAN_SANITIZE_LEAKS 1 #elif SANITIZER_NETBSD || SANITIZER_FUCHSIA -#define CAN_SANITIZE_LEAKS 1 +# define CAN_SANITIZE_LEAKS 1 #else -#define CAN_SANITIZE_LEAKS 0 +# define CAN_SANITIZE_LEAKS 0 #endif namespace __sanitizer { @@ -82,6 +82,15 @@ extern Flags lsan_flags; inline Flags *flags() { return &lsan_flags; } void RegisterLsanFlags(FlagParser *parser, Flags *f); +struct LeakedChunk { + uptr chunk; + u32 stack_trace_id; + uptr leaked_size; + ChunkTag tag; +}; + +using LeakedChunks = InternalMmapVector<LeakedChunk>; + struct Leak { u32 id; uptr hit_count; @@ -101,8 +110,7 @@ struct LeakedObject { class LeakReport { public: LeakReport() {} - void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size, - ChunkTag tag); + void AddLeakedChunks(const LeakedChunks &chunks); void ReportTopLeaks(uptr max_leaks); void PrintSummary(); uptr ApplySuppressions(); @@ -136,7 +144,7 @@ struct RootRegion { // threads and enumerating roots. struct CheckForLeaksParam { Frontier frontier; - LeakReport leak_report; + LeakedChunks leaks; bool success = false; }; @@ -222,8 +230,24 @@ void UnlockAllocator(); // Returns true if [addr, addr + sizeof(void *)) is poisoned.
bool WordIsPoisoned(uptr addr); // Wrappers for ThreadRegistry access. -void LockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS; -void UnlockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS; +void LockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS; +void UnlockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS; + +struct ScopedStopTheWorldLock { + ScopedStopTheWorldLock() { + LockThreadRegistry(); + LockAllocator(); + } + + ~ScopedStopTheWorldLock() { + UnlockAllocator(); + UnlockThreadRegistry(); + } + + ScopedStopTheWorldLock &operator=(const ScopedStopTheWorldLock &) = delete; + ScopedStopTheWorldLock(const ScopedStopTheWorldLock &) = delete; +}; + ThreadRegistry *GetThreadRegistryLocked(); bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end, uptr *tls_begin, uptr *tls_end, uptr *cache_begin, diff --git a/libsanitizer/lsan/lsan_common_fuchsia.cpp b/libsanitizer/lsan/lsan_common_fuchsia.cpp index 2d35fa5b1cf..edb4ca6c857 100644 --- a/libsanitizer/lsan/lsan_common_fuchsia.cpp +++ b/libsanitizer/lsan/lsan_common_fuchsia.cpp @@ -52,14 +52,22 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) {} // behavior and causes rare race conditions. void HandleLeaks() {} +// This is defined differently in asan_fuchsia.cpp and lsan_fuchsia.cpp. +bool UseExitcodeOnLeak(); + int ExitHook(int status) { + if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) { + if (UseExitcodeOnLeak()) + DoLeakCheck(); + else + DoRecoverableLeakCheckVoid(); + } return status == 0 && HasReportedLeaks() ? common_flags()->exitcode : status; } void LockStuffAndStopTheWorld(StopTheWorldCallback callback, CheckForLeaksParam *argument) { - LockThreadRegistry(); - LockAllocator(); + ScopedStopTheWorldLock lock; struct Params { InternalMmapVector allocator_caches; @@ -149,9 +157,6 @@ void LockStuffAndStopTheWorld(StopTheWorldCallback callback, params->callback(SuspendedThreadsListFuchsia(), params->argument); }, &params); - - UnlockAllocator(); - UnlockThreadRegistry(); } } // namespace __lsan diff --git a/libsanitizer/lsan/lsan_common_linux.cpp b/libsanitizer/lsan/lsan_common_linux.cpp index 3af586e220f..692ad35169e 100644 --- a/libsanitizer/lsan/lsan_common_linux.cpp +++ b/libsanitizer/lsan/lsan_common_linux.cpp @@ -122,12 +122,9 @@ void HandleLeaks() { static int LockStuffAndStopTheWorldCallback(struct dl_phdr_info *info, size_t size, void *data) { - LockThreadRegistry(); - LockAllocator(); + ScopedStopTheWorldLock lock; DoStopTheWorldParam *param = reinterpret_cast<DoStopTheWorldParam *>(data); StopTheWorld(param->callback, param->argument); - UnlockAllocator(); - UnlockThreadRegistry(); return 1; } diff --git a/libsanitizer/lsan/lsan_common_mac.cpp b/libsanitizer/lsan/lsan_common_mac.cpp index 4301dcc615d..a4204740c7f 100644 --- a/libsanitizer/lsan/lsan_common_mac.cpp +++ b/libsanitizer/lsan/lsan_common_mac.cpp @@ -143,16 +143,16 @@ void ProcessGlobalRegions(Frontier *frontier) { } void ProcessPlatformSpecificAllocations(Frontier *frontier) { - unsigned depth = 1; - vm_size_t size = 0; vm_address_t address = 0; kern_return_t err = KERN_SUCCESS; - mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64; InternalMmapVectorNoCtor<RootRegion> const *root_regions = GetRootRegions(); while (err == KERN_SUCCESS) { + vm_size_t size = 0; + unsigned depth = 1; struct vm_region_submap_info_64 info; + mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64; err = vm_region_recurse_64(mach_task_self(), &address, &size, &depth, (vm_region_info_t)&info, &count); @@ -195,11 +195,8 @@ void HandleLeaks() {} void
LockStuffAndStopTheWorld(StopTheWorldCallback callback, CheckForLeaksParam *argument) { - LockThreadRegistry(); - LockAllocator(); + ScopedStopTheWorldLock lock; StopTheWorld(callback, argument); - UnlockAllocator(); - UnlockThreadRegistry(); } } // namespace __lsan diff --git a/libsanitizer/lsan/lsan_fuchsia.cpp b/libsanitizer/lsan/lsan_fuchsia.cpp index 40e65c6fb72..2d96206754a 100644 --- a/libsanitizer/lsan/lsan_fuchsia.cpp +++ b/libsanitizer/lsan/lsan_fuchsia.cpp @@ -62,7 +62,7 @@ void InitializeMainThread() { OnCreatedArgs args; __sanitizer::GetThreadStackTopAndBottom(true, &args.stack_end, &args.stack_begin); - u32 tid = ThreadCreate(0, GetThreadSelf(), true, &args); + u32 tid = ThreadCreate(kMainTid, true, &args); CHECK_EQ(tid, 0); ThreadStart(tid); } @@ -76,6 +76,13 @@ void GetAllThreadAllocatorCachesLocked(InternalMmapVector *caches) { caches); } +// On Fuchsia, leak detection is done by a special hook after atexit hooks. +// So this doesn't install any atexit hook like on other platforms. +void InstallAtExitCheckLeaks() {} + +// ASan defines this to check its `halt_on_error` flag. +bool UseExitcodeOnLeak() { return true; } + } // namespace __lsan // These are declared (in extern "C") by . @@ -86,14 +93,13 @@ void GetAllThreadAllocatorCachesLocked(InternalMmapVector *caches) { void *__sanitizer_before_thread_create_hook(thrd_t thread, bool detached, const char *name, void *stack_base, size_t stack_size) { - uptr user_id = reinterpret_cast(thread); ENSURE_LSAN_INITED; EnsureMainThreadIDIsCorrect(); OnCreatedArgs args; args.stack_begin = reinterpret_cast(stack_base); args.stack_end = args.stack_begin + stack_size; u32 parent_tid = GetCurrentThread(); - u32 tid = ThreadCreate(parent_tid, user_id, detached, &args); + u32 tid = ThreadCreate(parent_tid, detached, &args); return reinterpret_cast(static_cast(tid)); } diff --git a/libsanitizer/lsan/lsan_interceptors.cpp b/libsanitizer/lsan/lsan_interceptors.cpp index 22999d567f6..205e85685a7 100644 --- a/libsanitizer/lsan/lsan_interceptors.cpp +++ b/libsanitizer/lsan/lsan_interceptors.cpp @@ -468,8 +468,7 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr, res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p); } if (res == 0) { - int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th, - IsStateDetached(detached)); + int tid = ThreadCreate(GetCurrentThread(), IsStateDetached(detached)); CHECK_NE(tid, kMainTid); atomic_store(&p.tid, tid, memory_order_release); while (atomic_load(&p.tid, memory_order_acquire) != 0) @@ -480,23 +479,11 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr, return res; } -INTERCEPTOR(int, pthread_join, void *th, void **ret) { - ENSURE_LSAN_INITED; - int tid = ThreadTid((uptr)th); - int res = REAL(pthread_join)(th, ret); - if (res == 0) - ThreadJoin(tid); - return res; +INTERCEPTOR(int, pthread_join, void *t, void **arg) { + return REAL(pthread_join)(t, arg); } -INTERCEPTOR(int, pthread_detach, void *th) { - ENSURE_LSAN_INITED; - int tid = ThreadTid((uptr)th); - int res = REAL(pthread_detach)(th); - if (res == 0) - ThreadDetach(tid); - return res; -} +DEFINE_REAL_PTHREAD_FUNCTIONS INTERCEPTOR(void, _exit, int status) { if (status == 0 && HasReportedLeaks()) status = common_flags()->exitcode; @@ -530,7 +517,6 @@ void InitializeInterceptors() { LSAN_MAYBE_INTERCEPT_MALLINFO; LSAN_MAYBE_INTERCEPT_MALLOPT; INTERCEPT_FUNCTION(pthread_create); - INTERCEPT_FUNCTION(pthread_detach); INTERCEPT_FUNCTION(pthread_join); INTERCEPT_FUNCTION(_exit); diff --git a/libsanitizer/lsan/lsan_mac.cpp 
b/libsanitizer/lsan/lsan_mac.cpp index b96893e2801..10a73f8fa93 100644 --- a/libsanitizer/lsan/lsan_mac.cpp +++ b/libsanitizer/lsan/lsan_mac.cpp @@ -68,7 +68,7 @@ typedef struct { ALWAYS_INLINE void lsan_register_worker_thread(int parent_tid) { if (GetCurrentThread() == kInvalidTid) { - u32 tid = ThreadCreate(parent_tid, 0, true); + u32 tid = ThreadCreate(parent_tid, true); ThreadStart(tid, GetTid()); SetCurrentThread(tid); } diff --git a/libsanitizer/lsan/lsan_posix.cpp b/libsanitizer/lsan/lsan_posix.cpp index 5d1c3f6260d..8f277db2237 100644 --- a/libsanitizer/lsan/lsan_posix.cpp +++ b/libsanitizer/lsan/lsan_posix.cpp @@ -75,7 +75,7 @@ bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end, } void InitializeMainThread() { - u32 tid = ThreadCreate(kMainTid, 0, true); + u32 tid = ThreadCreate(kMainTid, true); CHECK_EQ(tid, kMainTid); ThreadStart(tid, GetTid()); } @@ -91,6 +91,11 @@ void LsanOnDeadlySignal(int signo, void *siginfo, void *context) { nullptr); } +void InstallAtExitCheckLeaks() { + if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) + Atexit(DoLeakCheck); +} + } // namespace __lsan #endif // SANITIZER_POSIX diff --git a/libsanitizer/lsan/lsan_thread.cpp b/libsanitizer/lsan/lsan_thread.cpp index 1d224ebca69..ca3dfd03b10 100644 --- a/libsanitizer/lsan/lsan_thread.cpp +++ b/libsanitizer/lsan/lsan_thread.cpp @@ -44,8 +44,8 @@ void ThreadContextLsanBase::OnFinished() { DTLS_Destroy(); } -u32 ThreadCreate(u32 parent_tid, uptr user_id, bool detached, void *arg) { - return thread_registry->CreateThread(user_id, detached, parent_tid, arg); +u32 ThreadCreate(u32 parent_tid, bool detached, void *arg) { + return thread_registry->CreateThread(0, detached, parent_tid, arg); } void ThreadContextLsanBase::ThreadStart(u32 tid, tid_t os_id, @@ -68,28 +68,6 @@ ThreadContext *CurrentThreadContext() { return (ThreadContext *)thread_registry->GetThreadLocked(GetCurrentThread()); } -static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) { - uptr uid = (uptr)arg; - if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) { - return true; - } - return false; -} - -u32 ThreadTid(uptr uid) { - return thread_registry->FindThread(FindThreadByUid, (void *)uid); -} - -void ThreadDetach(u32 tid) { - CHECK_NE(tid, kInvalidTid); - thread_registry->DetachThread(tid, /* arg */ nullptr); -} - -void ThreadJoin(u32 tid) { - CHECK_NE(tid, kInvalidTid); - thread_registry->JoinThread(tid, /* arg */ nullptr); -} - void EnsureMainThreadIDIsCorrect() { if (GetCurrentThread() == kMainTid) CurrentThreadContext()->os_id = GetTid(); diff --git a/libsanitizer/lsan/lsan_thread.h b/libsanitizer/lsan/lsan_thread.h index 36643753d01..6ab4172092a 100644 --- a/libsanitizer/lsan/lsan_thread.h +++ b/libsanitizer/lsan/lsan_thread.h @@ -45,11 +45,8 @@ class ThreadContext; void InitializeThreadRegistry(); void InitializeMainThread(); -u32 ThreadCreate(u32 tid, uptr uid, bool detached, void *arg = nullptr); +u32 ThreadCreate(u32 tid, bool detached, void *arg = nullptr); void ThreadFinish(); -void ThreadDetach(u32 tid); -void ThreadJoin(u32 tid); -u32 ThreadTid(uptr uid); u32 GetCurrentThread(); void SetCurrentThread(u32 tid); diff --git a/libsanitizer/sanitizer_common/sanitizer_addrhashmap.h b/libsanitizer/sanitizer_common/sanitizer_addrhashmap.h index 7e2fa91089f..fe48b9caf06 100644 --- a/libsanitizer/sanitizer_common/sanitizer_addrhashmap.h +++ b/libsanitizer/sanitizer_common/sanitizer_addrhashmap.h @@ -201,7 +201,8 @@ AddrHashMap::AddrHashMap() { } template -void 
AddrHashMap::acquire(Handle *h) NO_THREAD_SAFETY_ANALYSIS { +void AddrHashMap::acquire(Handle *h) + SANITIZER_NO_THREAD_SAFETY_ANALYSIS { uptr addr = h->addr_; uptr hash = calcHash(addr); Bucket *b = &table_[hash]; @@ -330,7 +331,8 @@ void AddrHashMap::acquire(Handle *h) NO_THREAD_SAFETY_ANALYSIS { } template - void AddrHashMap::release(Handle *h) NO_THREAD_SAFETY_ANALYSIS { + void AddrHashMap::release(Handle *h) + SANITIZER_NO_THREAD_SAFETY_ANALYSIS { if (!h->cell_) return; Bucket *b = h->bucket_; diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator.cpp b/libsanitizer/sanitizer_common/sanitizer_allocator.cpp index bcb7370a790..25a43a59f04 100644 --- a/libsanitizer/sanitizer_common/sanitizer_allocator.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_allocator.cpp @@ -17,6 +17,7 @@ #include "sanitizer_allocator_internal.h" #include "sanitizer_atomic.h" #include "sanitizer_common.h" +#include "sanitizer_platform.h" namespace __sanitizer { @@ -24,66 +25,6 @@ namespace __sanitizer { const char *PrimaryAllocatorName = "SizeClassAllocator"; const char *SecondaryAllocatorName = "LargeMmapAllocator"; -// ThreadSanitizer for Go uses libc malloc/free. -#if defined(SANITIZER_USE_MALLOC) -# if SANITIZER_LINUX && !SANITIZER_ANDROID -extern "C" void *__libc_malloc(uptr size); -# if !SANITIZER_GO -extern "C" void *__libc_memalign(uptr alignment, uptr size); -# endif -extern "C" void *__libc_realloc(void *ptr, uptr size); -extern "C" void __libc_free(void *ptr); -# else -# include -# define __libc_malloc malloc -# if !SANITIZER_GO -static void *__libc_memalign(uptr alignment, uptr size) { - void *p; - uptr error = posix_memalign(&p, alignment, size); - if (error) return nullptr; - return p; -} -# endif -# define __libc_realloc realloc -# define __libc_free free -# endif - -static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache, - uptr alignment) { - (void)cache; -#if !SANITIZER_GO - if (alignment == 0) - return __libc_malloc(size); - else - return __libc_memalign(alignment, size); -#else - // Windows does not provide __libc_memalign/posix_memalign. It provides - // __aligned_malloc, but the allocated blocks can't be passed to free, - // they need to be passed to __aligned_free. InternalAlloc interface does - // not account for such requirement. Alignemnt does not seem to be used - // anywhere in runtime, so just call __libc_malloc for now. 
- DCHECK_EQ(alignment, 0); - return __libc_malloc(size); -#endif -} - -static void *RawInternalRealloc(void *ptr, uptr size, - InternalAllocatorCache *cache) { - (void)cache; - return __libc_realloc(ptr, size); -} - -static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) { - (void)cache; - __libc_free(ptr); -} - -InternalAllocator *internal_allocator() { - return 0; -} - -#else // SANITIZER_GO || defined(SANITIZER_USE_MALLOC) - static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)]; static atomic_uint8_t internal_allocator_initialized; static StaticSpinMutex internal_alloc_init_mu; @@ -135,8 +76,6 @@ static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) { internal_allocator()->Deallocate(cache, ptr); } -#endif // SANITIZER_GO || defined(SANITIZER_USE_MALLOC) - static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) { SetAllocatorOutOfMemory(); Report("FATAL: %s: internal allocator is out of memory trying to allocate " @@ -187,6 +126,16 @@ void InternalFree(void *addr, InternalAllocatorCache *cache) { RawInternalFree(addr, cache); } +void InternalAllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { + internal_allocator_cache_mu.Lock(); + internal_allocator()->ForceLock(); +} + +void InternalAllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { + internal_allocator()->ForceUnlock(); + internal_allocator_cache_mu.Unlock(); +} + // LowLevelAllocator constexpr uptr kLowLevelAllocatorDefaultAlignment = 8; static uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment; @@ -247,4 +196,14 @@ void PrintHintAllocatorCannotReturnNull() { "allocator_may_return_null=1\n"); } +static atomic_uint8_t rss_limit_exceeded; + +bool IsRssLimitExceeded() { + return atomic_load(&rss_limit_exceeded, memory_order_relaxed); +} + +void SetRssLimitExceeded(bool limit_exceeded) { + atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed); +} + } // namespace __sanitizer diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator.h b/libsanitizer/sanitizer_common/sanitizer_allocator.h index ec23465d958..76b936ff5ea 100644 --- a/libsanitizer/sanitizer_common/sanitizer_allocator.h +++ b/libsanitizer/sanitizer_common/sanitizer_allocator.h @@ -70,6 +70,9 @@ inline void RandomShuffle(T *a, u32 n, u32 *rand_state) { #include "sanitizer_allocator_secondary.h" #include "sanitizer_allocator_combined.h" +bool IsRssLimitExceeded(); +void SetRssLimitExceeded(bool limit_exceeded); + } // namespace __sanitizer #endif // SANITIZER_ALLOCATOR_H diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_combined.h b/libsanitizer/sanitizer_common/sanitizer_allocator_combined.h index 9a3602f730b..b92cfa5bf4c 100644 --- a/libsanitizer/sanitizer_common/sanitizer_allocator_combined.h +++ b/libsanitizer/sanitizer_common/sanitizer_allocator_combined.h @@ -175,12 +175,12 @@ class CombinedAllocator { // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone // introspection API. 
- void ForceLock() NO_THREAD_SAFETY_ANALYSIS { + void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { primary_.ForceLock(); secondary_.ForceLock(); } - void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS { + void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { secondary_.ForceUnlock(); primary_.ForceUnlock(); } diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h b/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h index 32849036fd0..38994736877 100644 --- a/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h +++ b/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h @@ -48,6 +48,8 @@ void *InternalReallocArray(void *p, uptr count, uptr size, void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache = nullptr); void InternalFree(void *p, InternalAllocatorCache *cache = nullptr); +void InternalAllocatorLock(); +void InternalAllocatorUnlock(); InternalAllocator *internal_allocator(); } // namespace __sanitizer diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h b/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h index ae1b7e0d5f1..f2471efced6 100644 --- a/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h +++ b/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h @@ -238,13 +238,13 @@ class SizeClassAllocator32 { // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone // introspection API. - void ForceLock() NO_THREAD_SAFETY_ANALYSIS { + void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { for (uptr i = 0; i < kNumClasses; i++) { GetSizeClassInfo(i)->mutex.Lock(); } } - void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS { + void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { for (int i = kNumClasses - 1; i >= 0; i--) { GetSizeClassInfo(i)->mutex.Unlock(); } diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h b/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h index f917310cfeb..66ba71d325d 100644 --- a/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h +++ b/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h @@ -354,13 +354,13 @@ class SizeClassAllocator64 { // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone // introspection API. - void ForceLock() NO_THREAD_SAFETY_ANALYSIS { + void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { for (uptr i = 0; i < kNumClasses; i++) { GetRegionInfo(i)->mutex.Lock(); } } - void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS { + void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { for (int i = (int)kNumClasses - 1; i >= 0; i--) { GetRegionInfo(i)->mutex.Unlock(); } diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h b/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h index c24354cb5b2..48afb2a2983 100644 --- a/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h +++ b/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h @@ -267,9 +267,9 @@ class LargeMmapAllocator { // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone // introspection API. - void ForceLock() ACQUIRE(mutex_) { mutex_.Lock(); } + void ForceLock() SANITIZER_ACQUIRE(mutex_) { mutex_.Lock(); } - void ForceUnlock() RELEASE(mutex_) { mutex_.Unlock(); } + void ForceUnlock() SANITIZER_RELEASE(mutex_) { mutex_.Unlock(); } // Iterate over all existing chunks. // The allocator must be locked when calling this function. 
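A recurring change in the allocator hunks above is the SANITIZER_-prefixed spelling of the Clang thread-safety annotation macros (ACQUIRE, RELEASE, NO_THREAD_SAFETY_ANALYSIS, CHECK_LOCKED). Their definitions live in sanitizer_internal_defs.h, which this diff does not show, so the sketch below is an assumption about their usual shape rather than the verbatim upstream code; the ExampleAllocator type is likewise hypothetical, and it presumes a mutex type that carries a Clang capability annotation.

#if defined(__clang__)
#  define SANITIZER_ACQUIRE(...) \
     __attribute__((acquire_capability(__VA_ARGS__)))
#  define SANITIZER_RELEASE(...) \
     __attribute__((release_capability(__VA_ARGS__)))
#  define SANITIZER_NO_THREAD_SAFETY_ANALYSIS \
     __attribute__((no_thread_safety_analysis))
#else
#  define SANITIZER_ACQUIRE(...)
#  define SANITIZER_RELEASE(...)
#  define SANITIZER_NO_THREAD_SAFETY_ANALYSIS
#endif

// With clang -Wthread-safety, the annotated pair below is checked: every
// path that calls ForceLock() must eventually call ForceUnlock() on the
// same capability. Functions whose locking the analysis cannot model,
// such as InternalAllocatorLock() above (it takes two mutexes in a fixed
// order), opt out via SANITIZER_NO_THREAD_SAFETY_ANALYSIS instead.
class ExampleAllocator {  // hypothetical; stands in for the allocators above
 public:
  void ForceLock() SANITIZER_ACQUIRE(mutex_) { mutex_.Lock(); }
  void ForceUnlock() SANITIZER_RELEASE(mutex_) { mutex_.Unlock(); }

 private:
  StaticSpinMutex mutex_;  // assumed to be annotated as a Clang capability
};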
diff --git a/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h b/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h index ccf18f0786d..4318d64d16c 100644 --- a/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h +++ b/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h @@ -74,13 +74,12 @@ template inline bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp, typename T::Type xchg, memory_order mo) { - typedef typename T::Type Type; - Type cmpv = *cmp; - Type prev; - prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg); - if (prev == cmpv) return true; - *cmp = prev; - return false; + // Transitioned from __sync_val_compare_and_swap to support targets like + // SPARC V8 that cannot inline atomic cmpxchg. __atomic_compare_exchange + // can then be resolved from libatomic. __ATOMIC_SEQ_CST is used to best + // match the __sync builtin memory order. + return __atomic_compare_exchange(&a->val_dont_use, cmp, &xchg, false, + __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); } template diff --git a/libsanitizer/sanitizer_common/sanitizer_chained_origin_depot.cpp b/libsanitizer/sanitizer_common/sanitizer_chained_origin_depot.cpp index 626777d6943..472b83d63a0 100644 --- a/libsanitizer/sanitizer_common/sanitizer_chained_origin_depot.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_chained_origin_depot.cpp @@ -11,7 +11,6 @@ #include "sanitizer_chained_origin_depot.h" -#include "sanitizer_persistent_allocator.h" #include "sanitizer_stackdepotbase.h" namespace __sanitizer { diff --git a/libsanitizer/sanitizer_common/sanitizer_common.cpp b/libsanitizer/sanitizer_common/sanitizer_common.cpp index 5fae8e33b90..e30a93da5b5 100644 --- a/libsanitizer/sanitizer_common/sanitizer_common.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_common.cpp @@ -11,10 +11,12 @@ //===----------------------------------------------------------------------===// #include "sanitizer_common.h" + #include "sanitizer_allocator_interface.h" #include "sanitizer_allocator_internal.h" #include "sanitizer_atomic.h" #include "sanitizer_flags.h" +#include "sanitizer_interface_internal.h" #include "sanitizer_libc.h" #include "sanitizer_placement_new.h" @@ -138,13 +140,21 @@ void LoadedModule::set(const char *module_name, uptr base_address, set(module_name, base_address); arch_ = arch; internal_memcpy(uuid_, uuid, sizeof(uuid_)); + uuid_size_ = kModuleUUIDSize; instrumented_ = instrumented; } +void LoadedModule::setUuid(const char *uuid, uptr size) { + if (size > kModuleUUIDSize) + size = kModuleUUIDSize; + internal_memcpy(uuid_, uuid, size); + uuid_size_ = size; +} + void LoadedModule::clear() { InternalFree(full_name_); base_address_ = 0; - max_executable_address_ = 0; + max_address_ = 0; full_name_ = nullptr; arch_ = kModuleArchUnknown; internal_memset(uuid_, 0, kModuleUUIDSize); @@ -162,8 +172,7 @@ void LoadedModule::addAddressRange(uptr beg, uptr end, bool executable, AddressRange *r = new(mem) AddressRange(beg, end, executable, writable, name); ranges_.push_back(r); - if (executable && end > max_executable_address_) - max_executable_address_ = end; + max_address_ = Max(max_address_, end); } bool LoadedModule::containsAddress(uptr address) const { @@ -301,18 +310,22 @@ struct MallocFreeHook { static MallocFreeHook MFHooks[kMaxMallocFreeHooks]; -void RunMallocHooks(const void *ptr, uptr size) { +void RunMallocHooks(void *ptr, uptr size) { + __sanitizer_malloc_hook(ptr, size); for (int i = 0; i < kMaxMallocFreeHooks; i++) { auto hook = MFHooks[i].malloc_hook; - if (!hook) return; + if (!hook) + break; 
hook(ptr, size); } } -void RunFreeHooks(const void *ptr) { +void RunFreeHooks(void *ptr) { + __sanitizer_free_hook(ptr); for (int i = 0; i < kMaxMallocFreeHooks; i++) { auto hook = MFHooks[i].free_hook; - if (!hook) return; + if (!hook) + break; hook(ptr); } } @@ -360,4 +373,16 @@ int __sanitizer_install_malloc_and_free_hooks(void (*malloc_hook)(const void *, void (*free_hook)(const void *)) { return InstallMallocFreeHooks(malloc_hook, free_hook); } + +// Provide default (no-op) implementation of malloc hooks. +SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook, void *ptr, + uptr size) { + (void)ptr; + (void)size; +} + +SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) { + (void)ptr; +} + } // extern "C" diff --git a/libsanitizer/sanitizer_common/sanitizer_common.h b/libsanitizer/sanitizer_common/sanitizer_common.h index 065154496eb..17570d60688 100644 --- a/libsanitizer/sanitizer_common/sanitizer_common.h +++ b/libsanitizer/sanitizer_common/sanitizer_common.h @@ -16,7 +16,6 @@ #define SANITIZER_COMMON_H #include "sanitizer_flags.h" -#include "sanitizer_interface_internal.h" #include "sanitizer_internal_defs.h" #include "sanitizer_libc.h" #include "sanitizer_list.h" @@ -171,8 +170,8 @@ void SetShadowRegionHugePageMode(uptr addr, uptr length); bool DontDumpShadowMemory(uptr addr, uptr length); // Check if the built VMA size matches the runtime one. void CheckVMASize(); -void RunMallocHooks(const void *ptr, uptr size); -void RunFreeHooks(const void *ptr); +void RunMallocHooks(void *ptr, uptr size); +void RunFreeHooks(void *ptr); class ReservedAddressRange { public: @@ -238,12 +237,12 @@ void SetPrintfAndReportCallback(void (*callback)(const char *)); // Lock sanitizer error reporting and protects against nested errors. class ScopedErrorReportLock { public: - ScopedErrorReportLock() ACQUIRE(mutex_) { Lock(); } - ~ScopedErrorReportLock() RELEASE(mutex_) { Unlock(); } + ScopedErrorReportLock() SANITIZER_ACQUIRE(mutex_) { Lock(); } + ~ScopedErrorReportLock() SANITIZER_RELEASE(mutex_) { Unlock(); } - static void Lock() ACQUIRE(mutex_); - static void Unlock() RELEASE(mutex_); - static void CheckLocked() CHECK_LOCKED(mutex_); + static void Lock() SANITIZER_ACQUIRE(mutex_); + static void Unlock() SANITIZER_RELEASE(mutex_); + static void CheckLocked() SANITIZER_CHECK_LOCKED(mutex_); private: static atomic_uintptr_t reporting_thread_; @@ -286,7 +285,7 @@ void SetStackSizeLimitInBytes(uptr limit); bool AddressSpaceIsUnlimited(); void SetAddressSpaceUnlimited(); void AdjustStackSize(void *attr); -void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args); +void PlatformPrepareForSandboxing(void *args); void SetSandboxingCallback(void (*f)()); void InitializeCoverage(bool enabled, const char *coverage_dir); @@ -326,12 +325,6 @@ void SetUserDieCallback(DieCallbackType callback); void SetCheckUnwindCallback(void (*callback)()); -// Callback will be called if soft_rss_limit_mb is given and the limit is -// exceeded (exceeded==true) or if rss went down below the limit -// (exceeded==false). -// The callback should be registered once at the tool init time. -void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)); - // Functions related to signal handling. typedef void (*SignalHandlerType)(int, void *, void *); HandleSignalMode GetHandleSignalMode(int signum); @@ -460,6 +453,10 @@ template constexpr T Max(T a, T b) { return a > b ? a : b; } +template +constexpr T Abs(T a) { + return a < 0 ? 
-a : a; +} template void Swap(T& a, T& b) { T tmp = a; a = b; @@ -669,11 +666,9 @@ void Sort(T *v, uptr size, Compare comp = {}) { // Works like std::lower_bound: finds the first element that is not less // than the val. -template > -uptr InternalLowerBound(const Container &v, - const typename Container::value_type &val, - Compare comp = {}) { +uptr InternalLowerBound(const Container &v, const T &val, Compare comp = {}) { uptr first = 0; uptr last = v.size(); while (last > first) { @@ -743,6 +738,9 @@ bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size, uptr *read_len, uptr max_len = kDefaultFileMaxSize, error_t *errno_p = nullptr); +int GetModuleAndOffsetForPc(uptr pc, char *module_name, uptr module_name_len, + uptr *pc_offset); + // When adding a new architecture, don't forget to also update // script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp. inline const char *ModuleArchToString(ModuleArch arch) { @@ -774,7 +772,7 @@ inline const char *ModuleArchToString(ModuleArch arch) { return ""; } -const uptr kModuleUUIDSize = 16; +const uptr kModuleUUIDSize = 32; const uptr kMaxSegName = 16; // Represents a binary loaded into virtual memory (e.g. this can be an @@ -784,8 +782,9 @@ class LoadedModule { LoadedModule() : full_name_(nullptr), base_address_(0), - max_executable_address_(0), + max_address_(0), arch_(kModuleArchUnknown), + uuid_size_(0), instrumented_(false) { internal_memset(uuid_, 0, kModuleUUIDSize); ranges_.clear(); @@ -793,6 +792,7 @@ class LoadedModule { void set(const char *module_name, uptr base_address); void set(const char *module_name, uptr base_address, ModuleArch arch, u8 uuid[kModuleUUIDSize], bool instrumented); + void setUuid(const char *uuid, uptr size); void clear(); void addAddressRange(uptr beg, uptr end, bool executable, bool writable, const char *name = nullptr); @@ -800,9 +800,10 @@ class LoadedModule { const char *full_name() const { return full_name_; } uptr base_address() const { return base_address_; } - uptr max_executable_address() const { return max_executable_address_; } + uptr max_address() const { return max_address_; } ModuleArch arch() const { return arch_; } const u8 *uuid() const { return uuid_; } + uptr uuid_size() const { return uuid_size_; } bool instrumented() const { return instrumented_; } struct AddressRange { @@ -829,8 +830,9 @@ class LoadedModule { private: char *full_name_; // Owned. uptr base_address_; - uptr max_executable_address_; + uptr max_address_; ModuleArch arch_; + uptr uuid_size_; u8 uuid_[kModuleUUIDSize]; bool instrumented_; IntrusiveList ranges_; @@ -956,7 +958,7 @@ struct SignalContext { uptr sp; uptr bp; bool is_memory_access; - enum WriteFlag { UNKNOWN, READ, WRITE } write_flag; + enum WriteFlag { Unknown, Read, Write } write_flag; // In some cases the kernel cannot provide the true faulting address; `addr` // will be zero then. 
This field allows to distinguish between these cases diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc index abb38ccfa15..43296e6c1f6 100644 --- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc +++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc @@ -21,7 +21,7 @@ // COMMON_INTERCEPTOR_FD_RELEASE // COMMON_INTERCEPTOR_FD_ACCESS // COMMON_INTERCEPTOR_SET_THREAD_NAME -// COMMON_INTERCEPTOR_ON_DLOPEN +// COMMON_INTERCEPTOR_DLOPEN // COMMON_INTERCEPTOR_ON_EXIT // COMMON_INTERCEPTOR_MUTEX_PRE_LOCK // COMMON_INTERCEPTOR_MUTEX_POST_LOCK @@ -132,6 +132,76 @@ extern const short *_toupper_tab_; extern const short *_tolower_tab_; #endif +#if SANITIZER_MUSL && \ + (defined(__i386__) || defined(__arm__) || SANITIZER_MIPS32 || SANITIZER_PPC32) +// musl 1.2.0 on existing 32-bit architectures uses new symbol names for the +// time-related functions that take 64-bit time_t values. See +// https://musl.libc.org/time64.html +#define adjtime __adjtime64 +#define adjtimex __adjtimex_time64 +#define aio_suspend __aio_suspend_time64 +#define clock_adjtime __clock_adjtime64 +#define clock_getres __clock_getres_time64 +#define clock_gettime __clock_gettime64 +#define clock_nanosleep __clock_nanosleep_time64 +#define clock_settime __clock_settime64 +#define cnd_timedwait __cnd_timedwait_time64 +#define ctime __ctime64 +#define ctime_r __ctime64_r +#define difftime __difftime64 +#define dlsym __dlsym_time64 +#define fstatat __fstatat_time64 +#define fstat __fstat_time64 +#define ftime __ftime64 +#define futimens __futimens_time64 +#define futimesat __futimesat_time64 +#define futimes __futimes_time64 +#define getitimer __getitimer_time64 +#define getrusage __getrusage_time64 +#define gettimeofday __gettimeofday_time64 +#define gmtime __gmtime64 +#define gmtime_r __gmtime64_r +#define localtime __localtime64 +#define localtime_r __localtime64_r +#define lstat __lstat_time64 +#define lutimes __lutimes_time64 +#define mktime __mktime64 +#define mq_timedreceive __mq_timedreceive_time64 +#define mq_timedsend __mq_timedsend_time64 +#define mtx_timedlock __mtx_timedlock_time64 +#define nanosleep __nanosleep_time64 +#define ppoll __ppoll_time64 +#define pselect __pselect_time64 +#define pthread_cond_timedwait __pthread_cond_timedwait_time64 +#define pthread_mutex_timedlock __pthread_mutex_timedlock_time64 +#define pthread_rwlock_timedrdlock __pthread_rwlock_timedrdlock_time64 +#define pthread_rwlock_timedwrlock __pthread_rwlock_timedwrlock_time64 +#define pthread_timedjoin_np __pthread_timedjoin_np_time64 +#define recvmmsg __recvmmsg_time64 +#define sched_rr_get_interval __sched_rr_get_interval_time64 +#define select __select_time64 +#define semtimedop __semtimedop_time64 +#define sem_timedwait __sem_timedwait_time64 +#define setitimer __setitimer_time64 +#define settimeofday __settimeofday_time64 +#define sigtimedwait __sigtimedwait_time64 +#define stat __stat_time64 +#define stime __stime64 +#define thrd_sleep __thrd_sleep_time64 +#define timegm __timegm_time64 +#define timerfd_gettime __timerfd_gettime64 +#define timerfd_settime __timerfd_settime64 +#define timer_gettime __timer_gettime64 +#define timer_settime __timer_settime64 +#define timespec_get __timespec_get_time64 +#define time __time64 +#define utimensat __utimensat_time64 +#define utimes __utimes_time64 +#define utime __utime64 +#define wait3 __wait3_time64 +#define wait4 __wait4_time64 +#endif + // Platform-specific options. 
#if SANITIZER_MAC #define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0 @@ -206,9 +276,9 @@ extern const short *_tolower_tab_; COMMON_INTERCEPTOR_READ_RANGE((ctx), (s), \ common_flags()->strict_string_checks ? (internal_strlen(s)) + 1 : (n) ) -#ifndef COMMON_INTERCEPTOR_ON_DLOPEN -#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \ - CheckNoDeepBind(filename, flag); +#ifndef COMMON_INTERCEPTOR_DLOPEN +#define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \ + ({ CheckNoDeepBind(filename, flag); REAL(dlopen)(filename, flag); }) #endif #ifndef COMMON_INTERCEPTOR_GET_TLS_RANGE @@ -1295,12 +1365,16 @@ INTERCEPTOR(int, prctl, int option, unsigned long arg2, unsigned long arg3, void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, prctl, option, arg2, arg3, arg4, arg5); static const int PR_SET_NAME = 15; + static const int PR_SCHED_CORE = 62; + static const int PR_SCHED_CORE_GET = 0; int res = REAL(prctl(option, arg2, arg3, arg4, arg5)); if (option == PR_SET_NAME) { char buff[16]; internal_strncpy(buff, (char *)arg2, 15); buff[15] = 0; COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, buff); + } else if (res != -1 && option == PR_SCHED_CORE && arg2 == PR_SCHED_CORE_GET) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (u64*)(arg5), sizeof(u64)); } return res; } @@ -2422,6 +2496,34 @@ INTERCEPTOR(int, glob64, const char *pattern, int flags, #define INIT_GLOB64 #endif // SANITIZER_INTERCEPT_GLOB64 +#if SANITIZER_INTERCEPT___B64_TO +INTERCEPTOR(int, __b64_ntop, unsigned char const *src, SIZE_T srclength, + char *target, SIZE_T targsize) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, __b64_ntop, src, srclength, target, targsize); + COMMON_INTERCEPTOR_READ_RANGE(ctx, src, srclength); + int res = REAL(__b64_ntop)(src, srclength, target, targsize); + if (res >= 0) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, target, res + 1); + return res; +} +INTERCEPTOR(int, __b64_pton, char const *src, char *target, SIZE_T targsize) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, __b64_pton, src, target, targsize); + COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1); + int res = REAL(__b64_pton)(src, target, targsize); + if (res >= 0) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, target, res); + return res; +} +# define INIT___B64_TO \ + COMMON_INTERCEPT_FUNCTION(__b64_ntop); \ + COMMON_INTERCEPT_FUNCTION(__b64_pton); +#else // SANITIZER_INTERCEPT___B64_TO +#define INIT___B64_TO +#endif // SANITIZER_INTERCEPT___B64_TO + + #if SANITIZER_INTERCEPT_POSIX_SPAWN template @@ -6380,8 +6482,7 @@ INTERCEPTOR(void*, dlopen, const char *filename, int flag) { void *ctx; COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, dlopen, filename, flag); if (filename) COMMON_INTERCEPTOR_READ_STRING(ctx, filename, 0); - COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag); - void *res = REAL(dlopen)(filename, flag); + void *res = COMMON_INTERCEPTOR_DLOPEN(filename, flag); Symbolizer::GetOrInit()->InvalidateModuleList(); COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res); return res; @@ -6872,6 +6973,23 @@ INTERCEPTOR(int, stat, const char *path, void *buf) { #define INIT_STAT #endif +#if SANITIZER_INTERCEPT_STAT64 +INTERCEPTOR(int, stat64, const char *path, void *buf) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, stat64, path, buf); + if (common_flags()->intercept_stat) + COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0); + int res = REAL(stat64)(path, buf); + if (!res) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat64_sz); + return res; +} +#define INIT_STAT64 COMMON_INTERCEPT_FUNCTION(stat64) +#else +#define INIT_STAT64 +#endif + + #if SANITIZER_INTERCEPT_LSTAT INTERCEPTOR(int, 
lstat, const char *path, void *buf) { void *ctx; @@ -6888,6 +7006,22 @@ INTERCEPTOR(int, lstat, const char *path, void *buf) { #define INIT_LSTAT #endif +#if SANITIZER_INTERCEPT_STAT64 +INTERCEPTOR(int, lstat64, const char *path, void *buf) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, lstat64, path, buf); + if (common_flags()->intercept_stat) + COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0); + int res = REAL(lstat64)(path, buf); + if (!res) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat64_sz); + return res; +} +#define INIT_LSTAT64 COMMON_INTERCEPT_FUNCTION(lstat64) +#else +#define INIT_LSTAT64 +#endif + #if SANITIZER_INTERCEPT___XSTAT INTERCEPTOR(int, __xstat, int version, const char *path, void *buf) { void *ctx; @@ -7858,12 +7992,12 @@ INTERCEPTOR(void, setbuf, __sanitizer_FILE *stream, char *buf) { unpoison_file(stream); } -INTERCEPTOR(void, setbuffer, __sanitizer_FILE *stream, char *buf, int mode) { +INTERCEPTOR(void, setbuffer, __sanitizer_FILE *stream, char *buf, SIZE_T size) { void *ctx; - COMMON_INTERCEPTOR_ENTER(ctx, setbuffer, stream, buf, mode); - REAL(setbuffer)(stream, buf, mode); + COMMON_INTERCEPTOR_ENTER(ctx, setbuffer, stream, buf, size); + REAL(setbuffer)(stream, buf, size); if (buf) { - COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer_bufsiz); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, size); } if (stream) unpoison_file(stream); @@ -7905,7 +8039,7 @@ INTERCEPTOR(int, regcomp, void *preg, const char *pattern, int cflags) { if (pattern) COMMON_INTERCEPTOR_READ_RANGE(ctx, pattern, internal_strlen(pattern) + 1); int res = REAL(regcomp)(preg, pattern, cflags); - if (!res) + if (preg) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, preg, struct_regex_sz); return res; } @@ -10290,6 +10424,7 @@ static void InitializeCommonInterceptors() { INIT_TIME; INIT_GLOB; INIT_GLOB64; + INIT___B64_TO; INIT_POSIX_SPAWN; INIT_WAIT; INIT_WAIT4; @@ -10447,8 +10582,10 @@ static void InitializeCommonInterceptors() { INIT_RECV_RECVFROM; INIT_SEND_SENDTO; INIT_STAT; + INIT_STAT64; INIT_EVENTFD_READ_WRITE; INIT_LSTAT; + INIT_LSTAT64; INIT___XSTAT; INIT___XSTAT64; INIT___LXSTAT; diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc index b7da6598755..49ec4097c90 100644 --- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc +++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc @@ -115,11 +115,19 @@ static void ioctl_table_fill() { // _(SOUND_MIXER_WRITE_MUTE, WRITE, sizeof(int)); // same as ...WRITE_ENHANCE _(BLKFLSBUF, NONE, 0); _(BLKGETSIZE, WRITE, sizeof(uptr)); - _(BLKRAGET, WRITE, sizeof(int)); + _(BLKRAGET, WRITE, sizeof(uptr)); _(BLKRASET, NONE, 0); _(BLKROGET, WRITE, sizeof(int)); _(BLKROSET, READ, sizeof(int)); _(BLKRRPART, NONE, 0); + _(BLKFRASET, NONE, 0); + _(BLKFRAGET, WRITE, sizeof(uptr)); + _(BLKSECTSET, READ, sizeof(short)); + _(BLKSECTGET, WRITE, sizeof(short)); + _(BLKSSZGET, WRITE, sizeof(int)); + _(BLKBSZGET, WRITE, sizeof(int)); + _(BLKBSZSET, READ, sizeof(uptr)); + _(BLKGETSIZE64, WRITE, sizeof(u64)); _(CDROMEJECT, NONE, 0); _(CDROMEJECT_SW, NONE, 0); _(CDROMMULTISESSION, WRITE, struct_cdrom_multisession_sz); diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interface_posix.inc b/libsanitizer/sanitizer_common/sanitizer_common_interface_posix.inc index 38f9531148d..a5259be9335 100644 --- a/libsanitizer/sanitizer_common/sanitizer_common_interface_posix.inc +++ 
b/libsanitizer/sanitizer_common/sanitizer_common_interface_posix.inc @@ -11,3 +11,5 @@ INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_code) INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_data) INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_demangle) INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_flush) +INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_set_demangle) +INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_set_inline_frames) diff --git a/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cpp b/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cpp index bc4b477e350..8fd39856428 100644 --- a/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cpp @@ -10,25 +10,22 @@ // run-time libraries. //===----------------------------------------------------------------------===// +#include "sanitizer_allocator.h" #include "sanitizer_allocator_interface.h" #include "sanitizer_common.h" #include "sanitizer_flags.h" +#include "sanitizer_interface_internal.h" #include "sanitizer_procmaps.h" - +#include "sanitizer_stackdepot.h" namespace __sanitizer { -static void (*SoftRssLimitExceededCallback)(bool exceeded); -void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) { - CHECK_EQ(SoftRssLimitExceededCallback, nullptr); - SoftRssLimitExceededCallback = Callback; -} - #if (SANITIZER_LINUX || SANITIZER_NETBSD) && !SANITIZER_GO // Weak default implementation for when sanitizer_stackdepot is not linked in. SANITIZER_WEAK_ATTRIBUTE StackDepotStats StackDepotGetStats() { return {}; } void *BackgroundThread(void *arg) { + VPrintf(1, "%s: Started BackgroundThread\n", SanitizerToolName); const uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb; const uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb; const bool heap_profile = common_flags()->heap_profile; @@ -66,13 +63,11 @@ void *BackgroundThread(void *arg) { reached_soft_rss_limit = true; Report("%s: soft rss limit exhausted (%zdMb vs %zdMb)\n", SanitizerToolName, soft_rss_limit_mb, current_rss_mb); - if (SoftRssLimitExceededCallback) - SoftRssLimitExceededCallback(true); + SetRssLimitExceeded(true); } else if (soft_rss_limit_mb >= current_rss_mb && reached_soft_rss_limit) { reached_soft_rss_limit = false; - if (SoftRssLimitExceededCallback) - SoftRssLimitExceededCallback(false); + SetRssLimitExceeded(false); } } if (heap_profile && @@ -83,6 +78,42 @@ void *BackgroundThread(void *arg) { } } } + +void MaybeStartBackgroudThread() { + // Need to implement/test on other platforms. + // Start the background thread if one of the rss limits is given. + if (!common_flags()->hard_rss_limit_mb && + !common_flags()->soft_rss_limit_mb && + !common_flags()->heap_profile) return; + if (!&real_pthread_create) { + VPrintf(1, "%s: real_pthread_create undefined\n", SanitizerToolName); + return; // Can't spawn the thread anyway. + } + + static bool started = false; + if (!started) { + started = true; + internal_start_thread(BackgroundThread, nullptr); + } +} + +# if !SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL +# ifdef __clang__ +# pragma clang diagnostic push +// We avoid global-constructors to be sure that globals are ready when +// sanitizers need them. This can happen before global constructors have +// executed. Here we don't mind if the thread is started at a later stage.
+# pragma clang diagnostic ignored "-Wglobal-constructors" +# endif +static struct BackgroudThreadStarted { + BackgroudThreadStarted() { MaybeStartBackgroudThread(); } +} background_thread_strarter UNUSED; +# ifdef __clang__ +# pragma clang diagnostic pop +# endif +# endif +#else +void MaybeStartBackgroudThread() {} #endif void WriteToSyslog(const char *msg) { @@ -105,18 +136,6 @@ void WriteToSyslog(const char *msg) { WriteOneLineToSyslog(p); } -void MaybeStartBackgroudThread() { -#if (SANITIZER_LINUX || SANITIZER_NETBSD) && \ - !SANITIZER_GO // Need to implement/test on other platforms. - // Start the background thread if one of the rss limits is given. - if (!common_flags()->hard_rss_limit_mb && - !common_flags()->soft_rss_limit_mb && - !common_flags()->heap_profile) return; - if (!&real_pthread_create) return; // Can't spawn the thread anyway. - internal_start_thread(BackgroundThread, nullptr); -#endif -} - static void (*sandboxing_callback)(); void SetSandboxingCallback(void (*f)()) { sandboxing_callback = f; @@ -185,10 +204,22 @@ void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start, #endif // !SANITIZER_FUCHSIA +#if !SANITIZER_WINDOWS && !SANITIZER_GO +// Weak default implementation for when sanitizer_stackdepot is not linked in. +SANITIZER_WEAK_ATTRIBUTE void StackDepotStopBackgroundThread() {} +static void StopStackDepotBackgroundThread() { + StackDepotStopBackgroundThread(); +} +#else +// SANITIZER_WEAK_ATTRIBUTE is unsupported. +static void StopStackDepotBackgroundThread() {} +#endif + } // namespace __sanitizer SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_sandbox_on_notify, __sanitizer_sandbox_arguments *args) { + __sanitizer::StopStackDepotBackgroundThread(); __sanitizer::PlatformPrepareForSandboxing(args); if (__sanitizer::sandboxing_callback) __sanitizer::sandboxing_callback(); diff --git a/libsanitizer/sanitizer_common/sanitizer_coverage_fuchsia.cpp b/libsanitizer/sanitizer_common/sanitizer_coverage_fuchsia.cpp index 1d0dbe592b9..35c32535914 100644 --- a/libsanitizer/sanitizer_common/sanitizer_coverage_fuchsia.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_coverage_fuchsia.cpp @@ -33,6 +33,7 @@ #include "sanitizer_atomic.h" #include "sanitizer_common.h" +#include "sanitizer_interface_internal.h" #include "sanitizer_internal_defs.h" #include "sanitizer_symbolizer_fuchsia.h" diff --git a/libsanitizer/sanitizer_common/sanitizer_coverage_libcdep_new.cpp b/libsanitizer/sanitizer_common/sanitizer_coverage_libcdep_new.cpp index 56220df2ac1..3dcb39f32f6 100644 --- a/libsanitizer/sanitizer_common/sanitizer_coverage_libcdep_new.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_coverage_libcdep_new.cpp @@ -10,11 +10,13 @@ #include "sanitizer_platform.h" #if !SANITIZER_FUCHSIA -#include "sancov_flags.h" -#include "sanitizer_allocator_internal.h" -#include "sanitizer_atomic.h" -#include "sanitizer_common.h" -#include "sanitizer_file.h" +# include "sancov_flags.h" +# include "sanitizer_allocator_internal.h" +# include "sanitizer_atomic.h" +# include "sanitizer_common.h" +# include "sanitizer_common/sanitizer_stacktrace.h" +# include "sanitizer_file.h" +# include "sanitizer_interface_internal.h" using namespace __sanitizer; @@ -72,7 +74,7 @@ static void SanitizerDumpCoverage(const uptr* unsorted_pcs, uptr len) { const uptr pc = pcs[i]; if (!pc) continue; - if (!__sanitizer_get_module_and_offset_for_pc(pc, nullptr, 0, &pcs[i])) { + if (!GetModuleAndOffsetForPc(pc, nullptr, 0, &pcs[i])) { Printf("ERROR: unknown pc 0x%zx (may happen if dlclose is used)\n", pc); continue; } @@ 
-87,8 +89,7 @@ static void SanitizerDumpCoverage(const uptr* unsorted_pcs, uptr len) { last_base = module_base; module_start_idx = i; module_found = true; - __sanitizer_get_module_and_offset_for_pc(pc, module_name, kMaxPathLength, - &pcs[i]); + GetModuleAndOffsetForPc(pc, module_name, kMaxPathLength, &pcs[i]); } } @@ -222,7 +223,8 @@ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage(const uptr* pcs, SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard, u32* guard) { if (!*guard) return; - __sancov::pc_guard_controller.TracePcGuard(guard, GET_CALLER_PC() - 1); + __sancov::pc_guard_controller.TracePcGuard( + guard, StackTrace::GetPreviousInstructionPc(GET_CALLER_PC())); } SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard_init, diff --git a/libsanitizer/sanitizer_common/sanitizer_dense_map.h b/libsanitizer/sanitizer_common/sanitizer_dense_map.h new file mode 100644 index 00000000000..046d77dddc9 --- /dev/null +++ b/libsanitizer/sanitizer_common/sanitizer_dense_map.h @@ -0,0 +1,705 @@ +//===- sanitizer_dense_map.h - Dense probed hash table ----------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This is fork of llvm/ADT/DenseMap.h class with the following changes: +// * Use mmap to allocate. +// * No iterators. +// * Does not shrink. +// +//===----------------------------------------------------------------------===// + +#ifndef SANITIZER_DENSE_MAP_H +#define SANITIZER_DENSE_MAP_H + +#include "sanitizer_common.h" +#include "sanitizer_dense_map_info.h" +#include "sanitizer_internal_defs.h" +#include "sanitizer_type_traits.h" + +namespace __sanitizer { + +template +class DenseMapBase { + public: + using size_type = unsigned; + using key_type = KeyT; + using mapped_type = ValueT; + using value_type = BucketT; + + WARN_UNUSED_RESULT bool empty() const { return getNumEntries() == 0; } + unsigned size() const { return getNumEntries(); } + + /// Grow the densemap so that it can contain at least \p NumEntries items + /// before resizing again. + void reserve(size_type NumEntries) { + auto NumBuckets = getMinBucketToReserveForEntries(NumEntries); + if (NumBuckets > getNumBuckets()) + grow(NumBuckets); + } + + void clear() { + if (getNumEntries() == 0 && getNumTombstones() == 0) + return; + + const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey(); + if (__sanitizer::is_trivially_destructible::value) { + // Use a simpler loop when values don't need destruction. + for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) + P->getFirst() = EmptyKey; + } else { + unsigned NumEntries = getNumEntries(); + for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) { + if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) { + if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) { + P->getSecond().~ValueT(); + --NumEntries; + } + P->getFirst() = EmptyKey; + } + } + CHECK_EQ(NumEntries, 0); + } + setNumEntries(0); + setNumTombstones(0); + } + + /// Return 1 if the specified key is in the map, 0 otherwise. + size_type count(const KeyT &Key) const { + const BucketT *TheBucket; + return LookupBucketFor(Key, TheBucket) ? 
1 : 0; + } + + value_type *find(const KeyT &Key) { + BucketT *TheBucket; + if (LookupBucketFor(Key, TheBucket)) + return TheBucket; + return nullptr; + } + const value_type *find(const KeyT &Key) const { + const BucketT *TheBucket; + if (LookupBucketFor(Key, TheBucket)) + return TheBucket; + return nullptr; + } + + /// Alternate version of find() which allows a different, and possibly + /// less expensive, key type. + /// The DenseMapInfo is responsible for supplying methods + /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key + /// type used. + template + value_type *find_as(const LookupKeyT &Key) { + BucketT *TheBucket; + if (LookupBucketFor(Key, TheBucket)) + return TheBucket; + return nullptr; + } + template + const value_type *find_as(const LookupKeyT &Key) const { + const BucketT *TheBucket; + if (LookupBucketFor(Key, TheBucket)) + return TheBucket; + return nullptr; + } + + /// lookup - Return the entry for the specified key, or a default + /// constructed value if no such entry exists. + ValueT lookup(const KeyT &Key) const { + const BucketT *TheBucket; + if (LookupBucketFor(Key, TheBucket)) + return TheBucket->getSecond(); + return ValueT(); + } + + // Inserts key,value pair into the map if the key isn't already in the map. + // If the key is already in the map, it returns false and doesn't update the + // value. + detail::DenseMapPair insert(const value_type &KV) { + return try_emplace(KV.first, KV.second); + } + + // Inserts key,value pair into the map if the key isn't already in the map. + // If the key is already in the map, it returns false and doesn't update the + // value. + detail::DenseMapPair insert(value_type &&KV) { + return try_emplace(__sanitizer::move(KV.first), + __sanitizer::move(KV.second)); + } + + // Inserts key,value pair into the map if the key isn't already in the map. + // The value is constructed in-place if the key is not in the map, otherwise + // it is not moved. + template + detail::DenseMapPair try_emplace(KeyT &&Key, + Ts &&...Args) { + BucketT *TheBucket; + if (LookupBucketFor(Key, TheBucket)) + return {TheBucket, false}; // Already in map. + + // Otherwise, insert the new element. + TheBucket = InsertIntoBucket(TheBucket, __sanitizer::move(Key), + __sanitizer::forward(Args)...); + return {TheBucket, true}; + } + + // Inserts key,value pair into the map if the key isn't already in the map. + // The value is constructed in-place if the key is not in the map, otherwise + // it is not moved. + template + detail::DenseMapPair try_emplace(const KeyT &Key, + Ts &&...Args) { + BucketT *TheBucket; + if (LookupBucketFor(Key, TheBucket)) + return {TheBucket, false}; // Already in map. + + // Otherwise, insert the new element. + TheBucket = + InsertIntoBucket(TheBucket, Key, __sanitizer::forward(Args)...); + return {TheBucket, true}; + } + + /// Alternate version of insert() which allows a different, and possibly + /// less expensive, key type. + /// The DenseMapInfo is responsible for supplying methods + /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key + /// type used. + template + detail::DenseMapPair insert_as(value_type &&KV, + const LookupKeyT &Val) { + BucketT *TheBucket; + if (LookupBucketFor(Val, TheBucket)) + return {TheBucket, false}; // Already in map. + + // Otherwise, insert the new element. 
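
A quick usage sketch of the container these hunks introduce may help. This is a hypothetical call site (Example and the key values are invented), and it only builds inside the sanitizer tree with sanitizer_dense_map.h on the include path; try_emplace returns a {bucket pointer, inserted} pair in the spirit of std::map::try_emplace:

#include "sanitizer_dense_map.h"

using namespace __sanitizer;

void Example() {
  DenseMap<uptr, u32> counts;           // DenseMapInfo<uptr> is predefined
  counts[0x1000] = 1;                   // miss: default-construct, then assign
  auto res = counts.try_emplace(0x2000, 7u);
  CHECK(res.second);                    // key was new, value built in place
  CHECK_EQ(counts.lookup(0x2000), 7u);  // lookup() copies the value out
  CHECK_EQ(counts.lookup(0x9999), 0u);  // a miss yields a default ValueT
  counts.erase(0x1000);                 // leaves a tombstone in the bucket
}

The insert_as body that resumes below is the miss half of exactly this machinery: the pair is moved into whatever bucket the lookup handed back.
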
+ TheBucket = + InsertIntoBucketWithLookup(TheBucket, __sanitizer::move(KV.first), + __sanitizer::move(KV.second), Val); + return {TheBucket, true}; + } + + bool erase(const KeyT &Val) { + BucketT *TheBucket; + if (!LookupBucketFor(Val, TheBucket)) + return false; // not in map. + + TheBucket->getSecond().~ValueT(); + TheBucket->getFirst() = getTombstoneKey(); + decrementNumEntries(); + incrementNumTombstones(); + return true; + } + + void erase(value_type *I) { + CHECK_NE(I, nullptr); + BucketT *TheBucket = &*I; + TheBucket->getSecond().~ValueT(); + TheBucket->getFirst() = getTombstoneKey(); + decrementNumEntries(); + incrementNumTombstones(); + } + + value_type &FindAndConstruct(const KeyT &Key) { + BucketT *TheBucket; + if (LookupBucketFor(Key, TheBucket)) + return *TheBucket; + + return *InsertIntoBucket(TheBucket, Key); + } + + ValueT &operator[](const KeyT &Key) { return FindAndConstruct(Key).second; } + + value_type &FindAndConstruct(KeyT &&Key) { + BucketT *TheBucket; + if (LookupBucketFor(Key, TheBucket)) + return *TheBucket; + + return *InsertIntoBucket(TheBucket, __sanitizer::move(Key)); + } + + ValueT &operator[](KeyT &&Key) { + return FindAndConstruct(__sanitizer::move(Key)).second; + } + + /// Iterate over active entries of the container. + /// + /// Function can return fast to stop the process. + template + void forEach(Fn fn) { + const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey(); + for (auto *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) { + const KeyT K = P->getFirst(); + if (!KeyInfoT::isEqual(K, EmptyKey) && + !KeyInfoT::isEqual(K, TombstoneKey)) { + if (!fn(*P)) + return; + } + } + } + + template + void forEach(Fn fn) const { + const_cast(this)->forEach( + [&](const value_type &KV) { return fn(KV); }); + } + + protected: + DenseMapBase() = default; + + void destroyAll() { + if (getNumBuckets() == 0) // Nothing to do. + return; + + const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey(); + for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) { + if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) && + !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) + P->getSecond().~ValueT(); + P->getFirst().~KeyT(); + } + } + + void initEmpty() { + setNumEntries(0); + setNumTombstones(0); + + CHECK_EQ((getNumBuckets() & (getNumBuckets() - 1)), 0); + const KeyT EmptyKey = getEmptyKey(); + for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B) + ::new (&B->getFirst()) KeyT(EmptyKey); + } + + /// Returns the number of buckets to allocate to ensure that the DenseMap can + /// accommodate \p NumEntries without need to grow(). + unsigned getMinBucketToReserveForEntries(unsigned NumEntries) { + // Ensure that "NumEntries * 4 < NumBuckets * 3" + if (NumEntries == 0) + return 0; + // +1 is required because of the strict equality. + // For example if NumEntries is 48, we need to return 401. + return RoundUpToPowerOfTwo((NumEntries * 4 / 3 + 1) + /* NextPowerOf2 */ 1); + } + + void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) { + initEmpty(); + + // Insert all the old elements. + const KeyT EmptyKey = getEmptyKey(); + const KeyT TombstoneKey = getTombstoneKey(); + for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) { + if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) && + !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) { + // Insert the key/value into the new table. + BucketT *DestBucket; + bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket); + (void)FoundVal; // silence warning. 
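
The sizing rule in getMinBucketToReserveForEntries is easier to follow with concrete numbers than with the inherited "return 401" comment: the invariant being preserved is NumEntries * 4 < NumBuckets * 3 (load factor strictly under 3/4), and the +1 matters because 48 entries give 48 * 4 == 64 * 3 exactly, so 64 buckets would violate the strict bound. A standalone check of the arithmetic (MinBucketsFor and the power-of-two helper are illustrative stand-ins, not code from the patch):

#include <cassert>
#include <cstdint>

// Round x up to the next power of two (the tree has its own helper).
static uint64_t RoundUpToPowerOfTwo(uint64_t x) {
  uint64_t r = 1;
  while (r < x) r <<= 1;
  return r;
}

// Smallest power-of-two bucket count with NumEntries * 4 < NumBuckets * 3.
static unsigned MinBucketsFor(unsigned NumEntries) {
  if (NumEntries == 0) return 0;
  return (unsigned)RoundUpToPowerOfTwo(NumEntries * 4 / 3 + 1);
}

int main() {
  assert(!(48 * 4 < 64 * 3));        // 192 < 192 fails: 64 buckets too few
  assert(MinBucketsFor(48) == 128);  // 48 * 4 / 3 + 1 = 65, rounded to 128
  assert(48 * 4 < 128 * 3);          // 192 < 384 holds
}
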
+ CHECK(!FoundVal); + DestBucket->getFirst() = __sanitizer::move(B->getFirst()); + ::new (&DestBucket->getSecond()) + ValueT(__sanitizer::move(B->getSecond())); + incrementNumEntries(); + + // Free the value. + B->getSecond().~ValueT(); + } + B->getFirst().~KeyT(); + } + } + + template + void copyFrom( + const DenseMapBase &other) { + CHECK_NE(&other, this); + CHECK_EQ(getNumBuckets(), other.getNumBuckets()); + + setNumEntries(other.getNumEntries()); + setNumTombstones(other.getNumTombstones()); + + if (__sanitizer::is_trivially_copyable::value && + __sanitizer::is_trivially_copyable::value) + internal_memcpy(reinterpret_cast(getBuckets()), + other.getBuckets(), getNumBuckets() * sizeof(BucketT)); + else + for (uptr i = 0; i < getNumBuckets(); ++i) { + ::new (&getBuckets()[i].getFirst()) + KeyT(other.getBuckets()[i].getFirst()); + if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) && + !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey())) + ::new (&getBuckets()[i].getSecond()) + ValueT(other.getBuckets()[i].getSecond()); + } + } + + static unsigned getHashValue(const KeyT &Val) { + return KeyInfoT::getHashValue(Val); + } + + template + static unsigned getHashValue(const LookupKeyT &Val) { + return KeyInfoT::getHashValue(Val); + } + + static const KeyT getEmptyKey() { return KeyInfoT::getEmptyKey(); } + + static const KeyT getTombstoneKey() { return KeyInfoT::getTombstoneKey(); } + + private: + unsigned getNumEntries() const { + return static_cast(this)->getNumEntries(); + } + + void setNumEntries(unsigned Num) { + static_cast(this)->setNumEntries(Num); + } + + void incrementNumEntries() { setNumEntries(getNumEntries() + 1); } + + void decrementNumEntries() { setNumEntries(getNumEntries() - 1); } + + unsigned getNumTombstones() const { + return static_cast(this)->getNumTombstones(); + } + + void setNumTombstones(unsigned Num) { + static_cast(this)->setNumTombstones(Num); + } + + void incrementNumTombstones() { setNumTombstones(getNumTombstones() + 1); } + + void decrementNumTombstones() { setNumTombstones(getNumTombstones() - 1); } + + const BucketT *getBuckets() const { + return static_cast(this)->getBuckets(); + } + + BucketT *getBuckets() { return static_cast(this)->getBuckets(); } + + unsigned getNumBuckets() const { + return static_cast(this)->getNumBuckets(); + } + + BucketT *getBucketsEnd() { return getBuckets() + getNumBuckets(); } + + const BucketT *getBucketsEnd() const { + return getBuckets() + getNumBuckets(); + } + + void grow(unsigned AtLeast) { static_cast(this)->grow(AtLeast); } + + template + BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key, + ValueArgs &&...Values) { + TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket); + + TheBucket->getFirst() = __sanitizer::forward(Key); + ::new (&TheBucket->getSecond()) + ValueT(__sanitizer::forward(Values)...); + return TheBucket; + } + + template + BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key, + ValueT &&Value, LookupKeyT &Lookup) { + TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket); + + TheBucket->getFirst() = __sanitizer::move(Key); + ::new (&TheBucket->getSecond()) ValueT(__sanitizer::move(Value)); + return TheBucket; + } + + template + BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup, + BucketT *TheBucket) { + // If the load of the hash table is more than 3/4, or if fewer than 1/8 of + // the buckets are empty (meaning that many are filled with tombstones), + // grow the table. + // + // The later case is tricky. 
For example, if we had one empty bucket with + // tons of tombstones, failing lookups (e.g. for insertion) would have to + // probe almost the entire table until it found the empty bucket. If the + // table completely filled with tombstones, no lookup would ever succeed, + // causing infinite loops in lookup. + unsigned NewNumEntries = getNumEntries() + 1; + unsigned NumBuckets = getNumBuckets(); + if (UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) { + this->grow(NumBuckets * 2); + LookupBucketFor(Lookup, TheBucket); + NumBuckets = getNumBuckets(); + } else if (UNLIKELY(NumBuckets - (NewNumEntries + getNumTombstones()) <= + NumBuckets / 8)) { + this->grow(NumBuckets); + LookupBucketFor(Lookup, TheBucket); + } + CHECK(TheBucket); + + // Only update the state after we've grown our bucket space appropriately + // so that when growing buckets we have self-consistent entry count. + incrementNumEntries(); + + // If we are writing over a tombstone, remember this. + const KeyT EmptyKey = getEmptyKey(); + if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey)) + decrementNumTombstones(); + + return TheBucket; + } + + /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in + /// FoundBucket. If the bucket contains the key and a value, this returns + /// true, otherwise it returns a bucket with an empty marker or tombstone and + /// returns false. + template + bool LookupBucketFor(const LookupKeyT &Val, + const BucketT *&FoundBucket) const { + const BucketT *BucketsPtr = getBuckets(); + const unsigned NumBuckets = getNumBuckets(); + + if (NumBuckets == 0) { + FoundBucket = nullptr; + return false; + } + + // FoundTombstone - Keep track of whether we find a tombstone while probing. + const BucketT *FoundTombstone = nullptr; + const KeyT EmptyKey = getEmptyKey(); + const KeyT TombstoneKey = getTombstoneKey(); + CHECK(!KeyInfoT::isEqual(Val, EmptyKey)); + CHECK(!KeyInfoT::isEqual(Val, TombstoneKey)); + + unsigned BucketNo = getHashValue(Val) & (NumBuckets - 1); + unsigned ProbeAmt = 1; + while (true) { + const BucketT *ThisBucket = BucketsPtr + BucketNo; + // Found Val's bucket? If so, return it. + if (LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) { + FoundBucket = ThisBucket; + return true; + } + + // If we found an empty bucket, the key doesn't exist in the set. + // Insert it and return the default value. + if (LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) { + // If we've already seen a tombstone while probing, fill it in instead + // of the empty bucket we eventually probed to. + FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket; + return false; + } + + // If this is a tombstone, remember it. If Val ends up not in the map, we + // prefer to return it than something that would require more probing. + if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) && + !FoundTombstone) + FoundTombstone = ThisBucket; // Remember the first tombstone found. + + // Otherwise, it's a hash collision or a tombstone, continue quadratic + // probing. + BucketNo += ProbeAmt++; + BucketNo &= (NumBuckets - 1); + } + } + + template + bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) { + const BucketT *ConstFoundBucket; + bool Result = const_cast(this)->LookupBucketFor( + Val, ConstFoundBucket); + FoundBucket = const_cast(ConstFoundBucket); + return Result; + } + + public: + /// Return the approximate size (in bytes) of the actual map. + /// This is just the raw memory used by DenseMap. 
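
LookupBucketFor above is the heart of the table and worth spelling out: probing is triangular (step sizes 1, 2, 3, ... masked by the power-of-two bucket count, a sequence that is guaranteed to visit every bucket), and the first tombstone seen along the way is remembered so a failed lookup hands the insertion path a reusable slot instead of the later empty one. A minimal standalone rendering of the same scheme, with an int set and sentinel values standing in for DenseMapInfo's empty and tombstone keys:

#include <cassert>

enum : int { kEmpty = -1, kTombstone = -2 };

struct ProbeResult {
  unsigned idx;  // bucket to use
  bool found;    // true if the key is already present
};

static ProbeResult Lookup(const int *buckets, unsigned num_buckets, int key) {
  unsigned bucket = (unsigned)key * 37u & (num_buckets - 1);
  unsigned probe = 1;
  unsigned tombstone = num_buckets;  // sentinel for "none seen yet"
  while (true) {
    if (buckets[bucket] == key)
      return {bucket, true};
    if (buckets[bucket] == kEmpty)
      // Prefer handing back an earlier tombstone for reuse.
      return {tombstone != num_buckets ? tombstone : bucket, false};
    if (buckets[bucket] == kTombstone && tombstone == num_buckets)
      tombstone = bucket;  // remember only the first tombstone
    bucket = (bucket + probe++) & (num_buckets - 1);
  }
}

int main() {
  int table[8] = {kEmpty, kEmpty, kEmpty, kEmpty,
                  kEmpty, kEmpty, kEmpty, kEmpty};
  ProbeResult r = Lookup(table, 8, 5);
  assert(!r.found);
  table[r.idx] = 5;                   // insert into the suggested bucket
  assert(Lookup(table, 8, 5).found);  // now present
}

Termination depends on the growth policy quoted above keeping some genuinely empty buckets around; a table holding nothing but tombstones would probe forever, which is exactly the pathology the insertion comment describes.
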
+ /// If entries are pointers to objects, the size of the referenced objects + /// are not included. + uptr getMemorySize() const { + return RoundUpTo(getNumBuckets() * sizeof(BucketT), GetPageSizeCached()); + } +}; + +/// Equality comparison for DenseMap. +/// +/// Iterates over elements of LHS confirming that each (key, value) pair in LHS +/// is also in RHS, and that no additional pairs are in RHS. +/// Equivalent to N calls to RHS.find and N value comparisons. Amortized +/// complexity is linear, worst case is O(N^2) (if every hash collides). +template +bool operator==( + const DenseMapBase &LHS, + const DenseMapBase &RHS) { + if (LHS.size() != RHS.size()) + return false; + + bool R = true; + LHS.forEach( + [&](const typename DenseMapBase::value_type &KV) -> bool { + const auto *I = RHS.find(KV.first); + if (!I || I->second != KV.second) { + R = false; + return false; + } + return true; + }); + + return R; +} + +/// Inequality comparison for DenseMap. +/// +/// Equivalent to !(LHS == RHS). See operator== for performance notes. +template +bool operator!=( + const DenseMapBase &LHS, + const DenseMapBase &RHS) { + return !(LHS == RHS); +} + +template , + typename BucketT = detail::DenseMapPair> +class DenseMap : public DenseMapBase, + KeyT, ValueT, KeyInfoT, BucketT> { + friend class DenseMapBase; + + // Lift some types from the dependent base class into this class for + // simplicity of referring to them. + using BaseT = DenseMapBase; + + BucketT *Buckets = nullptr; + unsigned NumEntries = 0; + unsigned NumTombstones = 0; + unsigned NumBuckets = 0; + + public: + /// Create a DenseMap with an optional \p InitialReserve that guarantee that + /// this number of elements can be inserted in the map without grow() + explicit DenseMap(unsigned InitialReserve) { init(InitialReserve); } + constexpr DenseMap() = default; + + DenseMap(const DenseMap &other) : BaseT() { + init(0); + copyFrom(other); + } + + DenseMap(DenseMap &&other) : BaseT() { + init(0); + swap(other); + } + + ~DenseMap() { + this->destroyAll(); + deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets); + } + + void swap(DenseMap &RHS) { + Swap(Buckets, RHS.Buckets); + Swap(NumEntries, RHS.NumEntries); + Swap(NumTombstones, RHS.NumTombstones); + Swap(NumBuckets, RHS.NumBuckets); + } + + DenseMap &operator=(const DenseMap &other) { + if (&other != this) + copyFrom(other); + return *this; + } + + DenseMap &operator=(DenseMap &&other) { + this->destroyAll(); + deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT)); + init(0); + swap(other); + return *this; + } + + void copyFrom(const DenseMap &other) { + this->destroyAll(); + deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets); + if (allocateBuckets(other.NumBuckets)) { + this->BaseT::copyFrom(other); + } else { + NumEntries = 0; + NumTombstones = 0; + } + } + + void init(unsigned InitNumEntries) { + auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries); + if (allocateBuckets(InitBuckets)) { + this->BaseT::initEmpty(); + } else { + NumEntries = 0; + NumTombstones = 0; + } + } + + void grow(unsigned AtLeast) { + unsigned OldNumBuckets = NumBuckets; + BucketT *OldBuckets = Buckets; + + allocateBuckets(RoundUpToPowerOfTwo(Max(64, AtLeast))); + CHECK(Buckets); + if (!OldBuckets) { + this->BaseT::initEmpty(); + return; + } + + this->moveFromOldBuckets(OldBuckets, OldBuckets + OldNumBuckets); + + // Free the old table. 
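
Note that grow() is the only direction the table moves (there is deliberately no shrink), and it doubles as tombstone reclamation: moveFromOldBuckets reinserts only live keys, so deletion debris is dropped. In sketch form, reusing the sentinel convention from the previous example (Reinsert and Grow are hypothetical names):

enum : int { kEmpty = -1, kTombstone = -2 };

// Reinsert with the same triangular probe used for lookups; the fresh
// table has no tombstones, so only empty buckets need checking.
static void Reinsert(int *buckets, unsigned n, int key) {
  unsigned b = (unsigned)key * 37u & (n - 1), probe = 1;
  while (buckets[b] != kEmpty) b = (b + probe++) & (n - 1);
  buckets[b] = key;
}

// Copy live keys into a bigger power-of-two table, dropping tombstones.
static int *Grow(int *old_buckets, unsigned old_n, unsigned new_n) {
  int *fresh = new int[new_n];
  for (unsigned i = 0; i < new_n; i++) fresh[i] = kEmpty;
  for (unsigned i = 0; i < old_n; i++)
    if (old_buckets[i] != kEmpty && old_buckets[i] != kTombstone)
      Reinsert(fresh, new_n, old_buckets[i]);
  delete[] old_buckets;  // the real code unmaps whole pages instead
  return fresh;
}
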
+ deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets); + } + + private: + unsigned getNumEntries() const { return NumEntries; } + + void setNumEntries(unsigned Num) { NumEntries = Num; } + + unsigned getNumTombstones() const { return NumTombstones; } + + void setNumTombstones(unsigned Num) { NumTombstones = Num; } + + BucketT *getBuckets() const { return Buckets; } + + unsigned getNumBuckets() const { return NumBuckets; } + + bool allocateBuckets(unsigned Num) { + NumBuckets = Num; + if (NumBuckets == 0) { + Buckets = nullptr; + return false; + } + + uptr Size = sizeof(BucketT) * NumBuckets; + if (Size * 2 <= GetPageSizeCached()) { + // We always allocate at least a page, so use entire space. + unsigned Log2 = MostSignificantSetBitIndex(GetPageSizeCached() / Size); + Size <<= Log2; + NumBuckets <<= Log2; + CHECK_EQ(Size, sizeof(BucketT) * NumBuckets); + CHECK_GT(Size * 2, GetPageSizeCached()); + } + Buckets = static_cast(allocate_buffer(Size)); + return true; + } + + static void *allocate_buffer(uptr Size) { + return MmapOrDie(RoundUpTo(Size, GetPageSizeCached()), "DenseMap"); + } + + static void deallocate_buffer(void *Ptr, uptr Size) { + UnmapOrDie(Ptr, RoundUpTo(Size, GetPageSizeCached())); + } +}; + +} // namespace __sanitizer + +#endif // SANITIZER_DENSE_MAP_H diff --git a/libsanitizer/sanitizer_common/sanitizer_dense_map_info.h b/libsanitizer/sanitizer_common/sanitizer_dense_map_info.h new file mode 100644 index 00000000000..f4640369ae5 --- /dev/null +++ b/libsanitizer/sanitizer_common/sanitizer_dense_map_info.h @@ -0,0 +1,282 @@ +//===- sanitizer_dense_map_info.h - Type traits for DenseMap ----*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef SANITIZER_DENSE_MAP_INFO_H +#define SANITIZER_DENSE_MAP_INFO_H + +#include "sanitizer_common.h" +#include "sanitizer_internal_defs.h" +#include "sanitizer_type_traits.h" + +namespace __sanitizer { + +namespace detail { + +/// Simplistic combination of 32-bit hash values into 32-bit hash values. +static constexpr unsigned combineHashValue(unsigned a, unsigned b) { + u64 key = (u64)a << 32 | (u64)b; + key += ~(key << 32); + key ^= (key >> 22); + key += ~(key << 13); + key ^= (key >> 8); + key += (key << 3); + key ^= (key >> 15); + key += ~(key << 27); + key ^= (key >> 31); + return (unsigned)key; +} + +// We extend a pair to allow users to override the bucket type with their own +// implementation without requiring two members. 
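
allocateBuckets above hides a nice detail: bucket storage comes from MmapOrDie in whole pages, so when the requested array would fill at most half a page, the bucket count is scaled up by a power of two to use the page rather than waste its tail. Worked through with assumed numbers (4 KiB pages, 24-byte buckets; the bit helper is reimplemented here for illustration):

#include <cassert>
#include <cstdint>

// Index of the most significant set bit (x must be nonzero).
static unsigned MostSignificantSetBitIndex(uint64_t x) {
  unsigned i = 0;
  while (x >>= 1) ++i;
  return i;
}

int main() {
  const uint64_t kPageSize = 4096;
  uint64_t NumBuckets = 64;
  uint64_t Size = NumBuckets * 24;  // 1536 bytes of 24-byte buckets
  assert(Size * 2 <= kPageSize);    // small enough that scaling applies
  unsigned Log2 = MostSignificantSetBitIndex(kPageSize / Size);  // 2 -> 1
  Size <<= Log2;                    // 3072 bytes
  NumBuckets <<= Log2;              // 128 buckets in the same page
  assert(NumBuckets == 128 && Size * 2 > kPageSize);
}

DenseMapPair, introduced just above, is the bucket type those pages hold.
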
+template +struct DenseMapPair { + KeyT first = {}; + ValueT second = {}; + constexpr DenseMapPair() = default; + constexpr DenseMapPair(const KeyT &f, const ValueT &s) + : first(f), second(s) {} + + template + constexpr DenseMapPair(KeyT2 &&f, ValueT2 &&s) + : first(__sanitizer::forward(f)), + second(__sanitizer::forward(s)) {} + + constexpr DenseMapPair(const DenseMapPair &other) = default; + constexpr DenseMapPair &operator=(const DenseMapPair &other) = default; + constexpr DenseMapPair(DenseMapPair &&other) = default; + constexpr DenseMapPair &operator=(DenseMapPair &&other) = default; + + KeyT &getFirst() { return first; } + const KeyT &getFirst() const { return first; } + ValueT &getSecond() { return second; } + const ValueT &getSecond() const { return second; } +}; + +} // end namespace detail + +template +struct DenseMapInfo { + // static T getEmptyKey(); + // static T getTombstoneKey(); + // static unsigned getHashValue(const T &Val); + // static bool isEqual(const T &LHS, const T &RHS); +}; + +// Provide DenseMapInfo for all pointers. Come up with sentinel pointer values +// that are aligned to alignof(T) bytes, but try to avoid requiring T to be +// complete. This allows clients to instantiate DenseMap with forward +// declared key types. Assume that no pointer key type requires more than 4096 +// bytes of alignment. +template +struct DenseMapInfo { + // The following should hold, but it would require T to be complete: + // static_assert(alignof(T) <= (1 << Log2MaxAlign), + // "DenseMap does not support pointer keys requiring more than " + // "Log2MaxAlign bits of alignment"); + static constexpr uptr Log2MaxAlign = 12; + + static constexpr T *getEmptyKey() { + uptr Val = static_cast(-1); + Val <<= Log2MaxAlign; + return reinterpret_cast(Val); + } + + static constexpr T *getTombstoneKey() { + uptr Val = static_cast(-2); + Val <<= Log2MaxAlign; + return reinterpret_cast(Val); + } + + static constexpr unsigned getHashValue(const T *PtrVal) { + return (unsigned((uptr)PtrVal) >> 4) ^ (unsigned((uptr)PtrVal) >> 9); + } + + static constexpr bool isEqual(const T *LHS, const T *RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for chars. +template <> +struct DenseMapInfo { + static constexpr char getEmptyKey() { return ~0; } + static constexpr char getTombstoneKey() { return ~0 - 1; } + static constexpr unsigned getHashValue(const char &Val) { return Val * 37U; } + + static constexpr bool isEqual(const char &LHS, const char &RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for unsigned chars. +template <> +struct DenseMapInfo { + static constexpr unsigned char getEmptyKey() { return ~0; } + static constexpr unsigned char getTombstoneKey() { return ~0 - 1; } + static constexpr unsigned getHashValue(const unsigned char &Val) { + return Val * 37U; + } + + static constexpr bool isEqual(const unsigned char &LHS, + const unsigned char &RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for unsigned shorts. +template <> +struct DenseMapInfo { + static constexpr unsigned short getEmptyKey() { return 0xFFFF; } + static constexpr unsigned short getTombstoneKey() { return 0xFFFF - 1; } + static constexpr unsigned getHashValue(const unsigned short &Val) { + return Val * 37U; + } + + static constexpr bool isEqual(const unsigned short &LHS, + const unsigned short &RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for unsigned ints. 
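
The unspecialized DenseMapInfo above is deliberately empty: every key type must supply a specialization that donates two values which can never be real keys (the empty and tombstone bucket markers) plus hash and equality. A hypothetical client-side specialization for an invented two-field key, written as it would sit inside namespace __sanitizer (PidTid is not part of the patch):

struct PidTid {
  unsigned pid, tid;
};

template <>
struct DenseMapInfo<PidTid> {
  // Reserve two impossible key values as the bucket markers.
  static PidTid getEmptyKey() { return {~0u, ~0u}; }
  static PidTid getTombstoneKey() { return {~0u - 1, ~0u}; }
  static unsigned getHashValue(const PidTid &v) {
    return detail::combineHashValue(v.pid, v.tid);
  }
  static bool isEqual(const PidTid &a, const PidTid &b) {
    return a.pid == b.pid && a.tid == b.tid;
  }
};

The builtin specializations that follow apply the same pattern to the integer types, reserving ~0 and ~0 - 1.
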
+template <> +struct DenseMapInfo { + static constexpr unsigned getEmptyKey() { return ~0U; } + static constexpr unsigned getTombstoneKey() { return ~0U - 1; } + static constexpr unsigned getHashValue(const unsigned &Val) { + return Val * 37U; + } + + static constexpr bool isEqual(const unsigned &LHS, const unsigned &RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for unsigned longs. +template <> +struct DenseMapInfo { + static constexpr unsigned long getEmptyKey() { return ~0UL; } + static constexpr unsigned long getTombstoneKey() { return ~0UL - 1L; } + + static constexpr unsigned getHashValue(const unsigned long &Val) { + return (unsigned)(Val * 37UL); + } + + static constexpr bool isEqual(const unsigned long &LHS, + const unsigned long &RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for unsigned long longs. +template <> +struct DenseMapInfo { + static constexpr unsigned long long getEmptyKey() { return ~0ULL; } + static constexpr unsigned long long getTombstoneKey() { return ~0ULL - 1ULL; } + + static constexpr unsigned getHashValue(const unsigned long long &Val) { + return (unsigned)(Val * 37ULL); + } + + static constexpr bool isEqual(const unsigned long long &LHS, + const unsigned long long &RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for shorts. +template <> +struct DenseMapInfo { + static constexpr short getEmptyKey() { return 0x7FFF; } + static constexpr short getTombstoneKey() { return -0x7FFF - 1; } + static constexpr unsigned getHashValue(const short &Val) { return Val * 37U; } + static constexpr bool isEqual(const short &LHS, const short &RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for ints. +template <> +struct DenseMapInfo { + static constexpr int getEmptyKey() { return 0x7fffffff; } + static constexpr int getTombstoneKey() { return -0x7fffffff - 1; } + static constexpr unsigned getHashValue(const int &Val) { + return (unsigned)(Val * 37U); + } + + static constexpr bool isEqual(const int &LHS, const int &RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for longs. +template <> +struct DenseMapInfo { + static constexpr long getEmptyKey() { + return (1UL << (sizeof(long) * 8 - 1)) - 1UL; + } + + static constexpr long getTombstoneKey() { return getEmptyKey() - 1L; } + + static constexpr unsigned getHashValue(const long &Val) { + return (unsigned)(Val * 37UL); + } + + static constexpr bool isEqual(const long &LHS, const long &RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for long longs. +template <> +struct DenseMapInfo { + static constexpr long long getEmptyKey() { return 0x7fffffffffffffffLL; } + static constexpr long long getTombstoneKey() { + return -0x7fffffffffffffffLL - 1; + } + + static constexpr unsigned getHashValue(const long long &Val) { + return (unsigned)(Val * 37ULL); + } + + static constexpr bool isEqual(const long long &LHS, const long long &RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for all pairs whose members have info. 
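
It helps to know what combineHashValue (defined near the top of this header) does for the composite specializations: it concatenates the two 32-bit hashes into a 64-bit value and runs an add/shift/xor avalanche over it, in the style of Thomas Wang's 64-to-32-bit integer hash, so pair keys do not degenerate into XOR-style collisions. The mixer reproduced standalone, with a small invented demo:

#include <cstdint>
#include <cstdio>

typedef uint64_t u64;

// Verbatim from the header: a 64-bit avalanche over the two halves.
static constexpr unsigned combineHashValue(unsigned a, unsigned b) {
  u64 key = (u64)a << 32 | (u64)b;
  key += ~(key << 32);
  key ^= (key >> 22);
  key += ~(key << 13);
  key ^= (key >> 8);
  key += (key << 3);
  key ^= (key >> 15);
  key += ~(key << 27);
  key ^= (key >> 31);
  return (unsigned)key;
}

int main() {
  // Unlike a plain XOR of the halves, argument order changes the result.
  printf("%x %x\n", combineHashValue(1, 2), combineHashValue(2, 1));
}

The pair specialization that follows feeds both members' hashes through this mix.
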
+template +struct DenseMapInfo> { + using Pair = detail::DenseMapPair; + using FirstInfo = DenseMapInfo; + using SecondInfo = DenseMapInfo; + + static constexpr Pair getEmptyKey() { + return detail::DenseMapPair(FirstInfo::getEmptyKey(), + SecondInfo::getEmptyKey()); + } + + static constexpr Pair getTombstoneKey() { + return detail::DenseMapPair(FirstInfo::getTombstoneKey(), + SecondInfo::getTombstoneKey()); + } + + static constexpr unsigned getHashValue(const Pair &PairVal) { + return detail::combineHashValue(FirstInfo::getHashValue(PairVal.first), + SecondInfo::getHashValue(PairVal.second)); + } + + static constexpr bool isEqual(const Pair &LHS, const Pair &RHS) { + return FirstInfo::isEqual(LHS.first, RHS.first) && + SecondInfo::isEqual(LHS.second, RHS.second); + } +}; + +} // namespace __sanitizer + +#endif // SANITIZER_DENSE_MAP_INFO_H diff --git a/libsanitizer/sanitizer_common/sanitizer_file.cpp b/libsanitizer/sanitizer_common/sanitizer_file.cpp index 5492560df91..7ef499ce07b 100644 --- a/libsanitizer/sanitizer_common/sanitizer_file.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_file.cpp @@ -19,6 +19,7 @@ #include "sanitizer_common.h" #include "sanitizer_file.h" +# include "sanitizer_interface_internal.h" namespace __sanitizer { @@ -83,8 +84,12 @@ static void RecursiveCreateParentDirs(char *path) { if (!IsPathSeparator(path[i])) continue; path[i] = '\0'; - /* Some of these will fail, because the directory exists, ignore it. */ - CreateDir(path); + if (!DirExists(path) && !CreateDir(path)) { + const char *ErrorMsgPrefix = "ERROR: Can't create directory: "; + WriteToFile(kStderrFd, ErrorMsgPrefix, internal_strlen(ErrorMsgPrefix)); + WriteToFile(kStderrFd, path, internal_strlen(path)); + Die(); + } path[i] = save; } } diff --git a/libsanitizer/sanitizer_common/sanitizer_file.h b/libsanitizer/sanitizer_common/sanitizer_file.h index 3d7916171c1..810c1e452f6 100644 --- a/libsanitizer/sanitizer_common/sanitizer_file.h +++ b/libsanitizer/sanitizer_common/sanitizer_file.h @@ -15,7 +15,6 @@ #ifndef SANITIZER_FILE_H #define SANITIZER_FILE_H -#include "sanitizer_interface_internal.h" #include "sanitizer_internal_defs.h" #include "sanitizer_libc.h" #include "sanitizer_mutex.h" @@ -78,6 +77,7 @@ bool SupportsColoredOutput(fd_t fd); // OS const char *GetPwd(); bool FileExists(const char *filename); +bool DirExists(const char *path); char *FindPathToBinary(const char *name); bool IsPathSeparator(const char c); bool IsAbsolutePath(const char *path); diff --git a/libsanitizer/sanitizer_common/sanitizer_flags.inc b/libsanitizer/sanitizer_common/sanitizer_flags.inc index 95da82b1a1d..0ca91aff8dd 100644 --- a/libsanitizer/sanitizer_common/sanitizer_flags.inc +++ b/libsanitizer/sanitizer_common/sanitizer_flags.inc @@ -179,6 +179,7 @@ COMMON_FLAG(bool, use_madv_dontdump, true, "in core file.") COMMON_FLAG(bool, symbolize_inline_frames, true, "Print inlined frames in stacktraces. Defaults to true.") +COMMON_FLAG(bool, demangle, true, "Print demangled symbols.") COMMON_FLAG(bool, symbolize_vs_style, false, "Print file locations in Visual Studio style (e.g: " " file(10,42): ...") @@ -191,6 +192,8 @@ COMMON_FLAG(const char *, stack_trace_format, "DEFAULT", "Format string used to render stack frames. " "See sanitizer_stacktrace_printer.h for the format description. " "Use DEFAULT to get default format.") +COMMON_FLAG(int, compress_stack_depot, 0, + "Compress stack depot to save memory.") COMMON_FLAG(bool, no_huge_pages_for_shadow, true, "If true, the shadow is not allowed to use huge pages. 
") COMMON_FLAG(bool, strict_string_checks, false, diff --git a/libsanitizer/sanitizer_common/sanitizer_fuchsia.cpp b/libsanitizer/sanitizer_common/sanitizer_fuchsia.cpp index c7b30d98836..848953a6ab0 100644 --- a/libsanitizer/sanitizer_common/sanitizer_fuchsia.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_fuchsia.cpp @@ -14,17 +14,18 @@ #include "sanitizer_fuchsia.h" #if SANITIZER_FUCHSIA -#include -#include -#include -#include -#include -#include -#include - -#include "sanitizer_common.h" -#include "sanitizer_libc.h" -#include "sanitizer_mutex.h" +# include +# include +# include +# include +# include +# include +# include + +# include "sanitizer_common.h" +# include "sanitizer_interface_internal.h" +# include "sanitizer_libc.h" +# include "sanitizer_mutex.h" namespace __sanitizer { @@ -89,7 +90,7 @@ void InitializePlatformEarly() {} void MaybeReexec() {} void CheckASLR() {} void CheckMPROTECT() {} -void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {} +void PlatformPrepareForSandboxing(void *args) {} void DisableCoreDumperIfNecessary() {} void InstallDeadlySignalHandlers(SignalHandlerType handler) {} void SetAlternateSignalStack() {} @@ -274,6 +275,15 @@ void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) { UNIMPLEMENTED(); } +bool MprotectNoAccess(uptr addr, uptr size) { + return _zx_vmar_protect(_zx_vmar_root_self(), 0, addr, size) == ZX_OK; +} + +bool MprotectReadOnly(uptr addr, uptr size) { + return _zx_vmar_protect(_zx_vmar_root_self(), ZX_VM_PERM_READ, addr, size) == + ZX_OK; +} + void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment, const char *mem_type) { CHECK_GE(size, GetPageSize()); @@ -376,29 +386,8 @@ void GetMemoryProfile(fill_profile_f cb, uptr *stats) {} bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size, uptr *read_len, uptr max_len, error_t *errno_p) { - zx_handle_t vmo; - zx_status_t status = __sanitizer_get_configuration(file_name, &vmo); - if (status == ZX_OK) { - uint64_t vmo_size; - status = _zx_vmo_get_size(vmo, &vmo_size); - if (status == ZX_OK) { - if (vmo_size < max_len) - max_len = vmo_size; - size_t map_size = RoundUpTo(max_len, GetPageSize()); - uintptr_t addr; - status = _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0, - map_size, &addr); - if (status == ZX_OK) { - *buff = reinterpret_cast(addr); - *buff_size = map_size; - *read_len = max_len; - } - } - _zx_handle_close(vmo); - } - if (status != ZX_OK && errno_p) - *errno_p = status; - return status == ZX_OK; + *errno_p = ZX_ERR_NOT_SUPPORTED; + return false; } void RawWrite(const char *buffer) { @@ -475,6 +464,9 @@ u32 GetNumberOfCPUs() { return zx_system_get_num_cpus(); } uptr GetRSS() { UNIMPLEMENTED(); } +void *internal_start_thread(void *(*func)(void *arg), void *arg) { return 0; } +void internal_join_thread(void *th) {} + void InitializePlatformCommonFlags(CommonFlags *cf) {} } // namespace __sanitizer diff --git a/libsanitizer/sanitizer_common/sanitizer_interface_internal.h b/libsanitizer/sanitizer_common/sanitizer_interface_internal.h index 1600d31c30c..e9dc78c6354 100644 --- a/libsanitizer/sanitizer_common/sanitizer_interface_internal.h +++ b/libsanitizer/sanitizer_common/sanitizer_interface_internal.h @@ -20,104 +20,102 @@ #include "sanitizer_internal_defs.h" extern "C" { - // Tell the tools to write their reports to "path." instead of stderr. - // The special values are "stdout" and "stderr". 
- SANITIZER_INTERFACE_ATTRIBUTE - void __sanitizer_set_report_path(const char *path); - // Tell the tools to write their reports to the provided file descriptor - // (casted to void *). - SANITIZER_INTERFACE_ATTRIBUTE - void __sanitizer_set_report_fd(void *fd); - // Get the current full report file path, if a path was specified by - // an earlier call to __sanitizer_set_report_path. Returns null otherwise. - SANITIZER_INTERFACE_ATTRIBUTE - const char *__sanitizer_get_report_path(); +// Tell the tools to write their reports to "path." instead of stderr. +// The special values are "stdout" and "stderr". +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_set_report_path(const char *path); +// Tell the tools to write their reports to the provided file descriptor +// (casted to void *). +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_set_report_fd(void *fd); +// Get the current full report file path, if a path was specified by +// an earlier call to __sanitizer_set_report_path. Returns null otherwise. +SANITIZER_INTERFACE_ATTRIBUTE +const char *__sanitizer_get_report_path(); - typedef struct { - int coverage_sandboxed; - __sanitizer::sptr coverage_fd; - unsigned int coverage_max_block_size; - } __sanitizer_sandbox_arguments; +typedef struct { + int coverage_sandboxed; + __sanitizer::sptr coverage_fd; + unsigned int coverage_max_block_size; +} __sanitizer_sandbox_arguments; - // Notify the tools that the sandbox is going to be turned on. - SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void - __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args); +// Notify the tools that the sandbox is going to be turned on. +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void +__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args); - // This function is called by the tool when it has just finished reporting - // an error. 'error_summary' is a one-line string that summarizes - // the error message. This function can be overridden by the client. - SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE - void __sanitizer_report_error_summary(const char *error_summary); +// This function is called by the tool when it has just finished reporting +// an error. 'error_summary' is a one-line string that summarizes +// the error message. This function can be overridden by the client. +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void +__sanitizer_report_error_summary(const char *error_summary); - SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump(); - SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage( - const __sanitizer::uptr *pcs, const __sanitizer::uptr len); - SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage(); +SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump(); +SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage( + const __sanitizer::uptr *pcs, const __sanitizer::uptr len); +SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage(); - SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(__sanitizer::u32 *guard); +SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(__sanitizer::u32 *guard); - // Returns 1 on the first call, then returns 0 thereafter. Called by the tool - // to ensure only one report is printed when multiple errors occur - // simultaneously. - SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_acquire_crash_state(); +// Returns 1 on the first call, then returns 0 thereafter. 
Called by the tool +// to ensure only one report is printed when multiple errors occur +// simultaneously. +SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_acquire_crash_state(); - SANITIZER_INTERFACE_ATTRIBUTE - void __sanitizer_annotate_contiguous_container(const void *beg, - const void *end, - const void *old_mid, - const void *new_mid); - SANITIZER_INTERFACE_ATTRIBUTE - int __sanitizer_verify_contiguous_container(const void *beg, const void *mid, - const void *end); - SANITIZER_INTERFACE_ATTRIBUTE - const void *__sanitizer_contiguous_container_find_bad_address( - const void *beg, const void *mid, const void *end); +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_annotate_contiguous_container(const void *beg, const void *end, + const void *old_mid, + const void *new_mid); +SANITIZER_INTERFACE_ATTRIBUTE +int __sanitizer_verify_contiguous_container(const void *beg, const void *mid, + const void *end); +SANITIZER_INTERFACE_ATTRIBUTE +const void *__sanitizer_contiguous_container_find_bad_address(const void *beg, + const void *mid, + const void *end); - SANITIZER_INTERFACE_ATTRIBUTE - int __sanitizer_get_module_and_offset_for_pc( - __sanitizer::uptr pc, char *module_path, - __sanitizer::uptr module_path_len, __sanitizer::uptr *pc_offset); +SANITIZER_INTERFACE_ATTRIBUTE +int __sanitizer_get_module_and_offset_for_pc(void *pc, char *module_path, + __sanitizer::uptr module_path_len, + void **pc_offset); - SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE - void __sanitizer_cov_trace_cmp(); - SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE - void __sanitizer_cov_trace_cmp1(); - SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE - void __sanitizer_cov_trace_cmp2(); - SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE - void __sanitizer_cov_trace_cmp4(); - SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE - void __sanitizer_cov_trace_cmp8(); - SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE - void __sanitizer_cov_trace_const_cmp1(); - SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE - void __sanitizer_cov_trace_const_cmp2(); - SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE - void __sanitizer_cov_trace_const_cmp4(); - SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE - void __sanitizer_cov_trace_const_cmp8(); - SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE - void __sanitizer_cov_trace_switch(); - SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE - void __sanitizer_cov_trace_div4(); - SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE - void __sanitizer_cov_trace_div8(); - SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE - void __sanitizer_cov_trace_gep(); - SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE - void __sanitizer_cov_trace_pc_indir(); - SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE - void __sanitizer_cov_trace_pc_guard(__sanitizer::u32*); - SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE - void __sanitizer_cov_trace_pc_guard_init(__sanitizer::u32*, - __sanitizer::u32*); - SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void - __sanitizer_cov_8bit_counters_init(char *, char *); - SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void - __sanitizer_cov_bool_flag_init(); - SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void - __sanitizer_cov_pcs_init(const __sanitizer::uptr *, - const __sanitizer::uptr *); -} // extern "C" +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void +__sanitizer_cov_trace_cmp(); +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void 
+__sanitizer_cov_trace_cmp1(); +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void +__sanitizer_cov_trace_cmp2(); +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void +__sanitizer_cov_trace_cmp4(); +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void +__sanitizer_cov_trace_cmp8(); +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void +__sanitizer_cov_trace_const_cmp1(); +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void +__sanitizer_cov_trace_const_cmp2(); +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void +__sanitizer_cov_trace_const_cmp4(); +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void +__sanitizer_cov_trace_const_cmp8(); +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void +__sanitizer_cov_trace_switch(); +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void +__sanitizer_cov_trace_div4(); +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void +__sanitizer_cov_trace_div8(); +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void +__sanitizer_cov_trace_gep(); +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void +__sanitizer_cov_trace_pc_indir(); +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void +__sanitizer_cov_trace_pc_guard(__sanitizer::u32 *); +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void +__sanitizer_cov_trace_pc_guard_init(__sanitizer::u32 *, __sanitizer::u32 *); +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void +__sanitizer_cov_8bit_counters_init(char *, char *); +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void +__sanitizer_cov_bool_flag_init(); +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void +__sanitizer_cov_pcs_init(const __sanitizer::uptr *, const __sanitizer::uptr *); +} // extern "C" #endif // SANITIZER_INTERFACE_INTERNAL_H diff --git a/libsanitizer/sanitizer_common/sanitizer_internal_defs.h b/libsanitizer/sanitizer_common/sanitizer_internal_defs.h index e97cc9ac0df..95a80b4629c 100644 --- a/libsanitizer/sanitizer_common/sanitizer_internal_defs.h +++ b/libsanitizer/sanitizer_common/sanitizer_internal_defs.h @@ -179,6 +179,7 @@ typedef int pid_t; #if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_MAC || \ (SANITIZER_SOLARIS && (defined(_LP64) || _FILE_OFFSET_BITS == 64)) || \ + (SANITIZER_LINUX && !SANITIZER_GLIBC && !SANITIZER_ANDROID) || \ (SANITIZER_LINUX && (defined(__x86_64__) || defined(__hexagon__))) typedef u64 OFF_T; #else @@ -258,7 +259,9 @@ typedef u64 tid_t; # define NOEXCEPT throw() #endif -#if __has_cpp_attribute(clang::fallthrough) +#if __has_cpp_attribute(fallthrough) +# define FALLTHROUGH [[fallthrough]] +#elif __has_cpp_attribute(clang::fallthrough) # define FALLTHROUGH [[clang::fallthrough]] #else # define FALLTHROUGH @@ -300,7 +303,8 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond, } \ } while (0) -#define RAW_CHECK(expr, ...) RAW_CHECK_MSG(expr, #expr "\n", __VA_ARGS__) +#define RAW_CHECK(expr) RAW_CHECK_MSG(expr, #expr "\n", ) +#define RAW_CHECK_VA(expr, ...) RAW_CHECK_MSG(expr, #expr "\n", __VA_ARGS__) #define CHECK_IMPL(c1, op, c2) \ do { \ diff --git a/libsanitizer/sanitizer_common/sanitizer_leb128.h b/libsanitizer/sanitizer_common/sanitizer_leb128.h new file mode 100644 index 00000000000..553550d2955 --- /dev/null +++ b/libsanitizer/sanitizer_common/sanitizer_leb128.h @@ -0,0 +1,87 @@ +//===-- sanitizer_leb128.h --------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef SANITIZER_LEB128_H +#define SANITIZER_LEB128_H + +#include "sanitizer_common.h" +#include "sanitizer_internal_defs.h" + +namespace __sanitizer { + +template +It EncodeSLEB128(T value, It begin, It end) { + bool more; + do { + u8 byte = value & 0x7f; + // NOTE: this assumes that this signed shift is an arithmetic right shift. + value >>= 7; + more = !((((value == 0) && ((byte & 0x40) == 0)) || + ((value == -1) && ((byte & 0x40) != 0)))); + if (more) + byte |= 0x80; + if (UNLIKELY(begin == end)) + break; + *(begin++) = byte; + } while (more); + return begin; +} + +template +It DecodeSLEB128(It begin, It end, T* v) { + T value = 0; + unsigned shift = 0; + u8 byte; + do { + if (UNLIKELY(begin == end)) + return begin; + byte = *(begin++); + T slice = byte & 0x7f; + value |= slice << shift; + shift += 7; + } while (byte >= 128); + if (shift < 64 && (byte & 0x40)) + value |= (-1ULL) << shift; + *v = value; + return begin; +} + +template +It EncodeULEB128(T value, It begin, It end) { + do { + u8 byte = value & 0x7f; + value >>= 7; + if (value) + byte |= 0x80; + if (UNLIKELY(begin == end)) + break; + *(begin++) = byte; + } while (value); + return begin; +} + +template +It DecodeULEB128(It begin, It end, T* v) { + T value = 0; + unsigned shift = 0; + u8 byte; + do { + if (UNLIKELY(begin == end)) + return begin; + byte = *(begin++); + T slice = byte & 0x7f; + value += slice << shift; + shift += 7; + } while (byte >= 128); + *v = value; + return begin; +} + +} // namespace __sanitizer + +#endif // SANITIZER_LEB128_H diff --git a/libsanitizer/sanitizer_common/sanitizer_linux.cpp b/libsanitizer/sanitizer_common/sanitizer_linux.cpp index aa59d9718ca..8e144a4e9a0 100644 --- a/libsanitizer/sanitizer_common/sanitizer_linux.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_linux.cpp @@ -80,6 +80,7 @@ #if SANITIZER_FREEBSD #include +#include #include #include extern "C" { @@ -162,6 +163,12 @@ ScopedBlockSignals::ScopedBlockSignals(__sanitizer_sigset_t *copy) { // on any thread, setuid call hangs. // See test/sanitizer_common/TestCases/Linux/setuid.c. internal_sigdelset(&set, 33); +# endif +# if SANITIZER_LINUX + // Seccomp-BPF-sandboxed processes rely on SIGSYS to handle trapped syscalls. + // If this signal is blocked, such calls cannot be handled and the process may + // hang. 
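
The new LEB128 helpers above are the textbook variable-length integer scheme (7 payload bits per byte, high bit meaning "more bytes follow"; the signed variant also sign-extends from bit 6 of the final byte), with the iterator-range twist that encoding and decoding stop early when the range runs out. A self-contained round-trip of the unsigned form, reimplemented with plain pointers and no bounds parameter:

#include <cassert>
#include <cstdint>

typedef uint8_t u8;

static u8 *EncodeULEB128(uint64_t value, u8 *p) {
  do {
    u8 byte = value & 0x7f;
    value >>= 7;
    if (value) byte |= 0x80;  // more bytes follow
    *p++ = byte;
  } while (value);
  return p;
}

static const u8 *DecodeULEB128(const u8 *p, uint64_t *v) {
  uint64_t value = 0;
  unsigned shift = 0;
  u8 byte;
  do {
    byte = *p++;
    value += (uint64_t)(byte & 0x7f) << shift;
    shift += 7;
  } while (byte & 0x80);
  *v = value;
  return p;
}

int main() {
  u8 buf[10];
  u8 *end = EncodeULEB128(624485, buf);  // the classic DWARF example
  assert(end - buf == 3);
  assert(buf[0] == 0xE5 && buf[1] == 0x8E && buf[2] == 0x26);
  uint64_t back;
  DecodeULEB128(buf, &back);
  assert(back == 624485);
}

Back in sanitizer_linux.cpp, the seccomp note quoted above is acted on by the sigdelset call that follows: signal 31 is SIGSYS on Linux, so it must stay unblocked for sandboxed processes to trap their syscalls.
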
+ internal_sigdelset(&set, 31); # endif SetSigProcMask(&set, &saved_); if (copy) @@ -226,7 +233,7 @@ uptr internal_close(fd_t fd) { } uptr internal_open(const char *filename, int flags) { -#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS +# if SANITIZER_LINUX return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags); #else return internal_syscall(SYSCALL(open), (uptr)filename, flags); @@ -234,7 +241,7 @@ uptr internal_open(const char *filename, int flags) { } uptr internal_open(const char *filename, int flags, u32 mode) { -#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS +# if SANITIZER_LINUX return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags, mode); #else @@ -335,50 +342,46 @@ static void kernel_stat_to_stat(struct kernel_stat *in, struct stat *out) { uptr internal_stat(const char *path, void *buf) { #if SANITIZER_FREEBSD return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf, 0); -#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS +# elif SANITIZER_LINUX +# if SANITIZER_WORDSIZE == 64 return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf, 0); -#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS -# if defined(__mips64) - // For mips64, stat syscall fills buffer in the format of kernel_stat - struct kernel_stat kbuf; - int res = internal_syscall(SYSCALL(stat), path, &kbuf); - kernel_stat_to_stat(&kbuf, (struct stat *)buf); +# else + struct stat64 buf64; + int res = internal_syscall(SYSCALL(fstatat64), AT_FDCWD, (uptr)path, + (uptr)&buf64, 0); + stat64_to_stat(&buf64, (struct stat *)buf); return res; -# else - return internal_syscall(SYSCALL(stat), (uptr)path, (uptr)buf); -# endif -#else +# endif +# else struct stat64 buf64; int res = internal_syscall(SYSCALL(stat64), path, &buf64); stat64_to_stat(&buf64, (struct stat *)buf); return res; -#endif +# endif } uptr internal_lstat(const char *path, void *buf) { #if SANITIZER_FREEBSD return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf, AT_SYMLINK_NOFOLLOW); -#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS +# elif SANITIZER_LINUX +# if defined(_LP64) return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf, AT_SYMLINK_NOFOLLOW); -#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS -# if SANITIZER_MIPS64 - // For mips64, lstat syscall fills buffer in the format of kernel_stat - struct kernel_stat kbuf; - int res = internal_syscall(SYSCALL(lstat), path, &kbuf); - kernel_stat_to_stat(&kbuf, (struct stat *)buf); +# else + struct stat64 buf64; + int res = internal_syscall(SYSCALL(fstatat64), AT_FDCWD, (uptr)path, + (uptr)&buf64, AT_SYMLINK_NOFOLLOW); + stat64_to_stat(&buf64, (struct stat *)buf); return res; -# else - return internal_syscall(SYSCALL(lstat), (uptr)path, (uptr)buf); -# endif -#else +# endif +# else struct stat64 buf64; int res = internal_syscall(SYSCALL(lstat64), path, &buf64); stat64_to_stat(&buf64, (struct stat *)buf); return res; -#endif +# endif } uptr internal_fstat(fd_t fd, void *buf) { @@ -412,7 +415,7 @@ uptr internal_dup(int oldfd) { } uptr internal_dup2(int oldfd, int newfd) { -#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS +# if SANITIZER_LINUX return internal_syscall(SYSCALL(dup3), oldfd, newfd, 0); #else return internal_syscall(SYSCALL(dup2), oldfd, newfd); @@ -420,7 +423,7 @@ uptr internal_dup2(int oldfd, int newfd) { } uptr internal_readlink(const char *path, char *buf, uptr bufsize) { -#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS +# if SANITIZER_LINUX return internal_syscall(SYSCALL(readlinkat), AT_FDCWD, (uptr)path, (uptr)buf, bufsize); #else 
@@ -429,7 +432,7 @@ uptr internal_readlink(const char *path, char *buf, uptr bufsize) { } uptr internal_unlink(const char *path) { -#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS +# if SANITIZER_LINUX return internal_syscall(SYSCALL(unlinkat), AT_FDCWD, (uptr)path, 0); #else return internal_syscall(SYSCALL(unlink), (uptr)path); @@ -440,12 +443,12 @@ uptr internal_rename(const char *oldpath, const char *newpath) { #if defined(__riscv) && defined(__linux__) return internal_syscall(SYSCALL(renameat2), AT_FDCWD, (uptr)oldpath, AT_FDCWD, (uptr)newpath, 0); -#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS +# elif SANITIZER_LINUX return internal_syscall(SYSCALL(renameat), AT_FDCWD, (uptr)oldpath, AT_FDCWD, (uptr)newpath); -#else +# else return internal_syscall(SYSCALL(rename), (uptr)oldpath, (uptr)newpath); -#endif +# endif } uptr internal_sched_yield() { @@ -482,17 +485,20 @@ bool FileExists(const char *filename) { if (ShouldMockFailureToOpen(filename)) return false; struct stat st; -#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS - if (internal_syscall(SYSCALL(newfstatat), AT_FDCWD, filename, &st, 0)) -#else if (internal_stat(filename, &st)) -#endif return false; // Sanity check: filename is a regular file. return S_ISREG(st.st_mode); } -#if !SANITIZER_NETBSD +bool DirExists(const char *path) { + struct stat st; + if (internal_stat(path, &st)) + return false; + return S_ISDIR(st.st_mode); +} + +# if !SANITIZER_NETBSD tid_t GetTid() { #if SANITIZER_FREEBSD long Tid; @@ -691,17 +697,17 @@ void FutexWake(atomic_uint32_t *p, u32 count) { // Not used #else struct linux_dirent { -#if SANITIZER_X32 || defined(__aarch64__) || SANITIZER_RISCV64 +# if SANITIZER_X32 || SANITIZER_LINUX u64 d_ino; u64 d_off; -#else +# else unsigned long d_ino; unsigned long d_off; -#endif +# endif unsigned short d_reclen; -#if defined(__aarch64__) || SANITIZER_RISCV64 +# if SANITIZER_LINUX unsigned char d_type; -#endif +# endif char d_name[256]; }; #endif @@ -737,11 +743,11 @@ int internal_dlinfo(void *handle, int request, void *p) { uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count) { #if SANITIZER_FREEBSD return internal_syscall(SYSCALL(getdirentries), fd, (uptr)dirp, count, NULL); -#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS +# elif SANITIZER_LINUX return internal_syscall(SYSCALL(getdents64), fd, (uptr)dirp, count); -#else +# else return internal_syscall(SYSCALL(getdents), fd, (uptr)dirp, count); -#endif +# endif } uptr internal_lseek(fd_t fd, OFF_T offset, int whence) { @@ -759,11 +765,15 @@ uptr internal_sigaltstack(const void *ss, void *oss) { } int internal_fork() { -#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS +# if SANITIZER_LINUX +# if SANITIZER_S390 + return internal_syscall(SYSCALL(clone), 0, SIGCHLD); +# else return internal_syscall(SYSCALL(clone), SIGCHLD, 0); -#else +# endif +# else return internal_syscall(SYSCALL(fork)); -#endif +# endif } #if SANITIZER_FREEBSD @@ -1380,7 +1390,7 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, #elif defined(__aarch64__) uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, int *parent_tidptr, void *newtls, int *child_tidptr) { - long long res; + register long long res __asm__("x0"); if (!fn || !child_stack) return -EINVAL; CHECK_EQ(0, (uptr)child_stack % 16); @@ -1760,6 +1770,8 @@ HandleSignalMode GetHandleSignalMode(int signum) { #if !SANITIZER_GO void *internal_start_thread(void *(*func)(void *arg), void *arg) { + if (&real_pthread_create == 0) + return nullptr; // Start the thread with 
signals blocked, otherwise it can steal user signals. ScopedBlockSignals block(nullptr); void *th; @@ -1768,7 +1780,8 @@ void *internal_start_thread(void *(*func)(void *arg), void *arg) { } void internal_join_thread(void *th) { - real_pthread_join(th, nullptr); + if (&real_pthread_join) + real_pthread_join(th, nullptr); } #else void *internal_start_thread(void *(*func)(void *), void *arg) { return 0; } @@ -1815,7 +1828,7 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const { #else uptr err = ucontext->uc_mcontext.gregs[REG_ERR]; #endif // SANITIZER_FREEBSD - return err & PF_WRITE ? WRITE : READ; + return err & PF_WRITE ? Write : Read; #elif defined(__mips__) uint32_t *exception_source; uint32_t faulty_instruction; @@ -1838,7 +1851,7 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const { case 0x2a: // swl case 0x2e: // swr #endif - return SignalContext::WRITE; + return SignalContext::Write; case 0x20: // lb case 0x24: // lbu @@ -1853,27 +1866,27 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const { case 0x22: // lwl case 0x26: // lwr #endif - return SignalContext::READ; + return SignalContext::Read; #if __mips_isa_rev == 6 case 0x3b: // pcrel op_code = (faulty_instruction >> 19) & 0x3; switch (op_code) { case 0x1: // lwpc case 0x2: // lwupc - return SignalContext::READ; + return SignalContext::Read; } #endif } - return SignalContext::UNKNOWN; + return SignalContext::Unknown; #elif defined(__arm__) static const uptr FSR_WRITE = 1U << 11; uptr fsr = ucontext->uc_mcontext.error_code; - return fsr & FSR_WRITE ? WRITE : READ; + return fsr & FSR_WRITE ? Write : Read; #elif defined(__aarch64__) static const u64 ESR_ELx_WNR = 1U << 6; u64 esr; - if (!Aarch64GetESR(ucontext, &esr)) return UNKNOWN; - return esr & ESR_ELx_WNR ? WRITE : READ; + if (!Aarch64GetESR(ucontext, &esr)) return Unknown; + return esr & ESR_ELx_WNR ? Write : Read; #elif defined(__sparc__) // Decode the instruction to determine the access type. // From OpenSolaris $SRC/uts/sun4/os/trap.c (get_accesstype). @@ -1889,7 +1902,7 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const { #endif #endif u32 instr = *(u32 *)pc; - return (instr >> 21) & 1 ? WRITE: READ; + return (instr >> 21) & 1 ? Write: Read; #elif defined(__riscv) #if SANITIZER_FREEBSD unsigned long pc = ucontext->uc_mcontext.mc_gpregs.gp_sepc; @@ -1909,7 +1922,7 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const { #if __riscv_xlen == 64 case 0b10'011: // c.ldsp (rd != x0) #endif - return rd ? SignalContext::READ : SignalContext::UNKNOWN; + return rd ? 
SignalContext::Read : SignalContext::Unknown; case 0b00'010: // c.lw #if __riscv_flen >= 32 && __riscv_xlen == 32 case 0b10'011: // c.flwsp @@ -1921,7 +1934,7 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const { case 0b00'001: // c.fld case 0b10'001: // c.fldsp #endif - return SignalContext::READ; + return SignalContext::Read; case 0b00'110: // c.sw case 0b10'110: // c.swsp #if __riscv_flen >= 32 || __riscv_xlen == 64 @@ -1932,9 +1945,9 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const { case 0b00'101: // c.fsd case 0b10'101: // c.fsdsp #endif - return SignalContext::WRITE; + return SignalContext::Write; default: - return SignalContext::UNKNOWN; + return SignalContext::Unknown; } } #endif @@ -1952,9 +1965,9 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const { #endif case 0b100: // lbu case 0b101: // lhu - return SignalContext::READ; + return SignalContext::Read; default: - return SignalContext::UNKNOWN; + return SignalContext::Unknown; } case 0b0100011: // stores switch (funct3) { @@ -1964,9 +1977,9 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const { #if __riscv_xlen == 64 case 0b011: // sd #endif - return SignalContext::WRITE; + return SignalContext::Write; default: - return SignalContext::UNKNOWN; + return SignalContext::Unknown; } #if __riscv_flen >= 32 case 0b0000111: // floating-point loads @@ -1975,9 +1988,9 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const { #if __riscv_flen == 64 case 0b011: // fld #endif - return SignalContext::READ; + return SignalContext::Read; default: - return SignalContext::UNKNOWN; + return SignalContext::Unknown; } case 0b0100111: // floating-point stores switch (funct3) { @@ -1985,17 +1998,17 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const { #if __riscv_flen == 64 case 0b011: // fsd #endif - return SignalContext::WRITE; + return SignalContext::Write; default: - return SignalContext::UNKNOWN; + return SignalContext::Unknown; } #endif default: - return SignalContext::UNKNOWN; + return SignalContext::Unknown; } #else (void)ucontext; - return UNKNOWN; // FIXME: Implement. + return Unknown; // FIXME: Implement. #endif } @@ -2070,12 +2083,19 @@ static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { *sp = ucontext->uc_mcontext.gregs[REG_UESP]; # endif #elif defined(__powerpc__) || defined(__powerpc64__) +# if SANITIZER_FREEBSD + ucontext_t *ucontext = (ucontext_t *)context; + *pc = ucontext->uc_mcontext.mc_srr0; + *sp = ucontext->uc_mcontext.mc_frame[1]; + *bp = ucontext->uc_mcontext.mc_frame[31]; +# else ucontext_t *ucontext = (ucontext_t*)context; *pc = ucontext->uc_mcontext.regs->nip; *sp = ucontext->uc_mcontext.regs->gpr[PT_R1]; // The powerpc{,64}-linux ABIs do not specify r31 as the frame // pointer, but GCC always uses r31 when we need a frame pointer. *bp = ucontext->uc_mcontext.regs->gpr[PT_R31]; +# endif #elif defined(__sparc__) #if defined(__arch64__) || defined(__sparcv9) #define STACK_BIAS 2047 @@ -2164,49 +2184,34 @@ void CheckASLR() { GetArgv()[0]); Die(); } -#elif SANITIZER_PPC64V2 - // Disable ASLR for Linux PPC64LE. 
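The RISC-V branch of GetWriteFlag above classifies the faulting access by fetching the instruction word and switching on its encoding fields. For reference, a stand-alone sketch of the field extraction it relies on; IsStoreInsn is an illustrative helper, not code from the patch:

#include <cstdint>

// RV32/RV64 base encoding: major opcode in bits [6:0], funct3 in [14:12].
bool IsStoreInsn(uint32_t instr) {
  uint32_t opcode = instr & 0x7f;
  uint32_t funct3 = (instr >> 12) & 0x7;
  // 0b0100011 is the S-type (integer store) opcode; funct3 selects
  // sb (000), sh (001), sw (010) and, on RV64, sd (011).
  return opcode == 0b0100011 && funct3 <= 0b011;
}
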
- int old_personality = personality(0xffffffff); - if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) { - VReport(1, "WARNING: Program is being run with address space layout " - "randomization (ASLR) enabled which prevents the thread and " - "memory sanitizers from working on powerpc64le.\n" - "ASLR will be disabled and the program re-executed.\n"); - CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1); - ReExec(); - } #elif SANITIZER_FREEBSD - int aslr_pie; - uptr len = sizeof(aslr_pie); -#if SANITIZER_WORDSIZE == 64 - if (UNLIKELY(internal_sysctlbyname("kern.elf64.aslr.pie_enable", - &aslr_pie, &len, NULL, 0) == -1)) { + int aslr_status; + if (UNLIKELY(procctl(P_PID, 0, PROC_ASLR_STATUS, &aslr_status) == -1)) { // We're making things less 'dramatic' here since - // the OID is not necessarily guaranteed to be here + // the cmd is not necessarily guaranteed to be here // just yet regarding FreeBSD release return; } - - if (aslr_pie > 0) { + if ((aslr_status & PROC_ASLR_ACTIVE) != 0) { Printf("This sanitizer is not compatible with enabled ASLR " "and binaries compiled with PIE\n"); Die(); } -#endif - // there might be 32 bits compat for 64 bits - if (UNLIKELY(internal_sysctlbyname("kern.elf32.aslr.pie_enable", - &aslr_pie, &len, NULL, 0) == -1)) { - return; - } - - if (aslr_pie > 0) { - Printf("This sanitizer is not compatible with enabled ASLR " - "and binaries compiled with PIE\n"); - Die(); +# elif SANITIZER_PPC64V2 + // Disable ASLR for Linux PPC64LE. + int old_personality = personality(0xffffffff); + if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) { + VReport(1, + "WARNING: Program is being run with address space layout " + "randomization (ASLR) enabled which prevents the thread and " + "memory sanitizers from working on powerpc64le.\n" + "ASLR will be disabled and the program re-executed.\n"); + CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1); + ReExec(); } -#else +# else // Do nothing -#endif +# endif } void CheckMPROTECT() { diff --git a/libsanitizer/sanitizer_common/sanitizer_linux.h b/libsanitizer/sanitizer_common/sanitizer_linux.h index 6a235db0ee2..ebd60e0b10f 100644 --- a/libsanitizer/sanitizer_common/sanitizer_linux.h +++ b/libsanitizer/sanitizer_common/sanitizer_linux.h @@ -55,6 +55,9 @@ struct ScopedBlockSignals { explicit ScopedBlockSignals(__sanitizer_sigset_t *copy); ~ScopedBlockSignals(); + ScopedBlockSignals &operator=(const ScopedBlockSignals &) = delete; + ScopedBlockSignals(const ScopedBlockSignals &) = delete; + private: __sanitizer_sigset_t saved_; }; diff --git a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp index 0c260b6b516..25ad825f568 100644 --- a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp @@ -216,7 +216,8 @@ void InitTlsSize() { } // On glibc x86_64, ThreadDescriptorSize() needs to be precise due to the usage // of g_tls_size. On other targets, ThreadDescriptorSize() is only used by lsan // to get the pointer to thread-specific data keys in the thread control block. -#if (SANITIZER_FREEBSD || SANITIZER_LINUX) && !SANITIZER_ANDROID +#if (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_SOLARIS) && \ + !SANITIZER_ANDROID && !SANITIZER_GO // sizeof(struct pthread) from glibc. 
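The new FreeBSD branch of CheckASLR above replaces the two kern.elf{32,64}.aslr.pie_enable sysctl probes with a single procctl(2) query against the current process. A stand-alone sketch of that call, under the assumption of a FreeBSD host; PROC_ASLR_STATUS may be absent on older releases, which is why the patch treats failure as "unknown" rather than fatal:

#include <sys/types.h>
#include <sys/procctl.h>
#include <stdio.h>

int main() {
  int status;
  // id 0 with P_PID means "this process".
  if (procctl(P_PID, 0, PROC_ASLR_STATUS, &status) == -1)
    return 1;  // command not supported on this release
  printf("ASLR is %s\n",
         (status & PROC_ASLR_ACTIVE) ? "active" : "inactive");
  return 0;
}
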
static atomic_uintptr_t thread_descriptor_size; @@ -319,7 +320,6 @@ static uptr TlsPreTcbSize() { } #endif -#if !SANITIZER_GO namespace { struct TlsBlock { uptr begin, end, align; @@ -407,9 +407,8 @@ __attribute__((unused)) static void GetStaticTlsBoundary(uptr *addr, uptr *size, *addr = ranges[l].begin; *size = ranges[r - 1].end - ranges[l].begin; } -#endif // !SANITIZER_GO #endif // (x86_64 || i386 || mips || ...) && (SANITIZER_FREEBSD || - // SANITIZER_LINUX) && !SANITIZER_ANDROID + // SANITIZER_LINUX) && !SANITIZER_ANDROID && !SANITIZER_GO #if SANITIZER_NETBSD static struct tls_tcb * ThreadSelfTlsTcb() { @@ -478,7 +477,7 @@ static void GetTls(uptr *addr, uptr *size) { const uptr pre_tcb_size = TlsPreTcbSize(); *addr = tp - pre_tcb_size; *size = g_tls_size + pre_tcb_size; -#elif SANITIZER_FREEBSD || SANITIZER_LINUX +#elif SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_SOLARIS uptr align; GetStaticTlsBoundary(addr, size, &align); #if defined(__x86_64__) || defined(__i386__) || defined(__s390__) || \ @@ -539,11 +538,6 @@ static void GetTls(uptr *addr, uptr *size) { *addr = (uptr)tcb->tcb_dtv[1]; } } -#elif SANITIZER_SOLARIS - // FIXME - *addr = 0; - *size = 0; -#else #error "Unknown OS" #endif } @@ -614,6 +608,34 @@ static int AddModuleSegments(const char *module_name, dl_phdr_info *info, bool writable = phdr->p_flags & PF_W; cur_module.addAddressRange(cur_beg, cur_end, executable, writable); + } else if (phdr->p_type == PT_NOTE) { +# ifdef NT_GNU_BUILD_ID + uptr off = 0; + while (off + sizeof(ElfW(Nhdr)) < phdr->p_memsz) { + auto *nhdr = reinterpret_cast(info->dlpi_addr + + phdr->p_vaddr + off); + constexpr auto kGnuNamesz = 4; // "GNU" with NUL-byte. + static_assert(kGnuNamesz % 4 == 0, "kGnuNameSize is aligned to 4."); + if (nhdr->n_type == NT_GNU_BUILD_ID && nhdr->n_namesz == kGnuNamesz) { + if (off + sizeof(ElfW(Nhdr)) + nhdr->n_namesz + nhdr->n_descsz > + phdr->p_memsz) { + // Something is very wrong, bail out instead of reading potentially + // arbitrary memory. + break; + } + const char *name = + reinterpret_cast(nhdr) + sizeof(*nhdr); + if (internal_memcmp(name, "GNU", 3) == 0) { + const char *value = reinterpret_cast(nhdr) + + sizeof(*nhdr) + kGnuNamesz; + cur_module.setUuid(value, nhdr->n_descsz); + break; + } + } + off += sizeof(*nhdr) + RoundUpTo(nhdr->n_namesz, 4) + + RoundUpTo(nhdr->n_descsz, 4); + } +# endif } } modules->push_back(cur_module); @@ -770,13 +792,9 @@ u32 GetNumberOfCPUs() { #elif SANITIZER_SOLARIS return sysconf(_SC_NPROCESSORS_ONLN); #else -#if defined(CPU_COUNT) cpu_set_t CPUs; CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0); return CPU_COUNT(&CPUs); -#else - return 1; -#endif #endif } diff --git a/libsanitizer/sanitizer_common/sanitizer_linux_s390.cpp b/libsanitizer/sanitizer_common/sanitizer_linux_s390.cpp index bb2f5b5f9f7..74db831b0aa 100644 --- a/libsanitizer/sanitizer_common/sanitizer_linux_s390.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_linux_s390.cpp @@ -57,8 +57,10 @@ uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd, uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, int *parent_tidptr, void *newtls, int *child_tidptr) { - if (!fn || !child_stack) - return -EINVAL; + if (!fn || !child_stack) { + errno = EINVAL; + return -1; + } CHECK_EQ(0, (uptr)child_stack % 16); // Minimum frame size. #ifdef __s390x__ @@ -71,9 +73,9 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, // And pass parameters. 
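The PT_NOTE branch added to AddModuleSegments above recovers each module's GNU build ID by walking its ELF note entries. The same walk can be reproduced with the public dl_iterate_phdr API; a glibc-flavoured sketch (PrintBuildId is an illustrative name, not from the patch):

#include <elf.h>
#include <link.h>
#include <stdio.h>
#include <string.h>

static int PrintBuildId(struct dl_phdr_info *info, size_t, void *) {
  for (int i = 0; i < info->dlpi_phnum; i++) {
    const ElfW(Phdr) *phdr = &info->dlpi_phdr[i];
    if (phdr->p_type != PT_NOTE)
      continue;
    const char *p = (const char *)(info->dlpi_addr + phdr->p_vaddr);
    const char *end = p + phdr->p_memsz;
    while (p + sizeof(ElfW(Nhdr)) < end) {
      const ElfW(Nhdr) *nhdr = (const ElfW(Nhdr) *)p;
      const char *name = p + sizeof(*nhdr);
      // n_namesz == 4 accounts for "GNU" plus its NUL terminator.
      if (nhdr->n_type == NT_GNU_BUILD_ID && nhdr->n_namesz == 4 &&
          memcmp(name, "GNU", 4) == 0) {
        const unsigned char *id = (const unsigned char *)name + 4;
        for (unsigned j = 0; j < nhdr->n_descsz; j++) printf("%02x", id[j]);
        printf("  %s\n", info->dlpi_name);
        break;
      }
      // Name and descriptor are both padded to 4-byte boundaries.
      p += sizeof(*nhdr) + ((nhdr->n_namesz + 3) & ~3u) +
           ((nhdr->n_descsz + 3) & ~3u);
    }
  }
  return 0;  // keep iterating over all loaded modules
}

int main() { dl_iterate_phdr(PrintBuildId, nullptr); }
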
  ((unsigned long *)child_stack)[1] = (uptr)fn;
  ((unsigned long *)child_stack)[2] = (uptr)arg;
-  register long res __asm__("r2");
+  register uptr res __asm__("r2");
   register void *__cstack __asm__("r2") = child_stack;
-  register int __flags __asm__("r3") = flags;
+  register long __flags __asm__("r3") = flags;
   register int * __ptidptr __asm__("r4") = parent_tidptr;
   register int * __ctidptr __asm__("r5") = child_tidptr;
   register void * __newtls __asm__("r6") = newtls;
@@ -113,6 +115,10 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
         "r"(__ctidptr), "r"(__newtls)
         : "memory", "cc");
+  if (res >= (uptr)-4095) {
+    errno = -res;
+    return -1;
+  }
   return res;
 }
diff --git a/libsanitizer/sanitizer_common/sanitizer_lzw.h b/libsanitizer/sanitizer_common/sanitizer_lzw.h
new file mode 100644
index 00000000000..42acfbdcea0
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_lzw.h
@@ -0,0 +1,159 @@
+//===-- sanitizer_lzw.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Lempel–Ziv–Welch encoding/decoding
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_LZW_H
+#define SANITIZER_LZW_H
+
+#include "sanitizer_dense_map.h"
+
+namespace __sanitizer {
+
+using LzwCodeType = u32;
+
+template <class T, class ItIn, class ItOut>
+ItOut LzwEncode(ItIn begin, ItIn end, ItOut out) {
+  using Substring = detail::DenseMapPair<LzwCodeType, T>;
+
+  // Sentinel value for substrings of len 1.
+  static constexpr LzwCodeType kNoPrefix =
+      Min(DenseMapInfo<Substring>::getEmptyKey().first,
+          DenseMapInfo<Substring>::getTombstoneKey().first) -
+      1;
+  DenseMap<Substring, LzwCodeType> prefix_to_code;
+  {
+    // Add all substrings of len 1 as the initial dictionary.
+    InternalMmapVector<T> dict_len1;
+    for (auto it = begin; it != end; ++it)
+      if (prefix_to_code.try_emplace({kNoPrefix, *it}, 0).second)
+        dict_len1.push_back(*it);
+
+    // Slightly helps with later delta encoding.
+    Sort(dict_len1.data(), dict_len1.size());
+
+    // For large sizeof(T) we have to store dict_len1. Smaller types like u8
+    // can just generate them.
+    *out = dict_len1.size();
+    ++out;
+
+    for (uptr i = 0; i != dict_len1.size(); ++i) {
+      // Remap after the Sort.
+      prefix_to_code[{kNoPrefix, dict_len1[i]}] = i;
+      *out = dict_len1[i];
+      ++out;
+    }
+    CHECK_EQ(prefix_to_code.size(), dict_len1.size());
+  }
+
+  if (begin == end)
+    return out;
+
+  // Main LZW encoding loop.
+  LzwCodeType match = prefix_to_code.find({kNoPrefix, *begin})->second;
+  ++begin;
+  for (auto it = begin; it != end; ++it) {
+    // Extend the match with the new item.
+    auto ins = prefix_to_code.try_emplace({match, *it}, prefix_to_code.size());
+    if (ins.second) {
+      // This is a new substring, but emit the code for the current match
+      // (before the extension). This allows the LZW decoder to recover the
+      // dictionary.
+      *out = match;
+      ++out;
+      // Reset the match to a single item, which must already be in the map.
+      match = prefix_to_code.find({kNoPrefix, *it})->second;
+    } else {
+      // Already known, use it as the current match.
+      match = ins.first->second;
+    }
+  }
+
+  *out = match;
+  ++out;
+
+  return out;
+}
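For orientation between the two halves of this new header: the encoder above emits the len-1 dictionary followed by one code per dictionary miss, and the decoder below inverts that stream. A round-trip sketch of the assumed usage (not part of the patch; worst-case output is the dictionary plus one code per input element):

  InternalMmapVector<u64> data;            // filled by the caller
  InternalMmapVector<LzwCodeType> codes;
  codes.resize(2 * data.size() + 1);       // dictionary + one code per item
  LzwCodeType *codes_end =
      LzwEncode<u64>(data.data(), data.data() + data.size(), codes.data());
  InternalMmapVector<u64> decoded;
  decoded.resize(data.size());
  LzwDecode<u64>(codes.data(), codes_end, decoded.data());
  // decoded now holds the original contents of data.
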
+
+template <class T, class ItIn, class ItOut>
+ItOut LzwDecode(ItIn begin, ItIn end, ItOut out) {
+  if (begin == end)
+    return out;
+
+  // Load the dictionary of len 1 substrings. These correspond to the lowest
+  // codes.
+  InternalMmapVector<T> dict_len1(*begin);
+  ++begin;
+
+  if (begin == end)
+    return out;
+
+  for (auto& v : dict_len1) {
+    v = *begin;
+    ++begin;
+  }
+
+  // Substrings of len 2 and up. Indexes are shifted because
+  // [0, dict_len1.size()) is stored in dict_len1. Substrings get here after
+  // being emitted to the output, so we can use output positions.
+  InternalMmapVector<detail::DenseMapPair<ItOut, ItOut>> code_to_substr;
+
+  // Copies already emitted substrings into the output again.
+  auto copy = [&code_to_substr, &dict_len1](LzwCodeType code, ItOut out) {
+    if (code < dict_len1.size()) {
+      *out = dict_len1[code];
+      ++out;
+      return out;
+    }
+    const auto& s = code_to_substr[code - dict_len1.size()];
+
+    for (ItOut it = s.first; it != s.second; ++it, ++out) *out = *it;
+    return out;
+  };
+
+  // Returns the length of the substring with the given code.
+  auto code_to_len = [&code_to_substr, &dict_len1](LzwCodeType code) -> uptr {
+    if (code < dict_len1.size())
+      return 1;
+    const auto& s = code_to_substr[code - dict_len1.size()];
+    return s.second - s.first;
+  };
+
+  // Main LZW decoding loop.
+  LzwCodeType prev_code = *begin;
+  ++begin;
+  out = copy(prev_code, out);
+  for (auto it = begin; it != end; ++it) {
+    LzwCodeType code = *it;
+    auto start = out;
+    if (code == dict_len1.size() + code_to_substr.size()) {
+      // Special LZW case. The code is not in the dictionary yet. This is
+      // possible only when the new substring is the same as the previous one
+      // plus the first item of the previous substring. We can emit that in
+      // two steps.
+      out = copy(prev_code, out);
+      *out = *start;
+      ++out;
+    } else {
+      out = copy(code, out);
+    }
+
+    // Every time the encoder emits a code, it also creates a substring of
+    // len + 1 including the first item of the just emitted substring. Do the
+    // same here.
+    uptr len = code_to_len(prev_code);
+    code_to_substr.push_back({start - len, start + 1});
+
+    prev_code = code;
+  }
+  return out;
+}
+
+}  // namespace __sanitizer
+#endif
diff --git a/libsanitizer/sanitizer_common/sanitizer_mac.cpp b/libsanitizer/sanitizer_common/sanitizer_mac.cpp
index a61cde891b9..05512a576ad 100644
--- a/libsanitizer/sanitizer_common/sanitizer_mac.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_mac.cpp
@@ -25,6 +25,7 @@
 #include "sanitizer_common.h"
 #include "sanitizer_file.h"
 #include "sanitizer_flags.h"
+#include "sanitizer_interface_internal.h"
 #include "sanitizer_internal_defs.h"
 #include "sanitizer_libc.h"
 #include "sanitizer_platform_limits_posix.h"
@@ -37,7 +38,7 @@
 extern char **environ;
 #endif
-#if defined(__has_include) && __has_include(<os/trace.h>) && defined(__BLOCKS__)
+#if defined(__has_include) && __has_include(<os/trace.h>)
 #define SANITIZER_OS_TRACE 1
 #include <os/trace.h>
 #else
@@ -70,15 +71,7 @@ extern "C" {
 #include
 #include
 #include
-#if defined(__has_builtin) && __has_builtin(__builtin_os_log_format)
-#  include <os/log.h>
-#else
-  /* Without support for __builtin_os_log_format, fall back to the older
     method.
*/ -# define OS_LOG_DEFAULT 0 -# define os_log_error(A,B,C) \ - asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", (C)); -#endif +#include #include #include #include @@ -273,30 +266,32 @@ int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp, static fd_t internal_spawn_impl(const char *argv[], const char *envp[], pid_t *pid) { - fd_t master_fd = kInvalidFd; - fd_t slave_fd = kInvalidFd; + fd_t primary_fd = kInvalidFd; + fd_t secondary_fd = kInvalidFd; auto fd_closer = at_scope_exit([&] { - internal_close(master_fd); - internal_close(slave_fd); + internal_close(primary_fd); + internal_close(secondary_fd); }); // We need a new pseudoterminal to avoid buffering problems. The 'atos' tool // in particular detects when it's talking to a pipe and forgets to flush the // output stream after sending a response. - master_fd = posix_openpt(O_RDWR); - if (master_fd == kInvalidFd) return kInvalidFd; + primary_fd = posix_openpt(O_RDWR); + if (primary_fd == kInvalidFd) + return kInvalidFd; - int res = grantpt(master_fd) || unlockpt(master_fd); + int res = grantpt(primary_fd) || unlockpt(primary_fd); if (res != 0) return kInvalidFd; // Use TIOCPTYGNAME instead of ptsname() to avoid threading problems. - char slave_pty_name[128]; - res = ioctl(master_fd, TIOCPTYGNAME, slave_pty_name); + char secondary_pty_name[128]; + res = ioctl(primary_fd, TIOCPTYGNAME, secondary_pty_name); if (res == -1) return kInvalidFd; - slave_fd = internal_open(slave_pty_name, O_RDWR); - if (slave_fd == kInvalidFd) return kInvalidFd; + secondary_fd = internal_open(secondary_pty_name, O_RDWR); + if (secondary_fd == kInvalidFd) + return kInvalidFd; // File descriptor actions posix_spawn_file_actions_t acts; @@ -307,9 +302,9 @@ static fd_t internal_spawn_impl(const char *argv[], const char *envp[], posix_spawn_file_actions_destroy(&acts); }); - res = posix_spawn_file_actions_adddup2(&acts, slave_fd, STDIN_FILENO) || - posix_spawn_file_actions_adddup2(&acts, slave_fd, STDOUT_FILENO) || - posix_spawn_file_actions_addclose(&acts, slave_fd); + res = posix_spawn_file_actions_adddup2(&acts, secondary_fd, STDIN_FILENO) || + posix_spawn_file_actions_adddup2(&acts, secondary_fd, STDOUT_FILENO) || + posix_spawn_file_actions_addclose(&acts, secondary_fd); if (res != 0) return kInvalidFd; // Spawn attributes @@ -334,14 +329,14 @@ static fd_t internal_spawn_impl(const char *argv[], const char *envp[], // Disable echo in the new terminal, disable CR. struct termios termflags; - tcgetattr(master_fd, &termflags); + tcgetattr(primary_fd, &termflags); termflags.c_oflag &= ~ONLCR; termflags.c_lflag &= ~ECHO; - tcsetattr(master_fd, TCSANOW, &termflags); + tcsetattr(primary_fd, TCSANOW, &termflags); - // On success, do not close master_fd on scope exit. - fd_t fd = master_fd; - master_fd = kInvalidFd; + // On success, do not close primary_fd on scope exit. + fd_t fd = primary_fd; + primary_fd = kInvalidFd; return fd; } @@ -398,6 +393,13 @@ bool FileExists(const char *filename) { return S_ISREG(st.st_mode); } +bool DirExists(const char *path) { + struct stat st; + if (stat(path, &st)) + return false; + return S_ISDIR(st.st_mode); +} + tid_t GetTid() { tid_t tid; pthread_threadid_np(nullptr, &tid); @@ -877,9 +879,9 @@ void LogFullErrorReport(const char *buffer) { SignalContext::WriteFlag SignalContext::GetWriteFlag() const { #if defined(__x86_64__) || defined(__i386__) ucontext_t *ucontext = static_cast(context); - return ucontext->uc_mcontext->__es.__err & 2 /*T_PF_WRITE*/ ? 
WRITE : READ; + return ucontext->uc_mcontext->__es.__err & 2 /*T_PF_WRITE*/ ? Write : Read; #else - return UNKNOWN; + return Unknown; #endif } @@ -894,18 +896,14 @@ bool SignalContext::IsTrueFaultingAddress() const { (uptr)ptrauth_strip( \ (void *)arm_thread_state64_get_##r(ucontext->uc_mcontext->__ss), 0) #else - #define AARCH64_GET_REG(r) ucontext->uc_mcontext->__ss.__##r + #define AARCH64_GET_REG(r) (uptr)ucontext->uc_mcontext->__ss.__##r #endif static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { ucontext_t *ucontext = (ucontext_t*)context; # if defined(__aarch64__) *pc = AARCH64_GET_REG(pc); -# if defined(__IPHONE_8_0) && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_8_0 *bp = AARCH64_GET_REG(fp); -# else - *bp = AARCH64_GET_REG(lr); -# endif *sp = AARCH64_GET_REG(sp); # elif defined(__x86_64__) *pc = ucontext->uc_mcontext->__ss.__rip; @@ -1057,12 +1055,12 @@ void MaybeReexec() { } // Verify that interceptors really work. We'll use dlsym to locate - // "pthread_create", if interceptors are working, it should really point to - // "wrap_pthread_create" within our own dylib. - Dl_info info_pthread_create; - void *dlopen_addr = dlsym(RTLD_DEFAULT, "pthread_create"); - RAW_CHECK(dladdr(dlopen_addr, &info_pthread_create)); - if (internal_strcmp(info.dli_fname, info_pthread_create.dli_fname) != 0) { + // "puts", if interceptors are working, it should really point to + // "wrap_puts" within our own dylib. + Dl_info info_puts; + void *dlopen_addr = dlsym(RTLD_DEFAULT, "puts"); + RAW_CHECK(dladdr(dlopen_addr, &info_puts)); + if (internal_strcmp(info.dli_fname, info_puts.dli_fname) != 0) { Report( "ERROR: Interceptors are not working. This may be because %s is " "loaded too late (e.g. via dlopen). Please launch the executable " @@ -1229,7 +1227,7 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale, uptr largest_gap_found = 0; uptr max_occupied_addr = 0; - VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size); + VReport(2, "FindDynamicShadowStart, space_size = %p\n", (void *)space_size); uptr shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity, &largest_gap_found, &max_occupied_addr); @@ -1238,20 +1236,21 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale, VReport( 2, "Shadow doesn't fit, largest_gap_found = %p, max_occupied_addr = %p\n", - largest_gap_found, max_occupied_addr); + (void *)largest_gap_found, (void *)max_occupied_addr); uptr new_max_vm = RoundDownTo(largest_gap_found << shadow_scale, alignment); if (new_max_vm < max_occupied_addr) { Report("Unable to find a memory range for dynamic shadow.\n"); Report( "space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, " "new_max_vm = %p\n", - space_size, largest_gap_found, max_occupied_addr, new_max_vm); + (void *)space_size, (void *)largest_gap_found, + (void *)max_occupied_addr, (void *)new_max_vm); CHECK(0 && "cannot place shadow"); } RestrictMemoryToMaxAddress(new_max_vm); high_mem_end = new_max_vm - 1; space_size = (high_mem_end >> shadow_scale) + left_padding; - VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size); + VReport(2, "FindDynamicShadowStart, space_size = %p\n", (void *)space_size); shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity, nullptr, nullptr); if (shadow_start == 0) { @@ -1331,7 +1330,7 @@ void SignalContext::DumpAllRegisters(void *context) { # define DUMPREG64(r) \ Printf("%s = 0x%016llx ", #r, ucontext->uc_mcontext->__ss.__ ## r); # define DUMPREGA64(r) \ - Printf(" %s = 0x%016llx ", #r, 
AARCH64_GET_REG(r)); + Printf(" %s = 0x%016lx ", #r, AARCH64_GET_REG(r)); # define DUMPREG32(r) \ Printf("%s = 0x%08x ", #r, ucontext->uc_mcontext->__ss.__ ## r); # define DUMPREG_(r) Printf(" "); DUMPREG(r); @@ -1401,7 +1400,7 @@ void DumpProcessMap() { char uuid_str[128]; FormatUUID(uuid_str, sizeof(uuid_str), modules[i].uuid()); Printf("0x%zx-0x%zx %s (%s) %s\n", modules[i].base_address(), - modules[i].max_executable_address(), modules[i].full_name(), + modules[i].max_address(), modules[i].full_name(), ModuleArchToString(modules[i].arch()), uuid_str); } Printf("End of module map.\n"); diff --git a/libsanitizer/sanitizer_common/sanitizer_mac.h b/libsanitizer/sanitizer_common/sanitizer_mac.h index 96a5986a47a..0b6af5a3c0e 100644 --- a/libsanitizer/sanitizer_common/sanitizer_mac.h +++ b/libsanitizer/sanitizer_common/sanitizer_mac.h @@ -14,26 +14,6 @@ #include "sanitizer_common.h" #include "sanitizer_platform.h" - -/* TARGET_OS_OSX is not present in SDKs before Darwin16 (macOS 10.12) use - TARGET_OS_MAC (we have no support for iOS in any form for these versions, - so there's no ambiguity). */ -#if !defined(TARGET_OS_OSX) && TARGET_OS_MAC -# define TARGET_OS_OSX 1 -#endif - -/* Other TARGET_OS_xxx are not present on earlier versions, define them to - 0 (we have no support for them; they are not valid targets anyway). */ -#ifndef TARGET_OS_IOS -#define TARGET_OS_IOS 0 -#endif -#ifndef TARGET_OS_TV -#define TARGET_OS_TV 0 -#endif -#ifndef TARGET_OS_WATCH -#define TARGET_OS_WATCH 0 -#endif - #if SANITIZER_MAC #include "sanitizer_posix.h" diff --git a/libsanitizer/sanitizer_common/sanitizer_mutex.h b/libsanitizer/sanitizer_common/sanitizer_mutex.h index 5ec6efaa649..d2188a9e6d6 100644 --- a/libsanitizer/sanitizer_common/sanitizer_mutex.h +++ b/libsanitizer/sanitizer_common/sanitizer_mutex.h @@ -20,25 +20,27 @@ namespace __sanitizer { -class MUTEX StaticSpinMutex { +class SANITIZER_MUTEX StaticSpinMutex { public: void Init() { atomic_store(&state_, 0, memory_order_relaxed); } - void Lock() ACQUIRE() { + void Lock() SANITIZER_ACQUIRE() { if (LIKELY(TryLock())) return; LockSlow(); } - bool TryLock() TRY_ACQUIRE(true) { + bool TryLock() SANITIZER_TRY_ACQUIRE(true) { return atomic_exchange(&state_, 1, memory_order_acquire) == 0; } - void Unlock() RELEASE() { atomic_store(&state_, 0, memory_order_release); } + void Unlock() SANITIZER_RELEASE() { + atomic_store(&state_, 0, memory_order_release); + } - void CheckLocked() const CHECK_LOCKED() { + void CheckLocked() const SANITIZER_CHECK_LOCKED() { CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1); } @@ -48,7 +50,7 @@ class MUTEX StaticSpinMutex { void LockSlow(); }; -class MUTEX SpinMutex : public StaticSpinMutex { +class SANITIZER_MUTEX SpinMutex : public StaticSpinMutex { public: SpinMutex() { Init(); @@ -156,12 +158,12 @@ class CheckedMutex { // Derive from CheckedMutex for the purposes of EBO. // We could make it a field marked with [[no_unique_address]], // but this attribute is not supported by some older compilers. 
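The MUTEX/ACQUIRE/RELEASE macros being renamed throughout sanitizer_mutex.h (to SANITIZER_MUTEX and friends, presumably to stop colliding with user macros) wrap Clang's thread-safety analysis attributes. A stand-alone illustration of what such macros expand to; the names below are generic stand-ins, not the sanitizer's own definitions:

#include <mutex>

#if defined(__clang__)
# define CAPABILITY(x)    __attribute__((capability(x)))
# define ACQUIRE_CAP(...) __attribute__((acquire_capability(__VA_ARGS__)))
# define RELEASE_CAP(...) __attribute__((release_capability(__VA_ARGS__)))
#else
# define CAPABILITY(x)
# define ACQUIRE_CAP(...)
# define RELEASE_CAP(...)
#endif

class CAPABILITY("mutex") MyMutex {
 public:
  void Lock() ACQUIRE_CAP() { mu_.lock(); }
  void Unlock() RELEASE_CAP() { mu_.unlock(); }

 private:
  std::mutex mu_;
};

Compiling with clang -Wthread-safety then diagnoses mismatched Lock/Unlock pairs at compile time; with other compilers the annotations vanish.
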
-class MUTEX Mutex : CheckedMutex { +class SANITIZER_MUTEX Mutex : CheckedMutex { public: explicit constexpr Mutex(MutexType type = MutexUnchecked) : CheckedMutex(type) {} - void Lock() ACQUIRE() { + void Lock() SANITIZER_ACQUIRE() { CheckedMutex::Lock(); u64 reset_mask = ~0ull; u64 state = atomic_load_relaxed(&state_); @@ -206,7 +208,21 @@ class MUTEX Mutex : CheckedMutex { } } - void Unlock() RELEASE() { + bool TryLock() SANITIZER_TRY_ACQUIRE(true) { + u64 state = atomic_load_relaxed(&state_); + for (;;) { + if (UNLIKELY(state & (kWriterLock | kReaderLockMask))) + return false; + // The mutex is not read-/write-locked, try to lock. + if (LIKELY(atomic_compare_exchange_weak( + &state_, &state, state | kWriterLock, memory_order_acquire))) { + CheckedMutex::Lock(); + return true; + } + } + } + + void Unlock() SANITIZER_RELEASE() { CheckedMutex::Unlock(); bool wake_writer; u64 wake_readers; @@ -234,7 +250,7 @@ class MUTEX Mutex : CheckedMutex { readers_.Post(wake_readers); } - void ReadLock() ACQUIRE_SHARED() { + void ReadLock() SANITIZER_ACQUIRE_SHARED() { CheckedMutex::Lock(); u64 reset_mask = ~0ull; u64 state = atomic_load_relaxed(&state_); @@ -271,7 +287,7 @@ class MUTEX Mutex : CheckedMutex { } } - void ReadUnlock() RELEASE_SHARED() { + void ReadUnlock() SANITIZER_RELEASE_SHARED() { CheckedMutex::Unlock(); bool wake; u64 new_state; @@ -297,13 +313,13 @@ class MUTEX Mutex : CheckedMutex { // owns the mutex but a child checks that it is locked. Rather than // maintaining complex state to work around those situations, the check only // checks that the mutex is owned. - void CheckWriteLocked() const CHECK_LOCKED() { + void CheckWriteLocked() const SANITIZER_CHECK_LOCKED() { CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock); } - void CheckLocked() const CHECK_LOCKED() { CheckWriteLocked(); } + void CheckLocked() const SANITIZER_CHECK_LOCKED() { CheckWriteLocked(); } - void CheckReadLocked() const CHECK_LOCKED() { + void CheckReadLocked() const SANITIZER_CHECK_LOCKED() { CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask); } @@ -361,13 +377,13 @@ void FutexWait(atomic_uint32_t *p, u32 cmp); void FutexWake(atomic_uint32_t *p, u32 count); template -class SCOPED_LOCK GenericScopedLock { +class SANITIZER_SCOPED_LOCK GenericScopedLock { public: - explicit GenericScopedLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) { + explicit GenericScopedLock(MutexType *mu) SANITIZER_ACQUIRE(mu) : mu_(mu) { mu_->Lock(); } - ~GenericScopedLock() RELEASE() { mu_->Unlock(); } + ~GenericScopedLock() SANITIZER_RELEASE() { mu_->Unlock(); } private: MutexType *mu_; @@ -377,13 +393,14 @@ class SCOPED_LOCK GenericScopedLock { }; template -class SCOPED_LOCK GenericScopedReadLock { +class SANITIZER_SCOPED_LOCK GenericScopedReadLock { public: - explicit GenericScopedReadLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) { + explicit GenericScopedReadLock(MutexType *mu) SANITIZER_ACQUIRE(mu) + : mu_(mu) { mu_->ReadLock(); } - ~GenericScopedReadLock() RELEASE() { mu_->ReadUnlock(); } + ~GenericScopedReadLock() SANITIZER_RELEASE() { mu_->ReadUnlock(); } private: MutexType *mu_; @@ -393,10 +410,10 @@ class SCOPED_LOCK GenericScopedReadLock { }; template -class SCOPED_LOCK GenericScopedRWLock { +class SANITIZER_SCOPED_LOCK GenericScopedRWLock { public: ALWAYS_INLINE explicit GenericScopedRWLock(MutexType *mu, bool write) - ACQUIRE(mu) + SANITIZER_ACQUIRE(mu) : mu_(mu), write_(write) { if (write_) mu_->Lock(); @@ -404,7 +421,7 @@ class SCOPED_LOCK GenericScopedRWLock { mu_->ReadLock(); } - ALWAYS_INLINE 
~GenericScopedRWLock() RELEASE() { + ALWAYS_INLINE ~GenericScopedRWLock() SANITIZER_RELEASE() { if (write_) mu_->Unlock(); else diff --git a/libsanitizer/sanitizer_common/sanitizer_persistent_allocator.h b/libsanitizer/sanitizer_common/sanitizer_persistent_allocator.h deleted file mode 100644 index e18b0030567..00000000000 --- a/libsanitizer/sanitizer_common/sanitizer_persistent_allocator.h +++ /dev/null @@ -1,110 +0,0 @@ -//===-- sanitizer_persistent_allocator.h ------------------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// A fast memory allocator that does not support free() nor realloc(). -// All allocations are forever. -//===----------------------------------------------------------------------===// - -#ifndef SANITIZER_PERSISTENT_ALLOCATOR_H -#define SANITIZER_PERSISTENT_ALLOCATOR_H - -#include "sanitizer_internal_defs.h" -#include "sanitizer_mutex.h" -#include "sanitizer_atomic.h" -#include "sanitizer_common.h" - -namespace __sanitizer { - -template -class PersistentAllocator { - public: - T *alloc(uptr count = 1); - uptr allocated() const { return atomic_load_relaxed(&mapped_size); } - - void TestOnlyUnmap(); - - private: - T *tryAlloc(uptr count); - T *refillAndAlloc(uptr count); - mutable StaticSpinMutex mtx; // Protects alloc of new blocks. - atomic_uintptr_t region_pos; // Region allocator for Node's. - atomic_uintptr_t region_end; - atomic_uintptr_t mapped_size; - - struct BlockInfo { - const BlockInfo *next; - uptr ptr; - uptr size; - }; - const BlockInfo *curr; -}; - -template -inline T *PersistentAllocator::tryAlloc(uptr count) { - // Optimisic lock-free allocation, essentially try to bump the region ptr. - for (;;) { - uptr cmp = atomic_load(®ion_pos, memory_order_acquire); - uptr end = atomic_load(®ion_end, memory_order_acquire); - uptr size = count * sizeof(T); - if (cmp == 0 || cmp + size > end) - return nullptr; - if (atomic_compare_exchange_weak(®ion_pos, &cmp, cmp + size, - memory_order_acquire)) - return reinterpret_cast(cmp); - } -} - -template -inline T *PersistentAllocator::alloc(uptr count) { - // First, try to allocate optimisitically. - T *s = tryAlloc(count); - if (LIKELY(s)) - return s; - return refillAndAlloc(count); -} - -template -inline T *PersistentAllocator::refillAndAlloc(uptr count) { - // If failed, lock, retry and alloc new superblock. 
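Both the new Mutex::TryLock above and the tryAlloc of the PersistentAllocator being deleted here follow the same optimistic pattern: load the shared word, compute the desired successor value, and publish it with a weak compare-exchange, looping on contention. Distilled into a stand-alone sketch (illustrative type, not from the patch):

#include <atomic>
#include <cstddef>

struct BumpRegion {
  std::atomic<char *> pos{nullptr};
  char *end = nullptr;  // region setup elided

  // Lock-free bump allocation: returns nullptr when the region is full.
  char *TryAlloc(size_t size) {
    char *cur = pos.load(std::memory_order_acquire);
    for (;;) {
      if (!cur || cur + size > end)
        return nullptr;
      // On failure, compare_exchange_weak reloads `cur`; just loop.
      if (pos.compare_exchange_weak(cur, cur + size,
                                    std::memory_order_acquire))
        return cur;
    }
  }
};
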
- SpinMutexLock l(&mtx); - for (;;) { - T *s = tryAlloc(count); - if (s) - return s; - atomic_store(®ion_pos, 0, memory_order_relaxed); - uptr size = count * sizeof(T) + sizeof(BlockInfo); - uptr allocsz = RoundUpTo(Max(size, 64u * 1024u), GetPageSizeCached()); - uptr mem = (uptr)MmapOrDie(allocsz, "stack depot"); - BlockInfo *new_block = (BlockInfo *)(mem + allocsz) - 1; - new_block->next = curr; - new_block->ptr = mem; - new_block->size = allocsz; - curr = new_block; - - atomic_fetch_add(&mapped_size, allocsz, memory_order_relaxed); - - allocsz -= sizeof(BlockInfo); - atomic_store(®ion_end, mem + allocsz, memory_order_release); - atomic_store(®ion_pos, mem, memory_order_release); - } -} - -template -void PersistentAllocator::TestOnlyUnmap() { - while (curr) { - uptr mem = curr->ptr; - uptr allocsz = curr->size; - curr = curr->next; - UnmapOrDie((void *)mem, allocsz); - } - internal_memset(this, 0, sizeof(*this)); -} - -} // namespace __sanitizer - -#endif // SANITIZER_PERSISTENT_ALLOCATOR_H diff --git a/libsanitizer/sanitizer_common/sanitizer_platform.h b/libsanitizer/sanitizer_common/sanitizer_platform.h index 3153de34e5a..8fe0d831431 100644 --- a/libsanitizer/sanitizer_common/sanitizer_platform.h +++ b/libsanitizer/sanitizer_common/sanitizer_platform.h @@ -22,103 +22,110 @@ // function declarations into a .S file which doesn't compile. // https://crbug.com/1162741 #if __has_include() && !defined(__ANDROID__) -#include +# include #endif #if defined(__linux__) -# define SANITIZER_LINUX 1 +# define SANITIZER_LINUX 1 #else -# define SANITIZER_LINUX 0 +# define SANITIZER_LINUX 0 #endif #if defined(__GLIBC__) -# define SANITIZER_GLIBC 1 +# define SANITIZER_GLIBC 1 #else -# define SANITIZER_GLIBC 0 +# define SANITIZER_GLIBC 0 #endif #if defined(__FreeBSD__) -# define SANITIZER_FREEBSD 1 +# define SANITIZER_FREEBSD 1 #else -# define SANITIZER_FREEBSD 0 +# define SANITIZER_FREEBSD 0 #endif #if defined(__NetBSD__) -# define SANITIZER_NETBSD 1 +# define SANITIZER_NETBSD 1 #else -# define SANITIZER_NETBSD 0 +# define SANITIZER_NETBSD 0 #endif #if defined(__sun__) && defined(__svr4__) -# define SANITIZER_SOLARIS 1 +# define SANITIZER_SOLARIS 1 #else -# define SANITIZER_SOLARIS 0 +# define SANITIZER_SOLARIS 0 #endif #if defined(__APPLE__) -# define SANITIZER_MAC 1 -# include -# if TARGET_OS_OSX -# define SANITIZER_OSX 1 -# else -# define SANITIZER_OSX 0 -# endif -# if TARGET_OS_IPHONE -# define SANITIZER_IOS 1 -# else -# define SANITIZER_IOS 0 -# endif -# if TARGET_OS_SIMULATOR -# define SANITIZER_IOSSIM 1 -# else -# define SANITIZER_IOSSIM 0 -# endif +# define SANITIZER_MAC 1 +# include +# if TARGET_OS_OSX +# define SANITIZER_OSX 1 +# else +# define SANITIZER_OSX 0 +# endif +# if TARGET_OS_IPHONE +# define SANITIZER_IOS 1 +# else +# define SANITIZER_IOS 0 +# endif +# if TARGET_OS_SIMULATOR +# define SANITIZER_IOSSIM 1 +# else +# define SANITIZER_IOSSIM 0 +# endif #else -# define SANITIZER_MAC 0 -# define SANITIZER_IOS 0 -# define SANITIZER_IOSSIM 0 -# define SANITIZER_OSX 0 +# define SANITIZER_MAC 0 +# define SANITIZER_IOS 0 +# define SANITIZER_IOSSIM 0 +# define SANITIZER_OSX 0 #endif #if defined(__APPLE__) && TARGET_OS_IPHONE && TARGET_OS_WATCH -# define SANITIZER_WATCHOS 1 +# define SANITIZER_WATCHOS 1 #else -# define SANITIZER_WATCHOS 0 +# define SANITIZER_WATCHOS 0 #endif #if defined(__APPLE__) && TARGET_OS_IPHONE && TARGET_OS_TV -# define SANITIZER_TVOS 1 +# define SANITIZER_TVOS 1 #else -# define SANITIZER_TVOS 0 +# define SANITIZER_TVOS 0 #endif #if defined(_WIN32) -# define 
SANITIZER_WINDOWS 1 +# define SANITIZER_WINDOWS 1 #else -# define SANITIZER_WINDOWS 0 +# define SANITIZER_WINDOWS 0 #endif #if defined(_WIN64) -# define SANITIZER_WINDOWS64 1 +# define SANITIZER_WINDOWS64 1 #else -# define SANITIZER_WINDOWS64 0 +# define SANITIZER_WINDOWS64 0 #endif #if defined(__ANDROID__) -# define SANITIZER_ANDROID 1 +# define SANITIZER_ANDROID 1 #else -# define SANITIZER_ANDROID 0 +# define SANITIZER_ANDROID 0 #endif #if defined(__Fuchsia__) -# define SANITIZER_FUCHSIA 1 +# define SANITIZER_FUCHSIA 1 +#else +# define SANITIZER_FUCHSIA 0 +#endif + +// Assume linux that is not glibc or android is musl libc. +#if SANITIZER_LINUX && !SANITIZER_GLIBC && !SANITIZER_ANDROID +# define SANITIZER_MUSL 1 #else -# define SANITIZER_FUCHSIA 0 +# define SANITIZER_MUSL 0 #endif -#define SANITIZER_POSIX \ +#define SANITIZER_POSIX \ (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC || \ - SANITIZER_NETBSD || SANITIZER_SOLARIS) + SANITIZER_NETBSD || SANITIZER_SOLARIS) #if __LP64__ || defined(_WIN64) # define SANITIZER_WORDSIZE 64 @@ -127,58 +134,64 @@ #endif #if SANITIZER_WORDSIZE == 64 -# define FIRST_32_SECOND_64(a, b) (b) +# define FIRST_32_SECOND_64(a, b) (b) #else -# define FIRST_32_SECOND_64(a, b) (a) +# define FIRST_32_SECOND_64(a, b) (a) #endif #if defined(__x86_64__) && !defined(_LP64) -# define SANITIZER_X32 1 +# define SANITIZER_X32 1 #else -# define SANITIZER_X32 0 +# define SANITIZER_X32 0 +#endif + +#if defined(__x86_64__) || defined(_M_X64) +# define SANITIZER_X64 1 +#else +# define SANITIZER_X64 0 #endif #if defined(__i386__) || defined(_M_IX86) -# define SANITIZER_I386 1 +# define SANITIZER_I386 1 #else -# define SANITIZER_I386 0 +# define SANITIZER_I386 0 #endif #if defined(__mips__) -# define SANITIZER_MIPS 1 -# if defined(__mips64) +# define SANITIZER_MIPS 1 +# if defined(__mips64) +# define SANITIZER_MIPS32 0 +# define SANITIZER_MIPS64 1 +# else +# define SANITIZER_MIPS32 1 +# define SANITIZER_MIPS64 0 +# endif +#else +# define SANITIZER_MIPS 0 # define SANITIZER_MIPS32 0 -# define SANITIZER_MIPS64 1 -# else -# define SANITIZER_MIPS32 1 # define SANITIZER_MIPS64 0 -# endif -#else -# define SANITIZER_MIPS 0 -# define SANITIZER_MIPS32 0 -# define SANITIZER_MIPS64 0 #endif #if defined(__s390__) -# define SANITIZER_S390 1 -# if defined(__s390x__) +# define SANITIZER_S390 1 +# if defined(__s390x__) +# define SANITIZER_S390_31 0 +# define SANITIZER_S390_64 1 +# else +# define SANITIZER_S390_31 1 +# define SANITIZER_S390_64 0 +# endif +#else +# define SANITIZER_S390 0 # define SANITIZER_S390_31 0 -# define SANITIZER_S390_64 1 -# else -# define SANITIZER_S390_31 1 # define SANITIZER_S390_64 0 -# endif -#else -# define SANITIZER_S390 0 -# define SANITIZER_S390_31 0 -# define SANITIZER_S390_64 0 #endif #if defined(__powerpc__) -# define SANITIZER_PPC 1 -# if defined(__powerpc64__) -# define SANITIZER_PPC32 0 -# define SANITIZER_PPC64 1 +# define SANITIZER_PPC 1 +# if defined(__powerpc64__) +# define SANITIZER_PPC32 0 +# define SANITIZER_PPC64 1 // 64-bit PPC has two ABIs (v1 and v2). The old powerpc64 target is // big-endian, and uses v1 ABI (known for its function descriptors), // while the new powerpc64le target is little-endian and uses v2. @@ -186,43 +199,49 @@ // (eg. big-endian v2), but you won't find such combinations in the wild // (it'd require bootstrapping a whole system, which would be quite painful // - there's no target triple for that). LLVM doesn't support them either. 
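One genuinely new definition hides inside this reindentation: SANITIZER_MUSL, inferred by elimination, as the comment above says. Restated compactly in terms of the underlying compiler macros (ASSUME_MUSL is an illustrative stand-in, not the real name):

#if defined(__linux__) && !defined(__GLIBC__) && !defined(__ANDROID__)
# define ASSUME_MUSL 1
#else
# define ASSUME_MUSL 0
#endif
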
-# if _CALL_ELF == 2 -# define SANITIZER_PPC64V1 0 -# define SANITIZER_PPC64V2 1 +# if _CALL_ELF == 2 +# define SANITIZER_PPC64V1 0 +# define SANITIZER_PPC64V2 1 +# else +# define SANITIZER_PPC64V1 1 +# define SANITIZER_PPC64V2 0 +# endif # else -# define SANITIZER_PPC64V1 1 -# define SANITIZER_PPC64V2 0 +# define SANITIZER_PPC32 1 +# define SANITIZER_PPC64 0 +# define SANITIZER_PPC64V1 0 +# define SANITIZER_PPC64V2 0 # endif -# else -# define SANITIZER_PPC32 1 +#else +# define SANITIZER_PPC 0 +# define SANITIZER_PPC32 0 # define SANITIZER_PPC64 0 # define SANITIZER_PPC64V1 0 # define SANITIZER_PPC64V2 0 -# endif +#endif + +#if defined(__arm__) || defined(_M_ARM) +# define SANITIZER_ARM 1 #else -# define SANITIZER_PPC 0 -# define SANITIZER_PPC32 0 -# define SANITIZER_PPC64 0 -# define SANITIZER_PPC64V1 0 -# define SANITIZER_PPC64V2 0 +# define SANITIZER_ARM 0 #endif -#if defined(__arm__) -# define SANITIZER_ARM 1 +#if defined(__aarch64__) || defined(_M_ARM64) +# define SANITIZER_ARM64 1 #else -# define SANITIZER_ARM 0 +# define SANITIZER_ARM64 0 #endif #if SANITIZER_SOLARIS && SANITIZER_WORDSIZE == 32 -# define SANITIZER_SOLARIS32 1 +# define SANITIZER_SOLARIS32 1 #else -# define SANITIZER_SOLARIS32 0 +# define SANITIZER_SOLARIS32 0 #endif #if defined(__riscv) && (__riscv_xlen == 64) -#define SANITIZER_RISCV64 1 +# define SANITIZER_RISCV64 1 #else -#define SANITIZER_RISCV64 0 +# define SANITIZER_RISCV64 0 #endif // By default we allow to use SizeClassAllocator64 on 64-bit platform. @@ -231,62 +250,52 @@ // For such platforms build this code with -DSANITIZER_CAN_USE_ALLOCATOR64=0 or // change the definition of SANITIZER_CAN_USE_ALLOCATOR64 here. #ifndef SANITIZER_CAN_USE_ALLOCATOR64 -# if (SANITIZER_ANDROID && defined(__aarch64__)) || SANITIZER_FUCHSIA -# define SANITIZER_CAN_USE_ALLOCATOR64 1 -# elif defined(__mips64) || defined(__aarch64__) -# define SANITIZER_CAN_USE_ALLOCATOR64 0 -# else -# define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64) -# endif +# if (SANITIZER_ANDROID && defined(__aarch64__)) || SANITIZER_FUCHSIA +# define SANITIZER_CAN_USE_ALLOCATOR64 1 +# elif defined(__mips64) || defined(__aarch64__) +# define SANITIZER_CAN_USE_ALLOCATOR64 0 +# else +# define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64) +# endif #endif // The range of addresses which can be returned my mmap. // FIXME: this value should be different on different platforms. Larger values // will still work but will consume more memory for TwoLevelByteMap. 
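The SANITIZER_MMAP_RANGE_SIZE table that follows leans on the FIRST_32_SECOND_64 selector defined earlier in this header. Restated self-containedly so the table reads more easily (kRange is an illustrative name; the word-size test mirrors the header's own):

#if defined(__LP64__) || defined(_WIN64)
# define FIRST_32_SECOND_64(a, b) (b)
#else
# define FIRST_32_SECOND_64(a, b) (a)
#endif

// E.g. the default branch of the table below:
static const unsigned long long kRange =
    FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47);  // 4 GiB vs 128 TiB
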
#if defined(__mips__) -#if SANITIZER_GO && defined(__mips64) -#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47) -#else -# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40) -#endif +# if SANITIZER_GO && defined(__mips64) +# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47) +# else +# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40) +# endif #elif SANITIZER_RISCV64 -#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 38) +# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 38) #elif defined(__aarch64__) -# if SANITIZER_MAC -# if SANITIZER_OSX || SANITIZER_IOSSIM -# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47) +# if SANITIZER_MAC +# if SANITIZER_OSX || SANITIZER_IOSSIM +# define SANITIZER_MMAP_RANGE_SIZE \ + FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47) +# else +// Darwin iOS/ARM64 has a 36-bit VMA, 64GiB VM +# define SANITIZER_MMAP_RANGE_SIZE \ + FIRST_32_SECOND_64(1ULL << 32, 1ULL << 36) +# endif # else - // Darwin iOS/ARM64 has a 36-bit VMA, 64GiB VM -# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 36) +# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48) # endif -# else -# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48) -# endif #elif defined(__sparc__) -#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 52) +# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 52) #else -# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47) +# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47) #endif // Whether the addresses are sign-extended from the VMA range to the word. // The SPARC64 Linux port implements this to split the VMA space into two // non-contiguous halves with a huge hole in the middle. #if defined(__sparc__) && SANITIZER_WORDSIZE == 64 -#define SANITIZER_SIGN_EXTENDED_ADDRESSES 1 +# define SANITIZER_SIGN_EXTENDED_ADDRESSES 1 #else -#define SANITIZER_SIGN_EXTENDED_ADDRESSES 0 -#endif - -// The AArch64 and RISC-V linux ports use the canonical syscall set as -// mandated by the upstream linux community for all new ports. Other ports -// may still use legacy syscalls. -#ifndef SANITIZER_USES_CANONICAL_LINUX_SYSCALLS -# if (defined(__aarch64__) || defined(__riscv) || defined(__hexagon__)) && \ - SANITIZER_LINUX -# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 1 -# else -# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 0 -# endif +# define SANITIZER_SIGN_EXTENDED_ADDRESSES 0 #endif // udi16 syscalls can only be used when the following conditions are @@ -297,15 +306,15 @@ // Since we don't want to include libc headers here, we check the // target only. 
#if defined(__arm__) || SANITIZER_X32 || defined(__sparc__) -#define SANITIZER_USES_UID16_SYSCALLS 1 +# define SANITIZER_USES_UID16_SYSCALLS 1 #else -#define SANITIZER_USES_UID16_SYSCALLS 0 +# define SANITIZER_USES_UID16_SYSCALLS 0 #endif #if defined(__mips__) -# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 10) +# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 10) #else -# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12) +# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12) #endif /// \macro MSC_PREREQ @@ -314,15 +323,15 @@ /// * 1800: Microsoft Visual Studio 2013 / 12.0 /// * 1900: Microsoft Visual Studio 2015 / 14.0 #ifdef _MSC_VER -# define MSC_PREREQ(version) (_MSC_VER >= (version)) +# define MSC_PREREQ(version) (_MSC_VER >= (version)) #else -# define MSC_PREREQ(version) 0 +# define MSC_PREREQ(version) 0 #endif -#if SANITIZER_MAC && !(defined(__arm64__) && SANITIZER_IOS) -# define SANITIZER_NON_UNIQUE_TYPEINFO 0 +#if SANITIZER_MAC && defined(__x86_64__) +# define SANITIZER_NON_UNIQUE_TYPEINFO 0 #else -# define SANITIZER_NON_UNIQUE_TYPEINFO 1 +# define SANITIZER_NON_UNIQUE_TYPEINFO 1 #endif // On linux, some architectures had an ABI transition from 64-bit long double @@ -330,11 +339,11 @@ // involving long doubles come in two versions, and we need to pass the // correct one to dlvsym when intercepting them. #if SANITIZER_LINUX && (SANITIZER_S390 || SANITIZER_PPC32 || SANITIZER_PPC64V1) -#define SANITIZER_NLDBL_VERSION "GLIBC_2.4" +# define SANITIZER_NLDBL_VERSION "GLIBC_2.4" #endif #if SANITIZER_GO == 0 -# define SANITIZER_GO 0 +# define SANITIZER_GO 0 #endif // On PowerPC and ARM Thumb, calling pthread_exit() causes LSan to detect leaks. @@ -342,40 +351,39 @@ // dlopen mallocs "libgcc_s.so" string which confuses LSan, it fails to realize // that this allocation happens in dynamic linker and should be ignored. #if SANITIZER_PPC || defined(__thumb__) -# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 1 +# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 1 #else -# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 0 +# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 0 #endif -#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD || \ - SANITIZER_SOLARIS -# define SANITIZER_MADVISE_DONTNEED MADV_FREE +#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD || SANITIZER_SOLARIS +# define SANITIZER_MADVISE_DONTNEED MADV_FREE #else -# define SANITIZER_MADVISE_DONTNEED MADV_DONTNEED +# define SANITIZER_MADVISE_DONTNEED MADV_DONTNEED #endif // Older gcc have issues aligning to a constexpr, and require an integer. // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56859 among others. #if defined(__powerpc__) || defined(__powerpc64__) -# define SANITIZER_CACHE_LINE_SIZE 128 +# define SANITIZER_CACHE_LINE_SIZE 128 #else -# define SANITIZER_CACHE_LINE_SIZE 64 +# define SANITIZER_CACHE_LINE_SIZE 64 #endif // Enable offline markup symbolizer for Fuchsia. #if SANITIZER_FUCHSIA # define SANITIZER_SYMBOLIZER_MARKUP 1 #else -#define SANITIZER_SYMBOLIZER_MARKUP 0 +# define SANITIZER_SYMBOLIZER_MARKUP 0 #endif // Enable ability to support sanitizer initialization that is // compatible with the sanitizer library being loaded via // `dlopen()`. 
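The SANITIZER_NLDBL_VERSION machinery above exists because, on targets that went through the long-double ABI transition, glibc keeps both bindings of each affected function alive under different symbol versions, and an interceptor must pick one explicitly via dlvsym. A hedged sketch of that lookup (glibc-specific, needs _GNU_SOURCE; the strtod/"GLIBC_2.4" pair is only an example, and the versioned lookup simply fails on targets without the transition):

#include <dlfcn.h>
#include <stdio.h>

typedef double (*strtod_fn)(const char *, char **);

int main() {
  // Pin the lookup to the version named above; fall back to the
  // default binding when that version does not exist on this target.
  strtod_fn f = (strtod_fn)dlvsym(RTLD_NEXT, "strtod", "GLIBC_2.4");
  if (!f)
    f = (strtod_fn)dlsym(RTLD_NEXT, "strtod");
  if (f)
    printf("%g\n", f("2.5", nullptr));
  return 0;
}
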
#if SANITIZER_MAC -#define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 1 +# define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 1 #else -#define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 0 +# define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 0 #endif // SANITIZER_SUPPORTS_THREADLOCAL @@ -392,4 +400,15 @@ # endif #endif -#endif // SANITIZER_PLATFORM_H +#if defined(__thumb__) && defined(__linux__) +// Workaround for +// https://lab.llvm.org/buildbot/#/builders/clang-thumbv7-full-2stage +// or +// https://lab.llvm.org/staging/#/builders/clang-thumbv7-full-2stage +// It fails *rss_limit_mb_test* without meaningful errors. +# define SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL 1 +#else +# define SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL 0 +#endif + +#endif // SANITIZER_PLATFORM_H diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h index 14610f2df78..3cbbead4e98 100644 --- a/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h +++ b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h @@ -235,6 +235,7 @@ #define SANITIZER_INTERCEPT_TIME SI_POSIX #define SANITIZER_INTERCEPT_GLOB (SI_GLIBC || SI_SOLARIS) #define SANITIZER_INTERCEPT_GLOB64 SI_GLIBC +#define SANITIZER_INTERCEPT___B64_TO SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_POSIX_SPAWN SI_POSIX #define SANITIZER_INTERCEPT_WAIT SI_POSIX #define SANITIZER_INTERCEPT_INET SI_POSIX @@ -465,6 +466,7 @@ #define SANITIZER_INTERCEPT_STAT \ (SI_FREEBSD || SI_MAC || SI_ANDROID || SI_NETBSD || SI_SOLARIS || \ SI_STAT_LINUX) +#define SANITIZER_INTERCEPT_STAT64 SI_STAT_LINUX #define SANITIZER_INTERCEPT_LSTAT (SI_NETBSD || SI_FREEBSD || SI_STAT_LINUX) #define SANITIZER_INTERCEPT___XSTAT \ ((!SANITIZER_INTERCEPT_STAT && SI_POSIX) || SI_STAT_LINUX) diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.cpp b/libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.cpp index 64535805e40..0d25fa80e2e 100644 --- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.cpp @@ -130,7 +130,7 @@ unsigned struct_sigevent_sz = sizeof(struct sigevent); unsigned struct_sched_param_sz = sizeof(struct sched_param); unsigned struct_statfs_sz = sizeof(struct statfs); unsigned struct_sockaddr_sz = sizeof(struct sockaddr); -unsigned ucontext_t_sz = sizeof(ucontext_t); +unsigned ucontext_t_sz(void *ctx) { return sizeof(ucontext_t); } unsigned struct_rlimit_sz = sizeof(struct rlimit); unsigned struct_timespec_sz = sizeof(struct timespec); unsigned struct_utimbuf_sz = sizeof(struct utimbuf); diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.h b/libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.h index 649e64fd1a3..9859c52ec69 100644 --- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.h +++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.h @@ -57,7 +57,7 @@ extern unsigned struct_sched_param_sz; extern unsigned struct_statfs64_sz; extern unsigned struct_statfs_sz; extern unsigned struct_sockaddr_sz; -extern unsigned ucontext_t_sz; +unsigned ucontext_t_sz(void *ctx); extern unsigned struct_rlimit_sz; extern unsigned struct_utimbuf_sz; extern unsigned struct_timespec_sz; diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp index 2b1a2f7932c..9d577570ea1 100644 --- 
a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp @@ -26,10 +26,7 @@ // With old kernels (and even new kernels on powerpc) asm/stat.h uses types that // are not defined anywhere in userspace headers. Fake them. This seems to work -// fine with newer headers, too. Beware that with , struct stat -// takes the form of struct stat64 on 32-bit platforms if _FILE_OFFSET_BITS=64. -// Also, for some platforms (e.g. mips) there are additional members in the -// struct stat:s. +// fine with newer headers, too. #include # if defined(__x86_64__) || defined(__mips__) || defined(__hexagon__) # include diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_netbsd.cpp b/libsanitizer/sanitizer_common/sanitizer_platform_limits_netbsd.cpp index 531e07f2d4c..648e502b904 100644 --- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_netbsd.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_netbsd.cpp @@ -554,7 +554,7 @@ unsigned struct_tms_sz = sizeof(struct tms); unsigned struct_sigevent_sz = sizeof(struct sigevent); unsigned struct_sched_param_sz = sizeof(struct sched_param); unsigned struct_sockaddr_sz = sizeof(struct sockaddr); -unsigned ucontext_t_sz = sizeof(ucontext_t); +unsigned ucontext_t_sz(void *ctx) { return sizeof(ucontext_t); } unsigned struct_rlimit_sz = sizeof(struct rlimit); unsigned struct_timespec_sz = sizeof(struct timespec); unsigned struct_sembuf_sz = sizeof(struct sembuf); diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_netbsd.h b/libsanitizer/sanitizer_common/sanitizer_platform_limits_netbsd.h index 9407803fc9c..dc6eb59b280 100644 --- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_netbsd.h +++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_netbsd.h @@ -45,7 +45,7 @@ extern unsigned struct_stack_t_sz; extern unsigned struct_sched_param_sz; extern unsigned struct_statfs_sz; extern unsigned struct_sockaddr_sz; -extern unsigned ucontext_t_sz; +unsigned ucontext_t_sz(void *ctx); extern unsigned struct_rlimit_sz; extern unsigned struct_utimbuf_sz; diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cpp b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cpp index a1c452855ae..e5cecaaaffc 100644 --- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cpp @@ -170,8 +170,9 @@ typedef struct user_fpregs elf_fpregset_t; #endif // Include these after system headers to avoid name clashes and ambiguities. -#include "sanitizer_internal_defs.h" -#include "sanitizer_platform_limits_posix.h" +# include "sanitizer_common.h" +# include "sanitizer_internal_defs.h" +# include "sanitizer_platform_limits_posix.h" namespace __sanitizer { unsigned struct_utsname_sz = sizeof(struct utsname); @@ -214,10 +215,24 @@ namespace __sanitizer { #if !SANITIZER_ANDROID unsigned struct_statfs_sz = sizeof(struct statfs); unsigned struct_sockaddr_sz = sizeof(struct sockaddr); - unsigned ucontext_t_sz = sizeof(ucontext_t); -#endif // !SANITIZER_ANDROID -#if SANITIZER_LINUX + unsigned ucontext_t_sz(void *ctx) { +# if SANITIZER_GLIBC && SANITIZER_X64 + // See kernel arch/x86/kernel/fpu/signal.c for details. + const auto *fpregs = static_cast(ctx)->uc_mcontext.fpregs; + // The member names differ across header versions, but the actual layout + // is always the same. So avoid using members, just use arithmetic. 
+ const uint32_t *after_xmm = + reinterpret_cast<const uint32_t *>(fpregs + 1) - 24; + if (after_xmm[12] == FP_XSTATE_MAGIC1) + return reinterpret_cast<const char *>(fpregs) + after_xmm[13] - + static_cast<const char *>(ctx); +# endif + return sizeof(ucontext_t); + } +# endif // !SANITIZER_ANDROID + +# if SANITIZER_LINUX unsigned struct_epoll_event_sz = sizeof(struct epoll_event); unsigned struct_sysinfo_sz = sizeof(struct sysinfo); unsigned __user_cap_header_struct_sz = @@ -575,6 +590,14 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr); unsigned IOCTL_BLKROGET = BLKROGET; unsigned IOCTL_BLKROSET = BLKROSET; unsigned IOCTL_BLKRRPART = BLKRRPART; + unsigned IOCTL_BLKFRASET = BLKFRASET; + unsigned IOCTL_BLKFRAGET = BLKFRAGET; + unsigned IOCTL_BLKSECTSET = BLKSECTSET; + unsigned IOCTL_BLKSECTGET = BLKSECTGET; + unsigned IOCTL_BLKSSZGET = BLKSSZGET; + unsigned IOCTL_BLKBSZGET = BLKBSZGET; + unsigned IOCTL_BLKBSZSET = BLKBSZSET; + unsigned IOCTL_BLKGETSIZE64 = BLKGETSIZE64; unsigned IOCTL_CDROMAUDIOBUFSIZ = CDROMAUDIOBUFSIZ; unsigned IOCTL_CDROMEJECT = CDROMEJECT; unsigned IOCTL_CDROMEJECT_SW = CDROMEJECT_SW; diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h index da53b5abef2..62a99035db3 100644 --- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h +++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h @@ -57,12 +57,12 @@ extern unsigned struct_regmatch_sz; extern unsigned struct_fstab_sz; extern unsigned struct_statfs_sz; extern unsigned struct_sockaddr_sz; -extern unsigned ucontext_t_sz; -#endif // !SANITIZER_ANDROID +unsigned ucontext_t_sz(void *uctx); +# endif // !SANITIZER_ANDROID -#if SANITIZER_LINUX +# if SANITIZER_LINUX -#if defined(__x86_64__) +# if defined(__x86_64__) const unsigned struct_kernel_stat_sz = 144; const unsigned struct_kernel_stat64_sz = 0; #elif defined(__i386__) const unsigned struct_kernel_stat_sz = 64; const unsigned struct_kernel_stat64_sz = 96; @@ -83,7 +83,7 @@ const unsigned struct_kernel_stat64_sz = 104; #elif defined(__mips__) const unsigned struct_kernel_stat_sz = SANITIZER_ANDROID ?
FIRST_32_SECOND_64(104, 128) - : FIRST_32_SECOND_64(144, 216); + : FIRST_32_SECOND_64(160, 216); const unsigned struct_kernel_stat64_sz = 104; #elif defined(__s390__) && !defined(__s390x__) const unsigned struct_kernel_stat_sz = 64; @@ -370,7 +370,8 @@ struct __sanitizer_group { char **gr_mem; }; -# if (defined(__x86_64__) && !defined(_LP64)) || defined(__hexagon__) +# if (SANITIZER_LINUX && !SANITIZER_GLIBC && !SANITIZER_ANDROID) || \ + (defined(__x86_64__) && !defined(_LP64)) || defined(__hexagon__) typedef long long __sanitizer_time_t; #else typedef long __sanitizer_time_t; @@ -478,7 +479,8 @@ struct __sanitizer_dirent { unsigned short d_reclen; // more fields that we don't care about }; -# elif SANITIZER_ANDROID || defined(__x86_64__) || defined(__hexagon__) +# elif (SANITIZER_LINUX && !SANITIZER_GLIBC) || defined(__x86_64__) || \ + defined(__hexagon__) struct __sanitizer_dirent { unsigned long long d_ino; unsigned long long d_off; @@ -1108,6 +1110,14 @@ extern unsigned IOCTL_BLKRASET; extern unsigned IOCTL_BLKROGET; extern unsigned IOCTL_BLKROSET; extern unsigned IOCTL_BLKRRPART; +extern unsigned IOCTL_BLKFRASET; +extern unsigned IOCTL_BLKFRAGET; +extern unsigned IOCTL_BLKSECTSET; +extern unsigned IOCTL_BLKSECTGET; +extern unsigned IOCTL_BLKSSZGET; +extern unsigned IOCTL_BLKBSZGET; +extern unsigned IOCTL_BLKBSZSET; +extern unsigned IOCTL_BLKGETSIZE64; extern unsigned IOCTL_CDROMAUDIOBUFSIZ; extern unsigned IOCTL_CDROMEJECT; extern unsigned IOCTL_CDROMEJECT_SW; diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_solaris.cpp b/libsanitizer/sanitizer_common/sanitizer_platform_limits_solaris.cpp index a113cb0d349..dad7bde1498 100644 --- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_solaris.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_solaris.cpp @@ -89,7 +89,7 @@ namespace __sanitizer { unsigned struct_sched_param_sz = sizeof(struct sched_param); unsigned struct_statfs_sz = sizeof(struct statfs); unsigned struct_sockaddr_sz = sizeof(struct sockaddr); - unsigned ucontext_t_sz = sizeof(ucontext_t); + unsigned ucontext_t_sz(void *ctx) { return sizeof(ucontext_t); } unsigned struct_timespec_sz = sizeof(struct timespec); #if SANITIZER_SOLARIS32 unsigned struct_statvfs64_sz = sizeof(struct statvfs64); diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_solaris.h b/libsanitizer/sanitizer_common/sanitizer_platform_limits_solaris.h index cbab577bcf2..84a81265162 100644 --- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_solaris.h +++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_solaris.h @@ -43,7 +43,7 @@ extern unsigned struct_sched_param_sz; extern unsigned struct_statfs64_sz; extern unsigned struct_statfs_sz; extern unsigned struct_sockaddr_sz; -extern unsigned ucontext_t_sz; +unsigned ucontext_t_sz(void *ctx); extern unsigned struct_timespec_sz; extern unsigned struct_rlimit_sz; diff --git a/libsanitizer/sanitizer_common/sanitizer_posix.cpp b/libsanitizer/sanitizer_common/sanitizer_posix.cpp index f8457a6aac4..3b330a3705e 100644 --- a/libsanitizer/sanitizer_common/sanitizer_posix.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_posix.cpp @@ -95,6 +95,7 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment, UnmapOrDie((void*)map_res, res - map_res); } uptr end = res + size; + end = RoundUpTo(end, GetPageSizeCached()); if (end != map_end) UnmapOrDie((void*)end, map_end - end); return (void*)res; diff --git a/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cpp 
b/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cpp index eed02ce4f6a..b6d8c7281bd 100644 --- a/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cpp @@ -290,7 +290,7 @@ bool IsAccessibleMemoryRange(uptr beg, uptr size) { return result; } -void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) { +void PlatformPrepareForSandboxing(void *args) { // Some kinds of sandboxes may forbid filesystem access, so we won't be able // to read the file mappings from /proc/self/maps. Luckily, neither the // process will be able to load additional libraries, so it's fine to use the diff --git a/libsanitizer/sanitizer_common/sanitizer_printf.cpp b/libsanitizer/sanitizer_common/sanitizer_printf.cpp index 79aee8ba628..3a9e366d2df 100644 --- a/libsanitizer/sanitizer_common/sanitizer_printf.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_printf.cpp @@ -191,12 +191,12 @@ int VSNPrintf(char *buff, int buff_length, break; } case 'p': { - RAW_CHECK(!have_flags, kPrintfFormatsHelp, format); + RAW_CHECK_VA(!have_flags, kPrintfFormatsHelp, format); result += AppendPointer(&buff, buff_end, va_arg(args, uptr)); break; } case 's': { - RAW_CHECK(!have_length, kPrintfFormatsHelp, format); + RAW_CHECK_VA(!have_length, kPrintfFormatsHelp, format); // Only left-justified width is supported. CHECK(!have_width || left_justified); result += AppendString(&buff, buff_end, left_justified ? -width : width, @@ -204,17 +204,17 @@ int VSNPrintf(char *buff, int buff_length, break; } case 'c': { - RAW_CHECK(!have_flags, kPrintfFormatsHelp, format); + RAW_CHECK_VA(!have_flags, kPrintfFormatsHelp, format); result += AppendChar(&buff, buff_end, va_arg(args, int)); break; } case '%' : { - RAW_CHECK(!have_flags, kPrintfFormatsHelp, format); + RAW_CHECK_VA(!have_flags, kPrintfFormatsHelp, format); result += AppendChar(&buff, buff_end, '%'); break; } default: { - RAW_CHECK(false, kPrintfFormatsHelp, format); + RAW_CHECK_VA(false, kPrintfFormatsHelp, format); } } } diff --git a/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cpp b/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cpp index 1f53e3e46d8..62b2e5e0321 100644 --- a/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cpp @@ -143,16 +143,16 @@ void MemoryMappingLayout::LoadFromCache() { // early in the process, when dyld is one of the only images loaded, // so it will be hit after only a few iterations. 
static mach_header *get_dyld_image_header() { - unsigned depth = 1; - vm_size_t size = 0; vm_address_t address = 0; - kern_return_t err = KERN_SUCCESS; - mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64; while (true) { + vm_size_t size = 0; + unsigned depth = 1; struct vm_region_submap_info_64 info; - err = vm_region_recurse_64(mach_task_self(), &address, &size, &depth, - (vm_region_info_t)&info, &count); + mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64; + kern_return_t err = + vm_region_recurse_64(mach_task_self(), &address, &size, &depth, + (vm_region_info_t)&info, &count); if (err != KERN_SUCCESS) return nullptr; if (size >= sizeof(mach_header) && info.protection & kProtectionRead) { diff --git a/libsanitizer/sanitizer_common/sanitizer_quarantine.h b/libsanitizer/sanitizer_common/sanitizer_quarantine.h index 1a074d2bb70..4aa60548516 100644 --- a/libsanitizer/sanitizer_common/sanitizer_quarantine.h +++ b/libsanitizer/sanitizer_common/sanitizer_quarantine.h @@ -149,8 +149,8 @@ class Quarantine { Cache cache_; char pad2_[kCacheLineSize]; - void NOINLINE Recycle(uptr min_size, Callback cb) REQUIRES(recycle_mutex_) - RELEASE(recycle_mutex_) { + void NOINLINE Recycle(uptr min_size, Callback cb) + SANITIZER_REQUIRES(recycle_mutex_) SANITIZER_RELEASE(recycle_mutex_) { Cache tmp; { SpinMutexLock l(&cache_mutex_); diff --git a/libsanitizer/sanitizer_common/sanitizer_ring_buffer.h b/libsanitizer/sanitizer_common/sanitizer_ring_buffer.h index 2a46e933b75..f22e40cac28 100644 --- a/libsanitizer/sanitizer_common/sanitizer_ring_buffer.h +++ b/libsanitizer/sanitizer_common/sanitizer_ring_buffer.h @@ -86,10 +86,13 @@ class CompactRingBuffer { // Lower bytes store the address of the next buffer element. static constexpr int kPageSizeBits = 12; static constexpr int kSizeShift = 56; + static constexpr int kSizeBits = 64 - kSizeShift; static constexpr uptr kNextMask = (1ULL << kSizeShift) - 1; uptr GetStorageSize() const { return (long_ >> kSizeShift) << kPageSizeBits; } + static uptr SignExtend(uptr x) { return ((sptr)x) << kSizeBits >> kSizeBits; } + void Init(void *storage, uptr size) { CHECK_EQ(sizeof(CompactRingBuffer), sizeof(void *)); CHECK(IsPowerOfTwo(size)); @@ -97,12 +100,14 @@ class CompactRingBuffer { CHECK_LE(size, 128 << kPageSizeBits); CHECK_EQ(size % 4096, 0); CHECK_EQ(size % sizeof(T), 0); - CHECK_EQ((uptr)storage % (size * 2), 0); - long_ = (uptr)storage | ((size >> kPageSizeBits) << kSizeShift); + uptr st = (uptr)storage; + CHECK_EQ(st % (size * 2), 0); + CHECK_EQ(st, SignExtend(st & kNextMask)); + long_ = (st & kNextMask) | ((size >> kPageSizeBits) << kSizeShift); } void SetNext(const T *next) { - long_ = (long_ & ~kNextMask) | (uptr)next; + long_ = (long_ & ~kNextMask) | ((uptr)next & kNextMask); } public: @@ -119,7 +124,7 @@ class CompactRingBuffer { SetNext((const T *)storage + Idx); } - T *Next() const { return (T *)(long_ & kNextMask); } + T *Next() const { return (T *)(SignExtend(long_ & kNextMask)); } void *StartOfStorage() const { return (void *)((uptr)Next() & ~(GetStorageSize() - 1)); diff --git a/libsanitizer/sanitizer_common/sanitizer_stack_store.cpp b/libsanitizer/sanitizer_common/sanitizer_stack_store.cpp new file mode 100644 index 00000000000..148470943b4 --- /dev/null +++ b/libsanitizer/sanitizer_common/sanitizer_stack_store.cpp @@ -0,0 +1,379 @@ +//===-- sanitizer_stack_store.cpp -------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "sanitizer_stack_store.h" + +#include "sanitizer_atomic.h" +#include "sanitizer_common.h" +#include "sanitizer_internal_defs.h" +#include "sanitizer_leb128.h" +#include "sanitizer_lzw.h" +#include "sanitizer_placement_new.h" +#include "sanitizer_stacktrace.h" + +namespace __sanitizer { + +namespace { +struct StackTraceHeader { + static constexpr u32 kStackSizeBits = 8; + + u8 size; + u8 tag; + explicit StackTraceHeader(const StackTrace &trace) + : size(Min(trace.size, (1u << 8) - 1)), tag(trace.tag) { + CHECK_EQ(trace.tag, static_cast<uptr>(tag)); + } + explicit StackTraceHeader(uptr h) + : size(h & ((1 << kStackSizeBits) - 1)), tag(h >> kStackSizeBits) {} + + uptr ToUptr() const { + return static_cast<uptr>(size) | (static_cast<uptr>(tag) << kStackSizeBits); + } +}; +} // namespace + +StackStore::Id StackStore::Store(const StackTrace &trace, uptr *pack) { + if (!trace.size && !trace.tag) + return 0; + StackTraceHeader h(trace); + uptr idx = 0; + *pack = 0; + uptr *stack_trace = Alloc(h.size + 1, &idx, pack); + *stack_trace = h.ToUptr(); + internal_memcpy(stack_trace + 1, trace.trace, h.size * sizeof(uptr)); + *pack += blocks_[GetBlockIdx(idx)].Stored(h.size + 1); + return OffsetToId(idx); +} + +StackTrace StackStore::Load(Id id) { + if (!id) + return {}; + uptr idx = IdToOffset(id); + uptr block_idx = GetBlockIdx(idx); + CHECK_LT(block_idx, ARRAY_SIZE(blocks_)); + const uptr *stack_trace = blocks_[block_idx].GetOrUnpack(this); + if (!stack_trace) + return {}; + stack_trace += GetInBlockIdx(idx); + StackTraceHeader h(*stack_trace); + return StackTrace(stack_trace + 1, h.size, h.tag); +} + +uptr StackStore::Allocated() const { + return atomic_load_relaxed(&allocated_) + sizeof(*this); +} + +uptr *StackStore::Alloc(uptr count, uptr *idx, uptr *pack) { + for (;;) { + // Optimistic lock-free allocation, essentially try to bump the + // total_frames_. + uptr start = atomic_fetch_add(&total_frames_, count, memory_order_relaxed); + uptr block_idx = GetBlockIdx(start); + uptr last_idx = GetBlockIdx(start + count - 1); + if (LIKELY(block_idx == last_idx)) { + // Fits into a single block. + CHECK_LT(block_idx, ARRAY_SIZE(blocks_)); + *idx = start; + return blocks_[block_idx].GetOrCreate(this) + GetInBlockIdx(start); + } + + // Retry. We can't use a range allocated in two different blocks. + CHECK_LE(count, kBlockSizeFrames); + uptr in_first = kBlockSizeFrames - GetInBlockIdx(start); + // Mark tail/head of these blocks as "stored" to avoid waiting before we can + // Pack().
+ *pack += blocks_[block_idx].Stored(in_first); + *pack += blocks_[last_idx].Stored(count - in_first); + } +} + +void *StackStore::Map(uptr size, const char *mem_type) { + atomic_fetch_add(&allocated_, size, memory_order_relaxed); + return MmapNoReserveOrDie(size, mem_type); +} + +void StackStore::Unmap(void *addr, uptr size) { + atomic_fetch_sub(&allocated_, size, memory_order_relaxed); + UnmapOrDie(addr, size); +} + +uptr StackStore::Pack(Compression type) { + uptr res = 0; + for (BlockInfo &b : blocks_) res += b.Pack(type, this); + return res; +} + +void StackStore::LockAll() { + for (BlockInfo &b : blocks_) b.Lock(); +} + +void StackStore::UnlockAll() { + for (BlockInfo &b : blocks_) b.Unlock(); +} + +void StackStore::TestOnlyUnmap() { + for (BlockInfo &b : blocks_) b.TestOnlyUnmap(this); + internal_memset(this, 0, sizeof(*this)); +} + +uptr *StackStore::BlockInfo::Get() const { + // Idiomatic double-checked locking uses memory_order_acquire here. But + // relaxed is fine for us; the justification is similar to + // TwoLevelMap::GetOrCreate. + return reinterpret_cast<uptr *>(atomic_load_relaxed(&data_)); +} + +uptr *StackStore::BlockInfo::Create(StackStore *store) { + SpinMutexLock l(&mtx_); + uptr *ptr = Get(); + if (!ptr) { + ptr = reinterpret_cast<uptr *>(store->Map(kBlockSizeBytes, "StackStore")); + atomic_store(&data_, reinterpret_cast<uptr>(ptr), memory_order_release); + } + return ptr; +} + +uptr *StackStore::BlockInfo::GetOrCreate(StackStore *store) { + uptr *ptr = Get(); + if (LIKELY(ptr)) + return ptr; + return Create(store); +} + +class SLeb128Encoder { + public: + SLeb128Encoder(u8 *begin, u8 *end) : begin(begin), end(end) {} + + bool operator==(const SLeb128Encoder &other) const { + return begin == other.begin; + } + + bool operator!=(const SLeb128Encoder &other) const { + return begin != other.begin; + } + + SLeb128Encoder &operator=(uptr v) { + sptr diff = v - previous; + begin = EncodeSLEB128(diff, begin, end); + previous = v; + return *this; + } + SLeb128Encoder &operator*() { return *this; } + SLeb128Encoder &operator++() { return *this; } + + u8 *base() const { return begin; } + + private: + u8 *begin; + u8 *end; + uptr previous = 0; +}; + +class SLeb128Decoder { + public: + SLeb128Decoder(const u8 *begin, const u8 *end) : begin(begin), end(end) {} + + bool operator==(const SLeb128Decoder &other) const { + return begin == other.begin; + } + + bool operator!=(const SLeb128Decoder &other) const { + return begin != other.begin; + } + + uptr operator*() { + sptr diff; + begin = DecodeSLEB128(begin, end, &diff); + previous += diff; + return previous; + } + SLeb128Decoder &operator++() { return *this; } + + SLeb128Decoder operator++(int) { return *this; } + + private: + const u8 *begin; + const u8 *end; + uptr previous = 0; +}; + +static u8 *CompressDelta(const uptr *from, const uptr *from_end, u8 *to, + u8 *to_end) { + SLeb128Encoder encoder(to, to_end); + for (; from != from_end; ++from, ++encoder) *encoder = *from; + return encoder.base(); +} + +static uptr *UncompressDelta(const u8 *from, const u8 *from_end, uptr *to, + uptr *to_end) { + SLeb128Decoder decoder(from, from_end); + SLeb128Decoder end(from_end, from_end); + for (; decoder != end; ++to, ++decoder) *to = *decoder; + CHECK_EQ(to, to_end); + return to; +} + +static u8 *CompressLzw(const uptr *from, const uptr *from_end, u8 *to, + u8 *to_end) { + SLeb128Encoder encoder(to, to_end); + encoder = LzwEncode<uptr>(from, from_end, encoder); + return encoder.base(); +} + +static uptr *UncompressLzw(const u8 *from, const u8 *from_end, uptr *to, +
uptr *to_end) { + SLeb128Decoder decoder(from, from_end); + SLeb128Decoder end(from_end, from_end); + to = LzwDecode<uptr>(decoder, end, to); + CHECK_EQ(to, to_end); + return to; +} + +#if defined(_MSC_VER) && !defined(__clang__) +# pragma warning(push) +// Disable 'nonstandard extension used: zero-sized array in struct/union'. +# pragma warning(disable : 4200) +#endif +namespace { +struct PackedHeader { + uptr size; + StackStore::Compression type; + u8 data[]; +}; +} // namespace +#if defined(_MSC_VER) && !defined(__clang__) +# pragma warning(pop) +#endif + +uptr *StackStore::BlockInfo::GetOrUnpack(StackStore *store) { + SpinMutexLock l(&mtx_); + switch (state) { + case State::Storing: + state = State::Unpacked; + FALLTHROUGH; + case State::Unpacked: + return Get(); + case State::Packed: + break; + } + + u8 *ptr = reinterpret_cast<u8 *>(Get()); + CHECK_NE(nullptr, ptr); + const PackedHeader *header = reinterpret_cast<const PackedHeader *>(ptr); + CHECK_LE(header->size, kBlockSizeBytes); + CHECK_GE(header->size, sizeof(PackedHeader)); + + uptr packed_size_aligned = RoundUpTo(header->size, GetPageSizeCached()); + + uptr *unpacked = + reinterpret_cast<uptr *>(store->Map(kBlockSizeBytes, "StackStoreUnpack")); + + uptr *unpacked_end; + switch (header->type) { + case Compression::Delta: + unpacked_end = UncompressDelta(header->data, ptr + header->size, unpacked, + unpacked + kBlockSizeFrames); + break; + case Compression::LZW: + unpacked_end = UncompressLzw(header->data, ptr + header->size, unpacked, + unpacked + kBlockSizeFrames); + break; + default: + UNREACHABLE("Unexpected type"); + break; + } + + CHECK_EQ(kBlockSizeFrames, unpacked_end - unpacked); + + MprotectReadOnly(reinterpret_cast<uptr>(unpacked), kBlockSizeBytes); + atomic_store(&data_, reinterpret_cast<uptr>(unpacked), memory_order_release); + store->Unmap(ptr, packed_size_aligned); + + state = State::Unpacked; + return Get(); +} + +uptr StackStore::BlockInfo::Pack(Compression type, StackStore *store) { + if (type == Compression::None) + return 0; + + SpinMutexLock l(&mtx_); + switch (state) { + case State::Unpacked: + case State::Packed: + return 0; + case State::Storing: + break; + } + + uptr *ptr = Get(); + if (!ptr || !Stored(0)) + return 0; + + u8 *packed = + reinterpret_cast<u8 *>(store->Map(kBlockSizeBytes, "StackStorePack")); + PackedHeader *header = reinterpret_cast<PackedHeader *>(packed); + u8 *alloc_end = packed + kBlockSizeBytes; + + u8 *packed_end = nullptr; + switch (type) { + case Compression::Delta: + packed_end = + CompressDelta(ptr, ptr + kBlockSizeFrames, header->data, alloc_end); + break; + case Compression::LZW: + packed_end = + CompressLzw(ptr, ptr + kBlockSizeFrames, header->data, alloc_end); + break; + default: + UNREACHABLE("Unexpected type"); + break; + } + + header->type = type; + header->size = packed_end - packed; + + VPrintf(1, "Packed block of %zu KiB to %zu KiB\n", kBlockSizeBytes >> 10, + header->size >> 10); + + if (kBlockSizeBytes - header->size < kBlockSizeBytes / 8) { + VPrintf(1, "Undo and keep block unpacked\n"); + MprotectReadOnly(reinterpret_cast<uptr>(ptr), kBlockSizeBytes); + store->Unmap(packed, kBlockSizeBytes); + state = State::Unpacked; + return 0; + } + + uptr packed_size_aligned = RoundUpTo(header->size, GetPageSizeCached()); + store->Unmap(packed + packed_size_aligned, + kBlockSizeBytes - packed_size_aligned); + MprotectReadOnly(reinterpret_cast<uptr>(packed), packed_size_aligned); + + atomic_store(&data_, reinterpret_cast<uptr>(packed), memory_order_release); + store->Unmap(ptr, kBlockSizeBytes); + + state = State::Packed; + return kBlockSizeBytes - packed_size_aligned; +}
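(The Delta scheme used by CompressDelta/UncompressDelta above stores each frame as the SLEB128-encoded difference from the previous frame; return addresses within one trace tend to be numerically close, so the deltas encode to a byte or two. A minimal standalone sketch of that idea follows. The helper names and the main() driver are illustrative only, not part of the runtime.)

#include <cstdint>
#include <cstdio>
#include <vector>

// Append one SLEB128-encoded signed value to `out`.
static void PutSLEB128(int64_t v, std::vector<uint8_t> &out) {
  bool more = true;
  while (more) {
    uint8_t byte = v & 0x7f;
    v >>= 7;  // arithmetic shift preserves the sign
    more = !((v == 0 && !(byte & 0x40)) || (v == -1 && (byte & 0x40)));
    if (more) byte |= 0x80;
    out.push_back(byte);
  }
}

// Decode one SLEB128 value starting at `p`, advancing `p` past it.
static int64_t GetSLEB128(const uint8_t *&p) {
  int64_t result = 0;
  unsigned shift = 0;
  uint8_t byte;
  do {
    byte = *p++;
    result |= int64_t(byte & 0x7f) << shift;
    shift += 7;
  } while (byte & 0x80);
  if (shift < 64 && (byte & 0x40)) result |= -(int64_t(1) << shift);  // sign-extend
  return result;
}

int main() {
  // Three nearby "frame PCs"; the third delta is negative, exercising the
  // signed encoding.
  const uint64_t pcs[] = {0x7f0000401000, 0x7f0000401123, 0x7f00004010a0};
  std::vector<uint8_t> packed;
  uint64_t prev = 0;
  for (uint64_t pc : pcs) {  // compress: delta + SLEB128, as CompressDelta does
    PutSLEB128(int64_t(pc - prev), packed);
    prev = pc;
  }
  const uint8_t *p = packed.data();
  prev = 0;
  for (uint64_t pc : pcs) {  // decompress and verify the round trip
    prev += uint64_t(GetSLEB128(p));
    if (prev != pc) return 1;
  }
  std::printf("3 frames (24 bytes raw) packed into %zu bytes\n", packed.size());
  return 0;
}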
+ +void StackStore::BlockInfo::TestOnlyUnmap(StackStore *store) { + if (uptr *ptr = Get()) + store->Unmap(ptr, kBlockSizeBytes); +} + +bool StackStore::BlockInfo::Stored(uptr n) { + return n + atomic_fetch_add(&stored_, n, memory_order_release) == + kBlockSizeFrames; +} + +bool StackStore::BlockInfo::IsPacked() const { + SpinMutexLock l(&mtx_); + return state == State::Packed; +} + +} // namespace __sanitizer diff --git a/libsanitizer/sanitizer_common/sanitizer_stack_store.h b/libsanitizer/sanitizer_common/sanitizer_stack_store.h new file mode 100644 index 00000000000..4f1a8caac6e --- /dev/null +++ b/libsanitizer/sanitizer_common/sanitizer_stack_store.h @@ -0,0 +1,121 @@ +//===-- sanitizer_stack_store.h ---------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef SANITIZER_STACK_STORE_H +#define SANITIZER_STACK_STORE_H + +#include "sanitizer_atomic.h" +#include "sanitizer_common.h" +#include "sanitizer_internal_defs.h" +#include "sanitizer_mutex.h" +#include "sanitizer_stacktrace.h" + +namespace __sanitizer { + +class StackStore { + static constexpr uptr kBlockSizeFrames = 0x100000; + static constexpr uptr kBlockCount = 0x1000; + static constexpr uptr kBlockSizeBytes = kBlockSizeFrames * sizeof(uptr); + + public: + enum class Compression : u8 { + None = 0, + Delta, + LZW, + }; + + constexpr StackStore() = default; + + using Id = u32; // Enough for 2^32 * sizeof(uptr) bytes of traces. + static_assert(u64(kBlockCount) * kBlockSizeFrames == 1ull << (sizeof(Id) * 8), + ""); + + Id Store(const StackTrace &trace, + uptr *pack /* number of blocks completed by this call */); + StackTrace Load(Id id); + uptr Allocated() const; + + // Packs all blocks which don't expect any more writes. A block is only going + // to be packed once. As soon as a trace from that block is requested, it + // will be unpacked and stay unpacked after that. + // Returns the number of released bytes. + uptr Pack(Compression type); + + void LockAll(); + void UnlockAll(); + + void TestOnlyUnmap(); + + private: + friend class StackStoreTest; + static constexpr uptr GetBlockIdx(uptr frame_idx) { + return frame_idx / kBlockSizeFrames; + } + + static constexpr uptr GetInBlockIdx(uptr frame_idx) { + return frame_idx % kBlockSizeFrames; + } + + static constexpr uptr IdToOffset(Id id) { + CHECK_NE(id, 0); + return id - 1; // Avoid zero as id. + } + + static constexpr uptr OffsetToId(Id id) { + // This maps UINT32_MAX to 0, and it will be retrieved as an empty stack. + // But this is not a problem as we will not be able to store anything after + // that anyway. + return id + 1; // Avoid zero as id. + } + + uptr *Alloc(uptr count, uptr *idx, uptr *pack); + + void *Map(uptr size, const char *mem_type); + void Unmap(void *addr, uptr size); + + // Total number of allocated frames. + atomic_uintptr_t total_frames_ = {}; + + // Tracks total allocated memory in bytes. + atomic_uintptr_t allocated_ = {}; + + // Each block will hold a pointer to exactly kBlockSizeFrames frames. + class BlockInfo { + atomic_uintptr_t data_; + // Counter to track store progress to know when we can Pack() the block. + atomic_uint32_t stored_; + // Protects alloc of new blocks.
+ mutable StaticSpinMutex mtx_; + + enum class State : u8 { + Storing = 0, + Packed, + Unpacked, + }; + State state SANITIZER_GUARDED_BY(mtx_); + + uptr *Create(StackStore *store); + + public: + uptr *Get() const; + uptr *GetOrCreate(StackStore *store); + uptr *GetOrUnpack(StackStore *store); + uptr Pack(Compression type, StackStore *store); + void TestOnlyUnmap(StackStore *store); + bool Stored(uptr n); + bool IsPacked() const; + void Lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { mtx_.Lock(); } + void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { mtx_.Unlock(); } + }; + + BlockInfo blocks_[kBlockCount] = {}; +}; + +} // namespace __sanitizer + +#endif // SANITIZER_STACK_STORE_H diff --git a/libsanitizer/sanitizer_common/sanitizer_stackdepot.cpp b/libsanitizer/sanitizer_common/sanitizer_stackdepot.cpp index 02855459922..a746d462193 100644 --- a/libsanitizer/sanitizer_common/sanitizer_stackdepot.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_stackdepot.cpp @@ -12,22 +12,22 @@ #include "sanitizer_stackdepot.h" +#include "sanitizer_atomic.h" #include "sanitizer_common.h" #include "sanitizer_hash.h" -#include "sanitizer_persistent_allocator.h" +#include "sanitizer_mutex.h" +#include "sanitizer_stack_store.h" #include "sanitizer_stackdepotbase.h" namespace __sanitizer { -static PersistentAllocator<uptr> traceAllocator; - struct StackDepotNode { using hash_type = u64; hash_type stack_hash; u32 link; + StackStore::Id store_id; static const u32 kTabSizeLog = SANITIZER_ANDROID ? 16 : 20; - static const u32 kStackSizeBits = 16; typedef StackTrace args_type; bool eq(hash_type hash, const args_type &args) const { @@ -50,14 +50,12 @@ typedef StackDepotHandle handle_type; }; +static StackStore stackStore; + // FIXME(dvyukov): this single reserved bit is used in TSan. typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog> StackDepot; static StackDepot theDepot; -// Keep rarely accessed stack traces out of frequently accessed nodes to improve -// caching efficiency. -static TwoLevelMap<uptr *, StackDepot::kNodesSize1, StackDepot::kNodesSize2> - tracePtrs; // Keep mutable data out of frequently accessed nodes to improve caching // efficiency. static TwoLevelMap<atomic_uint32_t, StackDepot::kNodesSize1, StackDepot::kNodesSize2> useCounts; +static void CompressStackStore() { + u64 start = Verbosity() >= 1 ?
MonotonicNanoTime() : 0; + uptr diff = stackStore.Pack(static_cast<StackStore::Compression>( + Abs(common_flags()->compress_stack_depot))); + if (!diff) + return; + if (Verbosity() >= 1) { + u64 finish = MonotonicNanoTime(); + uptr total_before = theDepot.GetStats().allocated + diff; + VPrintf(1, "%s: StackDepot released %zu KiB out of %zu KiB in %llu ms\n", + SanitizerToolName, diff >> 10, total_before >> 10, + (finish - start) / 1000000); + } +} + +namespace { + +class CompressThread { + public: + constexpr CompressThread() = default; + void NewWorkNotify(); + void Stop(); + void LockAndStop() SANITIZER_NO_THREAD_SAFETY_ANALYSIS; + void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS; + + private: + enum class State { + NotStarted = 0, + Started, + Failed, + Stopped, + }; + + void Run(); + + bool WaitForWork() { + semaphore_.Wait(); + return atomic_load(&run_, memory_order_acquire); + } + + Semaphore semaphore_ = {}; + StaticSpinMutex mutex_ = {}; + State state_ SANITIZER_GUARDED_BY(mutex_) = State::NotStarted; + void *thread_ SANITIZER_GUARDED_BY(mutex_) = nullptr; + atomic_uint8_t run_ = {}; +}; + +static CompressThread compress_thread; + +void CompressThread::NewWorkNotify() { + int compress = common_flags()->compress_stack_depot; + if (!compress) + return; + if (compress > 0 /* for testing or debugging */) { + SpinMutexLock l(&mutex_); + if (state_ == State::NotStarted) { + atomic_store(&run_, 1, memory_order_release); + CHECK_EQ(nullptr, thread_); + thread_ = internal_start_thread( + [](void *arg) -> void * { + reinterpret_cast<CompressThread *>(arg)->Run(); + return nullptr; + }, + this); + state_ = thread_ ? State::Started : State::Failed; + } + if (state_ == State::Started) { + semaphore_.Post(); + return; + } + } + CompressStackStore(); +} + +void CompressThread::Run() { + VPrintf(1, "%s: StackDepot compression thread started\n", SanitizerToolName); + while (WaitForWork()) CompressStackStore(); + VPrintf(1, "%s: StackDepot compression thread stopped\n", SanitizerToolName); +} + +void CompressThread::Stop() { + void *t = nullptr; + { + SpinMutexLock l(&mutex_); + if (state_ != State::Started) + return; + state_ = State::Stopped; + CHECK_NE(nullptr, thread_); + t = thread_; + thread_ = nullptr; + } + atomic_store(&run_, 0, memory_order_release); + semaphore_.Post(); + internal_join_thread(t); } +void CompressThread::LockAndStop() { + mutex_.Lock(); + if (state_ != State::Started) + return; + CHECK_NE(nullptr, thread_); + + atomic_store(&run_, 0, memory_order_release); + semaphore_.Post(); + internal_join_thread(thread_); + // Allow restarting after Unlock() if needed.
+ state_ = State::NotStarted; + thread_ = nullptr; +} + +void CompressThread::Unlock() { mutex_.Unlock(); } + +} // namespace + void StackDepotNode::store(u32 id, const args_type &args, hash_type hash) { stack_hash = hash; - uptr *stack_trace = traceAllocator.alloc(args.size + 1); - CHECK_LT(args.size, 1 << kStackSizeBits); - *stack_trace = args.size + (args.tag << kStackSizeBits); - internal_memcpy(stack_trace + 1, args.trace, args.size * sizeof(uptr)); - tracePtrs[id] = stack_trace; + uptr pack = 0; + store_id = stackStore.Store(args, &pack); + if (LIKELY(!pack)) + return; + compress_thread.NewWorkNotify(); } StackDepotNode::args_type StackDepotNode::load(u32 id) const { - const uptr *stack_trace = tracePtrs[id]; - if (!stack_trace) + if (!store_id) return {}; - uptr size = *stack_trace & ((1 << kStackSizeBits) - 1); - uptr tag = *stack_trace >> kStackSizeBits; - return args_type(stack_trace + 1, size, tag); + return stackStore.Load(store_id); } StackDepotStats StackDepotGetStats() { return theDepot.GetStats(); } @@ -109,9 +217,13 @@ StackTrace StackDepotGet(u32 id) { void StackDepotLockAll() { theDepot.LockAll(); + compress_thread.LockAndStop(); + stackStore.LockAll(); } void StackDepotUnlockAll() { + stackStore.UnlockAll(); + compress_thread.Unlock(); theDepot.UnlockAll(); } @@ -121,14 +233,15 @@ void StackDepotPrintAll() { #endif } +void StackDepotStopBackgroundThread() { compress_thread.Stop(); } + StackDepotHandle StackDepotNode::get_handle(u32 id) { return StackDepotHandle(&theDepot.nodes[id], id); } void StackDepotTestOnlyUnmap() { theDepot.TestOnlyUnmap(); - tracePtrs.TestOnlyUnmap(); - traceAllocator.TestOnlyUnmap(); + stackStore.TestOnlyUnmap(); } } // namespace __sanitizer diff --git a/libsanitizer/sanitizer_common/sanitizer_stackdepot.h b/libsanitizer/sanitizer_common/sanitizer_stackdepot.h index 56d655d9404..cca6fd53468 100644 --- a/libsanitizer/sanitizer_common/sanitizer_stackdepot.h +++ b/libsanitizer/sanitizer_common/sanitizer_stackdepot.h @@ -42,6 +42,7 @@ StackTrace StackDepotGet(u32 id); void StackDepotLockAll(); void StackDepotUnlockAll(); void StackDepotPrintAll(); +void StackDepotStopBackgroundThread(); void StackDepotTestOnlyUnmap(); diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cpp b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cpp index 5a12422fc6f..3013a0c4abd 100644 --- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cpp @@ -20,11 +20,10 @@ namespace __sanitizer { uptr StackTrace::GetNextInstructionPc(uptr pc) { -#if defined(__sparc__) || defined(__mips__) - return pc + 8; -#elif defined(__powerpc__) || defined(__arm__) || defined(__aarch64__) || \ - defined(__hexagon__) +#if defined(__aarch64__) return STRIP_PAC_PC((void *)pc) + 4; +#elif defined(__sparc__) || defined(__mips__) + return pc + 8; #elif SANITIZER_RISCV64 // Current check order is 4 -> 2 -> 6 -> 8 u8 InsnByte = *(u8 *)(pc); @@ -47,8 +46,10 @@ uptr StackTrace::GetNextInstructionPc(uptr pc) { } // bail-out if could not figure out the instruction size return 0; -#else +#elif SANITIZER_S390 || SANITIZER_I386 || SANITIZER_X32 || SANITIZER_X64 return pc + 1; +#else + return pc + 4; #endif } @@ -86,8 +87,8 @@ static inline uhwptr *GetCanonicFrame(uptr bp, // Nope, this does not look right either. This means the frame after next does // not have a valid frame pointer, but we can still extract the caller PC. // Unfortunately, there is no way to decide between GCC and LLVM frame - // layouts. Assume GCC. 
- return bp_prev - 1; + // layouts. Assume LLVM. + return bp_prev; #else return (uhwptr*)bp; #endif @@ -110,21 +111,14 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top, IsAligned((uptr)frame, sizeof(*frame)) && size < max_depth) { #ifdef __powerpc__ - // PowerPC ABIs specify that the return address is saved on the - // *caller's* stack frame. Thus we must dereference the back chain - // to find the caller frame before extracting it. + // PowerPC ABIs specify that the return address is saved at offset + // 16 of the *caller's* stack frame. Thus we must dereference the + // back chain to find the caller frame before extracting it. uhwptr *caller_frame = (uhwptr*)frame[0]; if (!IsValidFrame((uptr)caller_frame, stack_top, bottom) || !IsAligned((uptr)caller_frame, sizeof(uhwptr))) break; - // For most ABIs the offset where the return address is saved is two - // register sizes. The exception is the SVR4 ABI, which uses an - // offset of only one register size. -#ifdef _CALL_SYSV - uhwptr pc1 = caller_frame[1]; -#else uhwptr pc1 = caller_frame[2]; -#endif #elif defined(__s390__) uhwptr pc1 = frame[14]; #elif defined(__riscv) diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace.h b/libsanitizer/sanitizer_common/sanitizer_stacktrace.h index 11c6154b09e..9a5f8fb13a2 100644 --- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.h +++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.h @@ -20,7 +20,7 @@ namespace __sanitizer { struct BufferedStackTrace; -static const u32 kStackTraceMax = 256; +static const u32 kStackTraceMax = 255; #if SANITIZER_LINUX && defined(__mips__) # define SANITIZER_CAN_FAST_UNWIND 0 @@ -88,9 +88,6 @@ uptr StackTrace::GetPreviousInstructionPc(uptr pc) { // so we return (pc-2) in that case in order to be safe. // For A32 mode we return (pc-4) because all instructions are 32 bit long. return (pc - 3) & (~1); -#elif defined(__powerpc__) || defined(__powerpc64__) || defined(__aarch64__) - // PCs are always 4 byte aligned. 
- return pc - 4; #elif defined(__sparc__) || defined(__mips__) return pc - 8; #elif SANITIZER_RISCV64 @@ -101,8 +98,10 @@ // It seems difficult to figure out the exact instruction length - // pc - 2 seems like a safe option for the purposes of stack tracing return pc - 2; -#else +#elif SANITIZER_S390 || SANITIZER_I386 || SANITIZER_X32 || SANITIZER_X64 return pc - 1; +#else + return pc - 4; #endif } diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cpp b/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cpp index 2d1c03f7322..47983ee7ec7 100644 --- a/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cpp @@ -166,8 +166,8 @@ void BufferedStackTrace::Unwind(u32 max_depth, uptr pc, uptr bp, void *context, UnwindFast(pc, bp, stack_top, stack_bottom, max_depth); } -static int GetModuleAndOffsetForPc(uptr pc, char *module_name, - uptr module_name_len, uptr *pc_offset) { +int GetModuleAndOffsetForPc(uptr pc, char *module_name, uptr module_name_len, + uptr *pc_offset) { const char *found_module_name = nullptr; bool ok = Symbolizer::GetOrInit()->GetModuleNameAndOffsetForPC( pc, &found_module_name, pc_offset); @@ -216,10 +216,11 @@ void __sanitizer_symbolize_global(uptr data_addr, const char *fmt, } SANITIZER_INTERFACE_ATTRIBUTE -int __sanitizer_get_module_and_offset_for_pc(uptr pc, char *module_name, +int __sanitizer_get_module_and_offset_for_pc(void *pc, char *module_name, uptr module_name_len, - uptr *pc_offset) { - return __sanitizer::GetModuleAndOffsetForPc(pc, module_name, module_name_len, - pc_offset); + void **pc_offset) { + return __sanitizer::GetModuleAndOffsetForPc( + reinterpret_cast<uptr>(pc), module_name, module_name_len, + reinterpret_cast<uptr *>(pc_offset)); } } // extern "C" diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.cpp b/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.cpp index c6356dae23c..2d0eccc1602 100644 --- a/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.cpp @@ -104,6 +104,19 @@ static const char *DemangleFunctionName(const char *function) { return function; } +static void MaybeBuildIdToBuffer(const AddressInfo &info, bool PrefixSpace, + InternalScopedString *buffer) { + if (info.uuid_size) { + if (PrefixSpace) + buffer->append(" "); + buffer->append("(BuildId: "); + for (uptr i = 0; i < info.uuid_size; ++i) { + buffer->append("%02x", info.uuid[i]); + } + buffer->append(")"); + } +} + static const char kDefaultFormat[] = " #%n %p %F %L"; void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no, @@ -140,6 +153,9 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no, case 'o': buffer->append("0x%zx", info->module_offset); break; + case 'b': + MaybeBuildIdToBuffer(*info, /*PrefixSpace=*/false, buffer); + break; case 'f': buffer->append("%s", DemangleFunctionName(StripFunctionName( info->function, strip_func_prefix))); @@ -181,6 +197,8 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no, } else if (info->module) { RenderModuleLocation(buffer, info->module, info->module_offset, info->module_arch, strip_path_prefix); + + MaybeBuildIdToBuffer(*info, /*PrefixSpace=*/true, buffer); } else { buffer->append("()"); } @@ -193,6 +211,7 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no, // Always strip the
module name for %M. RenderModuleLocation(buffer, StripModuleName(info->module), info->module_offset, info->module_arch, ""); + MaybeBuildIdToBuffer(*info, /*PrefixSpace=*/true, buffer); } else { buffer->append("(%p)", (void *)address); } diff --git a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_win.cpp b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_win.cpp new file mode 100644 index 00000000000..f114acea79c --- /dev/null +++ b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_win.cpp @@ -0,0 +1,175 @@ +//===-- sanitizer_stoptheworld_win.cpp ------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// See sanitizer_stoptheworld.h for details. +// +//===----------------------------------------------------------------------===// + +#include "sanitizer_platform.h" + +#if SANITIZER_WINDOWS + +# define WIN32_LEAN_AND_MEAN +# include <windows.h> +// windows.h needs to be included before tlhelp32.h +# include <tlhelp32.h> + +# include "sanitizer_stoptheworld.h" + +namespace __sanitizer { + +namespace { + +struct SuspendedThreadsListWindows final : public SuspendedThreadsList { + InternalMmapVector<HANDLE> threadHandles; + InternalMmapVector<DWORD> threadIds; + + SuspendedThreadsListWindows() { + threadIds.reserve(1024); + threadHandles.reserve(1024); + } + + PtraceRegistersStatus GetRegistersAndSP(uptr index, + InternalMmapVector<uptr> *buffer, + uptr *sp) const override; + + tid_t GetThreadID(uptr index) const override; + uptr ThreadCount() const override; +}; + +// Stack Pointer register names on different architectures +# if SANITIZER_X64 +# define SP_REG Rsp +# elif SANITIZER_I386 +# define SP_REG Esp +# elif SANITIZER_ARM | SANITIZER_ARM64 +# define SP_REG Sp +# else +# error Architecture not supported!
+# endif + +PtraceRegistersStatus SuspendedThreadsListWindows::GetRegistersAndSP( + uptr index, InternalMmapVector<uptr> *buffer, uptr *sp) const { + CHECK_LT(index, threadHandles.size()); + + buffer->resize(RoundUpTo(sizeof(CONTEXT), sizeof(uptr)) / sizeof(uptr)); + CONTEXT *thread_context = reinterpret_cast<CONTEXT *>(buffer->data()); + thread_context->ContextFlags = CONTEXT_ALL; + CHECK(GetThreadContext(threadHandles[index], thread_context)); + *sp = thread_context->SP_REG; + + return REGISTERS_AVAILABLE; +} + +tid_t SuspendedThreadsListWindows::GetThreadID(uptr index) const { + CHECK_LT(index, threadIds.size()); + return threadIds[index]; +} + +uptr SuspendedThreadsListWindows::ThreadCount() const { + return threadIds.size(); +} + +struct RunThreadArgs { + StopTheWorldCallback callback; + void *argument; +}; + +DWORD WINAPI RunThread(void *argument) { + RunThreadArgs *run_args = (RunThreadArgs *)argument; + + const DWORD this_thread = GetCurrentThreadId(); + const DWORD this_process = GetCurrentProcessId(); + + SuspendedThreadsListWindows suspended_threads_list; + bool new_thread_found; + + do { + // Take a snapshot of all Threads + const HANDLE threads = CreateToolhelp32Snapshot(TH32CS_SNAPTHREAD, 0); + CHECK(threads != INVALID_HANDLE_VALUE); + + THREADENTRY32 thread_entry; + thread_entry.dwSize = sizeof(thread_entry); + new_thread_found = false; + + if (!Thread32First(threads, &thread_entry)) + break; + + do { + if (thread_entry.th32ThreadID == this_thread || + thread_entry.th32OwnerProcessID != this_process) + continue; + + bool suspended_thread = false; + for (const auto thread_id : suspended_threads_list.threadIds) { + if (thread_id == thread_entry.th32ThreadID) { + suspended_thread = true; + break; + } + } + + // Skip the Thread if it was already suspended + if (suspended_thread) + continue; + + const HANDLE thread = + OpenThread(THREAD_ALL_ACCESS, FALSE, thread_entry.th32ThreadID); + CHECK(thread); + + if (SuspendThread(thread) == (DWORD)-1) { + DWORD last_error = GetLastError(); + + VPrintf(1, "Could not suspend thread %lu (error %lu)", + thread_entry.th32ThreadID, last_error); + continue; + } + + suspended_threads_list.threadIds.push_back(thread_entry.th32ThreadID); + suspended_threads_list.threadHandles.push_back(thread); + new_thread_found = true; + } while (Thread32Next(threads, &thread_entry)); + + CloseHandle(threads); + + // Between the call to `CreateToolhelp32Snapshot` and suspending the + // relevant Threads, new Threads could have potentially been created. So + // continue to find and suspend new Threads until we don't find any. + } while (new_thread_found); + + // Now all Threads of this Process except this Thread should be suspended. + // Execute the callback function.
+ run_args->callback(suspended_threads_list, run_args->argument); + + // Resume all Threads + for (const auto suspended_thread_handle : + suspended_threads_list.threadHandles) { + CHECK_NE(ResumeThread(suspended_thread_handle), -1); + CloseHandle(suspended_thread_handle); + } + + return 0; +} + +} // namespace + +void StopTheWorld(StopTheWorldCallback callback, void *argument) { + struct RunThreadArgs arg = {callback, argument}; + DWORD trace_thread_id; + + auto trace_thread = + CreateThread(nullptr, 0, RunThread, &arg, 0, &trace_thread_id); + CHECK(trace_thread); + + WaitForSingleObject(trace_thread, INFINITE); + CloseHandle(trace_thread); +} + +} // namespace __sanitizer + +#endif // SANITIZER_WINDOWS diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer.cpp b/libsanitizer/sanitizer_common/sanitizer_symbolizer.cpp index 0c4b84c767a..d3cffaa6eef 100644 --- a/libsanitizer/sanitizer_common/sanitizer_symbolizer.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer.cpp @@ -11,10 +11,11 @@ //===----------------------------------------------------------------------===// #include "sanitizer_allocator_internal.h" -#include "sanitizer_platform.h" +#include "sanitizer_common.h" #include "sanitizer_internal_defs.h" #include "sanitizer_libc.h" #include "sanitizer_placement_new.h" +#include "sanitizer_platform.h" #include "sanitizer_symbolizer_internal.h" namespace __sanitizer { @@ -30,6 +31,7 @@ void AddressInfo::Clear() { InternalFree(file); internal_memset(this, 0, sizeof(AddressInfo)); function_offset = kUnknown; + uuid_size = 0; } void AddressInfo::FillModuleInfo(const char *mod_name, uptr mod_offset, @@ -37,6 +39,16 @@ void AddressInfo::FillModuleInfo(const char *mod_name, uptr mod_offset, module = internal_strdup(mod_name); module_offset = mod_offset; module_arch = mod_arch; + uuid_size = 0; +} + +void AddressInfo::FillModuleInfo(const LoadedModule &mod) { + module = internal_strdup(mod.full_name()); + module_offset = address - mod.base_address(); + module_arch = mod.arch(); + if (mod.uuid_size()) + internal_memcpy(uuid, mod.uuid(), mod.uuid_size()); + uuid_size = mod.uuid_size(); } SymbolizedStack::SymbolizedStack() : next(nullptr), info() {} @@ -126,10 +138,4 @@ Symbolizer::SymbolizerScope::~SymbolizerScope() { sym_->end_hook_(); } -void Symbolizer::LateInitializeTools() { - for (auto &tool : tools_) { - tool.LateInitialize(); - } -} - } // namespace __sanitizer diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer.h b/libsanitizer/sanitizer_common/sanitizer_symbolizer.h index 42bd157fa62..bad4761e345 100644 --- a/libsanitizer/sanitizer_common/sanitizer_symbolizer.h +++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer.h @@ -32,6 +32,8 @@ struct AddressInfo { char *module; uptr module_offset; ModuleArch module_arch; + u8 uuid[kModuleUUIDSize]; + uptr uuid_size; static const uptr kUnknown = ~(uptr)0; char *function; @@ -45,6 +47,8 @@ struct AddressInfo { // Deletes all strings and resets all fields. void Clear(); void FillModuleInfo(const char *mod_name, uptr mod_offset, ModuleArch arch); + void FillModuleInfo(const LoadedModule &mod); + uptr module_base() const { return address - module_offset; } }; // Linked list of symbolized frames (each frame is described by AddressInfo). @@ -209,9 +213,6 @@ class Symbolizer final { private: const Symbolizer *sym_; }; - - // Calls `LateInitialize()` on all items in `tools_`. 
- void LateInitializeTools(); }; #ifdef SANITIZER_WINDOWS diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_internal.h b/libsanitizer/sanitizer_common/sanitizer_symbolizer_internal.h index b8670941a05..df122ed3425 100644 --- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_internal.h +++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_internal.h @@ -70,11 +70,6 @@ class SymbolizerTool { return nullptr; } - // Called during the LateInitialize phase of Sanitizer initialization. - // Usually this is a safe place to call code that might need to use user - // memory allocators. - virtual void LateInitialize() {} - protected: ~SymbolizerTool() {} }; @@ -91,7 +86,7 @@ class SymbolizerProcess { ~SymbolizerProcess() {} /// The maximum number of arguments required to invoke a tool process. - static const unsigned kArgVMax = 6; + static const unsigned kArgVMax = 16; // Customizable by subclasses. virtual bool StartSymbolizerSubprocess(); diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cpp b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cpp index 3fc994fd3de..8bbd4af0c7c 100644 --- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cpp @@ -84,15 +84,12 @@ const char *ExtractTokenUpToDelimiter(const char *str, const char *delimiter, SymbolizedStack *Symbolizer::SymbolizePC(uptr addr) { Lock l(&mu_); - const char *module_name = nullptr; - uptr module_offset; - ModuleArch arch; SymbolizedStack *res = SymbolizedStack::New(addr); - if (!FindModuleNameAndOffsetForAddress(addr, &module_name, &module_offset, - &arch)) + auto *mod = FindModuleForAddress(addr); + if (!mod) return res; // Always fill data about module name and offset. - res->info.FillModuleInfo(module_name, module_offset, arch); + res->info.FillModuleInfo(*mod); for (auto &tool : tools_) { SymbolizerScope sym_scope(this); if (tool.SymbolizePC(addr, res)) { @@ -277,14 +274,17 @@ class LLVMSymbolizerProcess final : public SymbolizerProcess { const char* const kSymbolizerArch = "--default-arch=unknown"; #endif - const char *const inline_flag = common_flags()->symbolize_inline_frames - ? "--inlines" - : "--no-inlines"; + const char *const demangle_flag = + common_flags()->demangle ? "--demangle" : "--no-demangle"; + const char *const inline_flag = + common_flags()->symbolize_inline_frames ? "--inlines" : "--no-inlines"; int i = 0; argv[i++] = path_to_binary; + argv[i++] = demangle_flag; argv[i++] = inline_flag; argv[i++] = kSymbolizerArch; argv[i++] = nullptr; + CHECK_LE(i, kArgVMax); } }; diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.cpp b/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.cpp index 5c25b28b5dc..ac811c8a913 100644 --- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.cpp @@ -20,7 +20,6 @@ #include #include -#include #include #include #include @@ -58,13 +57,6 @@ bool DlAddrSymbolizer::SymbolizeData(uptr addr, DataInfo *datainfo) { return true; } -#define K_ATOS_ENV_VAR "__check_mach_ports_lookup" - -// This cannot live in `AtosSymbolizerProcess` because instances of that object -// are allocated by the internal allocator which under ASan is poisoned with -// kAsanInternalHeapMagic. 
-static char kAtosMachPortEnvEntry[] = K_ATOS_ENV_VAR "=000000000000000"; - class AtosSymbolizerProcess final : public SymbolizerProcess { public: explicit AtosSymbolizerProcess(const char *path) @@ -72,51 +64,13 @@ class AtosSymbolizerProcess final : public SymbolizerProcess { pid_str_[0] = '\0'; } - void LateInitialize() { - if (SANITIZER_IOSSIM) { - // `putenv()` may call malloc/realloc so it is only safe to do this - // during LateInitialize() or later (i.e. we can't do this in the - // constructor). We also can't do this in `StartSymbolizerSubprocess()` - // because in TSan we switch allocators when we're symbolizing. - // We use `putenv()` rather than `setenv()` so that we can later directly - // write into the storage without LibC getting involved to change what the - // variable is set to - int result = putenv(kAtosMachPortEnvEntry); - CHECK_EQ(result, 0); - } - } - private: bool StartSymbolizerSubprocess() override { - // Configure sandbox before starting atos process. - // Put the string command line argument in the object so that it outlives // the call to GetArgV. - internal_snprintf(pid_str_, sizeof(pid_str_), "%d", internal_getpid()); - - if (SANITIZER_IOSSIM) { - // `atos` in the simulator is restricted in its ability to retrieve the - // task port for the target process (us) so we need to do extra work - // to pass our task port to it. - mach_port_t ports[]{mach_task_self()}; - kern_return_t ret = - mach_ports_register(mach_task_self(), ports, /*count=*/1); - CHECK_EQ(ret, KERN_SUCCESS); - - // Set environment variable that signals to `atos` that it should look - // for our task port. We can't call `setenv()` here because it might call - // malloc/realloc. To avoid that we instead update the - // `mach_port_env_var_entry_` variable with our current PID. - uptr count = internal_snprintf(kAtosMachPortEnvEntry, - sizeof(kAtosMachPortEnvEntry), - K_ATOS_ENV_VAR "=%s", pid_str_); - CHECK_GE(count, sizeof(K_ATOS_ENV_VAR) + internal_strlen(pid_str_)); - // Document our assumption but without calling `getenv()` in normal - // builds. - DCHECK(getenv(K_ATOS_ENV_VAR)); - DCHECK_EQ(internal_strcmp(getenv(K_ATOS_ENV_VAR), pid_str_), 0); - } + internal_snprintf(pid_str_, sizeof(pid_str_), "%d", (int)internal_getpid()); + // Configure sandbox before starting atos process. return SymbolizerProcess::StartSymbolizerSubprocess(); } @@ -137,13 +91,10 @@ class AtosSymbolizerProcess final : public SymbolizerProcess { argv[i++] = "-d"; } argv[i++] = nullptr; + CHECK_LE(i, kArgVMax); } char pid_str_[16]; - // Space for `\0` in `K_ATOS_ENV_VAR` is reused for `=`. 
- static_assert(sizeof(kAtosMachPortEnvEntry) == - (sizeof(K_ATOS_ENV_VAR) + sizeof(pid_str_)), - "sizes should match"); }; #undef K_ATOS_ENV_VAR @@ -249,8 +200,6 @@ bool AtosSymbolizer::SymbolizeData(uptr addr, DataInfo *info) { return true; } -void AtosSymbolizer::LateInitialize() { process_->LateInitialize(); } - } // namespace __sanitizer #endif // SANITIZER_MAC diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.h b/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.h index 401d30fa503..d5abe9d98c1 100644 --- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.h +++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.h @@ -35,7 +35,6 @@ class AtosSymbolizer final : public SymbolizerTool { bool SymbolizePC(uptr addr, SymbolizedStack *stack) override; bool SymbolizeData(uptr addr, DataInfo *info) override; - void LateInitialize() override; private: AtosSymbolizerProcess *process_; diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_markup.cpp b/libsanitizer/sanitizer_common/sanitizer_symbolizer_markup.cpp index 9a5b4a8c54c..1ec0c5cad7a 100644 --- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_markup.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_markup.cpp @@ -100,9 +100,7 @@ Symbolizer *Symbolizer::PlatformInit() { return new (symbolizer_allocator_) Symbolizer({}); } -void Symbolizer::LateInitialize() { - Symbolizer::GetOrInit()->LateInitializeTools(); -} +void Symbolizer::LateInitialize() { Symbolizer::GetOrInit(); } void StartReportDeadlySignal() {} void ReportDeadlySignal(const SignalContext &sig, u32 tid, diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp b/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp index 4cd4b4636f0..5f6e4cc3180 100644 --- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp @@ -213,9 +213,14 @@ class Addr2LineProcess final : public SymbolizerProcess { const char *(&argv)[kArgVMax]) const override { int i = 0; argv[i++] = path_to_binary; - argv[i++] = "-iCfe"; + if (common_flags()->demangle) + argv[i++] = "-C"; + if (common_flags()->symbolize_inline_frames) + argv[i++] = "-i"; + argv[i++] = "-fe"; argv[i++] = module_name_; argv[i++] = nullptr; + CHECK_LE(i, kArgVMax); } bool ReachedEndOfOutput(const char *buffer, uptr length) const override; @@ -312,37 +317,42 @@ class Addr2LinePool final : public SymbolizerTool { FIRST_32_SECOND_64(UINT32_MAX, UINT64_MAX); }; -#if SANITIZER_SUPPORTS_WEAK_HOOKS +# if SANITIZER_SUPPORTS_WEAK_HOOKS extern "C" { SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool __sanitizer_symbolize_code(const char *ModuleName, u64 ModuleOffset, - char *Buffer, int MaxLength, - bool SymbolizeInlineFrames); -SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE -bool __sanitizer_symbolize_data(const char *ModuleName, u64 ModuleOffset, - char *Buffer, int MaxLength); -SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE -void __sanitizer_symbolize_flush(); -SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE -int __sanitizer_symbolize_demangle(const char *Name, char *Buffer, - int MaxLength); + char *Buffer, int MaxLength); +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool +__sanitizer_symbolize_data(const char *ModuleName, u64 ModuleOffset, + char *Buffer, int MaxLength); +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void +__sanitizer_symbolize_flush(); +SANITIZER_INTERFACE_ATTRIBUTE 
SANITIZER_WEAK_ATTRIBUTE int +__sanitizer_symbolize_demangle(const char *Name, char *Buffer, int MaxLength); +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool +__sanitizer_symbolize_set_demangle(bool Demangle); +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool +__sanitizer_symbolize_set_inline_frames(bool InlineFrames); } // extern "C" class InternalSymbolizer final : public SymbolizerTool { public: static InternalSymbolizer *get(LowLevelAllocator *alloc) { - if (__sanitizer_symbolize_code != 0 && - __sanitizer_symbolize_data != 0) { - return new(*alloc) InternalSymbolizer(); - } + if (__sanitizer_symbolize_set_demangle) + CHECK(__sanitizer_symbolize_set_demangle(common_flags()->demangle)); + if (__sanitizer_symbolize_set_inline_frames) + CHECK(__sanitizer_symbolize_set_inline_frames( + common_flags()->symbolize_inline_frames)); + if (__sanitizer_symbolize_code && __sanitizer_symbolize_data) + return new (*alloc) InternalSymbolizer(); return 0; } bool SymbolizePC(uptr addr, SymbolizedStack *stack) override { bool result = __sanitizer_symbolize_code( - stack->info.module, stack->info.module_offset, buffer_, kBufferSize, - common_flags()->symbolize_inline_frames); - if (result) ParseSymbolizePCOutput(buffer_, stack); + stack->info.module, stack->info.module_offset, buffer_, kBufferSize); + if (result) + ParseSymbolizePCOutput(buffer_, stack); return result; } @@ -365,7 +375,7 @@ class InternalSymbolizer final : public SymbolizerTool { if (__sanitizer_symbolize_demangle) { for (uptr res_length = 1024; res_length <= InternalSizeClassMap::kMaxSize;) { - char *res_buff = static_cast<char*>(InternalAlloc(res_length)); + char *res_buff = static_cast<char *>(InternalAlloc(res_length)); uptr req_length = __sanitizer_symbolize_demangle(name, res_buff, res_length); if (req_length > res_length) { @@ -380,19 +390,19 @@ } private: - InternalSymbolizer() { } + InternalSymbolizer() {} static const int kBufferSize = 16 * 1024; char buffer_[kBufferSize]; }; -#else // SANITIZER_SUPPORTS_WEAK_HOOKS +# else // SANITIZER_SUPPORTS_WEAK_HOOKS class InternalSymbolizer final : public SymbolizerTool { public: static InternalSymbolizer *get(LowLevelAllocator *alloc) { return 0; } }; -#endif // SANITIZER_SUPPORTS_WEAK_HOOKS +# endif // SANITIZER_SUPPORTS_WEAK_HOOKS const char *Symbolizer::PlatformDemangle(const char *name) { return DemangleSwiftAndCXX(name); } @@ -492,7 +502,7 @@ Symbolizer *Symbolizer::PlatformInit() { } void Symbolizer::LateInitialize() { - Symbolizer::GetOrInit()->LateInitializeTools(); + Symbolizer::GetOrInit(); InitializeSwiftDemangler(); } diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_report.cpp b/libsanitizer/sanitizer_common/sanitizer_symbolizer_report.cpp index 869c8935330..ac855c8be1c 100644 --- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_report.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_report.cpp @@ -211,9 +211,9 @@ static void ReportDeadlySignalImpl(const SignalContext &sig, u32 tid, Report("Hint: pc points to the zero page.\n"); if (sig.is_memory_access) { const char *access_type = - sig.write_flag == SignalContext::WRITE + sig.write_flag == SignalContext::Write ? "WRITE" : (sig.write_flag == SignalContext::READ ? "READ" : "UNKNOWN"); + : (sig.write_flag == SignalContext::Read ?
"READ" : "UNKNOWN"); Report("The signal is caused by a %s memory access.\n", access_type); if (!sig.is_true_faulting_addr) Report("Hint: this fault was caused by a dereference of a high value " diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cpp b/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cpp index 702d901353d..c647ab107ec 100644 --- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cpp @@ -318,7 +318,7 @@ Symbolizer *Symbolizer::PlatformInit() { } void Symbolizer::LateInitialize() { - Symbolizer::GetOrInit()->LateInitializeTools(); + Symbolizer::GetOrInit(); } } // namespace __sanitizer diff --git a/libsanitizer/sanitizer_common/sanitizer_syscalls_netbsd.inc b/libsanitizer/sanitizer_common/sanitizer_syscalls_netbsd.inc index c4a9d99fe2f..4ce5de06275 100644 --- a/libsanitizer/sanitizer_common/sanitizer_syscalls_netbsd.inc +++ b/libsanitizer/sanitizer_common/sanitizer_syscalls_netbsd.inc @@ -2255,13 +2255,13 @@ PRE_SYSCALL(getcontext)(void *ucp_) { /* Nothing to do */ } POST_SYSCALL(getcontext)(long long res, void *ucp_) { /* Nothing to do */ } PRE_SYSCALL(setcontext)(void *ucp_) { if (ucp_) { - PRE_READ(ucp_, ucontext_t_sz); + PRE_READ(ucp_, ucontext_t_sz(ucp_)); } } POST_SYSCALL(setcontext)(long long res, void *ucp_) {} PRE_SYSCALL(_lwp_create)(void *ucp_, long long flags_, void *new_lwp_) { if (ucp_) { - PRE_READ(ucp_, ucontext_t_sz); + PRE_READ(ucp_, ucontext_t_sz(ucp_)); } } POST_SYSCALL(_lwp_create) diff --git a/libsanitizer/sanitizer_common/sanitizer_thread_registry.cpp b/libsanitizer/sanitizer_common/sanitizer_thread_registry.cpp index a34b8c15aa5..278f6defca9 100644 --- a/libsanitizer/sanitizer_common/sanitizer_thread_registry.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_thread_registry.cpp @@ -13,6 +13,8 @@ #include "sanitizer_thread_registry.h" +#include "sanitizer_placement_new.h" + namespace __sanitizer { ThreadContextBase::ThreadContextBase(u32 tid) @@ -108,7 +110,7 @@ ThreadRegistry::ThreadRegistry(ThreadContextFactory factory, u32 max_threads, max_threads_(max_threads), thread_quarantine_size_(thread_quarantine_size), max_reuse_(max_reuse), - mtx_(), + mtx_(MutexThreadRegistry), total_threads_(0), alive_threads_(0), max_alive_threads_(0), @@ -162,6 +164,12 @@ u32 ThreadRegistry::CreateThread(uptr user_id, bool detached, u32 parent_tid, max_alive_threads_++; CHECK_EQ(alive_threads_, max_alive_threads_); } + if (user_id) { + // Ensure that user_id is unique. If it's not the case we are screwed. + // Ignoring this situation may lead to very hard to debug false + // positives later (e.g. if we join a wrong thread). 
+ CHECK(live_.try_emplace(user_id, tid).second); + } tctx->SetCreated(user_id, total_threads_++, detached, parent_tid, arg); return tid; @@ -221,14 +229,8 @@ void ThreadRegistry::SetThreadName(u32 tid, const char *name) { void ThreadRegistry::SetThreadNameByUserId(uptr user_id, const char *name) { ThreadRegistryLock l(this); - for (u32 tid = 0; tid < threads_.size(); tid++) { - ThreadContextBase *tctx = threads_[tid]; - if (tctx != 0 && tctx->user_id == user_id && - tctx->status != ThreadStatusInvalid) { - tctx->SetName(name); - return; - } - } + if (const auto *tid = live_.find(user_id)) + threads_[tid->second]->SetName(name); } void ThreadRegistry::DetachThread(u32 tid, void *arg) { @@ -241,6 +243,8 @@ void ThreadRegistry::DetachThread(u32 tid, void *arg) { } tctx->OnDetached(arg); if (tctx->status == ThreadStatusFinished) { + if (tctx->user_id) + live_.erase(tctx->user_id); tctx->SetDead(); QuarantinePush(tctx); } else { @@ -260,6 +264,8 @@ void ThreadRegistry::JoinThread(u32 tid, void *arg) { return; } if ((destroyed = tctx->GetDestroyed())) { + if (tctx->user_id) + live_.erase(tctx->user_id); tctx->SetJoined(arg); QuarantinePush(tctx); } @@ -292,6 +298,8 @@ ThreadStatus ThreadRegistry::FinishThread(u32 tid) { } tctx->SetFinished(); if (dead) { + if (tctx->user_id) + live_.erase(tctx->user_id); tctx->SetDead(); QuarantinePush(tctx); } @@ -333,6 +341,19 @@ ThreadContextBase *ThreadRegistry::QuarantinePop() { return tctx; } +u32 ThreadRegistry::ConsumeThreadUserId(uptr user_id) { + ThreadRegistryLock l(this); + u32 tid; + auto *t = live_.find(user_id); + CHECK(t); + tid = t->second; + live_.erase(t); + auto *tctx = threads_[tid]; + CHECK_EQ(tctx->user_id, user_id); + tctx->user_id = 0; + return tid; +} + void ThreadRegistry::SetThreadUserId(u32 tid, uptr user_id) { ThreadRegistryLock l(this); ThreadContextBase *tctx = threads_[tid]; @@ -341,6 +362,23 @@ void ThreadRegistry::SetThreadUserId(u32 tid, uptr user_id) { CHECK_NE(tctx->status, ThreadStatusDead); CHECK_EQ(tctx->user_id, 0); tctx->user_id = user_id; + CHECK(live_.try_emplace(user_id, tctx->tid).second); +} + +u32 ThreadRegistry::OnFork(u32 tid) { + ThreadRegistryLock l(this); + // We only purge user_id (pthread_t) of live threads because + // they cause CHECK failures if new threads with matching pthread_t + // created after fork. + // Potentially we could purge more info (ThreadContextBase themselves), + // but it's hard to test and easy to introduce new issues by doing this. 
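The purge loop that follows implements OnFork. As context for how the child side of that contract is typically reached, here is a runnable stand-in built on pthread_atfork; the stub registry call and tid accessor are assumptions for illustration, not part of this patch:

    #include <pthread.h>
    #include <sys/wait.h>
    #include <unistd.h>
    #include <cstdio>

    // Hypothetical stand-ins for runtime internals not shown in this hunk.
    static unsigned CurrentTid() { return 0; }
    static unsigned OnForkStub(unsigned tid) {
      // Only the forking thread survives in the child, but libc may hand a
      // new thread a pthread_t that collides with a dead thread's value,
      // which would trip the uniqueness CHECK in CreateThread above.
      std::printf("child: purging user_ids of every thread but tid %u\n", tid);
      return 1;  // alive threads before fork
    }

    static void HandleForkChild() { OnForkStub(CurrentTid()); }

    int main() {
      pthread_atfork(/*prepare=*/nullptr, /*parent=*/nullptr,
                     /*child=*/HandleForkChild);
      if (fork() == 0) _exit(0);  // child handler runs before fork() returns
      wait(nullptr);
    }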
+ for (auto *tctx : threads_) { + if (tctx->tid == tid || !tctx->user_id) + continue; + CHECK(live_.erase(tctx->user_id)); + tctx->user_id = 0; + } + return alive_threads_; } } // namespace __sanitizer diff --git a/libsanitizer/sanitizer_common/sanitizer_thread_registry.h b/libsanitizer/sanitizer_common/sanitizer_thread_registry.h index a8a4d4d86a0..2c7e5c276fa 100644 --- a/libsanitizer/sanitizer_common/sanitizer_thread_registry.h +++ b/libsanitizer/sanitizer_common/sanitizer_thread_registry.h @@ -15,6 +15,7 @@ #define SANITIZER_THREAD_REGISTRY_H #include "sanitizer_common.h" +#include "sanitizer_dense_map.h" #include "sanitizer_list.h" #include "sanitizer_mutex.h" @@ -85,7 +86,7 @@ class ThreadContextBase { typedef ThreadContextBase* (*ThreadContextFactory)(u32 tid); -class MUTEX ThreadRegistry { +class SANITIZER_MUTEX ThreadRegistry { public: ThreadRegistry(ThreadContextFactory factory); ThreadRegistry(ThreadContextFactory factory, u32 max_threads, @@ -94,15 +95,17 @@ class MUTEX ThreadRegistry { uptr *alive = nullptr); uptr GetMaxAliveThreads(); - void Lock() ACQUIRE() { mtx_.Lock(); } - void CheckLocked() const CHECK_LOCKED() { mtx_.CheckLocked(); } - void Unlock() RELEASE() { mtx_.Unlock(); } + void Lock() SANITIZER_ACQUIRE() { mtx_.Lock(); } + void CheckLocked() const SANITIZER_CHECK_LOCKED() { mtx_.CheckLocked(); } + void Unlock() SANITIZER_RELEASE() { mtx_.Unlock(); } // Should be guarded by ThreadRegistryLock. ThreadContextBase *GetThreadLocked(u32 tid) { return threads_.empty() ? nullptr : threads_[tid]; } + u32 NumThreadsLocked() const { return threads_.size(); } + u32 CreateThread(uptr user_id, bool detached, u32 parent_tid, void *arg); typedef void (*ThreadCallback)(ThreadContextBase *tctx, void *arg); @@ -127,8 +130,14 @@ class MUTEX ThreadRegistry { // Finishes thread and returns previous status. ThreadStatus FinishThread(u32 tid); void StartThread(u32 tid, tid_t os_id, ThreadType thread_type, void *arg); + u32 ConsumeThreadUserId(uptr user_id); void SetThreadUserId(u32 tid, uptr user_id); + // OnFork must be called in the child process after fork to purge old + // threads that don't exist anymore (except for the current thread tid). + // Returns number of alive threads before fork. + u32 OnFork(u32 tid); + private: const ThreadContextFactory context_factory_; const u32 max_threads_; @@ -146,6 +155,7 @@ class MUTEX ThreadRegistry { InternalMmapVector threads_; IntrusiveList dead_threads_; IntrusiveList invalid_threads_; + DenseMap live_; void QuarantinePush(ThreadContextBase *tctx); ThreadContextBase *QuarantinePop(); diff --git a/libsanitizer/sanitizer_common/sanitizer_thread_safety.h b/libsanitizer/sanitizer_common/sanitizer_thread_safety.h index 52b25edaa7a..c34ea804da2 100644 --- a/libsanitizer/sanitizer_common/sanitizer_thread_safety.h +++ b/libsanitizer/sanitizer_common/sanitizer_thread_safety.h @@ -16,27 +16,34 @@ #define SANITIZER_THREAD_SAFETY_H #if defined(__clang__) -# define THREAD_ANNOTATION(x) __attribute__((x)) +# define SANITIZER_THREAD_ANNOTATION(x) __attribute__((x)) #else -# define THREAD_ANNOTATION(x) +# define SANITIZER_THREAD_ANNOTATION(x) #endif -#define MUTEX THREAD_ANNOTATION(capability("mutex")) -#define SCOPED_LOCK THREAD_ANNOTATION(scoped_lockable) -#define GUARDED_BY(x) THREAD_ANNOTATION(guarded_by(x)) -#define PT_GUARDED_BY(x) THREAD_ANNOTATION(pt_guarded_by(x)) -#define REQUIRES(...) THREAD_ANNOTATION(requires_capability(__VA_ARGS__)) -#define REQUIRES_SHARED(...) 
\ - THREAD_ANNOTATION(requires_shared_capability(__VA_ARGS__)) -#define ACQUIRE(...) THREAD_ANNOTATION(acquire_capability(__VA_ARGS__)) -#define ACQUIRE_SHARED(...) \ - THREAD_ANNOTATION(acquire_shared_capability(__VA_ARGS__)) -#define TRY_ACQUIRE(...) THREAD_ANNOTATION(try_acquire_capability(__VA_ARGS__)) -#define RELEASE(...) THREAD_ANNOTATION(release_capability(__VA_ARGS__)) -#define RELEASE_SHARED(...) \ - THREAD_ANNOTATION(release_shared_capability(__VA_ARGS__)) -#define EXCLUDES(...) THREAD_ANNOTATION(locks_excluded(__VA_ARGS__)) -#define CHECK_LOCKED(...) THREAD_ANNOTATION(assert_capability(__VA_ARGS__)) -#define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION(no_thread_safety_analysis) +#define SANITIZER_MUTEX SANITIZER_THREAD_ANNOTATION(capability("mutex")) +#define SANITIZER_SCOPED_LOCK SANITIZER_THREAD_ANNOTATION(scoped_lockable) +#define SANITIZER_GUARDED_BY(x) SANITIZER_THREAD_ANNOTATION(guarded_by(x)) +#define SANITIZER_PT_GUARDED_BY(x) SANITIZER_THREAD_ANNOTATION(pt_guarded_by(x)) +#define SANITIZER_REQUIRES(...) \ + SANITIZER_THREAD_ANNOTATION(requires_capability(__VA_ARGS__)) +#define SANITIZER_REQUIRES_SHARED(...) \ + SANITIZER_THREAD_ANNOTATION(requires_shared_capability(__VA_ARGS__)) +#define SANITIZER_ACQUIRE(...) \ + SANITIZER_THREAD_ANNOTATION(acquire_capability(__VA_ARGS__)) +#define SANITIZER_ACQUIRE_SHARED(...) \ + SANITIZER_THREAD_ANNOTATION(acquire_shared_capability(__VA_ARGS__)) +#define SANITIZER_TRY_ACQUIRE(...) \ + SANITIZER_THREAD_ANNOTATION(try_acquire_capability(__VA_ARGS__)) +#define SANITIZER_RELEASE(...) \ + SANITIZER_THREAD_ANNOTATION(release_capability(__VA_ARGS__)) +#define SANITIZER_RELEASE_SHARED(...) \ + SANITIZER_THREAD_ANNOTATION(release_shared_capability(__VA_ARGS__)) +#define SANITIZER_EXCLUDES(...) \ + SANITIZER_THREAD_ANNOTATION(locks_excluded(__VA_ARGS__)) +#define SANITIZER_CHECK_LOCKED(...) 
\ + SANITIZER_THREAD_ANNOTATION(assert_capability(__VA_ARGS__)) +#define SANITIZER_NO_THREAD_SAFETY_ANALYSIS \ + SANITIZER_THREAD_ANNOTATION(no_thread_safety_analysis) #endif diff --git a/libsanitizer/sanitizer_common/sanitizer_type_traits.h b/libsanitizer/sanitizer_common/sanitizer_type_traits.h index 2a58d9874d2..06a44d1b5c7 100644 --- a/libsanitizer/sanitizer_common/sanitizer_type_traits.h +++ b/libsanitizer/sanitizer_common/sanitizer_type_traits.h @@ -13,6 +13,8 @@ #ifndef SANITIZER_TYPE_TRAITS_H #define SANITIZER_TYPE_TRAITS_H +#include "sanitizer_common/sanitizer_internal_defs.h" + namespace __sanitizer { struct true_type { @@ -57,6 +59,83 @@ struct conditional { using type = F; }; +template +struct remove_reference { + using type = T; +}; +template +struct remove_reference { + using type = T; +}; +template +struct remove_reference { + using type = T; +}; + +template +WARN_UNUSED_RESULT inline typename remove_reference::type&& move(T&& t) { + return static_cast::type&&>(t); +} + +template +WARN_UNUSED_RESULT inline constexpr T&& forward( + typename remove_reference::type& t) { + return static_cast(t); +} + +template +WARN_UNUSED_RESULT inline constexpr T&& forward( + typename remove_reference::type&& t) { + return static_cast(t); +} + +template +struct integral_constant { + static constexpr const T value = v; + typedef T value_type; + typedef integral_constant type; + constexpr operator value_type() const { return value; } + constexpr value_type operator()() const { return value; } +}; + +#ifndef __has_builtin +# define __has_builtin(x) 0 +#endif + +#if __has_builtin(__is_trivially_destructible) + +template +struct is_trivially_destructible + : public integral_constant {}; + +#elif __has_builtin(__has_trivial_destructor) + +template +struct is_trivially_destructible + : public integral_constant {}; + +#else + +template +struct is_trivially_destructible + : public integral_constant {}; + +#endif + +#if __has_builtin(__is_trivially_copyable) + +template +struct is_trivially_copyable + : public integral_constant {}; + +#else + +template +struct is_trivially_copyable + : public integral_constant {}; + +#endif + } // namespace __sanitizer #endif diff --git a/libsanitizer/sanitizer_common/sanitizer_unwind_win.cpp b/libsanitizer/sanitizer_common/sanitizer_unwind_win.cpp index 7e01c81d042..afcd01dae0b 100644 --- a/libsanitizer/sanitizer_common/sanitizer_unwind_win.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_unwind_win.cpp @@ -57,30 +57,37 @@ void BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) { InitializeDbgHelpIfNeeded(); size = 0; -#if defined(_WIN64) +# if SANITIZER_WINDOWS64 +# if SANITIZER_ARM64 + int machine_type = IMAGE_FILE_MACHINE_ARM64; + stack_frame.AddrPC.Offset = ctx.Pc; + stack_frame.AddrFrame.Offset = ctx.Fp; + stack_frame.AddrStack.Offset = ctx.Sp; +# else int machine_type = IMAGE_FILE_MACHINE_AMD64; stack_frame.AddrPC.Offset = ctx.Rip; stack_frame.AddrFrame.Offset = ctx.Rbp; stack_frame.AddrStack.Offset = ctx.Rsp; -#else +# endif +# else int machine_type = IMAGE_FILE_MACHINE_I386; stack_frame.AddrPC.Offset = ctx.Eip; stack_frame.AddrFrame.Offset = ctx.Ebp; stack_frame.AddrStack.Offset = ctx.Esp; -#endif +# endif stack_frame.AddrPC.Mode = AddrModeFlat; stack_frame.AddrFrame.Mode = AddrModeFlat; stack_frame.AddrStack.Mode = AddrModeFlat; while (StackWalk64(machine_type, GetCurrentProcess(), GetCurrentThread(), - &stack_frame, &ctx, NULL, SymFunctionTableAccess64, - SymGetModuleBase64, NULL) && - size < Min(max_depth, kStackTraceMax)) { + 
&stack_frame, &ctx, NULL, SymFunctionTableAccess64, + SymGetModuleBase64, NULL) && + size < Min(max_depth, kStackTraceMax)) { trace_buffer[size++] = (uptr)stack_frame.AddrPC.Offset; } } -#ifdef __clang__ -#pragma clang diagnostic pop -#endif -#endif // #if !SANITIZER_GO +# ifdef __clang__ +# pragma clang diagnostic pop +# endif +# endif // #if !SANITIZER_GO #endif // SANITIZER_WINDOWS diff --git a/libsanitizer/sanitizer_common/sanitizer_win.cpp b/libsanitizer/sanitizer_common/sanitizer_win.cpp index c3607dbed23..53770331199 100644 --- a/libsanitizer/sanitizer_common/sanitizer_win.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_win.cpp @@ -16,7 +16,6 @@ #define WIN32_LEAN_AND_MEAN #define NOGDI -#include #include #include #include @@ -94,6 +93,11 @@ bool FileExists(const char *filename) { return ::GetFileAttributesA(filename) != INVALID_FILE_ATTRIBUTES; } +bool DirExists(const char *path) { + auto attr = ::GetFileAttributesA(path); + return (attr != INVALID_FILE_ATTRIBUTES) && (attr & FILE_ATTRIBUTE_DIRECTORY); +} + uptr internal_getpid() { return GetProcessId(GetCurrentProcess()); } @@ -337,6 +341,11 @@ bool MprotectNoAccess(uptr addr, uptr size) { return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection); } +bool MprotectReadOnly(uptr addr, uptr size) { + DWORD old_protection; + return VirtualProtect((LPVOID)addr, size, PAGE_READONLY, &old_protection); +} + void ReleaseMemoryPagesToOS(uptr beg, uptr end) { uptr beg_aligned = RoundDownTo(beg, GetPageSizeCached()), end_aligned = RoundDownTo(end, GetPageSizeCached()); @@ -513,7 +522,7 @@ void ReExec() { UNIMPLEMENTED(); } -void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {} +void PlatformPrepareForSandboxing(void *args) {} bool StackSizeIsUnlimited() { UNIMPLEMENTED(); @@ -566,7 +575,9 @@ void Abort() { internal__exit(3); } -bool CreateDir(const char *pathname) { return _mkdir(pathname) == 0; } +bool CreateDir(const char *pathname) { + return CreateDirectoryA(pathname, nullptr) != 0; +} #if !SANITIZER_GO // Read the file to extract the ImageBase field from the PE header. If ASLR is @@ -944,13 +955,18 @@ void SignalContext::InitPcSpBp() { CONTEXT *context_record = (CONTEXT *)context; pc = (uptr)exception_record->ExceptionAddress; -#ifdef _WIN64 +# if SANITIZER_WINDOWS64 +# if SANITIZER_ARM64 + bp = (uptr)context_record->Fp; + sp = (uptr)context_record->Sp; +# else bp = (uptr)context_record->Rbp; sp = (uptr)context_record->Rsp; -#else +# endif +# else bp = (uptr)context_record->Ebp; sp = (uptr)context_record->Esp; -#endif +# endif } uptr SignalContext::GetAddress() const { @@ -972,7 +988,7 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const { // The write flag is only available for access violation exceptions. if (exception_record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION) - return SignalContext::UNKNOWN; + return SignalContext::Unknown; // The contents of this array are documented at // https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-exception_record @@ -980,13 +996,13 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const { // second element is the faulting address. 
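The switch that follows implements exactly the mapping the Microsoft documentation describes: ExceptionInformation[0] is 0 for a faulting read, 1 for a write, and 8 for a DEP (execute) violation. A standalone SEH illustration of the same decoding, independent of the sanitizer runtime:

    #include <windows.h>
    #include <stdio.h>

    static const char *AccessKind(const EXCEPTION_RECORD *rec) {
      if (rec->ExceptionCode != EXCEPTION_ACCESS_VIOLATION)
        return "UNKNOWN";  // the flag exists only for access violations
      switch (rec->ExceptionInformation[0]) {
        case 0: return "READ";
        case 1: return "WRITE";
        case 8: return "UNKNOWN";  // DEP/execute fault
      }
      return "UNKNOWN";
    }

    static int Filter(EXCEPTION_POINTERS *info) {
      printf("faulting access: %s\n", AccessKind(info->ExceptionRecord));
      return EXCEPTION_EXECUTE_HANDLER;
    }

    int main() {
      __try {
        *(volatile int *)0 = 42;  // deliberate WRITE to the null page
      } __except (Filter(GetExceptionInformation())) {
      }
    }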
switch (exception_record->ExceptionInformation[0]) { case 0: - return SignalContext::READ; + return SignalContext::Read; case 1: - return SignalContext::WRITE; + return SignalContext::Write; case 8: - return SignalContext::UNKNOWN; + return SignalContext::Unknown; } - return SignalContext::UNKNOWN; + return SignalContext::Unknown; } void SignalContext::DumpAllRegisters(void *context) { diff --git a/libsanitizer/tsan/tsan_clock.cpp b/libsanitizer/tsan/tsan_clock.cpp deleted file mode 100644 index d122b67c0aa..00000000000 --- a/libsanitizer/tsan/tsan_clock.cpp +++ /dev/null @@ -1,625 +0,0 @@ -//===-- tsan_clock.cpp ----------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of ThreadSanitizer (TSan), a race detector. -// -//===----------------------------------------------------------------------===// -#include "tsan_clock.h" -#include "tsan_rtl.h" -#include "sanitizer_common/sanitizer_placement_new.h" - -// SyncClock and ThreadClock implement vector clocks for sync variables -// (mutexes, atomic variables, file descriptors, etc) and threads, respectively. -// ThreadClock contains fixed-size vector clock for maximum number of threads. -// SyncClock contains growable vector clock for currently necessary number of -// threads. -// Together they implement very simple model of operations, namely: -// -// void ThreadClock::acquire(const SyncClock *src) { -// for (int i = 0; i < kMaxThreads; i++) -// clock[i] = max(clock[i], src->clock[i]); -// } -// -// void ThreadClock::release(SyncClock *dst) const { -// for (int i = 0; i < kMaxThreads; i++) -// dst->clock[i] = max(dst->clock[i], clock[i]); -// } -// -// void ThreadClock::releaseStoreAcquire(SyncClock *sc) const { -// for (int i = 0; i < kMaxThreads; i++) { -// tmp = clock[i]; -// clock[i] = max(clock[i], sc->clock[i]); -// sc->clock[i] = tmp; -// } -// } -// -// void ThreadClock::ReleaseStore(SyncClock *dst) const { -// for (int i = 0; i < kMaxThreads; i++) -// dst->clock[i] = clock[i]; -// } -// -// void ThreadClock::acq_rel(SyncClock *dst) { -// acquire(dst); -// release(dst); -// } -// -// Conformance to this model is extensively verified in tsan_clock_test.cpp. -// However, the implementation is significantly more complex. The complexity -// allows to implement important classes of use cases in O(1) instead of O(N). -// -// The use cases are: -// 1. Singleton/once atomic that has a single release-store operation followed -// by zillions of acquire-loads (the acquire-load is O(1)). -// 2. Thread-local mutex (both lock and unlock can be O(1)). -// 3. Leaf mutex (unlock is O(1)). -// 4. A mutex shared by 2 threads (both lock and unlock can be O(1)). -// 5. An atomic with a single writer (writes can be O(1)). -// The implementation dynamically adopts to workload. So if an atomic is in -// read-only phase, these reads will be O(1); if it later switches to read/write -// phase, the implementation will correctly handle that by switching to O(N). -// -// Thread-safety note: all const operations on SyncClock's are conducted under -// a shared lock; all non-const operations on SyncClock's are conducted under -// an exclusive lock; ThreadClock's are private to respective threads and so -// do not need any protection. 
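As a compilable reference for the semantics spelled out in the comment above, the naive O(N) model looks like this; kMaxThreads and the flat arrays are purely illustrative, whereas the deleted implementation used the two-level layout described next:

    #include <algorithm>
    #include <cstdint>

    constexpr int kMaxThreads = 8;  // illustrative; the real limit is far larger

    struct SyncClockModel { uint64_t clock[kMaxThreads] = {}; };

    struct ThreadClockModel {
      uint64_t clock[kMaxThreads] = {};

      // acquire: pull other threads' times out of the sync variable.
      void Acquire(const SyncClockModel &src) {
        for (int i = 0; i < kMaxThreads; i++)
          clock[i] = std::max(clock[i], src.clock[i]);
      }
      // release: publish our times into the sync variable.
      void Release(SyncClockModel *dst) const {
        for (int i = 0; i < kMaxThreads; i++)
          dst->clock[i] = std::max(dst->clock[i], clock[i]);
      }
      // release-store: overwrite the sync variable wholesale.
      void ReleaseStore(SyncClockModel *dst) const {
        for (int i = 0; i < kMaxThreads; i++)
          dst->clock[i] = clock[i];
      }
    };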
-// -// Description of SyncClock state: -// clk_ - variable size vector clock, low kClkBits hold timestamp, -// the remaining bits hold "acquired" flag (the actual value is thread's -// reused counter); -// if acquired == thr->reused_, then the respective thread has already -// acquired this clock (except possibly for dirty elements). -// dirty_ - holds up to two indices in the vector clock that other threads -// need to acquire regardless of "acquired" flag value; -// release_store_tid_ - denotes that the clock state is a result of -// release-store operation by the thread with release_store_tid_ index. -// release_store_reused_ - reuse count of release_store_tid_. - -namespace __tsan { - -static atomic_uint32_t *ref_ptr(ClockBlock *cb) { - return reinterpret_cast(&cb->table[ClockBlock::kRefIdx]); -} - -// Drop reference to the first level block idx. -static void UnrefClockBlock(ClockCache *c, u32 idx, uptr blocks) { - ClockBlock *cb = ctx->clock_alloc.Map(idx); - atomic_uint32_t *ref = ref_ptr(cb); - u32 v = atomic_load(ref, memory_order_acquire); - for (;;) { - CHECK_GT(v, 0); - if (v == 1) - break; - if (atomic_compare_exchange_strong(ref, &v, v - 1, memory_order_acq_rel)) - return; - } - // First level block owns second level blocks, so them as well. - for (uptr i = 0; i < blocks; i++) - ctx->clock_alloc.Free(c, cb->table[ClockBlock::kBlockIdx - i]); - ctx->clock_alloc.Free(c, idx); -} - -ThreadClock::ThreadClock(unsigned tid, unsigned reused) - : tid_(tid) - , reused_(reused + 1) // 0 has special meaning - , last_acquire_() - , global_acquire_() - , cached_idx_() - , cached_size_() - , cached_blocks_() { - CHECK_LT(tid, kMaxTidInClock); - CHECK_EQ(reused_, ((u64)reused_ << kClkBits) >> kClkBits); - nclk_ = tid_ + 1; - internal_memset(clk_, 0, sizeof(clk_)); -} - -void ThreadClock::ResetCached(ClockCache *c) { - if (cached_idx_) { - UnrefClockBlock(c, cached_idx_, cached_blocks_); - cached_idx_ = 0; - cached_size_ = 0; - cached_blocks_ = 0; - } -} - -void ThreadClock::acquire(ClockCache *c, SyncClock *src) { - DCHECK_LE(nclk_, kMaxTid); - DCHECK_LE(src->size_, kMaxTid); - - // Check if it's empty -> no need to do anything. - const uptr nclk = src->size_; - if (nclk == 0) - return; - - bool acquired = false; - for (unsigned i = 0; i < kDirtyTids; i++) { - SyncClock::Dirty dirty = src->dirty_[i]; - unsigned tid = dirty.tid(); - if (tid != kInvalidTid) { - if (clk_[tid] < dirty.epoch) { - clk_[tid] = dirty.epoch; - acquired = true; - } - } - } - - // Check if we've already acquired src after the last release operation on src - if (tid_ >= nclk || src->elem(tid_).reused != reused_) { - // O(N) acquire. - nclk_ = max(nclk_, nclk); - u64 *dst_pos = &clk_[0]; - for (ClockElem &src_elem : *src) { - u64 epoch = src_elem.epoch; - if (*dst_pos < epoch) { - *dst_pos = epoch; - acquired = true; - } - dst_pos++; - } - - // Remember that this thread has acquired this clock. - if (nclk > tid_) - src->elem(tid_).reused = reused_; - } - - if (acquired) { - last_acquire_ = clk_[tid_]; - ResetCached(c); - } -} - -void ThreadClock::releaseStoreAcquire(ClockCache *c, SyncClock *sc) { - DCHECK_LE(nclk_, kMaxTid); - DCHECK_LE(sc->size_, kMaxTid); - - if (sc->size_ == 0) { - // ReleaseStore will correctly set release_store_tid_, - // which can be important for future operations. - ReleaseStore(c, sc); - return; - } - - nclk_ = max(nclk_, (uptr) sc->size_); - - // Check if we need to resize sc. - if (sc->size_ < nclk_) - sc->Resize(c, nclk_); - - bool acquired = false; - - sc->Unshare(c); - // Update sc->clk_. 
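The FlushDirty() call that follows folds the dirty overlay back into the main array. In isolation, the overlay technique that made single-thread updates O(1) can be sketched as below (sizes are illustrative):

    #include <cstdint>

    constexpr unsigned kInvalid = ~0u;

    struct Dirty { unsigned tid = kInvalid; uint64_t epoch = 0; };

    // Up to two (tid, epoch) pairs override the main array until
    // FlushDirty() folds them back in; active dirty entries always win.
    struct Overlay {
      uint64_t clock[64] = {};
      Dirty dirty[2];

      uint64_t Get(unsigned tid) const {
        for (const Dirty &d : dirty)
          if (d.tid == tid) return d.epoch;  // dirty entry shadows clock[]
        return clock[tid];
      }
      void FlushDirty() {
        for (Dirty &d : dirty) {
          if (d.tid != kInvalid) clock[d.tid] = d.epoch;
          d.tid = kInvalid;
        }
      }
    };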
- sc->FlushDirty(); - uptr i = 0; - for (ClockElem &ce : *sc) { - u64 tmp = clk_[i]; - if (clk_[i] < ce.epoch) { - clk_[i] = ce.epoch; - acquired = true; - } - ce.epoch = tmp; - ce.reused = 0; - i++; - } - sc->release_store_tid_ = kInvalidTid; - sc->release_store_reused_ = 0; - - if (acquired) { - last_acquire_ = clk_[tid_]; - ResetCached(c); - } -} - -void ThreadClock::release(ClockCache *c, SyncClock *dst) { - DCHECK_LE(nclk_, kMaxTid); - DCHECK_LE(dst->size_, kMaxTid); - - if (dst->size_ == 0) { - // ReleaseStore will correctly set release_store_tid_, - // which can be important for future operations. - ReleaseStore(c, dst); - return; - } - - // Check if we need to resize dst. - if (dst->size_ < nclk_) - dst->Resize(c, nclk_); - - // Check if we had not acquired anything from other threads - // since the last release on dst. If so, we need to update - // only dst->elem(tid_). - if (!HasAcquiredAfterRelease(dst)) { - UpdateCurrentThread(c, dst); - if (dst->release_store_tid_ != tid_ || - dst->release_store_reused_ != reused_) - dst->release_store_tid_ = kInvalidTid; - return; - } - - // O(N) release. - dst->Unshare(c); - // First, remember whether we've acquired dst. - bool acquired = IsAlreadyAcquired(dst); - // Update dst->clk_. - dst->FlushDirty(); - uptr i = 0; - for (ClockElem &ce : *dst) { - ce.epoch = max(ce.epoch, clk_[i]); - ce.reused = 0; - i++; - } - // Clear 'acquired' flag in the remaining elements. - dst->release_store_tid_ = kInvalidTid; - dst->release_store_reused_ = 0; - // If we've acquired dst, remember this fact, - // so that we don't need to acquire it on next acquire. - if (acquired) - dst->elem(tid_).reused = reused_; -} - -void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) { - DCHECK_LE(nclk_, kMaxTid); - DCHECK_LE(dst->size_, kMaxTid); - - if (dst->size_ == 0 && cached_idx_ != 0) { - // Reuse the cached clock. - // Note: we could reuse/cache the cached clock in more cases: - // we could update the existing clock and cache it, or replace it with the - // currently cached clock and release the old one. And for a shared - // existing clock, we could replace it with the currently cached; - // or unshare, update and cache. But, for simplicity, we currently reuse - // cached clock only when the target clock is empty. - dst->tab_ = ctx->clock_alloc.Map(cached_idx_); - dst->tab_idx_ = cached_idx_; - dst->size_ = cached_size_; - dst->blocks_ = cached_blocks_; - CHECK_EQ(dst->dirty_[0].tid(), kInvalidTid); - // The cached clock is shared (immutable), - // so this is where we store the current clock. - dst->dirty_[0].set_tid(tid_); - dst->dirty_[0].epoch = clk_[tid_]; - dst->release_store_tid_ = tid_; - dst->release_store_reused_ = reused_; - // Remember that we don't need to acquire it in future. - dst->elem(tid_).reused = reused_; - // Grab a reference. - atomic_fetch_add(ref_ptr(dst->tab_), 1, memory_order_relaxed); - return; - } - - // Check if we need to resize dst. - if (dst->size_ < nclk_) - dst->Resize(c, nclk_); - - if (dst->release_store_tid_ == tid_ && - dst->release_store_reused_ == reused_ && - !HasAcquiredAfterRelease(dst)) { - UpdateCurrentThread(c, dst); - return; - } - - // O(N) release-store. - dst->Unshare(c); - // Note: dst can be larger than this ThreadClock. - // This is fine since clk_ beyond size is all zeros. 
- uptr i = 0; - for (ClockElem &ce : *dst) { - ce.epoch = clk_[i]; - ce.reused = 0; - i++; - } - for (uptr i = 0; i < kDirtyTids; i++) dst->dirty_[i].set_tid(kInvalidTid); - dst->release_store_tid_ = tid_; - dst->release_store_reused_ = reused_; - // Remember that we don't need to acquire it in future. - dst->elem(tid_).reused = reused_; - - // If the resulting clock is cachable, cache it for future release operations. - // The clock is always cachable if we released to an empty sync object. - if (cached_idx_ == 0 && dst->Cachable()) { - // Grab a reference to the ClockBlock. - atomic_uint32_t *ref = ref_ptr(dst->tab_); - if (atomic_load(ref, memory_order_acquire) == 1) - atomic_store_relaxed(ref, 2); - else - atomic_fetch_add(ref_ptr(dst->tab_), 1, memory_order_relaxed); - cached_idx_ = dst->tab_idx_; - cached_size_ = dst->size_; - cached_blocks_ = dst->blocks_; - } -} - -void ThreadClock::acq_rel(ClockCache *c, SyncClock *dst) { - acquire(c, dst); - ReleaseStore(c, dst); -} - -// Updates only single element related to the current thread in dst->clk_. -void ThreadClock::UpdateCurrentThread(ClockCache *c, SyncClock *dst) const { - // Update the threads time, but preserve 'acquired' flag. - for (unsigned i = 0; i < kDirtyTids; i++) { - SyncClock::Dirty *dirty = &dst->dirty_[i]; - const unsigned tid = dirty->tid(); - if (tid == tid_ || tid == kInvalidTid) { - dirty->set_tid(tid_); - dirty->epoch = clk_[tid_]; - return; - } - } - // Reset all 'acquired' flags, O(N). - // We are going to touch dst elements, so we need to unshare it. - dst->Unshare(c); - dst->elem(tid_).epoch = clk_[tid_]; - for (uptr i = 0; i < dst->size_; i++) - dst->elem(i).reused = 0; - dst->FlushDirty(); -} - -// Checks whether the current thread has already acquired src. -bool ThreadClock::IsAlreadyAcquired(const SyncClock *src) const { - if (src->elem(tid_).reused != reused_) - return false; - for (unsigned i = 0; i < kDirtyTids; i++) { - SyncClock::Dirty dirty = src->dirty_[i]; - if (dirty.tid() != kInvalidTid) { - if (clk_[dirty.tid()] < dirty.epoch) - return false; - } - } - return true; -} - -// Checks whether the current thread has acquired anything -// from other clocks after releasing to dst (directly or indirectly). -bool ThreadClock::HasAcquiredAfterRelease(const SyncClock *dst) const { - const u64 my_epoch = dst->elem(tid_).epoch; - return my_epoch <= last_acquire_ || - my_epoch <= atomic_load_relaxed(&global_acquire_); -} - -// Sets a single element in the vector clock. -// This function is called only from weird places like AcquireGlobal. -void ThreadClock::set(ClockCache *c, unsigned tid, u64 v) { - DCHECK_LT(tid, kMaxTid); - DCHECK_GE(v, clk_[tid]); - clk_[tid] = v; - if (nclk_ <= tid) - nclk_ = tid + 1; - last_acquire_ = clk_[tid_]; - ResetCached(c); -} - -void ThreadClock::DebugDump(int(*printf)(const char *s, ...)) { - printf("clock=["); - for (uptr i = 0; i < nclk_; i++) - printf("%s%llu", i == 0 ? "" : ",", clk_[i]); - printf("] tid=%u/%u last_acq=%llu", tid_, reused_, last_acquire_); -} - -SyncClock::SyncClock() { - ResetImpl(); -} - -SyncClock::~SyncClock() { - // Reset must be called before dtor. 
- CHECK_EQ(size_, 0); - CHECK_EQ(blocks_, 0); - CHECK_EQ(tab_, 0); - CHECK_EQ(tab_idx_, 0); -} - -void SyncClock::Reset(ClockCache *c) { - if (size_) - UnrefClockBlock(c, tab_idx_, blocks_); - ResetImpl(); -} - -void SyncClock::ResetImpl() { - tab_ = 0; - tab_idx_ = 0; - size_ = 0; - blocks_ = 0; - release_store_tid_ = kInvalidTid; - release_store_reused_ = 0; - for (uptr i = 0; i < kDirtyTids; i++) dirty_[i].set_tid(kInvalidTid); -} - -void SyncClock::Resize(ClockCache *c, uptr nclk) { - Unshare(c); - if (nclk <= capacity()) { - // Memory is already allocated, just increase the size. - size_ = nclk; - return; - } - if (size_ == 0) { - // Grow from 0 to one-level table. - CHECK_EQ(size_, 0); - CHECK_EQ(blocks_, 0); - CHECK_EQ(tab_, 0); - CHECK_EQ(tab_idx_, 0); - tab_idx_ = ctx->clock_alloc.Alloc(c); - tab_ = ctx->clock_alloc.Map(tab_idx_); - internal_memset(tab_, 0, sizeof(*tab_)); - atomic_store_relaxed(ref_ptr(tab_), 1); - size_ = 1; - } else if (size_ > blocks_ * ClockBlock::kClockCount) { - u32 idx = ctx->clock_alloc.Alloc(c); - ClockBlock *new_cb = ctx->clock_alloc.Map(idx); - uptr top = size_ - blocks_ * ClockBlock::kClockCount; - CHECK_LT(top, ClockBlock::kClockCount); - const uptr move = top * sizeof(tab_->clock[0]); - internal_memcpy(&new_cb->clock[0], tab_->clock, move); - internal_memset(&new_cb->clock[top], 0, sizeof(*new_cb) - move); - internal_memset(tab_->clock, 0, move); - append_block(idx); - } - // At this point we have first level table allocated and all clock elements - // are evacuated from it to a second level block. - // Add second level tables as necessary. - while (nclk > capacity()) { - u32 idx = ctx->clock_alloc.Alloc(c); - ClockBlock *cb = ctx->clock_alloc.Map(idx); - internal_memset(cb, 0, sizeof(*cb)); - append_block(idx); - } - size_ = nclk; -} - -// Flushes all dirty elements into the main clock array. -void SyncClock::FlushDirty() { - for (unsigned i = 0; i < kDirtyTids; i++) { - Dirty *dirty = &dirty_[i]; - if (dirty->tid() != kInvalidTid) { - CHECK_LT(dirty->tid(), size_); - elem(dirty->tid()).epoch = dirty->epoch; - dirty->set_tid(kInvalidTid); - } - } -} - -bool SyncClock::IsShared() const { - if (size_ == 0) - return false; - atomic_uint32_t *ref = ref_ptr(tab_); - u32 v = atomic_load(ref, memory_order_acquire); - CHECK_GT(v, 0); - return v > 1; -} - -// Unshares the current clock if it's shared. -// Shared clocks are immutable, so they need to be unshared before any updates. -// Note: this does not apply to dirty entries as they are not shared. -void SyncClock::Unshare(ClockCache *c) { - if (!IsShared()) - return; - // First, copy current state into old. - SyncClock old; - old.tab_ = tab_; - old.tab_idx_ = tab_idx_; - old.size_ = size_; - old.blocks_ = blocks_; - old.release_store_tid_ = release_store_tid_; - old.release_store_reused_ = release_store_reused_; - for (unsigned i = 0; i < kDirtyTids; i++) - old.dirty_[i] = dirty_[i]; - // Then, clear current object. - ResetImpl(); - // Allocate brand new clock in the current object. - Resize(c, old.size_); - // Now copy state back into this object. - Iter old_iter(&old); - for (ClockElem &ce : *this) { - ce = *old_iter; - ++old_iter; - } - release_store_tid_ = old.release_store_tid_; - release_store_reused_ = old.release_store_reused_; - for (unsigned i = 0; i < kDirtyTids; i++) - dirty_[i] = old.dirty_[i]; - // Drop reference to old and delete if necessary. - old.Reset(c); -} - -// Can we cache this clock for future release operations? 
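Cachable(), defined next, answers that with a no-dirty-entries check plus a uniqueness test on the block's reference counter. The same reuse-only-if-uniquely-owned idea in self-contained form (names and sizes are illustrative):

    #include <atomic>
    #include <cstdint>

    // A shared (refcount > 1) snapshot is immutable and must be copied
    // before mutation; a uniquely owned one may be reused in place.
    struct Snapshot {
      std::atomic<uint32_t> ref{1};
      uint64_t clock[64] = {};
    };

    static bool UniquelyOwned(const Snapshot &s) {
      return s.ref.load(std::memory_order_acquire) == 1;
    }

    static void Set(Snapshot *&s, int i, uint64_t v) {
      if (!UniquelyOwned(*s)) {
        Snapshot *copy = new Snapshot;  // unshare before mutating
        for (int k = 0; k < 64; k++) copy->clock[k] = s->clock[k];
        if (s->ref.fetch_sub(1, std::memory_order_acq_rel) == 1)
          delete s;  // we held the last reference after all
        s = copy;
      }
      s->clock[i] = v;
    }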
-ALWAYS_INLINE bool SyncClock::Cachable() const { - if (size_ == 0) - return false; - for (unsigned i = 0; i < kDirtyTids; i++) { - if (dirty_[i].tid() != kInvalidTid) - return false; - } - return atomic_load_relaxed(ref_ptr(tab_)) == 1; -} - -// elem linearizes the two-level structure into linear array. -// Note: this is used only for one time accesses, vector operations use -// the iterator as it is much faster. -ALWAYS_INLINE ClockElem &SyncClock::elem(unsigned tid) const { - DCHECK_LT(tid, size_); - const uptr block = tid / ClockBlock::kClockCount; - DCHECK_LE(block, blocks_); - tid %= ClockBlock::kClockCount; - if (block == blocks_) - return tab_->clock[tid]; - u32 idx = get_block(block); - ClockBlock *cb = ctx->clock_alloc.Map(idx); - return cb->clock[tid]; -} - -ALWAYS_INLINE uptr SyncClock::capacity() const { - if (size_ == 0) - return 0; - uptr ratio = sizeof(ClockBlock::clock[0]) / sizeof(ClockBlock::table[0]); - // How many clock elements we can fit into the first level block. - // +1 for ref counter. - uptr top = ClockBlock::kClockCount - RoundUpTo(blocks_ + 1, ratio) / ratio; - return blocks_ * ClockBlock::kClockCount + top; -} - -ALWAYS_INLINE u32 SyncClock::get_block(uptr bi) const { - DCHECK(size_); - DCHECK_LT(bi, blocks_); - return tab_->table[ClockBlock::kBlockIdx - bi]; -} - -ALWAYS_INLINE void SyncClock::append_block(u32 idx) { - uptr bi = blocks_++; - CHECK_EQ(get_block(bi), 0); - tab_->table[ClockBlock::kBlockIdx - bi] = idx; -} - -// Used only by tests. -u64 SyncClock::get(unsigned tid) const { - for (unsigned i = 0; i < kDirtyTids; i++) { - Dirty dirty = dirty_[i]; - if (dirty.tid() == tid) - return dirty.epoch; - } - return elem(tid).epoch; -} - -// Used only by Iter test. -u64 SyncClock::get_clean(unsigned tid) const { - return elem(tid).epoch; -} - -void SyncClock::DebugDump(int(*printf)(const char *s, ...)) { - printf("clock=["); - for (uptr i = 0; i < size_; i++) - printf("%s%llu", i == 0 ? "" : ",", elem(i).epoch); - printf("] reused=["); - for (uptr i = 0; i < size_; i++) - printf("%s%llu", i == 0 ? "" : ",", elem(i).reused); - printf("] release_store_tid=%d/%d dirty_tids=%d[%llu]/%d[%llu]", - release_store_tid_, release_store_reused_, dirty_[0].tid(), - dirty_[0].epoch, dirty_[1].tid(), dirty_[1].epoch); -} - -void SyncClock::Iter::Next() { - // Finished with the current block, move on to the next one. - block_++; - if (block_ < parent_->blocks_) { - // Iterate over the next second level block. - u32 idx = parent_->get_block(block_); - ClockBlock *cb = ctx->clock_alloc.Map(idx); - pos_ = &cb->clock[0]; - end_ = pos_ + min(parent_->size_ - block_ * ClockBlock::kClockCount, - ClockBlock::kClockCount); - return; - } - if (block_ == parent_->blocks_ && - parent_->size_ > parent_->blocks_ * ClockBlock::kClockCount) { - // Iterate over elements in the first level block. - pos_ = &parent_->tab_->clock[0]; - end_ = pos_ + min(parent_->size_ - block_ * ClockBlock::kClockCount, - ClockBlock::kClockCount); - return; - } - parent_ = nullptr; // denotes end -} -} // namespace __tsan diff --git a/libsanitizer/tsan/tsan_clock.h b/libsanitizer/tsan/tsan_clock.h deleted file mode 100644 index 11cbc0c0b86..00000000000 --- a/libsanitizer/tsan/tsan_clock.h +++ /dev/null @@ -1,293 +0,0 @@ -//===-- tsan_clock.h --------------------------------------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of ThreadSanitizer (TSan), a race detector. -// -//===----------------------------------------------------------------------===// -#ifndef TSAN_CLOCK_H -#define TSAN_CLOCK_H - -#include "tsan_defs.h" -#include "tsan_dense_alloc.h" - -namespace __tsan { - -typedef DenseSlabAlloc ClockAlloc; -typedef DenseSlabAllocCache ClockCache; - -// The clock that lives in sync variables (mutexes, atomics, etc). -class SyncClock { - public: - SyncClock(); - ~SyncClock(); - - uptr size() const; - - // These are used only in tests. - u64 get(unsigned tid) const; - u64 get_clean(unsigned tid) const; - - void Resize(ClockCache *c, uptr nclk); - void Reset(ClockCache *c); - - void DebugDump(int(*printf)(const char *s, ...)); - - // Clock element iterator. - // Note: it iterates only over the table without regard to dirty entries. - class Iter { - public: - explicit Iter(SyncClock* parent); - Iter& operator++(); - bool operator!=(const Iter& other); - ClockElem &operator*(); - - private: - SyncClock *parent_; - // [pos_, end_) is the current continuous range of clock elements. - ClockElem *pos_; - ClockElem *end_; - int block_; // Current number of second level block. - - NOINLINE void Next(); - }; - - Iter begin(); - Iter end(); - - private: - friend class ThreadClock; - friend class Iter; - static const uptr kDirtyTids = 2; - - struct Dirty { - u32 tid() const { return tid_ == kShortInvalidTid ? kInvalidTid : tid_; } - void set_tid(u32 tid) { - tid_ = tid == kInvalidTid ? kShortInvalidTid : tid; - } - u64 epoch : kClkBits; - - private: - // Full kInvalidTid won't fit into Dirty::tid. - static const u64 kShortInvalidTid = (1ull << (64 - kClkBits)) - 1; - u64 tid_ : 64 - kClkBits; // kInvalidId if not active - }; - - static_assert(sizeof(Dirty) == 8, "Dirty is not 64bit"); - - unsigned release_store_tid_; - unsigned release_store_reused_; - Dirty dirty_[kDirtyTids]; - // If size_ is 0, tab_ is nullptr. - // If size <= 64 (kClockCount), tab_ contains pointer to an array with - // 64 ClockElem's (ClockBlock::clock). - // Otherwise, tab_ points to an array with up to 127 u32 elements, - // each pointing to the second-level 512b block with 64 ClockElem's. - // Unused space in the first level ClockBlock is used to store additional - // clock elements. - // The last u32 element in the first level ClockBlock is always used as - // reference counter. - // - // See the following scheme for details. - // All memory blocks are 512 bytes (allocated from ClockAlloc). - // Clock (clk) elements are 64 bits. - // Idx and ref are 32 bits. - // - // tab_ - // | - // \/ - // +----------------------------------------------------+ - // | clk128 | clk129 | ...unused... | idx1 | idx0 | ref | - // +----------------------------------------------------+ - // | | - // | \/ - // | +----------------+ - // | | clk0 ... clk63 | - // | +----------------+ - // \/ - // +------------------+ - // | clk64 ... clk127 | - // +------------------+ - // - // Note: dirty entries, if active, always override what's stored in the clock. - ClockBlock *tab_; - u32 tab_idx_; - u16 size_; - u16 blocks_; // Number of second level blocks. 
- - void Unshare(ClockCache *c); - bool IsShared() const; - bool Cachable() const; - void ResetImpl(); - void FlushDirty(); - uptr capacity() const; - u32 get_block(uptr bi) const; - void append_block(u32 idx); - ClockElem &elem(unsigned tid) const; -}; - -// The clock that lives in threads. -class ThreadClock { - public: - typedef DenseSlabAllocCache Cache; - - explicit ThreadClock(unsigned tid, unsigned reused = 0); - - u64 get(unsigned tid) const; - void set(ClockCache *c, unsigned tid, u64 v); - void set(u64 v); - void tick(); - uptr size() const; - - void acquire(ClockCache *c, SyncClock *src); - void releaseStoreAcquire(ClockCache *c, SyncClock *src); - void release(ClockCache *c, SyncClock *dst); - void acq_rel(ClockCache *c, SyncClock *dst); - void ReleaseStore(ClockCache *c, SyncClock *dst); - void ResetCached(ClockCache *c); - void NoteGlobalAcquire(u64 v); - - void DebugReset(); - void DebugDump(int(*printf)(const char *s, ...)); - - private: - static const uptr kDirtyTids = SyncClock::kDirtyTids; - // Index of the thread associated with he clock ("current thread"). - const unsigned tid_; - const unsigned reused_; // tid_ reuse count. - // Current thread time when it acquired something from other threads. - u64 last_acquire_; - - // Last time another thread has done a global acquire of this thread's clock. - // It helps to avoid problem described in: - // https://github.com/golang/go/issues/39186 - // See test/tsan/java_finalizer2.cpp for a regression test. - // Note the failuire is _extremely_ hard to hit, so if you are trying - // to reproduce it, you may want to run something like: - // $ go get golang.org/x/tools/cmd/stress - // $ stress -p=64 ./a.out - // - // The crux of the problem is roughly as follows. - // A number of O(1) optimizations in the clocks algorithm assume proper - // transitive cumulative propagation of clock values. The AcquireGlobal - // operation may produce an inconsistent non-linearazable view of - // thread clocks. Namely, it may acquire a later value from a thread - // with a higher ID, but fail to acquire an earlier value from a thread - // with a lower ID. If a thread that executed AcquireGlobal then releases - // to a sync clock, it will spoil the sync clock with the inconsistent - // values. If another thread later releases to the sync clock, the optimized - // algorithm may break. - // - // The exact sequence of events that leads to the failure. - // - thread 1 executes AcquireGlobal - // - thread 1 acquires value 1 for thread 2 - // - thread 2 increments clock to 2 - // - thread 2 releases to sync object 1 - // - thread 3 at time 1 - // - thread 3 acquires from sync object 1 - // - thread 3 increments clock to 2 - // - thread 1 acquires value 2 for thread 3 - // - thread 1 releases to sync object 2 - // - sync object 2 clock has 1 for thread 2 and 2 for thread 3 - // - thread 3 releases to sync object 2 - // - thread 3 sees value 2 in the clock for itself - // and decides that it has already released to the clock - // and did not acquire anything from other threads after that - // (the last_acquire_ check in release operation) - // - thread 3 does not update the value for thread 2 in the clock from 1 to 2 - // - thread 4 acquires from sync object 2 - // - thread 4 detects a false race with thread 2 - // as it should have been synchronized with thread 2 up to time 2, - // but because of the broken clock it is now synchronized only up to time 1 - // - // The global_acquire_ value helps to prevent this scenario. 
- // Namely, thread 3 will not trust any own clock values up to global_acquire_ - // for the purposes of the last_acquire_ optimization. - atomic_uint64_t global_acquire_; - - // Cached SyncClock (without dirty entries and release_store_tid_). - // We reuse it for subsequent store-release operations without intervening - // acquire operations. Since it is shared (and thus constant), clock value - // for the current thread is then stored in dirty entries in the SyncClock. - // We host a reference to the table while it is cached here. - u32 cached_idx_; - u16 cached_size_; - u16 cached_blocks_; - - // Number of active elements in the clk_ table (the rest is zeros). - uptr nclk_; - u64 clk_[kMaxTidInClock]; // Fixed size vector clock. - - bool IsAlreadyAcquired(const SyncClock *src) const; - bool HasAcquiredAfterRelease(const SyncClock *dst) const; - void UpdateCurrentThread(ClockCache *c, SyncClock *dst) const; -}; - -ALWAYS_INLINE u64 ThreadClock::get(unsigned tid) const { - DCHECK_LT(tid, kMaxTidInClock); - return clk_[tid]; -} - -ALWAYS_INLINE void ThreadClock::set(u64 v) { - DCHECK_GE(v, clk_[tid_]); - clk_[tid_] = v; -} - -ALWAYS_INLINE void ThreadClock::tick() { - clk_[tid_]++; -} - -ALWAYS_INLINE uptr ThreadClock::size() const { - return nclk_; -} - -ALWAYS_INLINE void ThreadClock::NoteGlobalAcquire(u64 v) { - // Here we rely on the fact that AcquireGlobal is protected by - // ThreadRegistryLock, thus only one thread at a time executes it - // and values passed to this function should not go backwards. - CHECK_LE(atomic_load_relaxed(&global_acquire_), v); - atomic_store_relaxed(&global_acquire_, v); -} - -ALWAYS_INLINE SyncClock::Iter SyncClock::begin() { - return Iter(this); -} - -ALWAYS_INLINE SyncClock::Iter SyncClock::end() { - return Iter(nullptr); -} - -ALWAYS_INLINE uptr SyncClock::size() const { - return size_; -} - -ALWAYS_INLINE SyncClock::Iter::Iter(SyncClock* parent) - : parent_(parent) - , pos_(nullptr) - , end_(nullptr) - , block_(-1) { - if (parent) - Next(); -} - -ALWAYS_INLINE SyncClock::Iter& SyncClock::Iter::operator++() { - pos_++; - if (UNLIKELY(pos_ >= end_)) - Next(); - return *this; -} - -ALWAYS_INLINE bool SyncClock::Iter::operator!=(const SyncClock::Iter& other) { - return parent_ != other.parent_; -} - -ALWAYS_INLINE ClockElem &SyncClock::Iter::operator*() { - return *pos_; -} -} // namespace __tsan - -#endif // TSAN_CLOCK_H diff --git a/libsanitizer/tsan/tsan_debugging.cpp b/libsanitizer/tsan/tsan_debugging.cpp index 1d3c3849a44..1e61c31c5a9 100644 --- a/libsanitizer/tsan/tsan_debugging.cpp +++ b/libsanitizer/tsan/tsan_debugging.cpp @@ -157,7 +157,7 @@ int __tsan_get_report_mutex(void *report, uptr idx, uptr *mutex_id, void **addr, ReportMutex *mutex = rep->mutexes[idx]; *mutex_id = mutex->id; *addr = (void *)mutex->addr; - *destroyed = mutex->destroyed; + *destroyed = false; if (mutex->stack) CopyTrace(mutex->stack->frames, trace, trace_size); return 1; } diff --git a/libsanitizer/tsan/tsan_defs.h b/libsanitizer/tsan/tsan_defs.h index fe0c1da3159..1ffa3d6aec4 100644 --- a/libsanitizer/tsan/tsan_defs.h +++ b/libsanitizer/tsan/tsan_defs.h @@ -63,41 +63,14 @@ enum class Epoch : u16 {}; constexpr uptr kEpochBits = 14; constexpr Epoch kEpochZero = static_cast(0); constexpr Epoch kEpochOver = static_cast(1 << kEpochBits); +constexpr Epoch kEpochLast = static_cast((1 << kEpochBits) - 1); -const int kClkBits = 42; -const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1; - -struct ClockElem { - u64 epoch : kClkBits; - u64 reused : 64 - kClkBits; // tid reuse 
count -}; - -struct ClockBlock { - static const uptr kSize = 512; - static const uptr kTableSize = kSize / sizeof(u32); - static const uptr kClockCount = kSize / sizeof(ClockElem); - static const uptr kRefIdx = kTableSize - 1; - static const uptr kBlockIdx = kTableSize - 2; - - union { - u32 table[kTableSize]; - ClockElem clock[kClockCount]; - }; +inline Epoch EpochInc(Epoch epoch) { + return static_cast(static_cast(epoch) + 1); +} - ClockBlock() { - } -}; +inline bool EpochOverflow(Epoch epoch) { return epoch == kEpochOver; } -const int kTidBits = 13; -// Reduce kMaxTid by kClockCount because one slot in ClockBlock table is -// occupied by reference counter, so total number of elements we can store -// in SyncClock is kClockCount * (kTableSize - 1). -const unsigned kMaxTid = (1 << kTidBits) - ClockBlock::kClockCount; -#if !SANITIZER_GO -const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit. -#else -const unsigned kMaxTidInClock = kMaxTid; // Go does not track freed memory. -#endif const uptr kShadowStackSize = 64 * 1024; // Count of shadow values in a shadow cell. @@ -107,7 +80,7 @@ const uptr kShadowCnt = 4; const uptr kShadowCell = 8; // Single shadow value. -typedef u64 RawShadow; +enum class RawShadow : u32 {}; const uptr kShadowSize = sizeof(RawShadow); // Shadow memory is kShadowMultiplier times larger than user memory. @@ -184,10 +157,13 @@ MD5Hash md5_hash(const void *data, uptr size); struct Processor; struct ThreadState; class ThreadContext; +struct TidSlot; struct Context; struct ReportStack; class ReportDesc; class RegionAlloc; +struct Trace; +struct TracePart; typedef uptr AccessType; @@ -198,6 +174,9 @@ enum : AccessType { kAccessVptr = 1 << 2, // read or write of an object virtual table pointer kAccessFree = 1 << 3, // synthetic memory access during memory freeing kAccessExternalPC = 1 << 4, // access PC can have kExternalPCBit set + kAccessCheckOnly = 1 << 5, // check for races, but don't store + kAccessNoRodata = 1 << 6, // don't check for .rodata marker + kAccessSlotLocked = 1 << 7, // memory access with TidSlot locked }; // Descriptor of user's memory block. @@ -219,15 +198,18 @@ enum ExternalTag : uptr { // as 16-bit values, see tsan_defs.h. }; -enum MutexType { - MutexTypeTrace = MutexLastCommon, - MutexTypeReport, +enum { + MutexTypeReport = MutexLastCommon, MutexTypeSyncVar, MutexTypeAnnotations, MutexTypeAtExit, MutexTypeFired, MutexTypeRacy, MutexTypeGlobalProc, + MutexTypeInternalAlloc, + MutexTypeTrace, + MutexTypeSlot, + MutexTypeSlots, }; } // namespace __tsan diff --git a/libsanitizer/tsan/tsan_dense_alloc.h b/libsanitizer/tsan/tsan_dense_alloc.h index 9e15f74a061..7a39a39d51d 100644 --- a/libsanitizer/tsan/tsan_dense_alloc.h +++ b/libsanitizer/tsan/tsan_dense_alloc.h @@ -104,6 +104,15 @@ class DenseSlabAlloc { return atomic_load_relaxed(&fillpos_) * kL2Size * sizeof(T); } + template + void ForEach(Func func) { + SpinMutexLock lock(&mtx_); + uptr fillpos = atomic_load_relaxed(&fillpos_); + for (uptr l1 = 0; l1 < fillpos; l1++) { + for (IndexT l2 = l1 == 0 ? 
1 : 0; l2 < kL2Size; l2++) func(&map_[l1][l2]); + } + } + private: T *map_[kL1Size]; SpinMutex mtx_; diff --git a/libsanitizer/tsan/tsan_fd.cpp b/libsanitizer/tsan/tsan_fd.cpp index 255ffa8daf7..cf8f491fdbf 100644 --- a/libsanitizer/tsan/tsan_fd.cpp +++ b/libsanitizer/tsan/tsan_fd.cpp @@ -11,9 +11,12 @@ //===----------------------------------------------------------------------===// #include "tsan_fd.h" -#include "tsan_rtl.h" + #include +#include "tsan_interceptors.h" +#include "tsan_rtl.h" + namespace __tsan { const int kTableSizeL1 = 1024; @@ -26,6 +29,9 @@ struct FdSync { struct FdDesc { FdSync *sync; + // This is used to establish write -> epoll_wait synchronization + // where epoll_wait receives notification about the write. + atomic_uintptr_t aux_sync; // FdSync* Tid creation_tid; StackID creation_stack; }; @@ -100,6 +106,10 @@ static void init(ThreadState *thr, uptr pc, int fd, FdSync *s, unref(thr, pc, d->sync); d->sync = 0; } + unref(thr, pc, + reinterpret_cast( + atomic_load(&d->aux_sync, memory_order_relaxed))); + atomic_store(&d->aux_sync, 0, memory_order_relaxed); if (flags()->io_sync == 0) { unref(thr, pc, s); } else if (flags()->io_sync == 1) { @@ -110,12 +120,17 @@ static void init(ThreadState *thr, uptr pc, int fd, FdSync *s, } d->creation_tid = thr->tid; d->creation_stack = CurrentStackId(thr, pc); + // This prevents false positives on fd_close_norace3.cpp test. + // The mechanics of the false positive are not completely clear, + // but it happens only if global reset is enabled (flush_memory_ms=1) + // and may be related to lost writes during asynchronous MADV_DONTNEED. + SlotLocker locker(thr); if (write) { // To catch races between fd usage and open. MemoryRangeImitateWrite(thr, pc, (uptr)d, 8); } else { // See the dup-related comment in FdClose. - MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead); + MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead | kAccessSlotLocked); } } @@ -177,6 +192,8 @@ void FdRelease(ThreadState *thr, uptr pc, int fd) { MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead); if (s) Release(thr, pc, (uptr)s); + if (uptr aux_sync = atomic_load(&d->aux_sync, memory_order_acquire)) + Release(thr, pc, aux_sync); } void FdAccess(ThreadState *thr, uptr pc, int fd) { @@ -192,25 +209,39 @@ void FdClose(ThreadState *thr, uptr pc, int fd, bool write) { if (bogusfd(fd)) return; FdDesc *d = fddesc(thr, pc, fd); - if (write) { - // To catch races between fd usage and close. - MemoryAccess(thr, pc, (uptr)d, 8, kAccessWrite); - } else { - // This path is used only by dup2/dup3 calls. - // We do read instead of write because there is a number of legitimate - // cases where write would lead to false positives: - // 1. Some software dups a closed pipe in place of a socket before closing - // the socket (to prevent races actually). - // 2. Some daemons dup /dev/null in place of stdin/stdout. - // On the other hand we have not seen cases when write here catches real - // bugs. - MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead); + { + // Need to lock the slot to make MemoryAccess and MemoryResetRange atomic + // with respect to global reset. See the comment in MemoryRangeFreed. + SlotLocker locker(thr); + if (!MustIgnoreInterceptor(thr)) { + if (write) { + // To catch races between fd usage and close. + MemoryAccess(thr, pc, (uptr)d, 8, + kAccessWrite | kAccessCheckOnly | kAccessSlotLocked); + } else { + // This path is used only by dup2/dup3 calls. + // We do read instead of write because there is a number of legitimate + // cases where write would lead to false positives: + // 1. 
Some software dups a closed pipe in place of a socket before + // closing + // the socket (to prevent races actually). + // 2. Some daemons dup /dev/null in place of stdin/stdout. + // On the other hand we have not seen cases when write here catches real + // bugs. + MemoryAccess(thr, pc, (uptr)d, 8, + kAccessRead | kAccessCheckOnly | kAccessSlotLocked); + } + } + // We need to clear it, because if we do not intercept any call out there + // that creates fd, we will hit false postives. + MemoryResetRange(thr, pc, (uptr)d, 8); } - // We need to clear it, because if we do not intercept any call out there - // that creates fd, we will hit false postives. - MemoryResetRange(thr, pc, (uptr)d, 8); unref(thr, pc, d->sync); d->sync = 0; + unref(thr, pc, + reinterpret_cast( + atomic_load(&d->aux_sync, memory_order_relaxed))); + atomic_store(&d->aux_sync, 0, memory_order_relaxed); d->creation_tid = kInvalidTid; d->creation_stack = kInvalidStackID; } @@ -269,6 +300,30 @@ void FdPollCreate(ThreadState *thr, uptr pc, int fd) { init(thr, pc, fd, allocsync(thr, pc)); } +void FdPollAdd(ThreadState *thr, uptr pc, int epfd, int fd) { + DPrintf("#%d: FdPollAdd(%d, %d)\n", thr->tid, epfd, fd); + if (bogusfd(epfd) || bogusfd(fd)) + return; + FdDesc *d = fddesc(thr, pc, fd); + // Associate fd with epoll fd only once. + // While an fd can be associated with multiple epolls at the same time, + // or with different epolls during different phases of lifetime, + // synchronization semantics (and examples) of this are unclear. + // So we don't support this for now. + // If we change the association, it will also create lifetime management + // problem for FdRelease which accesses the aux_sync. + if (atomic_load(&d->aux_sync, memory_order_relaxed)) + return; + FdDesc *epd = fddesc(thr, pc, epfd); + FdSync *s = epd->sync; + if (!s) + return; + uptr cmp = 0; + if (atomic_compare_exchange_strong( + &d->aux_sync, &cmp, reinterpret_cast(s), memory_order_release)) + ref(s); +} + void FdSocketCreate(ThreadState *thr, uptr pc, int fd) { DPrintf("#%d: FdSocketCreate(%d)\n", thr->tid, fd); if (bogusfd(fd)) diff --git a/libsanitizer/tsan/tsan_fd.h b/libsanitizer/tsan/tsan_fd.h index d9648178481..92625dc4b4a 100644 --- a/libsanitizer/tsan/tsan_fd.h +++ b/libsanitizer/tsan/tsan_fd.h @@ -49,6 +49,7 @@ void FdEventCreate(ThreadState *thr, uptr pc, int fd); void FdSignalCreate(ThreadState *thr, uptr pc, int fd); void FdInotifyCreate(ThreadState *thr, uptr pc, int fd); void FdPollCreate(ThreadState *thr, uptr pc, int fd); +void FdPollAdd(ThreadState *thr, uptr pc, int epfd, int fd); void FdSocketCreate(ThreadState *thr, uptr pc, int fd); void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd); void FdSocketConnecting(ThreadState *thr, uptr pc, int fd); diff --git a/libsanitizer/tsan/tsan_flags.cpp b/libsanitizer/tsan/tsan_flags.cpp index ee89862d17b..ee78f25cc65 100644 --- a/libsanitizer/tsan/tsan_flags.cpp +++ b/libsanitizer/tsan/tsan_flags.cpp @@ -97,7 +97,7 @@ void InitializeFlags(Flags *f, const char *env, const char *env_option_name) { ubsan_parser.ParseStringFromEnv("UBSAN_OPTIONS"); #endif - // Sanity check. + // Check flags. 
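One note on the FdPollAdd hook introduced above: it shares the epoll descriptor's FdSync with the watched fd through aux_sync, so a write that epoll_wait observes establishes the intended happens-before edge. A small program exercising that pattern with the plain epoll API (collapsed into a single thread for brevity; the interesting case performs the write on another thread):

    #include <sys/epoll.h>
    #include <unistd.h>

    int main() {
      int pipefd[2];
      if (pipe(pipefd) != 0) return 1;
      int epfd = epoll_create1(0);
      epoll_event ev{};
      ev.events = EPOLLIN;
      ev.data.fd = pipefd[0];
      // EPOLL_CTL_ADD is what reaches FdPollAdd and attaches the epoll
      // fd's FdSync to the pipe's read end via aux_sync.
      epoll_ctl(epfd, EPOLL_CTL_ADD, pipefd[0], &ev);
      char c = 'x';
      (void)!write(pipefd[1], &c, 1);  // in the racy case: another thread
      epoll_event out{};
      epoll_wait(epfd, &out, 1, -1);   // waiter observes the write...
      (void)!read(pipefd[0], &c, 1);   // ...and touches what it published
      close(epfd);
      close(pipefd[0]);
      close(pipefd[1]);
    }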
if (!f->report_bugs) { f->report_thread_leaks = false; f->report_destroy_locked = false; @@ -110,12 +110,6 @@ void InitializeFlags(Flags *f, const char *env, const char *env_option_name) { if (common_flags()->help) parser.PrintFlagDescriptions(); - if (f->history_size < 0 || f->history_size > 7) { - Printf("ThreadSanitizer: incorrect value for history_size" - " (must be [0..7])\n"); - Die(); - } - if (f->io_sync < 0 || f->io_sync > 2) { Printf("ThreadSanitizer: incorrect value for io_sync" " (must be [0..2])\n"); diff --git a/libsanitizer/tsan/tsan_flags.inc b/libsanitizer/tsan/tsan_flags.inc index 7954a4307fa..32cf3bbf152 100644 --- a/libsanitizer/tsan/tsan_flags.inc +++ b/libsanitizer/tsan/tsan_flags.inc @@ -43,6 +43,9 @@ TSAN_FLAG( bool, force_seq_cst_atomics, false, "If set, all atomics are effectively sequentially consistent (seq_cst), " "regardless of what user actually specified.") +TSAN_FLAG(bool, force_background_thread, false, + "If set, eagerly launch a background thread for memory reclamation " + "instead of waiting for a user call to pthread_create.") TSAN_FLAG(bool, halt_on_error, false, "Exit after first reported error.") TSAN_FLAG(int, atexit_sleep_ms, 1000, "Sleep in main thread before exiting for that many ms " @@ -59,14 +62,10 @@ TSAN_FLAG(bool, stop_on_start, false, "Stops on start until __tsan_resume() is called (for debugging).") TSAN_FLAG(bool, running_on_valgrind, false, "Controls whether RunningOnValgrind() returns true or false.") -// There are a lot of goroutines in Go, so we use smaller history. TSAN_FLAG( - int, history_size, SANITIZER_GO ? 1 : 3, - "Per-thread history size, controls how many previous memory accesses " - "are remembered per thread. Possible values are [0..7]. " - "history_size=0 amounts to 32K memory accesses. Each next value doubles " - "the amount of memory accesses, up to history_size=7 that amounts to " - "4M memory accesses. The default value is 2 (128K memory accesses).") + uptr, history_size, 0, + "Per-thread history size," + " controls how many extra previous memory accesses are remembered per thread.") TSAN_FLAG(int, io_sync, 1, "Controls level of synchronization implied by IO operations. " "0 - no synchronization " @@ -82,3 +81,6 @@ TSAN_FLAG(bool, ignore_noninstrumented_modules, SANITIZER_MAC ? true : false, "modules.") TSAN_FLAG(bool, shared_ptr_interceptor, true, "Track atomic reference counting in libc++ shared_ptr and weak_ptr.") +TSAN_FLAG(bool, print_full_thread_history, false, + "If set, prints thread creation stacks for the threads involved in " + "the report and their ancestors up to the main thread.") diff --git a/libsanitizer/tsan/tsan_interceptors.h b/libsanitizer/tsan/tsan_interceptors.h index 61dbb81ffec..3091ad809c4 100644 --- a/libsanitizer/tsan/tsan_interceptors.h +++ b/libsanitizer/tsan/tsan_interceptors.h @@ -36,6 +36,10 @@ inline bool in_symbolizer() { } #endif +inline bool MustIgnoreInterceptor(ThreadState *thr) { + return !thr->is_inited || thr->ignore_interceptors || thr->in_ignored_lib; +} + } // namespace __tsan #define SCOPED_INTERCEPTOR_RAW(func, ...) \ @@ -60,10 +64,10 @@ inline bool in_symbolizer() { # define CHECK_REAL_FUNC(func) DCHECK(REAL(func)) #endif -#define SCOPED_TSAN_INTERCEPTOR(func, ...) \ - SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \ - CHECK_REAL_FUNC(func); \ - if (!thr->is_inited || thr->ignore_interceptors || thr->in_ignored_lib) \ +#define SCOPED_TSAN_INTERCEPTOR(func, ...) 
\ + SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \ + CHECK_REAL_FUNC(func); \ + if (MustIgnoreInterceptor(thr)) \ return REAL(func)(__VA_ARGS__); #define SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START() \ @@ -74,6 +78,14 @@ inline bool in_symbolizer() { #define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__) +#if SANITIZER_FREEBSD +# define TSAN_INTERCEPTOR_FREEBSD_ALIAS(ret, func, ...) \ + TSAN_INTERCEPTOR(ret, _pthread_##func, __VA_ARGS__) \ + ALIAS(WRAPPER_NAME(pthread_##func)); +#else +# define TSAN_INTERCEPTOR_FREEBSD_ALIAS(ret, func, ...) +#endif + #if SANITIZER_NETBSD # define TSAN_INTERCEPTOR_NETBSD_ALIAS(ret, func, ...) \ TSAN_INTERCEPTOR(ret, __libc_##func, __VA_ARGS__) \ diff --git a/libsanitizer/tsan/tsan_interceptors_posix.cpp b/libsanitizer/tsan/tsan_interceptors_posix.cpp index 9a85ee00d2d..60ca9633868 100644 --- a/libsanitizer/tsan/tsan_interceptors_posix.cpp +++ b/libsanitizer/tsan/tsan_interceptors_posix.cpp @@ -90,6 +90,7 @@ DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *) DECLARE_REAL(int, fflush, __sanitizer_FILE *fp) DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size) DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr) +extern "C" int pthread_equal(void *t1, void *t2); extern "C" void *pthread_self(); extern "C" void _exit(int status); #if !SANITIZER_NETBSD @@ -176,6 +177,7 @@ struct ThreadSignalContext { struct AtExitCtx { void (*f)(); void *arg; + uptr pc; }; // InterceptorContext holds all global data required for interceptors. @@ -287,20 +289,25 @@ void ScopedInterceptor::DisableIgnoresImpl() { } #define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func) +#if SANITIZER_FREEBSD || SANITIZER_NETBSD +# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func) +#else +# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver) +#endif #if SANITIZER_FREEBSD -# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func) -# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) -# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) -#elif SANITIZER_NETBSD -# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func) -# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) \ - INTERCEPT_FUNCTION(__libc_##func) -# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) \ - INTERCEPT_FUNCTION(__libc_thr_##func) +# define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func) \ + INTERCEPT_FUNCTION(_pthread_##func) #else -# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver) -# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) -# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) +# define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func) +#endif +#if SANITIZER_NETBSD +# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) \ + INTERCEPT_FUNCTION(__libc_##func) +# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) \ + INTERCEPT_FUNCTION(__libc_thr_##func) +#else +# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) +# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) #endif #define READ_STRING_OF_LEN(thr, pc, s, len, n) \ @@ -366,7 +373,10 @@ TSAN_INTERCEPTOR(int, pause, int fake) { return BLOCK_REAL(pause)(fake); } -static void at_exit_wrapper() { +// Note: we specifically call the function in such strange way +// with "installed_at" because in reports it will appear between +// callback frames and the frame that installed the callback. +static void at_exit_callback_installed_at() { AtExitCtx *ctx; { // Ensure thread-safety. 
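
The new MustIgnoreInterceptor() predicate in tsan_interceptors.h lets an interceptor keep its descriptor bookkeeping even when the thread's interceptors are being ignored, applying the bail-out only to the report-generating parts. The reworked close() interceptor later in this diff is the canonical shape (repeated here for orientation):

    TSAN_INTERCEPTOR(int, close, int fd) {
      SCOPED_INTERCEPTOR_RAW(close, fd);  // unlike SCOPED_TSAN_INTERCEPTOR: no early return
      if (!in_symbolizer())
        FdClose(thr, pc, fd);             // FdClose consults MustIgnoreInterceptor(thr) itself
      return REAL(close)(fd);
    }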
@@ -378,15 +388,21 @@ static void at_exit_wrapper() { interceptor_ctx()->AtExitStack.PopBack(); } - Acquire(cur_thread(), (uptr)0, (uptr)ctx); + ThreadState *thr = cur_thread(); + Acquire(thr, ctx->pc, (uptr)ctx); + FuncEntry(thr, ctx->pc); ((void(*)())ctx->f)(); + FuncExit(thr); Free(ctx); } -static void cxa_at_exit_wrapper(void *arg) { - Acquire(cur_thread(), 0, (uptr)arg); +static void cxa_at_exit_callback_installed_at(void *arg) { + ThreadState *thr = cur_thread(); AtExitCtx *ctx = (AtExitCtx*)arg; + Acquire(thr, ctx->pc, (uptr)arg); + FuncEntry(thr, ctx->pc); ((void(*)(void *arg))ctx->f)(ctx->arg); + FuncExit(thr); Free(ctx); } @@ -400,7 +416,7 @@ TSAN_INTERCEPTOR(int, atexit, void (*f)()) { // We want to setup the atexit callback even if we are in ignored lib // or after fork. SCOPED_INTERCEPTOR_RAW(atexit, f); - return setup_at_exit_wrapper(thr, pc, (void(*)())f, 0, 0); + return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, 0, 0); } #endif @@ -408,7 +424,7 @@ TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) { if (in_symbolizer()) return 0; SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso); - return setup_at_exit_wrapper(thr, pc, (void(*)())f, arg, dso); + return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, arg, dso); } static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(), @@ -416,6 +432,7 @@ static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(), auto *ctx = New(); ctx->f = f; ctx->arg = arg; + ctx->pc = pc; Release(thr, pc, (uptr)ctx); // Memory allocation in __cxa_atexit will race with free during exit, // because we do not see synchronization around atexit callback list. @@ -431,25 +448,27 @@ static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(), // due to atexit_mu held on exit from the calloc interceptor. ScopedIgnoreInterceptors ignore; - res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_wrapper, 0, 0); + res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_callback_installed_at, + 0, 0); // Push AtExitCtx on the top of the stack of callback functions if (!res) { interceptor_ctx()->AtExitStack.PushBack(ctx); } } else { - res = REAL(__cxa_atexit)(cxa_at_exit_wrapper, ctx, dso); + res = REAL(__cxa_atexit)(cxa_at_exit_callback_installed_at, ctx, dso); } ThreadIgnoreEnd(thr); return res; } #if !SANITIZER_MAC && !SANITIZER_NETBSD -static void on_exit_wrapper(int status, void *arg) { +static void on_exit_callback_installed_at(int status, void *arg) { ThreadState *thr = cur_thread(); - uptr pc = 0; - Acquire(thr, pc, (uptr)arg); AtExitCtx *ctx = (AtExitCtx*)arg; + Acquire(thr, ctx->pc, (uptr)arg); + FuncEntry(thr, ctx->pc); ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg); + FuncExit(thr); Free(ctx); } @@ -460,11 +479,12 @@ TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) { auto *ctx = New(); ctx->f = (void(*)())f; ctx->arg = arg; + ctx->pc = GET_CALLER_PC(); Release(thr, pc, (uptr)ctx); // Memory allocation in __cxa_atexit will race with free during exit, // because we do not see synchronization around atexit callback list. 
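
Storing the registration pc in AtExitCtx and bracketing the callback with FuncEntry/FuncExit makes the installing call site visible in reports, per the "installed_at" note above. A hypothetical program where that context matters:

    #include <cstdlib>
    #include <thread>

    int *p;
    static void cleanup() { delete p; }  // callback frame in a potential report

    int main() {
      p = new int(0);
      std::atexit(cleanup);              // ctx->pc now records this call site
      std::thread t([] { *p = 1; });     // may race with cleanup() at exit
      t.detach();
      return 0;                          // exit() runs cleanup()
    }

A report on such a race can then show cleanup() above a frame pointing at the atexit() call in main(), rather than a bare wrapper acquired at pc 0.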
ThreadIgnoreBegin(thr, pc); - int res = REAL(on_exit)(on_exit_wrapper, ctx); + int res = REAL(on_exit)(on_exit_callback_installed_at, ctx); ThreadIgnoreEnd(thr); return res; } @@ -880,10 +900,11 @@ static int guard_acquire(ThreadState *thr, uptr pc, atomic_uint32_t *g, } } -static void guard_release(ThreadState *thr, uptr pc, atomic_uint32_t *g) { +static void guard_release(ThreadState *thr, uptr pc, atomic_uint32_t *g, + u32 v) { if (!thr->in_ignored_lib) Release(thr, pc, (uptr)g); - u32 old = atomic_exchange(g, kGuardDone, memory_order_release); + u32 old = atomic_exchange(g, v, memory_order_release); if (old & kGuardWaiter) FutexWake(g, 1 << 30); } @@ -913,12 +934,12 @@ STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) { STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) { SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g); - guard_release(thr, pc, g); + guard_release(thr, pc, g, kGuardDone); } STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) { SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g); - atomic_store(g, kGuardInit, memory_order_relaxed); + guard_release(thr, pc, g, kGuardInit); } namespace __tsan { @@ -1515,12 +1536,12 @@ TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) { // result in crashes due to too little stack space. if (guard_acquire(thr, pc, a, !SANITIZER_MAC)) { (*f)(); - guard_release(thr, pc, a); + guard_release(thr, pc, a, kGuardDone); } return 0; } -#if SANITIZER_LINUX && !SANITIZER_ANDROID +#if SANITIZER_GLIBC TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) { SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf); if (fd > 0) @@ -1533,20 +1554,20 @@ TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) { #endif TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) { -#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID || SANITIZER_NETBSD - SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf); +#if SANITIZER_GLIBC + SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf); if (fd > 0) FdAccess(thr, pc, fd); - return REAL(fstat)(fd, buf); + return REAL(__fxstat)(0, fd, buf); #else - SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf); + SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf); if (fd > 0) FdAccess(thr, pc, fd); - return REAL(__fxstat)(0, fd, buf); + return REAL(fstat)(fd, buf); #endif } -#if SANITIZER_LINUX && !SANITIZER_ANDROID +#if SANITIZER_GLIBC TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) { SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf); if (fd > 0) @@ -1558,7 +1579,7 @@ TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) { #define TSAN_MAYBE_INTERCEPT___FXSTAT64 #endif -#if SANITIZER_LINUX && !SANITIZER_ANDROID +#if SANITIZER_GLIBC TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) { SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf); if (fd > 0) @@ -1665,11 +1686,10 @@ TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) { #if SANITIZER_LINUX TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) { - SCOPED_TSAN_INTERCEPTOR(signalfd, fd, mask, flags); - if (fd >= 0) - FdClose(thr, pc, fd); + SCOPED_INTERCEPTOR_RAW(signalfd, fd, mask, flags); + FdClose(thr, pc, fd); fd = REAL(signalfd)(fd, mask, flags); - if (fd >= 0) + if (!MustIgnoreInterceptor(thr)) FdSignalCreate(thr, pc, fd); return fd; } @@ -1746,17 +1766,16 @@ TSAN_INTERCEPTOR(int, listen, int fd, int backlog) { } TSAN_INTERCEPTOR(int, close, int fd) { - SCOPED_TSAN_INTERCEPTOR(close, fd); - if (fd >= 0) + SCOPED_INTERCEPTOR_RAW(close, fd); + if (!in_symbolizer()) FdClose(thr, pc, fd); return REAL(close)(fd); } 
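
For reference, the kind of bug the kAccessWrite imitation on the FdDesc in FdClose is designed to catch is descriptor use racing with close (hypothetical program):

    #include <fcntl.h>
    #include <unistd.h>
    #include <thread>

    int main() {
      char buf[1];
      int fd = open("/dev/null", O_RDONLY);
      std::thread t([&] { read(fd, buf, 1); });  // fd use in one thread...
      close(fd);                                 // ...races with close in another
      t.join();
      return 0;
    }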
#if SANITIZER_LINUX TSAN_INTERCEPTOR(int, __close, int fd) { - SCOPED_TSAN_INTERCEPTOR(__close, fd); - if (fd >= 0) - FdClose(thr, pc, fd); + SCOPED_INTERCEPTOR_RAW(__close, fd); + FdClose(thr, pc, fd); return REAL(__close)(fd); } #define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close) @@ -1767,13 +1786,10 @@ TSAN_INTERCEPTOR(int, __close, int fd) { // glibc guts #if SANITIZER_LINUX && !SANITIZER_ANDROID TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) { - SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr); + SCOPED_INTERCEPTOR_RAW(__res_iclose, state, free_addr); int fds[64]; int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds)); - for (int i = 0; i < cnt; i++) { - if (fds[i] > 0) - FdClose(thr, pc, fds[i]); - } + for (int i = 0; i < cnt; i++) FdClose(thr, pc, fds[i]); REAL(__res_iclose)(state, free_addr); } #define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose) @@ -1854,7 +1870,7 @@ TSAN_INTERCEPTOR(int, rmdir, char *path) { } TSAN_INTERCEPTOR(int, closedir, void *dirp) { - SCOPED_TSAN_INTERCEPTOR(closedir, dirp); + SCOPED_INTERCEPTOR_RAW(closedir, dirp); if (dirp) { int fd = dirfd(dirp); FdClose(thr, pc, fd); @@ -1885,8 +1901,10 @@ TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) { FdAccess(thr, pc, epfd); if (epfd >= 0 && fd >= 0) FdAccess(thr, pc, fd); - if (op == EPOLL_CTL_ADD && epfd >= 0) + if (op == EPOLL_CTL_ADD && epfd >= 0) { + FdPollAdd(thr, pc, epfd, fd); FdRelease(thr, pc, epfd); + } int res = REAL(epoll_ctl)(epfd, op, fd, ev); return res; } @@ -1949,13 +1967,14 @@ TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set, namespace __tsan { -static void ReportErrnoSpoiling(ThreadState *thr, uptr pc) { +static void ReportErrnoSpoiling(ThreadState *thr, uptr pc, int sig) { VarSizeStackTrace stack; // StackTrace::GetNestInstructionPc(pc) is used because return address is // expected, OutputReport() will undo this. ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack); ThreadRegistryLock l(&ctx->thread_registry); ScopedReport rep(ReportTypeErrnoInSignal); + rep.SetSigNum(sig); if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) { rep.AddStack(stack, true); OutputReport(thr, rep); @@ -1965,6 +1984,7 @@ static void ReportErrnoSpoiling(ThreadState *thr, uptr pc) { static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire, int sig, __sanitizer_siginfo *info, void *uctx) { + CHECK(thr->slot); __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions; if (acquire) Acquire(thr, 0, (uptr)&sigactions[sig]); @@ -2021,7 +2041,7 @@ static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire, // signal; and it looks too fragile to intercept all ways to reraise a signal. 
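
rep.SetSigNum(sig) threads the signal number into ReportErrnoSpoiling, so errno-in-signal reports can now say which handler is at fault. The pattern being diagnosed, sketched with a hypothetical handler:

    #include <csignal>
    #include <cstdlib>

    static void handler(int) {
      free(malloc(16));  // allocator calls may clobber errno inside a handler
    }

    int main() {
      std::signal(SIGPROF, handler);
      // If SIGPROF lands between a failing syscall and the errno check that
      // follows it, the check reads whatever the handler left behind.
      return 0;
    }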
if (ShouldReport(thr, ReportTypeErrnoInSignal) && !sync && sig != SIGTERM && errno != 99) - ReportErrnoSpoiling(thr, pc); + ReportErrnoSpoiling(thr, pc, sig); errno = saved_errno; } @@ -2132,11 +2152,11 @@ TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) { ThreadSignalContext *sctx = SigCtx(thr); CHECK_NE(sctx, 0); int prev = sctx->int_signal_send; - if (tid == pthread_self()) { + bool self = pthread_equal(tid, pthread_self()); + if (self) sctx->int_signal_send = sig; - } int res = REAL(pthread_kill)(tid, sig); - if (tid == pthread_self()) { + if (self) { CHECK_EQ(sctx->int_signal_send, sig); sctx->int_signal_send = prev; } @@ -2193,6 +2213,7 @@ void atfork_child() { FdOnFork(thr, pc); } +#if !SANITIZER_IOS TSAN_INTERCEPTOR(int, vfork, int fake) { // Some programs (e.g. openjdk) call close for all file descriptors // in the child process. Under tsan it leads to false positives, because @@ -2209,6 +2230,7 @@ TSAN_INTERCEPTOR(int, vfork, int fake) { // Instead we simply turn vfork into fork. return WRAP(fork)(fake); } +#endif #if SANITIZER_LINUX TSAN_INTERCEPTOR(int, clone, int (*fn)(void *), void *stack, int flags, @@ -2252,7 +2274,7 @@ struct dl_iterate_phdr_data { }; static bool IsAppNotRodata(uptr addr) { - return IsAppMem(addr) && *MemToShadow(addr) != kShadowRodata; + return IsAppMem(addr) && *MemToShadow(addr) != Shadow::kRodata; } static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size, @@ -2358,9 +2380,18 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc, #define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \ if (file) { \ int fd = fileno_unlocked(file); \ - if (fd >= 0) FdClose(thr, pc, fd); \ + FdClose(thr, pc, fd); \ } +#define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \ + ({ \ + CheckNoDeepBind(filename, flag); \ + ThreadIgnoreBegin(thr, 0); \ + void *res = REAL(dlopen)(filename, flag); \ + ThreadIgnoreEnd(thr); \ + res; \ + }) + #define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \ libignore()->OnLibraryLoaded(filename) @@ -2391,8 +2422,11 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc, #define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \ ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name) -#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \ - __tsan::ctx->thread_registry.SetThreadNameByUserId(thread, name) +#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \ + if (pthread_equal(pthread_self(), reinterpret_cast(thread))) \ + COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name); \ + else \ + __tsan::ctx->thread_registry.SetThreadNameByUserId(thread, name) #define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name) @@ -2553,7 +2587,7 @@ static USED void syscall_release(uptr pc, uptr addr) { } static void syscall_fd_close(uptr pc, int fd) { - TSAN_SYSCALL(); + auto *thr = cur_thread(); FdClose(thr, pc, fd); } @@ -2688,6 +2722,26 @@ TSAN_INTERCEPTOR(void, thr_exit, tid_t *state) { #define TSAN_MAYBE_INTERCEPT_THR_EXIT #endif +TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_init, void *c, void *a) +TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_destroy, void *c) +TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_signal, void *c) +TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_broadcast, void *c) +TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_wait, void *c, void *m) +TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_init, void *m, void *a) +TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_destroy, void *m) +TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_lock, void *m) +TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_trylock, void *m) +TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, 
mutex_unlock, void *m) +TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_init, void *l, void *a) +TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_destroy, void *l) +TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_rdlock, void *l) +TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_tryrdlock, void *l) +TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_wrlock, void *l) +TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_trywrlock, void *l) +TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_unlock, void *l) +TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, once, void *o, void (*i)()) +TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, sigmask, int f, void *n, void *o) + TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_init, void *c, void *a) TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_signal, void *c) TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_broadcast, void *c) @@ -2916,6 +2970,26 @@ void InitializeInterceptors() { } #endif + TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_init); + TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_destroy); + TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_signal); + TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_broadcast); + TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_wait); + TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_init); + TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_destroy); + TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_lock); + TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_trylock); + TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_unlock); + TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_init); + TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_destroy); + TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_rdlock); + TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_tryrdlock); + TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_wrlock); + TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_trywrlock); + TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_unlock); + TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(once); + TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(sigmask); + TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_init); TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_signal); TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_broadcast); diff --git a/libsanitizer/tsan/tsan_interface.cpp b/libsanitizer/tsan/tsan_interface.cpp index 04871518515..e6c4bf2e60a 100644 --- a/libsanitizer/tsan/tsan_interface.cpp +++ b/libsanitizer/tsan/tsan_interface.cpp @@ -26,20 +26,6 @@ void __tsan_flush_memory() { FlushShadowMemory(); } -void __tsan_read16(void *addr) { - uptr pc = CALLERPC; - ThreadState *thr = cur_thread(); - MemoryAccess(thr, pc, (uptr)addr, 8, kAccessRead); - MemoryAccess(thr, pc, (uptr)addr + 8, 8, kAccessRead); -} - -void __tsan_write16(void *addr) { - uptr pc = CALLERPC; - ThreadState *thr = cur_thread(); - MemoryAccess(thr, pc, (uptr)addr, 8, kAccessWrite); - MemoryAccess(thr, pc, (uptr)addr + 8, 8, kAccessWrite); -} - void __tsan_read16_pc(void *addr, void *pc) { uptr pc_no_pac = STRIP_PAC_PC(pc); ThreadState *thr = cur_thread(); diff --git a/libsanitizer/tsan/tsan_interface.inc b/libsanitizer/tsan/tsan_interface.inc index 0031800e851..b0a424ff9c2 100644 --- a/libsanitizer/tsan/tsan_interface.inc +++ b/libsanitizer/tsan/tsan_interface.inc @@ -34,6 +34,10 @@ void __tsan_read8(void *addr) { MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessRead); } +void __tsan_read16(void *addr) { + MemoryAccess16(cur_thread(), CALLERPC, (uptr)addr, kAccessRead); +} + void __tsan_write1(void *addr) { MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 1, kAccessWrite); } @@ -50,6 +54,10 @@ void __tsan_write8(void *addr) { MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessWrite); } +void __tsan_write16(void *addr) { + MemoryAccess16(cur_thread(), CALLERPC, (uptr)addr, kAccessWrite); +} 
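
__tsan_read16/__tsan_write16 move into tsan_interface.inc as single MemoryAccess16 events, replacing the pair of 8-byte accesses deleted from tsan_interface.cpp above. An instrumentation-level view, hand-written here as a sketch (these calls are normally emitted by the compiler for 16-byte accesses such as __int128 or vector loads; copy16 is a hypothetical helper):

    extern "C" void __tsan_read16(void *addr);
    extern "C" void __tsan_write16(void *addr);

    void copy16(void *dst, const void *src) {
      __tsan_read16(const_cast<void *>(src));  // one 16-byte read event, was two 8-byte ones
      __tsan_write16(dst);                     // one 16-byte write event
      __builtin_memcpy(dst, src, 16);
    }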
+ void __tsan_read1_pc(void *addr, void *pc) { MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 1, kAccessRead | kAccessExternalPC); } diff --git a/libsanitizer/tsan/tsan_interface_atomic.cpp b/libsanitizer/tsan/tsan_interface_atomic.cpp index 24ba3bb1f65..f794a2fcdd0 100644 --- a/libsanitizer/tsan/tsan_interface_atomic.cpp +++ b/libsanitizer/tsan/tsan_interface_atomic.cpp @@ -235,8 +235,9 @@ static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) { T v = NoTsanAtomicLoad(a, mo); SyncVar *s = ctx->metamap.GetSyncIfExists((uptr)a); if (s) { - ReadLock l(&s->mtx); - AcquireImpl(thr, pc, &s->clock); + SlotLocker locker(thr); + ReadLock lock(&s->mtx); + thr->clock.Acquire(s->clock); // Re-read under sync mutex because we need a consistent snapshot // of the value and the clock we acquire. v = NoTsanAtomicLoad(a, mo); @@ -270,14 +271,14 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v, NoTsanAtomicStore(a, v, mo); return; } - __sync_synchronize(); - SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false); - Lock l(&s->mtx); - thr->fast_state.IncrementEpoch(); - // Can't increment epoch w/o writing to the trace as well. - TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); - ReleaseStoreImpl(thr, pc, &s->clock); - NoTsanAtomicStore(a, v, mo); + SlotLocker locker(thr); + { + auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false); + Lock lock(&s->mtx); + thr->clock.ReleaseStore(&s->clock); + NoTsanAtomicStore(a, v, mo); + } + IncrementEpoch(thr); } template @@ -285,18 +286,21 @@ static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { MemoryAccess(thr, pc, (uptr)a, AccessSize(), kAccessWrite | kAccessAtomic); if (LIKELY(mo == mo_relaxed)) return F(a, v); - SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false); - Lock l(&s->mtx); - thr->fast_state.IncrementEpoch(); - // Can't increment epoch w/o writing to the trace as well. 
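
The rewritten atomic paths in this hunk map C/C++ memory orders directly onto the thread's vector clock, bumping the epoch only after releasing operations. How the branches are exercised from user code:

    #include <atomic>

    std::atomic<int> x{0};

    int main() {
      x.store(1, std::memory_order_release);      // AtomicStore: clock.ReleaseStore, then IncrementEpoch
      (void)x.load(std::memory_order_acquire);    // AtomicLoad: clock.Acquire under the SyncVar ReadLock
      x.fetch_add(1, std::memory_order_acq_rel);  // AtomicRMW: clock.ReleaseAcquire, SyncVar write-locked
      x.fetch_add(1, std::memory_order_relaxed);  // fast path: no SyncVar lookup, no clock work
      return 0;
    }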
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); - if (IsAcqRelOrder(mo)) - AcquireReleaseImpl(thr, pc, &s->clock); - else if (IsReleaseOrder(mo)) - ReleaseImpl(thr, pc, &s->clock); - else if (IsAcquireOrder(mo)) - AcquireImpl(thr, pc, &s->clock); - return F(a, v); + SlotLocker locker(thr); + { + auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false); + RWLock lock(&s->mtx, IsReleaseOrder(mo)); + if (IsAcqRelOrder(mo)) + thr->clock.ReleaseAcquire(&s->clock); + else if (IsReleaseOrder(mo)) + thr->clock.Release(&s->clock); + else if (IsAcquireOrder(mo)) + thr->clock.Acquire(s->clock); + v = F(a, v); + } + if (IsReleaseOrder(mo)) + IncrementEpoch(thr); + return v; } template @@ -416,27 +420,28 @@ static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v, *c = pr; return false; } - + SlotLocker locker(thr); bool release = IsReleaseOrder(mo); - SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false); - RWLock l(&s->mtx, release); - T cc = *c; - T pr = func_cas(a, cc, v); - bool success = pr == cc; - if (!success) { - *c = pr; - mo = fmo; + bool success; + { + auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false); + RWLock lock(&s->mtx, release); + T cc = *c; + T pr = func_cas(a, cc, v); + success = pr == cc; + if (!success) { + *c = pr; + mo = fmo; + } + if (success && IsAcqRelOrder(mo)) + thr->clock.ReleaseAcquire(&s->clock); + else if (success && IsReleaseOrder(mo)) + thr->clock.Release(&s->clock); + else if (IsAcquireOrder(mo)) + thr->clock.Acquire(s->clock); } - thr->fast_state.IncrementEpoch(); - // Can't increment epoch w/o writing to the trace as well. - TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); - - if (success && IsAcqRelOrder(mo)) - AcquireReleaseImpl(thr, pc, &s->clock); - else if (success && IsReleaseOrder(mo)) - ReleaseImpl(thr, pc, &s->clock); - else if (IsAcquireOrder(mo)) - AcquireImpl(thr, pc, &s->clock); + if (success && release) + IncrementEpoch(thr); return success; } diff --git a/libsanitizer/tsan/tsan_interface_java.cpp b/libsanitizer/tsan/tsan_interface_java.cpp index c090c1f08cb..7c15a163882 100644 --- a/libsanitizer/tsan/tsan_interface_java.cpp +++ b/libsanitizer/tsan/tsan_interface_java.cpp @@ -106,7 +106,7 @@ void __tsan_java_free(jptr ptr, jptr size) { DCHECK_GE(ptr, jctx->heap_begin); DCHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size); - ctx->metamap.FreeRange(thr->proc(), ptr, size); + ctx->metamap.FreeRange(thr->proc(), ptr, size, false); } void __tsan_java_move(jptr src, jptr dst, jptr size) { @@ -133,7 +133,7 @@ void __tsan_java_move(jptr src, jptr dst, jptr size) { // support that anymore as it contains addresses of accesses. RawShadow *d = MemToShadow(dst); RawShadow *dend = MemToShadow(dst + size); - internal_memset(d, 0, (dend - d) * sizeof(*d)); + ShadowSet(d, dend, Shadow::kEmpty); } jptr __tsan_java_find(jptr *from_ptr, jptr to) { diff --git a/libsanitizer/tsan/tsan_mman.cpp b/libsanitizer/tsan/tsan_mman.cpp index f1b6768c592..0937e521193 100644 --- a/libsanitizer/tsan/tsan_mman.cpp +++ b/libsanitizer/tsan/tsan_mman.cpp @@ -20,18 +20,6 @@ #include "tsan_report.h" #include "tsan_flags.h" -// May be overriden by front-end. 
-SANITIZER_WEAK_DEFAULT_IMPL -void __sanitizer_malloc_hook(void *ptr, uptr size) { - (void)ptr; - (void)size; -} - -SANITIZER_WEAK_DEFAULT_IMPL -void __sanitizer_free_hook(void *ptr) { - (void)ptr; -} - namespace __tsan { struct MapUnmapCallback { @@ -69,8 +57,17 @@ Allocator *allocator() { struct GlobalProc { Mutex mtx; Processor *proc; - - GlobalProc() : mtx(MutexTypeGlobalProc), proc(ProcCreate()) {} + // This mutex represents the internal allocator combined for + // the purposes of deadlock detection. The internal allocator + // uses multiple mutexes, moreover they are locked only occasionally + // and they are spin mutexes which don't support deadlock detection. + // So we use this fake mutex to serve as a substitute for these mutexes. + CheckedMutex internal_alloc_mtx; + + GlobalProc() + : mtx(MutexTypeGlobalProc), + proc(ProcCreate()), + internal_alloc_mtx(MutexTypeInternalAlloc) {} }; static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64); @@ -78,6 +75,11 @@ GlobalProc *global_proc() { return reinterpret_cast(&global_proc_placeholder); } +static void InternalAllocAccess() { + global_proc()->internal_alloc_mtx.Lock(); + global_proc()->internal_alloc_mtx.Unlock(); +} + ScopedGlobalProcessor::ScopedGlobalProcessor() { GlobalProc *gp = global_proc(); ThreadState *thr = cur_thread(); @@ -110,6 +112,24 @@ ScopedGlobalProcessor::~ScopedGlobalProcessor() { gp->mtx.Unlock(); } +void AllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { + global_proc()->internal_alloc_mtx.Lock(); + InternalAllocatorLock(); +} + +void AllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { + InternalAllocatorUnlock(); + global_proc()->internal_alloc_mtx.Unlock(); +} + +void GlobalProcessorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { + global_proc()->mtx.Lock(); +} + +void GlobalProcessorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { + global_proc()->mtx.Unlock(); +} + static constexpr uptr kMaxAllowedMallocSize = 1ull << 40; static uptr max_user_defined_malloc_size; @@ -166,6 +186,12 @@ void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align, GET_STACK_TRACE_FATAL(thr, pc); ReportAllocationSizeTooBig(sz, malloc_limit, &stack); } + if (UNLIKELY(IsRssLimitExceeded())) { + if (AllocatorMayReturnNull()) + return nullptr; + GET_STACK_TRACE_FATAL(thr, pc); + ReportRssLimitExceeded(&stack); + } void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align); if (UNLIKELY(!p)) { SetAllocatorOutOfMemory(); @@ -219,8 +245,17 @@ void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr size, uptr n) { void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) { DPrintf("#%d: alloc(%zu) = 0x%zx\n", thr->tid, sz, p); + // Note: this can run before thread initialization/after finalization. + // As a result this is not necessarily synchronized with DoReset, + // which iterates over and resets all sync objects, + // but it is fine to create new MBlocks in this context. ctx->metamap.AllocBlock(thr, pc, p, sz); - if (write && thr->ignore_reads_and_writes == 0) + // If this runs before thread initialization/after finalization + // and we don't have trace initialized, we can't imitate writes. + // In such case just reset the shadow range, it is fine since + // it affects only a small fraction of special objects. 
+ if (write && thr->ignore_reads_and_writes == 0 && + atomic_load_relaxed(&thr->trace_pos)) MemoryRangeImitateWrite(thr, pc, (uptr)p, sz); else MemoryResetRange(thr, pc, (uptr)p, sz); @@ -228,7 +263,14 @@ void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) { void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) { CHECK_NE(p, (void*)0); - uptr sz = ctx->metamap.FreeBlock(thr->proc(), p); + if (!thr->slot) { + // Very early/late in thread lifetime, or during fork. + UNUSED uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, false); + DPrintf("#%d: free(0x%zx, %zu) (no slot)\n", thr->tid, p, sz); + return; + } + SlotLocker locker(thr); + uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, true); DPrintf("#%d: free(0x%zx, %zu)\n", thr->tid, p, sz); if (write && thr->ignore_reads_and_writes == 0) MemoryRangeFreed(thr, pc, (uptr)p, sz); @@ -310,7 +352,7 @@ void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) { } uptr user_alloc_usable_size(const void *p) { - if (p == 0) + if (p == 0 || !IsAppMem((uptr)p)) return 0; MBlock *b = ctx->metamap.GetBlock((uptr)p); if (!b) @@ -324,7 +366,6 @@ void invoke_malloc_hook(void *ptr, uptr size) { ThreadState *thr = cur_thread(); if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors) return; - __sanitizer_malloc_hook(ptr, size); RunMallocHooks(ptr, size); } @@ -332,7 +373,6 @@ void invoke_free_hook(void *ptr) { ThreadState *thr = cur_thread(); if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors) return; - __sanitizer_free_hook(ptr); RunFreeHooks(ptr); } @@ -342,6 +382,7 @@ void *Alloc(uptr sz) { thr->nomalloc = 0; // CHECK calls internal_malloc(). CHECK(0); } + InternalAllocAccess(); return InternalAlloc(sz, &thr->proc()->internal_alloc_cache); } @@ -351,6 +392,7 @@ void FreeImpl(void *p) { thr->nomalloc = 0; // CHECK calls internal_malloc(). CHECK(0); } + InternalAllocAccess(); InternalFree(p, &thr->proc()->internal_alloc_cache); } @@ -393,8 +435,6 @@ uptr __sanitizer_get_allocated_size(const void *p) { void __tsan_on_thread_idle() { ThreadState *thr = cur_thread(); - thr->clock.ResetCached(&thr->proc()->clock_cache); - thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache); allocator()->SwallowCache(&thr->proc()->alloc_cache); internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache); ctx->metamap.OnProcIdle(thr->proc()); diff --git a/libsanitizer/tsan/tsan_mman.h b/libsanitizer/tsan/tsan_mman.h index efea5e5abde..2095f28c025 100644 --- a/libsanitizer/tsan/tsan_mman.h +++ b/libsanitizer/tsan/tsan_mman.h @@ -24,6 +24,10 @@ void ReplaceSystemMalloc(); void AllocatorProcStart(Processor *proc); void AllocatorProcFinish(Processor *proc); void AllocatorPrintStats(); +void AllocatorLock(); +void AllocatorUnlock(); +void GlobalProcessorLock(); +void GlobalProcessorUnlock(); // For user allocations. void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, diff --git a/libsanitizer/tsan/tsan_mutexset.cpp b/libsanitizer/tsan/tsan_mutexset.cpp index 735179686ba..3a75b80ac30 100644 --- a/libsanitizer/tsan/tsan_mutexset.cpp +++ b/libsanitizer/tsan/tsan_mutexset.cpp @@ -19,57 +19,7 @@ namespace __tsan { MutexSet::MutexSet() { } -void MutexSet::Add(u64 id, bool write, u64 epoch) { - // Look up existing mutex with the same id. - for (uptr i = 0; i < size_; i++) { - if (descs_[i].id == id) { - descs_[i].count++; - descs_[i].epoch = epoch; - return; - } - } - // On overflow, find the oldest mutex and drop it. 
- if (size_ == kMaxSize) { - u64 minepoch = (u64)-1; - u64 mini = (u64)-1; - for (uptr i = 0; i < size_; i++) { - if (descs_[i].epoch < minepoch) { - minepoch = descs_[i].epoch; - mini = i; - } - } - RemovePos(mini); - CHECK_EQ(size_, kMaxSize - 1); - } - // Add new mutex descriptor. - descs_[size_].addr = 0; - descs_[size_].stack_id = kInvalidStackID; - descs_[size_].id = id; - descs_[size_].write = write; - descs_[size_].epoch = epoch; - descs_[size_].seq = seq_++; - descs_[size_].count = 1; - size_++; -} - -void MutexSet::Del(u64 id, bool write) { - for (uptr i = 0; i < size_; i++) { - if (descs_[i].id == id) { - if (--descs_[i].count == 0) - RemovePos(i); - return; - } - } -} - -void MutexSet::Remove(u64 id) { - for (uptr i = 0; i < size_; i++) { - if (descs_[i].id == id) { - RemovePos(i); - return; - } - } -} +void MutexSet::Reset() { internal_memset(this, 0, sizeof(*this)); } void MutexSet::AddAddr(uptr addr, StackID stack_id, bool write) { // Look up existing mutex with the same id. @@ -93,9 +43,7 @@ void MutexSet::AddAddr(uptr addr, StackID stack_id, bool write) { // Add new mutex descriptor. descs_[size_].addr = addr; descs_[size_].stack_id = stack_id; - descs_[size_].id = 0; descs_[size_].write = write; - descs_[size_].epoch = 0; descs_[size_].seq = seq_++; descs_[size_].count = 1; size_++; diff --git a/libsanitizer/tsan/tsan_mutexset.h b/libsanitizer/tsan/tsan_mutexset.h index 93776a66413..aabd361e6af 100644 --- a/libsanitizer/tsan/tsan_mutexset.h +++ b/libsanitizer/tsan/tsan_mutexset.h @@ -25,8 +25,6 @@ class MutexSet { struct Desc { uptr addr; StackID stack_id; - u64 id; - u64 epoch; u32 seq; u32 count; bool write; @@ -40,10 +38,7 @@ class MutexSet { }; MutexSet(); - // The 'id' is obtained from SyncVar::GetId(). - void Add(u64 id, bool write, u64 epoch); - void Del(u64 id, bool write); - void Remove(u64 id); // Removes the mutex completely (if it's destroyed). + void Reset(); void AddAddr(uptr addr, StackID stack_id, bool write); void DelAddr(uptr addr, bool destroy = false); uptr Size() const; @@ -82,9 +77,7 @@ class DynamicMutexSet { // in different goroutine). 
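
With the id/epoch fields gone, held mutexes are tracked purely by address plus acquisition stack. A hypothetical runtime-side sketch of the remaining AddAddr/DelAddr API (OnLock/OnUnlock are illustrative names, not functions from the patch):

    void OnLock(ThreadState *thr, uptr pc, MutexSet &mset, uptr mutex_addr) {
      mset.AddAddr(mutex_addr, CurrentStackId(thr, pc), /*write=*/true);
    }

    void OnUnlock(MutexSet &mset, uptr mutex_addr) {
      mset.DelAddr(mutex_addr);  // pass destroy=true instead when the mutex is destroyed
    }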
#if SANITIZER_GO MutexSet::MutexSet() {} -void MutexSet::Add(u64 id, bool write, u64 epoch) {} -void MutexSet::Del(u64 id, bool write) {} -void MutexSet::Remove(u64 id) {} +void MutexSet::Reset() {} void MutexSet::AddAddr(uptr addr, StackID stack_id, bool write) {} void MutexSet::DelAddr(uptr addr, bool destroy) {} uptr MutexSet::Size() const { return 0; } diff --git a/libsanitizer/tsan/tsan_platform.h b/libsanitizer/tsan/tsan_platform.h index 7ff0acace8f..233bf0a39df 100644 --- a/libsanitizer/tsan/tsan_platform.h +++ b/libsanitizer/tsan/tsan_platform.h @@ -18,8 +18,8 @@ # error "Only 64-bit is supported" #endif +#include "sanitizer_common/sanitizer_common.h" #include "tsan_defs.h" -#include "tsan_trace.h" namespace __tsan { @@ -40,14 +40,12 @@ enum { C/C++ on linux/x86_64 and freebsd/x86_64 0000 0000 1000 - 0080 0000 0000: main binary and/or MAP_32BIT mappings (512GB) 0040 0000 0000 - 0100 0000 0000: - -0100 0000 0000 - 2000 0000 0000: shadow -2000 0000 0000 - 3000 0000 0000: - +0100 0000 0000 - 1000 0000 0000: shadow +1000 0000 0000 - 3000 0000 0000: - 3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects) 4000 0000 0000 - 5500 0000 0000: - 5500 0000 0000 - 5680 0000 0000: pie binaries without ASLR or on 4.1+ kernels -5680 0000 0000 - 6000 0000 0000: - -6000 0000 0000 - 6200 0000 0000: traces -6200 0000 0000 - 7d00 0000 0000: - +5680 0000 0000 - 7d00 0000 0000: - 7b00 0000 0000 - 7c00 0000 0000: heap 7c00 0000 0000 - 7e80 0000 0000: - 7e80 0000 0000 - 8000 0000 0000: modules and main thread stack @@ -67,10 +65,8 @@ C/C++ on netbsd/amd64 can reuse the same mapping: struct Mapping48AddressSpace { static const uptr kMetaShadowBeg = 0x300000000000ull; static const uptr kMetaShadowEnd = 0x340000000000ull; - static const uptr kTraceMemBeg = 0x600000000000ull; - static const uptr kTraceMemEnd = 0x620000000000ull; static const uptr kShadowBeg = 0x010000000000ull; - static const uptr kShadowEnd = 0x200000000000ull; + static const uptr kShadowEnd = 0x100000000000ull; static const uptr kHeapMemBeg = 0x7b0000000000ull; static const uptr kHeapMemEnd = 0x7c0000000000ull; static const uptr kLoAppMemBeg = 0x000000001000ull; @@ -89,14 +85,13 @@ struct Mapping48AddressSpace { C/C++ on linux/mips64 (40-bit VMA) 0000 0000 00 - 0100 0000 00: - (4 GB) 0100 0000 00 - 0200 0000 00: main binary (4 GB) -0200 0000 00 - 2000 0000 00: - (120 GB) -2000 0000 00 - 4000 0000 00: shadow (128 GB) +0200 0000 00 - 1200 0000 00: - (64 GB) +1200 0000 00 - 2200 0000 00: shadow (64 GB) +2200 0000 00 - 4000 0000 00: - (120 GB) 4000 0000 00 - 5000 0000 00: metainfo (memory blocks and sync objects) (64 GB) 5000 0000 00 - aa00 0000 00: - (360 GB) aa00 0000 00 - ab00 0000 00: main binary (PIE) (4 GB) -ab00 0000 00 - b000 0000 00: - (20 GB) -b000 0000 00 - b200 0000 00: traces (8 GB) -b200 0000 00 - fe00 0000 00: - (304 GB) +ab00 0000 00 - fe00 0000 00: - (332 GB) fe00 0000 00 - ff00 0000 00: heap (4 GB) ff00 0000 00 - ff80 0000 00: - (2 GB) ff80 0000 00 - ffff ffff ff: modules and main thread stack (<2 GB) @@ -104,10 +99,8 @@ ff80 0000 00 - ffff ffff ff: modules and main thread stack (<2 GB) struct MappingMips64_40 { static const uptr kMetaShadowBeg = 0x4000000000ull; static const uptr kMetaShadowEnd = 0x5000000000ull; - static const uptr kTraceMemBeg = 0xb000000000ull; - static const uptr kTraceMemEnd = 0xb200000000ull; - static const uptr kShadowBeg = 0x2000000000ull; - static const uptr kShadowEnd = 0x4000000000ull; + static const uptr kShadowBeg = 0x1200000000ull; + static const uptr kShadowEnd = 
0x2200000000ull; static const uptr kHeapMemBeg = 0xfe00000000ull; static const uptr kHeapMemEnd = 0xff00000000ull; static const uptr kLoAppMemBeg = 0x0100000000ull; @@ -128,12 +121,10 @@ C/C++ on Darwin/iOS/ARM64 (36-bit VMA, 64 GB VM) 0100 0000 00 - 0200 0000 00: main binary, modules, thread stacks (4 GB) 0200 0000 00 - 0300 0000 00: heap (4 GB) 0300 0000 00 - 0400 0000 00: - (4 GB) -0400 0000 00 - 0c00 0000 00: shadow memory (32 GB) -0c00 0000 00 - 0d00 0000 00: - (4 GB) +0400 0000 00 - 0800 0000 00: shadow memory (16 GB) +0800 0000 00 - 0d00 0000 00: - (20 GB) 0d00 0000 00 - 0e00 0000 00: metainfo (4 GB) -0e00 0000 00 - 0f00 0000 00: - (4 GB) -0f00 0000 00 - 0fc0 0000 00: traces (3 GB) -0fc0 0000 00 - 1000 0000 00: - +0e00 0000 00 - 1000 0000 00: - */ struct MappingAppleAarch64 { static const uptr kLoAppMemBeg = 0x0100000000ull; @@ -141,16 +132,14 @@ struct MappingAppleAarch64 { static const uptr kHeapMemBeg = 0x0200000000ull; static const uptr kHeapMemEnd = 0x0300000000ull; static const uptr kShadowBeg = 0x0400000000ull; - static const uptr kShadowEnd = 0x0c00000000ull; + static const uptr kShadowEnd = 0x0800000000ull; static const uptr kMetaShadowBeg = 0x0d00000000ull; static const uptr kMetaShadowEnd = 0x0e00000000ull; - static const uptr kTraceMemBeg = 0x0f00000000ull; - static const uptr kTraceMemEnd = 0x0fc0000000ull; static const uptr kHiAppMemBeg = 0x0fc0000000ull; static const uptr kHiAppMemEnd = 0x0fc0000000ull; static const uptr kShadowMsk = 0x0ull; static const uptr kShadowXor = 0x0ull; - static const uptr kShadowAdd = 0x0ull; + static const uptr kShadowAdd = 0x0200000000ull; static const uptr kVdsoBeg = 0x7000000000000000ull; static const uptr kMidAppMemBeg = 0; static const uptr kMidAppMemEnd = 0; @@ -159,29 +148,25 @@ struct MappingAppleAarch64 { /* C/C++ on linux/aarch64 (39-bit VMA) 0000 0010 00 - 0100 0000 00: main binary -0100 0000 00 - 0800 0000 00: - -0800 0000 00 - 2000 0000 00: shadow memory +0100 0000 00 - 0400 0000 00: - +0400 0000 00 - 1000 0000 00: shadow memory 2000 0000 00 - 3100 0000 00: - 3100 0000 00 - 3400 0000 00: metainfo 3400 0000 00 - 5500 0000 00: - 5500 0000 00 - 5600 0000 00: main binary (PIE) -5600 0000 00 - 6000 0000 00: - -6000 0000 00 - 6200 0000 00: traces -6200 0000 00 - 7d00 0000 00: - +5600 0000 00 - 7c00 0000 00: - 7c00 0000 00 - 7d00 0000 00: heap 7d00 0000 00 - 7fff ffff ff: modules and main thread stack */ struct MappingAarch64_39 { static const uptr kLoAppMemBeg = 0x0000001000ull; static const uptr kLoAppMemEnd = 0x0100000000ull; - static const uptr kShadowBeg = 0x0800000000ull; - static const uptr kShadowEnd = 0x2000000000ull; + static const uptr kShadowBeg = 0x0400000000ull; + static const uptr kShadowEnd = 0x1000000000ull; static const uptr kMetaShadowBeg = 0x3100000000ull; static const uptr kMetaShadowEnd = 0x3400000000ull; static const uptr kMidAppMemBeg = 0x5500000000ull; - static const uptr kMidAppMemEnd = 0x5600000000ull; - static const uptr kTraceMemBeg = 0x6000000000ull; - static const uptr kTraceMemEnd = 0x6200000000ull; + static const uptr kMidAppMemEnd = 0x5600000000ull; static const uptr kHeapMemBeg = 0x7c00000000ull; static const uptr kHeapMemEnd = 0x7d00000000ull; static const uptr kHiAppMemBeg = 0x7e00000000ull; @@ -195,15 +180,13 @@ struct MappingAarch64_39 { /* C/C++ on linux/aarch64 (42-bit VMA) 00000 0010 00 - 01000 0000 00: main binary -01000 0000 00 - 10000 0000 00: - -10000 0000 00 - 20000 0000 00: shadow memory -20000 0000 00 - 26000 0000 00: - +01000 0000 00 - 08000 0000 00: - +08000 0000 00 - 10000 0000 00: 
shadow memory +10000 0000 00 - 26000 0000 00: - 26000 0000 00 - 28000 0000 00: metainfo 28000 0000 00 - 2aa00 0000 00: - 2aa00 0000 00 - 2ab00 0000 00: main binary (PIE) -2ab00 0000 00 - 36200 0000 00: - -36200 0000 00 - 36240 0000 00: traces -36240 0000 00 - 3e000 0000 00: - +2ab00 0000 00 - 3e000 0000 00: - 3e000 0000 00 - 3f000 0000 00: heap 3f000 0000 00 - 3ffff ffff ff: modules and main thread stack */ @@ -211,14 +194,12 @@ struct MappingAarch64_42 { static const uptr kBroken = kBrokenReverseMapping; static const uptr kLoAppMemBeg = 0x00000001000ull; static const uptr kLoAppMemEnd = 0x01000000000ull; - static const uptr kShadowBeg = 0x10000000000ull; - static const uptr kShadowEnd = 0x20000000000ull; + static const uptr kShadowBeg = 0x08000000000ull; + static const uptr kShadowEnd = 0x10000000000ull; static const uptr kMetaShadowBeg = 0x26000000000ull; static const uptr kMetaShadowEnd = 0x28000000000ull; static const uptr kMidAppMemBeg = 0x2aa00000000ull; - static const uptr kMidAppMemEnd = 0x2ab00000000ull; - static const uptr kTraceMemBeg = 0x36200000000ull; - static const uptr kTraceMemEnd = 0x36400000000ull; + static const uptr kMidAppMemEnd = 0x2ab00000000ull; static const uptr kHeapMemBeg = 0x3e000000000ull; static const uptr kHeapMemEnd = 0x3f000000000ull; static const uptr kHiAppMemBeg = 0x3f000000000ull; @@ -232,14 +213,12 @@ struct MappingAarch64_42 { struct MappingAarch64_48 { static const uptr kLoAppMemBeg = 0x0000000001000ull; static const uptr kLoAppMemEnd = 0x0000200000000ull; - static const uptr kShadowBeg = 0x0002000000000ull; - static const uptr kShadowEnd = 0x0004000000000ull; + static const uptr kShadowBeg = 0x0001000000000ull; + static const uptr kShadowEnd = 0x0002000000000ull; static const uptr kMetaShadowBeg = 0x0005000000000ull; static const uptr kMetaShadowEnd = 0x0006000000000ull; static const uptr kMidAppMemBeg = 0x0aaaa00000000ull; - static const uptr kMidAppMemEnd = 0x0aaaf00000000ull; - static const uptr kTraceMemBeg = 0x0f06000000000ull; - static const uptr kTraceMemEnd = 0x0f06200000000ull; + static const uptr kMidAppMemEnd = 0x0aaaf00000000ull; static const uptr kHeapMemBeg = 0x0ffff00000000ull; static const uptr kHeapMemEnd = 0x0ffff00000000ull; static const uptr kHiAppMemBeg = 0x0ffff00000000ull; @@ -257,9 +236,7 @@ C/C++ on linux/powerpc64 (44-bit VMA) 0001 0000 0000 - 0b00 0000 0000: shadow 0b00 0000 0000 - 0b00 0000 0000: - 0b00 0000 0000 - 0d00 0000 0000: metainfo (memory blocks and sync objects) -0d00 0000 0000 - 0d00 0000 0000: - -0d00 0000 0000 - 0f00 0000 0000: traces -0f00 0000 0000 - 0f00 0000 0000: - +0d00 0000 0000 - 0f00 0000 0000: - 0f00 0000 0000 - 0f50 0000 0000: heap 0f50 0000 0000 - 0f60 0000 0000: - 0f60 0000 0000 - 1000 0000 0000: modules and main thread stack @@ -269,8 +246,6 @@ struct MappingPPC64_44 { kBrokenMapping | kBrokenReverseMapping | kBrokenLinearity; static const uptr kMetaShadowBeg = 0x0b0000000000ull; static const uptr kMetaShadowEnd = 0x0d0000000000ull; - static const uptr kTraceMemBeg = 0x0d0000000000ull; - static const uptr kTraceMemEnd = 0x0f0000000000ull; static const uptr kShadowBeg = 0x000100000000ull; static const uptr kShadowEnd = 0x0b0000000000ull; static const uptr kLoAppMemBeg = 0x000000000100ull; @@ -291,23 +266,19 @@ struct MappingPPC64_44 { C/C++ on linux/powerpc64 (46-bit VMA) 0000 0000 1000 - 0100 0000 0000: main binary 0100 0000 0000 - 0200 0000 0000: - -0100 0000 0000 - 1000 0000 0000: shadow -1000 0000 0000 - 1000 0000 0000: - -1000 0000 0000 - 2000 0000 0000: metainfo (memory blocks and sync 
objects) -2000 0000 0000 - 2000 0000 0000: - -2000 0000 0000 - 2200 0000 0000: traces -2200 0000 0000 - 3d00 0000 0000: - +0100 0000 0000 - 0800 0000 0000: shadow +0800 0000 0000 - 1000 0000 0000: - +1000 0000 0000 - 1200 0000 0000: metainfo (memory blocks and sync objects) +1200 0000 0000 - 3d00 0000 0000: - 3d00 0000 0000 - 3e00 0000 0000: heap 3e00 0000 0000 - 3e80 0000 0000: - 3e80 0000 0000 - 4000 0000 0000: modules and main thread stack */ struct MappingPPC64_46 { static const uptr kMetaShadowBeg = 0x100000000000ull; - static const uptr kMetaShadowEnd = 0x200000000000ull; - static const uptr kTraceMemBeg = 0x200000000000ull; - static const uptr kTraceMemEnd = 0x220000000000ull; + static const uptr kMetaShadowEnd = 0x120000000000ull; static const uptr kShadowBeg = 0x010000000000ull; - static const uptr kShadowEnd = 0x100000000000ull; + static const uptr kShadowEnd = 0x080000000000ull; static const uptr kHeapMemBeg = 0x3d0000000000ull; static const uptr kHeapMemEnd = 0x3e0000000000ull; static const uptr kLoAppMemBeg = 0x000000001000ull; @@ -326,23 +297,19 @@ struct MappingPPC64_46 { C/C++ on linux/powerpc64 (47-bit VMA) 0000 0000 1000 - 0100 0000 0000: main binary 0100 0000 0000 - 0200 0000 0000: - -0100 0000 0000 - 1000 0000 0000: shadow -1000 0000 0000 - 1000 0000 0000: - -1000 0000 0000 - 2000 0000 0000: metainfo (memory blocks and sync objects) -2000 0000 0000 - 2000 0000 0000: - -2000 0000 0000 - 2200 0000 0000: traces -2200 0000 0000 - 7d00 0000 0000: - +0100 0000 0000 - 0800 0000 0000: shadow +0800 0000 0000 - 1000 0000 0000: - +1000 0000 0000 - 1200 0000 0000: metainfo (memory blocks and sync objects) +1200 0000 0000 - 7d00 0000 0000: - 7d00 0000 0000 - 7e00 0000 0000: heap 7e00 0000 0000 - 7e80 0000 0000: - 7e80 0000 0000 - 8000 0000 0000: modules and main thread stack */ struct MappingPPC64_47 { static const uptr kMetaShadowBeg = 0x100000000000ull; - static const uptr kMetaShadowEnd = 0x200000000000ull; - static const uptr kTraceMemBeg = 0x200000000000ull; - static const uptr kTraceMemEnd = 0x220000000000ull; + static const uptr kMetaShadowEnd = 0x120000000000ull; static const uptr kShadowBeg = 0x010000000000ull; - static const uptr kShadowEnd = 0x100000000000ull; + static const uptr kShadowEnd = 0x080000000000ull; static const uptr kHeapMemBeg = 0x7d0000000000ull; static const uptr kHeapMemEnd = 0x7e0000000000ull; static const uptr kLoAppMemBeg = 0x000000001000ull; @@ -362,22 +329,18 @@ C/C++ on linux/s390x While the kernel provides a 64-bit address space, we have to restrict ourselves to 48 bits due to how e.g. SyncVar::GetId() works. 
0000 0000 1000 - 0e00 0000 0000: binary, modules, stacks - 14 TiB -0e00 0000 0000 - 4000 0000 0000: - -4000 0000 0000 - 8000 0000 0000: shadow - 64TiB (4 * app) -8000 0000 0000 - 9000 0000 0000: - +0e00 0000 0000 - 2000 0000 0000: - +2000 0000 0000 - 4000 0000 0000: shadow - 32TiB (2 * app) +4000 0000 0000 - 9000 0000 0000: - 9000 0000 0000 - 9800 0000 0000: metainfo - 8TiB (0.5 * app) -9800 0000 0000 - a000 0000 0000: - -a000 0000 0000 - b000 0000 0000: traces - 16TiB (max history * 128k threads) -b000 0000 0000 - be00 0000 0000: - +9800 0000 0000 - be00 0000 0000: - be00 0000 0000 - c000 0000 0000: heap - 2TiB (max supported by the allocator) */ struct MappingS390x { static const uptr kMetaShadowBeg = 0x900000000000ull; static const uptr kMetaShadowEnd = 0x980000000000ull; - static const uptr kTraceMemBeg = 0xa00000000000ull; - static const uptr kTraceMemEnd = 0xb00000000000ull; - static const uptr kShadowBeg = 0x400000000000ull; - static const uptr kShadowEnd = 0x800000000000ull; + static const uptr kShadowBeg = 0x200000000000ull; + static const uptr kShadowEnd = 0x400000000000ull; static const uptr kHeapMemBeg = 0xbe0000000000ull; static const uptr kHeapMemEnd = 0xc00000000000ull; static const uptr kLoAppMemBeg = 0x000000001000ull; @@ -397,21 +360,17 @@ struct MappingS390x { 0000 1000 0000 - 00c0 0000 0000: - 00c0 0000 0000 - 00e0 0000 0000: heap 00e0 0000 0000 - 2000 0000 0000: - -2000 0000 0000 - 2380 0000 0000: shadow -2380 0000 0000 - 3000 0000 0000: - +2000 0000 0000 - 21c0 0000 0000: shadow +21c0 0000 0000 - 3000 0000 0000: - 3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects) -4000 0000 0000 - 6000 0000 0000: - -6000 0000 0000 - 6200 0000 0000: traces -6200 0000 0000 - 8000 0000 0000: - +4000 0000 0000 - 8000 0000 0000: - */ struct MappingGo48 { static const uptr kMetaShadowBeg = 0x300000000000ull; static const uptr kMetaShadowEnd = 0x400000000000ull; - static const uptr kTraceMemBeg = 0x600000000000ull; - static const uptr kTraceMemEnd = 0x620000000000ull; static const uptr kShadowBeg = 0x200000000000ull; - static const uptr kShadowEnd = 0x238000000000ull; + static const uptr kShadowEnd = 0x21c000000000ull; static const uptr kLoAppMemBeg = 0x000000001000ull; static const uptr kLoAppMemEnd = 0x00e000000000ull; static const uptr kMidAppMemBeg = 0; @@ -431,8 +390,8 @@ struct MappingGo48 { 0000 1000 0000 - 00f8 0000 0000: - 00c0 0000 0000 - 00e0 0000 0000: heap 00e0 0000 0000 - 0100 0000 0000: - -0100 0000 0000 - 0500 0000 0000: shadow -0500 0000 0000 - 0700 0000 0000: traces +0100 0000 0000 - 0300 0000 0000: shadow +0300 0000 0000 - 0700 0000 0000: - 0700 0000 0000 - 0770 0000 0000: metainfo (memory blocks and sync objects) 07d0 0000 0000 - 8000 0000 0000: - */ @@ -440,10 +399,8 @@ struct MappingGo48 { struct MappingGoWindows { static const uptr kMetaShadowBeg = 0x070000000000ull; static const uptr kMetaShadowEnd = 0x077000000000ull; - static const uptr kTraceMemBeg = 0x050000000000ull; - static const uptr kTraceMemEnd = 0x070000000000ull; static const uptr kShadowBeg = 0x010000000000ull; - static const uptr kShadowEnd = 0x050000000000ull; + static const uptr kShadowEnd = 0x030000000000ull; static const uptr kLoAppMemBeg = 0x000000001000ull; static const uptr kLoAppMemEnd = 0x00e000000000ull; static const uptr kMidAppMemBeg = 0; @@ -463,21 +420,17 @@ struct MappingGoWindows { 0000 1000 0000 - 00c0 0000 0000: - 00c0 0000 0000 - 00e0 0000 0000: heap 00e0 0000 0000 - 2000 0000 0000: - -2000 0000 0000 - 2380 0000 0000: shadow -2380 0000 0000 - 2400 0000 0000: - 
-2400 0000 0000 - 3400 0000 0000: metainfo (memory blocks and sync objects) -3400 0000 0000 - 3600 0000 0000: - -3600 0000 0000 - 3800 0000 0000: traces -3800 0000 0000 - 4000 0000 0000: - +2000 0000 0000 - 21c0 0000 0000: shadow +21c0 0000 0000 - 2400 0000 0000: - +2400 0000 0000 - 2470 0000 0000: metainfo (memory blocks and sync objects) +2470 0000 0000 - 4000 0000 0000: - */ struct MappingGoPPC64_46 { static const uptr kMetaShadowBeg = 0x240000000000ull; - static const uptr kMetaShadowEnd = 0x340000000000ull; - static const uptr kTraceMemBeg = 0x360000000000ull; - static const uptr kTraceMemEnd = 0x380000000000ull; + static const uptr kMetaShadowEnd = 0x247000000000ull; static const uptr kShadowBeg = 0x200000000000ull; - static const uptr kShadowEnd = 0x238000000000ull; + static const uptr kShadowEnd = 0x21c000000000ull; static const uptr kLoAppMemBeg = 0x000000001000ull; static const uptr kLoAppMemEnd = 0x00e000000000ull; static const uptr kMidAppMemBeg = 0; @@ -497,21 +450,17 @@ struct MappingGoPPC64_46 { 0000 1000 0000 - 00c0 0000 0000: - 00c0 0000 0000 - 00e0 0000 0000: heap 00e0 0000 0000 - 2000 0000 0000: - -2000 0000 0000 - 3000 0000 0000: shadow -3000 0000 0000 - 3000 0000 0000: - -3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects) -4000 0000 0000 - 6000 0000 0000: - -6000 0000 0000 - 6200 0000 0000: traces -6200 0000 0000 - 8000 0000 0000: - +2000 0000 0000 - 2800 0000 0000: shadow +2800 0000 0000 - 3000 0000 0000: - +3000 0000 0000 - 3200 0000 0000: metainfo (memory blocks and sync objects) +3200 0000 0000 - 8000 0000 0000: - */ struct MappingGoPPC64_47 { static const uptr kMetaShadowBeg = 0x300000000000ull; - static const uptr kMetaShadowEnd = 0x400000000000ull; - static const uptr kTraceMemBeg = 0x600000000000ull; - static const uptr kTraceMemEnd = 0x620000000000ull; + static const uptr kMetaShadowEnd = 0x320000000000ull; static const uptr kShadowBeg = 0x200000000000ull; - static const uptr kShadowEnd = 0x300000000000ull; + static const uptr kShadowEnd = 0x280000000000ull; static const uptr kLoAppMemBeg = 0x000000001000ull; static const uptr kLoAppMemEnd = 0x00e000000000ull; static const uptr kMidAppMemBeg = 0; @@ -531,20 +480,16 @@ struct MappingGoPPC64_47 { 0000 1000 0000 - 00c0 0000 0000: - 00c0 0000 0000 - 00e0 0000 0000: heap 00e0 0000 0000 - 2000 0000 0000: - -2000 0000 0000 - 3000 0000 0000: shadow -3000 0000 0000 - 3000 0000 0000: - -3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects) -4000 0000 0000 - 6000 0000 0000: - -6000 0000 0000 - 6200 0000 0000: traces -6200 0000 0000 - 8000 0000 0000: - +2000 0000 0000 - 2800 0000 0000: shadow +2800 0000 0000 - 3000 0000 0000: - +3000 0000 0000 - 3200 0000 0000: metainfo (memory blocks and sync objects) +3200 0000 0000 - 8000 0000 0000: - */ struct MappingGoAarch64 { static const uptr kMetaShadowBeg = 0x300000000000ull; - static const uptr kMetaShadowEnd = 0x400000000000ull; - static const uptr kTraceMemBeg = 0x600000000000ull; - static const uptr kTraceMemEnd = 0x620000000000ull; + static const uptr kMetaShadowEnd = 0x320000000000ull; static const uptr kShadowBeg = 0x200000000000ull; - static const uptr kShadowEnd = 0x300000000000ull; + static const uptr kShadowEnd = 0x280000000000ull; static const uptr kLoAppMemBeg = 0x000000001000ull; static const uptr kLoAppMemEnd = 0x00e000000000ull; static const uptr kMidAppMemBeg = 0; @@ -565,20 +510,16 @@ Go on linux/mips64 (47-bit VMA) 0000 1000 0000 - 00c0 0000 0000: - 00c0 0000 0000 - 00e0 0000 0000: heap 00e0 0000 0000 - 2000 0000 
@@ -565,20 +510,16 @@ Go on linux/mips64 (47-bit VMA)
 0000 1000 0000 - 00c0 0000 0000: -
 00c0 0000 0000 - 00e0 0000 0000: heap
 00e0 0000 0000 - 2000 0000 0000: -
-2000 0000 0000 - 3000 0000 0000: shadow
-3000 0000 0000 - 3000 0000 0000: -
-3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
-4000 0000 0000 - 6000 0000 0000: -
-6000 0000 0000 - 6200 0000 0000: traces
-6200 0000 0000 - 8000 0000 0000: -
+2000 0000 0000 - 2800 0000 0000: shadow
+2800 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 3200 0000 0000: metainfo (memory blocks and sync objects)
+3200 0000 0000 - 8000 0000 0000: -
 */
 struct MappingGoMips64_47 {
   static const uptr kMetaShadowBeg = 0x300000000000ull;
-  static const uptr kMetaShadowEnd = 0x400000000000ull;
-  static const uptr kTraceMemBeg = 0x600000000000ull;
-  static const uptr kTraceMemEnd = 0x620000000000ull;
+  static const uptr kMetaShadowEnd = 0x320000000000ull;
   static const uptr kShadowBeg = 0x200000000000ull;
-  static const uptr kShadowEnd = 0x300000000000ull;
+  static const uptr kShadowEnd = 0x280000000000ull;
   static const uptr kLoAppMemBeg = 0x000000001000ull;
   static const uptr kLoAppMemEnd = 0x00e000000000ull;
   static const uptr kMidAppMemBeg = 0;
@@ -597,19 +538,15 @@ struct MappingGoMips64_47 {
 Go on linux/s390x
 0000 0000 1000 - 1000 0000 0000: executable and heap - 16 TiB
 1000 0000 0000 - 4000 0000 0000: -
-4000 0000 0000 - 8000 0000 0000: shadow - 64TiB (4 * app)
-8000 0000 0000 - 9000 0000 0000: -
+4000 0000 0000 - 6000 0000 0000: shadow - 32TiB (2 * app)
+6000 0000 0000 - 9000 0000 0000: -
 9000 0000 0000 - 9800 0000 0000: metainfo - 8TiB (0.5 * app)
-9800 0000 0000 - a000 0000 0000: -
-a000 0000 0000 - b000 0000 0000: traces - 16TiB (max history * 128k threads)
 */
 struct MappingGoS390x {
   static const uptr kMetaShadowBeg = 0x900000000000ull;
   static const uptr kMetaShadowEnd = 0x980000000000ull;
-  static const uptr kTraceMemBeg = 0xa00000000000ull;
-  static const uptr kTraceMemEnd = 0xb00000000000ull;
   static const uptr kShadowBeg = 0x400000000000ull;
-  static const uptr kShadowEnd = 0x800000000000ull;
+  static const uptr kShadowEnd = 0x600000000000ull;
   static const uptr kLoAppMemBeg = 0x000000001000ull;
   static const uptr kLoAppMemEnd = 0x100000000000ull;
   static const uptr kMidAppMemBeg = 0;
@@ -648,11 +585,11 @@ ALWAYS_INLINE auto SelectMapping(Arg arg) {
   return Func::template Apply(arg);
 # endif
 #else  // SANITIZER_GO
-# if defined(__x86_64__) || SANITIZER_IOSSIM || SANITIZER_MAC && !SANITIZER_IOS
-  return Func::template Apply(arg);
-# elif defined(__aarch64__) && defined(__APPLE__)
+# if SANITIZER_IOS && !SANITIZER_IOSSIM
   return Func::template Apply(arg);
-# elif defined(__aarch64__) && !defined(__APPLE__)
+# elif defined(__x86_64__) || SANITIZER_MAC
+  return Func::template Apply(arg);
+# elif defined(__aarch64__)
   switch (vmaSize) {
     case 39:
       return Func::template Apply(arg);
@@ -715,8 +652,6 @@ enum MappingType {
   kShadowEnd,
   kMetaShadowBeg,
   kMetaShadowEnd,
-  kTraceMemBeg,
-  kTraceMemEnd,
   kVdsoBeg,
 };
@@ -750,10 +685,6 @@ struct MappingField {
       case kMetaShadowBeg:
        return Mapping::kMetaShadowBeg;
       case kMetaShadowEnd:
        return Mapping::kMetaShadowEnd;
-      case kTraceMemBeg:
-       return Mapping::kTraceMemBeg;
-      case kTraceMemEnd:
-       return Mapping::kTraceMemEnd;
     }
     Die();
   }
@@ -792,11 +723,6 @@
 uptr MetaShadowBeg(void) { return SelectMapping(kMetaShadowBeg); }
 ALWAYS_INLINE
 uptr MetaShadowEnd(void) { return SelectMapping(kMetaShadowEnd); }
-ALWAYS_INLINE
-uptr TraceMemBeg(void) { return SelectMapping(kTraceMemBeg); }
-ALWAYS_INLINE
-uptr TraceMemEnd(void) { return SelectMapping(kTraceMemEnd); }
-
 struct IsAppMemImpl {
   template
   static bool Apply(uptr mem) {
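
The SelectMapping/MappingField machinery above is a small compile-time dispatch: one runtime switch picks the mapping struct, and every accessor is a functor applied to whichever struct was picked. (Angle-bracketed tokens, template argument lists and #include targets alike, were eaten in this mailing-list rendering; the calls read like Func::template Apply<SomeMapping>(arg) in the source.) A self-contained sketch of the pattern with two made-up mapping types:

    #include <cstdio>
    using uptr = unsigned long long;

    // Toy layouts standing in for MappingGo48, MappingGoAarch64, ...
    struct MappingA { static constexpr uptr kShadowBeg = 0x010000000000ull; };
    struct MappingB { static constexpr uptr kShadowBeg = 0x200000000000ull; };

    // Mirrors MappingField: a functor applied to the selected mapping.
    struct ShadowBegField {
      template <typename Mapping>
      static uptr Apply() { return Mapping::kShadowBeg; }
    };

    // Mirrors SelectMapping: the runtime condition (platform, vmaSize, ...)
    // chooses the type once; every accessor funnels through this switch.
    template <typename Func>
    uptr SelectMapping(bool small_vma) {
      return small_vma ? Func::template Apply<MappingA>()
                       : Func::template Apply<MappingB>();
    }

    int main() { std::printf("0x%llx\n", SelectMapping<ShadowBegField>(false)); }

Dropping kTraceMemBeg/kTraceMemEnd from MappingType and MappingField is then purely mechanical: with no trace region, there is one fewer field to dispatch on.
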
@@ -934,43 +860,10 @@ inline uptr RestoreAddr(uptr addr) {
   return SelectMapping(addr);
 }

-// The additional page is to catch shadow stack overflow as paging fault.
-// Windows wants 64K alignment for mmaps.
-const uptr kTotalTraceSize = (kTraceSize * sizeof(Event) + sizeof(Trace)
-    + (64 << 10) + (64 << 10) - 1) & ~((64 << 10) - 1);
-
-struct GetThreadTraceImpl {
-  template
-  static uptr Apply(uptr tid) {
-    uptr p = Mapping::kTraceMemBeg + tid * kTotalTraceSize;
-    DCHECK_LT(p, Mapping::kTraceMemEnd);
-    return p;
-  }
-};
-
-ALWAYS_INLINE
-uptr GetThreadTrace(int tid) { return SelectMapping(tid); }
-
-struct GetThreadTraceHeaderImpl {
-  template
-  static uptr Apply(uptr tid) {
-    uptr p = Mapping::kTraceMemBeg + tid * kTotalTraceSize +
-             kTraceSize * sizeof(Event);
-    DCHECK_LT(p, Mapping::kTraceMemEnd);
-    return p;
-  }
-};
-
-ALWAYS_INLINE
-uptr GetThreadTraceHeader(int tid) {
-  return SelectMapping(tid);
-}
-
 void InitializePlatform();
 void InitializePlatformEarly();
 void CheckAndProtect();
 void InitializeShadowMemoryPlatform();
-void FlushShadowMemory();
 void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns);
 int ExtractResolvFDs(void *state, int *fds, int nfd);
 int ExtractRecvmsgFDs(void *msg, int *fds, int nfd);
diff --git a/libsanitizer/tsan/tsan_platform_linux.cpp b/libsanitizer/tsan/tsan_platform_linux.cpp
index 73ec14892d2..17dbdff8a53 100644
--- a/libsanitizer/tsan/tsan_platform_linux.cpp
+++ b/libsanitizer/tsan/tsan_platform_linux.cpp
@@ -94,7 +94,6 @@ enum {
   MemMeta,
   MemFile,
   MemMmap,
-  MemTrace,
   MemHeap,
   MemOther,
   MemCount,
@@ -112,8 +111,6 @@ void FillProfileCallback(uptr p, uptr rss, bool file, uptr *mem) {
     mem[file ? MemFile : MemMmap] += rss;
   else if (p >= HeapMemBeg() && p < HeapMemEnd())
     mem[MemHeap] += rss;
-  else if (p >= TraceMemBeg() && p < TraceMemEnd())
-    mem[MemTrace] += rss;
   else
     mem[MemOther] += rss;
 }
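
FillProfileCallback is a plain range classifier: each resident page is credited to whichever region contains it, so deleting the trace region deletes one arm of the chain. A reduced sketch of the idea, with illustrative constants standing in for the ShadowBeg()/ShadowEnd() accessors:

    #include <cstdint>
    using uptr = uintptr_t;

    enum MemRegion { kShadow, kMeta, kHeap, kOther, kRegionCount };

    // Illustrative ranges; the real values come through SelectMapping().
    struct Region { uptr beg, end; MemRegion kind; };
    constexpr Region kRegions[] = {
        {0x200000000000ull, 0x300000000000ull, kShadow},
        {0x300000000000ull, 0x340000000000ull, kMeta},
        {0x7b0000000000ull, 0x7c0000000000ull, kHeap},
    };

    // Credit one resident page to a counter, as FillProfileCallback does.
    inline void Classify(uptr p, uptr rss, uptr mem[kRegionCount]) {
      for (const Region &r : kRegions) {
        if (p >= r.beg && p < r.end) {
          mem[r.kind] += rss;
          return;
        }
      }
      mem[kOther] += rss;
    }
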
@@ -126,42 +123,33 @@ void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
   StackDepotStats stacks = StackDepotGetStats();
   uptr nthread, nlive;
   ctx->thread_registry.GetNumberOfThreads(&nthread, &nlive);
+  uptr trace_mem;
+  {
+    Lock l(&ctx->slot_mtx);
+    trace_mem = ctx->trace_part_total_allocated * sizeof(TracePart);
+  }
   uptr internal_stats[AllocatorStatCount];
   internal_allocator()->GetStats(internal_stats);
   // All these are allocated from the common mmap region.
-  mem[MemMmap] -= meta.mem_block + meta.sync_obj + stacks.allocated +
-                  internal_stats[AllocatorStatMapped];
+  mem[MemMmap] -= meta.mem_block + meta.sync_obj + trace_mem +
+                  stacks.allocated + internal_stats[AllocatorStatMapped];
   if (s64(mem[MemMmap]) < 0)
     mem[MemMmap] = 0;
   internal_snprintf(
       buf, buf_size,
-      "%llus: RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
-      " trace:%zd heap:%zd other:%zd intalloc:%zd memblocks:%zd syncobj:%zu"
-      " stacks=%zd[%zd] nthr=%zd/%zd\n",
-      uptime_ns / (1000 * 1000 * 1000), mem[MemTotal] >> 20,
-      mem[MemShadow] >> 20, mem[MemMeta] >> 20, mem[MemFile] >> 20,
-      mem[MemMmap] >> 20, mem[MemTrace] >> 20, mem[MemHeap] >> 20,
+      "==%zu== %llus [%zu]: RSS %zd MB: shadow:%zd meta:%zd file:%zd"
+      " mmap:%zd heap:%zd other:%zd intalloc:%zd memblocks:%zd syncobj:%zu"
+      " trace:%zu stacks=%zd threads=%zu/%zu\n",
+      internal_getpid(), uptime_ns / (1000 * 1000 * 1000), ctx->global_epoch,
+      mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20,
+      mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemHeap] >> 20,
       mem[MemOther] >> 20, internal_stats[AllocatorStatMapped] >> 20,
-      meta.mem_block >> 20, meta.sync_obj >> 20, stacks.allocated >> 20,
-      stacks.n_uniq_ids, nlive, nthread);
-}
-
-# if SANITIZER_LINUX
-void FlushShadowMemoryCallback(
-    const SuspendedThreadsList &suspended_threads_list,
-    void *argument) {
-  ReleaseMemoryPagesToOS(ShadowBeg(), ShadowEnd());
-}
-#endif
-
-void FlushShadowMemory() {
-#if SANITIZER_LINUX
-  StopTheWorld(FlushShadowMemoryCallback, 0);
-#endif
+      meta.mem_block >> 20, meta.sync_obj >> 20, trace_mem >> 20,
+      stacks.allocated >> 20, nlive, nthread);
 }

 #if !SANITIZER_GO
-// Mark shadow for .rodata sections with the special kShadowRodata marker.
+// Mark shadow for .rodata sections with the special Shadow::kRodata marker.
 // Accesses to .rodata can't race, so this saves time, memory and trace space.
 static void MapRodata() {
   // First create temp file.
@@ -182,13 +170,13 @@ static void MapRodata() {
     return;
   internal_unlink(name);  // Unlink it now, so that we can reuse the buffer.
   fd_t fd = openrv;
-  // Fill the file with kShadowRodata.
+  // Fill the file with Shadow::kRodata.
   const uptr kMarkerSize = 512 * 1024 / sizeof(RawShadow);
   InternalMmapVector marker(kMarkerSize);
   // volatile to prevent insertion of memset
   for (volatile RawShadow *p = marker.data(); p < marker.data() + kMarkerSize;
       p++)
-    *p = kShadowRodata;
+    *p = Shadow::kRodata;
   internal_write(fd, marker.data(), marker.size() * sizeof(RawShadow));
   // Map the file into memory.
   uptr page = internal_mmap(0, GetPageSizeCached(), PROT_READ | PROT_WRITE,
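
MapRodata's trick deserves a gloss: it fills an unlinked temp file with the marker shadow value once, then maps that one file over the shadow of every .rodata section, so all of them share the same physical pages. A plain-POSIX sketch of the pattern; MapMarkerFile is hypothetical and uses mkstemp/mmap rather than the runtime's internal_* wrappers:

    #include <fcntl.h>
    #include <stdint.h>
    #include <sys/mman.h>
    #include <unistd.h>

    // Map `len` bytes at `target` (assumed already reserved) to a file
    // pre-filled with `marker`; repeated calls share physical pages.
    static int MapMarkerFile(void *target, size_t len, uint32_t marker) {
      char name[] = "/tmp/marker.XXXXXX";
      int fd = mkstemp(name);
      if (fd < 0) return -1;
      unlink(name);  // keep only the fd, as MapRodata does
      uint32_t buf[1024];
      for (size_t i = 0; i < 1024; i++) buf[i] = marker;
      for (size_t n = 0; n < len; n += sizeof(buf))  // rounds up past len
        if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) {
          close(fd);
          return -1;
        }
      void *p = mmap(target, len, PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
      close(fd);
      return p == MAP_FAILED ? -1 : 0;
    }
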
diff --git a/libsanitizer/tsan/tsan_platform_mac.cpp b/libsanitizer/tsan/tsan_platform_mac.cpp
index 3faa2d0c619..44b98d46cfb 100644
--- a/libsanitizer/tsan/tsan_platform_mac.cpp
+++ b/libsanitizer/tsan/tsan_platform_mac.cpp
@@ -25,6 +25,7 @@
 #include "tsan_rtl.h"
 #include "tsan_flags.h"
+#include
 #include
 #include
 #include
@@ -45,76 +46,86 @@ namespace __tsan {

 #if !SANITIZER_GO

-static void *SignalSafeGetOrAllocate(uptr *dst, uptr size) {
-  atomic_uintptr_t *a = (atomic_uintptr_t *)dst;
-  void *val = (void *)atomic_load_relaxed(a);
-  atomic_signal_fence(memory_order_acquire);  // Turns the previous load into
-                                              // acquire wrt signals.
-  if (UNLIKELY(val == nullptr)) {
-    val = (void *)internal_mmap(nullptr, size, PROT_READ | PROT_WRITE,
-                                MAP_PRIVATE | MAP_ANON, -1, 0);
-    CHECK(val);
-    void *cmp = nullptr;
-    if (!atomic_compare_exchange_strong(a, (uintptr_t *)&cmp, (uintptr_t)val,
-                                        memory_order_acq_rel)) {
-      internal_munmap(val, size);
-      val = cmp;
-    }
-  }
-  return val;
+static char main_thread_state[sizeof(ThreadState)] ALIGNED(
+    SANITIZER_CACHE_LINE_SIZE);
+static ThreadState *dead_thread_state;
+static pthread_key_t thread_state_key;
+
+// We rely on the following documented, but Darwin-specific behavior to keep the
+// reference to the ThreadState object alive in TLS:
+// pthread_key_create man page:
+//   If, after all the destructors have been called for all non-NULL values with
+//   associated destructors, there are still some non-NULL values with
+//   associated destructors, then the process is repeated. If, after at least
+//   [PTHREAD_DESTRUCTOR_ITERATIONS] iterations of destructor calls for
+//   outstanding non-NULL values, there are still some non-NULL values with
+//   associated destructors, the implementation stops calling destructors.
+static_assert(PTHREAD_DESTRUCTOR_ITERATIONS == 4, "Small number of iterations");
+static void ThreadStateDestructor(void *thr) {
+  int res = pthread_setspecific(thread_state_key, thr);
+  CHECK_EQ(res, 0);
 }

-// On OS X, accessing TLVs via __thread or manually by using pthread_key_* is
-// problematic, because there are several places where interceptors are called
-// when TLVs are not accessible (early process startup, thread cleanup, ...).
-// The following provides a "poor man's TLV" implementation, where we use the
-// shadow memory of the pointer returned by pthread_self() to store a pointer to
-// the ThreadState object. The main thread's ThreadState is stored separately
-// in a static variable, because we need to access it even before the
-// shadow memory is set up.
-static uptr main_thread_identity = 0;
-ALIGNED(64) static char main_thread_state[sizeof(ThreadState)];
-static ThreadState *main_thread_state_loc = (ThreadState *)main_thread_state;
-
-// We cannot use pthread_self() before libpthread has been initialized. Our
-// current heuristic for guarding this is checking `main_thread_identity` which
-// is only assigned in `__tsan::InitializePlatform`.
-static ThreadState **cur_thread_location() {
-  if (main_thread_identity == 0)
-    return &main_thread_state_loc;
-  uptr thread_identity = (uptr)pthread_self();
-  if (thread_identity == main_thread_identity)
-    return &main_thread_state_loc;
-  return (ThreadState **)MemToShadow(thread_identity);
+static void InitializeThreadStateStorage() {
+  int res;
+  CHECK_EQ(thread_state_key, 0);
+  res = pthread_key_create(&thread_state_key, ThreadStateDestructor);
+  CHECK_EQ(res, 0);
+  res = pthread_setspecific(thread_state_key, main_thread_state);
+  CHECK_EQ(res, 0);
+
+  auto dts = (ThreadState *)MmapOrDie(sizeof(ThreadState), "ThreadState");
+  dts->fast_state.SetIgnoreBit();
+  dts->ignore_interceptors = 1;
+  dts->is_dead = true;
+  const_cast(dts->tid) = kInvalidTid;
+  res = internal_mprotect(dts, sizeof(ThreadState), PROT_READ);  // immutable
+  CHECK_EQ(res, 0);
+  dead_thread_state = dts;
 }
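
The destructor above is the heart of the new scheme: TSD destructors are re-run (up to PTHREAD_DESTRUCTOR_ITERATIONS passes) as long as a destructor stores a non-NULL value back, so ThreadStateDestructor re-installs the pointer and the slot stays readable while late interceptors run during thread teardown. A standalone sketch of that keep-alive pattern, with an int payload standing in for ThreadState:

    #include <pthread.h>
    #include <cstdio>

    static pthread_key_t key;

    // Storing the value back makes the library call us again on the next
    // destructor pass (implementations cap the passes, e.g. at 4 on Darwin).
    static void KeepAliveDestructor(void *value) {
      pthread_setspecific(key, value);
      std::puts("destructor pass; slot still populated");
    }

    int main() {
      pthread_key_create(&key, KeepAliveDestructor);
      static int payload = 42;  // stand-in for a per-thread ThreadState
      pthread_setspecific(key, &payload);
      pthread_exit(nullptr);  // runs TSD destructors for the main thread
    }

The dead_thread_state object plays the matching role for threads whose real ThreadState has already been unmapped: a read-only, ignore-everything state that late interceptors can still dereference safely.
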
 ThreadState *cur_thread() {
-  return (ThreadState *)SignalSafeGetOrAllocate(
-      (uptr *)cur_thread_location(), sizeof(ThreadState));
+  // Some interceptors get called before libpthread has been initialized and in
+  // these cases we must avoid calling any pthread APIs.
+  if (UNLIKELY(!thread_state_key)) {
+    return (ThreadState *)main_thread_state;
+  }
+
+  // We only reach this line after InitializeThreadStateStorage() ran, i.e.,
+  // after TSan (and therefore libpthread) have been initialized.
+  ThreadState *thr = (ThreadState *)pthread_getspecific(thread_state_key);
+  if (UNLIKELY(!thr)) {
+    thr = (ThreadState *)MmapOrDie(sizeof(ThreadState), "ThreadState");
+    int res = pthread_setspecific(thread_state_key, thr);
+    CHECK_EQ(res, 0);
+  }
+  return thr;
 }

 void set_cur_thread(ThreadState *thr) {
-  *cur_thread_location() = thr;
+  int res = pthread_setspecific(thread_state_key, thr);
+  CHECK_EQ(res, 0);
 }

-// TODO(kuba.brecka): This is not async-signal-safe. In particular, we call
-// munmap first and then clear `fake_tls`; if we receive a signal in between,
-// handler will try to access the unmapped ThreadState.
 void cur_thread_finalize() {
-  ThreadState **thr_state_loc = cur_thread_location();
-  if (thr_state_loc == &main_thread_state_loc) {
+  ThreadState *thr = (ThreadState *)pthread_getspecific(thread_state_key);
+  CHECK(thr);
+  if (thr == (ThreadState *)main_thread_state) {
     // Calling dispatch_main() or xpc_main() actually invokes pthread_exit to
     // exit the main thread. Let's keep the main thread's ThreadState.
     return;
   }
-  internal_munmap(*thr_state_loc, sizeof(ThreadState));
-  *thr_state_loc = nullptr;
+  // Intercepted functions can still get called after cur_thread_finalize()
+  // (called from DestroyThreadState()), so put a fake thread state for "dead"
+  // threads. An alternative solution would be to release the ThreadState
+  // object from THREAD_DESTROY (which is delivered later and on the parent
+  // thread) instead of THREAD_TERMINATE.
+  int res = pthread_setspecific(thread_state_key, dead_thread_state);
+  CHECK_EQ(res, 0);
+  UnmapOrDie(thr, sizeof(ThreadState));
 }

 #endif

-void FlushShadowMemory() {
-}
-
 static void RegionMemUsage(uptr start, uptr end, uptr *res, uptr *dirty) {
   vm_address_t address = start;
   vm_address_t end_address = end;
@@ -142,12 +153,10 @@ static void RegionMemUsage(uptr start, uptr end, uptr *res, uptr *dirty) {
 void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
   uptr shadow_res, shadow_dirty;
   uptr meta_res, meta_dirty;
-  uptr trace_res, trace_dirty;
   RegionMemUsage(ShadowBeg(), ShadowEnd(), &shadow_res, &shadow_dirty);
   RegionMemUsage(MetaShadowBeg(), MetaShadowEnd(), &meta_res, &meta_dirty);
-  RegionMemUsage(TraceMemBeg(), TraceMemEnd(), &trace_res, &trace_dirty);

-#if !SANITIZER_GO
+# if !SANITIZER_GO
   uptr low_res, low_dirty;
   uptr high_res, high_dirty;
   uptr heap_res, heap_dirty;
@@ -166,7 +175,6 @@ void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
       buf, buf_size,
      "shadow (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
      "meta (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
-     "traces (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
 # if !SANITIZER_GO
      "low app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
      "high app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
@@ -179,7 +187,6 @@
      "------------------------------\n",
      ShadowBeg(), ShadowEnd(), shadow_res / 1024, shadow_dirty / 1024,
      MetaShadowBeg(), MetaShadowEnd(), meta_res / 1024, meta_dirty / 1024,
-     TraceMemBeg(), TraceMemEnd(), trace_res / 1024, trace_dirty / 1024,
 # if !SANITIZER_GO
      LoAppMemBeg(), LoAppMemEnd(), low_res / 1024, low_dirty / 1024,
      HiAppMemBeg(), HiAppMemEnd(), high_res / 1024, high_dirty / 1024,
@@ -222,11 +229,10 @@ static void
my_pthread_introspection_hook(unsigned int [...] [diff truncated at 524288 bytes]