* [gcc(refs/users/marxin/heads/merge-libsanitizer-v7)] libsanitizer: merge from upstream ae59131d3ef311fb4b1e50627c6457be00e60dc9
@ 2022-11-15 14:45 Martin Liska
From: Martin Liska @ 2022-11-15 14:45 UTC (permalink / raw)
  To: gcc-cvs

https://gcc.gnu.org/g:47538f13016073fc0352e66a5f4476dfbba0d3d0

commit 47538f13016073fc0352e66a5f4476dfbba0d3d0
Author: Martin Liska <mliska@suse.cz>
Date:   Tue Nov 15 11:11:41 2022 +0100

    libsanitizer: merge from upstream ae59131d3ef311fb4b1e50627c6457be00e60dc9

Diff:
---
 libsanitizer/MERGE                                 |  2 +-
 libsanitizer/asan/asan_allocator.cpp               |  4 +-
 libsanitizer/asan/asan_allocator.h                 |  6 --
 libsanitizer/asan/asan_descriptions.cpp            | 12 +--
 libsanitizer/asan/asan_errors.cpp                  |  3 +-
 libsanitizer/asan/asan_globals.cpp                 | 19 +++++
 libsanitizer/asan/asan_interceptors.h              |  9 +--
 libsanitizer/asan/asan_interface.inc               |  7 ++
 libsanitizer/asan/asan_interface_internal.h        | 14 ++++
 libsanitizer/asan/asan_mapping.h                   | 10 ++-
 libsanitizer/asan/asan_mapping_sparc64.h           | 19 +++++
 libsanitizer/asan/asan_poisoning.cpp               | 92 +++++++++++++++++++--
 libsanitizer/asan/asan_rtl.cpp                     | 17 ++--
 libsanitizer/hwasan/hwasan.cpp                     |  8 +-
 libsanitizer/hwasan/hwasan_flags.inc               |  2 +-
 libsanitizer/hwasan/hwasan_fuchsia.cpp             |  4 +
 libsanitizer/hwasan/hwasan_report.cpp              | 24 +++---
 libsanitizer/include/sanitizer/msan_interface.h    |  2 +
 libsanitizer/lsan/lsan_common.cpp                  | 49 ++++++++++--
 libsanitizer/lsan/lsan_common.h                    |  2 +
 libsanitizer/lsan/lsan_common_mac.cpp              | 74 ++++++++++++-----
 libsanitizer/sanitizer_common/sanitizer_common.h   |  3 +
 .../sanitizer_common_interceptors.inc              |  2 +-
 ...zer_common_interceptors_vfork_loongarch64.inc.S |  6 --
 libsanitizer/sanitizer_common/sanitizer_linux.cpp  | 10 ++-
 .../sanitizer_common/sanitizer_linux_libcdep.cpp   |  4 -
 libsanitizer/sanitizer_common/sanitizer_mac.cpp    | 17 ++--
 libsanitizer/sanitizer_common/sanitizer_mac.h      | 20 -----
 libsanitizer/sanitizer_common/sanitizer_platform.h |  4 +-
 .../sanitizer_platform_limits_linux.cpp            |  5 +-
 .../sanitizer_platform_limits_posix.h              |  2 +-
 .../sanitizer_common/sanitizer_procmaps_mac.cpp    | 62 ++++++++++++---
 .../sanitizer_common/sanitizer_stacktrace.cpp      | 17 ++--
 .../sanitizer_stoptheworld_mac.cpp                 |  7 +-
 .../sanitizer_symbolizer_libcdep.cpp               |  2 +
 .../sanitizer_syscall_linux_loongarch64.inc        | 90 +++++++++++----------
 libsanitizer/tsan/tsan_interceptors.h              |  5 +-
 libsanitizer/tsan/tsan_interceptors_posix.cpp      | 93 +++++++++++++++-------
 libsanitizer/tsan/tsan_interface.h                 |  7 ++
 libsanitizer/tsan/tsan_rtl.h                       | 12 ++-
 libsanitizer/tsan/tsan_rtl_ppc64.S                 |  1 -
 libsanitizer/ubsan/ubsan_flags.cpp                 |  1 -
 libsanitizer/ubsan/ubsan_handlers.cpp              | 15 ----
 libsanitizer/ubsan/ubsan_handlers.h                |  8 --
 libsanitizer/ubsan/ubsan_platform.h                |  2 -
 45 files changed, 523 insertions(+), 251 deletions(-)

diff --git a/libsanitizer/MERGE b/libsanitizer/MERGE
index 3ca670c1184..6bb19c7d5ba 100644
--- a/libsanitizer/MERGE
+++ b/libsanitizer/MERGE
@@ -1,4 +1,4 @@
-84a71d5259c2682403cdbd8710592410a2f128ab
+ae59131d3ef311fb4b1e50627c6457be00e60dc9
 
 The first line of this file holds the git revision number of the
 last merge done from the master library sources.
diff --git a/libsanitizer/asan/asan_allocator.cpp b/libsanitizer/asan/asan_allocator.cpp
index 7b7a289c2d2..52d7eff7281 100644
--- a/libsanitizer/asan/asan_allocator.cpp
+++ b/libsanitizer/asan/asan_allocator.cpp
@@ -803,8 +803,8 @@ struct Allocator {
     sptr offset = 0;
     if (!m1 || AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
       // The address is in the chunk's left redzone, so maybe it is actually
-      // a right buffer overflow from the other chunk to the left.
-      // Search a bit to the left to see if there is another chunk.
+      // a right buffer overflow from the other chunk before.
+      // Search a bit before to see if there is another chunk.
       AsanChunk *m2 = nullptr;
       for (uptr l = 1; l < GetPageSizeCached(); l++) {
         m2 = GetAsanChunkByAddr(addr - l);
diff --git a/libsanitizer/asan/asan_allocator.h b/libsanitizer/asan/asan_allocator.h
index 27d826fb613..0b4dbf03bb9 100644
--- a/libsanitizer/asan/asan_allocator.h
+++ b/libsanitizer/asan/asan_allocator.h
@@ -135,12 +135,6 @@ typedef VeryCompactSizeClassMap SizeClassMap;
 const uptr kAllocatorSpace = ~(uptr)0;
 const uptr kAllocatorSize = 0x2000000000ULL;  // 128G.
 typedef VeryDenseSizeClassMap SizeClassMap;
-# elif defined(__aarch64__)
-// AArch64/SANITIZER_CAN_USE_ALLOCATOR64 is only for 42-bit VMA
-// so no need to different values for different VMA.
-const uptr kAllocatorSpace =  0x10000000000ULL;
-const uptr kAllocatorSize  =  0x10000000000ULL;  // 3T.
-typedef DefaultSizeClassMap SizeClassMap;
 #elif defined(__sparc__)
 const uptr kAllocatorSpace = ~(uptr)0;
 const uptr kAllocatorSize = 0x20000000000ULL;  // 2T.
diff --git a/libsanitizer/asan/asan_descriptions.cpp b/libsanitizer/asan/asan_descriptions.cpp
index d7d96168579..fbe92572b55 100644
--- a/libsanitizer/asan/asan_descriptions.cpp
+++ b/libsanitizer/asan/asan_descriptions.cpp
@@ -129,11 +129,11 @@ static void PrintHeapChunkAccess(uptr addr, const ChunkAccess &descr) {
   str.append("%s", d.Location());
   switch (descr.access_type) {
     case kAccessTypeLeft:
-      str.append("%p is located %zd bytes to the left of",
+      str.append("%p is located %zd bytes before",
                  (void *)descr.bad_addr, descr.offset);
       break;
     case kAccessTypeRight:
-      str.append("%p is located %zd bytes to the right of",
+      str.append("%p is located %zd bytes after",
                  (void *)descr.bad_addr, descr.offset);
       break;
     case kAccessTypeInside:
@@ -279,17 +279,17 @@ static void DescribeAddressRelativeToGlobal(uptr addr, uptr access_size,
   Decorator d;
   str.append("%s", d.Location());
   if (addr < g.beg) {
-    str.append("%p is located %zd bytes to the left", (void *)addr,
+    str.append("%p is located %zd bytes before", (void *)addr,
                g.beg - addr);
   } else if (addr + access_size > g.beg + g.size) {
     if (addr < g.beg + g.size) addr = g.beg + g.size;
-    str.append("%p is located %zd bytes to the right", (void *)addr,
+    str.append("%p is located %zd bytes after", (void *)addr,
                addr - (g.beg + g.size));
   } else {
     // Can it happen?
-    str.append("%p is located %zd bytes inside", (void *)addr, addr - g.beg);
+    str.append("%p is located %zd bytes inside of", (void *)addr, addr - g.beg);
   }
-  str.append(" of global variable '%s' defined in '",
+  str.append(" global variable '%s' defined in '",
              MaybeDemangleGlobalName(g.name));
   PrintGlobalLocation(&str, g);
   str.append("' (0x%zx) of size %zu\n", g.beg, g.size);
diff --git a/libsanitizer/asan/asan_errors.cpp b/libsanitizer/asan/asan_errors.cpp
index 10f7c17991d..f3befdf3c2b 100644
--- a/libsanitizer/asan/asan_errors.cpp
+++ b/libsanitizer/asan/asan_errors.cpp
@@ -539,7 +539,8 @@ static void PrintShadowBytes(InternalScopedString *str, const char *before,
                              u8 *bytes, u8 *guilty, uptr n) {
   Decorator d;
   if (before)
-    str->append("%s%p:", before, (void *)bytes);
+    str->append("%s%p:", before,
+                (void *)ShadowToMem(reinterpret_cast<uptr>(bytes)));
   for (uptr i = 0; i < n; i++) {
     u8 *p = bytes + i;
     const char *before =
diff --git a/libsanitizer/asan/asan_globals.cpp b/libsanitizer/asan/asan_globals.cpp
index 8f3491f0199..b780128c9ad 100644
--- a/libsanitizer/asan/asan_globals.cpp
+++ b/libsanitizer/asan/asan_globals.cpp
@@ -154,6 +154,23 @@ static void CheckODRViolationViaIndicator(const Global *g) {
   }
 }
 
+// Check ODR violation for given global G by checking if it's already poisoned.
+// We use this method in case compiler doesn't use private aliases for global
+// variables.
+static void CheckODRViolationViaPoisoning(const Global *g) {
+  if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
+    // This check may not be enough: if the first global is much larger
+    // the entire redzone of the second global may be within the first global.
+    for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
+      if (g->beg == l->g->beg &&
+          (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
+          !IsODRViolationSuppressed(g->name))
+        ReportODRViolation(g, FindRegistrationSite(g),
+                           l->g, FindRegistrationSite(l->g));
+    }
+  }
+}
+
 // Clang provides two different ways for global variables protection:
 // it can poison the global itself or its private alias. In former
 // case we may poison same symbol multiple times, that can help us to
@@ -199,6 +216,8 @@ static void RegisterGlobal(const Global *g) {
     // where two globals with the same name are defined in different modules.
     if (UseODRIndicator(g))
       CheckODRViolationViaIndicator(g);
+    else
+      CheckODRViolationViaPoisoning(g);
   }
   if (CanPoisonMemory())
     PoisonRedZones(*g);
diff --git a/libsanitizer/asan/asan_interceptors.h b/libsanitizer/asan/asan_interceptors.h
index 7edae006271..c4bf087ea17 100644
--- a/libsanitizer/asan/asan_interceptors.h
+++ b/libsanitizer/asan/asan_interceptors.h
@@ -81,12 +81,7 @@ void InitializePlatformInterceptors();
 #if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && !SANITIZER_SOLARIS && \
     !SANITIZER_NETBSD
 # define ASAN_INTERCEPT___CXA_THROW 1
-# if ! defined(ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION) \
-     || ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION
-#   define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
-# else
-#   define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 0
-# endif
+# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
 # if defined(_GLIBCXX_SJLJ_EXCEPTIONS) || (SANITIZER_IOS && defined(__arm__))
 #  define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 1
 # else
@@ -119,7 +114,7 @@ void InitializePlatformInterceptors();
 
 #if SANITIZER_LINUX &&                                                \
     (defined(__arm__) || defined(__aarch64__) || defined(__i386__) || \
-     defined(__x86_64__) || SANITIZER_RISCV64)
+     defined(__x86_64__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64)
 # define ASAN_INTERCEPT_VFORK 1
 #else
 # define ASAN_INTERCEPT_VFORK 0
diff --git a/libsanitizer/asan/asan_interface.inc b/libsanitizer/asan/asan_interface.inc
index 89ef552b711..bfc44b46196 100644
--- a/libsanitizer/asan/asan_interface.inc
+++ b/libsanitizer/asan/asan_interface.inc
@@ -108,6 +108,13 @@ INTERFACE_FUNCTION(__asan_report_store_n_noabort)
 INTERFACE_FUNCTION(__asan_set_death_callback)
 INTERFACE_FUNCTION(__asan_set_error_report_callback)
 INTERFACE_FUNCTION(__asan_set_shadow_00)
+INTERFACE_FUNCTION(__asan_set_shadow_01)
+INTERFACE_FUNCTION(__asan_set_shadow_02)
+INTERFACE_FUNCTION(__asan_set_shadow_03)
+INTERFACE_FUNCTION(__asan_set_shadow_04)
+INTERFACE_FUNCTION(__asan_set_shadow_05)
+INTERFACE_FUNCTION(__asan_set_shadow_06)
+INTERFACE_FUNCTION(__asan_set_shadow_07)
 INTERFACE_FUNCTION(__asan_set_shadow_f1)
 INTERFACE_FUNCTION(__asan_set_shadow_f2)
 INTERFACE_FUNCTION(__asan_set_shadow_f3)
diff --git a/libsanitizer/asan/asan_interface_internal.h b/libsanitizer/asan/asan_interface_internal.h
index b0802a89ddb..987f855c0f9 100644
--- a/libsanitizer/asan/asan_interface_internal.h
+++ b/libsanitizer/asan/asan_interface_internal.h
@@ -90,6 +90,20 @@ extern "C" {
   SANITIZER_INTERFACE_ATTRIBUTE
   void __asan_set_shadow_00(uptr addr, uptr size);
   SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_set_shadow_01(uptr addr, uptr size);
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_set_shadow_02(uptr addr, uptr size);
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_set_shadow_03(uptr addr, uptr size);
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_set_shadow_04(uptr addr, uptr size);
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_set_shadow_05(uptr addr, uptr size);
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_set_shadow_06(uptr addr, uptr size);
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_set_shadow_07(uptr addr, uptr size);
+  SANITIZER_INTERFACE_ATTRIBUTE
   void __asan_set_shadow_f1(uptr addr, uptr size);
   SANITIZER_INTERFACE_ATTRIBUTE
   void __asan_set_shadow_f2(uptr addr, uptr size);
diff --git a/libsanitizer/asan/asan_mapping.h b/libsanitizer/asan/asan_mapping.h
index 34ffc7b5cd0..c5f95c07a21 100644
--- a/libsanitizer/asan/asan_mapping.h
+++ b/libsanitizer/asan/asan_mapping.h
@@ -190,7 +190,7 @@
 #  elif defined(__aarch64__)
 #    define ASAN_SHADOW_OFFSET_CONST 0x0000001000000000
 #  elif defined(__powerpc64__)
-#    define ASAN_SHADOW_OFFSET_CONST 0x0000020000000000
+#    define ASAN_SHADOW_OFFSET_CONST 0x0000100000000000
 #  elif defined(__s390x__)
 #    define ASAN_SHADOW_OFFSET_CONST 0x0010000000000000
 #  elif SANITIZER_FREEBSD
@@ -272,6 +272,8 @@ extern uptr kHighMemEnd, kMidMemBeg, kMidMemEnd;  // Initialized in __asan_init.
 #  else
 #    define MEM_TO_SHADOW(mem) \
       (((mem) >> ASAN_SHADOW_SCALE) + (ASAN_SHADOW_OFFSET))
+#    define SHADOW_TO_MEM(mem) \
+      (((mem) - (ASAN_SHADOW_OFFSET)) << (ASAN_SHADOW_SCALE))
 
 #    define kLowMemBeg 0
 #    define kLowMemEnd (ASAN_SHADOW_OFFSET ? ASAN_SHADOW_OFFSET - 1 : 0)
@@ -376,6 +378,12 @@ static inline bool AddrIsInShadow(uptr a) {
   return AddrIsInLowShadow(a) || AddrIsInMidShadow(a) || AddrIsInHighShadow(a);
 }
 
+static inline uptr ShadowToMem(uptr p) {
+  PROFILE_ASAN_MAPPING();
+  CHECK(AddrIsInShadow(p));
+  return SHADOW_TO_MEM(p);
+}
+
 static inline bool AddrIsAlignedByGranularity(uptr a) {
   PROFILE_ASAN_MAPPING();
   return (a & (ASAN_SHADOW_GRANULARITY - 1)) == 0;
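
For context on the new SHADOW_TO_MEM/ShadowToMem helpers (used above in asan_errors.cpp to print the application address that a row of shadow bytes describes): the macro simply inverts MEM_TO_SHADOW, dropping the low ASAN_SHADOW_SCALE bits. A minimal sketch with an illustrative scale and offset — the real constants are the per-platform values defined earlier in this header:

  // Illustrative constants only; ASan uses ASAN_SHADOW_SCALE and the
  // platform-specific ASAN_SHADOW_OFFSET_CONST values above.
  constexpr unsigned long long kScale = 3;            // 8 app bytes per shadow byte
  constexpr unsigned long long kOffset = 0x7fff8000;  // typical x86_64 Linux offset

  constexpr unsigned long long MemToShadowSketch(unsigned long long mem) {
    return (mem >> kScale) + kOffset;
  }
  constexpr unsigned long long ShadowToMemSketch(unsigned long long shadow) {
    return (shadow - kOffset) << kScale;
  }

  // The round trip maps an address back to the start of its 8-byte granule.
  static_assert(ShadowToMemSketch(MemToShadowSketch(0x100000008ULL)) == 0x100000008ULL);
  static_assert(ShadowToMemSketch(MemToShadowSketch(0x10000000fULL)) == 0x100000008ULL);
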
diff --git a/libsanitizer/asan/asan_mapping_sparc64.h b/libsanitizer/asan/asan_mapping_sparc64.h
index 90261d301f7..e310c12fe30 100644
--- a/libsanitizer/asan/asan_mapping_sparc64.h
+++ b/libsanitizer/asan/asan_mapping_sparc64.h
@@ -28,6 +28,7 @@
 #define MEM_TO_SHADOW(mem)                                       \
   ((((mem) << HIGH_BITS) >> (HIGH_BITS + (ASAN_SHADOW_SCALE))) + \
    (ASAN_SHADOW_OFFSET))
+#define SHADOW_TO_MEM(ptr) (__asan::ShadowToMemSparc64(ptr))
 
 #define kLowMemBeg 0
 #define kLowMemEnd (ASAN_SHADOW_OFFSET - 1)
@@ -97,6 +98,24 @@ static inline bool AddrIsInShadowGap(uptr a) {
   return a >= kShadowGapBeg && a <= kShadowGapEnd;
 }
 
+static inline constexpr uptr ShadowToMemSparc64(uptr p) {
+  PROFILE_ASAN_MAPPING();
+  p -= ASAN_SHADOW_OFFSET;
+  p <<= ASAN_SHADOW_SCALE;
+  if (p >= 0x8000000000000) {
+    p |= (~0ULL) << VMA_BITS;
+  }
+  return p;
+}
+
+static_assert(ShadowToMemSparc64(MEM_TO_SHADOW(0x0000000000000000)) ==
+              0x0000000000000000);
+static_assert(ShadowToMemSparc64(MEM_TO_SHADOW(0xfff8000000000000)) ==
+              0xfff8000000000000);
+// Gets aligned down.
+static_assert(ShadowToMemSparc64(MEM_TO_SHADOW(0x0007ffffffffffff)) ==
+              0x0007fffffffffff8);
+
 }  // namespace __asan
 
 #endif  // ASAN_MAPPING_SPARC64_H
diff --git a/libsanitizer/asan/asan_poisoning.cpp b/libsanitizer/asan/asan_poisoning.cpp
index 3b7c9d1312d..e4702563463 100644
--- a/libsanitizer/asan/asan_poisoning.cpp
+++ b/libsanitizer/asan/asan_poisoning.cpp
@@ -312,6 +312,34 @@ void __asan_set_shadow_00(uptr addr, uptr size) {
   REAL(memset)((void *)addr, 0, size);
 }
 
+void __asan_set_shadow_01(uptr addr, uptr size) {
+  REAL(memset)((void *)addr, 0x01, size);
+}
+
+void __asan_set_shadow_02(uptr addr, uptr size) {
+  REAL(memset)((void *)addr, 0x02, size);
+}
+
+void __asan_set_shadow_03(uptr addr, uptr size) {
+  REAL(memset)((void *)addr, 0x03, size);
+}
+
+void __asan_set_shadow_04(uptr addr, uptr size) {
+  REAL(memset)((void *)addr, 0x04, size);
+}
+
+void __asan_set_shadow_05(uptr addr, uptr size) {
+  REAL(memset)((void *)addr, 0x05, size);
+}
+
+void __asan_set_shadow_06(uptr addr, uptr size) {
+  REAL(memset)((void *)addr, 0x06, size);
+}
+
+void __asan_set_shadow_07(uptr addr, uptr size) {
+  REAL(memset)((void *)addr, 0x07, size);
+}
+
 void __asan_set_shadow_f1(uptr addr, uptr size) {
   REAL(memset)((void *)addr, 0xf1, size);
 }
@@ -354,8 +382,7 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
   uptr old_mid = reinterpret_cast<uptr>(old_mid_p);
   uptr new_mid = reinterpret_cast<uptr>(new_mid_p);
   uptr granularity = ASAN_SHADOW_GRANULARITY;
-  if (!(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end &&
-        IsAligned(beg, granularity))) {
+  if (!(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end)) {
     GET_STACK_TRACE_FATAL_HERE;
     ReportBadParamsToAnnotateContiguousContainer(beg, end, old_mid, new_mid,
                                                  &stack);
@@ -363,6 +390,56 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
   CHECK_LE(end - beg,
            FIRST_32_SECOND_64(1UL << 30, 1ULL << 40)); // Sanity check.
 
+  if (old_mid == new_mid)
+    return;  // Nothing to do here.
+
+  // Handle misaligned end and cut it off.
+  if (UNLIKELY(!AddrIsAlignedByGranularity(end))) {
+    uptr end_down = RoundDownTo(end, granularity);
+    // Either new or old mid must be in the granule to affect it.
+    if (new_mid > end_down) {
+      if (AddressIsPoisoned(end)) {
+        *(u8 *)MemToShadow(end_down) = static_cast<u8>(new_mid - end_down);
+      } else {
+        // Something after the container - don't touch.
+      }
+    } else if (old_mid > end_down) {
+      if (AddressIsPoisoned(end)) {
+        *(u8 *)MemToShadow(end_down) = kAsanContiguousContainerOOBMagic;
+      } else {
+        // Something after the container - don't touch.
+      }
+    }
+
+    if (beg >= end_down)
+      return;  // Same granule.
+
+    old_mid = Min(end_down, old_mid);
+    new_mid = Min(end_down, new_mid);
+  }
+
+  // Handle misaligned begin and cut it off.
+  if (UNLIKELY(!AddrIsAlignedByGranularity(beg))) {
+    uptr beg_up = RoundUpTo(beg, granularity);
+    uptr beg_down = RoundDownTo(beg, granularity);
+    // As soon as we add first byte into container we will not be able to
+    // determine the state of the byte before the container. So we assume it's
+    // always unpoison.
+
+    // Either new or old mid must be in the granule to affect it.
+    if (new_mid < beg_up) {
+      *(u8 *)MemToShadow(beg_down) = static_cast<u8>(new_mid - beg_down);
+    } else if (old_mid < beg_up) {
+      *(u8 *)MemToShadow(beg_down) = 0;
+    }
+
+    old_mid = Max(beg_up, old_mid);
+    new_mid = Max(beg_up, new_mid);
+  }
+
+  if (old_mid == new_mid)
+    return;
+
   uptr a = RoundDownTo(Min(old_mid, new_mid), granularity);
   uptr c = RoundUpTo(Max(old_mid, new_mid), granularity);
   uptr d1 = RoundDownTo(old_mid, granularity);
@@ -397,8 +474,13 @@ const void *__sanitizer_contiguous_container_find_bad_address(
     const void *beg_p, const void *mid_p, const void *end_p) {
   if (!flags()->detect_container_overflow)
     return nullptr;
+  uptr granularity = ASAN_SHADOW_GRANULARITY;
   uptr beg = reinterpret_cast<uptr>(beg_p);
   uptr end = reinterpret_cast<uptr>(end_p);
+  uptr annotations_end =
+      (!AddrIsAlignedByGranularity(end) && !AddressIsPoisoned(end))
+          ? RoundDownTo(end, granularity)
+          : end;
   uptr mid = reinterpret_cast<uptr>(mid_p);
   CHECK_LE(beg, mid);
   CHECK_LE(mid, end);
@@ -408,9 +490,9 @@ const void *__sanitizer_contiguous_container_find_bad_address(
   uptr r1_beg = beg;
   uptr r1_end = Min(beg + kMaxRangeToCheck, mid);
   uptr r2_beg = Max(beg, mid - kMaxRangeToCheck);
-  uptr r2_end = Min(end, mid + kMaxRangeToCheck);
-  uptr r3_beg = Max(end - kMaxRangeToCheck, mid);
-  uptr r3_end = end;
+  uptr r2_end = Min(annotations_end, mid + kMaxRangeToCheck);
+  uptr r3_beg = Max(annotations_end - kMaxRangeToCheck, mid);
+  uptr r3_end = annotations_end;
   for (uptr i = r1_beg; i < r1_end; i++)
     if (AddressIsPoisoned(i))
       return reinterpret_cast<const void *>(i);
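
The relaxed parameter check and the new misaligned-granule handling above extend the public container annotation to buffers whose bounds are not granule-aligned. A minimal usage sketch of that entry point, as a container implementation would call it (declared in <sanitizer/common_interface_defs.h>; the buffer and sizes here are made up):

  #include <sanitizer/common_interface_defs.h>
  #include <cstdlib>

  int main() {
    const size_t capacity = 100;  // note: end is not granule-aligned
    char *beg = static_cast<char *>(malloc(capacity));
    char *end = beg + capacity;

    // Container is empty: poison the whole storage [beg, end).
    __sanitizer_annotate_contiguous_container(beg, end, /*old_mid=*/end,
                                              /*new_mid=*/beg);

    // Container grows to 10 elements: unpoison [beg, beg + 10).
    __sanitizer_annotate_contiguous_container(beg, end, beg, beg + 10);
    // Accessing beg[20] here would be reported as container-overflow.

    // Unpoison everything again before handing the memory back.
    __sanitizer_annotate_contiguous_container(beg, end, beg + 10, end);
    free(beg);
  }
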
diff --git a/libsanitizer/asan/asan_rtl.cpp b/libsanitizer/asan/asan_rtl.cpp
index 88f66de5669..853083182b4 100644
--- a/libsanitizer/asan/asan_rtl.cpp
+++ b/libsanitizer/asan/asan_rtl.cpp
@@ -288,11 +288,18 @@ static NOINLINE void force_interface_symbols() {
     case 38: __asan_region_is_poisoned(0, 0); break;
     case 39: __asan_describe_address(0); break;
     case 40: __asan_set_shadow_00(0, 0); break;
-    case 41: __asan_set_shadow_f1(0, 0); break;
-    case 42: __asan_set_shadow_f2(0, 0); break;
-    case 43: __asan_set_shadow_f3(0, 0); break;
-    case 44: __asan_set_shadow_f5(0, 0); break;
-    case 45: __asan_set_shadow_f8(0, 0); break;
+    case 41: __asan_set_shadow_01(0, 0); break;
+    case 42: __asan_set_shadow_02(0, 0); break;
+    case 43: __asan_set_shadow_03(0, 0); break;
+    case 44: __asan_set_shadow_04(0, 0); break;
+    case 45: __asan_set_shadow_05(0, 0); break;
+    case 46: __asan_set_shadow_06(0, 0); break;
+    case 47: __asan_set_shadow_07(0, 0); break;
+    case 48: __asan_set_shadow_f1(0, 0); break;
+    case 49: __asan_set_shadow_f2(0, 0); break;
+    case 50: __asan_set_shadow_f3(0, 0); break;
+    case 51: __asan_set_shadow_f5(0, 0); break;
+    case 52: __asan_set_shadow_f8(0, 0); break;
   }
   // clang-format on
 }
diff --git a/libsanitizer/hwasan/hwasan.cpp b/libsanitizer/hwasan/hwasan.cpp
index bb946c2ffe0..9db4fb09409 100644
--- a/libsanitizer/hwasan/hwasan.cpp
+++ b/libsanitizer/hwasan/hwasan.cpp
@@ -340,7 +340,13 @@ __attribute__((constructor(0))) void __hwasan_init() {
   DisableCoreDumperIfNecessary();
 
   InitInstrumentation();
-  InitLoadedGlobals();
+  if constexpr (!SANITIZER_FUCHSIA) {
+    // Fuchsia's libc provides a hook (__sanitizer_module_loaded) that runs on
+    // the startup path which calls into __hwasan_library_loaded on all
+    // initially loaded modules, so explicitly registering the globals here
+    // isn't needed.
+    InitLoadedGlobals();
+  }
 
   // Needs to be called here because flags()->random_tags might not have been
   // initialized when InitInstrumentation() was called.
diff --git a/libsanitizer/hwasan/hwasan_flags.inc b/libsanitizer/hwasan/hwasan_flags.inc
index 18ea47f981b..4a226ee2ab8 100644
--- a/libsanitizer/hwasan/hwasan_flags.inc
+++ b/libsanitizer/hwasan/hwasan_flags.inc
@@ -39,7 +39,7 @@ HWASAN_FLAG(
 
 HWASAN_FLAG(bool, free_checks_tail_magic, 1,
     "If set, free() will check the magic values "
-    "to the right of the allocated object "
+    "after the allocated object "
     "if the allocation size is not a divident of the granule size")
 HWASAN_FLAG(
     int, max_free_fill_size, 0,
diff --git a/libsanitizer/hwasan/hwasan_fuchsia.cpp b/libsanitizer/hwasan/hwasan_fuchsia.cpp
index 967c796c339..858fac05af2 100644
--- a/libsanitizer/hwasan/hwasan_fuchsia.cpp
+++ b/libsanitizer/hwasan/hwasan_fuchsia.cpp
@@ -224,6 +224,10 @@ void __sanitizer_thread_exit_hook(void *hook, thrd_t self) {
   __hwasan::ThreadExitHook(hook, self);
 }
 
+void __sanitizer_module_loaded(const struct dl_phdr_info *info, size_t) {
+  __hwasan_library_loaded(info->dlpi_addr, info->dlpi_phdr, info->dlpi_phnum);
+}
+
 }  // extern "C"
 
 #endif  // SANITIZER_FUCHSIA
diff --git a/libsanitizer/hwasan/hwasan_report.cpp b/libsanitizer/hwasan/hwasan_report.cpp
index fe769589186..de082150b70 100644
--- a/libsanitizer/hwasan/hwasan_report.cpp
+++ b/libsanitizer/hwasan/hwasan_report.cpp
@@ -309,16 +309,16 @@ static void ShowHeapOrGlobalCandidate(uptr untagged_addr, tag_t *candidate,
       whence = "inside";
     } else if (candidate == left) {
       offset = untagged_addr - chunk.End();
-      whence = "to the right of";
+      whence = "after";
     } else {
       offset = chunk.Beg() - untagged_addr;
-      whence = "to the left of";
+      whence = "before";
     }
     Printf("%s", d.Error());
     Printf("\nCause: heap-buffer-overflow\n");
     Printf("%s", d.Default());
     Printf("%s", d.Location());
-    Printf("%p is located %zd bytes %s %zd-byte region [%p,%p)\n",
+    Printf("%p is located %zd bytes %s a %zd-byte region [%p,%p)\n",
            untagged_addr, offset, whence, chunk.UsedSize(), chunk.Beg(),
            chunk.End());
     Printf("%s", d.Allocation());
@@ -340,27 +340,27 @@ static void ShowHeapOrGlobalCandidate(uptr untagged_addr, tag_t *candidate,
     Printf("%s", d.Location());
     if (sym->SymbolizeData(mem, &info) && info.start) {
       Printf(
-          "%p is located %zd bytes to the %s of %zd-byte global variable "
+          "%p is located %zd bytes %s a %zd-byte global variable "
           "%s [%p,%p) in %s\n",
           untagged_addr,
           candidate == left ? untagged_addr - (info.start + info.size)
                             : info.start - untagged_addr,
-          candidate == left ? "right" : "left", info.size, info.name,
+          candidate == left ? "after" : "before", info.size, info.name,
           info.start, info.start + info.size, module_name);
     } else {
       uptr size = GetGlobalSizeFromDescriptor(mem);
       if (size == 0)
         // We couldn't find the size of the global from the descriptors.
         Printf(
-            "%p is located to the %s of a global variable in "
+            "%p is located %s a global variable in "
             "\n    #0 0x%x (%s+0x%x)\n",
-            untagged_addr, candidate == left ? "right" : "left", mem,
+            untagged_addr, candidate == left ? "after" : "before", mem,
             module_name, module_address);
       else
         Printf(
-            "%p is located to the %s of a %zd-byte global variable in "
+            "%p is located %s a %zd-byte global variable in "
             "\n    #0 0x%x (%s+0x%x)\n",
-            untagged_addr, candidate == left ? "right" : "left", size, mem,
+            untagged_addr, candidate == left ? "after" : "before", size, mem,
             module_name, module_address);
     }
     Printf("%s", d.Default());
@@ -459,7 +459,7 @@ void PrintAddressDescription(
       Printf("%s", d.Error());
       Printf("\nCause: use-after-free\n");
       Printf("%s", d.Location());
-      Printf("%p is located %zd bytes inside of %zd-byte region [%p,%p)\n",
+      Printf("%p is located %zd bytes inside a %zd-byte region [%p,%p)\n",
              untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
              har.requested_size, UntagAddr(har.tagged_addr),
              UntagAddr(har.tagged_addr) + har.requested_size);
@@ -518,7 +518,7 @@ static void PrintTagInfoAroundAddr(tag_t *tag_ptr, uptr num_rows,
   InternalScopedString s;
   for (tag_t *row = beg_row; row < end_row; row += row_len) {
     s.append("%s", row == center_row_beg ? "=>" : "  ");
-    s.append("%p:", (void *)row);
+    s.append("%p:", (void *)ShadowToMem(reinterpret_cast<uptr>(row)));
     for (uptr i = 0; i < row_len; i++) {
       s.append("%s", row + i == tag_ptr ? "[" : " ");
       print_tag(s, &row[i]);
@@ -660,7 +660,7 @@ void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
     s.append("%s ", actual_expected[i] != tail[i] ? "^^" : "  ");
 
   s.append("\nThis error occurs when a buffer overflow overwrites memory\n"
-    "to the right of a heap object, but within the %zd-byte granule, e.g.\n"
+    "after a heap object, but within the %zd-byte granule, e.g.\n"
     "   char *x = new char[20];\n"
     "   x[25] = 42;\n"
     "%s does not detect such bugs in uninstrumented code at the time of write,"
diff --git a/libsanitizer/include/sanitizer/msan_interface.h b/libsanitizer/include/sanitizer/msan_interface.h
index eeb39fbed8b..854b12cda36 100644
--- a/libsanitizer/include/sanitizer/msan_interface.h
+++ b/libsanitizer/include/sanitizer/msan_interface.h
@@ -92,6 +92,8 @@ extern "C" {
 
   /* Tell MSan about newly destroyed memory. Mark memory as uninitialized. */
   void __sanitizer_dtor_callback(const volatile void* data, size_t size);
+  void __sanitizer_dtor_callback_fields(const volatile void *data, size_t size);
+  void __sanitizer_dtor_callback_vptr(const volatile void *data);
 
   /* This function may be optionally provided by user and should return
      a string containing Msan runtime options. See msan_flags.h for details. */
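
The two new declarations split the existing destructor callback: with Clang's -fsanitize-memory-use-after-dtor instrumentation, destructors are compiled to call these hooks so the object's storage is re-marked as uninitialized once it is destroyed. A hedged sketch of the bug class this supports (the callbacks themselves are emitted by the compiler, not called by hand; Widget is a made-up type):

  struct Widget {
    int id = 42;
    ~Widget() {}  // instrumented dtor ends with a __sanitizer_dtor_callback_* call
  };

  int main() {
    Widget *w = new Widget;
    w->~Widget();        // members are now poisoned as uninitialized
    int copy = w->id;    // use-after-dtor: MSan reports a use of poisoned memory
    (void)copy;
    operator delete(w);  // release the raw storage without re-running the dtor
    return 0;
  }
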
diff --git a/libsanitizer/lsan/lsan_common.cpp b/libsanitizer/lsan/lsan_common.cpp
index 94bb3cca008..576274608c8 100644
--- a/libsanitizer/lsan/lsan_common.cpp
+++ b/libsanitizer/lsan/lsan_common.cpp
@@ -26,6 +26,18 @@
 #include "sanitizer_common/sanitizer_tls_get_addr.h"
 
 #if CAN_SANITIZE_LEAKS
+
+#  if SANITIZER_APPLE
+// https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L127
+#    if SANITIZER_IOS && !SANITIZER_IOSSIM
+#      define OBJC_DATA_MASK 0x0000007ffffffff8UL
+#    else
+#      define OBJC_DATA_MASK 0x00007ffffffffff8UL
+#    endif
+// https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L139
+#    define OBJC_FAST_IS_RW 0x8000000000000000UL
+#  endif
+
 namespace __lsan {
 
 // This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
@@ -160,6 +172,17 @@ static uptr GetCallerPC(const StackTrace &stack) {
   return 0;
 }
 
+#  if SANITIZER_APPLE
+// Objective-C class data pointers are stored with flags in the low bits, so
+// they need to be transformed back into something that looks like a pointer.
+static inline void *MaybeTransformPointer(void *p) {
+  uptr ptr = reinterpret_cast<uptr>(p);
+  if ((ptr & OBJC_FAST_IS_RW) == OBJC_FAST_IS_RW)
+    ptr &= OBJC_DATA_MASK;
+  return reinterpret_cast<void *>(ptr);
+}
+#  endif
+
 // On Linux, treats all chunks allocated from ld-linux.so as reachable, which
 // covers dynamically allocated TLS blocks, internal dynamic loader's loaded
 // modules accounting etc.
@@ -276,6 +299,9 @@ void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
     pp = pp + alignment - pp % alignment;
   for (; pp + sizeof(void *) <= end; pp += alignment) {
     void *p = *reinterpret_cast<void **>(pp);
+#  if SANITIZER_APPLE
+    p = MaybeTransformPointer(p);
+#  endif
     if (!MaybeUserPointer(reinterpret_cast<uptr>(p)))
       continue;
     uptr chunk = PointsIntoChunk(p);
@@ -332,7 +358,8 @@ void ForEachExtraStackRangeCb(uptr begin, uptr end, void *arg) {
 #  if SANITIZER_FUCHSIA
 
 // Fuchsia handles all threads together with its own callback.
-static void ProcessThreads(SuspendedThreadsList const &, Frontier *) {}
+static void ProcessThreads(SuspendedThreadsList const &, Frontier *, tid_t,
+                           uptr) {}
 
 #  else
 
@@ -365,7 +392,8 @@ static void ProcessThreadRegistry(Frontier *frontier) {
 
 // Scans thread data (stacks and TLS) for heap pointers.
 static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
-                           Frontier *frontier) {
+                           Frontier *frontier, tid_t caller_tid,
+                           uptr caller_sp) {
   InternalMmapVector<uptr> registers;
   for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
     tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
@@ -392,6 +420,9 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
         continue;
       sp = stack_begin;
     }
+    if (suspended_threads.GetThreadID(i) == caller_tid) {
+      sp = caller_sp;
+    }
 
     if (flags()->use_registers && have_registers) {
       uptr registers_begin = reinterpret_cast<uptr>(registers.data());
@@ -572,7 +603,8 @@ static void CollectIgnoredCb(uptr chunk, void *arg) {
 
 // Sets the appropriate tag on each chunk.
 static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
-                              Frontier *frontier) {
+                              Frontier *frontier, tid_t caller_tid,
+                              uptr caller_sp) {
   const InternalMmapVector<u32> &suppressed_stacks =
       GetSuppressionContext()->GetSortedSuppressedStacks();
   if (!suppressed_stacks.empty()) {
@@ -581,7 +613,7 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
   }
   ForEachChunk(CollectIgnoredCb, frontier);
   ProcessGlobalRegions(frontier);
-  ProcessThreads(suspended_threads, frontier);
+  ProcessThreads(suspended_threads, frontier, caller_tid, caller_sp);
   ProcessRootRegions(frontier);
   FloodFillTag(frontier, kReachable);
 
@@ -677,7 +709,8 @@ static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
   CHECK(param);
   CHECK(!param->success);
   ReportUnsuspendedThreads(suspended_threads);
-  ClassifyAllChunks(suspended_threads, &param->frontier);
+  ClassifyAllChunks(suspended_threads, &param->frontier, param->caller_tid,
+                    param->caller_sp);
   ForEachChunk(CollectLeaksCb, &param->leaks);
   // Clean up for subsequent leak checks. This assumes we did not overwrite any
   // kIgnored tags.
@@ -716,6 +749,12 @@ static bool CheckForLeaks() {
   for (int i = 0;; ++i) {
     EnsureMainThreadIDIsCorrect();
     CheckForLeaksParam param;
+    // Capture calling thread's stack pointer early, to avoid false negatives.
+    // Old frame with dead pointers might be overlapped by new frame inside
+    // CheckForLeaks which does not use bytes with pointers before the
+    // threads are suspended and stack pointers captured.
+    param.caller_tid = GetTid();
+    param.caller_sp = reinterpret_cast<uptr>(__builtin_frame_address(0));
     LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
     if (!param.success) {
       Report("LeakSanitizer has encountered a fatal error.\n");
diff --git a/libsanitizer/lsan/lsan_common.h b/libsanitizer/lsan/lsan_common.h
index d7153751fae..20ef7c458b4 100644
--- a/libsanitizer/lsan/lsan_common.h
+++ b/libsanitizer/lsan/lsan_common.h
@@ -145,6 +145,8 @@ struct RootRegion {
 struct CheckForLeaksParam {
   Frontier frontier;
   LeakedChunks leaks;
+  tid_t caller_tid;
+  uptr caller_sp;
   bool success = false;
 };
 
diff --git a/libsanitizer/lsan/lsan_common_mac.cpp b/libsanitizer/lsan/lsan_common_mac.cpp
index 26b623fb1d4..b6b15095744 100644
--- a/libsanitizer/lsan/lsan_common_mac.cpp
+++ b/libsanitizer/lsan/lsan_common_mac.cpp
@@ -17,21 +17,36 @@
 
 #if CAN_SANITIZE_LEAKS && SANITIZER_APPLE
 
-#include "sanitizer_common/sanitizer_allocator_internal.h"
-#include "lsan_allocator.h"
+#  include <mach/mach.h>
+#  include <mach/vm_statistics.h>
+#  include <pthread.h>
 
-#include <pthread.h>
+#  include "lsan_allocator.h"
+#  include "sanitizer_common/sanitizer_allocator_internal.h"
+namespace __lsan {
 
-#include <mach/mach.h>
+enum class SeenRegion {
+  None = 0,
+  AllocOnce = 1 << 0,
+  LibDispatch = 1 << 1,
+  Foundation = 1 << 2,
+  All = AllocOnce | LibDispatch | Foundation
+};
+
+inline SeenRegion operator|(SeenRegion left, SeenRegion right) {
+  return static_cast<SeenRegion>(static_cast<int>(left) |
+                                 static_cast<int>(right));
+}
 
-// Only introduced in Mac OS X 10.9.
-#ifdef VM_MEMORY_OS_ALLOC_ONCE
-static const int kSanitizerVmMemoryOsAllocOnce = VM_MEMORY_OS_ALLOC_ONCE;
-#else
-static const int kSanitizerVmMemoryOsAllocOnce = 73;
-#endif
+inline SeenRegion &operator|=(SeenRegion &left, const SeenRegion &right) {
+  left = left | right;
+  return left;
+}
 
-namespace __lsan {
+struct RegionScanState {
+  SeenRegion seen_regions = SeenRegion::None;
+  bool in_libdispatch = false;
+};
 
 typedef struct {
   int disable_counter;
@@ -148,6 +163,7 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) {
 
   InternalMmapVectorNoCtor<RootRegion> const *root_regions = GetRootRegions();
 
+  RegionScanState scan_state;
   while (err == KERN_SUCCESS) {
     vm_size_t size = 0;
     unsigned depth = 1;
@@ -157,17 +173,35 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) {
                                (vm_region_info_t)&info, &count);
 
     uptr end_address = address + size;
-
-    // libxpc stashes some pointers in the Kernel Alloc Once page,
-    // make sure not to report those as leaks.
-    if (info.user_tag == kSanitizerVmMemoryOsAllocOnce) {
+    if (info.user_tag == VM_MEMORY_OS_ALLOC_ONCE) {
+      // libxpc stashes some pointers in the Kernel Alloc Once page,
+      // make sure not to report those as leaks.
+      scan_state.seen_regions |= SeenRegion::AllocOnce;
       ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
                            kReachable);
+    } else if (info.user_tag == VM_MEMORY_FOUNDATION) {
+      // Objective-C block trampolines use the Foundation region.
+      scan_state.seen_regions |= SeenRegion::Foundation;
+      ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
+                           kReachable);
+    } else if (info.user_tag == VM_MEMORY_LIBDISPATCH) {
+      // Dispatch continuations use the libdispatch region. Empirically, there
+      // can be more than one region with this tag, so we'll optimistically
+      // assume that they're continguous. Otherwise, we would need to scan every
+      // region to ensure we find them all.
+      scan_state.in_libdispatch = true;
+      ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
+                           kReachable);
+    } else if (scan_state.in_libdispatch) {
+      scan_state.seen_regions |= SeenRegion::LibDispatch;
+      scan_state.in_libdispatch = false;
+    }
 
-      // Recursing over the full memory map is very slow, break out
-      // early if we don't need the full iteration.
-      if (!flags()->use_root_regions || !root_regions->size())
-        break;
+    // Recursing over the full memory map is very slow, break out
+    // early if we don't need the full iteration.
+    if (scan_state.seen_regions == SeenRegion::All &&
+        !(flags()->use_root_regions && root_regions->size() > 0)) {
+      break;
     }
 
     // This additional root region scan is required on Darwin in order to
@@ -199,6 +233,6 @@ void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
   StopTheWorld(callback, argument);
 }
 
-} // namespace __lsan
+}  // namespace __lsan
 
 #endif // CAN_SANITIZE_LEAKS && SANITIZER_APPLE
diff --git a/libsanitizer/sanitizer_common/sanitizer_common.h b/libsanitizer/sanitizer_common/sanitizer_common.h
index 08c6062ba06..b462e388c23 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common.h
+++ b/libsanitizer/sanitizer_common/sanitizer_common.h
@@ -709,6 +709,7 @@ enum ModuleArch {
   kModuleArchARMV7S,
   kModuleArchARMV7K,
   kModuleArchARM64,
+  kModuleArchLoongArch64,
   kModuleArchRISCV64,
   kModuleArchHexagon
 };
@@ -781,6 +782,8 @@ inline const char *ModuleArchToString(ModuleArch arch) {
       return "armv7k";
     case kModuleArchARM64:
       return "arm64";
+    case kModuleArchLoongArch64:
+      return "loongarch64";
     case kModuleArchRISCV64:
       return "riscv64";
     case kModuleArchHexagon:
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc
index cd9235e503b..ba4b80081f0 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc
+++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc
@@ -6727,7 +6727,7 @@ INTERCEPTOR(int, sem_init, __sanitizer_sem_t *s, int pshared, unsigned value) {
   COMMON_INTERCEPTOR_ENTER(ctx, sem_init, s, pshared, value);
   // Workaround a bug in glibc's "old" semaphore implementation by
   // zero-initializing the sem_t contents. This has to be done here because
-  // interceptors bind to the lowest symbols version by default, hitting the
+  // interceptors bind to the lowest version before glibc 2.36, hitting the
   // buggy code path while the non-sanitized build of the same code works fine.
   REAL(memset)(s, 0, sizeof(*s));
   int res = REAL(sem_init)(s, pshared, value);
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_loongarch64.inc.S b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_loongarch64.inc.S
index 05192485d59..68782acb379 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_loongarch64.inc.S
+++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_loongarch64.inc.S
@@ -5,12 +5,6 @@
 ASM_HIDDEN(COMMON_INTERCEPTOR_SPILL_AREA)
 ASM_HIDDEN(_ZN14__interception10real_vforkE)
 
-.bss
-.type _ZN14__interception10real_vforkE, @object
-.size _ZN14__interception10real_vforkE, 8
-_ZN14__interception10real_vforkE:
-        .zero     8
-
 .text
 .globl ASM_WRAPPER_NAME(vfork)
 ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
diff --git a/libsanitizer/sanitizer_common/sanitizer_linux.cpp b/libsanitizer/sanitizer_common/sanitizer_linux.cpp
index dc2ea933fad..f23ea9da371 100644
--- a/libsanitizer/sanitizer_common/sanitizer_linux.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_linux.cpp
@@ -1105,7 +1105,7 @@ uptr GetMaxVirtualAddress() {
 #if SANITIZER_NETBSD && defined(__x86_64__)
   return 0x7f7ffffff000ULL;  // (0x00007f8000000000 - PAGE_SIZE)
 #elif SANITIZER_WORDSIZE == 64
-# if defined(__powerpc64__) || defined(__aarch64__)
+# if defined(__powerpc64__) || defined(__aarch64__) || defined(__loongarch__)
   // On PowerPC64 we have two different address space layouts: 44- and 46-bit.
   // We somehow need to figure out which one we are using now and choose
   // one of 0x00000fffffffffffUL and 0x00003fffffffffffUL.
@@ -1113,6 +1113,7 @@ uptr GetMaxVirtualAddress() {
   // of the address space, so simply checking the stack address is not enough.
   // This should (does) work for both PowerPC64 Endian modes.
   // Similarly, aarch64 has multiple address space layouts: 39, 42 and 47-bit.
+  // loongarch64 also has multiple address space layouts: default is 47-bit.
   return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1;
 #elif SANITIZER_RISCV64
   return (1ULL << 38) - 1;
@@ -1955,6 +1956,13 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
   u64 esr;
   if (!Aarch64GetESR(ucontext, &esr)) return Unknown;
   return esr & ESR_ELx_WNR ? Write : Read;
+#elif defined(__loongarch__)
+  u32 flags = ucontext->uc_mcontext.__flags;
+  if (flags & SC_ADDRERR_RD)
+    return SignalContext::Read;
+  if (flags & SC_ADDRERR_WR)
+    return SignalContext::Write;
+  return SignalContext::Unknown;
 #elif defined(__sparc__)
   // Decode the instruction to determine the access type.
   // From OpenSolaris $SRC/uts/sun4/os/trap.c (get_accesstype).
diff --git a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp
index 56d231643ba..d74851c43e1 100644
--- a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp
@@ -822,13 +822,9 @@ u32 GetNumberOfCPUs() {
 #elif SANITIZER_SOLARIS
   return sysconf(_SC_NPROCESSORS_ONLN);
 #else
-#if defined(CPU_COUNT)
   cpu_set_t CPUs;
   CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
   return CPU_COUNT(&CPUs);
-#else
-  return 1;
-#endif
 #endif
 }
 
diff --git a/libsanitizer/sanitizer_common/sanitizer_mac.cpp b/libsanitizer/sanitizer_common/sanitizer_mac.cpp
index 9ee799be3c8..23c4c6619de 100644
--- a/libsanitizer/sanitizer_common/sanitizer_mac.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_mac.cpp
@@ -38,7 +38,7 @@
 extern char **environ;
 #endif
 
-#if defined(__has_include) && __has_include(<os/trace.h>) && defined(__BLOCKS__)
+#if defined(__has_include) && __has_include(<os/trace.h>)
 #define SANITIZER_OS_TRACE 1
 #include <os/trace.h>
 #else
@@ -71,15 +71,7 @@ extern "C" {
 #include <mach/mach_time.h>
 #include <mach/vm_statistics.h>
 #include <malloc/malloc.h>
-#if defined(__has_builtin) && __has_builtin(__builtin_os_log_format)
-# include <os/log.h>
-#else
-   /* Without support for __builtin_os_log_format, fall back to the older
-      method.  */
-# define OS_LOG_DEFAULT 0
-# define os_log_error(A,B,C) \
-  asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", (C));
-#endif
+#include <os/log.h>
 #include <pthread.h>
 #include <pthread/introspection.h>
 #include <sched.h>
@@ -1259,6 +1251,7 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
   mach_vm_address_t start_address =
     (SANITIZER_WORDSIZE == 32) ? 0x000000001000 : 0x000100000000;
 
+  const mach_vm_address_t max_vm_address = GetMaxVirtualAddress() + 1;
   mach_vm_address_t address = start_address;
   mach_vm_address_t free_begin = start_address;
   kern_return_t kr = KERN_SUCCESS;
@@ -1273,7 +1266,7 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                                 (vm_region_info_t)&vminfo, &count);
     if (kr == KERN_INVALID_ADDRESS) {
       // No more regions beyond "address", consider the gap at the end of VM.
-      address = GetMaxVirtualAddress() + 1;
+      address = max_vm_address;
       vmsize = 0;
     } else {
       if (max_occupied_addr) *max_occupied_addr = address + vmsize;
@@ -1281,7 +1274,7 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
     if (free_begin != address) {
       // We found a free region [free_begin..address-1].
       uptr gap_start = RoundUpTo((uptr)free_begin + left_padding, alignment);
-      uptr gap_end = RoundDownTo((uptr)address, alignment);
+      uptr gap_end = RoundDownTo((uptr)Min(address, max_vm_address), alignment);
       uptr gap_size = gap_end > gap_start ? gap_end - gap_start : 0;
       if (size < gap_size) {
         return gap_start;
diff --git a/libsanitizer/sanitizer_common/sanitizer_mac.h b/libsanitizer/sanitizer_common/sanitizer_mac.h
index 1cf2e298cc9..f0a97d098ee 100644
--- a/libsanitizer/sanitizer_common/sanitizer_mac.h
+++ b/libsanitizer/sanitizer_common/sanitizer_mac.h
@@ -14,26 +14,6 @@
 
 #include "sanitizer_common.h"
 #include "sanitizer_platform.h"
-
-/* TARGET_OS_OSX is not present in SDKs before Darwin16 (macOS 10.12) use
-   TARGET_OS_MAC (we have no support for iOS in any form for these versions,
-   so there's no ambiguity).  */
-#if !defined(TARGET_OS_OSX) && TARGET_OS_MAC
-# define TARGET_OS_OSX 1
-#endif
-
-/* Other TARGET_OS_xxx are not present on earlier versions, define them to
-   0 (we have no support for them; they are not valid targets anyway).  */
-#ifndef TARGET_OS_IOS
-#define TARGET_OS_IOS 0
-#endif
-#ifndef TARGET_OS_TV
-#define TARGET_OS_TV 0
-#endif
-#ifndef TARGET_OS_WATCH
-#define TARGET_OS_WATCH 0
-#endif
-
 #if SANITIZER_APPLE
 #include "sanitizer_posix.h"
 
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform.h b/libsanitizer/sanitizer_common/sanitizer_platform.h
index 32005eef08c..7ecc465bea9 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform.h
+++ b/libsanitizer/sanitizer_common/sanitizer_platform.h
@@ -286,8 +286,8 @@
 #ifndef SANITIZER_CAN_USE_ALLOCATOR64
 #  if (SANITIZER_ANDROID && defined(__aarch64__)) || SANITIZER_FUCHSIA
 #    define SANITIZER_CAN_USE_ALLOCATOR64 1
-#  elif defined(__mips64) || defined(__aarch64__) || defined(__i386__) || \
-      defined(__arm__) || SANITIZER_RISCV64 || defined(__hexagon__)
+#  elif defined(__mips64) || defined(__arm__) || defined(__i386__) || \
+      SANITIZER_RISCV64 || defined(__hexagon__)
 #    define SANITIZER_CAN_USE_ALLOCATOR64 0
 #  else
 #    define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64)
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp
index c278c8797f7..bf0f355847c 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp
@@ -26,10 +26,7 @@
 
 // With old kernels (and even new kernels on powerpc) asm/stat.h uses types that
 // are not defined anywhere in userspace headers. Fake them. This seems to work
-// fine with newer headers, too.  Beware that with <sys/stat.h>, struct stat
-// takes the form of struct stat64 on 32-bit platforms if _FILE_OFFSET_BITS=64.
-// Also, for some platforms (e.g. mips) there are additional members in the
-// <sys/stat.h> struct stat:s.
+// fine with newer headers, too.
 #include <linux/posix_types.h>
 #  if defined(__x86_64__) || defined(__mips__) || defined(__hexagon__)
 #    include <sys/stat.h>
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
index bd5692ed511..44dd3d9e22d 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
@@ -101,7 +101,7 @@ const unsigned struct_kernel_stat64_sz = 104;
 const unsigned struct_kernel_stat_sz =
     SANITIZER_ANDROID
         ? FIRST_32_SECOND_64(104, 128)
-        : FIRST_32_SECOND_64((_MIPS_SIM == _ABIN32) ? 160 : 144, 216);
+        : FIRST_32_SECOND_64((_MIPS_SIM == _ABIN32) ? 176 : 160, 216);
 const unsigned struct_kernel_stat64_sz = 104;
 #elif defined(__s390__) && !defined(__s390x__)
 const unsigned struct_kernel_stat_sz = 64;
diff --git a/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cpp b/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cpp
index ba4259acd46..4b0e6781976 100644
--- a/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cpp
@@ -146,13 +146,8 @@ static bool IsDyldHdr(const mach_header *hdr) {
 // until we hit a Mach header matching dyld instead. These recurse
 // calls are expensive, but the first memory map generation occurs
 // early in the process, when dyld is one of the only images loaded,
-// so it will be hit after only a few iterations.  These assumptions don't
-// hold on macOS 13+ anymore (dyld itself has moved into the shared cache).
-
-// FIXME: Unfortunately, the upstream revised version to deal with macOS 13+
-// is incompatible with GCC and also uses APIs not available on earlier
-// systems which we support; backed out for now.
-
+// so it will be hit after only a few iterations.  These assumptions don't hold
+// on macOS 13+ anymore (dyld itself has moved into the shared cache).
 static mach_header *GetDyldImageHeaderViaVMRegion() {
   vm_address_t address = 0;
 
@@ -176,17 +171,64 @@ static mach_header *GetDyldImageHeaderViaVMRegion() {
   }
 }
 
+extern "C" {
+struct dyld_shared_cache_dylib_text_info {
+  uint64_t version;  // current version 2
+  // following fields all exist in version 1
+  uint64_t loadAddressUnslid;
+  uint64_t textSegmentSize;
+  uuid_t dylibUuid;
+  const char *path;  // pointer invalid at end of iterations
+  // following fields all exist in version 2
+  uint64_t textSegmentOffset;  // offset from start of cache
+};
+typedef struct dyld_shared_cache_dylib_text_info
+    dyld_shared_cache_dylib_text_info;
+
+extern bool _dyld_get_shared_cache_uuid(uuid_t uuid);
+extern const void *_dyld_get_shared_cache_range(size_t *length);
+extern int dyld_shared_cache_iterate_text(
+    const uuid_t cacheUuid,
+    void (^callback)(const dyld_shared_cache_dylib_text_info *info));
+}  // extern "C"
+
+static mach_header *GetDyldImageHeaderViaSharedCache() {
+  uuid_t uuid;
+  bool hasCache = _dyld_get_shared_cache_uuid(uuid);
+  if (!hasCache)
+    return nullptr;
+
+  size_t cacheLength;
+  __block uptr cacheStart = (uptr)_dyld_get_shared_cache_range(&cacheLength);
+  CHECK(cacheStart && cacheLength);
+
+  __block mach_header *dyldHdr = nullptr;
+  int res = dyld_shared_cache_iterate_text(
+      uuid, ^(const dyld_shared_cache_dylib_text_info *info) {
+        CHECK_GE(info->version, 2);
+        mach_header *hdr =
+            (mach_header *)(cacheStart + info->textSegmentOffset);
+        if (IsDyldHdr(hdr))
+          dyldHdr = hdr;
+      });
+  CHECK_EQ(res, 0);
+
+  return dyldHdr;
+}
+
 const mach_header *get_dyld_hdr() {
   if (!dyld_hdr) {
     // On macOS 13+, dyld itself has moved into the shared cache.  Looking it up
     // via vm_region_recurse_64() causes spins/hangs/crashes.
-    // FIXME: find a way to do this compatible with GCC.
     if (GetMacosAlignedVersion() >= MacosVersion(13, 0)) {
+      dyld_hdr = GetDyldImageHeaderViaSharedCache();
+      if (!dyld_hdr) {
         VReport(1,
-                "looking up the dyld image header in the shared cache on "
-                "macOS 13+ is not yet supported.  Falling back to "
+                "Failed to lookup the dyld image header in the shared cache on "
+                "macOS 13+ (or no shared cache in use).  Falling back to "
                 "lookup via vm_region_recurse_64().\n");
         dyld_hdr = GetDyldImageHeaderViaVMRegion();
+      }
     } else {
       dyld_hdr = GetDyldImageHeaderViaVMRegion();
     }
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cpp b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cpp
index 661495e2340..d24fae98213 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cpp
@@ -87,8 +87,8 @@ static inline uhwptr *GetCanonicFrame(uptr bp,
   // Nope, this does not look right either. This means the frame after next does
   // not have a valid frame pointer, but we can still extract the caller PC.
   // Unfortunately, there is no way to decide between GCC and LLVM frame
-  // layouts. Assume GCC.
-  return bp_prev - 1;
+  // layouts. Assume LLVM.
+  return bp_prev;
 #else
   return (uhwptr*)bp;
 #endif
@@ -111,21 +111,14 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
          IsAligned((uptr)frame, sizeof(*frame)) &&
          size < max_depth) {
 #ifdef __powerpc__
-    // PowerPC ABIs specify that the return address is saved on the
-    // *caller's* stack frame.  Thus we must dereference the back chain
-    // to find the caller frame before extracting it.
+    // PowerPC ABIs specify that the return address is saved at offset
+    // 16 of the *caller's* stack frame.  Thus we must dereference the
+    // back chain to find the caller frame before extracting it.
     uhwptr *caller_frame = (uhwptr*)frame[0];
     if (!IsValidFrame((uptr)caller_frame, stack_top, bottom) ||
         !IsAligned((uptr)caller_frame, sizeof(uhwptr)))
       break;
-    // For most ABIs the offset where the return address is saved is two
-    // register sizes.  The exception is the SVR4 ABI, which uses an
-    // offset of only one register size.
-#ifdef _CALL_SYSV
-    uhwptr pc1 = caller_frame[1];
-#else
     uhwptr pc1 = caller_frame[2];
-#endif
 #elif defined(__s390__)
     uhwptr pc1 = frame[14];
 #elif defined(__loongarch__) || defined(__riscv)
diff --git a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_mac.cpp b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_mac.cpp
index 87f5250db64..3ebeac52280 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_mac.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_mac.cpp
@@ -87,11 +87,13 @@ void StopTheWorld(StopTheWorldCallback callback, void *argument) {
 
 #if defined(__x86_64__)
 typedef x86_thread_state64_t regs_struct;
+#define regs_flavor x86_THREAD_STATE64
 
 #define SP_REG __rsp
 
 #elif defined(__aarch64__)
 typedef arm_thread_state64_t regs_struct;
+#define regs_flavor ARM_THREAD_STATE64
 
 # if __DARWIN_UNIX03
 #  define SP_REG __sp
@@ -101,6 +103,7 @@ typedef arm_thread_state64_t regs_struct;
 
 #elif defined(__i386)
 typedef x86_thread_state32_t regs_struct;
+#define regs_flavor x86_THREAD_STATE32
 
 #define SP_REG __esp
 
@@ -146,8 +149,8 @@ PtraceRegistersStatus SuspendedThreadsListMac::GetRegistersAndSP(
   thread_t thread = GetThread(index);
   regs_struct regs;
   int err;
-  mach_msg_type_number_t reg_count = MACHINE_THREAD_STATE_COUNT;
-  err = thread_get_state(thread, MACHINE_THREAD_STATE, (thread_state_t)&regs,
+  mach_msg_type_number_t reg_count = sizeof(regs) / sizeof(natural_t);
+  err = thread_get_state(thread, regs_flavor, (thread_state_t)&regs,
                          &reg_count);
   if (err != KERN_SUCCESS) {
     VReport(1, "Error - unable to get registers for a thread\n");
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cpp b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cpp
index 461fe966136..a6f82ced203 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cpp
@@ -256,6 +256,8 @@ class LLVMSymbolizerProcess final : public SymbolizerProcess {
     const char* const kSymbolizerArch = "--default-arch=x86_64";
 #elif defined(__i386__)
     const char* const kSymbolizerArch = "--default-arch=i386";
+#elif SANITIZER_LOONGARCH64
+    const char *const kSymbolizerArch = "--default-arch=loongarch64";
 #elif SANITIZER_RISCV64
     const char *const kSymbolizerArch = "--default-arch=riscv64";
 #elif defined(__aarch64__)
diff --git a/libsanitizer/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc b/libsanitizer/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc
index 97ca7f2f3f9..80f5e6be8ad 100644
--- a/libsanitizer/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc
+++ b/libsanitizer/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc
@@ -14,18 +14,22 @@
 // About local register variables:
 // https://gcc.gnu.org/onlinedocs/gcc/Local-Register-Variables.html#Local-Register-Variables
 //
-// Kernel ABI...
-//  syscall number is passed in a7
-//  (http://man7.org/linux/man-pages/man2/syscall.2.html) results are return in
-//  a0 and a1 (http://man7.org/linux/man-pages/man2/syscall.2.html) arguments
-//  are passed in: a0-a7 (confirmed by inspecting glibc sources).
+// Kernel ABI:
+// https://lore.kernel.org/loongarch/1f353678-3398-e30b-1c87-6edb278f74db@xen0n.name/T/#m1613bc86c2d7bf5f6da92bd62984302bfd699a2f
+//  syscall number is placed in a7
+//  parameters, if present, are placed in a0-a6
+//  upon return:
+//    the return value is placed in a0
+//    t0-t8 should be considered clobbered
+//    all other registers are preserved
 #define SYSCALL(name) __NR_##name
 
-#define INTERNAL_SYSCALL_CLOBBERS "memory"
+#define INTERNAL_SYSCALL_CLOBBERS \
+  "memory", "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8"
 
 static uptr __internal_syscall(u64 nr) {
-  register u64 a7 asm("a7") = nr;
-  register u64 a0 asm("a0");
+  register u64 a7 asm("$a7") = nr;
+  register u64 a0 asm("$a0");
   __asm__ volatile("syscall 0\n\t"
                    : "=r"(a0)
                    : "r"(a7)
@@ -35,8 +39,8 @@ static uptr __internal_syscall(u64 nr) {
 #define __internal_syscall0(n) (__internal_syscall)(n)
 
 static uptr __internal_syscall(u64 nr, u64 arg1) {
-  register u64 a7 asm("a7") = nr;
-  register u64 a0 asm("a0") = arg1;
+  register u64 a7 asm("$a7") = nr;
+  register u64 a0 asm("$a0") = arg1;
   __asm__ volatile("syscall 0\n\t"
                    : "+r"(a0)
                    : "r"(a7)
@@ -46,9 +50,9 @@ static uptr __internal_syscall(u64 nr, u64 arg1) {
 #define __internal_syscall1(n, a1) (__internal_syscall)(n, (u64)(a1))
 
 static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) {
-  register u64 a7 asm("a7") = nr;
-  register u64 a0 asm("a0") = arg1;
-  register u64 a1 asm("a1") = arg2;
+  register u64 a7 asm("$a7") = nr;
+  register u64 a0 asm("$a0") = arg1;
+  register u64 a1 asm("$a1") = arg2;
   __asm__ volatile("syscall 0\n\t"
                    : "+r"(a0)
                    : "r"(a7), "r"(a1)
@@ -59,10 +63,10 @@ static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) {
   (__internal_syscall)(n, (u64)(a1), (long)(a2))
 
 static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) {
-  register u64 a7 asm("a7") = nr;
-  register u64 a0 asm("a0") = arg1;
-  register u64 a1 asm("a1") = arg2;
-  register u64 a2 asm("a2") = arg3;
+  register u64 a7 asm("$a7") = nr;
+  register u64 a0 asm("$a0") = arg1;
+  register u64 a1 asm("$a1") = arg2;
+  register u64 a2 asm("$a2") = arg3;
   __asm__ volatile("syscall 0\n\t"
                    : "+r"(a0)
                    : "r"(a7), "r"(a1), "r"(a2)
@@ -74,11 +78,11 @@ static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) {
 
 static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
                                u64 arg4) {
-  register u64 a7 asm("a7") = nr;
-  register u64 a0 asm("a0") = arg1;
-  register u64 a1 asm("a1") = arg2;
-  register u64 a2 asm("a2") = arg3;
-  register u64 a3 asm("a3") = arg4;
+  register u64 a7 asm("$a7") = nr;
+  register u64 a0 asm("$a0") = arg1;
+  register u64 a1 asm("$a1") = arg2;
+  register u64 a2 asm("$a2") = arg3;
+  register u64 a3 asm("$a3") = arg4;
   __asm__ volatile("syscall 0\n\t"
                    : "+r"(a0)
                    : "r"(a7), "r"(a1), "r"(a2), "r"(a3)
@@ -90,12 +94,12 @@ static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
 
 static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
                                long arg5) {
-  register u64 a7 asm("a7") = nr;
-  register u64 a0 asm("a0") = arg1;
-  register u64 a1 asm("a1") = arg2;
-  register u64 a2 asm("a2") = arg3;
-  register u64 a3 asm("a3") = arg4;
-  register u64 a4 asm("a4") = arg5;
+  register u64 a7 asm("$a7") = nr;
+  register u64 a0 asm("$a0") = arg1;
+  register u64 a1 asm("$a1") = arg2;
+  register u64 a2 asm("$a2") = arg3;
+  register u64 a3 asm("$a3") = arg4;
+  register u64 a4 asm("$a4") = arg5;
   __asm__ volatile("syscall 0\n\t"
                    : "+r"(a0)
                    : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4)
@@ -108,13 +112,13 @@ static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
 
 static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
                                long arg5, long arg6) {
-  register u64 a7 asm("a7") = nr;
-  register u64 a0 asm("a0") = arg1;
-  register u64 a1 asm("a1") = arg2;
-  register u64 a2 asm("a2") = arg3;
-  register u64 a3 asm("a3") = arg4;
-  register u64 a4 asm("a4") = arg5;
-  register u64 a5 asm("a5") = arg6;
+  register u64 a7 asm("$a7") = nr;
+  register u64 a0 asm("$a0") = arg1;
+  register u64 a1 asm("$a1") = arg2;
+  register u64 a2 asm("$a2") = arg3;
+  register u64 a3 asm("$a3") = arg4;
+  register u64 a4 asm("$a4") = arg5;
+  register u64 a5 asm("$a5") = arg6;
   __asm__ volatile("syscall 0\n\t"
                    : "+r"(a0)
                    : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5)
@@ -127,14 +131,14 @@ static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
 
 static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
                                long arg5, long arg6, long arg7) {
-  register u64 a7 asm("a7") = nr;
-  register u64 a0 asm("a0") = arg1;
-  register u64 a1 asm("a1") = arg2;
-  register u64 a2 asm("a2") = arg3;
-  register u64 a3 asm("a3") = arg4;
-  register u64 a4 asm("a4") = arg5;
-  register u64 a5 asm("a5") = arg6;
-  register u64 a6 asm("a6") = arg7;
+  register u64 a7 asm("$a7") = nr;
+  register u64 a0 asm("$a0") = arg1;
+  register u64 a1 asm("$a1") = arg2;
+  register u64 a2 asm("$a2") = arg3;
+  register u64 a3 asm("$a3") = arg4;
+  register u64 a4 asm("$a4") = arg5;
+  register u64 a5 asm("$a5") = arg6;
+  register u64 a6 asm("$a6") = arg7;
   __asm__ volatile("syscall 0\n\t"
                    : "+r"(a0)
                    : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5),
diff --git a/libsanitizer/tsan/tsan_interceptors.h b/libsanitizer/tsan/tsan_interceptors.h
index 3091ad809c4..60fbc58f988 100644
--- a/libsanitizer/tsan/tsan_interceptors.h
+++ b/libsanitizer/tsan/tsan_interceptors.h
@@ -21,8 +21,9 @@ class ScopedInterceptor {
 
  private:
   ThreadState *const thr_;
-  bool in_ignored_lib_;
-  bool ignoring_;
+  bool in_ignored_lib_ = false;
+  bool in_blocking_func_ = false;
+  bool ignoring_ = false;
 
   void DisableIgnoresImpl();
   void EnableIgnoresImpl();
diff --git a/libsanitizer/tsan/tsan_interceptors_posix.cpp b/libsanitizer/tsan/tsan_interceptors_posix.cpp
index 17f6b1f472d..c557d5ddc6a 100644
--- a/libsanitizer/tsan/tsan_interceptors_posix.cpp
+++ b/libsanitizer/tsan/tsan_interceptors_posix.cpp
@@ -165,13 +165,26 @@ struct SignalDesc {
 
 struct ThreadSignalContext {
   int int_signal_send;
-  atomic_uintptr_t in_blocking_func;
   SignalDesc pending_signals[kSigCount];
   // emptyset and oldset are too big for stack.
   __sanitizer_sigset_t emptyset;
   __sanitizer_sigset_t oldset;
 };
 
+void EnterBlockingFunc(ThreadState *thr) {
+  for (;;) {
+    // The order is important to not delay a signal infinitely if it's
+    // delivered right before we set in_blocking_func. Note: we can't call
+    // ProcessPendingSignals when in_blocking_func is set, or we can handle
+    // a signal synchronously when we are already handling a signal.
+    atomic_store(&thr->in_blocking_func, 1, memory_order_relaxed);
+    if (atomic_load(&thr->pending_signals, memory_order_relaxed) == 0)
+      break;
+    atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
+    ProcessPendingSignals(thr);
+  }
+}
+
 // The sole reason tsan wraps atexit callbacks is to establish synchronization
 // between callback setup and callback execution.
 struct AtExitCtx {
@@ -245,8 +258,18 @@ static ThreadSignalContext *SigCtx(ThreadState *thr) {
 
 ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
                                      uptr pc)
-    : thr_(thr), in_ignored_lib_(false), ignoring_(false) {
+    : thr_(thr) {
   LazyInitialize(thr);
+  if (UNLIKELY(atomic_load(&thr->in_blocking_func, memory_order_relaxed))) {
+    // pthread_join is marked as blocking, but it's also known to call other
+    // intercepted functions (mmap, free). If we don't reset in_blocking_func
+    // we can get deadlocks and memory corruptions if we deliver a synchronous
+    // signal inside of an mmap/free interceptor.
+    // So reset it and restore it back in the destructor.
+    // See https://github.com/google/sanitizers/issues/1540
+    atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
+    in_blocking_func_ = true;
+  }
   if (!thr_->is_inited) return;
   if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
   DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
@@ -259,6 +282,8 @@ ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
 ScopedInterceptor::~ScopedInterceptor() {
   if (!thr_->is_inited) return;
   DisableIgnores();
+  if (UNLIKELY(in_blocking_func_))
+    EnterBlockingFunc(thr_);
   if (!thr_->ignore_interceptors) {
     ProcessPendingSignals(thr_);
     FuncExit(thr_);
@@ -321,15 +346,8 @@ void ScopedInterceptor::DisableIgnoresImpl() {
 
 struct BlockingCall {
   explicit BlockingCall(ThreadState *thr)
-      : thr(thr)
-      , ctx(SigCtx(thr)) {
-    for (;;) {
-      atomic_store(&ctx->in_blocking_func, 1, memory_order_relaxed);
-      if (atomic_load(&thr->pending_signals, memory_order_relaxed) == 0)
-        break;
-      atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
-      ProcessPendingSignals(thr);
-    }
+      : thr(thr) {
+    EnterBlockingFunc(thr);
     // When we are in a "blocking call", we process signals asynchronously
     // (right when they arrive). In this context we do not expect to be
     // executing any user/runtime code. The known interceptor sequence when
@@ -340,11 +358,10 @@ struct BlockingCall {
 
   ~BlockingCall() {
     thr->ignore_interceptors--;
-    atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
+    atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
   }
 
   ThreadState *thr;
-  ThreadSignalContext *ctx;
 };
 
 TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
@@ -517,9 +534,7 @@ static void SetJmp(ThreadState *thr, uptr sp) {
   buf->shadow_stack_pos = thr->shadow_stack_pos;
   ThreadSignalContext *sctx = SigCtx(thr);
   buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
-  buf->in_blocking_func = sctx ?
-      atomic_load(&sctx->in_blocking_func, memory_order_relaxed) :
-      false;
+  buf->in_blocking_func = atomic_load(&thr->in_blocking_func, memory_order_relaxed);
   buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
       memory_order_relaxed);
 }
@@ -535,11 +550,10 @@ static void LongJmp(ThreadState *thr, uptr *env) {
       while (thr->shadow_stack_pos > buf->shadow_stack_pos)
         FuncExit(thr);
       ThreadSignalContext *sctx = SigCtx(thr);
-      if (sctx) {
+      if (sctx)
         sctx->int_signal_send = buf->int_signal_send;
-        atomic_store(&sctx->in_blocking_func, buf->in_blocking_func,
-            memory_order_relaxed);
-      }
+      atomic_store(&thr->in_blocking_func, buf->in_blocking_func,
+          memory_order_relaxed);
       atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
           memory_order_relaxed);
       JmpBufGarbageCollect(thr, buf->sp - 1);  // do not collect buf->sp
@@ -1198,9 +1212,8 @@ void CondMutexUnlockCtx<Fn>::Unlock() const {
   // tsan code. Also ScopedInterceptor and BlockingCall destructors won't run
   // since the thread is cancelled, so we have to manually execute them
   // (the thread still can run some user code due to pthread_cleanup_push).
-  ThreadSignalContext *ctx = SigCtx(thr);
-  CHECK_EQ(atomic_load(&ctx->in_blocking_func, memory_order_relaxed), 1);
-  atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
+  CHECK_EQ(atomic_load(&thr->in_blocking_func, memory_order_relaxed), 1);
+  atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
   MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
   // Undo BlockingCall ctor effects.
   thr->ignore_interceptors--;
@@ -2089,12 +2102,12 @@ void sighandler(int sig, __sanitizer_siginfo *info, void *ctx) {
       // If we are in blocking function, we can safely process it now
       // (but check if we are in a recursive interceptor,
       // i.e. pthread_join()->munmap()).
-      (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed))) {
+      atomic_load(&thr->in_blocking_func, memory_order_relaxed)) {
     atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
-    if (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed)) {
-      atomic_store(&sctx->in_blocking_func, 0, memory_order_relaxed);
+    if (atomic_load(&thr->in_blocking_func, memory_order_relaxed)) {
+      atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
       CallUserSignalHandler(thr, sync, true, sig, info, ctx);
-      atomic_store(&sctx->in_blocking_func, 1, memory_order_relaxed);
+      atomic_store(&thr->in_blocking_func, 1, memory_order_relaxed);
     } else {
       // Be very conservative with when we do acquire in this case.
       // It's unsafe to do acquire in async handlers, because ThreadState
@@ -3029,7 +3042,9 @@ void InitializeInterceptors() {
 constexpr u32 kBarrierThreadBits = 10;
 constexpr u32 kBarrierThreads = 1 << kBarrierThreadBits;
 
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_init(
+extern "C" {
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_init(
     atomic_uint32_t *barrier, u32 num_threads) {
   if (num_threads >= kBarrierThreads) {
     Printf("barrier_init: count is too large (%d)\n", num_threads);
@@ -3044,7 +3059,7 @@ static u32 barrier_epoch(u32 value) {
   return (value >> kBarrierThreadBits) / (value & (kBarrierThreads - 1));
 }
 
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_wait(
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_wait(
     atomic_uint32_t *barrier) {
   u32 old = atomic_fetch_add(barrier, kBarrierThreads, memory_order_relaxed);
   u32 old_epoch = barrier_epoch(old);
@@ -3059,3 +3074,23 @@ extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_wait(
     FutexWait(barrier, cur);
   }
 }
+
+void *__tsan_memcpy(void *dst, const void *src, uptr size) {
+  void *ctx;
+#if PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE
+  COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
+#else
+  COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
+#endif
+}
+
+void *__tsan_memset(void *dst, int c, uptr size) {
+  void *ctx;
+  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, c, size);
+}
+
+void *__tsan_memmove(void *dst, const void *src, uptr size) {
+  void *ctx;
+  COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
+}
+}
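
A minimal standalone illustration of the EnterBlockingFunc() protocol introduced above, using std::atomic in place of the runtime's own atomics (names and the commented-out callback are illustrative, not tsan's API):

  #include <atomic>

  std::atomic<unsigned long> in_blocking_func{0};
  std::atomic<unsigned long> pending_signals{0};

  void enter_blocking_func() {
    for (;;) {
      // Publish "blocking" first, then re-check for signals that raced in just
      // before the store; otherwise such a signal could be delayed indefinitely.
      in_blocking_func.store(1, std::memory_order_relaxed);
      if (pending_signals.load(std::memory_order_relaxed) == 0)
        break;
      in_blocking_func.store(0, std::memory_order_relaxed);
      // process_pending_signals();  // must not run while in_blocking_func is set
    }
  }
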
diff --git a/libsanitizer/tsan/tsan_interface.h b/libsanitizer/tsan/tsan_interface.h
index 711f064174c..5b9d664e503 100644
--- a/libsanitizer/tsan/tsan_interface.h
+++ b/libsanitizer/tsan/tsan_interface.h
@@ -72,6 +72,13 @@ SANITIZER_INTERFACE_ATTRIBUTE void __tsan_vptr_read(void **vptr_p);
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_vptr_update(void **vptr_p, void *new_val);
 
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_memcpy(void *dest, const void *src, uptr count);
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_memset(void *dest, int ch, uptr count);
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_memmove(void *dest, const void *src, uptr count);
+
 SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_entry(void *call_pc);
 SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_exit();
 
diff --git a/libsanitizer/tsan/tsan_rtl.h b/libsanitizer/tsan/tsan_rtl.h
index e1e121e2ee0..f0918d86d4e 100644
--- a/libsanitizer/tsan/tsan_rtl.h
+++ b/libsanitizer/tsan/tsan_rtl.h
@@ -191,6 +191,7 @@ struct ThreadState {
 #if !SANITIZER_GO
   Vector<JmpBuf> jmp_bufs;
   int in_symbolizer;
+  atomic_uintptr_t in_blocking_func;
   bool in_ignored_lib;
   bool is_inited;
 #endif
@@ -627,6 +628,13 @@ class SlotLocker {
   ALWAYS_INLINE
   SlotLocker(ThreadState *thr, bool recursive = false)
       : thr_(thr), locked_(recursive ? thr->slot_locked : false) {
+#if !SANITIZER_GO
+    // We are in trouble if we are here with in_blocking_func set.
+    // If in_blocking_func is set, all signals will be delivered synchronously,
+    // which means we can't lock slots since the signal handler will try
+    // to lock it recursively and deadlock.
+    DCHECK(!atomic_load(&thr->in_blocking_func, memory_order_relaxed));
+#endif
     if (!locked_)
       SlotLock(thr_);
   }
@@ -670,8 +678,8 @@ ALWAYS_INLINE
 void LazyInitialize(ThreadState *thr) {
   // If we can use .preinit_array, assume that __tsan_init
   // called from .preinit_array initializes runtime before
-  // any instrumented code.
-#if !SANITIZER_CAN_USE_PREINIT_ARRAY
+  // any instrumented code, except on Android.
+#if (!SANITIZER_CAN_USE_PREINIT_ARRAY || defined(__ANDROID__))
   if (UNLIKELY(!is_initialized))
     Initialize(thr);
 #endif
diff --git a/libsanitizer/tsan/tsan_rtl_ppc64.S b/libsanitizer/tsan/tsan_rtl_ppc64.S
index 9e533a71a9c..8285e21aa1e 100644
--- a/libsanitizer/tsan/tsan_rtl_ppc64.S
+++ b/libsanitizer/tsan/tsan_rtl_ppc64.S
@@ -1,6 +1,5 @@
 #include "tsan_ppc_regs.h"
 
-        .machine altivec
         .section .text
         .hidden __tsan_setjmp
         .globl _setjmp
diff --git a/libsanitizer/ubsan/ubsan_flags.cpp b/libsanitizer/ubsan/ubsan_flags.cpp
index 9a66bd37518..25cefd46ce2 100644
--- a/libsanitizer/ubsan/ubsan_flags.cpp
+++ b/libsanitizer/ubsan/ubsan_flags.cpp
@@ -50,7 +50,6 @@ void InitializeFlags() {
   {
     CommonFlags cf;
     cf.CopyFrom(*common_flags());
-    cf.print_summary = false;
     cf.external_symbolizer_path = GetFlag("UBSAN_SYMBOLIZER_PATH");
     OverrideCommonFlags(cf);
   }
diff --git a/libsanitizer/ubsan/ubsan_handlers.cpp b/libsanitizer/ubsan/ubsan_handlers.cpp
index 970075e69a6..410292a0d53 100644
--- a/libsanitizer/ubsan/ubsan_handlers.cpp
+++ b/libsanitizer/ubsan/ubsan_handlers.cpp
@@ -894,21 +894,6 @@ void __ubsan_handle_cfi_bad_type(CFICheckFailData *Data, ValueHandle Vtable,
 
 }  // namespace __ubsan
 
-void __ubsan::__ubsan_handle_cfi_bad_icall(CFIBadIcallData *CallData,
-                                           ValueHandle Function) {
-  GET_REPORT_OPTIONS(false);
-  CFICheckFailData Data = {CFITCK_ICall, CallData->Loc, CallData->Type};
-  handleCFIBadIcall(&Data, Function, Opts);
-}
-
-void __ubsan::__ubsan_handle_cfi_bad_icall_abort(CFIBadIcallData *CallData,
-                                                 ValueHandle Function) {
-  GET_REPORT_OPTIONS(true);
-  CFICheckFailData Data = {CFITCK_ICall, CallData->Loc, CallData->Type};
-  handleCFIBadIcall(&Data, Function, Opts);
-  Die();
-}
-
 void __ubsan::__ubsan_handle_cfi_check_fail(CFICheckFailData *Data,
                                             ValueHandle Value,
                                             uptr ValidVtable) {
diff --git a/libsanitizer/ubsan/ubsan_handlers.h b/libsanitizer/ubsan/ubsan_handlers.h
index 9f412353fc0..219fb15de55 100644
--- a/libsanitizer/ubsan/ubsan_handlers.h
+++ b/libsanitizer/ubsan/ubsan_handlers.h
@@ -215,20 +215,12 @@ enum CFITypeCheckKind : unsigned char {
   CFITCK_VMFCall,
 };
 
-struct CFIBadIcallData {
-  SourceLocation Loc;
-  const TypeDescriptor &Type;
-};
-
 struct CFICheckFailData {
   CFITypeCheckKind CheckKind;
   SourceLocation Loc;
   const TypeDescriptor &Type;
 };
 
-/// \brief Handle control flow integrity failure for indirect function calls.
-RECOVERABLE(cfi_bad_icall, CFIBadIcallData *Data, ValueHandle Function)
-
 /// \brief Handle control flow integrity failures.
 RECOVERABLE(cfi_check_fail, CFICheckFailData *Data, ValueHandle Function,
             uptr VtableIsValid)
diff --git a/libsanitizer/ubsan/ubsan_platform.h b/libsanitizer/ubsan/ubsan_platform.h
index ad3e883f0f3..d2cc2e10bd2 100644
--- a/libsanitizer/ubsan/ubsan_platform.h
+++ b/libsanitizer/ubsan/ubsan_platform.h
@@ -12,7 +12,6 @@
 #ifndef UBSAN_PLATFORM_H
 #define UBSAN_PLATFORM_H
 
-#ifndef CAN_SANITIZE_UB
 // Other platforms should be easy to add, and probably work as-is.
 #if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) ||        \
     defined(__NetBSD__) || defined(__DragonFly__) ||                           \
@@ -22,6 +21,5 @@
 #else
 # define CAN_SANITIZE_UB 0
 #endif
-#endif //CAN_SANITIZE_UB
 
 #endif

^ permalink raw reply	[flat|nested] 2+ messages in thread

* [gcc(refs/users/marxin/heads/merge-libsanitizer-v7)] libsanitizer: merge from upstream ae59131d3ef311fb4b1e50627c6457be00e60dc9
@ 2022-11-15 10:14 Martin Liska
  0 siblings, 0 replies; 2+ messages in thread
From: Martin Liska @ 2022-11-15 10:14 UTC (permalink / raw)
  To: gcc-cvs

https://gcc.gnu.org/g:725fff76447ec31d05c1464efad178dfb29ffd48

commit 725fff76447ec31d05c1464efad178dfb29ffd48
Author: Martin Liska <mliska@suse.cz>
Date:   Tue Nov 15 11:11:41 2022 +0100

    libsanitizer: merge from upstream ae59131d3ef311fb4b1e50627c6457be00e60dc9

Diff:
---
 libsanitizer/MERGE                                 |  2 +-
 libsanitizer/asan/asan_allocator.cpp               |  4 +-
 libsanitizer/asan/asan_allocator.h                 |  6 --
 libsanitizer/asan/asan_descriptions.cpp            | 12 +--
 libsanitizer/asan/asan_errors.cpp                  |  3 +-
 libsanitizer/asan/asan_globals.cpp                 | 19 +++++
 libsanitizer/asan/asan_interceptors.h              |  9 +--
 libsanitizer/asan/asan_interface.inc               |  7 ++
 libsanitizer/asan/asan_interface_internal.h        | 14 ++++
 libsanitizer/asan/asan_mapping.h                   | 10 ++-
 libsanitizer/asan/asan_mapping_sparc64.h           | 19 +++++
 libsanitizer/asan/asan_poisoning.cpp               | 92 +++++++++++++++++++--
 libsanitizer/asan/asan_rtl.cpp                     | 17 ++--
 libsanitizer/hwasan/hwasan.cpp                     |  8 +-
 libsanitizer/hwasan/hwasan_flags.inc               |  2 +-
 libsanitizer/hwasan/hwasan_fuchsia.cpp             |  4 +
 libsanitizer/hwasan/hwasan_report.cpp              | 24 +++---
 libsanitizer/include/sanitizer/msan_interface.h    |  2 +
 libsanitizer/lsan/lsan_common.cpp                  | 49 ++++++++++--
 libsanitizer/lsan/lsan_common.h                    |  2 +
 libsanitizer/lsan/lsan_common_mac.cpp              | 74 ++++++++++++-----
 libsanitizer/merge.sh                              |  2 +-
 libsanitizer/sanitizer_common/sanitizer_common.h   |  3 +
 .../sanitizer_common_interceptors.inc              |  2 +-
 ...zer_common_interceptors_vfork_loongarch64.inc.S |  6 --
 libsanitizer/sanitizer_common/sanitizer_linux.cpp  | 10 ++-
 .../sanitizer_common/sanitizer_linux_libcdep.cpp   |  4 -
 libsanitizer/sanitizer_common/sanitizer_mac.cpp    | 17 ++--
 libsanitizer/sanitizer_common/sanitizer_mac.h      | 20 -----
 libsanitizer/sanitizer_common/sanitizer_platform.h |  4 +-
 .../sanitizer_platform_limits_linux.cpp            |  5 +-
 .../sanitizer_platform_limits_posix.h              |  2 +-
 .../sanitizer_common/sanitizer_procmaps_mac.cpp    | 62 ++++++++++++---
 .../sanitizer_common/sanitizer_stacktrace.cpp      | 17 ++--
 .../sanitizer_stoptheworld_mac.cpp                 |  7 +-
 .../sanitizer_symbolizer_libcdep.cpp               |  2 +
 .../sanitizer_syscall_linux_loongarch64.inc        | 90 +++++++++++----------
 libsanitizer/tsan/tsan_interceptors.h              |  5 +-
 libsanitizer/tsan/tsan_interceptors_posix.cpp      | 93 +++++++++++++++-------
 libsanitizer/tsan/tsan_interface.h                 |  7 ++
 libsanitizer/tsan/tsan_rtl.h                       | 12 ++-
 libsanitizer/tsan/tsan_rtl_ppc64.S                 |  1 -
 libsanitizer/ubsan/ubsan_flags.cpp                 |  1 -
 libsanitizer/ubsan/ubsan_handlers.cpp              | 15 ----
 libsanitizer/ubsan/ubsan_handlers.h                |  8 --
 libsanitizer/ubsan/ubsan_platform.h                |  2 -
 46 files changed, 524 insertions(+), 252 deletions(-)

diff --git a/libsanitizer/MERGE b/libsanitizer/MERGE
index 3ca670c1184..6bb19c7d5ba 100644
--- a/libsanitizer/MERGE
+++ b/libsanitizer/MERGE
@@ -1,4 +1,4 @@
-84a71d5259c2682403cdbd8710592410a2f128ab
+ae59131d3ef311fb4b1e50627c6457be00e60dc9
 
 The first line of this file holds the git revision number of the
 last merge done from the master library sources.
diff --git a/libsanitizer/asan/asan_allocator.cpp b/libsanitizer/asan/asan_allocator.cpp
index 7b7a289c2d2..52d7eff7281 100644
--- a/libsanitizer/asan/asan_allocator.cpp
+++ b/libsanitizer/asan/asan_allocator.cpp
@@ -803,8 +803,8 @@ struct Allocator {
     sptr offset = 0;
     if (!m1 || AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
       // The address is in the chunk's left redzone, so maybe it is actually
-      // a right buffer overflow from the other chunk to the left.
-      // Search a bit to the left to see if there is another chunk.
+      // a right buffer overflow from the other chunk before.
+      // Search a bit before to see if there is another chunk.
       AsanChunk *m2 = nullptr;
       for (uptr l = 1; l < GetPageSizeCached(); l++) {
         m2 = GetAsanChunkByAddr(addr - l);
diff --git a/libsanitizer/asan/asan_allocator.h b/libsanitizer/asan/asan_allocator.h
index 27d826fb613..0b4dbf03bb9 100644
--- a/libsanitizer/asan/asan_allocator.h
+++ b/libsanitizer/asan/asan_allocator.h
@@ -135,12 +135,6 @@ typedef VeryCompactSizeClassMap SizeClassMap;
 const uptr kAllocatorSpace = ~(uptr)0;
 const uptr kAllocatorSize = 0x2000000000ULL;  // 128G.
 typedef VeryDenseSizeClassMap SizeClassMap;
-# elif defined(__aarch64__)
-// AArch64/SANITIZER_CAN_USE_ALLOCATOR64 is only for 42-bit VMA
-// so no need to different values for different VMA.
-const uptr kAllocatorSpace =  0x10000000000ULL;
-const uptr kAllocatorSize  =  0x10000000000ULL;  // 3T.
-typedef DefaultSizeClassMap SizeClassMap;
 #elif defined(__sparc__)
 const uptr kAllocatorSpace = ~(uptr)0;
 const uptr kAllocatorSize = 0x20000000000ULL;  // 2T.
diff --git a/libsanitizer/asan/asan_descriptions.cpp b/libsanitizer/asan/asan_descriptions.cpp
index d7d96168579..fbe92572b55 100644
--- a/libsanitizer/asan/asan_descriptions.cpp
+++ b/libsanitizer/asan/asan_descriptions.cpp
@@ -129,11 +129,11 @@ static void PrintHeapChunkAccess(uptr addr, const ChunkAccess &descr) {
   str.append("%s", d.Location());
   switch (descr.access_type) {
     case kAccessTypeLeft:
-      str.append("%p is located %zd bytes to the left of",
+      str.append("%p is located %zd bytes before",
                  (void *)descr.bad_addr, descr.offset);
       break;
     case kAccessTypeRight:
-      str.append("%p is located %zd bytes to the right of",
+      str.append("%p is located %zd bytes after",
                  (void *)descr.bad_addr, descr.offset);
       break;
     case kAccessTypeInside:
@@ -279,17 +279,17 @@ static void DescribeAddressRelativeToGlobal(uptr addr, uptr access_size,
   Decorator d;
   str.append("%s", d.Location());
   if (addr < g.beg) {
-    str.append("%p is located %zd bytes to the left", (void *)addr,
+    str.append("%p is located %zd bytes before", (void *)addr,
                g.beg - addr);
   } else if (addr + access_size > g.beg + g.size) {
     if (addr < g.beg + g.size) addr = g.beg + g.size;
-    str.append("%p is located %zd bytes to the right", (void *)addr,
+    str.append("%p is located %zd bytes after", (void *)addr,
                addr - (g.beg + g.size));
   } else {
     // Can it happen?
-    str.append("%p is located %zd bytes inside", (void *)addr, addr - g.beg);
+    str.append("%p is located %zd bytes inside of", (void *)addr, addr - g.beg);
   }
-  str.append(" of global variable '%s' defined in '",
+  str.append(" global variable '%s' defined in '",
              MaybeDemangleGlobalName(g.name));
   PrintGlobalLocation(&str, g);
   str.append("' (0x%zx) of size %zu\n", g.beg, g.size);
diff --git a/libsanitizer/asan/asan_errors.cpp b/libsanitizer/asan/asan_errors.cpp
index 10f7c17991d..f3befdf3c2b 100644
--- a/libsanitizer/asan/asan_errors.cpp
+++ b/libsanitizer/asan/asan_errors.cpp
@@ -539,7 +539,8 @@ static void PrintShadowBytes(InternalScopedString *str, const char *before,
                              u8 *bytes, u8 *guilty, uptr n) {
   Decorator d;
   if (before)
-    str->append("%s%p:", before, (void *)bytes);
+    str->append("%s%p:", before,
+                (void *)ShadowToMem(reinterpret_cast<uptr>(bytes)));
   for (uptr i = 0; i < n; i++) {
     u8 *p = bytes + i;
     const char *before =
diff --git a/libsanitizer/asan/asan_globals.cpp b/libsanitizer/asan/asan_globals.cpp
index 8f3491f0199..b780128c9ad 100644
--- a/libsanitizer/asan/asan_globals.cpp
+++ b/libsanitizer/asan/asan_globals.cpp
@@ -154,6 +154,23 @@ static void CheckODRViolationViaIndicator(const Global *g) {
   }
 }
 
+// Check ODR violation for given global G by checking if it's already poisoned.
+// We use this method in case compiler doesn't use private aliases for global
+// variables.
+static void CheckODRViolationViaPoisoning(const Global *g) {
+  if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
+    // This check may not be enough: if the first global is much larger
+    // the entire redzone of the second global may be within the first global.
+    for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
+      if (g->beg == l->g->beg &&
+          (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
+          !IsODRViolationSuppressed(g->name))
+        ReportODRViolation(g, FindRegistrationSite(g),
+                           l->g, FindRegistrationSite(l->g));
+    }
+  }
+}
+
 // Clang provides two different ways for global variables protection:
 // it can poison the global itself or its private alias. In former
 // case we may poison same symbol multiple times, that can help us to
@@ -199,6 +216,8 @@ static void RegisterGlobal(const Global *g) {
     // where two globals with the same name are defined in different modules.
     if (UseODRIndicator(g))
       CheckODRViolationViaIndicator(g);
+    else
+      CheckODRViolationViaPoisoning(g);
   }
   if (CanPoisonMemory())
     PoisonRedZones(*g);
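
A hedged illustration of the scenario the new CheckODRViolationViaPoisoning() path reports when the compiler does not emit private aliases or ODR indicators; the file and symbol names below are made up:

  // a.cpp, linked into liba.so:
  int duplicated_table[16];

  // b.cpp, linked into libb.so:
  int duplicated_table[32];

  // After both objects are loaded, the dynamic loader resolves the symbol to a
  // single address.  The first RegisterGlobal() poisons the global's redzone;
  // the second finds the region already poisoned and, since the registered
  // sizes differ (or detect_odr_violation >= 2), ReportODRViolation() fires.
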
diff --git a/libsanitizer/asan/asan_interceptors.h b/libsanitizer/asan/asan_interceptors.h
index 7edae006271..c4bf087ea17 100644
--- a/libsanitizer/asan/asan_interceptors.h
+++ b/libsanitizer/asan/asan_interceptors.h
@@ -81,12 +81,7 @@ void InitializePlatformInterceptors();
 #if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && !SANITIZER_SOLARIS && \
     !SANITIZER_NETBSD
 # define ASAN_INTERCEPT___CXA_THROW 1
-# if ! defined(ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION) \
-     || ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION
-#   define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
-# else
-#   define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 0
-# endif
+# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
 # if defined(_GLIBCXX_SJLJ_EXCEPTIONS) || (SANITIZER_IOS && defined(__arm__))
 #  define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 1
 # else
@@ -119,7 +114,7 @@ void InitializePlatformInterceptors();
 
 #if SANITIZER_LINUX &&                                                \
     (defined(__arm__) || defined(__aarch64__) || defined(__i386__) || \
-     defined(__x86_64__) || SANITIZER_RISCV64)
+     defined(__x86_64__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64)
 # define ASAN_INTERCEPT_VFORK 1
 #else
 # define ASAN_INTERCEPT_VFORK 0
diff --git a/libsanitizer/asan/asan_interface.inc b/libsanitizer/asan/asan_interface.inc
index 89ef552b711..bfc44b46196 100644
--- a/libsanitizer/asan/asan_interface.inc
+++ b/libsanitizer/asan/asan_interface.inc
@@ -108,6 +108,13 @@ INTERFACE_FUNCTION(__asan_report_store_n_noabort)
 INTERFACE_FUNCTION(__asan_set_death_callback)
 INTERFACE_FUNCTION(__asan_set_error_report_callback)
 INTERFACE_FUNCTION(__asan_set_shadow_00)
+INTERFACE_FUNCTION(__asan_set_shadow_01)
+INTERFACE_FUNCTION(__asan_set_shadow_02)
+INTERFACE_FUNCTION(__asan_set_shadow_03)
+INTERFACE_FUNCTION(__asan_set_shadow_04)
+INTERFACE_FUNCTION(__asan_set_shadow_05)
+INTERFACE_FUNCTION(__asan_set_shadow_06)
+INTERFACE_FUNCTION(__asan_set_shadow_07)
 INTERFACE_FUNCTION(__asan_set_shadow_f1)
 INTERFACE_FUNCTION(__asan_set_shadow_f2)
 INTERFACE_FUNCTION(__asan_set_shadow_f3)
diff --git a/libsanitizer/asan/asan_interface_internal.h b/libsanitizer/asan/asan_interface_internal.h
index b0802a89ddb..987f855c0f9 100644
--- a/libsanitizer/asan/asan_interface_internal.h
+++ b/libsanitizer/asan/asan_interface_internal.h
@@ -90,6 +90,20 @@ extern "C" {
   SANITIZER_INTERFACE_ATTRIBUTE
   void __asan_set_shadow_00(uptr addr, uptr size);
   SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_set_shadow_01(uptr addr, uptr size);
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_set_shadow_02(uptr addr, uptr size);
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_set_shadow_03(uptr addr, uptr size);
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_set_shadow_04(uptr addr, uptr size);
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_set_shadow_05(uptr addr, uptr size);
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_set_shadow_06(uptr addr, uptr size);
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_set_shadow_07(uptr addr, uptr size);
+  SANITIZER_INTERFACE_ATTRIBUTE
   void __asan_set_shadow_f1(uptr addr, uptr size);
   SANITIZER_INTERFACE_ATTRIBUTE
   void __asan_set_shadow_f2(uptr addr, uptr size);
diff --git a/libsanitizer/asan/asan_mapping.h b/libsanitizer/asan/asan_mapping.h
index 34ffc7b5cd0..c5f95c07a21 100644
--- a/libsanitizer/asan/asan_mapping.h
+++ b/libsanitizer/asan/asan_mapping.h
@@ -190,7 +190,7 @@
 #  elif defined(__aarch64__)
 #    define ASAN_SHADOW_OFFSET_CONST 0x0000001000000000
 #  elif defined(__powerpc64__)
-#    define ASAN_SHADOW_OFFSET_CONST 0x0000020000000000
+#    define ASAN_SHADOW_OFFSET_CONST 0x0000100000000000
 #  elif defined(__s390x__)
 #    define ASAN_SHADOW_OFFSET_CONST 0x0010000000000000
 #  elif SANITIZER_FREEBSD
@@ -272,6 +272,8 @@ extern uptr kHighMemEnd, kMidMemBeg, kMidMemEnd;  // Initialized in __asan_init.
 #  else
 #    define MEM_TO_SHADOW(mem) \
       (((mem) >> ASAN_SHADOW_SCALE) + (ASAN_SHADOW_OFFSET))
+#    define SHADOW_TO_MEM(mem) \
+      (((mem) - (ASAN_SHADOW_OFFSET)) << (ASAN_SHADOW_SCALE))
 
 #    define kLowMemBeg 0
 #    define kLowMemEnd (ASAN_SHADOW_OFFSET ? ASAN_SHADOW_OFFSET - 1 : 0)
@@ -376,6 +378,12 @@ static inline bool AddrIsInShadow(uptr a) {
   return AddrIsInLowShadow(a) || AddrIsInMidShadow(a) || AddrIsInHighShadow(a);
 }
 
+static inline uptr ShadowToMem(uptr p) {
+  PROFILE_ASAN_MAPPING();
+  CHECK(AddrIsInShadow(p));
+  return SHADOW_TO_MEM(p);
+}
+
 static inline bool AddrIsAlignedByGranularity(uptr a) {
   PROFILE_ASAN_MAPPING();
   return (a & (ASAN_SHADOW_GRANULARITY - 1)) == 0;
diff --git a/libsanitizer/asan/asan_mapping_sparc64.h b/libsanitizer/asan/asan_mapping_sparc64.h
index 90261d301f7..e310c12fe30 100644
--- a/libsanitizer/asan/asan_mapping_sparc64.h
+++ b/libsanitizer/asan/asan_mapping_sparc64.h
@@ -28,6 +28,7 @@
 #define MEM_TO_SHADOW(mem)                                       \
   ((((mem) << HIGH_BITS) >> (HIGH_BITS + (ASAN_SHADOW_SCALE))) + \
    (ASAN_SHADOW_OFFSET))
+#define SHADOW_TO_MEM(ptr) (__asan::ShadowToMemSparc64(ptr))
 
 #define kLowMemBeg 0
 #define kLowMemEnd (ASAN_SHADOW_OFFSET - 1)
@@ -97,6 +98,24 @@ static inline bool AddrIsInShadowGap(uptr a) {
   return a >= kShadowGapBeg && a <= kShadowGapEnd;
 }
 
+static inline constexpr uptr ShadowToMemSparc64(uptr p) {
+  PROFILE_ASAN_MAPPING();
+  p -= ASAN_SHADOW_OFFSET;
+  p <<= ASAN_SHADOW_SCALE;
+  if (p >= 0x8000000000000) {
+    p |= (~0ULL) << VMA_BITS;
+  }
+  return p;
+}
+
+static_assert(ShadowToMemSparc64(MEM_TO_SHADOW(0x0000000000000000)) ==
+              0x0000000000000000);
+static_assert(ShadowToMemSparc64(MEM_TO_SHADOW(0xfff8000000000000)) ==
+              0xfff8000000000000);
+// Gets aligned down.
+static_assert(ShadowToMemSparc64(MEM_TO_SHADOW(0x0007ffffffffffff)) ==
+              0x0007fffffffffff8);
+
 }  // namespace __asan
 
 #endif  // ASAN_MAPPING_SPARC64_H
diff --git a/libsanitizer/asan/asan_poisoning.cpp b/libsanitizer/asan/asan_poisoning.cpp
index 3b7c9d1312d..e4702563463 100644
--- a/libsanitizer/asan/asan_poisoning.cpp
+++ b/libsanitizer/asan/asan_poisoning.cpp
@@ -312,6 +312,34 @@ void __asan_set_shadow_00(uptr addr, uptr size) {
   REAL(memset)((void *)addr, 0, size);
 }
 
+void __asan_set_shadow_01(uptr addr, uptr size) {
+  REAL(memset)((void *)addr, 0x01, size);
+}
+
+void __asan_set_shadow_02(uptr addr, uptr size) {
+  REAL(memset)((void *)addr, 0x02, size);
+}
+
+void __asan_set_shadow_03(uptr addr, uptr size) {
+  REAL(memset)((void *)addr, 0x03, size);
+}
+
+void __asan_set_shadow_04(uptr addr, uptr size) {
+  REAL(memset)((void *)addr, 0x04, size);
+}
+
+void __asan_set_shadow_05(uptr addr, uptr size) {
+  REAL(memset)((void *)addr, 0x05, size);
+}
+
+void __asan_set_shadow_06(uptr addr, uptr size) {
+  REAL(memset)((void *)addr, 0x06, size);
+}
+
+void __asan_set_shadow_07(uptr addr, uptr size) {
+  REAL(memset)((void *)addr, 0x07, size);
+}
+
 void __asan_set_shadow_f1(uptr addr, uptr size) {
   REAL(memset)((void *)addr, 0xf1, size);
 }
@@ -354,8 +382,7 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
   uptr old_mid = reinterpret_cast<uptr>(old_mid_p);
   uptr new_mid = reinterpret_cast<uptr>(new_mid_p);
   uptr granularity = ASAN_SHADOW_GRANULARITY;
-  if (!(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end &&
-        IsAligned(beg, granularity))) {
+  if (!(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end)) {
     GET_STACK_TRACE_FATAL_HERE;
     ReportBadParamsToAnnotateContiguousContainer(beg, end, old_mid, new_mid,
                                                  &stack);
@@ -363,6 +390,56 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
   CHECK_LE(end - beg,
            FIRST_32_SECOND_64(1UL << 30, 1ULL << 40)); // Sanity check.
 
+  if (old_mid == new_mid)
+    return;  // Nothing to do here.
+
+  // Handle misaligned end and cut it off.
+  if (UNLIKELY(!AddrIsAlignedByGranularity(end))) {
+    uptr end_down = RoundDownTo(end, granularity);
+    // Either new or old mid must be in the granule to affect it.
+    if (new_mid > end_down) {
+      if (AddressIsPoisoned(end)) {
+        *(u8 *)MemToShadow(end_down) = static_cast<u8>(new_mid - end_down);
+      } else {
+        // Something after the container - don't touch.
+      }
+    } else if (old_mid > end_down) {
+      if (AddressIsPoisoned(end)) {
+        *(u8 *)MemToShadow(end_down) = kAsanContiguousContainerOOBMagic;
+      } else {
+        // Something after the container - don't touch.
+      }
+    }
+
+    if (beg >= end_down)
+      return;  // Same granule.
+
+    old_mid = Min(end_down, old_mid);
+    new_mid = Min(end_down, new_mid);
+  }
+
+  // Handle misaligned begin and cut it off.
+  if (UNLIKELY(!AddrIsAlignedByGranularity(beg))) {
+    uptr beg_up = RoundUpTo(beg, granularity);
+    uptr beg_down = RoundDownTo(beg, granularity);
+    // As soon as we add the first byte to the container we can no longer
+    // determine the state of the byte before the container, so we assume it
+    // is always unpoisoned.
+
+    // Either new or old mid must be in the granule to affect it.
+    if (new_mid < beg_up) {
+      *(u8 *)MemToShadow(beg_down) = static_cast<u8>(new_mid - beg_down);
+    } else if (old_mid < beg_up) {
+      *(u8 *)MemToShadow(beg_down) = 0;
+    }
+
+    old_mid = Max(beg_up, old_mid);
+    new_mid = Max(beg_up, new_mid);
+  }
+
+  if (old_mid == new_mid)
+    return;
+
   uptr a = RoundDownTo(Min(old_mid, new_mid), granularity);
   uptr c = RoundUpTo(Max(old_mid, new_mid), granularity);
   uptr d1 = RoundDownTo(old_mid, granularity);
@@ -397,8 +474,13 @@ const void *__sanitizer_contiguous_container_find_bad_address(
     const void *beg_p, const void *mid_p, const void *end_p) {
   if (!flags()->detect_container_overflow)
     return nullptr;
+  uptr granularity = ASAN_SHADOW_GRANULARITY;
   uptr beg = reinterpret_cast<uptr>(beg_p);
   uptr end = reinterpret_cast<uptr>(end_p);
+  uptr annotations_end =
+      (!AddrIsAlignedByGranularity(end) && !AddressIsPoisoned(end))
+          ? RoundDownTo(end, granularity)
+          : end;
   uptr mid = reinterpret_cast<uptr>(mid_p);
   CHECK_LE(beg, mid);
   CHECK_LE(mid, end);
@@ -408,9 +490,9 @@ const void *__sanitizer_contiguous_container_find_bad_address(
   uptr r1_beg = beg;
   uptr r1_end = Min(beg + kMaxRangeToCheck, mid);
   uptr r2_beg = Max(beg, mid - kMaxRangeToCheck);
-  uptr r2_end = Min(end, mid + kMaxRangeToCheck);
-  uptr r3_beg = Max(end - kMaxRangeToCheck, mid);
-  uptr r3_end = end;
+  uptr r2_end = Min(annotations_end, mid + kMaxRangeToCheck);
+  uptr r3_beg = Max(annotations_end - kMaxRangeToCheck, mid);
+  uptr r3_end = annotations_end;
   for (uptr i = r1_beg; i < r1_end; i++)
     if (AddressIsPoisoned(i))
       return reinterpret_cast<const void *>(i);
diff --git a/libsanitizer/asan/asan_rtl.cpp b/libsanitizer/asan/asan_rtl.cpp
index 88f66de5669..853083182b4 100644
--- a/libsanitizer/asan/asan_rtl.cpp
+++ b/libsanitizer/asan/asan_rtl.cpp
@@ -288,11 +288,18 @@ static NOINLINE void force_interface_symbols() {
     case 38: __asan_region_is_poisoned(0, 0); break;
     case 39: __asan_describe_address(0); break;
     case 40: __asan_set_shadow_00(0, 0); break;
-    case 41: __asan_set_shadow_f1(0, 0); break;
-    case 42: __asan_set_shadow_f2(0, 0); break;
-    case 43: __asan_set_shadow_f3(0, 0); break;
-    case 44: __asan_set_shadow_f5(0, 0); break;
-    case 45: __asan_set_shadow_f8(0, 0); break;
+    case 41: __asan_set_shadow_01(0, 0); break;
+    case 42: __asan_set_shadow_02(0, 0); break;
+    case 43: __asan_set_shadow_03(0, 0); break;
+    case 44: __asan_set_shadow_04(0, 0); break;
+    case 45: __asan_set_shadow_05(0, 0); break;
+    case 46: __asan_set_shadow_06(0, 0); break;
+    case 47: __asan_set_shadow_07(0, 0); break;
+    case 48: __asan_set_shadow_f1(0, 0); break;
+    case 49: __asan_set_shadow_f2(0, 0); break;
+    case 50: __asan_set_shadow_f3(0, 0); break;
+    case 51: __asan_set_shadow_f5(0, 0); break;
+    case 52: __asan_set_shadow_f8(0, 0); break;
   }
   // clang-format on
 }
diff --git a/libsanitizer/hwasan/hwasan.cpp b/libsanitizer/hwasan/hwasan.cpp
index bb946c2ffe0..9db4fb09409 100644
--- a/libsanitizer/hwasan/hwasan.cpp
+++ b/libsanitizer/hwasan/hwasan.cpp
@@ -340,7 +340,13 @@ __attribute__((constructor(0))) void __hwasan_init() {
   DisableCoreDumperIfNecessary();
 
   InitInstrumentation();
-  InitLoadedGlobals();
+  if constexpr (!SANITIZER_FUCHSIA) {
+    // Fuchsia's libc provides a hook (__sanitizer_module_loaded) that runs on
+    // the startup path which calls into __hwasan_library_loaded on all
+    // initially loaded modules, so explicitly registering the globals here
+    // isn't needed.
+    InitLoadedGlobals();
+  }
 
   // Needs to be called here because flags()->random_tags might not have been
   // initialized when InitInstrumentation() was called.
diff --git a/libsanitizer/hwasan/hwasan_flags.inc b/libsanitizer/hwasan/hwasan_flags.inc
index 18ea47f981b..4a226ee2ab8 100644
--- a/libsanitizer/hwasan/hwasan_flags.inc
+++ b/libsanitizer/hwasan/hwasan_flags.inc
@@ -39,7 +39,7 @@ HWASAN_FLAG(
 
 HWASAN_FLAG(bool, free_checks_tail_magic, 1,
     "If set, free() will check the magic values "
-    "to the right of the allocated object "
+    "after the allocated object "
     "if the allocation size is not a divident of the granule size")
 HWASAN_FLAG(
     int, max_free_fill_size, 0,
diff --git a/libsanitizer/hwasan/hwasan_fuchsia.cpp b/libsanitizer/hwasan/hwasan_fuchsia.cpp
index 967c796c339..858fac05af2 100644
--- a/libsanitizer/hwasan/hwasan_fuchsia.cpp
+++ b/libsanitizer/hwasan/hwasan_fuchsia.cpp
@@ -224,6 +224,10 @@ void __sanitizer_thread_exit_hook(void *hook, thrd_t self) {
   __hwasan::ThreadExitHook(hook, self);
 }
 
+void __sanitizer_module_loaded(const struct dl_phdr_info *info, size_t) {
+  __hwasan_library_loaded(info->dlpi_addr, info->dlpi_phdr, info->dlpi_phnum);
+}
+
 }  // extern "C"
 
 #endif  // SANITIZER_FUCHSIA
diff --git a/libsanitizer/hwasan/hwasan_report.cpp b/libsanitizer/hwasan/hwasan_report.cpp
index fe769589186..de082150b70 100644
--- a/libsanitizer/hwasan/hwasan_report.cpp
+++ b/libsanitizer/hwasan/hwasan_report.cpp
@@ -309,16 +309,16 @@ static void ShowHeapOrGlobalCandidate(uptr untagged_addr, tag_t *candidate,
       whence = "inside";
     } else if (candidate == left) {
       offset = untagged_addr - chunk.End();
-      whence = "to the right of";
+      whence = "after";
     } else {
       offset = chunk.Beg() - untagged_addr;
-      whence = "to the left of";
+      whence = "before";
     }
     Printf("%s", d.Error());
     Printf("\nCause: heap-buffer-overflow\n");
     Printf("%s", d.Default());
     Printf("%s", d.Location());
-    Printf("%p is located %zd bytes %s %zd-byte region [%p,%p)\n",
+    Printf("%p is located %zd bytes %s a %zd-byte region [%p,%p)\n",
            untagged_addr, offset, whence, chunk.UsedSize(), chunk.Beg(),
            chunk.End());
     Printf("%s", d.Allocation());
@@ -340,27 +340,27 @@ static void ShowHeapOrGlobalCandidate(uptr untagged_addr, tag_t *candidate,
     Printf("%s", d.Location());
     if (sym->SymbolizeData(mem, &info) && info.start) {
       Printf(
-          "%p is located %zd bytes to the %s of %zd-byte global variable "
+          "%p is located %zd bytes %s a %zd-byte global variable "
           "%s [%p,%p) in %s\n",
           untagged_addr,
           candidate == left ? untagged_addr - (info.start + info.size)
                             : info.start - untagged_addr,
-          candidate == left ? "right" : "left", info.size, info.name,
+          candidate == left ? "after" : "before", info.size, info.name,
           info.start, info.start + info.size, module_name);
     } else {
       uptr size = GetGlobalSizeFromDescriptor(mem);
       if (size == 0)
         // We couldn't find the size of the global from the descriptors.
         Printf(
-            "%p is located to the %s of a global variable in "
+            "%p is located %s a global variable in "
             "\n    #0 0x%x (%s+0x%x)\n",
-            untagged_addr, candidate == left ? "right" : "left", mem,
+            untagged_addr, candidate == left ? "after" : "before", mem,
             module_name, module_address);
       else
         Printf(
-            "%p is located to the %s of a %zd-byte global variable in "
+            "%p is located %s a %zd-byte global variable in "
             "\n    #0 0x%x (%s+0x%x)\n",
-            untagged_addr, candidate == left ? "right" : "left", size, mem,
+            untagged_addr, candidate == left ? "after" : "before", size, mem,
             module_name, module_address);
     }
     Printf("%s", d.Default());
@@ -459,7 +459,7 @@ void PrintAddressDescription(
       Printf("%s", d.Error());
       Printf("\nCause: use-after-free\n");
       Printf("%s", d.Location());
-      Printf("%p is located %zd bytes inside of %zd-byte region [%p,%p)\n",
+      Printf("%p is located %zd bytes inside a %zd-byte region [%p,%p)\n",
              untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
              har.requested_size, UntagAddr(har.tagged_addr),
              UntagAddr(har.tagged_addr) + har.requested_size);
@@ -518,7 +518,7 @@ static void PrintTagInfoAroundAddr(tag_t *tag_ptr, uptr num_rows,
   InternalScopedString s;
   for (tag_t *row = beg_row; row < end_row; row += row_len) {
     s.append("%s", row == center_row_beg ? "=>" : "  ");
-    s.append("%p:", (void *)row);
+    s.append("%p:", (void *)ShadowToMem(reinterpret_cast<uptr>(row)));
     for (uptr i = 0; i < row_len; i++) {
       s.append("%s", row + i == tag_ptr ? "[" : " ");
       print_tag(s, &row[i]);
@@ -660,7 +660,7 @@ void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
     s.append("%s ", actual_expected[i] != tail[i] ? "^^" : "  ");
 
   s.append("\nThis error occurs when a buffer overflow overwrites memory\n"
-    "to the right of a heap object, but within the %zd-byte granule, e.g.\n"
+    "after a heap object, but within the %zd-byte granule, e.g.\n"
     "   char *x = new char[20];\n"
     "   x[25] = 42;\n"
     "%s does not detect such bugs in uninstrumented code at the time of write,"
diff --git a/libsanitizer/include/sanitizer/msan_interface.h b/libsanitizer/include/sanitizer/msan_interface.h
index eeb39fbed8b..854b12cda36 100644
--- a/libsanitizer/include/sanitizer/msan_interface.h
+++ b/libsanitizer/include/sanitizer/msan_interface.h
@@ -92,6 +92,8 @@ extern "C" {
 
   /* Tell MSan about newly destroyed memory. Mark memory as uninitialized. */
   void __sanitizer_dtor_callback(const volatile void* data, size_t size);
+  void __sanitizer_dtor_callback_fields(const volatile void *data, size_t size);
+  void __sanitizer_dtor_callback_vptr(const volatile void *data);
 
   /* This function may be optionally provided by user and should return
      a string containing Msan runtime options. See msan_flags.h for details. */
diff --git a/libsanitizer/lsan/lsan_common.cpp b/libsanitizer/lsan/lsan_common.cpp
index 94bb3cca008..576274608c8 100644
--- a/libsanitizer/lsan/lsan_common.cpp
+++ b/libsanitizer/lsan/lsan_common.cpp
@@ -26,6 +26,18 @@
 #include "sanitizer_common/sanitizer_tls_get_addr.h"
 
 #if CAN_SANITIZE_LEAKS
+
+#  if SANITIZER_APPLE
+// https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L127
+#    if SANITIZER_IOS && !SANITIZER_IOSSIM
+#      define OBJC_DATA_MASK 0x0000007ffffffff8UL
+#    else
+#      define OBJC_DATA_MASK 0x00007ffffffffff8UL
+#    endif
+// https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L139
+#    define OBJC_FAST_IS_RW 0x8000000000000000UL
+#  endif
+
 namespace __lsan {
 
 // This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
@@ -160,6 +172,17 @@ static uptr GetCallerPC(const StackTrace &stack) {
   return 0;
 }
 
+#  if SANITIZER_APPLE
+// Objective-C class data pointers are stored with flags in the low bits, so
+// they need to be transformed back into something that looks like a pointer.
+static inline void *MaybeTransformPointer(void *p) {
+  uptr ptr = reinterpret_cast<uptr>(p);
+  if ((ptr & OBJC_FAST_IS_RW) == OBJC_FAST_IS_RW)
+    ptr &= OBJC_DATA_MASK;
+  return reinterpret_cast<void *>(ptr);
+}
+#  endif
+
 // On Linux, treats all chunks allocated from ld-linux.so as reachable, which
 // covers dynamically allocated TLS blocks, internal dynamic loader's loaded
 // modules accounting etc.
@@ -276,6 +299,9 @@ void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
     pp = pp + alignment - pp % alignment;
   for (; pp + sizeof(void *) <= end; pp += alignment) {
     void *p = *reinterpret_cast<void **>(pp);
+#  if SANITIZER_APPLE
+    p = MaybeTransformPointer(p);
+#  endif
     if (!MaybeUserPointer(reinterpret_cast<uptr>(p)))
       continue;
     uptr chunk = PointsIntoChunk(p);
@@ -332,7 +358,8 @@ void ForEachExtraStackRangeCb(uptr begin, uptr end, void *arg) {
 #  if SANITIZER_FUCHSIA
 
 // Fuchsia handles all threads together with its own callback.
-static void ProcessThreads(SuspendedThreadsList const &, Frontier *) {}
+static void ProcessThreads(SuspendedThreadsList const &, Frontier *, tid_t,
+                           uptr) {}
 
 #  else
 
@@ -365,7 +392,8 @@ static void ProcessThreadRegistry(Frontier *frontier) {
 
 // Scans thread data (stacks and TLS) for heap pointers.
 static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
-                           Frontier *frontier) {
+                           Frontier *frontier, tid_t caller_tid,
+                           uptr caller_sp) {
   InternalMmapVector<uptr> registers;
   for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
     tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
@@ -392,6 +420,9 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
         continue;
       sp = stack_begin;
     }
+    if (suspended_threads.GetThreadID(i) == caller_tid) {
+      sp = caller_sp;
+    }
 
     if (flags()->use_registers && have_registers) {
       uptr registers_begin = reinterpret_cast<uptr>(registers.data());
@@ -572,7 +603,8 @@ static void CollectIgnoredCb(uptr chunk, void *arg) {
 
 // Sets the appropriate tag on each chunk.
 static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
-                              Frontier *frontier) {
+                              Frontier *frontier, tid_t caller_tid,
+                              uptr caller_sp) {
   const InternalMmapVector<u32> &suppressed_stacks =
       GetSuppressionContext()->GetSortedSuppressedStacks();
   if (!suppressed_stacks.empty()) {
@@ -581,7 +613,7 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
   }
   ForEachChunk(CollectIgnoredCb, frontier);
   ProcessGlobalRegions(frontier);
-  ProcessThreads(suspended_threads, frontier);
+  ProcessThreads(suspended_threads, frontier, caller_tid, caller_sp);
   ProcessRootRegions(frontier);
   FloodFillTag(frontier, kReachable);
 
@@ -677,7 +709,8 @@ static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
   CHECK(param);
   CHECK(!param->success);
   ReportUnsuspendedThreads(suspended_threads);
-  ClassifyAllChunks(suspended_threads, &param->frontier);
+  ClassifyAllChunks(suspended_threads, &param->frontier, param->caller_tid,
+                    param->caller_sp);
   ForEachChunk(CollectLeaksCb, &param->leaks);
   // Clean up for subsequent leak checks. This assumes we did not overwrite any
   // kIgnored tags.
@@ -716,6 +749,12 @@ static bool CheckForLeaks() {
   for (int i = 0;; ++i) {
     EnsureMainThreadIDIsCorrect();
     CheckForLeaksParam param;
+    // Capture calling thread's stack pointer early, to avoid false negatives.
+    // Old frame with dead pointers might be overlapped by new frame inside
+    // CheckForLeaks which does not use bytes with pointers before the
+    // threads are suspended and stack pointers captured.
+    param.caller_tid = GetTid();
+    param.caller_sp = reinterpret_cast<uptr>(__builtin_frame_address(0));
     LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
     if (!param.success) {
       Report("LeakSanitizer has encountered a fatal error.\n");
diff --git a/libsanitizer/lsan/lsan_common.h b/libsanitizer/lsan/lsan_common.h
index d7153751fae..20ef7c458b4 100644
--- a/libsanitizer/lsan/lsan_common.h
+++ b/libsanitizer/lsan/lsan_common.h
@@ -145,6 +145,8 @@ struct RootRegion {
 struct CheckForLeaksParam {
   Frontier frontier;
   LeakedChunks leaks;
+  tid_t caller_tid;
+  uptr caller_sp;
   bool success = false;
 };
 
diff --git a/libsanitizer/lsan/lsan_common_mac.cpp b/libsanitizer/lsan/lsan_common_mac.cpp
index 26b623fb1d4..b6b15095744 100644
--- a/libsanitizer/lsan/lsan_common_mac.cpp
+++ b/libsanitizer/lsan/lsan_common_mac.cpp
@@ -17,21 +17,36 @@
 
 #if CAN_SANITIZE_LEAKS && SANITIZER_APPLE
 
-#include "sanitizer_common/sanitizer_allocator_internal.h"
-#include "lsan_allocator.h"
+#  include <mach/mach.h>
+#  include <mach/vm_statistics.h>
+#  include <pthread.h>
 
-#include <pthread.h>
+#  include "lsan_allocator.h"
+#  include "sanitizer_common/sanitizer_allocator_internal.h"
+namespace __lsan {
 
-#include <mach/mach.h>
+enum class SeenRegion {
+  None = 0,
+  AllocOnce = 1 << 0,
+  LibDispatch = 1 << 1,
+  Foundation = 1 << 2,
+  All = AllocOnce | LibDispatch | Foundation
+};
+
+inline SeenRegion operator|(SeenRegion left, SeenRegion right) {
+  return static_cast<SeenRegion>(static_cast<int>(left) |
+                                 static_cast<int>(right));
+}
 
-// Only introduced in Mac OS X 10.9.
-#ifdef VM_MEMORY_OS_ALLOC_ONCE
-static const int kSanitizerVmMemoryOsAllocOnce = VM_MEMORY_OS_ALLOC_ONCE;
-#else
-static const int kSanitizerVmMemoryOsAllocOnce = 73;
-#endif
+inline SeenRegion &operator|=(SeenRegion &left, const SeenRegion &right) {
+  left = left | right;
+  return left;
+}
 
-namespace __lsan {
+struct RegionScanState {
+  SeenRegion seen_regions = SeenRegion::None;
+  bool in_libdispatch = false;
+};
 
 typedef struct {
   int disable_counter;
@@ -148,6 +163,7 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) {
 
   InternalMmapVectorNoCtor<RootRegion> const *root_regions = GetRootRegions();
 
+  RegionScanState scan_state;
   while (err == KERN_SUCCESS) {
     vm_size_t size = 0;
     unsigned depth = 1;
@@ -157,17 +173,35 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) {
                                (vm_region_info_t)&info, &count);
 
     uptr end_address = address + size;
-
-    // libxpc stashes some pointers in the Kernel Alloc Once page,
-    // make sure not to report those as leaks.
-    if (info.user_tag == kSanitizerVmMemoryOsAllocOnce) {
+    if (info.user_tag == VM_MEMORY_OS_ALLOC_ONCE) {
+      // libxpc stashes some pointers in the Kernel Alloc Once page,
+      // make sure not to report those as leaks.
+      scan_state.seen_regions |= SeenRegion::AllocOnce;
       ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
                            kReachable);
+    } else if (info.user_tag == VM_MEMORY_FOUNDATION) {
+      // Objective-C block trampolines use the Foundation region.
+      scan_state.seen_regions |= SeenRegion::Foundation;
+      ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
+                           kReachable);
+    } else if (info.user_tag == VM_MEMORY_LIBDISPATCH) {
+      // Dispatch continuations use the libdispatch region. Empirically, there
+      // can be more than one region with this tag, so we'll optimistically
+      // assume that they're contiguous. Otherwise, we would need to scan every
+      // region to ensure we find them all.
+      scan_state.in_libdispatch = true;
+      ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
+                           kReachable);
+    } else if (scan_state.in_libdispatch) {
+      scan_state.seen_regions |= SeenRegion::LibDispatch;
+      scan_state.in_libdispatch = false;
+    }
 
-      // Recursing over the full memory map is very slow, break out
-      // early if we don't need the full iteration.
-      if (!flags()->use_root_regions || !root_regions->size())
-        break;
+    // Recursing over the full memory map is very slow, break out
+    // early if we don't need the full iteration.
+    if (scan_state.seen_regions == SeenRegion::All &&
+        !(flags()->use_root_regions && root_regions->size() > 0)) {
+      break;
     }
 
     // This additional root region scan is required on Darwin in order to
@@ -199,6 +233,6 @@ void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
   StopTheWorld(callback, argument);
 }
 
-} // namespace __lsan
+}  // namespace __lsan
 
 #endif // CAN_SANITIZE_LEAKS && SANITIZER_APPLE
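The SeenRegion changes above use a scoped enum as a bitmask. A self-contained sketch of that pattern (illustrative names, not part of the patch) shows why the overloaded operators are needed and how a loop can bail out once every interesting region kind has been seen:

// Inside the enum body the enumerators still have the underlying type (int),
// so A | B is plain integer arithmetic here.
enum class Seen { None = 0, A = 1 << 0, B = 1 << 1, All = A | B };

// enum class does not convert to int implicitly, so | and |= must be
// provided explicitly for the flag arithmetic to compile.
inline Seen operator|(Seen l, Seen r) {
  return static_cast<Seen>(static_cast<int>(l) | static_cast<int>(r));
}
inline Seen &operator|=(Seen &l, Seen r) { return l = l | r; }

bool AllSeenExample() {
  Seen seen = Seen::None;
  seen |= Seen::A;
  seen |= Seen::B;
  return seen == Seen::All;  // true once both flags have been set
}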
diff --git a/libsanitizer/merge.sh b/libsanitizer/merge.sh
index 95ded4f9634..7d1b553e2bb 100755
--- a/libsanitizer/merge.sh
+++ b/libsanitizer/merge.sh
@@ -6,7 +6,7 @@
 
 get_upstream() {
   rm -rf upstream
-  git clone https://github.com/llvm/llvm-project.git upstream
+  git clone --depth 1 https://github.com/llvm/llvm-project.git upstream
 }
 
 get_current_rev() {
diff --git a/libsanitizer/sanitizer_common/sanitizer_common.h b/libsanitizer/sanitizer_common/sanitizer_common.h
index 08c6062ba06..b462e388c23 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common.h
+++ b/libsanitizer/sanitizer_common/sanitizer_common.h
@@ -709,6 +709,7 @@ enum ModuleArch {
   kModuleArchARMV7S,
   kModuleArchARMV7K,
   kModuleArchARM64,
+  kModuleArchLoongArch64,
   kModuleArchRISCV64,
   kModuleArchHexagon
 };
@@ -781,6 +782,8 @@ inline const char *ModuleArchToString(ModuleArch arch) {
       return "armv7k";
     case kModuleArchARM64:
       return "arm64";
+    case kModuleArchLoongArch64:
+      return "loongarch64";
     case kModuleArchRISCV64:
       return "riscv64";
     case kModuleArchHexagon:
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc
index cd9235e503b..ba4b80081f0 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc
+++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc
@@ -6727,7 +6727,7 @@ INTERCEPTOR(int, sem_init, __sanitizer_sem_t *s, int pshared, unsigned value) {
   COMMON_INTERCEPTOR_ENTER(ctx, sem_init, s, pshared, value);
   // Workaround a bug in glibc's "old" semaphore implementation by
   // zero-initializing the sem_t contents. This has to be done here because
-  // interceptors bind to the lowest symbols version by default, hitting the
+  // interceptors bind to the lowest version before glibc 2.36, hitting the
   // buggy code path while the non-sanitized build of the same code works fine.
   REAL(memset)(s, 0, sizeof(*s));
   int res = REAL(sem_init)(s, pshared, value);
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_loongarch64.inc.S b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_loongarch64.inc.S
index 05192485d59..68782acb379 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_loongarch64.inc.S
+++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_loongarch64.inc.S
@@ -5,12 +5,6 @@
 ASM_HIDDEN(COMMON_INTERCEPTOR_SPILL_AREA)
 ASM_HIDDEN(_ZN14__interception10real_vforkE)
 
-.bss
-.type _ZN14__interception10real_vforkE, @object
-.size _ZN14__interception10real_vforkE, 8
-_ZN14__interception10real_vforkE:
-        .zero     8
-
 .text
 .globl ASM_WRAPPER_NAME(vfork)
 ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
diff --git a/libsanitizer/sanitizer_common/sanitizer_linux.cpp b/libsanitizer/sanitizer_common/sanitizer_linux.cpp
index dc2ea933fad..f23ea9da371 100644
--- a/libsanitizer/sanitizer_common/sanitizer_linux.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_linux.cpp
@@ -1105,7 +1105,7 @@ uptr GetMaxVirtualAddress() {
 #if SANITIZER_NETBSD && defined(__x86_64__)
   return 0x7f7ffffff000ULL;  // (0x00007f8000000000 - PAGE_SIZE)
 #elif SANITIZER_WORDSIZE == 64
-# if defined(__powerpc64__) || defined(__aarch64__)
+# if defined(__powerpc64__) || defined(__aarch64__) || defined(__loongarch__)
   // On PowerPC64 we have two different address space layouts: 44- and 46-bit.
   // We somehow need to figure out which one we are using now and choose
   // one of 0x00000fffffffffffUL and 0x00003fffffffffffUL.
@@ -1113,6 +1113,7 @@ uptr GetMaxVirtualAddress() {
   // of the address space, so simply checking the stack address is not enough.
   // This should (does) work for both PowerPC64 Endian modes.
   // Similarly, aarch64 has multiple address space layouts: 39, 42 and 47-bit.
+  // loongarch64 also has multiple address space layouts: default is 47-bit.
   return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1;
 #elif SANITIZER_RISCV64
   return (1ULL << 38) - 1;
@@ -1955,6 +1956,13 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
   u64 esr;
   if (!Aarch64GetESR(ucontext, &esr)) return Unknown;
   return esr & ESR_ELx_WNR ? Write : Read;
+#elif defined(__loongarch__)
+  u32 flags = ucontext->uc_mcontext.__flags;
+  if (flags & SC_ADDRERR_RD)
+    return SignalContext::Read;
+  if (flags & SC_ADDRERR_WR)
+    return SignalContext::Write;
+  return SignalContext::Unknown;
 #elif defined(__sparc__)
   // Decode the instruction to determine the access type.
   // From OpenSolaris $SRC/uts/sun4/os/trap.c (get_accesstype).
diff --git a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp
index 56d231643ba..d74851c43e1 100644
--- a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp
@@ -822,13 +822,9 @@ u32 GetNumberOfCPUs() {
 #elif SANITIZER_SOLARIS
   return sysconf(_SC_NPROCESSORS_ONLN);
 #else
-#if defined(CPU_COUNT)
   cpu_set_t CPUs;
   CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
   return CPU_COUNT(&CPUs);
-#else
-  return 1;
-#endif
 #endif
 }
 
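The GetNumberOfCPUs() simplification above now relies on CPU_COUNT() unconditionally. A small standalone program using the same two calls, assuming Linux with glibc (_GNU_SOURCE is needed for these declarations in <sched.h>):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void) {
  cpu_set_t cpus;
  // Affinity mask of the calling process; CPU_COUNT() tallies the set bits.
  if (sched_getaffinity(0, sizeof(cpus), &cpus) != 0) {
    perror("sched_getaffinity");
    return 1;
  }
  printf("CPUs in affinity mask: %d\n", CPU_COUNT(&cpus));
  return 0;
}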
diff --git a/libsanitizer/sanitizer_common/sanitizer_mac.cpp b/libsanitizer/sanitizer_common/sanitizer_mac.cpp
index 9ee799be3c8..23c4c6619de 100644
--- a/libsanitizer/sanitizer_common/sanitizer_mac.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_mac.cpp
@@ -38,7 +38,7 @@
 extern char **environ;
 #endif
 
-#if defined(__has_include) && __has_include(<os/trace.h>) && defined(__BLOCKS__)
+#if defined(__has_include) && __has_include(<os/trace.h>)
 #define SANITIZER_OS_TRACE 1
 #include <os/trace.h>
 #else
@@ -71,15 +71,7 @@ extern "C" {
 #include <mach/mach_time.h>
 #include <mach/vm_statistics.h>
 #include <malloc/malloc.h>
-#if defined(__has_builtin) && __has_builtin(__builtin_os_log_format)
-# include <os/log.h>
-#else
-   /* Without support for __builtin_os_log_format, fall back to the older
-      method.  */
-# define OS_LOG_DEFAULT 0
-# define os_log_error(A,B,C) \
-  asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", (C));
-#endif
+#include <os/log.h>
 #include <pthread.h>
 #include <pthread/introspection.h>
 #include <sched.h>
@@ -1259,6 +1251,7 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
   mach_vm_address_t start_address =
     (SANITIZER_WORDSIZE == 32) ? 0x000000001000 : 0x000100000000;
 
+  const mach_vm_address_t max_vm_address = GetMaxVirtualAddress() + 1;
   mach_vm_address_t address = start_address;
   mach_vm_address_t free_begin = start_address;
   kern_return_t kr = KERN_SUCCESS;
@@ -1273,7 +1266,7 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                                 (vm_region_info_t)&vminfo, &count);
     if (kr == KERN_INVALID_ADDRESS) {
       // No more regions beyond "address", consider the gap at the end of VM.
-      address = GetMaxVirtualAddress() + 1;
+      address = max_vm_address;
       vmsize = 0;
     } else {
       if (max_occupied_addr) *max_occupied_addr = address + vmsize;
@@ -1281,7 +1274,7 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
     if (free_begin != address) {
       // We found a free region [free_begin..address-1].
       uptr gap_start = RoundUpTo((uptr)free_begin + left_padding, alignment);
-      uptr gap_end = RoundDownTo((uptr)address, alignment);
+      uptr gap_end = RoundDownTo((uptr)Min(address, max_vm_address), alignment);
       uptr gap_size = gap_end > gap_start ? gap_end - gap_start : 0;
       if (size < gap_size) {
         return gap_start;
diff --git a/libsanitizer/sanitizer_common/sanitizer_mac.h b/libsanitizer/sanitizer_common/sanitizer_mac.h
index 1cf2e298cc9..f0a97d098ee 100644
--- a/libsanitizer/sanitizer_common/sanitizer_mac.h
+++ b/libsanitizer/sanitizer_common/sanitizer_mac.h
@@ -14,26 +14,6 @@
 
 #include "sanitizer_common.h"
 #include "sanitizer_platform.h"
-
-/* TARGET_OS_OSX is not present in SDKs before Darwin16 (macOS 10.12) use
-   TARGET_OS_MAC (we have no support for iOS in any form for these versions,
-   so there's no ambiguity).  */
-#if !defined(TARGET_OS_OSX) && TARGET_OS_MAC
-# define TARGET_OS_OSX 1
-#endif
-
-/* Other TARGET_OS_xxx are not present on earlier versions, define them to
-   0 (we have no support for them; they are not valid targets anyway).  */
-#ifndef TARGET_OS_IOS
-#define TARGET_OS_IOS 0
-#endif
-#ifndef TARGET_OS_TV
-#define TARGET_OS_TV 0
-#endif
-#ifndef TARGET_OS_WATCH
-#define TARGET_OS_WATCH 0
-#endif
-
 #if SANITIZER_APPLE
 #include "sanitizer_posix.h"
 
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform.h b/libsanitizer/sanitizer_common/sanitizer_platform.h
index 32005eef08c..7ecc465bea9 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform.h
+++ b/libsanitizer/sanitizer_common/sanitizer_platform.h
@@ -286,8 +286,8 @@
 #ifndef SANITIZER_CAN_USE_ALLOCATOR64
 #  if (SANITIZER_ANDROID && defined(__aarch64__)) || SANITIZER_FUCHSIA
 #    define SANITIZER_CAN_USE_ALLOCATOR64 1
-#  elif defined(__mips64) || defined(__aarch64__) || defined(__i386__) || \
-      defined(__arm__) || SANITIZER_RISCV64 || defined(__hexagon__)
+#  elif defined(__mips64) || defined(__arm__) || defined(__i386__) || \
+      SANITIZER_RISCV64 || defined(__hexagon__)
 #    define SANITIZER_CAN_USE_ALLOCATOR64 0
 #  else
 #    define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64)
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp
index c278c8797f7..bf0f355847c 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp
@@ -26,10 +26,7 @@
 
 // With old kernels (and even new kernels on powerpc) asm/stat.h uses types that
 // are not defined anywhere in userspace headers. Fake them. This seems to work
-// fine with newer headers, too.  Beware that with <sys/stat.h>, struct stat
-// takes the form of struct stat64 on 32-bit platforms if _FILE_OFFSET_BITS=64.
-// Also, for some platforms (e.g. mips) there are additional members in the
-// <sys/stat.h> struct stat:s.
+// fine with newer headers, too.
 #include <linux/posix_types.h>
 #  if defined(__x86_64__) || defined(__mips__) || defined(__hexagon__)
 #    include <sys/stat.h>
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
index bd5692ed511..44dd3d9e22d 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
@@ -101,7 +101,7 @@ const unsigned struct_kernel_stat64_sz = 104;
 const unsigned struct_kernel_stat_sz =
     SANITIZER_ANDROID
         ? FIRST_32_SECOND_64(104, 128)
-        : FIRST_32_SECOND_64((_MIPS_SIM == _ABIN32) ? 160 : 144, 216);
+        : FIRST_32_SECOND_64((_MIPS_SIM == _ABIN32) ? 176 : 160, 216);
 const unsigned struct_kernel_stat64_sz = 104;
 #elif defined(__s390__) && !defined(__s390x__)
 const unsigned struct_kernel_stat_sz = 64;
diff --git a/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cpp b/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cpp
index ba4259acd46..4b0e6781976 100644
--- a/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cpp
@@ -146,13 +146,8 @@ static bool IsDyldHdr(const mach_header *hdr) {
 // until we hit a Mach header matching dyld instead. These recurse
 // calls are expensive, but the first memory map generation occurs
 // early in the process, when dyld is one of the only images loaded,
-// so it will be hit after only a few iterations.  These assumptions don't
-// hold on macOS 13+ anymore (dyld itself has moved into the shared cache).
-
-// FIXME: Unfortunately, the upstream revised version to deal with macOS 13+
-// is incompatible with GCC and also uses APIs not available on earlier
-// systems which we support; backed out for now.
-
+// so it will be hit after only a few iterations.  These assumptions don't hold
+// on macOS 13+ anymore (dyld itself has moved into the shared cache).
 static mach_header *GetDyldImageHeaderViaVMRegion() {
   vm_address_t address = 0;
 
@@ -176,17 +171,64 @@ static mach_header *GetDyldImageHeaderViaVMRegion() {
   }
 }
 
+extern "C" {
+struct dyld_shared_cache_dylib_text_info {
+  uint64_t version;  // current version 2
+  // following fields all exist in version 1
+  uint64_t loadAddressUnslid;
+  uint64_t textSegmentSize;
+  uuid_t dylibUuid;
+  const char *path;  // pointer invalid at end of iterations
+  // following fields all exist in version 2
+  uint64_t textSegmentOffset;  // offset from start of cache
+};
+typedef struct dyld_shared_cache_dylib_text_info
+    dyld_shared_cache_dylib_text_info;
+
+extern bool _dyld_get_shared_cache_uuid(uuid_t uuid);
+extern const void *_dyld_get_shared_cache_range(size_t *length);
+extern int dyld_shared_cache_iterate_text(
+    const uuid_t cacheUuid,
+    void (^callback)(const dyld_shared_cache_dylib_text_info *info));
+}  // extern "C"
+
+static mach_header *GetDyldImageHeaderViaSharedCache() {
+  uuid_t uuid;
+  bool hasCache = _dyld_get_shared_cache_uuid(uuid);
+  if (!hasCache)
+    return nullptr;
+
+  size_t cacheLength;
+  __block uptr cacheStart = (uptr)_dyld_get_shared_cache_range(&cacheLength);
+  CHECK(cacheStart && cacheLength);
+
+  __block mach_header *dyldHdr = nullptr;
+  int res = dyld_shared_cache_iterate_text(
+      uuid, ^(const dyld_shared_cache_dylib_text_info *info) {
+        CHECK_GE(info->version, 2);
+        mach_header *hdr =
+            (mach_header *)(cacheStart + info->textSegmentOffset);
+        if (IsDyldHdr(hdr))
+          dyldHdr = hdr;
+      });
+  CHECK_EQ(res, 0);
+
+  return dyldHdr;
+}
+
 const mach_header *get_dyld_hdr() {
   if (!dyld_hdr) {
     // On macOS 13+, dyld itself has moved into the shared cache.  Looking it up
     // via vm_region_recurse_64() causes spins/hangs/crashes.
-    // FIXME: find a way to do this compatible with GCC.
     if (GetMacosAlignedVersion() >= MacosVersion(13, 0)) {
+      dyld_hdr = GetDyldImageHeaderViaSharedCache();
+      if (!dyld_hdr) {
         VReport(1,
-                "looking up the dyld image header in the shared cache on "
-                "macOS 13+ is not yet supported.  Falling back to "
+                "Failed to lookup the dyld image header in the shared cache on "
+                "macOS 13+ (or no shared cache in use).  Falling back to "
                 "lookup via vm_region_recurse_64().\n");
         dyld_hdr = GetDyldImageHeaderViaVMRegion();
+      }
     } else {
       dyld_hdr = GetDyldImageHeaderViaVMRegion();
     }
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cpp b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cpp
index 661495e2340..d24fae98213 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cpp
@@ -87,8 +87,8 @@ static inline uhwptr *GetCanonicFrame(uptr bp,
   // Nope, this does not look right either. This means the frame after next does
   // not have a valid frame pointer, but we can still extract the caller PC.
   // Unfortunately, there is no way to decide between GCC and LLVM frame
-  // layouts. Assume GCC.
-  return bp_prev - 1;
+  // layouts. Assume LLVM.
+  return bp_prev;
 #else
   return (uhwptr*)bp;
 #endif
@@ -111,21 +111,14 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
          IsAligned((uptr)frame, sizeof(*frame)) &&
          size < max_depth) {
 #ifdef __powerpc__
-    // PowerPC ABIs specify that the return address is saved on the
-    // *caller's* stack frame.  Thus we must dereference the back chain
-    // to find the caller frame before extracting it.
+    // PowerPC ABIs specify that the return address is saved at offset
+    // 16 of the *caller's* stack frame.  Thus we must dereference the
+    // back chain to find the caller frame before extracting it.
     uhwptr *caller_frame = (uhwptr*)frame[0];
     if (!IsValidFrame((uptr)caller_frame, stack_top, bottom) ||
         !IsAligned((uptr)caller_frame, sizeof(uhwptr)))
       break;
-    // For most ABIs the offset where the return address is saved is two
-    // register sizes.  The exception is the SVR4 ABI, which uses an
-    // offset of only one register size.
-#ifdef _CALL_SYSV
-    uhwptr pc1 = caller_frame[1];
-#else
     uhwptr pc1 = caller_frame[2];
-#endif
 #elif defined(__s390__)
     uhwptr pc1 = frame[14];
 #elif defined(__loongarch__) || defined(__riscv)
diff --git a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_mac.cpp b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_mac.cpp
index 87f5250db64..3ebeac52280 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_mac.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_mac.cpp
@@ -87,11 +87,13 @@ void StopTheWorld(StopTheWorldCallback callback, void *argument) {
 
 #if defined(__x86_64__)
 typedef x86_thread_state64_t regs_struct;
+#define regs_flavor x86_THREAD_STATE64
 
 #define SP_REG __rsp
 
 #elif defined(__aarch64__)
 typedef arm_thread_state64_t regs_struct;
+#define regs_flavor ARM_THREAD_STATE64
 
 # if __DARWIN_UNIX03
 #  define SP_REG __sp
@@ -101,6 +103,7 @@ typedef arm_thread_state64_t regs_struct;
 
 #elif defined(__i386)
 typedef x86_thread_state32_t regs_struct;
+#define regs_flavor x86_THREAD_STATE32
 
 #define SP_REG __esp
 
@@ -146,8 +149,8 @@ PtraceRegistersStatus SuspendedThreadsListMac::GetRegistersAndSP(
   thread_t thread = GetThread(index);
   regs_struct regs;
   int err;
-  mach_msg_type_number_t reg_count = MACHINE_THREAD_STATE_COUNT;
-  err = thread_get_state(thread, MACHINE_THREAD_STATE, (thread_state_t)&regs,
+  mach_msg_type_number_t reg_count = sizeof(regs) / sizeof(natural_t);
+  err = thread_get_state(thread, regs_flavor, (thread_state_t)&regs,
                          &reg_count);
   if (err != KERN_SUCCESS) {
     VReport(1, "Error - unable to get registers for a thread\n");
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cpp b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cpp
index 461fe966136..a6f82ced203 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cpp
@@ -256,6 +256,8 @@ class LLVMSymbolizerProcess final : public SymbolizerProcess {
     const char* const kSymbolizerArch = "--default-arch=x86_64";
 #elif defined(__i386__)
     const char* const kSymbolizerArch = "--default-arch=i386";
+#elif SANITIZER_LOONGARCH64
+    const char *const kSymbolizerArch = "--default-arch=loongarch64";
 #elif SANITIZER_RISCV64
     const char *const kSymbolizerArch = "--default-arch=riscv64";
 #elif defined(__aarch64__)
diff --git a/libsanitizer/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc b/libsanitizer/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc
index 97ca7f2f3f9..80f5e6be8ad 100644
--- a/libsanitizer/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc
+++ b/libsanitizer/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc
@@ -14,18 +14,22 @@
 // About local register variables:
 // https://gcc.gnu.org/onlinedocs/gcc/Local-Register-Variables.html#Local-Register-Variables
 //
-// Kernel ABI...
-//  syscall number is passed in a7
-//  (http://man7.org/linux/man-pages/man2/syscall.2.html) results are return in
-//  a0 and a1 (http://man7.org/linux/man-pages/man2/syscall.2.html) arguments
-//  are passed in: a0-a7 (confirmed by inspecting glibc sources).
+// Kernel ABI:
+// https://lore.kernel.org/loongarch/1f353678-3398-e30b-1c87-6edb278f74db@xen0n.name/T/#m1613bc86c2d7bf5f6da92bd62984302bfd699a2f
+//  syscall number is placed in a7
+//  parameters, if present, are placed in a0-a6
+//  upon return:
+//    the return value is placed in a0
+//    t0-t8 should be considered clobbered
+//    all other registers are preserved
 #define SYSCALL(name) __NR_##name
 
-#define INTERNAL_SYSCALL_CLOBBERS "memory"
+#define INTERNAL_SYSCALL_CLOBBERS \
+  "memory", "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8"
 
 static uptr __internal_syscall(u64 nr) {
-  register u64 a7 asm("a7") = nr;
-  register u64 a0 asm("a0");
+  register u64 a7 asm("$a7") = nr;
+  register u64 a0 asm("$a0");
   __asm__ volatile("syscall 0\n\t"
                    : "=r"(a0)
                    : "r"(a7)
@@ -35,8 +39,8 @@ static uptr __internal_syscall(u64 nr) {
 #define __internal_syscall0(n) (__internal_syscall)(n)
 
 static uptr __internal_syscall(u64 nr, u64 arg1) {
-  register u64 a7 asm("a7") = nr;
-  register u64 a0 asm("a0") = arg1;
+  register u64 a7 asm("$a7") = nr;
+  register u64 a0 asm("$a0") = arg1;
   __asm__ volatile("syscall 0\n\t"
                    : "+r"(a0)
                    : "r"(a7)
@@ -46,9 +50,9 @@ static uptr __internal_syscall(u64 nr, u64 arg1) {
 #define __internal_syscall1(n, a1) (__internal_syscall)(n, (u64)(a1))
 
 static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) {
-  register u64 a7 asm("a7") = nr;
-  register u64 a0 asm("a0") = arg1;
-  register u64 a1 asm("a1") = arg2;
+  register u64 a7 asm("$a7") = nr;
+  register u64 a0 asm("$a0") = arg1;
+  register u64 a1 asm("$a1") = arg2;
   __asm__ volatile("syscall 0\n\t"
                    : "+r"(a0)
                    : "r"(a7), "r"(a1)
@@ -59,10 +63,10 @@ static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) {
   (__internal_syscall)(n, (u64)(a1), (long)(a2))
 
 static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) {
-  register u64 a7 asm("a7") = nr;
-  register u64 a0 asm("a0") = arg1;
-  register u64 a1 asm("a1") = arg2;
-  register u64 a2 asm("a2") = arg3;
+  register u64 a7 asm("$a7") = nr;
+  register u64 a0 asm("$a0") = arg1;
+  register u64 a1 asm("$a1") = arg2;
+  register u64 a2 asm("$a2") = arg3;
   __asm__ volatile("syscall 0\n\t"
                    : "+r"(a0)
                    : "r"(a7), "r"(a1), "r"(a2)
@@ -74,11 +78,11 @@ static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) {
 
 static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
                                u64 arg4) {
-  register u64 a7 asm("a7") = nr;
-  register u64 a0 asm("a0") = arg1;
-  register u64 a1 asm("a1") = arg2;
-  register u64 a2 asm("a2") = arg3;
-  register u64 a3 asm("a3") = arg4;
+  register u64 a7 asm("$a7") = nr;
+  register u64 a0 asm("$a0") = arg1;
+  register u64 a1 asm("$a1") = arg2;
+  register u64 a2 asm("$a2") = arg3;
+  register u64 a3 asm("$a3") = arg4;
   __asm__ volatile("syscall 0\n\t"
                    : "+r"(a0)
                    : "r"(a7), "r"(a1), "r"(a2), "r"(a3)
@@ -90,12 +94,12 @@ static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
 
 static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
                                long arg5) {
-  register u64 a7 asm("a7") = nr;
-  register u64 a0 asm("a0") = arg1;
-  register u64 a1 asm("a1") = arg2;
-  register u64 a2 asm("a2") = arg3;
-  register u64 a3 asm("a3") = arg4;
-  register u64 a4 asm("a4") = arg5;
+  register u64 a7 asm("$a7") = nr;
+  register u64 a0 asm("$a0") = arg1;
+  register u64 a1 asm("$a1") = arg2;
+  register u64 a2 asm("$a2") = arg3;
+  register u64 a3 asm("$a3") = arg4;
+  register u64 a4 asm("$a4") = arg5;
   __asm__ volatile("syscall 0\n\t"
                    : "+r"(a0)
                    : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4)
@@ -108,13 +112,13 @@ static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
 
 static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
                                long arg5, long arg6) {
-  register u64 a7 asm("a7") = nr;
-  register u64 a0 asm("a0") = arg1;
-  register u64 a1 asm("a1") = arg2;
-  register u64 a2 asm("a2") = arg3;
-  register u64 a3 asm("a3") = arg4;
-  register u64 a4 asm("a4") = arg5;
-  register u64 a5 asm("a5") = arg6;
+  register u64 a7 asm("$a7") = nr;
+  register u64 a0 asm("$a0") = arg1;
+  register u64 a1 asm("$a1") = arg2;
+  register u64 a2 asm("$a2") = arg3;
+  register u64 a3 asm("$a3") = arg4;
+  register u64 a4 asm("$a4") = arg5;
+  register u64 a5 asm("$a5") = arg6;
   __asm__ volatile("syscall 0\n\t"
                    : "+r"(a0)
                    : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5)
@@ -127,14 +131,14 @@ static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
 
 static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
                                long arg5, long arg6, long arg7) {
-  register u64 a7 asm("a7") = nr;
-  register u64 a0 asm("a0") = arg1;
-  register u64 a1 asm("a1") = arg2;
-  register u64 a2 asm("a2") = arg3;
-  register u64 a3 asm("a3") = arg4;
-  register u64 a4 asm("a4") = arg5;
-  register u64 a5 asm("a5") = arg6;
-  register u64 a6 asm("a6") = arg7;
+  register u64 a7 asm("$a7") = nr;
+  register u64 a0 asm("$a0") = arg1;
+  register u64 a1 asm("$a1") = arg2;
+  register u64 a2 asm("$a2") = arg3;
+  register u64 a3 asm("$a3") = arg4;
+  register u64 a4 asm("$a4") = arg5;
+  register u64 a5 asm("$a5") = arg6;
+  register u64 a6 asm("$a6") = arg7;
   __asm__ volatile("syscall 0\n\t"
                    : "+r"(a0)
                    : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5),
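The register renaming above ($-prefixed names) and the widened clobber list follow the kernel calling convention linked in the comment. A hedged sketch of one concrete call written with the same local-register-variable pattern; it assumes a LoongArch64 Linux target with GCC or Clang, and is not part of the patch:

#include <asm/unistd.h>  // __NR_write
#include <cstdint>

static long raw_write(int fd, const void *buf, unsigned long len) {
  // Pin each argument to the register the kernel expects, then trap.
  register std::uint64_t a7 asm("$a7") = __NR_write;
  register std::uint64_t a0 asm("$a0") = static_cast<std::uint64_t>(fd);
  register std::uint64_t a1 asm("$a1") = reinterpret_cast<std::uint64_t>(buf);
  register std::uint64_t a2 asm("$a2") = len;
  __asm__ volatile("syscall 0"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2)
                   // t0-t8 are clobbered by the kernel; a0 carries the result.
                   : "memory", "$t0", "$t1", "$t2", "$t3", "$t4", "$t5",
                     "$t6", "$t7", "$t8");
  return static_cast<long>(a0);  // negative errno on failure
}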
diff --git a/libsanitizer/tsan/tsan_interceptors.h b/libsanitizer/tsan/tsan_interceptors.h
index 3091ad809c4..60fbc58f988 100644
--- a/libsanitizer/tsan/tsan_interceptors.h
+++ b/libsanitizer/tsan/tsan_interceptors.h
@@ -21,8 +21,9 @@ class ScopedInterceptor {
 
  private:
   ThreadState *const thr_;
-  bool in_ignored_lib_;
-  bool ignoring_;
+  bool in_ignored_lib_ = false;
+  bool in_blocking_func_ = false;
+  bool ignoring_ = false;
 
   void DisableIgnoresImpl();
   void EnableIgnoresImpl();
diff --git a/libsanitizer/tsan/tsan_interceptors_posix.cpp b/libsanitizer/tsan/tsan_interceptors_posix.cpp
index 17f6b1f472d..c557d5ddc6a 100644
--- a/libsanitizer/tsan/tsan_interceptors_posix.cpp
+++ b/libsanitizer/tsan/tsan_interceptors_posix.cpp
@@ -165,13 +165,26 @@ struct SignalDesc {
 
 struct ThreadSignalContext {
   int int_signal_send;
-  atomic_uintptr_t in_blocking_func;
   SignalDesc pending_signals[kSigCount];
   // emptyset and oldset are too big for stack.
   __sanitizer_sigset_t emptyset;
   __sanitizer_sigset_t oldset;
 };
 
+void EnterBlockingFunc(ThreadState *thr) {
+  for (;;) {
+    // The order is important to not delay a signal infinitely if it's
+    // delivered right before we set in_blocking_func. Note: we can't call
+    // ProcessPendingSignals when in_blocking_func is set, or we can handle
+    // a signal synchronously when we are already handling a signal.
+    atomic_store(&thr->in_blocking_func, 1, memory_order_relaxed);
+    if (atomic_load(&thr->pending_signals, memory_order_relaxed) == 0)
+      break;
+    atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
+    ProcessPendingSignals(thr);
+  }
+}
+
 // The sole reason tsan wraps atexit callbacks is to establish synchronization
 // between callback setup and callback execution.
 struct AtExitCtx {
@@ -245,8 +258,18 @@ static ThreadSignalContext *SigCtx(ThreadState *thr) {
 
 ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
                                      uptr pc)
-    : thr_(thr), in_ignored_lib_(false), ignoring_(false) {
+    : thr_(thr) {
   LazyInitialize(thr);
+  if (UNLIKELY(atomic_load(&thr->in_blocking_func, memory_order_relaxed))) {
+    // pthread_join is marked as blocking, but it's also known to call other
+    // intercepted functions (mmap, free). If we don't reset in_blocking_func
+    // we can get deadlocks and memory corruptions if we deliver a synchronous
+    // signal inside of an mmap/free interceptor.
+    // So reset it and restore it back in the destructor.
+    // See https://github.com/google/sanitizers/issues/1540
+    atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
+    in_blocking_func_ = true;
+  }
   if (!thr_->is_inited) return;
   if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
   DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
@@ -259,6 +282,8 @@ ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
 ScopedInterceptor::~ScopedInterceptor() {
   if (!thr_->is_inited) return;
   DisableIgnores();
+  if (UNLIKELY(in_blocking_func_))
+    EnterBlockingFunc(thr_);
   if (!thr_->ignore_interceptors) {
     ProcessPendingSignals(thr_);
     FuncExit(thr_);
@@ -321,15 +346,8 @@ void ScopedInterceptor::DisableIgnoresImpl() {
 
 struct BlockingCall {
   explicit BlockingCall(ThreadState *thr)
-      : thr(thr)
-      , ctx(SigCtx(thr)) {
-    for (;;) {
-      atomic_store(&ctx->in_blocking_func, 1, memory_order_relaxed);
-      if (atomic_load(&thr->pending_signals, memory_order_relaxed) == 0)
-        break;
-      atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
-      ProcessPendingSignals(thr);
-    }
+      : thr(thr) {
+    EnterBlockingFunc(thr);
     // When we are in a "blocking call", we process signals asynchronously
     // (right when they arrive). In this context we do not expect to be
     // executing any user/runtime code. The known interceptor sequence when
@@ -340,11 +358,10 @@ struct BlockingCall {
 
   ~BlockingCall() {
     thr->ignore_interceptors--;
-    atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
+    atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
   }
 
   ThreadState *thr;
-  ThreadSignalContext *ctx;
 };
 
 TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
@@ -517,9 +534,7 @@ static void SetJmp(ThreadState *thr, uptr sp) {
   buf->shadow_stack_pos = thr->shadow_stack_pos;
   ThreadSignalContext *sctx = SigCtx(thr);
   buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
-  buf->in_blocking_func = sctx ?
-      atomic_load(&sctx->in_blocking_func, memory_order_relaxed) :
-      false;
+  buf->in_blocking_func = atomic_load(&thr->in_blocking_func, memory_order_relaxed);
   buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
       memory_order_relaxed);
 }
@@ -535,11 +550,10 @@ static void LongJmp(ThreadState *thr, uptr *env) {
       while (thr->shadow_stack_pos > buf->shadow_stack_pos)
         FuncExit(thr);
       ThreadSignalContext *sctx = SigCtx(thr);
-      if (sctx) {
+      if (sctx)
         sctx->int_signal_send = buf->int_signal_send;
-        atomic_store(&sctx->in_blocking_func, buf->in_blocking_func,
-            memory_order_relaxed);
-      }
+      atomic_store(&thr->in_blocking_func, buf->in_blocking_func,
+          memory_order_relaxed);
       atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
           memory_order_relaxed);
       JmpBufGarbageCollect(thr, buf->sp - 1);  // do not collect buf->sp
@@ -1198,9 +1212,8 @@ void CondMutexUnlockCtx<Fn>::Unlock() const {
   // tsan code. Also ScopedInterceptor and BlockingCall destructors won't run
   // since the thread is cancelled, so we have to manually execute them
   // (the thread still can run some user code due to pthread_cleanup_push).
-  ThreadSignalContext *ctx = SigCtx(thr);
-  CHECK_EQ(atomic_load(&ctx->in_blocking_func, memory_order_relaxed), 1);
-  atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
+  CHECK_EQ(atomic_load(&thr->in_blocking_func, memory_order_relaxed), 1);
+  atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
   MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
   // Undo BlockingCall ctor effects.
   thr->ignore_interceptors--;
@@ -2089,12 +2102,12 @@ void sighandler(int sig, __sanitizer_siginfo *info, void *ctx) {
       // If we are in blocking function, we can safely process it now
       // (but check if we are in a recursive interceptor,
       // i.e. pthread_join()->munmap()).
-      (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed))) {
+      atomic_load(&thr->in_blocking_func, memory_order_relaxed)) {
     atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
-    if (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed)) {
-      atomic_store(&sctx->in_blocking_func, 0, memory_order_relaxed);
+    if (atomic_load(&thr->in_blocking_func, memory_order_relaxed)) {
+      atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
       CallUserSignalHandler(thr, sync, true, sig, info, ctx);
-      atomic_store(&sctx->in_blocking_func, 1, memory_order_relaxed);
+      atomic_store(&thr->in_blocking_func, 1, memory_order_relaxed);
     } else {
       // Be very conservative with when we do acquire in this case.
       // It's unsafe to do acquire in async handlers, because ThreadState
@@ -3029,7 +3042,9 @@ void InitializeInterceptors() {
 constexpr u32 kBarrierThreadBits = 10;
 constexpr u32 kBarrierThreads = 1 << kBarrierThreadBits;
 
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_init(
+extern "C" {
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_init(
     atomic_uint32_t *barrier, u32 num_threads) {
   if (num_threads >= kBarrierThreads) {
     Printf("barrier_init: count is too large (%d)\n", num_threads);
@@ -3044,7 +3059,7 @@ static u32 barrier_epoch(u32 value) {
   return (value >> kBarrierThreadBits) / (value & (kBarrierThreads - 1));
 }
 
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_wait(
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_wait(
     atomic_uint32_t *barrier) {
   u32 old = atomic_fetch_add(barrier, kBarrierThreads, memory_order_relaxed);
   u32 old_epoch = barrier_epoch(old);
@@ -3059,3 +3074,23 @@ extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_wait(
     FutexWait(barrier, cur);
   }
 }
+
+void *__tsan_memcpy(void *dst, const void *src, uptr size) {
+  void *ctx;
+#if PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE
+  COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
+#else
+  COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
+#endif
+}
+
+void *__tsan_memset(void *dst, int c, uptr size) {
+  void *ctx;
+  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, c, size);
+}
+
+void *__tsan_memmove(void *dst, const void *src, uptr size) {
+  void *ctx;
+  COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
+}
+}
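Moving in_blocking_func from ThreadSignalContext into ThreadState and factoring out EnterBlockingFunc is the core of the change above. A toy reimplementation of just that handshake (std::atomic instead of the sanitizer atomics, illustrative names, not TSan code) shows why the store/load order matters:

#include <atomic>

struct ToyThreadState {
  std::atomic<unsigned> in_blocking_func{0};
  std::atomic<unsigned> pending_signals{0};
};

static void ProcessPendingSignalsToy(ToyThreadState *thr) {
  thr->pending_signals.store(0, std::memory_order_relaxed);  // "handle" them
}

static void EnterBlockingFuncToy(ToyThreadState *thr) {
  for (;;) {
    // Publish the flag first, then re-check for signals: anything queued just
    // before the store is drained synchronously here instead of being delayed
    // for the whole blocking call.
    thr->in_blocking_func.store(1, std::memory_order_relaxed);
    if (thr->pending_signals.load(std::memory_order_relaxed) == 0)
      break;
    thr->in_blocking_func.store(0, std::memory_order_relaxed);
    ProcessPendingSignalsToy(thr);
  }
}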
diff --git a/libsanitizer/tsan/tsan_interface.h b/libsanitizer/tsan/tsan_interface.h
index 711f064174c..5b9d664e503 100644
--- a/libsanitizer/tsan/tsan_interface.h
+++ b/libsanitizer/tsan/tsan_interface.h
@@ -72,6 +72,13 @@ SANITIZER_INTERFACE_ATTRIBUTE void __tsan_vptr_read(void **vptr_p);
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_vptr_update(void **vptr_p, void *new_val);
 
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_memcpy(void *dest, const void *src, uptr count);
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_memset(void *dest, int ch, uptr count);
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_memmove(void *dest, const void *src, uptr count);
+
 SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_entry(void *call_pc);
 SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_exit();
 
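The three entry points declared above give code outside the interceptor machinery a direct way to report bulk memory accesses. A hypothetical caller, with uptr approximated as unsigned long and no claim that any particular compiler emits such calls on its own:

extern "C" void *__tsan_memcpy(void *dest, const void *src, unsigned long count);

// Links only when the TSan runtime is present; the copied bytes are then
// visible to the race detector like any other instrumented access.
void CopyUnderTsan(char *dst, const char *src, unsigned long n) {
  __tsan_memcpy(dst, src, n);
}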
diff --git a/libsanitizer/tsan/tsan_rtl.h b/libsanitizer/tsan/tsan_rtl.h
index e1e121e2ee0..f0918d86d4e 100644
--- a/libsanitizer/tsan/tsan_rtl.h
+++ b/libsanitizer/tsan/tsan_rtl.h
@@ -191,6 +191,7 @@ struct ThreadState {
 #if !SANITIZER_GO
   Vector<JmpBuf> jmp_bufs;
   int in_symbolizer;
+  atomic_uintptr_t in_blocking_func;
   bool in_ignored_lib;
   bool is_inited;
 #endif
@@ -627,6 +628,13 @@ class SlotLocker {
   ALWAYS_INLINE
   SlotLocker(ThreadState *thr, bool recursive = false)
       : thr_(thr), locked_(recursive ? thr->slot_locked : false) {
+#if !SANITIZER_GO
+    // We are in trouble if we are here with in_blocking_func set.
+    // If in_blocking_func is set, all signals will be delivered synchronously,
+    // which means we can't lock slots since the signal handler will try
+    // to lock it recursively and deadlock.
+    DCHECK(!atomic_load(&thr->in_blocking_func, memory_order_relaxed));
+#endif
     if (!locked_)
       SlotLock(thr_);
   }
@@ -670,8 +678,8 @@ ALWAYS_INLINE
 void LazyInitialize(ThreadState *thr) {
   // If we can use .preinit_array, assume that __tsan_init
   // called from .preinit_array initializes runtime before
-  // any instrumented code.
-#if !SANITIZER_CAN_USE_PREINIT_ARRAY
+  // any instrumented code except ANDROID.
+#if (!SANITIZER_CAN_USE_PREINIT_ARRAY || defined(__ANDROID__))
   if (UNLIKELY(!is_initialized))
     Initialize(thr);
 #endif
diff --git a/libsanitizer/tsan/tsan_rtl_ppc64.S b/libsanitizer/tsan/tsan_rtl_ppc64.S
index 9e533a71a9c..8285e21aa1e 100644
--- a/libsanitizer/tsan/tsan_rtl_ppc64.S
+++ b/libsanitizer/tsan/tsan_rtl_ppc64.S
@@ -1,6 +1,5 @@
 #include "tsan_ppc_regs.h"
 
-        .machine altivec
         .section .text
         .hidden __tsan_setjmp
         .globl _setjmp
diff --git a/libsanitizer/ubsan/ubsan_flags.cpp b/libsanitizer/ubsan/ubsan_flags.cpp
index 9a66bd37518..25cefd46ce2 100644
--- a/libsanitizer/ubsan/ubsan_flags.cpp
+++ b/libsanitizer/ubsan/ubsan_flags.cpp
@@ -50,7 +50,6 @@ void InitializeFlags() {
   {
     CommonFlags cf;
     cf.CopyFrom(*common_flags());
-    cf.print_summary = false;
     cf.external_symbolizer_path = GetFlag("UBSAN_SYMBOLIZER_PATH");
     OverrideCommonFlags(cf);
   }
diff --git a/libsanitizer/ubsan/ubsan_handlers.cpp b/libsanitizer/ubsan/ubsan_handlers.cpp
index 970075e69a6..410292a0d53 100644
--- a/libsanitizer/ubsan/ubsan_handlers.cpp
+++ b/libsanitizer/ubsan/ubsan_handlers.cpp
@@ -894,21 +894,6 @@ void __ubsan_handle_cfi_bad_type(CFICheckFailData *Data, ValueHandle Vtable,
 
 }  // namespace __ubsan
 
-void __ubsan::__ubsan_handle_cfi_bad_icall(CFIBadIcallData *CallData,
-                                           ValueHandle Function) {
-  GET_REPORT_OPTIONS(false);
-  CFICheckFailData Data = {CFITCK_ICall, CallData->Loc, CallData->Type};
-  handleCFIBadIcall(&Data, Function, Opts);
-}
-
-void __ubsan::__ubsan_handle_cfi_bad_icall_abort(CFIBadIcallData *CallData,
-                                                 ValueHandle Function) {
-  GET_REPORT_OPTIONS(true);
-  CFICheckFailData Data = {CFITCK_ICall, CallData->Loc, CallData->Type};
-  handleCFIBadIcall(&Data, Function, Opts);
-  Die();
-}
-
 void __ubsan::__ubsan_handle_cfi_check_fail(CFICheckFailData *Data,
                                             ValueHandle Value,
                                             uptr ValidVtable) {
diff --git a/libsanitizer/ubsan/ubsan_handlers.h b/libsanitizer/ubsan/ubsan_handlers.h
index 9f412353fc0..219fb15de55 100644
--- a/libsanitizer/ubsan/ubsan_handlers.h
+++ b/libsanitizer/ubsan/ubsan_handlers.h
@@ -215,20 +215,12 @@ enum CFITypeCheckKind : unsigned char {
   CFITCK_VMFCall,
 };
 
-struct CFIBadIcallData {
-  SourceLocation Loc;
-  const TypeDescriptor &Type;
-};
-
 struct CFICheckFailData {
   CFITypeCheckKind CheckKind;
   SourceLocation Loc;
   const TypeDescriptor &Type;
 };
 
-/// \brief Handle control flow integrity failure for indirect function calls.
-RECOVERABLE(cfi_bad_icall, CFIBadIcallData *Data, ValueHandle Function)
-
 /// \brief Handle control flow integrity failures.
 RECOVERABLE(cfi_check_fail, CFICheckFailData *Data, ValueHandle Function,
             uptr VtableIsValid)
diff --git a/libsanitizer/ubsan/ubsan_platform.h b/libsanitizer/ubsan/ubsan_platform.h
index ad3e883f0f3..d2cc2e10bd2 100644
--- a/libsanitizer/ubsan/ubsan_platform.h
+++ b/libsanitizer/ubsan/ubsan_platform.h
@@ -12,7 +12,6 @@
 #ifndef UBSAN_PLATFORM_H
 #define UBSAN_PLATFORM_H
 
-#ifndef CAN_SANITIZE_UB
 // Other platforms should be easy to add, and probably work as-is.
 #if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) ||        \
     defined(__NetBSD__) || defined(__DragonFly__) ||                           \
@@ -22,6 +21,5 @@
 #else
 # define CAN_SANITIZE_UB 0
 #endif
-#endif //CAN_SANITIZE_UB
 
 #endif
