* [glibc/release/2.30/master] x86: Increase `non_temporal_threshold` to roughly `sizeof_L3 / 4`
@ 2023-09-12  3:48 Noah Goldstein
From: Noah Goldstein @ 2023-09-12  3:48 UTC
  To: glibc-cvs

https://sourceware.org/git/gitweb.cgi?p=glibc.git;h=e8132392cd5733e269c2e8c9026716c95bc2b247

commit e8132392cd5733e269c2e8c9026716c95bc2b247
Author: Noah Goldstein <goldstein.w.n@gmail.com>
Date:   Thu Aug 10 12:13:26 2023 -0500

    x86: Increase `non_temporal_threshold` to roughly `sizeof_L3 / 4`
    
    Currently, `non_temporal_threshold` is set to roughly `3/4 * sizeof_L3 /
    ncores_per_socket`. This patch updates that value to roughly
    `sizeof_L3 / 4`.
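
    As a rough illustration (a simplified sketch; the names below are
    illustrative and not the actual cacheinfo.c identifiers):

        /* Old default: ~3/4 of each thread's share of L3.  */
        old_threshold = (sizeof_L3 * 3 / 4) / ncores_per_socket;
        /* New default: ~1/4 of the whole L3.  */
        new_threshold = sizeof_L3 / 4;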
    
    The original value (specifically the division by `ncores_per_socket`) was
    chosen to limit the amount of other threads' data a `memcpy`/`memset`
    could evict.
    
    Dividing by `ncores_per_socket`, however, leads to exceedingly low
    non-temporal thresholds, so non-temporal stores end up being used in
    cases where REP MOVSB is several times faster.
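
    As a hypothetical worked example (numbers chosen purely for
    illustration): on a part with a 36MB L3 shared by 16 cores, the old
    default comes out to roughly 3/4 * 36MB / 16 ~= 1.7MB, whereas the new
    default is 36MB / 4 = 9MB, so REP MOVSB keeps being used across the
    multi-megabyte copies where the linked benchmarks show it well ahead.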
    
    Furthermore, non-temporal stores are written directly to main memory,
    so using them at a size much smaller than L3 can place soon-to-be-
    accessed data much further away than it otherwise would be. As well,
    modern machines are able to detect streaming patterns (especially if
    REP MOVSB is used) and provide LRU hints to the memory subsystem. This
    in effect caps the total amount of eviction at 1/cache_associativity,
    far below meaningfully thrashing the entire cache.
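
    For example, on a hypothetical 16-way set-associative 32MB L3, a
    detected streaming copy could displace at most about 1/16 of the
    cache, i.e. roughly 2MB, rather than the full 32MB.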
    
    As best I can tell, the benchmarks that led to this small threshold
    were done comparing non-temporal stores versus standard cacheable
    stores. A better comparison (linked below) is against REP MOVSB, which,
    on the measured systems, is nearly 2x faster than non-temporal stores
    at the low end of the previous threshold, and within 10% even for
    copies over 100MB (well past even the current threshold). In cases with
    a low number of threads competing for bandwidth, REP MOVSB is ~2x
    faster up to `sizeof_L3`.
    
    The divisor of `4` is a somewhat arbitrary value. From benchmarks it
    seems Skylake and Icelake both prefer a divisor of `2`, but older CPUs
    such as Broadwell prefer something closer to `8`. This patch is meant
    to be followed up by another one to make the divisor cpu-specific, but
    in the meantime (and for easier backporting), this patch settles on
    `4` as a middle-ground.
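
    A minimal sketch of what such a per-microarch divisor could look like
    (the comment in the diff below names `cachesize_non_temporal_divisor`;
    everything else here, including the `cpu_is_*` flags, is a hypothetical
    placeholder, not actual glibc code):

        /* Hypothetical follow-up: choose the divisor per microarch.  */
        unsigned long int divisor = 4;          /* middle-ground default */
        if (cpu_is_skylake || cpu_is_icelake)
          divisor = 2;                          /* these prefer ~L3/2 */
        else if (cpu_is_broadwell)
          divisor = 8;                          /* older parts prefer ~L3/8 */
        non_temporal_threshold = sizeof_L3 / divisor;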
    
    Benchmarks comparing non-temporal stores, REP MOVSB, and cacheable
    stores were done using:
    https://github.com/goldsteinn/memcpy-nt-benchmarks
    
    Spreadsheet results (also available as a PDF in the GitHub repo):
    https://docs.google.com/spreadsheets/d/e/2PACX-1vS183r0rW_jRX6tG_E90m9qVuFiMbRIJvi5VAE8yYOvEOIEEc3aSNuEsrFbuXw5c3nGboxMmrupZD7K/pubhtml
    Reviewed-by: DJ Delorie <dj@redhat.com>
    Reviewed-by: Carlos O'Donell <carlos@redhat.com>
    
    (cherry picked from commit af992e7abdc9049714da76cae1e5e18bc4838fb8)

Diff:
---
 sysdeps/x86/cacheinfo.c | 76 +++++++++++++++++++++++++++++++------------------
 1 file changed, 49 insertions(+), 27 deletions(-)

diff --git a/sysdeps/x86/cacheinfo.c b/sysdeps/x86/cacheinfo.c
index 02c886c9cd..ac98741ac6 100644
--- a/sysdeps/x86/cacheinfo.c
+++ b/sysdeps/x86/cacheinfo.c
@@ -494,6 +494,7 @@ init_cacheinfo (void)
   int max_cpuid_ex;
   long int data = -1;
   long int shared = -1;
+  long int shared_per_thread = -1;
   unsigned int level;
   unsigned int threads = 0;
   const struct cpu_features *cpu_features = __get_cpu_features ();
@@ -509,7 +510,7 @@ init_cacheinfo (void)
       /* Try L3 first.  */
       level  = 3;
       shared = handle_intel (_SC_LEVEL3_CACHE_SIZE, cpu_features);
-
+      shared_per_thread = shared;
       /* Number of logical processors sharing L2 cache.  */
       int threads_l2;
 
@@ -521,6 +522,7 @@ init_cacheinfo (void)
 	  /* Try L2 otherwise.  */
 	  level  = 2;
 	  shared = core;
+      shared_per_thread = core;
 	  threads_l2 = 0;
 	  threads_l3 = -1;
 	}
@@ -677,26 +679,25 @@ init_cacheinfo (void)
 	    }
 	  else
 	    {
-intel_bug_no_cache_info:
+	    intel_bug_no_cache_info:
 	      /* Assume that all logical threads share the highest cache
 		 level.  */
 
-	      threads
-		= ((cpu_features->cpuid[COMMON_CPUID_INDEX_1].ebx
-		    >> 16) & 0xff);
-	    }
+	      threads = ((cpu_features->cpuid[COMMON_CPUID_INDEX_1].ebx >> 16)
+			 & 0xff);
 
-	  /* Cap usage of highest cache level to the number of supported
-	     threads.  */
-	  if (shared > 0 && threads > 0)
-	    shared /= threads;
+	      /* Cap usage of highest cache level to the number of supported
+		 threads.  */
+	      if (shared_per_thread > 0 && threads > 0)
+		shared_per_thread /= threads;
+	    }
 	}
 
       /* Account for non-inclusive L2 and L3 caches.  */
       if (!inclusive_cache)
 	{
-	  if (threads_l2 > 0)
-	    core /= threads_l2;
+      if (threads_l2 > 0)
+	shared_per_thread += core / threads_l2;
 	  shared += core;
 	}
     }
@@ -705,13 +706,17 @@ intel_bug_no_cache_info:
       data   = handle_amd (_SC_LEVEL1_DCACHE_SIZE);
       long int core = handle_amd (_SC_LEVEL2_CACHE_SIZE);
       shared = handle_amd (_SC_LEVEL3_CACHE_SIZE);
+      shared_per_thread = shared;
 
       /* Get maximum extended function. */
       __cpuid (0x80000000, max_cpuid_ex, ebx, ecx, edx);
 
       if (shared <= 0)
-	/* No shared L3 cache.  All we have is the L2 cache.  */
-	shared = core;
+	{
+	  /* No shared L3 cache.  All we have is the L2 cache.  */
+	  shared = core;
+	  shared_per_thread = core;
+	}
       else
 	{
 	  /* Figure out the number of logical threads that share L3.  */
@@ -735,10 +740,11 @@ intel_bug_no_cache_info:
 	  /* Cap usage of highest cache level to the number of
 	     supported threads.  */
 	  if (threads > 0)
-	    shared /= threads;
+	    shared_per_thread /= threads;
 
 	  /* Account for exclusive L2 and L3 caches.  */
 	  shared += core;
+	  shared_per_thread += core;
 	}
 
 #ifndef DISABLE_PREFETCHW
@@ -766,26 +772,42 @@ intel_bug_no_cache_info:
     }
 
   if (cpu_features->shared_cache_size != 0)
-    shared = cpu_features->shared_cache_size;
+    shared_per_thread = cpu_features->shared_cache_size;
 
-  if (shared > 0)
+  if (shared_per_thread > 0)
     {
-      __x86_raw_shared_cache_size_half = shared / 2;
-      __x86_raw_shared_cache_size = shared;
+      __x86_raw_shared_cache_size_half = shared_per_thread / 2;
+      __x86_raw_shared_cache_size = shared_per_thread;
       /* Round shared cache size to multiple of 256 bytes.  */
-      shared = shared & ~255L;
-      __x86_shared_cache_size_half = shared / 2;
-      __x86_shared_cache_size = shared;
+      shared_per_thread = shared_per_thread & ~255L;
+      __x86_shared_cache_size_half = shared_per_thread / 2;
+      __x86_shared_cache_size = shared_per_thread;
     }
 
-  /* The large memcpy micro benchmark in glibc shows that 6 times of
-     shared cache size is the approximate value above which non-temporal
-     store becomes faster on a 8-core processor.  This is the 3/4 of the
-     total shared cache size.  */
+  /* The default setting for the non_temporal threshold is [1/8, 1/2] of the
+     size of the chip's cache (depending on `cachesize_non_temporal_divisor`,
+     which is microarch specific; the default is 1/4). For most Intel
+     processors with an initial release date between 2017 and 2023, a
+     thread's typical share of the cache is 18-64MB. Using a reasonable
+     fraction of L3 is meant to estimate the point where non-temporal stores
+     begin out-competing REP MOVSB, as well as the point where most of the
+     lines in the copy would already have been forced back to main memory
+     anyway. Note, concerns about the entire L3 cache being evicted by the
+     copy are mostly alleviated by the fact that modern HW detects streaming
+     patterns and provides proper LRU hints, so that the maximum thrashing
+     is capped at 1/associativity. */
+  unsigned long int non_temporal_threshold = shared / 4;
+  /* If no ERMS, we use the per-thread L3 chunking. Normal cacheable stores run
+     a higher risk of actually thrashing the cache as they don't have a HW LRU
+     hint. As well, their performance in highly parallel situations is
+     noticeably worse.  */
+  if (!CPU_FEATURES_CPU_P (cpu_features, ERMS))
+    non_temporal_threshold = shared_per_thread * 3 / 4;
+
   __x86_shared_non_temporal_threshold
     = (cpu_features->non_temporal_threshold != 0
        ? cpu_features->non_temporal_threshold
-       : __x86_shared_cache_size * threads * 3 / 4);
+       : non_temporal_threshold);
 }
 
 #endif
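
If the computed default is not right for a given system, the threshold can
also be overridden at run time through the tunable that feeds
cpu_features->non_temporal_threshold above (assuming this build exposes the
glibc.cpu.x86_non_temporal_threshold tunable), e.g.
GLIBC_TUNABLES=glibc.cpu.x86_non_temporal_threshold=16777216 ./app.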
