public inbox for libc-alpha@sourceware.org
From: Adhemerval Zanella <adhemerval.zanella@linaro.org>
To: "H.J. Lu" <hjl.tools@gmail.com>, libc-alpha@sourceware.org
Cc: Florian Weimer <fweimer@redhat.com>
Subject: Re: V4 [PATCH 1/2] x86: Move x86 processor cache info to cpu_features
Date: Thu, 14 Jan 2021 11:13:16 -0300	[thread overview]
Message-ID: <42ca59b0-47af-6e46-03a1-2bb2ab7dfdc1@linaro.org> (raw)
In-Reply-To: <20201031154437.2689427-2-hjl.tools@gmail.com>



On 31/10/2020 12:44, H.J. Lu via Libc-alpha wrote:
> 1. Move x86 processor cache info to _dl_x86_cpu_features in ld.so.
> 2. Update tunable bounds with TUNABLE_SET_WITH_BOUNDS.
> 3. Move x86 cache info initialization to dl-cacheinfo.h and initialize
> x86 cache info in init_cpu_features ().
> 4. Put x86 cache info for libc in cacheinfo.h, which is included in
> libc-start.c in libc.a and is included in cacheinfo.c in libc.so.

Patch looks ok for 2.33 with a small nit below regarding an implicit check.

I have checked with some build variations (default and static-pie)
and it shows no regressions. I saw that --enable-tunables=no is not
building anymore, but that is due to another patch.

Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>

> ---
>  sysdeps/x86/cacheinfo.c            |  46 ++-
>  sysdeps/x86/cacheinfo.h            | 400 ++-----------------------
>  sysdeps/x86/cpu-features.c         |  35 +--
>  sysdeps/x86/dl-cacheinfo.h         | 460 +++++++++++++++++++++++++++++
>  sysdeps/x86/include/cpu-features.h |  22 ++
>  5 files changed, 551 insertions(+), 412 deletions(-)
> 
> diff --git a/sysdeps/x86/cacheinfo.c b/sysdeps/x86/cacheinfo.c
> index 0d2fe3a2fa..e1ea4d5228 100644
> --- a/sysdeps/x86/cacheinfo.c
> +++ b/sysdeps/x86/cacheinfo.c
> @@ -18,11 +18,8 @@
>  
>  #if IS_IN (libc)
>  
> -#include <assert.h>
>  #include <unistd.h>
> -#include <cpuid.h>
>  #include <ldsodefs.h>
> -#include <dl-cacheinfo.h>
>  
>  /* Get the value of the system variable NAME.  */
>  long int
> @@ -30,20 +27,45 @@ attribute_hidden
>  __cache_sysconf (int name)
>  {
>    const struct cpu_features *cpu_features = __get_cpu_features ();
> +  switch (name)
> +    {
> +    case _SC_LEVEL1_ICACHE_SIZE:
> +      return cpu_features->level1_icache_size;
>  
> -  if (cpu_features->basic.kind == arch_kind_intel)
> -    return handle_intel (name, cpu_features);
> +    case _SC_LEVEL1_DCACHE_SIZE:
> +      return cpu_features->level1_dcache_size;
>  
> -  if (cpu_features->basic.kind == arch_kind_amd)
> -    return handle_amd (name);
> +    case _SC_LEVEL1_DCACHE_ASSOC:
> +      return cpu_features->level1_dcache_assoc;
>  
> -  if (cpu_features->basic.kind == arch_kind_zhaoxin)
> -    return handle_zhaoxin (name);
> +    case _SC_LEVEL1_DCACHE_LINESIZE:
> +      return cpu_features->level1_dcache_linesize;
>  
> -  // XXX Fill in more vendors.
> +    case _SC_LEVEL2_CACHE_SIZE:
> +      return cpu_features->level2_cache_size;
>  
> -  /* CPU not known, we have no information.  */
> -  return 0;
> +    case _SC_LEVEL2_CACHE_ASSOC:
> +      return cpu_features->level2_cache_assoc;
> +
> +    case _SC_LEVEL2_CACHE_LINESIZE:
> +      return cpu_features->level2_cache_linesize;
> +
> +    case _SC_LEVEL3_CACHE_SIZE:
> +      return cpu_features->level3_cache_size;
> +
> +    case _SC_LEVEL3_CACHE_ASSOC:
> +      return cpu_features->level3_cache_assoc;
> +
> +    case _SC_LEVEL3_CACHE_LINESIZE:
> +      return cpu_features->level3_cache_linesize;
> +
> +    case _SC_LEVEL4_CACHE_SIZE:
> +      return cpu_features->level4_cache_size;
> +
> +    default:
> +      break;
> +    }
> +  return -1;
>  }
>  
>  # ifdef SHARED

Ok, so now it handles the cache-related names from _SC_LEVEL1_ICACHE_SIZE
up to _SC_LEVEL4_CACHE_SIZE and returns -1 otherwise.  It aligns with the
sysconf interface.
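
Just to illustrate the user-visible contract (a minimal sketch, not new
API): callers go through sysconf and should treat a non-positive return
as "no information", e.g.:

  #include <unistd.h>

  long int linesize = sysconf (_SC_LEVEL1_DCACHE_LINESIZE);
  if (linesize <= 0)
    /* Unknown CPU or unsupported name; 64 here is just an example
       fallback, not something the patch mandates.  */
    linesize = 64;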

> diff --git a/sysdeps/x86/cacheinfo.h b/sysdeps/x86/cacheinfo.h
> index 0aec0e2875..5aa40b45b5 100644
> --- a/sysdeps/x86/cacheinfo.h
> +++ b/sysdeps/x86/cacheinfo.h
> @@ -18,7 +18,16 @@
>  
>  #include <assert.h>
>  #include <unistd.h>
> +#include <cpuid.h>
> +#include <cpu-features.h>
>  
> +#if HAVE_TUNABLES
> +# define TUNABLE_NAMESPACE cpu
> +# include <unistd.h>		/* Get STDOUT_FILENO for _dl_printf.  */
> +# include <elf/dl-tunables.h>
> +#endif
> +
> +#if IS_IN (libc)
>  /* Data cache size for use in memory and string routines, typically
>     L1 size, rounded to multiple of 256 bytes.  */
>  long int __x86_data_cache_size_half attribute_hidden = 32 * 1024 / 2;

Ok.

> @@ -45,385 +54,30 @@ long int __x86_rep_movsb_threshold attribute_hidden = 2048;
>  /* Threshold to use Enhanced REP STOSB.  */
>  long int __x86_rep_stosb_threshold attribute_hidden = 2048;
>  
> -static void
> -get_common_cache_info (long int *shared_ptr, unsigned int *threads_ptr,
> -		       long int core)
> -{
> -  unsigned int eax;
> -  unsigned int ebx;
> -  unsigned int ecx;
> -  unsigned int edx;
> -
> -  /* Number of logical processors sharing L2 cache.  */
> -  int threads_l2;
> -
> -  /* Number of logical processors sharing L3 cache.  */
> -  int threads_l3;
> -
> -  const struct cpu_features *cpu_features = __get_cpu_features ();
> -  int max_cpuid = cpu_features->basic.max_cpuid;
> -  unsigned int family = cpu_features->basic.family;
> -  unsigned int model = cpu_features->basic.model;
> -  long int shared = *shared_ptr;
> -  unsigned int threads = *threads_ptr;
> -  bool inclusive_cache = true;
> -  bool support_count_mask = true;
> -
> -  /* Try L3 first.  */
> -  unsigned int level = 3;
> -
> -  if (cpu_features->basic.kind == arch_kind_zhaoxin && family == 6)
> -    support_count_mask = false;
> -
> -  if (shared <= 0)
> -    {
> -      /* Try L2 otherwise.  */
> -      level  = 2;
> -      shared = core;
> -      threads_l2 = 0;
> -      threads_l3 = -1;
> -    }
> -  else
> -    {
> -      threads_l2 = 0;
> -      threads_l3 = 0;
> -    }
> -
> -  /* A value of 0 for the HTT bit indicates there is only a single
> -     logical processor.  */
> -  if (HAS_CPU_FEATURE (HTT))
> -    {
> -      /* Figure out the number of logical threads that share the
> -         highest cache level.  */
> -      if (max_cpuid >= 4)
> -        {
> -          int i = 0;
> -
> -          /* Query until cache level 2 and 3 are enumerated.  */
> -          int check = 0x1 | (threads_l3 == 0) << 1;
> -          do
> -            {
> -              __cpuid_count (4, i++, eax, ebx, ecx, edx);
> -
> -              /* There seems to be a bug in at least some Pentium Ds
> -                 which sometimes fail to iterate all cache parameters.
> -                 Do not loop indefinitely here, stop in this case and
> -                 assume there is no such information.  */
> -              if (cpu_features->basic.kind == arch_kind_intel
> -                  && (eax & 0x1f) == 0 )
> -                goto intel_bug_no_cache_info;
> -
> -              switch ((eax >> 5) & 0x7)
> -                {
> -                  default:
> -                    break;
> -                  case 2:
> -                    if ((check & 0x1))
> -                      {
> -                        /* Get maximum number of logical processors
> -                           sharing L2 cache.  */
> -                        threads_l2 = (eax >> 14) & 0x3ff;
> -                        check &= ~0x1;
> -                      }
> -                    break;
> -                  case 3:
> -                    if ((check & (0x1 << 1)))
> -                      {
> -                        /* Get maximum number of logical processors
> -                           sharing L3 cache.  */
> -                        threads_l3 = (eax >> 14) & 0x3ff;
> -
> -                        /* Check if L2 and L3 caches are inclusive.  */
> -                        inclusive_cache = (edx & 0x2) != 0;
> -                        check &= ~(0x1 << 1);
> -                      }
> -                    break;
> -                }
> -            }
> -          while (check);
> -
> -          /* If max_cpuid >= 11, THREADS_L2/THREADS_L3 are the maximum
> -             numbers of addressable IDs for logical processors sharing
> -             the cache, instead of the maximum number of threads
> -             sharing the cache.  */
> -          if (max_cpuid >= 11 && support_count_mask)
> -            {
> -              /* Find the number of logical processors shipped in
> -                 one core and apply count mask.  */
> -              i = 0;
> -
> -              /* Count SMT only if there is L3 cache.  Always count
> -                 core if there is no L3 cache.  */
> -              int count = ((threads_l2 > 0 && level == 3)
> -                           | ((threads_l3 > 0
> -                               || (threads_l2 > 0 && level == 2)) << 1));
> -
> -              while (count)
> -                {
> -                  __cpuid_count (11, i++, eax, ebx, ecx, edx);
> -
> -                  int shipped = ebx & 0xff;
> -                  int type = ecx & 0xff00;
> -                  if (shipped == 0 || type == 0)
> -                    break;
> -                  else if (type == 0x100)
> -                    {
> -                      /* Count SMT.  */
> -                      if ((count & 0x1))
> -                        {
> -                          int count_mask;
> -
> -                          /* Compute count mask.  */
> -                          asm ("bsr %1, %0"
> -                               : "=r" (count_mask) : "g" (threads_l2));
> -                          count_mask = ~(-1 << (count_mask + 1));
> -                          threads_l2 = (shipped - 1) & count_mask;
> -                          count &= ~0x1;
> -                        }
> -                    }
> -                  else if (type == 0x200)
> -                    {
> -                      /* Count core.  */
> -                      if ((count & (0x1 << 1)))
> -                        {
> -                          int count_mask;
> -                          int threads_core
> -                            = (level == 2 ? threads_l2 : threads_l3);
> -
> -                          /* Compute count mask.  */
> -                          asm ("bsr %1, %0"
> -                               : "=r" (count_mask) : "g" (threads_core));
> -                          count_mask = ~(-1 << (count_mask + 1));
> -                          threads_core = (shipped - 1) & count_mask;
> -                          if (level == 2)
> -                            threads_l2 = threads_core;
> -                          else
> -                            threads_l3 = threads_core;
> -                          count &= ~(0x1 << 1);
> -                        }
> -                    }
> -                }
> -            }
> -          if (threads_l2 > 0)
> -            threads_l2 += 1;
> -          if (threads_l3 > 0)
> -            threads_l3 += 1;
> -          if (level == 2)
> -            {
> -              if (threads_l2)
> -                {
> -                  threads = threads_l2;
> -                  if (cpu_features->basic.kind == arch_kind_intel
> -                      && threads > 2
> -                      && family == 6)
> -                    switch (model)
> -                      {
> -                        case 0x37:
> -                        case 0x4a:
> -                        case 0x4d:
> -                        case 0x5a:
> -                        case 0x5d:
> -                          /* Silvermont has L2 cache shared by 2 cores.  */
> -                          threads = 2;
> -                          break;
> -                        default:
> -                          break;
> -                      }
> -                }
> -            }
> -          else if (threads_l3)
> -            threads = threads_l3;
> -        }
> -      else
> -        {
> -intel_bug_no_cache_info:
> -          /* Assume that all logical threads share the highest cache
> -             level.  */
> -          threads
> -            = ((cpu_features->features[COMMON_CPUID_INDEX_1].cpuid.ebx
> -                >> 16) & 0xff);
> -        }
> -
> -        /* Cap usage of highest cache level to the number of supported
> -           threads.  */
> -        if (shared > 0 && threads > 0)
> -          shared /= threads;
> -    }
> -
> -  /* Account for non-inclusive L2 and L3 caches.  */
> -  if (!inclusive_cache)
> -    {
> -      if (threads_l2 > 0)
> -        core /= threads_l2;
> -      shared += core;
> -    }
> -
> -  *shared_ptr = shared;
> -  *threads_ptr = threads;
> -}
> -
>  static void
>  init_cacheinfo (void)
>  {
> -  /* Find out what brand of processor.  */
> -  unsigned int ebx;
> -  unsigned int ecx;
> -  unsigned int edx;
> -  int max_cpuid_ex;
> -  long int data = -1;
> -  long int shared = -1;
> -  long int core;
> -  unsigned int threads = 0;
>    const struct cpu_features *cpu_features = __get_cpu_features ();
> +  long int data = cpu_features->data_cache_size;
> +  __x86_raw_data_cache_size_half = data / 2;
> +  __x86_raw_data_cache_size = data;
> +  /* Round data cache size to multiple of 256 bytes.  */
> +  data = data & ~255L;
> +  __x86_data_cache_size_half = data / 2;
> +  __x86_data_cache_size = data;
> +
> +  long int shared = cpu_features->shared_cache_size;
> +  __x86_raw_shared_cache_size_half = shared / 2;
> +  __x86_raw_shared_cache_size = shared;
> +  /* Round shared cache size to multiple of 256 bytes.  */
> +  shared = shared & ~255L;
> +  __x86_shared_cache_size_half = shared / 2;
> +  __x86_shared_cache_size = shared;
>  
> -  /* NB: In libc.so, cpu_features is defined in ld.so and is initialized
> -     by DL_PLATFORM_INIT or IFUNC relocation before init_cacheinfo is
> -     called by IFUNC relocation.  In libc.a, init_cacheinfo is called
> -     from init_cpu_features by ARCH_INIT_CPU_FEATURES.  */
> -  assert (cpu_features->basic.kind != arch_kind_unknown);
> -
> -  if (cpu_features->basic.kind == arch_kind_intel)
> -    {
> -      data = handle_intel (_SC_LEVEL1_DCACHE_SIZE, cpu_features);
> -      core = handle_intel (_SC_LEVEL2_CACHE_SIZE, cpu_features);
> -      shared = handle_intel (_SC_LEVEL3_CACHE_SIZE, cpu_features);
> -
> -      get_common_cache_info (&shared, &threads, core);
> -    }
> -  else if (cpu_features->basic.kind == arch_kind_zhaoxin)
> -    {
> -      data = handle_zhaoxin (_SC_LEVEL1_DCACHE_SIZE);
> -      core = handle_zhaoxin (_SC_LEVEL2_CACHE_SIZE);
> -      shared = handle_zhaoxin (_SC_LEVEL3_CACHE_SIZE);
> -
> -      get_common_cache_info (&shared, &threads, core);
> -    }
> -  else if (cpu_features->basic.kind == arch_kind_amd)
> -    {
> -      data   = handle_amd (_SC_LEVEL1_DCACHE_SIZE);
> -      long int core = handle_amd (_SC_LEVEL2_CACHE_SIZE);
> -      shared = handle_amd (_SC_LEVEL3_CACHE_SIZE);
> -
> -      /* Get maximum extended function. */
> -      __cpuid (0x80000000, max_cpuid_ex, ebx, ecx, edx);
> -
> -      if (shared <= 0)
> -	/* No shared L3 cache.  All we have is the L2 cache.  */
> -	shared = core;
> -      else
> -	{
> -	  /* Figure out the number of logical threads that share L3.  */
> -	  if (max_cpuid_ex >= 0x80000008)
> -	    {
> -	      /* Get width of APIC ID.  */
> -	      __cpuid (0x80000008, max_cpuid_ex, ebx, ecx, edx);
> -	      threads = 1 << ((ecx >> 12) & 0x0f);
> -	    }
> -
> -	  if (threads == 0 || cpu_features->basic.family >= 0x17)
> -	    {
> -	      /* If APIC ID width is not available, use logical
> -		 processor count.  */
> -	      __cpuid (0x00000001, max_cpuid_ex, ebx, ecx, edx);
> -
> -	      if ((edx & (1 << 28)) != 0)
> -		threads = (ebx >> 16) & 0xff;
> -	    }
> -
> -	  /* Cap usage of highest cache level to the number of
> -	     supported threads.  */
> -	  if (threads > 0)
> -	    shared /= threads;
> -
> -	  /* Get shared cache per ccx for Zen architectures.  */
> -	  if (cpu_features->basic.family >= 0x17)
> -	    {
> -	      unsigned int eax;
> -
> -	      /* Get number of threads share the L3 cache in CCX.  */
> -	      __cpuid_count (0x8000001D, 0x3, eax, ebx, ecx, edx);
> -
> -	      unsigned int threads_per_ccx = ((eax >> 14) & 0xfff) + 1;
> -	      shared *= threads_per_ccx;
> -	    }
> -	  else
> -	    {
> -	      /* Account for exclusive L2 and L3 caches.  */
> -	      shared += core;
> -            }
> -      }
> -    }
> -
> -  /* Prefer cache size configure via tuning.  */
> -  if (cpu_features->data_cache_size != 0)
> -    data = cpu_features->data_cache_size;
> -
> -  if (data > 0)
> -    {
> -      __x86_raw_data_cache_size_half = data / 2;
> -      __x86_raw_data_cache_size = data;
> -      /* Round data cache size to multiple of 256 bytes.  */
> -      data = data & ~255L;
> -      __x86_data_cache_size_half = data / 2;
> -      __x86_data_cache_size = data;
> -    }
> -
> -  /* Prefer cache size configure via tuning.  */
> -  if (cpu_features->shared_cache_size != 0)
> -    shared = cpu_features->shared_cache_size;
> -
> -  if (shared > 0)
> -    {
> -      __x86_raw_shared_cache_size_half = shared / 2;
> -      __x86_raw_shared_cache_size = shared;
> -      /* Round shared cache size to multiple of 256 bytes.  */
> -      shared = shared & ~255L;
> -      __x86_shared_cache_size_half = shared / 2;
> -      __x86_shared_cache_size = shared;
> -    }
> -
> -  /* The default setting for the non_temporal threshold is 3/4 of one
> -     thread's share of the chip's cache. For most Intel and AMD processors
> -     with an initial release date between 2017 and 2020, a thread's typical
> -     share of the cache is from 500 KBytes to 2 MBytes. Using the 3/4
> -     threshold leaves 125 KBytes to 500 KBytes of the thread's data
> -     in cache after a maximum temporal copy, which will maintain
> -     in cache a reasonable portion of the thread's stack and other
> -     active data. If the threshold is set higher than one thread's
> -     share of the cache, it has a substantial risk of negatively
> -     impacting the performance of other threads running on the chip. */
>    __x86_shared_non_temporal_threshold
> -    = (cpu_features->non_temporal_threshold != 0
> -       ? cpu_features->non_temporal_threshold
> -       : __x86_shared_cache_size * 3 / 4);
> -
> -  /* NB: The REP MOVSB threshold must be greater than VEC_SIZE * 8.  */
> -  unsigned int minimum_rep_movsb_threshold;
> -  /* NB: The default REP MOVSB threshold is 2048 * (VEC_SIZE / 16).  */
> -  unsigned int rep_movsb_threshold;
> -  if (CPU_FEATURE_USABLE_P (cpu_features, AVX512F)
> -      && !CPU_FEATURE_PREFERRED_P (cpu_features, Prefer_No_AVX512))
> -    {
> -      rep_movsb_threshold = 2048 * (64 / 16);
> -      minimum_rep_movsb_threshold = 64 * 8;
> -    }
> -  else if (CPU_FEATURE_PREFERRED_P (cpu_features,
> -				    AVX_Fast_Unaligned_Load))
> -    {
> -      rep_movsb_threshold = 2048 * (32 / 16);
> -      minimum_rep_movsb_threshold = 32 * 8;
> -    }
> -  else
> -    {
> -      rep_movsb_threshold = 2048 * (16 / 16);
> -      minimum_rep_movsb_threshold = 16 * 8;
> -    }
> -  if (cpu_features->rep_movsb_threshold > minimum_rep_movsb_threshold)
> -    __x86_rep_movsb_threshold = cpu_features->rep_movsb_threshold;
> -  else
> -    __x86_rep_movsb_threshold = rep_movsb_threshold;
> +    = cpu_features->non_temporal_threshold;
>  
> -# if HAVE_TUNABLES
> +  __x86_rep_movsb_threshold = cpu_features->rep_movsb_threshold;
>    __x86_rep_stosb_threshold = cpu_features->rep_stosb_threshold;
> -# endif
>  }
> +#endif

Ok, this is just refactoring: init_cacheinfo now only copies the values
already computed into cpu_features.

> diff --git a/sysdeps/x86/cpu-features.c b/sysdeps/x86/cpu-features.c
> index f26deba38d..51c12d89ca 100644
> --- a/sysdeps/x86/cpu-features.c
> +++ b/sysdeps/x86/cpu-features.c
> @@ -16,21 +16,12 @@
>     License along with the GNU C Library; if not, see
>     <https://www.gnu.org/licenses/>.  */
>  
> -#include <cpuid.h>
>  #include <dl-hwcap.h>
>  #include <libc-pointer-arith.h>
> -#if IS_IN (libc) && !defined SHARED
> -# include <assert.h>
> -# include <unistd.h>
> -# include <dl-cacheinfo.h>
> -# include <cacheinfo.h>
> -#endif
> +#include <cacheinfo.h>
> +#include <dl-cacheinfo.h>
>  
>  #if HAVE_TUNABLES
> -# define TUNABLE_NAMESPACE cpu
> -# include <unistd.h>		/* Get STDOUT_FILENO for _dl_printf.  */
> -# include <elf/dl-tunables.h>
> -
>  extern void TUNABLE_CALLBACK (set_hwcaps) (tunable_val_t *)
>    attribute_hidden;
>  

Ok.

> @@ -642,24 +633,14 @@ no_cpuid:
>    cpu_features->basic.model = model;
>    cpu_features->basic.stepping = stepping;
>  
> +  dl_init_cacheinfo (cpu_features);
> +
>  #if HAVE_TUNABLES
>    TUNABLE_GET (hwcaps, tunable_val_t *, TUNABLE_CALLBACK (set_hwcaps));
> -  cpu_features->non_temporal_threshold
> -    = TUNABLE_GET (x86_non_temporal_threshold, long int, NULL);
> -  cpu_features->rep_movsb_threshold
> -    = TUNABLE_GET (x86_rep_movsb_threshold, long int, NULL);
> -  cpu_features->rep_stosb_threshold
> -    = TUNABLE_GET (x86_rep_stosb_threshold, long int, NULL);
> -  cpu_features->data_cache_size
> -    = TUNABLE_GET (x86_data_cache_size, long int, NULL);
> -  cpu_features->shared_cache_size
> -    = TUNABLE_GET (x86_shared_cache_size, long int, NULL);
> -#endif
> -
> -  /* Reuse dl_platform, dl_hwcap and dl_hwcap_mask for x86.  */
> -#if !HAVE_TUNABLES && defined SHARED
> -  /* The glibc.cpu.hwcap_mask tunable is initialized already, so no need to do
> -     this.  */
> +#elif defined SHARED
> +  /* Reuse dl_platform, dl_hwcap and dl_hwcap_mask for x86.  The
> +     glibc.cpu.hwcap_mask tunable is initialized already, so no
> +     need to do this.  */
>    GLRO(dl_hwcap_mask) = HWCAP_IMPORTANT;
>  #endif
>  

Ok.

> diff --git a/sysdeps/x86/dl-cacheinfo.h b/sysdeps/x86/dl-cacheinfo.h
> index b2b90074b0..9632ee7818 100644
> --- a/sysdeps/x86/dl-cacheinfo.h
> +++ b/sysdeps/x86/dl-cacheinfo.h
> @@ -476,3 +476,463 @@ handle_zhaoxin (int name)
>    /* Nothing found.  */
>    return 0;
>  }
> +
> +static void
> +get_common_cache_info (long int *shared_ptr, unsigned int *threads_ptr,
> +                long int core)
> +{
> +  unsigned int eax;
> +  unsigned int ebx;
> +  unsigned int ecx;
> +  unsigned int edx;
> +
> +  /* Number of logical processors sharing L2 cache.  */
> +  int threads_l2;
> +
> +  /* Number of logical processors sharing L3 cache.  */
> +  int threads_l3;
> +
> +  const struct cpu_features *cpu_features = __get_cpu_features ();
> +  int max_cpuid = cpu_features->basic.max_cpuid;
> +  unsigned int family = cpu_features->basic.family;
> +  unsigned int model = cpu_features->basic.model;
> +  long int shared = *shared_ptr;
> +  unsigned int threads = *threads_ptr;
> +  bool inclusive_cache = true;
> +  bool support_count_mask = true;
> +
> +  /* Try L3 first.  */
> +  unsigned int level = 3;
> +
> +  if (cpu_features->basic.kind == arch_kind_zhaoxin && family == 6)
> +    support_count_mask = false;
> +
> +  if (shared <= 0)
> +    {
> +      /* Try L2 otherwise.  */
> +      level  = 2;
> +      shared = core;
> +      threads_l2 = 0;
> +      threads_l3 = -1;
> +    }
> +  else
> +    {
> +      threads_l2 = 0;
> +      threads_l3 = 0;
> +    }
> +
> +  /* A value of 0 for the HTT bit indicates there is only a single
> +     logical processor.  */
> +  if (HAS_CPU_FEATURE (HTT))
> +    {
> +      /* Figure out the number of logical threads that share the
> +         highest cache level.  */
> +      if (max_cpuid >= 4)
> +        {
> +          int i = 0;
> +
> +          /* Query until cache level 2 and 3 are enumerated.  */
> +          int check = 0x1 | (threads_l3 == 0) << 1;
> +          do
> +            {
> +              __cpuid_count (4, i++, eax, ebx, ecx, edx);
> +
> +              /* There seems to be a bug in at least some Pentium Ds
> +                 which sometimes fail to iterate all cache parameters.
> +                 Do not loop indefinitely here, stop in this case and
> +                 assume there is no such information.  */
> +              if (cpu_features->basic.kind == arch_kind_intel
> +                  && (eax & 0x1f) == 0 )
> +                goto intel_bug_no_cache_info;
> +
> +              switch ((eax >> 5) & 0x7)
> +                {
> +                  default:
> +                    break;
> +                  case 2:
> +                    if ((check & 0x1))
> +                      {
> +                        /* Get maximum number of logical processors
> +                           sharing L2 cache.  */
> +                        threads_l2 = (eax >> 14) & 0x3ff;
> +                        check &= ~0x1;
> +                      }
> +                    break;
> +                  case 3:
> +                    if ((check & (0x1 << 1)))
> +                      {
> +                        /* Get maximum number of logical processors
> +                           sharing L3 cache.  */
> +                        threads_l3 = (eax >> 14) & 0x3ff;
> +
> +                        /* Check if L2 and L3 caches are inclusive.  */
> +                        inclusive_cache = (edx & 0x2) != 0;
> +                        check &= ~(0x1 << 1);
> +                      }
> +                    break;
> +                }
> +            }
> +          while (check);
> +
> +          /* If max_cpuid >= 11, THREADS_L2/THREADS_L3 are the maximum
> +             numbers of addressable IDs for logical processors sharing
> +             the cache, instead of the maximum number of threads
> +             sharing the cache.  */
> +          if (max_cpuid >= 11 && support_count_mask)
> +            {
> +              /* Find the number of logical processors shipped in
> +                 one core and apply count mask.  */
> +              i = 0;
> +
> +              /* Count SMT only if there is L3 cache.  Always count
> +                 core if there is no L3 cache.  */
> +              int count = ((threads_l2 > 0 && level == 3)
> +                           | ((threads_l3 > 0
> +                               || (threads_l2 > 0 && level == 2)) << 1));
> +
> +              while (count)
> +                {
> +                  __cpuid_count (11, i++, eax, ebx, ecx, edx);
> +
> +                  int shipped = ebx & 0xff;
> +                  int type = ecx & 0xff00;
> +                  if (shipped == 0 || type == 0)
> +                    break;
> +                  else if (type == 0x100)
> +                    {
> +                      /* Count SMT.  */
> +                      if ((count & 0x1))
> +                        {
> +                          int count_mask;
> +
> +                          /* Compute count mask.  */
> +                          asm ("bsr %1, %0"
> +                               : "=r" (count_mask) : "g" (threads_l2));
> +                          count_mask = ~(-1 << (count_mask + 1));
> +                          threads_l2 = (shipped - 1) & count_mask;
> +                          count &= ~0x1;
> +                        }
> +                    }
> +                  else if (type == 0x200)
> +                    {
> +                      /* Count core.  */
> +                      if ((count & (0x1 << 1)))
> +                        {
> +                          int count_mask;
> +                          int threads_core
> +                            = (level == 2 ? threads_l2 : threads_l3);
> +
> +                          /* Compute count mask.  */
> +                          asm ("bsr %1, %0"
> +                               : "=r" (count_mask) : "g" (threads_core));
> +                          count_mask = ~(-1 << (count_mask + 1));
> +                          threads_core = (shipped - 1) & count_mask;
> +                          if (level == 2)
> +                            threads_l2 = threads_core;
> +                          else
> +                            threads_l3 = threads_core;
> +                          count &= ~(0x1 << 1);
> +                        }
> +                    }
> +                }
> +            }
> +          if (threads_l2 > 0)
> +            threads_l2 += 1;
> +          if (threads_l3 > 0)
> +            threads_l3 += 1;
> +          if (level == 2)
> +            {
> +              if (threads_l2)
> +                {
> +                  threads = threads_l2;
> +                  if (cpu_features->basic.kind == arch_kind_intel
> +                      && threads > 2
> +                      && family == 6)
> +                    switch (model)
> +                      {
> +                        case 0x37:
> +                        case 0x4a:
> +                        case 0x4d:
> +                        case 0x5a:
> +                        case 0x5d:
> +                          /* Silvermont has L2 cache shared by 2 cores.  */
> +                          threads = 2;
> +                          break;
> +                        default:
> +                          break;
> +                      }
> +                }
> +            }
> +          else if (threads_l3)
> +            threads = threads_l3;
> +        }
> +      else
> +        {
> +intel_bug_no_cache_info:
> +          /* Assume that all logical threads share the highest cache
> +             level.  */
> +          threads
> +            = ((cpu_features->features[COMMON_CPUID_INDEX_1].cpuid.ebx
> +                >> 16) & 0xff);
> +        }
> +
> +        /* Cap usage of highest cache level to the number of supported
> +           threads.  */
> +        if (shared > 0 && threads > 0)
> +          shared /= threads;
> +    }
> +
> +  /* Account for non-inclusive L2 and L3 caches.  */
> +  if (!inclusive_cache)
> +    {
> +      if (threads_l2 > 0)
> +        core /= threads_l2;
> +      shared += core;
> +    }
> +
> +  *shared_ptr = shared;
> +  *threads_ptr = threads;
> +}
> +

Ok, so this is the function moved from sysdeps/x86/cacheinfo.h.

> +static void
> +dl_init_cacheinfo (struct cpu_features *cpu_features)
> +{
> +  /* Find out what brand of processor.  */
> +  unsigned int ebx;
> +  unsigned int ecx;
> +  unsigned int edx;
> +  int max_cpuid_ex;
> +  long int data = -1;
> +  long int shared = -1;
> +  long int core;
> +  unsigned int threads = 0;
> +  unsigned long int level1_icache_size = -1;
> +  unsigned long int level1_dcache_size = -1;
> +  unsigned long int level1_dcache_assoc = -1;
> +  unsigned long int level1_dcache_linesize = -1;
> +  unsigned long int level2_cache_size = -1;
> +  unsigned long int level2_cache_assoc = -1;
> +  unsigned long int level2_cache_linesize = -1;
> +  unsigned long int level3_cache_size = -1;
> +  unsigned long int level3_cache_assoc = -1;
> +  unsigned long int level3_cache_linesize = -1;
> +  unsigned long int level4_cache_size = -1;
> +
> +  if (cpu_features->basic.kind == arch_kind_intel)
> +    {
> +      data = handle_intel (_SC_LEVEL1_DCACHE_SIZE, cpu_features);
> +      core = handle_intel (_SC_LEVEL2_CACHE_SIZE, cpu_features);
> +      shared = handle_intel (_SC_LEVEL3_CACHE_SIZE, cpu_features);
> +
> +      level1_icache_size
> +	= handle_intel (_SC_LEVEL1_ICACHE_SIZE, cpu_features);
> +      level1_dcache_size = data;
> +      level1_dcache_assoc
> +	= handle_intel (_SC_LEVEL1_DCACHE_ASSOC, cpu_features);
> +      level1_dcache_linesize
> +	= handle_intel (_SC_LEVEL1_DCACHE_LINESIZE, cpu_features);
> +      level2_cache_size = core;
> +      level2_cache_assoc
> +	= handle_intel (_SC_LEVEL2_CACHE_ASSOC, cpu_features);
> +      level2_cache_linesize
> +	= handle_intel (_SC_LEVEL2_CACHE_LINESIZE, cpu_features);
> +      level3_cache_size = shared;
> +      level3_cache_assoc
> +	= handle_intel (_SC_LEVEL3_CACHE_ASSOC, cpu_features);
> +      level3_cache_linesize
> +	= handle_intel (_SC_LEVEL3_CACHE_LINESIZE, cpu_features);
> +      level4_cache_size
> +	= handle_intel (_SC_LEVEL4_CACHE_SIZE, cpu_features);
> +
> +      get_common_cache_info (&shared, &threads, core);
> +    }
> +  else if (cpu_features->basic.kind == arch_kind_zhaoxin)
> +    {
> +      data = handle_zhaoxin (_SC_LEVEL1_DCACHE_SIZE);
> +      core = handle_zhaoxin (_SC_LEVEL2_CACHE_SIZE);
> +      shared = handle_zhaoxin (_SC_LEVEL3_CACHE_SIZE);
> +
> +      level1_icache_size = handle_zhaoxin (_SC_LEVEL1_ICACHE_SIZE);
> +      level1_dcache_size = data;
> +      level1_dcache_assoc = handle_zhaoxin (_SC_LEVEL1_DCACHE_ASSOC);
> +      level1_dcache_linesize = handle_zhaoxin (_SC_LEVEL1_DCACHE_LINESIZE);
> +      level2_cache_size = core;
> +      level2_cache_assoc = handle_zhaoxin (_SC_LEVEL2_CACHE_ASSOC);
> +      level2_cache_linesize = handle_zhaoxin (_SC_LEVEL2_CACHE_LINESIZE);
> +      level3_cache_size = shared;
> +      level3_cache_assoc = handle_zhaoxin (_SC_LEVEL3_CACHE_ASSOC);
> +      level3_cache_linesize = handle_zhaoxin (_SC_LEVEL3_CACHE_LINESIZE);
> +
> +      get_common_cache_info (&shared, &threads, core);
> +    }
> +  else if (cpu_features->basic.kind == arch_kind_amd)
> +    {
> +      data  = handle_amd (_SC_LEVEL1_DCACHE_SIZE);
> +      core = handle_amd (_SC_LEVEL2_CACHE_SIZE);
> +      shared = handle_amd (_SC_LEVEL3_CACHE_SIZE);
> +
> +      level1_icache_size = handle_amd (_SC_LEVEL1_ICACHE_SIZE);
> +      level1_dcache_size = data;
> +      level1_dcache_assoc = handle_amd (_SC_LEVEL1_DCACHE_ASSOC);
> +      level1_dcache_linesize = handle_amd (_SC_LEVEL1_DCACHE_LINESIZE);
> +      level2_cache_size = core;
> +      level2_cache_assoc = handle_amd (_SC_LEVEL2_CACHE_ASSOC);
> +      level2_cache_linesize = handle_amd (_SC_LEVEL2_CACHE_LINESIZE);
> +      level3_cache_size = shared;
> +      level3_cache_assoc = handle_amd (_SC_LEVEL3_CACHE_ASSOC);
> +      level3_cache_linesize = handle_amd (_SC_LEVEL3_CACHE_LINESIZE);
> +
> +      /* Get maximum extended function. */
> +      __cpuid (0x80000000, max_cpuid_ex, ebx, ecx, edx);
> +
> +      if (shared <= 0)
> +	/* No shared L3 cache.  All we have is the L2 cache.  */
> +	shared = core;
> +      else
> +	{
> +	  /* Figure out the number of logical threads that share L3.  */
> +	  if (max_cpuid_ex >= 0x80000008)
> +	    {
> +	      /* Get width of APIC ID.  */
> +	      __cpuid (0x80000008, max_cpuid_ex, ebx, ecx, edx);
> +	      threads = 1 << ((ecx >> 12) & 0x0f);
> +	    }
> +
> +	  if (threads == 0 || cpu_features->basic.family >= 0x17)
> +	    {
> +	      /* If APIC ID width is not available, use logical
> +		 processor count.  */
> +	      __cpuid (0x00000001, max_cpuid_ex, ebx, ecx, edx);
> +
> +	      if ((edx & (1 << 28)) != 0)
> +		threads = (ebx >> 16) & 0xff;
> +	    }
> +
> +	  /* Cap usage of highest cache level to the number of
> +	     supported threads.  */
> +	  if (threads > 0)
> +	    shared /= threads;
> +
> +	  /* Get shared cache per ccx for Zen architectures.  */
> +	  if (cpu_features->basic.family >= 0x17)
> +	    {
> +	      unsigned int eax;
> +
> +	      /* Get number of threads share the L3 cache in CCX.  */
> +	      __cpuid_count (0x8000001D, 0x3, eax, ebx, ecx, edx);
> +
> +	      unsigned int threads_per_ccx = ((eax >> 14) & 0xfff) + 1;
> +	      shared *= threads_per_ccx;
> +	    }
> +	  else
> +	    {
> +	      /* Account for exclusive L2 and L3 caches.  */
> +	      shared += core;
> +            }
> +	}
> +    }
> +
> +  cpu_features->level1_icache_size = level1_icache_size;
> +  cpu_features->level1_dcache_size = level1_dcache_size;
> +  cpu_features->level1_dcache_assoc = level1_dcache_assoc;
> +  cpu_features->level1_dcache_linesize = level1_dcache_linesize;
> +  cpu_features->level2_cache_size = level2_cache_size;
> +  cpu_features->level2_cache_assoc = level2_cache_assoc;
> +  cpu_features->level2_cache_linesize = level2_cache_linesize;
> +  cpu_features->level3_cache_size = level3_cache_size;
> +  cpu_features->level3_cache_assoc = level3_cache_assoc;
> +  cpu_features->level3_cache_linesize = level3_cache_linesize;
> +  cpu_features->level4_cache_size = level4_cache_size;

Ok, so you are filling in the new per-level cache fields for each vendor
kind (Intel, Zhaoxin, AMD).

> +
> +  /* The default setting for the non_temporal threshold is 3/4 of one
> +     thread's share of the chip's cache. For most Intel and AMD processors
> +     with an initial release date between 2017 and 2020, a thread's typical
> +     share of the cache is from 500 KBytes to 2 MBytes. Using the 3/4
> +     threshold leaves 125 KBytes to 500 KBytes of the thread's data
> +     in cache after a maximum temporal copy, which will maintain
> +     in cache a reasonable portion of the thread's stack and other
> +     active data. If the threshold is set higher than one thread's
> +     share of the cache, it has a substantial risk of negatively
> +     impacting the performance of other threads running on the chip. */
> +  unsigned long int non_temporal_threshold = shared * 3 / 4;
> +
> +#if HAVE_TUNABLES
> +  /* NB: The REP MOVSB threshold must be greater than VEC_SIZE * 8.  */
> +  unsigned int minimum_rep_movsb_threshold;
> +#endif
> +  /* NB: The default REP MOVSB threshold is 2048 * (VEC_SIZE / 16).  */
> +  unsigned int rep_movsb_threshold;
> +  if (CPU_FEATURE_USABLE_P (cpu_features, AVX512F)
> +      && !CPU_FEATURE_PREFERRED_P (cpu_features, Prefer_No_AVX512))
> +    {
> +      rep_movsb_threshold = 2048 * (64 / 16);
> +#if HAVE_TUNABLES
> +      minimum_rep_movsb_threshold = 64 * 8;
> +#endif
> +    }
> +  else if (CPU_FEATURE_PREFERRED_P (cpu_features,
> +				    AVX_Fast_Unaligned_Load))
> +    {
> +      rep_movsb_threshold = 2048 * (32 / 16);
> +#if HAVE_TUNABLES
> +      minimum_rep_movsb_threshold = 32 * 8;
> +#endif
> +    }
> +  else
> +    {
> +      rep_movsb_threshold = 2048 * (16 / 16);
> +#if HAVE_TUNABLES
> +      minimum_rep_movsb_threshold = 16 * 8;
> +#endif
> +    }
> +
> +  /* The default threshold to use Enhanced REP STOSB.  */
> +  unsigned long int rep_stosb_threshold = 2048;
> +

Ok.
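
For the record, the default works out to 2048 * (64 / 16) = 8192 bytes
when AVX512F is usable, 2048 * (32 / 16) = 4096 bytes with
AVX_Fast_Unaligned_Load, and 2048 bytes for the SSE2 baseline; the
corresponding minimums are VEC_SIZE * 8, i.e. 512, 256, and 128 bytes.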

> +#if HAVE_TUNABLES
> +  long int tunable_size;
> +
> +  tunable_size = TUNABLE_GET (x86_data_cache_size, long int, NULL);
> +  /* NB: Ignore the default value 0.  */
> +  if (tunable_size)

Small nit: avoid the implicit boolean check here and make the comparison
explicit (the same applies to the similar checks below).
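
Something like the following (same transformation for the
shared_cache_size and non_temporal_threshold checks below):

  tunable_size = TUNABLE_GET (x86_data_cache_size, long int, NULL);
  /* NB: Ignore the tunable default value of 0.  */
  if (tunable_size != 0)
    data = tunable_size;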

> +    data = tunable_size;
> +
> +  tunable_size = TUNABLE_GET (x86_shared_cache_size, long int, NULL);
> +  /* NB: Ignore the default value 0.  */
> +  if (tunable_size)
> +    shared = tunable_size;
> +
> +  tunable_size = TUNABLE_GET (x86_non_temporal_threshold, long int, NULL);
> +  /* NB: Ignore the default value 0.  */
> +  if (tunable_size)
> +    non_temporal_threshold = tunable_size;
> +
> +  tunable_size = TUNABLE_GET (x86_rep_movsb_threshold, long int, NULL);
> +  if (tunable_size > minimum_rep_movsb_threshold)
> +    rep_movsb_threshold = tunable_size;
> +
> +  /* NB: The default value of the x86_rep_stosb_threshold tunable is the
> +     same as the default value of __x86_rep_stosb_threshold and the
> +     minimum value is fixed.  */
> +  rep_stosb_threshold = TUNABLE_GET (x86_rep_stosb_threshold,
> +				     long int, NULL);
> +
> +  TUNABLE_SET_WITH_BOUNDS (x86_data_cache_size, long int, data,
> +			   0, (long int) -1);
> +  TUNABLE_SET_WITH_BOUNDS (x86_shared_cache_size, long int, shared,
> +			   0, (long int) -1);
> +  TUNABLE_SET_WITH_BOUNDS (x86_non_temporal_threshold, long int,
> +			   non_temporal_threshold, 0, (long int) -1);
> +  TUNABLE_SET_WITH_BOUNDS (x86_rep_movsb_threshold, long int,
> +			   rep_movsb_threshold,
> +			   minimum_rep_movsb_threshold, (long int) -1);
> +  TUNABLE_SET_WITH_BOUNDS (x86_rep_stosb_threshold, long int,
> +			   rep_stosb_threshold, 1, (long int) -1);
> +#endif

Ok.  Are the bounds ok for the architecture?
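
(My reading is that (long int) -1 as the upper bound effectively means
"no upper limit" once converted to the tunable's storage type, so the
interesting constraints are the lower bounds: 0 for the cache sizes,
the computed minimum_rep_movsb_threshold, and the fixed 1 for
rep_stosb_threshold.)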

> +
> +  cpu_features->data_cache_size = data;
> +  cpu_features->shared_cache_size = shared;
> +  cpu_features->non_temporal_threshold = non_temporal_threshold;
> +  cpu_features->rep_movsb_threshold = rep_movsb_threshold;
> +  cpu_features->rep_stosb_threshold = rep_stosb_threshold;
> +}

Ok.

> diff --git a/sysdeps/x86/include/cpu-features.h b/sysdeps/x86/include/cpu-features.h
> index f62be0b9b3..3f3bd93320 100644
> --- a/sysdeps/x86/include/cpu-features.h
> +++ b/sysdeps/x86/include/cpu-features.h
> @@ -153,6 +153,28 @@ struct cpu_features
>    unsigned long int rep_movsb_threshold;
>    /* Threshold to use "rep stosb".  */
>    unsigned long int rep_stosb_threshold;
> +  /* _SC_LEVEL1_ICACHE_SIZE.  */
> +  unsigned long int level1_icache_size;
> +  /* _SC_LEVEL1_DCACHE_SIZE.  */
> +  unsigned long int level1_dcache_size;
> +  /* _SC_LEVEL1_DCACHE_ASSOC.  */
> +  unsigned long int level1_dcache_assoc;
> +  /* _SC_LEVEL1_DCACHE_LINESIZE.  */
> +  unsigned long int level1_dcache_linesize;
> +  /* _SC_LEVEL2_CACHE_SIZE.  */
> +  unsigned long int level2_cache_size;
> +  /* _SC_LEVEL2_CACHE_ASSOC.  */
> +  unsigned long int level2_cache_assoc;
> +  /* _SC_LEVEL2_CACHE_LINESIZE.  */
> +  unsigned long int level2_cache_linesize;
> +  /* _SC_LEVEL3_CACHE_SIZE.  */
> +  unsigned long int level3_cache_size;
> +  /* _SC_LEVEL3_CACHE_ASSOC.  */
> +  unsigned long int level3_cache_assoc;
> +  /* _SC_LEVEL3_CACHE_LINESIZE.  */
> +  unsigned long int level3_cache_linesize;
> +  /* _SC_LEVEL4_CACHE_SIZE.  */
> +  unsigned long int level4_cache_size;
>  };
>  
>  # if defined (_LIBC) && !IS_IN (nonlib)
> 

Ok.


Thread overview: 12+ messages
2020-10-31 15:44 V4 [PATCH 0/2] ld.so: Add --list-tunables to print tunable values H.J. Lu
2020-10-31 15:44 ` V4 [PATCH 1/2] x86: Move x86 processor cache info to cpu_features H.J. Lu
2021-01-14 14:13   ` Adhemerval Zanella [this message]
2021-01-14 19:28     ` V5 " H.J. Lu
2020-10-31 15:44 ` V4 [PATCH 2/2] ld.so: Add --list-tunables to print tunable values H.J. Lu
2021-01-14 18:35   ` Adhemerval Zanella
2021-01-14 22:25     ` V5 " H.J. Lu
2021-01-15 12:47       ` Adhemerval Zanella
2021-02-02 10:59       ` Andreas Schwab
2021-02-02 17:36         ` [PATCH] ld.so: Unset glibc tunables for --list-tunables test H.J. Lu
2021-02-02 18:07           ` Andreas Schwab
2021-02-02 18:12             ` [PATCH] tst-rtld-list-tunables.sh: Unset glibc tunables H.J. Lu
