public inbox for libc-alpha@sourceware.org
* [PATCH v2 1/3] x86: Set preferred CPU features on the KH-40000 and KX-7000 Zhaoxin processors
@ 2024-06-29  3:58 MayShao-oc
  2024-06-29  3:58 ` [PATCH v2 2/3] x86_64: Optimize large size copy in memmove-ssse3 MayShao-oc
                   ` (2 more replies)
  0 siblings, 3 replies; 7+ messages in thread
From: MayShao-oc @ 2024-06-29  3:58 UTC (permalink / raw)
  To: libc-alpha, goldstein.w.n, carlos, hjl.tools
  Cc: LouisQi, TimHu, Hawkwang, MayShao

Fix code formatting under the Zhaoxin branch and add comments for
different Zhaoxin models.

Unaligned AVX loads are slower on KH-40000 and KX-7000, so disable
AVX_Fast_Unaligned_Load.

Enable the Prefer_No_VZEROUPPER and Fast_Unaligned_Load features to
use the sse2_unaligned versions of memset, strcpy, and strcat.
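
As a minimal sketch of how these preferred bits steer string-function
selection (hypothetical and heavily simplified; the real logic lives in
sysdeps/x86_64/multiarch/ifunc-memset.h, though CPU_FEATURE_USABLE_P,
CPU_FEATURES_ARCH_P and the variant names below are glibc's own):

    /* Hypothetical, simplified selector.  With Prefer_No_VZEROUPPER
       set as above, KH-40000 and KX-7000 fall through to the SSE2
       unaligned variant, which never needs VZEROUPPER.  */
    extern __typeof (memset) __memset_avx2_unaligned attribute_hidden;
    extern __typeof (memset) __memset_sse2_unaligned attribute_hidden;

    static inline __typeof (memset) *
    select_memset (const struct cpu_features *cpu_features)
    {
      if (CPU_FEATURE_USABLE_P (cpu_features, AVX2)
          && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER))
        return __memset_avx2_unaligned;
      return __memset_sse2_unaligned;
    }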
---
 sysdeps/x86/cpu-features.c | 51 ++++++++++++++++++++++++++------------
 1 file changed, 35 insertions(+), 16 deletions(-)

diff --git a/sysdeps/x86/cpu-features.c b/sysdeps/x86/cpu-features.c
index 3d7c2819d7..1927f65699 100644
--- a/sysdeps/x86/cpu-features.c
+++ b/sysdeps/x86/cpu-features.c
@@ -1023,39 +1023,58 @@ https://www.intel.com/content/www/us/en/support/articles/000059422/processors.ht
 
       model += extended_model;
       if (family == 0x6)
-        {
-          if (model == 0xf || model == 0x19)
-            {
+	{
+	  /* Tuning for older Zhaoxin processors.  */
+	  if (model == 0xf || model == 0x19)
+	    {
 	      CPU_FEATURE_UNSET (cpu_features, AVX);
 	      CPU_FEATURE_UNSET (cpu_features, AVX2);
 
-              cpu_features->preferred[index_arch_Slow_SSE4_2]
-                |= bit_arch_Slow_SSE4_2;
+	      cpu_features->preferred[index_arch_Slow_SSE4_2]
+		  |= bit_arch_Slow_SSE4_2;
 
+	      /* Unaligned AVX loads are slower.  */
 	      cpu_features->preferred[index_arch_AVX_Fast_Unaligned_Load]
-		&= ~bit_arch_AVX_Fast_Unaligned_Load;
-            }
-        }
+		  &= ~bit_arch_AVX_Fast_Unaligned_Load;
+	    }
+	}
       else if (family == 0x7)
-        {
-	  if (model == 0x1b)
+	{
+	  switch (model)
 	    {
+	      /* Wudaokou microarch tuning.  */
+	    case 0x1b:
 	      CPU_FEATURE_UNSET (cpu_features, AVX);
 	      CPU_FEATURE_UNSET (cpu_features, AVX2);
 
 	      cpu_features->preferred[index_arch_Slow_SSE4_2]
-		|= bit_arch_Slow_SSE4_2;
+		  |= bit_arch_Slow_SSE4_2;
 
 	      cpu_features->preferred[index_arch_AVX_Fast_Unaligned_Load]
-		&= ~bit_arch_AVX_Fast_Unaligned_Load;
-	    }
-	  else if (model == 0x3b)
-	    {
+		  &= ~bit_arch_AVX_Fast_Unaligned_Load;
+	      break;
+
+	      /* Lujiazui microarch tuning.  */
+	    case 0x3b:
 	      CPU_FEATURE_UNSET (cpu_features, AVX);
 	      CPU_FEATURE_UNSET (cpu_features, AVX2);
 
 	      cpu_features->preferred[index_arch_AVX_Fast_Unaligned_Load]
-		&= ~bit_arch_AVX_Fast_Unaligned_Load;
+		  &= ~bit_arch_AVX_Fast_Unaligned_Load;
+	      break;
+
+	      /* Yongfeng and Shijidadao microarch tuning.  */
+	    case 0x5b:
+	    case 0x6b:
+	      cpu_features->preferred[index_arch_AVX_Fast_Unaligned_Load]
+		  &= ~bit_arch_AVX_Fast_Unaligned_Load;
+
+	      /* To use sse2_unaligned versions of memset, strcpy and strcat.
+	       */
+	      cpu_features->preferred[index_arch_Prefer_No_VZEROUPPER]
+		  |= (bit_arch_Prefer_No_VZEROUPPER
+		      | bit_arch_Fast_Unaligned_Load);
+	      break;
 	    }
 	}
     }
-- 
2.34.1



* [PATCH v2 2/3] x86_64: Optimize large size copy in memmove-ssse3
  2024-06-29  3:58 [PATCH v2 1/3] x86: Set preferred CPU features on the KH-40000 and KX-7000 Zhaoxin processors MayShao-oc
@ 2024-06-29  3:58 ` MayShao-oc
  2024-06-30  2:07   ` Noah Goldstein
  2024-06-29  3:58 ` [PATCH v2 3/3] x86: Set default non_temporal_threshold for Zhaoxin processors MayShao-oc
  2024-06-30  2:06 ` [PATCH v2 1/3] x86: Set preferred CPU features on the KH-40000 and KX-7000 " Noah Goldstein
  2 siblings, 1 reply; 7+ messages in thread
From: MayShao-oc @ 2024-06-29  3:58 UTC (permalink / raw)
  To: libc-alpha, goldstein.w.n, carlos, hjl.tools
  Cc: LouisQi, TimHu, Hawkwang, MayShao

This patch optimizes large size copies to use normal (cacheable) stores
when src > dst and the regions overlap, matching the logic in
memmove-vec-unaligned-erms.S.

memmove-ssse3 currently uses '__x86_shared_cache_size_half' as the
non-temporal threshold; this patch switches it to
'__x86_shared_non_temporal_threshold'.  The latter is CPU-specific, with
per-CPU values derived from the related nt-benchmark results, whereas
half the shared cache size is a poor proxy for when non-temporal stores
pay off.

Overall performance is essentially unchanged, showing modest
improvements on large sizes with no major regressions.
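
Rendered in C, the new overlap check reads as follows (the register
mapping, dst in %rdi, src in %r9, length in %rdx, is read off the
surrounding code; the function name is purely illustrative):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Mirrors "subq %rdi, %r9; cmpq %rdx, %r9; jb L(loop_fwd)": one
       unsigned subtract-and-compare answers "does src lie inside
       [dst, dst + len)?", i.e. the buffers overlap with src > dst,
       in which case the cacheable forward loop is taken.  */
    static bool
    overlap_src_after_dst (const void *src, const void *dst, size_t len)
    {
      /* If src < dst the subtraction wraps to a huge value, the
         compare fails, and the non-temporal path is kept.  */
      return (uintptr_t) src - (uintptr_t) dst < (uintptr_t) len;
    }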

Results on Zhaoxin KX-7000:
bench-memcpy geometric_mean(N=20) New / Original: 0.999

bench-memcpy-random geometric_mean(N=20) New / Original: 0.999

bench-memcpy-large geometric_mean(N=20) New / Original: 0.978

bench-memmove geometric_mean(N=20) New / Original: 1.000

bench-memmove-large geometric_mean(N=20) New / Original: 0.962

Results on Intel Core i5-6600K:
bench-memcpy geometric_mean(N=20) New / Original: 1.001

bench-memcpy-random geometric_mean(N=20) New / Original: 0.999

bench-memcpy-large geometric_mean(N=20) New / Original: 1.001

bench-memmove geometric_mean(N=20) New / Original: 0.995

bench-memmove-large geometric_mean(N=20) New / Original: 0.936
---
 sysdeps/x86_64/multiarch/memmove-ssse3.S | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/sysdeps/x86_64/multiarch/memmove-ssse3.S b/sysdeps/x86_64/multiarch/memmove-ssse3.S
index 048d015712..01008fd981 100644
--- a/sysdeps/x86_64/multiarch/memmove-ssse3.S
+++ b/sysdeps/x86_64/multiarch/memmove-ssse3.S
@@ -151,13 +151,10 @@ L(more_2x_vec):
 	   loop.  */
 	movups	%xmm0, (%rdi)
 
-# ifdef SHARED_CACHE_SIZE_HALF
-	cmp	$SHARED_CACHE_SIZE_HALF, %RDX_LP
-# else
-	cmp	__x86_shared_cache_size_half(%rip), %rdx
-# endif
+	cmp	__x86_shared_non_temporal_threshold(%rip), %rdx
 	ja	L(large_memcpy)
 
+L(loop_fwd):
 	leaq	-64(%rdi, %rdx), %r8
 	andq	$-16, %rdi
 	movl	$48, %edx
@@ -199,6 +196,13 @@ L(large_memcpy):
 	movups	-64(%r9, %rdx), %xmm10
 	movups	-80(%r9, %rdx), %xmm11
 
+	/* Check if src and dst overlap.  If they do, use cacheable
+	   writes to potentially gain positive interference between
+	   the loads during the memmove.  */
+	subq	%rdi, %r9
+	cmpq	%rdx, %r9
+	jb	L(loop_fwd)
+
 	sall	$5, %ecx
 	leal	(%rcx, %rcx, 2), %r8d
 	leaq	-96(%rdi, %rdx), %rcx
-- 
2.34.1



* [PATCH v2 3/3] x86: Set default non_temporal_threshold for Zhaoxin processors
  2024-06-29  3:58 [PATCH v2 1/3] x86: Set preferred CPU features on the KH-40000 and KX-7000 Zhaoxin processors MayShao-oc
  2024-06-29  3:58 ` [PATCH v2 2/3] x86_64: Optimize large size copy in memmove-ssse3 MayShao-oc
@ 2024-06-29  3:58 ` MayShao-oc
  2024-06-30  2:08   ` Noah Goldstein
  2024-06-30  2:06 ` [PATCH v2 1/3] x86: Set preferred CPU features on the KH-40000 and KX-7000 " Noah Goldstein
  2 siblings, 1 reply; 7+ messages in thread
From: MayShao-oc @ 2024-06-29  3:58 UTC (permalink / raw)
  To: libc-alpha, goldstein.w.n, carlos, hjl.tools
  Cc: LouisQi, TimHu, Hawkwang, MayShao

Currently, 'non_temporal_threshold' is set to
'non_temporal_threshold_lowbound' on Zhaoxin processors without ERMS.
That default lowbound is too small for the KH-40000 and KX-7000 Zhaoxin
processors, so this patch updates the value to
'shared / cachesize_non_temporal_divisor'.
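
To illustrate the resulting arithmetic (the cache size below is
hypothetical; real values come from the CPUID-based detection in
dl-cacheinfo.h):

    #include <stdio.h>

    int
    main (void)
    {
      /* Hypothetical 32 MiB shared cache, for illustration only.  */
      unsigned long shared = 32UL << 20;
      /* This patch sets cachesize_non_temporal_divisor = 2 for model
         0x5b; shared / divisor then replaces the much smaller
         non_temporal_threshold_lowbound.  */
      unsigned long divisor = 2;
      unsigned long threshold = shared / divisor;
      printf ("non_temporal_threshold = %lu MiB\n", threshold >> 20);
      return 0;
    }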
---
 sysdeps/x86/cpu-features.c | 1 +
 sysdeps/x86/dl-cacheinfo.h | 6 ++++--
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/sysdeps/x86/cpu-features.c b/sysdeps/x86/cpu-features.c
index 1927f65699..e501e084ef 100644
--- a/sysdeps/x86/cpu-features.c
+++ b/sysdeps/x86/cpu-features.c
@@ -1065,6 +1065,7 @@ https://www.intel.com/content/www/us/en/support/articles/000059422/processors.ht
 
 	      /* Yongfeng and Shijidadao microarch tuning.  */
 	    case 0x5b:
+	      cpu_features->cachesize_non_temporal_divisor = 2;
 	    case 0x6b:
 	      cpu_features->preferred[index_arch_AVX_Fast_Unaligned_Load]
 		  &= ~bit_arch_AVX_Fast_Unaligned_Load;
diff --git a/sysdeps/x86/dl-cacheinfo.h b/sysdeps/x86/dl-cacheinfo.h
index 3a6ec4ef9f..5e77345a6e 100644
--- a/sysdeps/x86/dl-cacheinfo.h
+++ b/sysdeps/x86/dl-cacheinfo.h
@@ -934,8 +934,10 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
   /* If no ERMS, we use the per-thread L3 chunking. Normal cacheable stores run
      a higher risk of actually thrashing the cache as they don't have a HW LRU
      hint. As well, their performance in highly parallel situations is
-     noticeably worse.  */
-  if (!CPU_FEATURE_USABLE_P (cpu_features, ERMS))
+     noticeably worse.  Zhaoxin processors are an exception; based on
+     actual test data, the lowbound is not suitable for them.  */
+  if (!CPU_FEATURE_USABLE_P (cpu_features, ERMS)
+      && cpu_features->basic.kind != arch_kind_zhaoxin)
     non_temporal_threshold = non_temporal_threshold_lowbound;
   /* SIZE_MAX >> 4 because memmove-vec-unaligned-erms right-shifts the value of
      'x86_non_temporal_threshold' by `LOG_4X_MEMCPY_THRESH` (4) and it is best
-- 
2.34.1



* Re: [PATCH v2 1/3] x86: Set preferred CPU features on the KH-40000 and KX-7000 Zhaoxin processors
  2024-06-29  3:58 [PATCH v2 1/3] x86: Set preferred CPU features on the KH-40000 and KX-7000 Zhaoxin processors MayShao-oc
  2024-06-29  3:58 ` [PATCH v2 2/3] x86_64: Optimize large size copy in memmove-ssse3 MayShao-oc
  2024-06-29  3:58 ` [PATCH v2 3/3] x86: Set default non_temporal_threshold for Zhaoxin processors MayShao-oc
@ 2024-06-30  2:06 ` Noah Goldstein
  2024-06-30 13:40   ` Mayshao-oc
  2 siblings, 1 reply; 7+ messages in thread
From: Noah Goldstein @ 2024-06-30  2:06 UTC (permalink / raw)
  To: MayShao-oc
  Cc: libc-alpha, carlos, hjl.tools, LouisQi, TimHu, Hawkwang, MayShao

On Saturday, June 29, 2024, MayShao-oc <MayShao-oc@zhaoxin.com> wrote:

> Fix code formatting under the Zhaoxin branch and add comments for
> different Zhaoxin models.
>
> Unaligned AVX loads are slower on KH-40000 and KX-7000, so disable
> AVX_Fast_Unaligned_Load.
>
> Enable the Prefer_No_VZEROUPPER and Fast_Unaligned_Load features to
> use the sse2_unaligned versions of memset, strcpy, and strcat.
> ---
>  sysdeps/x86/cpu-features.c | 51 ++++++++++++++++++++++++++------------
>  1 file changed, 35 insertions(+), 16 deletions(-)
>
> diff --git a/sysdeps/x86/cpu-features.c b/sysdeps/x86/cpu-features.c
> index 3d7c2819d7..1927f65699 100644
> --- a/sysdeps/x86/cpu-features.c
> +++ b/sysdeps/x86/cpu-features.c
> @@ -1023,39 +1023,58 @@ https://www.intel.com/content/www/us/en/support/articles/000059422/processors.ht
>
>        model += extended_model;
>        if (family == 0x6)
> -        {
> -          if (model == 0xf || model == 0x19)
> -            {
> +       {
> +         /* Tuning for older Zhaoxin processors.  */
> +         if (model == 0xf || model == 0x19)
> +           {
>               CPU_FEATURE_UNSET (cpu_features, AVX);
>               CPU_FEATURE_UNSET (cpu_features, AVX2);
>
> -              cpu_features->preferred[index_arch_Slow_SSE4_2]
> -                |= bit_arch_Slow_SSE4_2;
> +             cpu_features->preferred[index_arch_Slow_SSE4_2]
> +                 |= bit_arch_Slow_SSE4_2;
>
> +             /* Unaligned AVX loads are slower.  */
>               cpu_features->preferred[index_arch_AVX_Fast_Unaligned_Load]
> -               &= ~bit_arch_AVX_Fast_Unaligned_Load;
> -            }
> -        }
> +                 &= ~bit_arch_AVX_Fast_Unaligned_Load;
> +           }
> +       }
>        else if (family == 0x7)
> -        {
> -         if (model == 0x1b)
> +       {
> +         switch (model)
>             {
> +             /* Wudaokou microarch tuning.  */
> +           case 0x1b:
>               CPU_FEATURE_UNSET (cpu_features, AVX);
>               CPU_FEATURE_UNSET (cpu_features, AVX2);
>
>               cpu_features->preferred[index_arch_Slow_SSE4_2]
> -               |= bit_arch_Slow_SSE4_2;
> +                 |= bit_arch_Slow_SSE4_2;
>
>               cpu_features->preferred[index_arch_AVX_Fast_Unaligned_Load]
> -               &= ~bit_arch_AVX_Fast_Unaligned_Load;
> -           }
> -         else if (model == 0x3b)
> -           {
> +                 &= ~bit_arch_AVX_Fast_Unaligned_Load;
> +             break;
> +
> +             /* Lujiazui microarch tuning.  */
> +           case 0x3b:
>               CPU_FEATURE_UNSET (cpu_features, AVX);
>               CPU_FEATURE_UNSET (cpu_features, AVX2);
>
>               cpu_features->preferred[index_arch_AVX_Fast_Unaligned_Load]
> -               &= ~bit_arch_AVX_Fast_Unaligned_Load;
> +                 &= ~bit_arch_AVX_Fast_Unaligned_Load;
> +             break;
> +
> +             /* Yongfeng and Shijidadao microarch tuning.  */
> +           case 0x5b:
> +           case 0x6b:
> +             cpu_features->preferred[index_arch_AVX_Fast_Unaligned_Load]
> +                 &= ~bit_arch_AVX_Fast_Unaligned_Load;
> +
> +             /* To use sse2_unaligned versions of memset, strcpy and strcat.
> +              */
> +             cpu_features->preferred[index_arch_Prefer_No_VZEROUPPER]
> +                 |= (bit_arch_Prefer_No_VZEROUPPER
> +                     | bit_arch_Fast_Unaligned_Load);
> +             break;
>             }
>         }
>      }
> --
> 2.34.1


LGTM.


Reviewed-by: Noah Goldstein <goldstein.w.n@gmail.com>


* [PATCH v2 2/3] x86_64: Optimize large size copy in memmove-ssse3
  2024-06-29  3:58 ` [PATCH v2 2/3] x86_64: Optimize large size copy in memmove-ssse3 MayShao-oc
@ 2024-06-30  2:07   ` Noah Goldstein
  0 siblings, 0 replies; 7+ messages in thread
From: Noah Goldstein @ 2024-06-30  2:07 UTC (permalink / raw)
  To: MayShao-oc
  Cc: GNU C Library, Carlos O'Donell, H.J. Lu, Louis Qi(BJ-RD),
	Tim Hu(WH-RD), Hawk Wang(BJ-RD), May Shao(BJ-RD)

On Sat, Jun 29, 2024, 11:58 MayShao-oc <MayShao-oc@zhaoxin.com> wrote:

> This patch optimizes large size copies to use normal (cacheable) stores
> when src > dst and the regions overlap, matching the logic in
> memmove-vec-unaligned-erms.S.
>
> memmove-ssse3 currently uses '__x86_shared_cache_size_half' as the
> non-temporal threshold; this patch switches it to
> '__x86_shared_non_temporal_threshold'.  The latter is CPU-specific, with
> per-CPU values derived from the related nt-benchmark results, whereas
> half the shared cache size is a poor proxy for when non-temporal stores
> pay off.
>
> Overall performance is essentially unchanged, showing modest
> improvements on large sizes with no major regressions.
>
> Results on Zhaoxin KX-7000:
> bench-memcpy geometric_mean(N=20) New / Original: 0.999
>
> bench-memcpy-random geometric_mean(N=20) New / Original: 0.999
>
> bench-memcpy-large geometric_mean(N=20) New / Original: 0.978
>
> bench-memmove geometric_mean(N=20) New / Original: 1.000
>
> bench-memmove-large geometric_mean(N=20) New / Original: 0.962
>
> Results on Intel Core i5-6600K:
> bench-memcpy geometric_mean(N=20) New / Original: 1.001
>
> bench-memcpy-random geometric_mean(N=20) New / Original: 0.999
>
> bench-memcpy-large geometric_mean(N=20) New / Original: 1.001
>
> bench-memmove geometric_mean(N=20) New / Original: 0.995
>
> bench-memmove-large geometric_mean(N=20) New / Original: 0.936
> ---
>  sysdeps/x86_64/multiarch/memmove-ssse3.S | 14 +++++++++-----
>  1 file changed, 9 insertions(+), 5 deletions(-)
>
> diff --git a/sysdeps/x86_64/multiarch/memmove-ssse3.S b/sysdeps/x86_64/multiarch/memmove-ssse3.S
> index 048d015712..01008fd981 100644
> --- a/sysdeps/x86_64/multiarch/memmove-ssse3.S
> +++ b/sysdeps/x86_64/multiarch/memmove-ssse3.S
> @@ -151,13 +151,10 @@ L(more_2x_vec):
>            loop.  */
>         movups  %xmm0, (%rdi)
>
> -# ifdef SHARED_CACHE_SIZE_HALF
> -       cmp     $SHARED_CACHE_SIZE_HALF, %RDX_LP
> -# else
> -       cmp     __x86_shared_cache_size_half(%rip), %rdx
> -# endif
> +       cmp     __x86_shared_non_temporal_threshold(%rip), %rdx
>         ja      L(large_memcpy)
>
> +L(loop_fwd):
>         leaq    -64(%rdi, %rdx), %r8
>         andq    $-16, %rdi
>         movl    $48, %edx
> @@ -199,6 +196,13 @@ L(large_memcpy):
>         movups  -64(%r9, %rdx), %xmm10
>         movups  -80(%r9, %rdx), %xmm11
>
> +       /* Check if src and dst overlap.  If they do, use cacheable
> +          writes to potentially gain positive interference between
> +          the loads during the memmove.  */
> +       subq    %rdi, %r9
> +       cmpq    %rdx, %r9
> +       jb      L(loop_fwd)
> +
>         sall    $5, %ecx
>         leal    (%rcx, %rcx, 2), %r8d
>         leaq    -96(%rdi, %rdx), %rcx
> --
> 2.34.1
>
>
LGTM.

Reviewed-by: Noah Goldstein <goldstein.w.n@gmail.com>


* Re: [PATCH v2 3/3] x86: Set default non_temporal_threshold for Zhaoxin processors
  2024-06-29  3:58 ` [PATCH v2 3/3] x86: Set default non_temporal_threshold for Zhaoxin processors MayShao-oc
@ 2024-06-30  2:08   ` Noah Goldstein
  0 siblings, 0 replies; 7+ messages in thread
From: Noah Goldstein @ 2024-06-30  2:08 UTC (permalink / raw)
  To: MayShao-oc
  Cc: libc-alpha, carlos, hjl.tools, LouisQi, TimHu, Hawkwang, MayShao

On Saturday, June 29, 2024, MayShao-oc <MayShao-oc@zhaoxin.com> wrote:

> Currently, 'non_temporal_threshold' is set to
> 'non_temporal_threshold_lowbound' on Zhaoxin processors without ERMS.
> That default lowbound is too small for the KH-40000 and KX-7000 Zhaoxin
> processors, so this patch updates the value to
> 'shared / cachesize_non_temporal_divisor'.
> ---
>  sysdeps/x86/cpu-features.c | 1 +
>  sysdeps/x86/dl-cacheinfo.h | 6 ++++--
>  2 files changed, 5 insertions(+), 2 deletions(-)
>
> diff --git a/sysdeps/x86/cpu-features.c b/sysdeps/x86/cpu-features.c
> index 1927f65699..e501e084ef 100644
> --- a/sysdeps/x86/cpu-features.c
> +++ b/sysdeps/x86/cpu-features.c
> @@ -1065,6 +1065,7 @@ https://www.intel.com/content/www/us/en/support/articles/000059422/processors.ht
>
>               /* Yongfeng and Shijidadao microarch tuning.  */
>             case 0x5b:
> +             cpu_features->cachesize_non_temporal_divisor = 2;
>             case 0x6b:
>               cpu_features->preferred[index_arch_AVX_Fast_Unaligned_Load]
>                   &= ~bit_arch_AVX_Fast_Unaligned_Load;
> diff --git a/sysdeps/x86/dl-cacheinfo.h b/sysdeps/x86/dl-cacheinfo.h
> index 3a6ec4ef9f..5e77345a6e 100644
> --- a/sysdeps/x86/dl-cacheinfo.h
> +++ b/sysdeps/x86/dl-cacheinfo.h
> @@ -934,8 +934,10 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
>    /* If no ERMS, we use the per-thread L3 chunking. Normal cacheable stores run
>       a higher risk of actually thrashing the cache as they don't have a HW LRU
>       hint. As well, their performance in highly parallel situations is
> -     noticeably worse.  */
> -  if (!CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> +     noticeably worse.  Zhaoxin processors are an exception; based on
> +     actual test data, the lowbound is not suitable for them.  */
> +  if (!CPU_FEATURE_USABLE_P (cpu_features, ERMS)
> +      && cpu_features->basic.kind != arch_kind_zhaoxin)
>      non_temporal_threshold = non_temporal_threshold_lowbound;
>    /* SIZE_MAX >> 4 because memmove-vec-unaligned-erms right-shifts the value of
>       'x86_non_temporal_threshold' by `LOG_4X_MEMCPY_THRESH` (4) and it is best
> --
> 2.34.1
>
>
LGTM.

Reviewed-by: Noah Goldstein <goldstein.w.n@gmail.com>


* Re: [PATCH v2 1/3] x86: Set preferred CPU features on the KH-40000 and KX-7000 Zhaoxin processors
  2024-06-30  2:06 ` [PATCH v2 1/3] x86: Set preferred CPU features on the KH-40000 and KX-7000 " Noah Goldstein
@ 2024-06-30 13:40   ` Mayshao-oc
  0 siblings, 0 replies; 7+ messages in thread
From: Mayshao-oc @ 2024-06-30 13:40 UTC (permalink / raw)
  To: Noah Goldstein, hjl.tools, carlos, Florian Weimer, Xi Ruoyao
  Cc: libc-alpha, Louis Qi(BJ-RD), Tim Hu(WH-RD), Hawk Wang(BJ-RD),
	May Shao(BJ-RD)

Hi Florian, Ruoyao, H.J., and Carlos,

Could anyone help push this series of patches to master? Tomorrow is
the freeze date, but I don't have commit access, and Noah can't push
right now either.

Thank you in advance, everyone.

Best Regards,
May Shao


> On Sunday, Jun 30, 2024 at 10:06 AM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> On Saturday, June 29, 2024, MayShao-oc <MayShao-oc@zhaoxin.com> wrote:
> Fix code formatting under the Zhaoxin branch and add comments for
> different Zhaoxin models.
> 
> Unaligned AVX loads are slower on KH-40000 and KX-7000, so disable
> AVX_Fast_Unaligned_Load.
> 
> Enable the Prefer_No_VZEROUPPER and Fast_Unaligned_Load features to
> use the sse2_unaligned versions of memset, strcpy, and strcat.
> ---
>  sysdeps/x86/cpu-features.c | 51 ++++++++++++++++++++++++++------------
>  1 file changed, 35 insertions(+), 16 deletions(-)
> 
> diff --git a/sysdeps/x86/cpu-features.c b/sysdeps/x86/cpu-features.c
> index 3d7c2819d7..1927f65699 100644
> --- a/sysdeps/x86/cpu-features.c
> +++ b/sysdeps/x86/cpu-features.c
> @@ -1023,39 +1023,58 @@ https://www.intel.com/content/www/us/en/support/articles/000059422/processors.ht
> 
>        model += extended_model;
>        if (family == 0x6)
> -        {
> -          if (model == 0xf || model == 0x19)
> -            {
> +       {
> +         /* Tuning for older Zhaoxin processors.  */
> +         if (model == 0xf || model == 0x19)
> +           {
>               CPU_FEATURE_UNSET (cpu_features, AVX);
>               CPU_FEATURE_UNSET (cpu_features, AVX2);
> 
> -              cpu_features->preferred[index_arch_Slow_SSE4_2]
> -                |= bit_arch_Slow_SSE4_2;
> +             cpu_features->preferred[index_arch_Slow_SSE4_2]
> +                 |= bit_arch_Slow_SSE4_2;
> 
> +             /* Unaligned AVX loads are slower.  */
>               cpu_features->preferred[index_arch_AVX_Fast_Unaligned_Load]
> -               &= ~bit_arch_AVX_Fast_Unaligned_Load;
> -            }
> -        }
> +                 &= ~bit_arch_AVX_Fast_Unaligned_Load;
> +           }
> +       }
>        else if (family == 0x7)
> -        {
> -         if (model == 0x1b)
> +       {
> +         switch (model)
>             {
> +             /* Wudaokou microarch tuning.  */
> +           case 0x1b:
>               CPU_FEATURE_UNSET (cpu_features, AVX);
>               CPU_FEATURE_UNSET (cpu_features, AVX2);
> 
>               cpu_features->preferred[index_arch_Slow_SSE4_2]
> -               |= bit_arch_Slow_SSE4_2;
> +                 |= bit_arch_Slow_SSE4_2;
> 
>               cpu_features->preferred[index_arch_AVX_Fast_Unaligned_Load]
> -               &= ~bit_arch_AVX_Fast_Unaligned_Load;
> -           }
> -         else if (model == 0x3b)
> -           {
> +                 &= ~bit_arch_AVX_Fast_Unaligned_Load;
> +             break;
> +
> +             /* Lujiazui microarch tuning.  */
> +           case 0x3b:
>               CPU_FEATURE_UNSET (cpu_features, AVX);
>               CPU_FEATURE_UNSET (cpu_features, AVX2);
> 
>               cpu_features->preferred[index_arch_AVX_Fast_Unaligned_Load]
> -               &= ~bit_arch_AVX_Fast_Unaligned_Load;
> +                 &= ~bit_arch_AVX_Fast_Unaligned_Load;
> +             break;
> +
> +             /* Yongfeng and Shijidadao microarch tuning.  */
> +           case 0x5b:
> +           case 0x6b:
> +             cpu_features->preferred[index_arch_AVX_Fast_Unaligned_Load]
> +                 &= ~bit_arch_AVX_Fast_Unaligned_Load;
> +
> +             /* To use sse2_unaligned versions of memset, strcpy and strcat.
> +              */
> +             cpu_features->preferred[index_arch_Prefer_No_VZEROUPPER]
> +                 |= (bit_arch_Prefer_No_VZEROUPPER
> +                     | bit_arch_Fast_Unaligned_Load);
> +             break;
>             }
>         }
>      }
> --
> 2.34.1
> 
> LGTM.
> 
> 
> Reviewed-by: Noah Goldstein <goldstein.w.n@gmail.com>

