public inbox for libc-alpha@sourceware.org
From: Carlos O'Donell <carlos@redhat.com>
To: "H.J. Lu" <hjl.tools@gmail.com>, libc-alpha@sourceware.org
Subject: Re: [PATCH 2/2] x86: Add thresholds for "rep movsb/stosb" to tunables
Date: Fri, 3 Jul 2020 15:49:21 -0400	[thread overview]
Message-ID: <8cef5b4a-cdda-6eaa-a859-5a410560a4ce@redhat.com> (raw)
In-Reply-To: <20200703175220.1178840-3-hjl.tools@gmail.com>

On 7/3/20 1:52 PM, H.J. Lu wrote:
> Add x86_rep_movsb_threshold and x86_rep_stosb_threshold to tunables
> to update thresholds for "rep movsb" and "rep stosb" at run-time.
> 
> Note that a user-specified threshold for "rep movsb" that is smaller
> than the minimum threshold will be ignored.

Post v2 please. Almost there.
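
For reference, these are set at run time through the GLIBC_TUNABLES
environment variable (colon-separated name=value pairs).  A hypothetical
invocation, with values chosen purely for illustration:

  GLIBC_TUNABLES=glibc.cpu.x86_rep_movsb_threshold=4096:glibc.cpu.x86_rep_stosb_threshold=8192 \
    ./your-program

Per the note above, a "rep movsb" value below the computed minimum is
ignored.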

> ---
>  manual/tunables.texi                          | 14 +++++++
>  sysdeps/x86/cacheinfo.c                       | 20 ++++++++++
>  sysdeps/x86/cpu-features.h                    |  4 ++
>  sysdeps/x86/dl-cacheinfo.c                    | 38 +++++++++++++++++++
>  sysdeps/x86/dl-tunables.list                  |  6 +++
>  .../multiarch/memmove-vec-unaligned-erms.S    | 16 +-------
>  .../multiarch/memset-vec-unaligned-erms.S     | 12 +-----
>  7 files changed, 84 insertions(+), 26 deletions(-)
> 
> diff --git a/manual/tunables.texi b/manual/tunables.texi
> index ec18b10834..61edd62425 100644
> --- a/manual/tunables.texi
> +++ b/manual/tunables.texi
> @@ -396,6 +396,20 @@ to set threshold in bytes for non temporal store.
>  This tunable is specific to i386 and x86-64.
>  @end deftp
>  
> +@deftp Tunable glibc.cpu.x86_rep_movsb_threshold
> +The @code{glibc.cpu.x86_rep_movsb_threshold} tunable allows the user
> +to set threshold in bytes to start using "rep movsb".
> +
> +This tunable is specific to i386 and x86-64.
> +@end deftp
> +
> +@deftp Tunable glibc.cpu.x86_rep_stosb_threshold
> +The @code{glibc.cpu.x86_rep_stosb_threshold} tunable allows the user
> +to set threshold in bytes to start using "rep stosb".
> +
> +This tunable is specific to i386 and x86-64.
> +@end deftp
> +
>  @deftp Tunable glibc.cpu.x86_ibt
>  The @code{glibc.cpu.x86_ibt} tunable allows the user to control how
>  indirect branch tracking (IBT) should be enabled.  Accepted values are
> diff --git a/sysdeps/x86/cacheinfo.c b/sysdeps/x86/cacheinfo.c
> index 8c4c7f9972..bb536d96ef 100644
> --- a/sysdeps/x86/cacheinfo.c
> +++ b/sysdeps/x86/cacheinfo.c
> @@ -41,6 +41,23 @@ long int __x86_raw_shared_cache_size attribute_hidden = 1024 * 1024;
>  /* Threshold to use non temporal store.  */
>  long int __x86_shared_non_temporal_threshold attribute_hidden;
>  
> +/* Threshold to use Enhanced REP MOVSB.  Since there is overhead to set
> +   up REP MOVSB operation, REP MOVSB isn't faster on short data.  The
> +   memcpy micro benchmark in glibc shows that 2KB is the approximate
> +   value above which REP MOVSB becomes faster than SSE2 optimization
> +   on processors with Enhanced REP MOVSB.  Since larger register size
> +   can move more data with a single load and store, the threshold is
> +   higher with larger register size.  */
> +long int __x86_rep_movsb_threshold attribute_hidden = 2048;
> +
> +/* Threshold to use Enhanced REP STOSB.  Since there is overhead to set
> +   up REP STOSB operation, REP STOSB isn't faster on short data.  The
> +   memset micro benchmark in glibc shows that 2KB is the approximate
> +   value above which REP STOSB becomes faster on processors with
> +   Enhanced REP STOSB.  Since the stored value is fixed, larger register
> +   size has minimal impact on threshold.  */
> +long int __x86_rep_stosb_threshold attribute_hidden = 2048;
> +
>  #ifndef __x86_64__
>  /* PREFETCHW support flag for use in memory and string routines.  */
>  int __x86_prefetchw attribute_hidden;
> @@ -117,6 +134,9 @@ init_cacheinfo (void)
>    __x86_shared_non_temporal_threshold
>      = cpu_features->non_temporal_threshold;
>  
> +  __x86_rep_movsb_threshold = cpu_features->rep_movsb_threshold;
> +  __x86_rep_stosb_threshold = cpu_features->rep_stosb_threshold;
> +

OK. Update global from cpu_features with values.

I would really like to see some kind of "assert (cpu_features->initialized);"
so that we know we didn't break the startup sequence unintentionally.
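
Something like the following would do (a sketch only; it assumes a
boolean "initialized" member is added to struct cpu_features and set at
the end of cpu feature initialization, which this patch does not add):

  #include <assert.h>

  /* In init_cacheinfo, before reading any of the computed fields.  */
  assert (cpu_features->initialized);
  __x86_rep_movsb_threshold = cpu_features->rep_movsb_threshold;
  __x86_rep_stosb_threshold = cpu_features->rep_stosb_threshold;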

>  #ifndef __x86_64__
>    __x86_prefetchw = cpu_features->prefetchw;
>  #endif
> diff --git a/sysdeps/x86/cpu-features.h b/sysdeps/x86/cpu-features.h
> index 3aaed33cbc..002e12e11f 100644
> --- a/sysdeps/x86/cpu-features.h
> +++ b/sysdeps/x86/cpu-features.h
> @@ -128,6 +128,10 @@ struct cpu_features
>    /* PREFETCHW support flag for use in memory and string routines.  */
>    unsigned long int prefetchw;
>  #endif
> +  /* Threshold to use "rep movsb".  */
> +  unsigned long int rep_movsb_threshold;
> +  /* Threshold to use "rep stosb".  */
> +  unsigned long int rep_stosb_threshold;

OK.

>  };
>  
>  /* Used from outside of glibc to get access to the CPU features
> diff --git a/sysdeps/x86/dl-cacheinfo.c b/sysdeps/x86/dl-cacheinfo.c
> index 8e2a6f552c..aff9bd1067 100644
> --- a/sysdeps/x86/dl-cacheinfo.c
> +++ b/sysdeps/x86/dl-cacheinfo.c
> @@ -860,6 +860,31 @@ __init_cacheinfo (void)
>       total shared cache size.  */
>    unsigned long int non_temporal_threshold = (shared * threads * 3 / 4);
>  
> +  /* NB: The REP MOVSB threshold must be greater than VEC_SIZE * 8.  */
> +  unsigned long int minimum_rep_movsb_threshold;
> +  /* NB: The default REP MOVSB threshold is 2048 * (VEC_SIZE / 16).  See
> +     comments for __x86_rep_movsb_threshold in cacheinfo.c.  */
> +  unsigned long int rep_movsb_threshold;
> +  if (CPU_FEATURES_ARCH_P (cpu_features, AVX512F_Usable)
> +      && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
> +    {
> +      rep_movsb_threshold = 2048 * (64 / 16);
> +      minimum_rep_movsb_threshold = 64 * 8;
> +    }
> +  else if (CPU_FEATURES_ARCH_P (cpu_features,
> +				AVX_Fast_Unaligned_Load))
> +    {
> +      rep_movsb_threshold = 2048 * (32 / 16);
> +      minimum_rep_movsb_threshold = 32 * 8;
> +    }
> +  else
> +    {
> +      rep_movsb_threshold = 2048 * (16 / 16);
> +      minimum_rep_movsb_threshold = 16 * 8;
> +    }
> +  /* NB: See comments for __x86_rep_stosb_threshold in cacheinfo.c.  */
> +  unsigned long int rep_stosb_threshold = 2048;
> +
>  #if HAVE_TUNABLES
>    long int tunable_size;
>    tunable_size = TUNABLE_GET (x86_data_cache_size, long int, NULL);
> @@ -871,11 +896,19 @@ __init_cacheinfo (void)
>    tunable_size = TUNABLE_GET (x86_non_temporal_threshold, long int, NULL);
>    if (tunable_size != 0)
>      non_temporal_threshold = tunable_size;

> +  tunable_size = TUNABLE_GET (x86_rep_movsb_threshold, long int, NULL);
> +  if (tunable_size > minimum_rep_movsb_threshold)
> +    rep_movsb_threshold = tunable_size;

OK. Good, we only set rep_movsb_threshold if it's greater than min.

> +  tunable_size = TUNABLE_GET (x86_rep_stosb_threshold, long int, NULL);
> +  if (tunable_size != 0)
> +    rep_stosb_threshold = tunable_size;

This should be min=1, default=2048 in dl-tunables.list, which would let
us remove this code since the range is not dynamic.

The point of the tunables framework is to remove such boilerplate for
range and default processing and for clearing parameters for security
settings.
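
Concretely, something along these lines in dl-tunables.list (a sketch;
minval/default are the field names the framework already understands):

    x86_rep_stosb_threshold {
      type: SIZE_T
      minval: 1
      default: 2048
    }

With that, the "if (tunable_size != 0)" fallback above is unnecessary,
since the framework applies the default and clamps to the range before
the value is read here.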

>  #endif
>  
>    cpu_features->data_cache_size = data;
>    cpu_features->shared_cache_size = shared;
>    cpu_features->non_temporal_threshold = non_temporal_threshold;
> +  cpu_features->rep_movsb_threshold = rep_movsb_threshold;
> +  cpu_features->rep_stosb_threshold = rep_stosb_threshold;
>  
>  #if HAVE_TUNABLES
>    TUNABLE_UPDATE (x86_data_cache_size, long int,
> @@ -884,5 +917,10 @@ __init_cacheinfo (void)
>  		  shared, 0, (long int) -1);
>    TUNABLE_UPDATE (x86_non_temporal_threshold, long int,
>  		  non_temporal_threshold, 0, (long int) -1);
> +  TUNABLE_UPDATE (x86_rep_movsb_threshold, long int,
> +		  rep_movsb_threshold, minimum_rep_movsb_threshold,
> +		  (long int) -1);

OK. Store the new value and the computed minimum.

> +  TUNABLE_UPDATE (x86_rep_stosb_threshold, long int,
> +		  rep_stosb_threshold, 0, (long int) -1);

This one can be deleted.

>  #endif
>  }
> diff --git a/sysdeps/x86/dl-tunables.list b/sysdeps/x86/dl-tunables.list
> index 251b926ce4..43bf6c2389 100644
> --- a/sysdeps/x86/dl-tunables.list
> +++ b/sysdeps/x86/dl-tunables.list
> @@ -30,6 +30,12 @@ glibc {
>      x86_non_temporal_threshold {
>        type: SIZE_T
>      }
> +    x86_rep_movsb_threshold {
> +      type: SIZE_T
> +    }
> +    x86_rep_stosb_threshold {
> +      type: SIZE_T

min: 1
default: 2048

> +    }
>      x86_data_cache_size {
>        type: SIZE_T
>      }
> diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
> index 74953245aa..bd5dc1a3f3 100644
> --- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
> +++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
> @@ -56,17 +56,6 @@
>  # endif
>  #endif
>  
> -/* Threshold to use Enhanced REP MOVSB.  Since there is overhead to set
> -   up REP MOVSB operation, REP MOVSB isn't faster on short data.  The
> -   memcpy micro benchmark in glibc shows that 2KB is the approximate
> -   value above which REP MOVSB becomes faster than SSE2 optimization
> -   on processors with Enhanced REP MOVSB.  Since larger register size
> -   can move more data with a single load and store, the threshold is
> -   higher with larger register size.  */
> -#ifndef REP_MOVSB_THRESHOLD
> -# define REP_MOVSB_THRESHOLD	(2048 * (VEC_SIZE / 16))
> -#endif

OK.

> -
>  #ifndef PREFETCH
>  # define PREFETCH(addr) prefetcht0 addr
>  #endif
> @@ -253,9 +242,6 @@ L(movsb):
>  	leaq	(%rsi,%rdx), %r9
>  	cmpq	%r9, %rdi
>  	/* Avoid slow backward REP MOVSB.  */
> -# if REP_MOVSB_THRESHOLD <= (VEC_SIZE * 8)
> -#  error Unsupported REP_MOVSB_THRESHOLD and VEC_SIZE!
> -# endif

OK.

>  	jb	L(more_8x_vec_backward)
>  1:
>  	mov	%RDX_LP, %RCX_LP
> @@ -331,7 +317,7 @@ L(between_2_3):
>  
>  #if defined USE_MULTIARCH && IS_IN (libc)
>  L(movsb_more_2x_vec):
> -	cmpq	$REP_MOVSB_THRESHOLD, %rdx
> +	cmp	__x86_rep_movsb_threshold(%rip), %RDX_LP

OK.

>  	ja	L(movsb)
>  #endif
>  L(more_2x_vec):
> diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> index af2299709c..2bfc95de05 100644
> --- a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> +++ b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> @@ -58,16 +58,6 @@
>  # endif
>  #endif
>  
> -/* Threshold to use Enhanced REP STOSB.  Since there is overhead to set
> -   up REP STOSB operation, REP STOSB isn't faster on short data.  The
> -   memset micro benchmark in glibc shows that 2KB is the approximate
> -   value above which REP STOSB becomes faster on processors with
> -   Enhanced REP STOSB.  Since the stored value is fixed, larger register
> -   size has minimal impact on threshold.  */
> -#ifndef REP_STOSB_THRESHOLD
> -# define REP_STOSB_THRESHOLD		2048
> -#endif
> -
>  #ifndef SECTION
>  # error SECTION is not defined!
>  #endif
> @@ -181,7 +171,7 @@ ENTRY (MEMSET_SYMBOL (__memset, unaligned_erms))
>  	ret
>  
>  L(stosb_more_2x_vec):
> -	cmpq	$REP_STOSB_THRESHOLD, %rdx
> +	cmp	__x86_rep_stosb_threshold(%rip), %RDX_LP

OK.

>  	ja	L(stosb)
>  #endif
>  L(more_2x_vec):
> 


-- 
Cheers,
Carlos.



Thread overview: 14+ messages
2020-07-03 17:52 [PATCH 0/2] " H.J. Lu
2020-07-03 17:52 ` [PATCH 1/2] Update tunable min/max values H.J. Lu
2020-07-03 19:49   ` Carlos O'Donell
2020-07-03 23:09     ` V2 " H.J. Lu
2020-07-06 12:47       ` Carlos O'Donell
2020-07-06 13:13         ` H.J. Lu
2020-07-06 13:15           ` Carlos O'Donell
2020-07-06 13:42             ` H.J. Lu
2020-07-03 17:52 ` [PATCH 2/2] x86: Add thresholds for "rep movsb/stosb" to tunables H.J. Lu
2020-07-03 19:49   ` Carlos O'Donell [this message]
2020-07-04 12:03     ` V2 [PATCH] " H.J. Lu
2020-07-06 12:59       ` Carlos O'Donell
2020-07-06 16:44         ` V3 " H.J. Lu
2020-07-06 18:18           ` Carlos O'Donell
