public inbox for libc-alpha@sourceware.org
From: "H.J. Lu" <hjl.tools@gmail.com>
To: Noah Goldstein <goldstein.w.n@gmail.com>
Cc: GNU C Library <libc-alpha@sourceware.org>,
	"Carlos O'Donell" <carlos@systemhalted.org>
Subject: Re: [PATCH v2 1/2] x86: Optimize less_vec evex and avx512 memset-vec-unaligned-erms.S
Date: Mon, 19 Apr 2021 13:39:03 -0700
Message-ID: <CAMe9rOpxiWWmPVkc-EGAtgKWp20Po3aDc5o8ZEANLwEE1uz01w@mail.gmail.com>
In-Reply-To: <CAFUsyfJXLM-mHd=hBHV6PWoSVgUbZdmfBZwBtcqdoEm4Jjtk2Q@mail.gmail.com>

On Mon, Apr 19, 2021 at 12:35 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
>
> On Mon, Apr 19, 2021 at 2:45 PM H.J. Lu <hjl.tools@gmail.com> wrote:
> >
> > On Mon, Apr 19, 2021 at 9:30 AM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> > >
> > > No bug. This commit adds an optimized case for less_vec memset that
> > > uses the avx512vl/avx512bw mask store, avoiding the excessive
> > > branches. test-memset and test-wmemset are passing.
> > >
> > > Signed-off-by: Noah Goldstein <goldstein.w.n@gmail.com>
> > > ---
> > >  sysdeps/x86_64/multiarch/ifunc-memset.h       |  6 ++-
> > >  .../multiarch/memset-avx512-unaligned-erms.S  |  2 +-
> > >  .../multiarch/memset-evex-unaligned-erms.S    |  2 +-
> > >  .../multiarch/memset-vec-unaligned-erms.S     | 52 +++++++++++++++----
> > >  4 files changed, 47 insertions(+), 15 deletions(-)
> > >
> > > diff --git a/sysdeps/x86_64/multiarch/ifunc-memset.h b/sysdeps/x86_64/multiarch/ifunc-memset.h
> > > index 502f946a84..eda5640541 100644
> > > --- a/sysdeps/x86_64/multiarch/ifunc-memset.h
> > > +++ b/sysdeps/x86_64/multiarch/ifunc-memset.h
> > > @@ -54,7 +54,8 @@ IFUNC_SELECTOR (void)
> > >        && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
> > >      {
> > >        if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
> > > -         && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW))
> > > +          && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
> > > +          && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
> > >         {
> > >           if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> > >             return OPTIMIZE (avx512_unaligned_erms);
> > > @@ -68,7 +69,8 @@ IFUNC_SELECTOR (void)
> > >    if (CPU_FEATURE_USABLE_P (cpu_features, AVX2))
> > >      {
> > >        if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
> > > -         && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW))
> > > +          && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
> > > +          && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
> >
> > Please also update ifunc-impl-list.c.
>
> Done.
>
> >
> > >         {
> > >           if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> > >             return OPTIMIZE (evex_unaligned_erms);
> > > diff --git a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
> > > index 22e7b187c8..d03460be93 100644
> > > --- a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
> > > +++ b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
> > > @@ -19,6 +19,6 @@
> > >  # define SECTION(p)            p##.evex512
> > >  # define MEMSET_SYMBOL(p,s)    p##_avx512_##s
> > >  # define WMEMSET_SYMBOL(p,s)   p##_avx512_##s
> > > -
> > > +# define USE_LESS_VEC_MASKMOV  1
> >
> > USE_LESS_VEC_MASKED_STORE
>
> Done.
>
> >
> > >  # include "memset-vec-unaligned-erms.S"
> > >  #endif
> > > diff --git a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
> > > index ae0a4d6e46..eb3541ef60 100644
> > > --- a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
> > > +++ b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
> > > @@ -19,6 +19,6 @@
> > >  # define SECTION(p)            p##.evex
> > >  # define MEMSET_SYMBOL(p,s)    p##_evex_##s
> > >  # define WMEMSET_SYMBOL(p,s)   p##_evex_##s
> > > -
> > > +# define USE_LESS_VEC_MASKMOV  1
> > >  # include "memset-vec-unaligned-erms.S"
> > >  #endif
> > > diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> > > index 584747f1a1..6b02e87f48 100644
> > > --- a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> > > +++ b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> > > @@ -63,6 +63,9 @@
> > >  # endif
> > >  #endif
> > >
> > > +#define PAGE_SIZE 4096
> > > +#define LOG_PAGE_SIZE 12
> > > +
> > >  #ifndef SECTION
> > >  # error SECTION is not defined!
> > >  #endif
> > > @@ -213,11 +216,38 @@ L(loop):
> > >         cmpq    %rcx, %rdx
> > >         jne     L(loop)
> > >         VZEROUPPER_SHORT_RETURN
> > > +
> > > +       .p2align 4
> > >  L(less_vec):
> > >         /* Less than 1 VEC.  */
> > >  # if VEC_SIZE != 16 && VEC_SIZE != 32 && VEC_SIZE != 64
> > >  #  error Unsupported VEC_SIZE!
> > >  # endif
> > > +# ifdef USE_LESS_VEC_MASKMOV
> > > +       /* Clear high bits from edi. Only keeping bits relevant to page
> > > +          cross check. Using sall instead of andl saves 3 bytes. Note
> > > +          that we are using rax which is set in
> > > +          MEMSET_VDUP_TO_VEC0_AND_SET_RETURN as ptr from here on out.  */
> > > +       sall    $(32 - LOG_PAGE_SIZE), %edi
> > > +       /* Check if the VEC_SIZE load crosses a page. Mask loads suffer
> > > +          serious performance degradation when they have to fault suppress.  */
> > > +       cmpl    $((PAGE_SIZE - VEC_SIZE) << (32 - LOG_PAGE_SIZE)), %edi
> >
> > Please use AND and CMP since AND has higher throughput.
>
> AND uses more code size for VEC_SIZE=16/32 and just barely pushes
> L(cross_page) into the next 16-byte chunk, so the extra 3 bytes from
> AND end up costing 16 bytes. Not aligning L(cross_page) to 16 also
> introduces higher variance in the benchmarks, so I think it has to be
> the full 16 bytes.
>
> As is, I don't think the throughput of AND / SAL is on the critical
> path, so code size should win out. (With SAL we can also decode the
> MOV -1, ecx in the first cycle, as opposed to AND.)
>
> What do you think?
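
For context, the masked-store fast path under discussion boils down to
something like the following C-intrinsics sketch (illustrative only;
the identifiers are mine, not the actual glibc code, and the real
implementation is assembly). The mask computation is also, presumably,
where the new BMI2 requirement in the ifunc selector comes from:

#include <immintrin.h>
#include <stddef.h>

/* Sketch of the less_vec path for n < VEC_SIZE (32 here, the evex
   case).  AVX512VL + AVX512BW provide the byte-granularity masked
   store; BMI2 provides _bzhi_u32 for the mask.  This path only makes
   sense after checking that a full-VEC store at dst stays within the
   page, since masked stores that must suppress faults are very slow.  */
static void *
memset_less_vec (void *dst, int c, size_t n)
{
  __m256i v = _mm256_set1_epi8 ((char) c);
  /* Keep the low n bits of all-ones: one mask bit per byte.  */
  __mmask32 m = (__mmask32) _bzhi_u32 (~0U, (unsigned int) n);
  /* One masked store replaces the branchy 1/2/4/8/16-byte cases.  */
  _mm256_mask_storeu_epi8 (dst, m, v);
  return dst;
}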

I prefer AND over SAL.  Something like

diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
index 3a59d39267..763fb907b9 100644
--- a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
@@ -217,21 +217,17 @@ L(loop):
   jne   L(loop)
   VZEROUPPER_SHORT_RETURN

-  .p2align 4
+  /* NB: Don't align this branch target to reduce code size.  */
 L(less_vec):
   /* Less than 1 VEC.  */
 # if VEC_SIZE != 16 && VEC_SIZE != 32 && VEC_SIZE != 64
 #  error Unsupported VEC_SIZE!
 # endif
 # ifdef USE_LESS_VEC_MASK_STORE
-  /* Clear high bits from edi. Only keeping bits relevant to page
-     cross check. Using sall instead of andl saves 3 bytes. Note
-     that we are using rax which is set in
-     MEMSET_VDUP_TO_VEC0_AND_SET_RETURN as ptr from here on out.  */
-  sall  $(32 - LOG_PAGE_SIZE), %edi
-  /* Check if the VEC_SIZE load crosses a page. Mask loads suffer
+  /* Check if the VEC_SIZE store crosses a page. Mask stores suffer
      serious performance degradation when they have to fault suppress.  */
-  cmpl  $((PAGE_SIZE - VEC_SIZE) << (32 - LOG_PAGE_SIZE)), %edi
+  andl  $(PAGE_SIZE - 1), %edi
+  cmpl  $(PAGE_SIZE - VEC_SIZE), %edi
   ja L(cross_page)
 # if VEC_SIZE > 32
   movq  $-1, %rcx

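As a quick sanity check, both forms test the same predicate; in C
terms (a sketch with illustrative helper names, taking ptr as the low
32 bits of the destination, PAGE_SIZE 4096, LOG_PAGE_SIZE 12 and
VEC_SIZE 32):

#include <stdint.h>

/* SAL form: shift the 12 page-offset bits to the top of the register,
   then do an unsigned compare against the equally shifted constant.  */
static int
cross_page_sal (uint32_t ptr)
{
  return (ptr << (32 - 12)) > ((4096U - 32) << (32 - 12));
}

/* AND form: isolate the page offset and compare it directly.  */
static int
cross_page_and (uint32_t ptr)
{
  return (ptr & (4096U - 1)) > 4096U - 32;
}
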
Thanks.

-- 
H.J.

Thread overview: 6 messages
2021-04-19 16:30 Noah Goldstein
2021-04-19 16:30 ` [PATCH v2 2/2] x86: Expand test-memset.c and bench-memset.c Noah Goldstein
2021-04-19 18:44 ` [PATCH v2 1/2] x86: Optimize less_vec evex and avx512 memset-vec-unaligned-erms.S H.J. Lu
2021-04-19 19:35   ` Noah Goldstein
2021-04-19 20:39     ` H.J. Lu [this message]
2021-04-19 21:07       ` Noah Goldstein
