From: Richard Sandiford <richard.sandiford@arm.com>
To: Jonathan Wright <Jonathan.Wright@arm.com>
Cc: "gcc-patches\@gcc.gnu.org" <gcc-patches@gcc.gnu.org>,
	Kyrylo Tkachov <Kyrylo.Tkachov@arm.com>
Subject: Re: [PATCH] aarch64: Use type-qualified builtins for [R]SUBHN[2] Neon intrinsics
Date: Thu, 11 Nov 2021 10:31:52 +0000	[thread overview]
Message-ID: <mpt4k8jrnjb.fsf@arm.com> (raw)
In-Reply-To: <DB9PR08MB695961671B4E2BE369F96C60EB949@DB9PR08MB6959.eurprd08.prod.outlook.com> (Jonathan Wright's message of "Thu, 11 Nov 2021 10:27:26 +0000")

Jonathan Wright <Jonathan.Wright@arm.com> writes:
> Hi,
>
> This patch declares unsigned type-qualified builtins and uses them to
> implement (rounding) halving-narrowing-subtract Neon intrinsics. This
> removes the need for many casts in arm_neon.h.
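>
> For example, with the new unsigned builtin, vsubhn_u16 can call
> __builtin_aarch64_subhnv8hi_uuu directly (see the diff below). As a
> minimal, illustrative sketch of how the intrinsic is used (narrow_diff
> is a hypothetical helper, not part of this patch):
>
>   #include <arm_neon.h>
>
>   uint8x8_t
>   narrow_diff (uint16x8_t a, uint16x8_t b)
>   {
>     /* Each result lane is the high 8 bits of the 16-bit lane-wise
>        difference, i.e. (a - b) >> 8.  */
>     return vsubhn_u16 (a, b);
>   }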
>
> Bootstrapped and regression tested on aarch64-none-linux-gnu - no
> issues.
>
> Ok for master?
>
> Thanks,
> Jonathan
>
> ---
>
> gcc/ChangeLog:
>
> 2021-11-09  Jonathan Wright  <jonathan.wright@arm.com>
>
>         * config/aarch64/aarch64-simd-builtins.def: Declare unsigned
>         builtins for [r]subhn[2].
>         * config/aarch64/arm_neon.h (vsubhn_s16): Remove unnecessary
>         cast.
>         (vsubhn_s32): Likewise.
>         (vsubhn_s64): Likewise.
>         (vsubhn_u16): Use type-qualified builtin and remove casts.
>         (vsubhn_u32): Likewise.
>         (vsubhn_u64): Likewise.
>         (vrsubhn_s16): Remove unnecessary cast.
>         (vrsubhn_s32): Likewise.
>         (vrsubhn_s64): Likewise.
>         (vrsubhn_u16): Use type-qualified builtin and remove casts.
>         (vrsubhn_u32): Likewise.
>         (vrsubhn_u64): Likewise.
>         (vrsubhn_high_s16): Remove unnecessary cast.
>         (vrsubhn_high_s32): Likewise.
>         (vrsubhn_high_s64): Likewise.
>         (vrsubhn_high_u16): Use type-qualified builtin and remove
>         casts.
>         (vrsubhn_high_u32): Likewise.
>         (vrsubhn_high_u64): Likewise.
>         (vsubhn_high_s16): Remove unnecessary cast.
>         (vsubhn_high_s32): Likewise.
>         (vsubhn_high_s64): Likewise.
>         (vsubhn_high_u16): Use type-qualified builtin and remove
>         casts.
>         (vsubhn_high_u32): Likewise.
>         (vsubhn_high_u64): Likewise.

OK, thanks.

Richard

> diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
> index 6372da80be33c40cb27e5811bfb4f4f672f28a35..035bddcb660e34146b709fdae244571cdeb06272 100644
> --- a/gcc/config/aarch64/aarch64-simd-builtins.def
> +++ b/gcc/config/aarch64/aarch64-simd-builtins.def
> @@ -222,16 +222,20 @@
>    BUILTIN_VQN (BINOP, addhn, 0, NONE)
>    BUILTIN_VQN (BINOPU, addhn, 0, NONE)
>    BUILTIN_VQN (BINOP, subhn, 0, NONE)
> +  BUILTIN_VQN (BINOPU, subhn, 0, NONE)
>    BUILTIN_VQN (BINOP, raddhn, 0, NONE)
>    BUILTIN_VQN (BINOPU, raddhn, 0, NONE)
>    BUILTIN_VQN (BINOP, rsubhn, 0, NONE)
> +  BUILTIN_VQN (BINOPU, rsubhn, 0, NONE)
>    /* Implemented by aarch64_<sur><addsub>hn2<mode>.  */
>    BUILTIN_VQN (TERNOP, addhn2, 0, NONE)
>    BUILTIN_VQN (TERNOPU, addhn2, 0, NONE)
>    BUILTIN_VQN (TERNOP, subhn2, 0, NONE)
> +  BUILTIN_VQN (TERNOPU, subhn2, 0, NONE)
>    BUILTIN_VQN (TERNOP, raddhn2, 0, NONE)
>    BUILTIN_VQN (TERNOPU, raddhn2, 0, NONE)
>    BUILTIN_VQN (TERNOP, rsubhn2, 0, NONE)
> +  BUILTIN_VQN (TERNOPU, rsubhn2, 0, NONE)
>  
>    /* Implemented by aarch64_<us>xtl<mode>.  */
>    BUILTIN_VQN (UNOP, sxtl, 0, NONE)
> diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
> index cb481542ba0d6ffb7cc8ffe7c1a098930fc5e746..ac871d4e503c634b453cd1f1d3e61182ce4a5a88 100644
> --- a/gcc/config/aarch64/arm_neon.h
> +++ b/gcc/config/aarch64/arm_neon.h
> @@ -2022,186 +2022,168 @@ __extension__ extern __inline int8x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vsubhn_s16 (int16x8_t __a, int16x8_t __b)
>  {
> -  return (int8x8_t) __builtin_aarch64_subhnv8hi (__a, __b);
> +  return __builtin_aarch64_subhnv8hi (__a, __b);
>  }
>  
>  __extension__ extern __inline int16x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vsubhn_s32 (int32x4_t __a, int32x4_t __b)
>  {
> -  return (int16x4_t) __builtin_aarch64_subhnv4si (__a, __b);
> +  return __builtin_aarch64_subhnv4si (__a, __b);
>  }
>  
>  __extension__ extern __inline int32x2_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vsubhn_s64 (int64x2_t __a, int64x2_t __b)
>  {
> -  return (int32x2_t) __builtin_aarch64_subhnv2di (__a, __b);
> +  return __builtin_aarch64_subhnv2di (__a, __b);
>  }
>  
>  __extension__ extern __inline uint8x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vsubhn_u16 (uint16x8_t __a, uint16x8_t __b)
>  {
> -  return (uint8x8_t) __builtin_aarch64_subhnv8hi ((int16x8_t) __a,
> -						  (int16x8_t) __b);
> +  return __builtin_aarch64_subhnv8hi_uuu (__a, __b);
>  }
>  
>  __extension__ extern __inline uint16x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vsubhn_u32 (uint32x4_t __a, uint32x4_t __b)
>  {
> -  return (uint16x4_t) __builtin_aarch64_subhnv4si ((int32x4_t) __a,
> -						   (int32x4_t) __b);
> +  return __builtin_aarch64_subhnv4si_uuu (__a, __b);
>  }
>  
>  __extension__ extern __inline uint32x2_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vsubhn_u64 (uint64x2_t __a, uint64x2_t __b)
>  {
> -  return (uint32x2_t) __builtin_aarch64_subhnv2di ((int64x2_t) __a,
> -						   (int64x2_t) __b);
> +  return __builtin_aarch64_subhnv2di_uuu (__a, __b);
>  }
>  
>  __extension__ extern __inline int8x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vrsubhn_s16 (int16x8_t __a, int16x8_t __b)
>  {
> -  return (int8x8_t) __builtin_aarch64_rsubhnv8hi (__a, __b);
> +  return __builtin_aarch64_rsubhnv8hi (__a, __b);
>  }
>  
>  __extension__ extern __inline int16x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vrsubhn_s32 (int32x4_t __a, int32x4_t __b)
>  {
> -  return (int16x4_t) __builtin_aarch64_rsubhnv4si (__a, __b);
> +  return __builtin_aarch64_rsubhnv4si (__a, __b);
>  }
>  
>  __extension__ extern __inline int32x2_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vrsubhn_s64 (int64x2_t __a, int64x2_t __b)
>  {
> -  return (int32x2_t) __builtin_aarch64_rsubhnv2di (__a, __b);
> +  return __builtin_aarch64_rsubhnv2di (__a, __b);
>  }
>  
>  __extension__ extern __inline uint8x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vrsubhn_u16 (uint16x8_t __a, uint16x8_t __b)
>  {
> -  return (uint8x8_t) __builtin_aarch64_rsubhnv8hi ((int16x8_t) __a,
> -						   (int16x8_t) __b);
> +  return __builtin_aarch64_rsubhnv8hi_uuu (__a, __b);
>  }
>  
>  __extension__ extern __inline uint16x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vrsubhn_u32 (uint32x4_t __a, uint32x4_t __b)
>  {
> -  return (uint16x4_t) __builtin_aarch64_rsubhnv4si ((int32x4_t) __a,
> -						    (int32x4_t) __b);
> +  return __builtin_aarch64_rsubhnv4si_uuu (__a, __b);
>  }
>  
>  __extension__ extern __inline uint32x2_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vrsubhn_u64 (uint64x2_t __a, uint64x2_t __b)
>  {
> -  return (uint32x2_t) __builtin_aarch64_rsubhnv2di ((int64x2_t) __a,
> -						    (int64x2_t) __b);
> +  return __builtin_aarch64_rsubhnv2di_uuu (__a, __b);
>  }
>  
>  __extension__ extern __inline int8x16_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vrsubhn_high_s16 (int8x8_t __a, int16x8_t __b, int16x8_t __c)
>  {
> -  return (int8x16_t) __builtin_aarch64_rsubhn2v8hi (__a, __b, __c);
> +  return __builtin_aarch64_rsubhn2v8hi (__a, __b, __c);
>  }
>  
>  __extension__ extern __inline int16x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vrsubhn_high_s32 (int16x4_t __a, int32x4_t __b, int32x4_t __c)
>  {
> -  return (int16x8_t) __builtin_aarch64_rsubhn2v4si (__a, __b, __c);
> +  return __builtin_aarch64_rsubhn2v4si (__a, __b, __c);
>  }
>  
>  __extension__ extern __inline int32x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vrsubhn_high_s64 (int32x2_t __a, int64x2_t __b, int64x2_t __c)
>  {
> -  return (int32x4_t) __builtin_aarch64_rsubhn2v2di (__a, __b, __c);
> +  return __builtin_aarch64_rsubhn2v2di (__a, __b, __c);
>  }
>  
>  __extension__ extern __inline uint8x16_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vrsubhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c)
>  {
> -  return (uint8x16_t) __builtin_aarch64_rsubhn2v8hi ((int8x8_t) __a,
> -						     (int16x8_t) __b,
> -						     (int16x8_t) __c);
> +  return __builtin_aarch64_rsubhn2v8hi_uuuu (__a, __b, __c);
>  }
>  
>  __extension__ extern __inline uint16x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vrsubhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c)
>  {
> -  return (uint16x8_t) __builtin_aarch64_rsubhn2v4si ((int16x4_t) __a,
> -						     (int32x4_t) __b,
> -						     (int32x4_t) __c);
> +  return __builtin_aarch64_rsubhn2v4si_uuuu (__a, __b, __c);
>  }
>  
>  __extension__ extern __inline uint32x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vrsubhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c)
>  {
> -  return (uint32x4_t) __builtin_aarch64_rsubhn2v2di ((int32x2_t) __a,
> -						     (int64x2_t) __b,
> -						     (int64x2_t) __c);
> +  return __builtin_aarch64_rsubhn2v2di_uuuu (__a, __b, __c);
>  }
>  
>  __extension__ extern __inline int8x16_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vsubhn_high_s16 (int8x8_t __a, int16x8_t __b, int16x8_t __c)
>  {
> -  return (int8x16_t) __builtin_aarch64_subhn2v8hi (__a, __b, __c);
> +  return __builtin_aarch64_subhn2v8hi (__a, __b, __c);
>  }
>  
>  __extension__ extern __inline int16x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vsubhn_high_s32 (int16x4_t __a, int32x4_t __b, int32x4_t __c)
>  {
> -  return (int16x8_t) __builtin_aarch64_subhn2v4si (__a, __b, __c);;
> +  return __builtin_aarch64_subhn2v4si (__a, __b, __c);
>  }
>  
>  __extension__ extern __inline int32x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vsubhn_high_s64 (int32x2_t __a, int64x2_t __b, int64x2_t __c)
>  {
> -  return (int32x4_t) __builtin_aarch64_subhn2v2di (__a, __b, __c);
> +  return __builtin_aarch64_subhn2v2di (__a, __b, __c);
>  }
>  
>  __extension__ extern __inline uint8x16_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vsubhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c)
>  {
> -  return (uint8x16_t) __builtin_aarch64_subhn2v8hi ((int8x8_t) __a,
> -						    (int16x8_t) __b,
> -						    (int16x8_t) __c);
> +  return __builtin_aarch64_subhn2v8hi_uuuu (__a, __b, __c);
>  }
>  
>  __extension__ extern __inline uint16x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vsubhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c)
>  {
> -  return (uint16x8_t) __builtin_aarch64_subhn2v4si ((int16x4_t) __a,
> -						    (int32x4_t) __b,
> -						    (int32x4_t) __c);
> +  return __builtin_aarch64_subhn2v4si_uuuu (__a, __b, __c);
>  }
>  
>  __extension__ extern __inline uint32x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vsubhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c)
>  {
> -  return (uint32x4_t) __builtin_aarch64_subhn2v2di ((int32x2_t) __a,
> -						    (int64x2_t) __b,
> -						    (int64x2_t) __c);
> +  return __builtin_aarch64_subhn2v2di_uuuu (__a, __b, __c);
>  }
>  
>  __extension__ extern __inline uint16x4_t
