From: liuhongt <hongtao.liu@intel.com>
To: gcc-patches@gcc.gnu.org
Cc: crazylht@gmail.com, hjl.tools@gmail.com, ubizjak@gmail.com,
	jakub@redhat.com
Subject: [PATCH 26/62] AVX512FP16: Add vcvtph2dq/vcvtph2qq/vcvtph2w/vcvtph2uw/vcvtph2uqq/vcvtph2udq
Date: Thu,  1 Jul 2021 14:16:12 +0800
Message-ID: <20210701061648.9447-27-hongtao.liu@intel.com>
In-Reply-To: <20210701061648.9447-1-hongtao.liu@intel.com>

gcc/ChangeLog:

	* config/i386/avx512fp16intrin.h (_mm512_cvtph_epi32):
	New intrinsic.
	(_mm512_mask_cvtph_epi32): Likewise.
	(_mm512_maskz_cvtph_epi32): Likewise.
	(_mm512_cvt_roundph_epi32): Likewise.
	(_mm512_mask_cvt_roundph_epi32): Likewise.
	(_mm512_maskz_cvt_roundph_epi32): Likewise.
	(_mm512_cvtph_epu32): Likewise.
	(_mm512_mask_cvtph_epu32): Likewise.
	(_mm512_maskz_cvtph_epu32): Likewise.
	(_mm512_cvt_roundph_epu32): Likewise.
	(_mm512_mask_cvt_roundph_epu32): Likewise.
	(_mm512_maskz_cvt_roundph_epu32): Likewise.
	(_mm512_cvtph_epi64): Likewise.
	(_mm512_mask_cvtph_epi64): Likewise.
	(_mm512_maskz_cvtph_epi64): Likewise.
	(_mm512_cvt_roundph_epi64): Likewise.
	(_mm512_mask_cvt_roundph_epi64): Likewise.
	(_mm512_maskz_cvt_roundph_epi64): Likewise.
	(_mm512_cvtph_epu64): Likewise.
	(_mm512_mask_cvtph_epu64): Likewise.
	(_mm512_maskz_cvtph_epu64): Likewise.
	(_mm512_cvt_roundph_epu64): Likewise.
	(_mm512_mask_cvt_roundph_epu64): Likewise.
	(_mm512_maskz_cvt_roundph_epu64): Likewise.
	(_mm512_cvtph_epi16): Likewise.
	(_mm512_mask_cvtph_epi16): Likewise.
	(_mm512_maskz_cvtph_epi16): Likewise.
	(_mm512_cvt_roundph_epi16): Likewise.
	(_mm512_mask_cvt_roundph_epi16): Likewise.
	(_mm512_maskz_cvt_roundph_epi16): Likewise.
	(_mm512_cvtph_epu16): Likewise.
	(_mm512_mask_cvtph_epu16): Likewise.
	(_mm512_maskz_cvtph_epu16): Likewise.
	(_mm512_cvt_roundph_epu16): Likewise.
	(_mm512_mask_cvt_roundph_epu16): Likewise.
	(_mm512_maskz_cvt_roundph_epu16): Likewise.
	* config/i386/avx512fp16vlintrin.h (_mm_cvtph_epi32):
	New intrinsic.
	(_mm_mask_cvtph_epi32): Likewise.
	(_mm_maskz_cvtph_epi32): Likewise.
	(_mm256_cvtph_epi32): Likewise.
	(_mm256_mask_cvtph_epi32): Likewise.
	(_mm256_maskz_cvtph_epi32): Likewise.
	(_mm_cvtph_epu32): Likewise.
	(_mm_mask_cvtph_epu32): Likewise.
	(_mm_maskz_cvtph_epu32): Likewise.
	(_mm256_cvtph_epu32): Likewise.
	(_mm256_mask_cvtph_epu32): Likewise.
	(_mm256_maskz_cvtph_epu32): Likewise.
	(_mm_cvtph_epi64): Likewise.
	(_mm_mask_cvtph_epi64): Likewise.
	(_mm_maskz_cvtph_epi64): Likewise.
	(_mm256_cvtph_epi64): Likewise.
	(_mm256_mask_cvtph_epi64): Likewise.
	(_mm256_maskz_cvtph_epi64): Likewise.
	(_mm_cvtph_epu64): Likewise.
	(_mm_mask_cvtph_epu64): Likewise.
	(_mm_maskz_cvtph_epu64): Likewise.
	(_mm256_cvtph_epu64): Likewise.
	(_mm256_mask_cvtph_epu64): Likewise.
	(_mm256_maskz_cvtph_epu64): Likewise.
	(_mm_cvtph_epi16): Likewise.
	(_mm_mask_cvtph_epi16): Likewise.
	(_mm_maskz_cvtph_epi16): Likewise.
	(_mm256_cvtph_epi16): Likewise.
	(_mm256_mask_cvtph_epi16): Likewise.
	(_mm256_maskz_cvtph_epi16): Likewise.
	(_mm_cvtph_epu16): Likewise.
	(_mm_mask_cvtph_epu16): Likewise.
	(_mm_maskz_cvtph_epu16): Likewise.
	(_mm256_cvtph_epu16): Likewise.
	(_mm256_mask_cvtph_epu16): Likewise.
	(_mm256_maskz_cvtph_epu16): Likewise.
	* config/i386/i386-builtin-types.def: Add new builtin types.
	* config/i386/i386-builtin.def: Add new builtins.
	* config/i386/i386-expand.c
	(ix86_expand_args_builtin): Handle new builtin types.
	(ix86_expand_round_builtin): Ditto.
	* config/i386/sse.md (sseintconvert): New.
	(ssePHmode): Ditto.
	(UNSPEC_US_FIX_NOTRUNC): Ditto.
	(sseintconvertsignprefix): Ditto.
	(avx512fp16_vcvtph2<sseintconvertsignprefix><sseintconvert>_<mode><mask_name><round_name>):
	Ditto.

gcc/testsuite/ChangeLog:

	* gcc.target/i386/avx-1.c: Add test for new builtins.
	* gcc.target/i386/sse-13.c: Ditto.
	* gcc.target/i386/sse-23.c: Ditto.
	* gcc.target/i386/sse-14.c: Add test for new intrinsics.
	* gcc.target/i386/sse-22.c: Ditto.
---
 gcc/config/i386/avx512fp16intrin.h     | 525 +++++++++++++++++++++++++
 gcc/config/i386/avx512fp16vlintrin.h   | 345 ++++++++++++++++
 gcc/config/i386/i386-builtin-types.def |   9 +
 gcc/config/i386/i386-builtin.def       |  18 +
 gcc/config/i386/i386-expand.c          |   9 +
 gcc/config/i386/sse.md                 |  35 ++
 gcc/testsuite/gcc.target/i386/avx-1.c  |   6 +
 gcc/testsuite/gcc.target/i386/sse-13.c |   6 +
 gcc/testsuite/gcc.target/i386/sse-14.c |  18 +
 gcc/testsuite/gcc.target/i386/sse-22.c |  18 +
 gcc/testsuite/gcc.target/i386/sse-23.c |   6 +
 11 files changed, 995 insertions(+)
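
For reference, a minimal usage sketch of the new 512-bit conversions
(reviewer aid only, not part of the patch; _mm256_set1_ph is assumed
to be available from the FP16 init/broadcast support earlier in this
series):

    #include <immintrin.h>
    /* Compile with -mavx512fp16.  */
    void
    demo (void)
    {
      __m256h src = _mm256_set1_ph ((_Float16) 3.5f);
      __mmask16 m = 0x00ff;
      /* All sixteen lanes converted.  */
      __m512i all = _mm512_cvtph_epi32 (src);
      /* Masked-off lanes keep the old destination values.  */
      __m512i mrg = _mm512_mask_cvtph_epi32 (all, m, src);
      /* Masked-off lanes are zeroed.  */
      __m512i zro = _mm512_maskz_cvtph_epi32 (m, src);
      (void) mrg; (void) zro;
    }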

diff --git a/gcc/config/i386/avx512fp16intrin.h b/gcc/config/i386/avx512fp16intrin.h
index cdf6646c8c6..42576c4ae2e 100644
--- a/gcc/config/i386/avx512fp16intrin.h
+++ b/gcc/config/i386/avx512fp16intrin.h
@@ -2512,6 +2512,531 @@ _mm_maskz_move_sh (__mmask8 __A, __m128h  __B, __m128h __C)
   return __builtin_ia32_vmovsh_mask (__B, __C, _mm_setzero_ph (), __A);
 }
 
+/* Intrinsics vcvtph2dq.  */
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvtph_epi32 (__m256h __A)
+{
+  return (__m512i)
+    __builtin_ia32_vcvtph2dq_v16si_mask_round (__A,
+					       (__v16si)
+					       _mm512_setzero_si512 (),
+					       (__mmask16) -1,
+					       _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvtph_epi32 (__m512i __A, __mmask16 __B, __m256h __C)
+{
+  return (__m512i)
+    __builtin_ia32_vcvtph2dq_v16si_mask_round (__C,
+					       (__v16si) __A,
+					       __B,
+					       _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvtph_epi32 (__mmask16 __A, __m256h __B)
+{
+  return (__m512i)
+    __builtin_ia32_vcvtph2dq_v16si_mask_round (__B,
+					       (__v16si)
+					       _mm512_setzero_si512 (),
+					       __A,
+					       _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvt_roundph_epi32 (__m256h __A, int __B)
+{
+  return (__m512i)
+    __builtin_ia32_vcvtph2dq_v16si_mask_round (__A,
+					       (__v16si)
+					       _mm512_setzero_si512 (),
+					       (__mmask16) -1,
+					       __B);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvt_roundph_epi32 (__m512i __A, __mmask16 __B, __m256h __C, int __D)
+{
+  return (__m512i)
+    __builtin_ia32_vcvtph2dq_v16si_mask_round (__C,
+					       (__v16si) __A,
+					       __B,
+					       __D);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvt_roundph_epi32 (__mmask16 __A, __m256h __B, int __C)
+{
+  return (__m512i)
+    __builtin_ia32_vcvtph2dq_v16si_mask_round (__B,
+					       (__v16si)
+					       _mm512_setzero_si512 (),
+					       __A,
+					       __C);
+}
+
+#else
+#define _mm512_cvt_roundph_epi32(A, B)					\
+  ((__m512i)								\
+   __builtin_ia32_vcvtph2dq_v16si_mask_round ((A),			\
+					      (__v16si)			\
+					      _mm512_setzero_si512 (),	\
+					      (__mmask16)-1,		\
+					      (B)))
+
+#define _mm512_mask_cvt_roundph_epi32(A, B, C, D)			\
+  ((__m512i)								\
+   __builtin_ia32_vcvtph2dq_v16si_mask_round ((C), (__v16si)(A), (B), (D)))
+
+#define _mm512_maskz_cvt_roundph_epi32(A, B, C)				\
+  ((__m512i)								\
+   __builtin_ia32_vcvtph2dq_v16si_mask_round ((B),			\
+					      (__v16si)			\
+					      _mm512_setzero_si512 (),	\
+					      (A),			\
+					      (C)))
+
+#endif /* __OPTIMIZE__ */
+
+/* Intrinsics vcvtph2udq.  */
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvtph_epu32 (__m256h __A)
+{
+  return (__m512i)
+    __builtin_ia32_vcvtph2udq_v16si_mask_round (__A,
+						(__v16si)
+						_mm512_setzero_si512 (),
+						(__mmask16) -1,
+						_MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvtph_epu32 (__m512i __A, __mmask16 __B, __m256h __C)
+{
+  return (__m512i)
+    __builtin_ia32_vcvtph2udq_v16si_mask_round (__C,
+						(__v16si) __A,
+						__B,
+						_MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvtph_epu32 (__mmask16 __A, __m256h __B)
+{
+  return (__m512i)
+    __builtin_ia32_vcvtph2udq_v16si_mask_round (__B,
+						(__v16si)
+						_mm512_setzero_si512 (),
+						__A,
+						_MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvt_roundph_epu32 (__m256h __A, int __B)
+{
+  return (__m512i)
+    __builtin_ia32_vcvtph2udq_v16si_mask_round (__A,
+						(__v16si)
+						_mm512_setzero_si512 (),
+						(__mmask16) -1,
+						__B);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvt_roundph_epu32 (__m512i __A, __mmask16 __B, __m256h __C, int __D)
+{
+  return (__m512i)
+    __builtin_ia32_vcvtph2udq_v16si_mask_round (__C,
+						(__v16si) __A,
+						__B,
+						__D);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvt_roundph_epu32 (__mmask16 __A, __m256h __B, int __C)
+{
+  return (__m512i)
+    __builtin_ia32_vcvtph2udq_v16si_mask_round (__B,
+						(__v16si)
+						_mm512_setzero_si512 (),
+						__A,
+						__C);
+}
+
+#else
+#define _mm512_cvt_roundph_epu32(A, B)					\
+  ((__m512i)								\
+   __builtin_ia32_vcvtph2udq_v16si_mask_round ((A),			\
+					       (__v16si)		\
+					       _mm512_setzero_si512 (),	\
+					       (__mmask16)-1,		\
+					       (B)))
+
+#define _mm512_mask_cvt_roundph_epu32(A, B, C, D)			\
+  ((__m512i)								\
+   __builtin_ia32_vcvtph2udq_v16si_mask_round ((C), (__v16si)(A), (B), (D)))
+
+#define _mm512_maskz_cvt_roundph_epu32(A, B, C)				\
+  ((__m512i)								\
+   __builtin_ia32_vcvtph2udq_v16si_mask_round ((B),			\
+					       (__v16si)		\
+					       _mm512_setzero_si512 (),	\
+					       (A),			\
+					       (C)))
+
+#endif /* __OPTIMIZE__ */
+
+/* Intrinsics vcvtph2qq.  */
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvtph_epi64 (__m128h __A)
+{
+  return __builtin_ia32_vcvtph2qq_v8di_mask_round (__A,
+						   _mm512_setzero_si512 (),
+						   (__mmask8) -1,
+						   _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvtph_epi64 (__m512i __A, __mmask8 __B, __m128h __C)
+{
+  return __builtin_ia32_vcvtph2qq_v8di_mask_round (__C, __A, __B,
+						   _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvtph_epi64 (__mmask8 __A, __m128h __B)
+{
+  return __builtin_ia32_vcvtph2qq_v8di_mask_round (__B,
+						   _mm512_setzero_si512 (),
+						   __A,
+						   _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvt_roundph_epi64 (__m128h __A, int __B)
+{
+  return __builtin_ia32_vcvtph2qq_v8di_mask_round (__A,
+						   _mm512_setzero_si512 (),
+						   (__mmask8) -1,
+						   __B);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvt_roundph_epi64 (__m512i __A, __mmask8 __B, __m128h __C, int __D)
+{
+  return __builtin_ia32_vcvtph2qq_v8di_mask_round (__C, __A, __B, __D);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvt_roundph_epi64 (__mmask8 __A, __m128h __B, int __C)
+{
+  return __builtin_ia32_vcvtph2qq_v8di_mask_round (__B,
+						   _mm512_setzero_si512 (),
+						   __A,
+						   __C);
+}
+
+#else
+#define _mm512_cvt_roundph_epi64(A, B)					\
+  (__builtin_ia32_vcvtph2qq_v8di_mask_round ((A),			\
+					     _mm512_setzero_si512 (),	\
+					     (__mmask8)-1,		\
+					     (B)))
+
+#define _mm512_mask_cvt_roundph_epi64(A, B, C, D)			\
+  (__builtin_ia32_vcvtph2qq_v8di_mask_round ((C), (A), (B), (D)))
+
+#define _mm512_maskz_cvt_roundph_epi64(A, B, C)				\
+  (__builtin_ia32_vcvtph2qq_v8di_mask_round ((B),			\
+					     _mm512_setzero_si512 (),	\
+					     (A),			\
+					     (C)))
+
+#endif /* __OPTIMIZE__ */
+
+/* Intrinsics vcvtph2uqq.  */
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvtph_epu64 (__m128h __A)
+{
+  return __builtin_ia32_vcvtph2uqq_v8di_mask_round (__A,
+						    _mm512_setzero_si512 (),
+						    (__mmask8) -1,
+						    _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvtph_epu64 (__m512i __A, __mmask8 __B, __m128h __C)
+{
+  return __builtin_ia32_vcvtph2uqq_v8di_mask_round (__C, __A, __B,
+						    _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvtph_epu64 (__mmask8 __A, __m128h __B)
+{
+  return __builtin_ia32_vcvtph2uqq_v8di_mask_round (__B,
+						    _mm512_setzero_si512 (),
+						    __A,
+						    _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvt_roundph_epu64 (__m128h __A, int __B)
+{
+  return __builtin_ia32_vcvtph2uqq_v8di_mask_round (__A,
+						    _mm512_setzero_si512 (),
+						    (__mmask8) -1,
+						    __B);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvt_roundph_epu64 (__m512i __A, __mmask8 __B, __m128h __C, int __D)
+{
+  return __builtin_ia32_vcvtph2uqq_v8di_mask_round (__C, __A, __B, __D);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvt_roundph_epu64 (__mmask8 __A, __m128h __B, int __C)
+{
+  return __builtin_ia32_vcvtph2uqq_v8di_mask_round (__B,
+						    _mm512_setzero_si512 (),
+						    __A,
+						    __C);
+}
+
+#else
+#define _mm512_cvt_roundph_epu64(A, B)					\
+  (__builtin_ia32_vcvtph2uqq_v8di_mask_round ((A),			\
+					      _mm512_setzero_si512 (),	\
+					      (__mmask8)-1,		\
+					      (B)))
+
+#define _mm512_mask_cvt_roundph_epu64(A, B, C, D)			\
+  (__builtin_ia32_vcvtph2uqq_v8di_mask_round ((C), (A), (B), (D)))
+
+#define _mm512_maskz_cvt_roundph_epu64(A, B, C)				\
+  (__builtin_ia32_vcvtph2uqq_v8di_mask_round ((B),			\
+					      _mm512_setzero_si512 (),	\
+					      (A),			\
+					      (C)))
+
+#endif /* __OPTIMIZE__ */
+
+/* Intrinsics vcvtph2w.  */
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvtph_epi16 (__m512h __A)
+{
+  return (__m512i)
+    __builtin_ia32_vcvtph2w_v32hi_mask_round (__A,
+					      (__v32hi)
+					      _mm512_setzero_si512 (),
+					      (__mmask32) -1,
+					      _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvtph_epi16 (__m512i __A, __mmask32 __B, __m512h __C)
+{
+  return (__m512i)
+    __builtin_ia32_vcvtph2w_v32hi_mask_round (__C,
+					      (__v32hi) __A,
+					      __B,
+					      _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvtph_epi16 (__mmask32 __A, __m512h __B)
+{
+  return (__m512i)
+    __builtin_ia32_vcvtph2w_v32hi_mask_round (__B,
+					      (__v32hi)
+					      _mm512_setzero_si512 (),
+					      __A,
+					      _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvt_roundph_epi16 (__m512h __A, int __B)
+{
+  return (__m512i)
+    __builtin_ia32_vcvtph2w_v32hi_mask_round (__A,
+					      (__v32hi)
+					      _mm512_setzero_si512 (),
+					      (__mmask32) -1,
+					      __B);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvt_roundph_epi16 (__m512i __A, __mmask32 __B, __m512h __C, int __D)
+{
+  return (__m512i)
+    __builtin_ia32_vcvtph2w_v32hi_mask_round (__C,
+					      (__v32hi) __A,
+					      __B,
+					      __D);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvt_roundph_epi16 (__mmask32 __A, __m512h __B, int __C)
+{
+  return (__m512i)
+    __builtin_ia32_vcvtph2w_v32hi_mask_round (__B,
+					      (__v32hi)
+					      _mm512_setzero_si512 (),
+					      __A,
+					      __C);
+}
+
+#else
+#define _mm512_cvt_roundph_epi16(A, B)					\
+  ((__m512i)__builtin_ia32_vcvtph2w_v32hi_mask_round ((A),		\
+						      (__v32hi)		\
+						      _mm512_setzero_si512 (), \
+						      (__mmask32)-1,	\
+						      (B)))
+
+#define _mm512_mask_cvt_roundph_epi16(A, B, C, D)			\
+  ((__m512i)__builtin_ia32_vcvtph2w_v32hi_mask_round ((C),		\
+						      (__v32hi)(A),	\
+						      (B),		\
+						      (D)))
+
+#define _mm512_maskz_cvt_roundph_epi16(A, B, C)				\
+  ((__m512i)__builtin_ia32_vcvtph2w_v32hi_mask_round ((B),		\
+						      (__v32hi)		\
+						      _mm512_setzero_si512 (), \
+						      (A),		\
+						      (C)))
+
+#endif /* __OPTIMIZE__ */
+
+/* Intrinsics vcvtph2uw.  */
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvtph_epu16 (__m512h __A)
+{
+  return (__m512i)
+    __builtin_ia32_vcvtph2uw_v32hi_mask_round (__A,
+					       (__v32hi)
+					       _mm512_setzero_si512 (),
+					       (__mmask32) -1,
+					       _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvtph_epu16 (__m512i __A, __mmask32 __B, __m512h __C)
+{
+  return (__m512i)
+    __builtin_ia32_vcvtph2uw_v32hi_mask_round (__C, (__v32hi) __A, __B,
+					       _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvtph_epu16 (__mmask32 __A, __m512h __B)
+{
+  return (__m512i)
+    __builtin_ia32_vcvtph2uw_v32hi_mask_round (__B,
+					       (__v32hi)
+					       _mm512_setzero_si512 (),
+					       __A,
+					       _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvt_roundph_epu16 (__m512h __A, int __B)
+{
+  return (__m512i)
+    __builtin_ia32_vcvtph2uw_v32hi_mask_round (__A,
+					       (__v32hi)
+					       _mm512_setzero_si512 (),
+					       (__mmask32) -1,
+					       __B);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvt_roundph_epu16 (__m512i __A, __mmask32 __B, __m512h __C, int __D)
+{
+  return (__m512i)
+    __builtin_ia32_vcvtph2uw_v32hi_mask_round (__C, (__v32hi) __A, __B, __D);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvt_roundph_epu16 (__mmask32 __A, __m512h __B, int __C)
+{
+  return (__m512i)
+    __builtin_ia32_vcvtph2uw_v32hi_mask_round (__B,
+					       (__v32hi)
+					       _mm512_setzero_si512 (),
+					       __A,
+					       __C);
+}
+
+#else
+#define _mm512_cvt_roundph_epu16(A, B)					\
+  ((__m512i)								\
+   __builtin_ia32_vcvtph2uw_v32hi_mask_round ((A),			\
+					      (__v32hi)			\
+					      _mm512_setzero_si512 (),	\
+					      (__mmask32)-1, (B)))
+
+#define _mm512_mask_cvt_roundph_epu16(A, B, C, D)			\
+  ((__m512i)								\
+   __builtin_ia32_vcvtph2uw_v32hi_mask_round ((C), (__v32hi)(A), (B), (D)))
+
+#define _mm512_maskz_cvt_roundph_epu16(A, B, C)				\
+  ((__m512i)								\
+   __builtin_ia32_vcvtph2uw_v32hi_mask_round ((B),			\
+					      (__v32hi)			\
+					      _mm512_setzero_si512 (),	\
+					      (A),			\
+					      (C)))
+
+#endif /* __OPTIMIZE__ */
+
 #ifdef __DISABLE_AVX512FP16__
 #undef __DISABLE_AVX512FP16__
 #pragma GCC pop_options
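
The _round forms take the rounding control as an immediate, which is
why the inline definitions are guarded by __OPTIMIZE__ (only then is
the argument guaranteed to fold to a constant), with macro fallbacks
otherwise.  A sketch of explicit rounding control (not part of the
patch):

    #include <immintrin.h>
    /* Compile with -mavx512fp16; the rounding argument must be a
       compile-time constant.  */
    __m512i
    cvt_nearest_noexc (__m256h x)
    {
      return _mm512_cvt_roundph_epi32 (x, _MM_FROUND_TO_NEAREST_INT
                                          | _MM_FROUND_NO_EXC);
    }
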
diff --git a/gcc/config/i386/avx512fp16vlintrin.h b/gcc/config/i386/avx512fp16vlintrin.h
index 206d60407fc..8a7e0aaa6b1 100644
--- a/gcc/config/i386/avx512fp16vlintrin.h
+++ b/gcc/config/i386/avx512fp16vlintrin.h
@@ -930,6 +930,351 @@ _mm_maskz_getmant_ph (__mmask8 __U, __m128h __A,
 
 #endif /* __OPTIMIZE__ */
 
+/* Intrinsics vcvtph2dq.  */
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtph_epi32 (__m128h __A)
+{
+  return (__m128i)
+    __builtin_ia32_vcvtph2dq_v4si_mask (__A,
+					(__v4si)
+					_mm_setzero_si128 (),
+					(__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtph_epi32 (__m128i __A, __mmask8 __B, __m128h __C)
+{
+  return (__m128i)
+    __builtin_ia32_vcvtph2dq_v4si_mask (__C, (__v4si) __A, __B);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtph_epi32 (__mmask8 __A, __m128h __B)
+{
+  return (__m128i)
+    __builtin_ia32_vcvtph2dq_v4si_mask (__B,
+					(__v4si) _mm_setzero_si128 (),
+					__A);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtph_epi32 (__m128h __A)
+{
+  return (__m256i)
+    __builtin_ia32_vcvtph2dq_v8si_mask (__A,
+					(__v8si)
+					_mm256_setzero_si256 (),
+					(__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtph_epi32 (__m256i __A, __mmask8 __B, __m128h __C)
+{
+  return (__m256i)
+    __builtin_ia32_vcvtph2dq_v8si_mask (__C, (__v8si) __A, __B);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtph_epi32 (__mmask8 __A, __m128h __B)
+{
+  return (__m256i)
+    __builtin_ia32_vcvtph2dq_v8si_mask (__B,
+					(__v8si)
+					_mm256_setzero_si256 (),
+					__A);
+}
+
+/* Intrinsics vcvtph2udq.  */
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtph_epu32 (__m128h __A)
+{
+  return (__m128i)
+    __builtin_ia32_vcvtph2udq_v4si_mask (__A,
+					 (__v4si)
+					 _mm_setzero_si128 (),
+					 (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtph_epu32 (__m128i __A, __mmask8 __B, __m128h __C)
+{
+  return (__m128i)
+    __builtin_ia32_vcvtph2udq_v4si_mask (__C, (__v4si) __A, __B);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtph_epu32 (__mmask8 __A, __m128h __B)
+{
+  return (__m128i)
+    __builtin_ia32_vcvtph2udq_v4si_mask (__B,
+					 (__v4si)
+					 _mm_setzero_si128 (),
+					 __A);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtph_epu32 (__m128h __A)
+{
+  return (__m256i)
+    __builtin_ia32_vcvtph2udq_v8si_mask (__A,
+					 (__v8si)
+					 _mm256_setzero_si256 (),
+					 (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtph_epu32 (__m256i __A, __mmask8 __B, __m128h __C)
+{
+  return (__m256i)
+    __builtin_ia32_vcvtph2udq_v8si_mask (__C, (__v8si) __A, __B);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtph_epu32 (__mmask8 __A, __m128h __B)
+{
+  return (__m256i)
+    __builtin_ia32_vcvtph2udq_v8si_mask (__B,
+					 (__v8si) _mm256_setzero_si256 (),
+					 __A);
+}
+
+/* Intrinsics vcvtph2qq.  */
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtph_epi64 (__m128h __A)
+{
+  return
+    __builtin_ia32_vcvtph2qq_v2di_mask (__A,
+					_mm_setzero_si128 (),
+					(__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtph_epi64 (__m128i __A, __mmask8 __B, __m128h __C)
+{
+  return __builtin_ia32_vcvtph2qq_v2di_mask (__C, __A, __B);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtph_epi64 (__mmask8 __A, __m128h __B)
+{
+  return __builtin_ia32_vcvtph2qq_v2di_mask (__B,
+					     _mm_setzero_si128 (),
+					     __A);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtph_epi64 (__m128h __A)
+{
+  return __builtin_ia32_vcvtph2qq_v4di_mask (__A,
+					     _mm256_setzero_si256 (),
+					     (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtph_epi64 (__m256i __A, __mmask8 __B, __m128h __C)
+{
+  return __builtin_ia32_vcvtph2qq_v4di_mask (__C, __A, __B);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtph_epi64 (__mmask8 __A, __m128h __B)
+{
+  return __builtin_ia32_vcvtph2qq_v4di_mask (__B,
+					     _mm256_setzero_si256 (),
+					     __A);
+}
+
+/* Intrinsics vcvtph2uqq.  */
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtph_epu64 (__m128h __A)
+{
+  return __builtin_ia32_vcvtph2uqq_v2di_mask (__A,
+					      _mm_setzero_si128 (),
+					      (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtph_epu64 (__m128i __A, __mmask8 __B, __m128h __C)
+{
+  return __builtin_ia32_vcvtph2uqq_v2di_mask (__C, __A, __B);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtph_epu64 (__mmask8 __A, __m128h __B)
+{
+  return __builtin_ia32_vcvtph2uqq_v2di_mask (__B,
+					      _mm_setzero_si128 (),
+					      __A);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtph_epu64 (__m128h __A)
+{
+  return __builtin_ia32_vcvtph2uqq_v4di_mask (__A,
+					      _mm256_setzero_si256 (),
+					      (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtph_epu64 (__m256i __A, __mmask8 __B, __m128h __C)
+{
+  return __builtin_ia32_vcvtph2uqq_v4di_mask (__C, __A, __B);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtph_epu64 (__mmask8 __A, __m128h __B)
+{
+  return __builtin_ia32_vcvtph2uqq_v4di_mask (__B,
+					      _mm256_setzero_si256 (),
+					      __A);
+}
+
+/* Intrinsics vcvtph2w.  */
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtph_epi16 (__m128h __A)
+{
+  return (__m128i)
+    __builtin_ia32_vcvtph2w_v8hi_mask (__A,
+				       (__v8hi)
+				       _mm_setzero_si128 (),
+				       (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtph_epi16 (__m128i __A, __mmask8 __B, __m128h __C)
+{
+  return (__m128i)
+    __builtin_ia32_vcvtph2w_v8hi_mask (__C, (__v8hi) __A, __B);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtph_epi16 (__mmask8 __A, __m128h __B)
+{
+  return (__m128i)
+    __builtin_ia32_vcvtph2w_v8hi_mask (__B,
+				       (__v8hi)
+				       _mm_setzero_si128 (),
+				       __A);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtph_epi16 (__m256h __A)
+{
+  return (__m256i)
+    __builtin_ia32_vcvtph2w_v16hi_mask (__A,
+					(__v16hi)
+					_mm256_setzero_si256 (),
+					(__mmask16) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtph_epi16 (__m256i __A, __mmask16 __B, __m256h __C)
+{
+  return (__m256i)
+    __builtin_ia32_vcvtph2w_v16hi_mask (__C, (__v16hi) __A, __B);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtph_epi16 (__mmask16 __A, __m256h __B)
+{
+  return (__m256i)
+    __builtin_ia32_vcvtph2w_v16hi_mask (__B,
+					(__v16hi)
+					_mm256_setzero_si256 (),
+					__A);
+}
+
+/* Intrinsics vcvtph2uw.  */
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtph_epu16 (__m128h __A)
+{
+  return (__m128i)
+    __builtin_ia32_vcvtph2uw_v8hi_mask (__A,
+					(__v8hi)
+					_mm_setzero_si128 (),
+					(__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtph_epu16 (__m128i __A, __mmask8 __B, __m128h __C)
+{
+  return (__m128i)
+    __builtin_ia32_vcvtph2uw_v8hi_mask (__C, (__v8hi) __A, __B);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtph_epu16 (__mmask8 __A, __m128h __B)
+{
+  return (__m128i)
+    __builtin_ia32_vcvtph2uw_v8hi_mask (__B,
+					(__v8hi)
+					_mm_setzero_si128 (),
+					__A);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtph_epu16 (__m256h __A)
+{
+  return (__m256i)
+    __builtin_ia32_vcvtph2uw_v16hi_mask (__A,
+					 (__v16hi)
+					 _mm256_setzero_si256 (),
+					 (__mmask16) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtph_epu16 (__m256i __A, __mmask16 __B, __m256h __C)
+{
+  return (__m256i)
+    __builtin_ia32_vcvtph2uw_v16hi_mask (__C, (__v16hi) __A, __B);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtph_epu16 (__mmask16 __A, __m256h __B)
+{
+  return (__m256i)
+    __builtin_ia32_vcvtph2uw_v16hi_mask (__B,
+					 (__v16hi)
+					 _mm256_setzero_si256 (),
+					 __A);
+}
+
 #ifdef __DISABLE_AVX512FP16VL__
 #undef __DISABLE_AVX512FP16VL__
 #pragma GCC pop_options
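
As with the 512-bit forms, a short usage sketch for the new 128/256-bit
variants (not part of the patch).  Embedded rounding is only encodable
at 512-bit vector length, so the VL intrinsics deliberately have no
_round counterparts:

    #include <immintrin.h>
    /* Compile with -mavx512fp16 -mavx512vl.  */
    __m128i
    lo4_to_epi32 (__m128h x)
    {
      /* Converts the low four FP16 elements of X.  */
      return _mm_cvtph_epi32 (x);
    }

    __m256i
    all8_to_epu32 (__m128h x)
    {
      /* Converts all eight FP16 elements of X to unsigned int.  */
      return _mm256_cvtph_epu32 (x);
    }
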
diff --git a/gcc/config/i386/i386-builtin-types.def b/gcc/config/i386/i386-builtin-types.def
index 6cf3e354c78..c430dc9ab48 100644
--- a/gcc/config/i386/i386-builtin-types.def
+++ b/gcc/config/i386/i386-builtin-types.def
@@ -1311,21 +1311,30 @@ DEF_FUNCTION_TYPE (SI, V32HF, INT, USI)
 DEF_FUNCTION_TYPE (V8HF, V8HF, V8HF)
 DEF_FUNCTION_TYPE (VOID, PCFLOAT16, V8HF, UQI)
 DEF_FUNCTION_TYPE (V8HF, PCFLOAT16, V8HF, UQI)
+DEF_FUNCTION_TYPE (V2DI, V8HF, V2DI, UQI)
+DEF_FUNCTION_TYPE (V4DI, V8HF, V4DI, UQI)
+DEF_FUNCTION_TYPE (V4SI, V8HF, V4SI, UQI)
+DEF_FUNCTION_TYPE (V8SI, V8HF, V8SI, UQI)
+DEF_FUNCTION_TYPE (V8HI, V8HF, V8HI, UQI)
 DEF_FUNCTION_TYPE (V8HF, V8HF, V8HF, UQI)
 DEF_FUNCTION_TYPE (V8HF, V8HF, V8HF, INT)
 DEF_FUNCTION_TYPE (V8HF, V8HF, INT, V8HF, UQI)
 DEF_FUNCTION_TYPE (UQI, V8HF, V8HF, INT, UQI)
 DEF_FUNCTION_TYPE (V8HF, V8HF, V8HF, V8HF, UQI)
 DEF_FUNCTION_TYPE (UQI, V8HF, V8HF, INT, UQI, INT)
+DEF_FUNCTION_TYPE (V8DI, V8HF, V8DI, UQI, INT)
 DEF_FUNCTION_TYPE (V8HF, V8HF, V8HF, V8HF, UQI, INT)
 DEF_FUNCTION_TYPE (V8HF, V8HF, V8HF, INT, V8HF, UQI, INT)
 DEF_FUNCTION_TYPE (V16HF, V16HF, V16HF)
+DEF_FUNCTION_TYPE (V16HI, V16HF, V16HI, UHI)
 DEF_FUNCTION_TYPE (V16HF, V16HF, V16HF, UHI)
+DEF_FUNCTION_TYPE (V16SI, V16HF, V16SI, UHI, INT)
 DEF_FUNCTION_TYPE (V16HF, V16HF, INT, V16HF, UHI)
 DEF_FUNCTION_TYPE (UHI, V16HF, V16HF, INT, UHI)
 DEF_FUNCTION_TYPE (V16HF, V16HF, V16HF, V16HF, UHI)
 DEF_FUNCTION_TYPE (V32HF, V32HF, V32HF, USI)
 DEF_FUNCTION_TYPE (V32HF, V32HF, V32HF, INT)
+DEF_FUNCTION_TYPE (V32HI, V32HF, V32HI, USI, INT)
 DEF_FUNCTION_TYPE (USI, V32HF, V32HF, INT, USI)
 DEF_FUNCTION_TYPE (V32HF, V32HF, V32HF, USI, INT)
 DEF_FUNCTION_TYPE (V32HF, V32HF, V32HF, V32HF, USI)
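
Each DEF_FUNCTION_TYPE entry spells the return type followed by the
argument types.  A hypothetical prototype with the same shape as
V16SI_FTYPE_V16HF_V16SI_UHI_INT (names invented for illustration; the
real builtin declarations are generated inside the compiler):

    typedef _Float16 v16hf __attribute__ ((__vector_size__ (32)));
    typedef int      v16si __attribute__ ((__vector_size__ (64)));
    /* src: FP16 source; merge: values kept in masked-off lanes;
       mask: 16-bit write mask; rounding: rounding-control immediate.  */
    extern v16si cvtph2dq_like (v16hf src, v16si merge,
                                unsigned short mask, int rounding);
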
diff --git a/gcc/config/i386/i386-builtin.def b/gcc/config/i386/i386-builtin.def
index be617b8f18a..dde8af53ff0 100644
--- a/gcc/config/i386/i386-builtin.def
+++ b/gcc/config/i386/i386-builtin.def
@@ -2831,6 +2831,18 @@ BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp1
 BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512vl_getmantv16hf_mask, "__builtin_ia32_getmantph256_mask", IX86_BUILTIN_GETMANTPH256, UNKNOWN, (int) V16HF_FTYPE_V16HF_INT_V16HF_UHI)
 BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_getmantv8hf_mask, "__builtin_ia32_getmantph128_mask", IX86_BUILTIN_GETMANTPH128, UNKNOWN, (int) V8HF_FTYPE_V8HF_INT_V8HF_UQI)
 BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512f_movhf_mask, "__builtin_ia32_vmovsh_mask", IX86_BUILTIN_VMOVSH_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
+BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtph2dq_v4si_mask, "__builtin_ia32_vcvtph2dq_v4si_mask", IX86_BUILTIN_VCVTPH2DQ_V4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V8HF_V4SI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtph2dq_v8si_mask, "__builtin_ia32_vcvtph2dq_v8si_mask", IX86_BUILTIN_VCVTPH2DQ_V8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8HF_V8SI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtph2udq_v4si_mask, "__builtin_ia32_vcvtph2udq_v4si_mask", IX86_BUILTIN_VCVTPH2UDQ_V4SI_MASK, UNKNOWN, (int) V4SI_FTYPE_V8HF_V4SI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtph2udq_v8si_mask, "__builtin_ia32_vcvtph2udq_v8si_mask", IX86_BUILTIN_VCVTPH2UDQ_V8SI_MASK, UNKNOWN, (int) V8SI_FTYPE_V8HF_V8SI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtph2qq_v2di_mask, "__builtin_ia32_vcvtph2qq_v2di_mask", IX86_BUILTIN_VCVTPH2QQ_V2DI_MASK, UNKNOWN, (int) V2DI_FTYPE_V8HF_V2DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtph2qq_v4di_mask, "__builtin_ia32_vcvtph2qq_v4di_mask", IX86_BUILTIN_VCVTPH2QQ_V4DI_MASK, UNKNOWN, (int) V4DI_FTYPE_V8HF_V4DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtph2uqq_v2di_mask, "__builtin_ia32_vcvtph2uqq_v2di_mask", IX86_BUILTIN_VCVTPH2UQQ_V2DI_MASK, UNKNOWN, (int) V2DI_FTYPE_V8HF_V2DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtph2uqq_v4di_mask, "__builtin_ia32_vcvtph2uqq_v4di_mask", IX86_BUILTIN_VCVTPH2UQQ_V4DI_MASK, UNKNOWN, (int) V4DI_FTYPE_V8HF_V4DI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtph2w_v8hi_mask, "__builtin_ia32_vcvtph2w_v8hi_mask", IX86_BUILTIN_VCVTPH2W_V8HI_MASK, UNKNOWN, (int) V8HI_FTYPE_V8HF_V8HI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtph2w_v16hi_mask, "__builtin_ia32_vcvtph2w_v16hi_mask", IX86_BUILTIN_VCVTPH2W_V16HI_MASK, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI)
+BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtph2uw_v8hi_mask, "__builtin_ia32_vcvtph2uw_v8hi_mask", IX86_BUILTIN_VCVTPH2UW_V8HI_MASK, UNKNOWN, (int) V8HI_FTYPE_V8HF_V8HI_UQI)
+BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtph2uw_v16hi_mask, "__builtin_ia32_vcvtph2uw_v16hi_mask", IX86_BUILTIN_VCVTPH2UW_V16HI_MASK, UNKNOWN, (int) V16HI_FTYPE_V16HF_V16HI_UHI)
 
 /* Builtins with rounding support.  */
 BDESC_END (ARGS, ROUND_ARGS)
@@ -3058,6 +3070,12 @@ BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_getexpv32hf_mask_round,
 BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512f_sgetexpv8hf_mask_round, "__builtin_ia32_getexpsh_mask_round", IX86_BUILTIN_GETEXPSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
 BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_getmantv32hf_mask_round, "__builtin_ia32_getmantph512_mask", IX86_BUILTIN_GETMANTPH512, UNKNOWN, (int) V32HF_FTYPE_V32HF_INT_V32HF_USI_INT)
 BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512f_vgetmantv8hf_mask_round, "__builtin_ia32_getmantsh_mask_round", IX86_BUILTIN_GETMANTSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_INT_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtph2dq_v16si_mask_round, "__builtin_ia32_vcvtph2dq_v16si_mask_round", IX86_BUILTIN_VCVTPH2DQ_V16SI_MASK_ROUND, UNKNOWN, (int) V16SI_FTYPE_V16HF_V16SI_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtph2udq_v16si_mask_round, "__builtin_ia32_vcvtph2udq_v16si_mask_round", IX86_BUILTIN_VCVTPH2UDQ_V16SI_MASK_ROUND, UNKNOWN, (int) V16SI_FTYPE_V16HF_V16SI_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtph2qq_v8di_mask_round, "__builtin_ia32_vcvtph2qq_v8di_mask_round", IX86_BUILTIN_VCVTPH2QQ_V8DI_MASK_ROUND, UNKNOWN, (int) V8DI_FTYPE_V8HF_V8DI_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtph2uqq_v8di_mask_round, "__builtin_ia32_vcvtph2uqq_v8di_mask_round", IX86_BUILTIN_VCVTPH2UQQ_V8DI_MASK_ROUND, UNKNOWN, (int) V8DI_FTYPE_V8HF_V8DI_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtph2w_v32hi_mask_round, "__builtin_ia32_vcvtph2w_v32hi_mask_round", IX86_BUILTIN_VCVTPH2W_V32HI_MASK_ROUND, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vcvtph2uw_v32hi_mask_round, "__builtin_ia32_vcvtph2uw_v32hi_mask_round", IX86_BUILTIN_VCVTPH2UW_V32HI_MASK_ROUND, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI_INT)
 
 BDESC_END (ROUND_ARGS, MULTI_ARG)
 
diff --git a/gcc/config/i386/i386-expand.c b/gcc/config/i386/i386-expand.c
index bfc7fc75b97..59d1f4f5eea 100644
--- a/gcc/config/i386/i386-expand.c
+++ b/gcc/config/i386/i386-expand.c
@@ -9565,9 +9565,13 @@ ix86_expand_args_builtin (const struct builtin_description *d,
     case V16HF_FTYPE_V16HF_V16HF_UHI:
     case V8SF_FTYPE_V8HI_V8SF_UQI:
     case V4SF_FTYPE_V8HI_V4SF_UQI:
+    case V8SI_FTYPE_V8HF_V8SI_UQI:
     case V8SI_FTYPE_V8SF_V8SI_UQI:
     case V4SI_FTYPE_V4SF_V4SI_UQI:
+    case V4SI_FTYPE_V8HF_V4SI_UQI:
+    case V4DI_FTYPE_V8HF_V4DI_UQI:
     case V4DI_FTYPE_V4SF_V4DI_UQI:
+    case V2DI_FTYPE_V8HF_V2DI_UQI:
     case V2DI_FTYPE_V4SF_V2DI_UQI:
     case V8HF_FTYPE_V8HF_V8HF_UQI:
     case V4SF_FTYPE_V4DI_V4SF_UQI:
@@ -9578,6 +9582,7 @@ ix86_expand_args_builtin (const struct builtin_description *d,
     case V16QI_FTYPE_V16HI_V16QI_UHI:
     case V16QI_FTYPE_V4SI_V16QI_UQI:
     case V16QI_FTYPE_V8SI_V16QI_UQI:
+    case V8HI_FTYPE_V8HF_V8HI_UQI:
     case V8HI_FTYPE_V4SI_V8HI_UQI:
     case V8HI_FTYPE_V8SI_V8HI_UQI:
     case V16QI_FTYPE_V2DI_V16QI_UQI:
@@ -9635,6 +9640,7 @@ ix86_expand_args_builtin (const struct builtin_description *d,
     case V8DI_FTYPE_DI_V8DI_UQI:
     case V16SF_FTYPE_V8SF_V16SF_UHI:
     case V16SI_FTYPE_V8SI_V16SI_UHI:
+    case V16HI_FTYPE_V16HF_V16HI_UHI:
     case V16HI_FTYPE_V16HI_V16HI_UHI:
     case V8HI_FTYPE_V16QI_V8HI_UQI:
     case V16HI_FTYPE_V16QI_V16HI_UHI:
@@ -10501,7 +10507,9 @@ ix86_expand_round_builtin (const struct builtin_description *d,
       break;
     case V8SF_FTYPE_V8DF_V8SF_QI_INT:
     case V8DF_FTYPE_V8DF_V8DF_QI_INT:
+    case V32HI_FTYPE_V32HF_V32HI_USI_INT:
     case V8SI_FTYPE_V8DF_V8SI_QI_INT:
+    case V8DI_FTYPE_V8HF_V8DI_UQI_INT:
     case V8DI_FTYPE_V8DF_V8DI_QI_INT:
     case V8SF_FTYPE_V8DI_V8SF_QI_INT:
     case V8DF_FTYPE_V8DI_V8DF_QI_INT:
@@ -10510,6 +10518,7 @@ ix86_expand_round_builtin (const struct builtin_description *d,
     case V8DI_FTYPE_V8SF_V8DI_QI_INT:
     case V16SF_FTYPE_V16SI_V16SF_HI_INT:
     case V16SI_FTYPE_V16SF_V16SI_HI_INT:
+    case V16SI_FTYPE_V16HF_V16SI_UHI_INT:
     case V8DF_FTYPE_V8SF_V8DF_QI_INT:
     case V16SF_FTYPE_V16HI_V16SF_HI_INT:
     case V2DF_FTYPE_V2DF_V2DF_V2DF_INT:
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index 97f7c698d5d..7b705422396 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -722,6 +722,11 @@ (define_mode_attr ssebytemode
   [(V8DI "V64QI") (V4DI "V32QI") (V2DI "V16QI")
    (V16SI "V64QI") (V8SI "V32QI") (V4SI "V16QI")])
 
+(define_mode_attr sseintconvert
+  [(V32HI "w") (V16HI "w") (V8HI "w")
+   (V16SI "dq") (V8SI "dq") (V4SI "dq")
+   (V8DI "qq") (V4DI "qq") (V2DI "qq")])
+
 ;; All 128bit vector integer modes
 (define_mode_iterator VI_128 [V16QI V8HI V4SI V2DI])
 
@@ -943,6 +948,12 @@ (define_mode_attr ssehalfvecmodelower
    (V4SF  "v2sf")
    (V32HF "v16hf") (V16HF "v8hf") (V8HF "v4hf")])
 
+;; Mapping of vector integer modes to the HF vector mode they convert from.
+(define_mode_attr ssePHmode
+  [(V32HI "V32HF") (V16HI "V16HF") (V8HI "V8HF")
+   (V16SI "V16HF") (V8SI "V8HF") (V4SI "V8HF")
+   (V8DI "V8HF") (V4DI "V8HF") (V2DI "V8HF")])
+
 ;; Mapping of vector modes to packed single mode of the same size
 (define_mode_attr ssePSmode
   [(V16SI "V16SF") (V8DF "V16SF")
@@ -5408,6 +5419,30 @@ (define_insn "*fma4i_vmfnmsub_<mode>"
   [(set_attr "type" "ssemuladd")
    (set_attr "mode" "<MODE>")])
 
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel half-precision floating point conversion operations
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_int_iterator UNSPEC_US_FIX_NOTRUNC
+	[UNSPEC_UNSIGNED_FIX_NOTRUNC UNSPEC_FIX_NOTRUNC])
+
+(define_int_attr sseintconvertsignprefix
+	[(UNSPEC_UNSIGNED_FIX_NOTRUNC "u")
+	 (UNSPEC_FIX_NOTRUNC "")])
+
+(define_insn "avx512fp16_vcvtph2<sseintconvertsignprefix><sseintconvert>_<mode><mask_name><round_name>"
+  [(set (match_operand:VI248_AVX512VL 0 "register_operand" "=v")
+        (unspec:VI248_AVX512VL
+	   [(match_operand:<ssePHmode> 1 "<round_nimm_predicate>" "<round_constraint>")]
+	   UNSPEC_US_FIX_NOTRUNC))]
+  "TARGET_AVX512FP16"
+  "vcvtph2<sseintconvertsignprefix><sseintconvert>\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
+  [(set_attr "type" "ssecvt")
+   (set_attr "prefix" "evex")
+   (set_attr "mode" "<sseinsnmode>")])
+
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ;;
 ;; Parallel single-precision floating point conversion operations
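
The single define_insn above yields one pattern per (mode, signedness)
pair: VI248_AVX512VL supplies the integer destination mode, ssePHmode
the matching FP16 source mode, sseintconvert the w/dq/qq mnemonic
suffix, and UNSPEC_US_FIX_NOTRUNC together with sseintconvertsignprefix
the signed/unsigned flavor.  A hypothetical smoke test (not part of the
patch):

    #include <immintrin.h>
    /* With -O2 -mavx512fp16 these are expected to assemble to a single
       vcvtph2dq and vcvtph2udq respectively.  */
    __m512i to_i32 (__m256h x) { return _mm512_cvtph_epi32 (x); }
    __m512i to_u32 (__m256h x) { return _mm512_cvtph_epu32 (x); }
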
diff --git a/gcc/testsuite/gcc.target/i386/avx-1.c b/gcc/testsuite/gcc.target/i386/avx-1.c
index b3cffa0644f..cdfc2e3b69f 100644
--- a/gcc/testsuite/gcc.target/i386/avx-1.c
+++ b/gcc/testsuite/gcc.target/i386/avx-1.c
@@ -719,6 +719,12 @@
 #define __builtin_ia32_getexpsh_mask_round(A, B, C, D, E) __builtin_ia32_getexpsh_mask_round(A, B, C, D, 4)
 #define __builtin_ia32_getmantph512_mask(A, F, C, D, E) __builtin_ia32_getmantph512_mask(A, 1, C, D, 8)
 #define __builtin_ia32_getmantsh_mask_round(A, B, C, W, U, D) __builtin_ia32_getmantsh_mask_round(A, B, 1, W, U, 4)
+#define __builtin_ia32_vcvtph2dq_v16si_mask_round(A, B, C, D) __builtin_ia32_vcvtph2dq_v16si_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2udq_v16si_mask_round(A, B, C, D) __builtin_ia32_vcvtph2udq_v16si_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2qq_v8di_mask_round(A, B, C, D) __builtin_ia32_vcvtph2qq_v8di_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2uqq_v8di_mask_round(A, B, C, D) __builtin_ia32_vcvtph2uqq_v8di_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2w_v32hi_mask_round(A, B, C, D) __builtin_ia32_vcvtph2w_v32hi_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2uw_v32hi_mask_round(A, B, C, D) __builtin_ia32_vcvtph2uw_v32hi_mask_round(A, B, C, 8)
 
 /* avx512fp16vlintrin.h */
 #define __builtin_ia32_vcmpph_v8hf_mask(A, B, C, D) __builtin_ia32_vcmpph_v8hf_mask(A, B, 1, D)
diff --git a/gcc/testsuite/gcc.target/i386/sse-13.c b/gcc/testsuite/gcc.target/i386/sse-13.c
index 67ef567e437..5e4aaf8ce9b 100644
--- a/gcc/testsuite/gcc.target/i386/sse-13.c
+++ b/gcc/testsuite/gcc.target/i386/sse-13.c
@@ -736,6 +736,12 @@
 #define __builtin_ia32_getexpsh_mask_round(A, B, C, D, E) __builtin_ia32_getexpsh_mask_round(A, B, C, D, 4)
 #define __builtin_ia32_getmantph512_mask(A, F, C, D, E) __builtin_ia32_getmantph512_mask(A, 1, C, D, 8)
 #define __builtin_ia32_getmantsh_mask_round(A, B, C, W, U, D) __builtin_ia32_getmantsh_mask_round(A, B, 1, W, U, 4)
+#define __builtin_ia32_vcvtph2dq_v16si_mask_round(A, B, C, D) __builtin_ia32_vcvtph2dq_v16si_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2udq_v16si_mask_round(A, B, C, D) __builtin_ia32_vcvtph2udq_v16si_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2qq_v8di_mask_round(A, B, C, D) __builtin_ia32_vcvtph2qq_v8di_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2uqq_v8di_mask_round(A, B, C, D) __builtin_ia32_vcvtph2uqq_v8di_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2w_v32hi_mask_round(A, B, C, D) __builtin_ia32_vcvtph2w_v32hi_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2uw_v32hi_mask_round(A, B, C, D) __builtin_ia32_vcvtph2uw_v32hi_mask_round(A, B, C, 8)
 
 /* avx512fp16vlintrin.h */
 #define __builtin_ia32_vcmpph_v8hf_mask(A, B, C, D) __builtin_ia32_vcmpph_v8hf_mask(A, B, 1, D)
diff --git a/gcc/testsuite/gcc.target/i386/sse-14.c b/gcc/testsuite/gcc.target/i386/sse-14.c
index 04163874f90..32aa4518703 100644
--- a/gcc/testsuite/gcc.target/i386/sse-14.c
+++ b/gcc/testsuite/gcc.target/i386/sse-14.c
@@ -678,6 +678,12 @@ test_1 (_mm_roundscale_ph, __m128h, __m128h, 123)
 test_1 (_mm256_roundscale_ph, __m256h, __m256h, 123)
 test_1 (_mm512_roundscale_ph, __m512h, __m512h, 123)
 test_1 (_mm512_getexp_round_ph, __m512h, __m512h, 8)
+test_1 (_mm512_cvt_roundph_epi16, __m512i, __m512h, 8)
+test_1 (_mm512_cvt_roundph_epu16, __m512i, __m512h, 8)
+test_1 (_mm512_cvt_roundph_epi32, __m512i, __m256h, 8)
+test_1 (_mm512_cvt_roundph_epu32, __m512i, __m256h, 8)
+test_1 (_mm512_cvt_roundph_epi64, __m512i, __m128h, 8)
+test_1 (_mm512_cvt_roundph_epu64, __m512i, __m128h, 8)
 test_1x (_mm512_reduce_round_ph, __m512h, __m512h, 123, 8)
 test_1x (_mm512_roundscale_round_ph, __m512h, __m512h, 123, 8)
 test_1x (_mm512_getmant_ph, __m512h, __m512h, 1, 1)
@@ -710,6 +716,12 @@ test_2 (_mm512_maskz_roundscale_ph, __m512h, __mmask32, __m512h, 123)
 test_2 (_mm_roundscale_sh, __m128h, __m128h, __m128h, 123)
 test_2 (_mm512_maskz_getexp_round_ph, __m512h, __mmask32, __m512h, 8)
 test_2 (_mm_getexp_round_sh, __m128h, __m128h, __m128h, 8)
+test_2 (_mm512_maskz_cvt_roundph_epi16, __m512i, __mmask32, __m512h, 8)
+test_2 (_mm512_maskz_cvt_roundph_epu16, __m512i, __mmask32, __m512h, 8)
+test_2 (_mm512_maskz_cvt_roundph_epi32, __m512i, __mmask16, __m256h, 8)
+test_2 (_mm512_maskz_cvt_roundph_epu32, __m512i, __mmask16, __m256h, 8)
+test_2 (_mm512_maskz_cvt_roundph_epi64, __m512i, __mmask8, __m128h, 8)
+test_2 (_mm512_maskz_cvt_roundph_epu64, __m512i, __mmask8, __m128h, 8)
 test_2x (_mm512_cmp_round_ph_mask, __mmask32, __m512h, __m512h, 1, 8)
 test_2x (_mm_cmp_round_sh_mask, __mmask8, __m128h, __m128h, 1, 8)
 test_2x (_mm_comi_round_sh, int, __m128h, __m128h, 1, 8)
@@ -748,6 +760,12 @@ test_3 (_mm512_mask_roundscale_ph, __m512h, __m512h, __mmask32, __m512h, 123)
 test_3 (_mm_maskz_roundscale_sh, __m128h, __mmask8, __m128h, __m128h, 123)
 test_3 (_mm_maskz_getexp_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
 test_3 (_mm512_mask_getexp_round_ph, __m512h, __m512h, __mmask32, __m512h, 8)
+test_3 (_mm512_mask_cvt_roundph_epi16, __m512i, __m512i, __mmask32, __m512h, 8)
+test_3 (_mm512_mask_cvt_roundph_epu16, __m512i, __m512i, __mmask32, __m512h, 8)
+test_3 (_mm512_mask_cvt_roundph_epi32, __m512i, __m512i, __mmask16, __m256h, 8)
+test_3 (_mm512_mask_cvt_roundph_epu32, __m512i, __m512i, __mmask16, __m256h, 8)
+test_3 (_mm512_mask_cvt_roundph_epi64, __m512i, __m512i, __mmask8, __m128h, 8)
+test_3 (_mm512_mask_cvt_roundph_epu64, __m512i, __m512i, __mmask8, __m128h, 8)
 test_3x (_mm512_mask_cmp_round_ph_mask, __mmask32, __mmask32, __m512h, __m512h, 1, 8)
 test_3x (_mm_mask_cmp_round_sh_mask, __mmask8, __mmask8, __m128h, __m128h, 1, 8)
 test_3x (_mm512_mask_reduce_round_ph, __m512h, __m512h, __mmask32, __m512h, 123, 8)
diff --git a/gcc/testsuite/gcc.target/i386/sse-22.c b/gcc/testsuite/gcc.target/i386/sse-22.c
index 008600a393d..44ac10d602f 100644
--- a/gcc/testsuite/gcc.target/i386/sse-22.c
+++ b/gcc/testsuite/gcc.target/i386/sse-22.c
@@ -783,6 +783,12 @@ test_1 (_mm_roundscale_ph, __m128h, __m128h, 123)
 test_1 (_mm256_roundscale_ph, __m256h, __m256h, 123)
 test_1 (_mm512_roundscale_ph, __m512h, __m512h, 123)
 test_1 (_mm512_getexp_round_ph, __m512h, __m512h, 8)
+test_1 (_mm512_cvt_roundph_epi16, __m512i, __m512h, 8)
+test_1 (_mm512_cvt_roundph_epu16, __m512i, __m512h, 8)
+test_1 (_mm512_cvt_roundph_epi32, __m512i, __m256h, 8)
+test_1 (_mm512_cvt_roundph_epu32, __m512i, __m256h, 8)
+test_1 (_mm512_cvt_roundph_epi64, __m512i, __m128h, 8)
+test_1 (_mm512_cvt_roundph_epu64, __m512i, __m128h, 8)
 test_1x (_mm512_reduce_round_ph, __m512h, __m512h, 123, 8)
 test_1x (_mm512_roundscale_round_ph, __m512h, __m512h, 123, 8)
 test_1x (_mm512_getmant_ph, __m512h, __m512h, 1, 1)
@@ -814,6 +820,12 @@ test_2 (_mm512_maskz_roundscale_ph, __m512h, __mmask32, __m512h, 123)
 test_2 (_mm_roundscale_sh, __m128h, __m128h, __m128h, 123)
 test_2 (_mm512_maskz_getexp_round_ph, __m512h, __mmask32, __m512h, 8)
 test_2 (_mm_getexp_round_sh, __m128h, __m128h, __m128h, 8)
+test_2 (_mm512_maskz_cvt_roundph_epi16, __m512i, __mmask32, __m512h, 8)
+test_2 (_mm512_maskz_cvt_roundph_epu16, __m512i, __mmask32, __m512h, 8)
+test_2 (_mm512_maskz_cvt_roundph_epi32, __m512i, __mmask16, __m256h, 8)
+test_2 (_mm512_maskz_cvt_roundph_epu32, __m512i, __mmask16, __m256h, 8)
+test_2 (_mm512_maskz_cvt_roundph_epi64, __m512i, __mmask8, __m128h, 8)
+test_2 (_mm512_maskz_cvt_roundph_epu64, __m512i, __mmask8, __m128h, 8)
 test_2x (_mm512_cmp_round_ph_mask, __mmask32, __m512h, __m512h, 1, 8)
 test_2x (_mm_cmp_round_sh_mask, __mmask8, __m128h, __m128h, 1, 8)
 test_2x (_mm_comi_round_sh, int, __m128h, __m128h, 1, 8)
@@ -851,6 +863,12 @@ test_3 (_mm512_mask_roundscale_ph, __m512h, __m512h, __mmask32, __m512h, 123)
 test_3 (_mm_maskz_roundscale_sh, __m128h, __mmask8, __m128h, __m128h, 123)
 test_3 (_mm_maskz_getexp_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
 test_3 (_mm512_mask_getexp_round_ph, __m512h, __m512h, __mmask32, __m512h, 8)
+test_3 (_mm512_mask_cvt_roundph_epi16, __m512i, __m512i, __mmask32, __m512h, 8)
+test_3 (_mm512_mask_cvt_roundph_epu16, __m512i, __m512i, __mmask32, __m512h, 8)
+test_3 (_mm512_mask_cvt_roundph_epi32, __m512i, __m512i, __mmask16, __m256h, 8)
+test_3 (_mm512_mask_cvt_roundph_epu32, __m512i, __m512i, __mmask16, __m256h, 8)
+test_3 (_mm512_mask_cvt_roundph_epi64, __m512i, __m512i, __mmask8, __m128h, 8)
+test_3 (_mm512_mask_cvt_roundph_epu64, __m512i, __m512i, __mmask8, __m128h, 8)
 test_3x (_mm512_mask_cmp_round_ph_mask, __mmask32, __mmask32, __m512h, __m512h, 1, 8)
 test_3x (_mm_mask_cmp_round_sh_mask, __mmask8, __mmask8, __m128h, __m128h, 1, 8)
 test_3x (_mm512_mask_reduce_round_ph, __m512h, __m512h, __mmask32, __m512h, 123, 8)
diff --git a/gcc/testsuite/gcc.target/i386/sse-23.c b/gcc/testsuite/gcc.target/i386/sse-23.c
index b3f07587acb..ae6151b4a61 100644
--- a/gcc/testsuite/gcc.target/i386/sse-23.c
+++ b/gcc/testsuite/gcc.target/i386/sse-23.c
@@ -737,6 +737,12 @@
 #define __builtin_ia32_getexpsh_mask_round(A, B, C, D, E) __builtin_ia32_getexpsh_mask_round(A, B, C, D, 4)
 #define __builtin_ia32_getmantph512_mask(A, F, C, D, E) __builtin_ia32_getmantph512_mask(A, 1, C, D, 8)
 #define __builtin_ia32_getmantsh_mask_round(A, B, C, W, U, D) __builtin_ia32_getmantsh_mask_round(A, B, 1, W, U, 4)
+#define __builtin_ia32_vcvtph2dq_v16si_mask_round(A, B, C, D) __builtin_ia32_vcvtph2dq_v16si_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2udq_v16si_mask_round(A, B, C, D) __builtin_ia32_vcvtph2udq_v16si_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2qq_v8di_mask_round(A, B, C, D) __builtin_ia32_vcvtph2qq_v8di_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2uqq_v8di_mask_round(A, B, C, D) __builtin_ia32_vcvtph2uqq_v8di_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2w_v32hi_mask_round(A, B, C, D) __builtin_ia32_vcvtph2w_v32hi_mask_round(A, B, C, 8)
+#define __builtin_ia32_vcvtph2uw_v32hi_mask_round(A, B, C, D) __builtin_ia32_vcvtph2uw_v32hi_mask_round(A, B, C, 8)
 
 /* avx512fp16vlintrin.h */
 #define __builtin_ia32_vcmpph_v8hf_mask(A, B, C, D) __builtin_ia32_vcmpph_v8hf_mask(A, B, 1, D)
-- 
2.18.1


