* [PATCH 1/2] [x86] Support dot_prod optabs for 64-bit vector.
@ 2024-04-28 5:58 liuhongt
2024-04-28 5:58 ` [PATCH 2/2] Extend usdot_prodv*qi with vpmaddwd when AVXVNNI/AVX512VNNI is not available liuhongt
0 siblings, 1 reply; 2+ messages in thread
From: liuhongt @ 2024-04-28 5:58 UTC (permalink / raw)
To: gcc-patches; +Cc: crazylht, hjl.tools
Bootstrapped and regtested on x86_64-pc-linux-gnu{-m32,}.
Ready to push to trunk.
gcc/ChangeLog:
PR target/113079
* config/i386/mmx.md (usdot_prodv8qi): New expander.
(sdot_prodv8qi): Ditto.
(udot_prodv8qi): Ditto.
(usdot_prodv4hi): Ditto.
(udot_prodv4hi): Ditto.
(sdot_prodv4hi): Ditto.
gcc/testsuite/ChangeLog:
* gcc.target/i386/pr113079.c: New test.
* gcc.target/i386/pr113079-2.c: New test.
* gcc.target/i386/sse4-pr113079-2.c: New test.
---
gcc/config/i386/mmx.md | 195 ++++++++++++++++++
gcc/testsuite/gcc.target/i386/pr113079-2.c | 161 +++++++++++++++
gcc/testsuite/gcc.target/i386/pr113079.c | 57 +++++
.../gcc.target/i386/sse4-pr113079-2.c | 158 ++++++++++++++
4 files changed, 571 insertions(+)
create mode 100644 gcc/testsuite/gcc.target/i386/pr113079-2.c
create mode 100644 gcc/testsuite/gcc.target/i386/pr113079.c
create mode 100644 gcc/testsuite/gcc.target/i386/sse4-pr113079-2.c
diff --git a/gcc/config/i386/mmx.md b/gcc/config/i386/mmx.md
index 9a8d6030d8b..5f342497885 100644
--- a/gcc/config/i386/mmx.md
+++ b/gcc/config/i386/mmx.md
@@ -6342,6 +6342,201 @@ (define_expand "usadv8qi"
DONE;
})
+(define_expand "usdot_prodv8qi"
+ [(match_operand:V2SI 0 "register_operand")
+ (match_operand:V8QI 1 "register_operand")
+ (match_operand:V8QI 2 "register_operand")
+ (match_operand:V2SI 3 "register_operand")]
+ "TARGET_MMX_WITH_SSE && TARGET_SSE4_1"
+{
+ operands[1] = force_reg (V8QImode, operands[1]);
+ operands[2] = force_reg (V8QImode, operands[2]);
+ operands[3] = force_reg (V2SImode, operands[3]);
+
+ if ((TARGET_AVX512VNNI && TARGET_AVX512VL)
+ || TARGET_AVXVNNI)
+ {
+ rtx op1 = lowpart_subreg (V16QImode, operands[1], V8QImode);
+ rtx op2 = lowpart_subreg (V16QImode, operands[2], V8QImode);
+ rtx op3 = lowpart_subreg (V4SImode, operands[3], V2SImode);
+ rtx op0 = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_usdot_prodv16qi (op0, op1, op2, op3));
+ emit_move_insn (operands[0], lowpart_subreg (V2SImode, op0, V4SImode));
+ }
+ else
+ {
+ rtx op1 = gen_reg_rtx (V8HImode);
+ rtx op2 = gen_reg_rtx (V8HImode);
+ rtx op3 = gen_reg_rtx (V4SImode);
+ rtx op0 = gen_reg_rtx (V4SImode);
+ rtx op0_1 = gen_reg_rtx (V4SImode);
+
+ emit_move_insn (op3, CONST0_RTX (V4SImode));
+ emit_insn (gen_zero_extendv8qiv8hi2 (op1, operands[1]));
+ emit_insn (gen_extendv8qiv8hi2 (op2, operands[2]));
+ emit_insn (gen_sdot_prodv8hi (op0, op1, op2, op3));
+
+ /* vec_perm (op0, 2, 3, 0, 1); */
+ emit_insn (gen_sse2_pshufd (op0_1, op0, GEN_INT (78)));
+ emit_insn (gen_addv4si3 (op0, op0, op0_1));
+ emit_insn (gen_addv2si3 (operands[0], operands[3],
+ lowpart_subreg (V2SImode, op0, V4SImode)));
+ }
+ DONE;
+})
+
+(define_expand "sdot_prodv8qi"
+ [(match_operand:V2SI 0 "register_operand")
+ (match_operand:V8QI 1 "register_operand")
+ (match_operand:V8QI 2 "register_operand")
+ (match_operand:V2SI 3 "register_operand")]
+ "TARGET_MMX_WITH_SSE && TARGET_SSE4_1"
+{
+ operands[1] = force_reg (V8QImode, operands[1]);
+ operands[2] = force_reg (V8QImode, operands[2]);
+ operands[3] = force_reg (V2SImode, operands[3]);
+
+ if (TARGET_AVXVNNIINT8)
+ {
+ rtx op1 = lowpart_subreg (V16QImode, operands[1], V8QImode);
+ rtx op2 = lowpart_subreg (V16QImode, operands[2], V8QImode);
+ rtx op3 = lowpart_subreg (V4SImode, operands[3], V2SImode);
+ rtx op0 = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_sdot_prodv16qi (op0, op1, op2, op3));
+ emit_move_insn (operands[0], lowpart_subreg (V2SImode, op0, V4SImode));
+ }
+ else
+ {
+ rtx op1 = gen_reg_rtx (V8HImode);
+ rtx op2 = gen_reg_rtx (V8HImode);
+ rtx op3 = gen_reg_rtx (V4SImode);
+ rtx op0 = gen_reg_rtx (V4SImode);
+ rtx op0_1 = gen_reg_rtx (V4SImode);
+
+ emit_move_insn (op3, CONST0_RTX (V4SImode));
+ emit_insn (gen_extendv8qiv8hi2 (op1, operands[1]));
+ emit_insn (gen_extendv8qiv8hi2 (op2, operands[2]));
+ emit_insn (gen_sdot_prodv8hi (op0, op1, op2, op3));
+
+ /* vec_perm (op0, 2, 3, 0, 1); */
+ emit_insn (gen_sse2_pshufd (op0_1, op0, GEN_INT (78)));
+ emit_insn (gen_addv4si3 (op0, op0, op0_1));
+ emit_insn (gen_addv2si3 (operands[0], operands[3],
+ lowpart_subreg (V2SImode, op0, V4SImode)));
+ }
+ DONE;
+
+})
+
+(define_expand "udot_prodv8qi"
+ [(match_operand:V2SI 0 "register_operand")
+ (match_operand:V8QI 1 "register_operand")
+ (match_operand:V8QI 2 "register_operand")
+ (match_operand:V2SI 3 "register_operand")]
+ "TARGET_MMX_WITH_SSE && TARGET_SSE4_1"
+{
+ operands[1] = force_reg (V8QImode, operands[1]);
+ operands[2] = force_reg (V8QImode, operands[2]);
+ operands[3] = force_reg (V2SImode, operands[3]);
+
+ if (TARGET_AVXVNNIINT8)
+ {
+ rtx op1 = lowpart_subreg (V16QImode, operands[1], V8QImode);
+ rtx op2 = lowpart_subreg (V16QImode, operands[2], V8QImode);
+ rtx op3 = lowpart_subreg (V4SImode, operands[3], V2SImode);
+ rtx op0 = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_udot_prodv16qi (op0, op1, op2, op3));
+ emit_move_insn (operands[0], lowpart_subreg (V2SImode, op0, V4SImode));
+ }
+ else
+ {
+ rtx op1 = gen_reg_rtx (V8HImode);
+ rtx op2 = gen_reg_rtx (V8HImode);
+ rtx op3 = gen_reg_rtx (V4SImode);
+ rtx op0 = gen_reg_rtx (V4SImode);
+ rtx op0_1 = gen_reg_rtx (V4SImode);
+
+ emit_move_insn (op3, CONST0_RTX (V4SImode));
+ emit_insn (gen_zero_extendv8qiv8hi2 (op1, operands[1]));
+ emit_insn (gen_zero_extendv8qiv8hi2 (op2, operands[2]));
+ emit_insn (gen_sdot_prodv8hi (op0, op1, op2, op3));
+
+ /* vec_perm (op0, 2, 3, 0, 1); */
+ emit_insn (gen_sse2_pshufd (op0_1, op0, GEN_INT (78)));
+ emit_insn (gen_addv4si3 (op0, op0, op0_1));
+ emit_insn (gen_addv2si3 (operands[0], operands[3],
+ lowpart_subreg (V2SImode, op0, V4SImode)));
+ }
+ DONE;
+
+})
+
+(define_expand "usdot_prodv4hi"
+ [(match_operand:V2SI 0 "register_operand")
+ (match_operand:V4HI 1 "register_operand")
+ (match_operand:V4HI 2 "register_operand")
+ (match_operand:V2SI 3 "register_operand")]
+ "TARGET_AVXVNNIINT16 && TARGET_MMX_WITH_SSE"
+{
+ operands[1] = force_reg (V4HImode, operands[1]);
+ operands[2] = force_reg (V4HImode, operands[2]);
+ operands[3] = force_reg (V2SImode, operands[3]);
+
+ rtx op1 = lowpart_subreg (V8HImode, operands[1], V4HImode);
+ rtx op2 = lowpart_subreg (V8HImode, operands[2], V4HImode);
+ rtx op3 = lowpart_subreg (V4SImode, operands[3], V2SImode);
+ rtx op0 = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_usdot_prodv8hi (op0, op1, op2, op3));
+ emit_move_insn (operands[0], lowpart_subreg (V2SImode, op0, V4SImode));
+ DONE;
+})
+
+(define_expand "udot_prodv4hi"
+ [(match_operand:V2SI 0 "register_operand")
+ (match_operand:V4HI 1 "register_operand")
+ (match_operand:V4HI 2 "register_operand")
+ (match_operand:V2SI 3 "register_operand")]
+ "TARGET_AVXVNNIINT16 && TARGET_MMX_WITH_SSE"
+{
+ operands[1] = force_reg (V4HImode, operands[1]);
+ operands[2] = force_reg (V4HImode, operands[2]);
+ operands[3] = force_reg (V2SImode, operands[3]);
+
+ rtx op1 = lowpart_subreg (V8HImode, operands[1], V4HImode);
+ rtx op2 = lowpart_subreg (V8HImode, operands[2], V4HImode);
+ rtx op3 = lowpart_subreg (V4SImode, operands[3], V2SImode);
+ rtx op0 = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_udot_prodv8hi (op0, op1, op2, op3));
+ emit_move_insn (operands[0], lowpart_subreg (V2SImode, op0, V4SImode));
+ DONE;
+})
+
+(define_expand "sdot_prodv4hi"
+ [(match_operand:V2SI 0 "register_operand")
+ (match_operand:V4HI 1 "register_operand")
+ (match_operand:V4HI 2 "register_operand")
+ (match_operand:V2SI 3 "register_operand")]
+ "TARGET_MMX_WITH_SSE"
+{
+ operands[1] = force_reg (V4HImode, operands[1]);
+ operands[2] = force_reg (V4HImode, operands[2]);
+ operands[3] = force_reg (V2SImode, operands[3]);
+
+ rtx op1 = lowpart_subreg (V8HImode, operands[1], V4HImode);
+ rtx op2 = lowpart_subreg (V8HImode, operands[2], V4HImode);
+ rtx op3 = lowpart_subreg (V4SImode, operands[3], V2SImode);
+ rtx op0 = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_sdot_prodv8hi (op0, op1, op2, op3));
+ emit_move_insn (operands[0], lowpart_subreg (V2SImode, op0, V4SImode));
+ DONE;
+})
+
(define_insn_and_split "mmx_pmovmskb"
[(set (match_operand:SI 0 "register_operand" "=r,r,jr")
(unspec:SI [(match_operand:V8QI 1 "register_operand" "y,x,x")]
diff --git a/gcc/testsuite/gcc.target/i386/pr113079-2.c b/gcc/testsuite/gcc.target/i386/pr113079-2.c
new file mode 100644
index 00000000000..2f0e4e824df
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr113079-2.c
@@ -0,0 +1,161 @@
+/* { dg-do run } */
+/* { dg-options "-O2 -mavxvnniint8 -mavxvnni -mavxvnniint16" } */
+/* { dg-require-effective-target avxvnniint16 } */
+/* { dg-require-effective-target avxvnniint8 } */
+
+#define AVXVNNIINT16
+#define AVXVNNIINT8
+#ifndef CHECK
+#define CHECK "avx-check.h"
+#endif
+
+#ifndef TEST
+#define TEST avx_test
+#endif
+
+#include CHECK
+#include "pr113079.c"
+
+#define N 256
+
+short hs1[4], hs2[4];
+unsigned short hu1[4], hu2[4];
+char qs1[16], qs2[16];
+unsigned char qu1[16], qu2[16];
+
+int __attribute__((noinline, noclone, optimize("no-tree-vectorize")))
+usdot_prodv4hi_scalar (unsigned short *a, short *b, int c)
+{
+ int i;
+ for (i = 0; i < 4; i++)
+ {
+ c += ((int) a[i] * (int) b[i]);
+ }
+ return c;
+}
+
+int __attribute__((noinline, noclone, optimize("no-tree-vectorize")))
+udot_prodv4hi_scalar (unsigned short *a, unsigned short *b, int c)
+{
+ int i;
+ for (i = 0; i < 4; i++)
+ {
+ c += ((int) a[i] * (int) b[i]);
+ }
+ return c;
+}
+
+int __attribute__((noinline, noclone, optimize("no-tree-vectorize")))
+sdot_prodv4hi_scalar (short *a, short *b, int c)
+{
+ int i;
+ for (i = 0; i < 4; i++)
+ {
+ c += ((int) a[i] * (int) b[i]);
+ }
+ return c;
+}
+
+int __attribute__((noinline, noclone, optimize("no-tree-vectorize")))
+usdot_prodv8qi_scalar (unsigned char *a, char *b, int c)
+{
+ int i;
+ for (i = 0; i < 8; i++)
+ {
+ c += ((int) a[i] * (int) b[i]);
+ }
+ return c;
+}
+
+int __attribute__((noinline, noclone, optimize("no-tree-vectorize")))
+udot_prodv8qi_scalar (unsigned char *a, unsigned char *b, int c)
+{
+ int i;
+ for (i = 0; i < 8; i++)
+ {
+ c += ((int) a[i] * (int) b[i]);
+ }
+ return c;
+}
+
+int __attribute__((noinline, noclone, optimize("no-tree-vectorize")))
+sdot_prodv8qi_scalar (char *a, char *b, int c)
+{
+ int i;
+ for (i = 0; i < 8; i++)
+ {
+ c += ((int) a[i] * (int) b[i]);
+ }
+ return c;
+}
+
+void init ()
+{
+ int i;
+
+ for (i = 0; i < 4; i++)
+ {
+ hs1[i] = -i + 2;
+ hs2[i] = -i * 2;
+ hu1[i] = i * 3;
+ hu2[i] = i * 4;
+ }
+
+ for (i = 0; i < 8; i++)
+ {
+ qs1[i] = -i + 2;
+ qs2[i] = -i * 2;
+ qu1[i] = i * 3;
+ qu2[i] = i * 4;
+ }
+
+}
+
+void
+TEST (void)
+{
+ init ();
+ int usdot_prodv8qi_ref;
+ int sdot_prodv8qi_ref;
+ int udot_prodv8qi_ref;
+ int usdot_prodv4hi_ref;
+ int sdot_prodv4hi_ref;
+ int udot_prodv4hi_ref;
+
+ int usdot_prodv8qi_exp;
+ int sdot_prodv8qi_exp;
+ int udot_prodv8qi_exp;
+ int usdot_prodv4hi_exp;
+ int sdot_prodv4hi_exp;
+ int udot_prodv4hi_exp;
+
+ usdot_prodv8qi_ref = usdot_prodv8qi (qu1, qs1, 1);
+ usdot_prodv8qi_exp = usdot_prodv8qi_scalar (qu1, qs1, 1);
+ if (usdot_prodv8qi_ref != usdot_prodv8qi_exp)
+ abort ();
+
+ udot_prodv8qi_ref = udot_prodv8qi (qu1, qu2, 2);
+ udot_prodv8qi_exp = udot_prodv8qi_scalar (qu1, qu2, 2);
+ if (udot_prodv8qi_ref != udot_prodv8qi_exp)
+ abort ();
+
+ sdot_prodv8qi_ref = sdot_prodv8qi (qs1, qs2, 3);
+ sdot_prodv8qi_exp = sdot_prodv8qi_scalar (qs1, qs2, 3);
+ if (sdot_prodv8qi_ref != sdot_prodv8qi_exp)
+ abort ();
+
+ usdot_prodv4hi_ref = usdot_prodv4hi (hu1, hs1, 4);
+ usdot_prodv4hi_exp = usdot_prodv4hi_scalar (hu1, hs1, 4);
+ if (usdot_prodv4hi_ref != usdot_prodv4hi_exp)
+ abort ();
+
+ udot_prodv4hi_ref = udot_prodv4hi (hu1, hu2, 5);
+ udot_prodv4hi_exp = udot_prodv4hi_scalar (hu1, hu2, 5);
+ if (udot_prodv4hi_ref != udot_prodv4hi_exp)
+ abort ();
+
+ sdot_prodv4hi_ref = sdot_prodv4hi (hs1, hs2, 6);
+ sdot_prodv4hi_exp = sdot_prodv4hi_scalar (hs1, hs2, 6);
+ if (sdot_prodv4hi_ref != sdot_prodv4hi_exp)
+ abort ();
+}
diff --git a/gcc/testsuite/gcc.target/i386/pr113079.c b/gcc/testsuite/gcc.target/i386/pr113079.c
new file mode 100644
index 00000000000..a2232c22255
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr113079.c
@@ -0,0 +1,57 @@
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-mavxvnniint8 -mavxvnniint16 -O2 -mavxvnni" } */
+/* { dg-final { scan-assembler-times "vpdpbusd" 1 } } */
+/* { dg-final { scan-assembler-times "vpdpbssd" 1 } } */
+/* { dg-final { scan-assembler-times "vpdpbuud" 1 } } */
+/* { dg-final { scan-assembler-times "vpdpwssd" 1 } } */
+/* { dg-final { scan-assembler-times "vpdpwuud" 1 } } */
+/* { dg-final { scan-assembler-times "vpdpwusd" 1 } } */
+
+int
+__attribute__((noinline))
+usdot_prodv8qi (unsigned char* p, char* q, int sum)
+{
+ for (int i = 0; i != 8; i++)
+ sum += p[i] * q[i];
+ return sum;
+}
+
+int
+udot_prodv8qi (unsigned char* p, unsigned char* q, int sum)
+{
+ for (int i = 0; i != 8; i++)
+ sum += p[i] * q[i];
+ return sum;
+}
+
+int
+sdot_prodv8qi (char* p, char* q, int sum)
+{
+ for (int i = 0; i != 8; i++)
+ sum += p[i] * q[i];
+ return sum;
+}
+
+int
+usdot_prodv4hi (unsigned short* p, short* q, int sum)
+{
+ for (int i = 0; i != 4; i++)
+ sum += p[i] * q[i];
+ return sum;
+}
+
+int
+sdot_prodv4hi (short* p, short* q, int sum)
+{
+ for (int i = 0; i != 4; i++)
+ sum += p[i] * q[i];
+ return sum;
+}
+
+int
+udot_prodv4hi (unsigned short* p, unsigned short* q, int sum)
+{
+ for (int i = 0; i != 4; i++)
+ sum += p[i] * q[i];
+ return sum;
+}
diff --git a/gcc/testsuite/gcc.target/i386/sse4-pr113079-2.c b/gcc/testsuite/gcc.target/i386/sse4-pr113079-2.c
new file mode 100644
index 00000000000..1d06635aa6e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/sse4-pr113079-2.c
@@ -0,0 +1,158 @@
+/* { dg-do run } */
+/* { dg-require-effective-target sse4 } */
+/* { dg-options "-O2 -msse4.1" } */
+
+#ifndef CHECK_H
+#define CHECK_H "sse4_1-check.h"
+#endif
+
+#ifndef TEST
+#define TEST sse4_1_test
+#endif
+
+#include CHECK_H
+#include "pr113079.c"
+
+#define N 256
+
+short hs1[4], hs2[4];
+unsigned short hu1[4], hu2[4];
+char qs1[16], qs2[16];
+unsigned char qu1[16], qu2[16];
+
+int __attribute__((noinline, noclone, optimize("no-tree-vectorize")))
+usdot_prodv4hi_scalar (unsigned short *a, short *b, int c)
+{
+ int i;
+ for (i = 0; i < 4; i++)
+ {
+ c += ((int) a[i] * (int) b[i]);
+ }
+ return c;
+}
+
+int __attribute__((noinline, noclone, optimize("no-tree-vectorize")))
+udot_prodv4hi_scalar (unsigned short *a, unsigned short *b, int c)
+{
+ int i;
+ for (i = 0; i < 4; i++)
+ {
+ c += ((int) a[i] * (int) b[i]);
+ }
+ return c;
+}
+
+int __attribute__((noinline, noclone, optimize("no-tree-vectorize")))
+sdot_prodv4hi_scalar (short *a, short *b, int c)
+{
+ int i;
+ for (i = 0; i < 4; i++)
+ {
+ c += ((int) a[i] * (int) b[i]);
+ }
+ return c;
+}
+
+int __attribute__((noinline, noclone, optimize("no-tree-vectorize")))
+usdot_prodv8qi_scalar (unsigned char *a, char *b, int c)
+{
+ int i;
+ for (i = 0; i < 8; i++)
+ {
+ c += ((int) a[i] * (int) b[i]);
+ }
+ return c;
+}
+
+int __attribute__((noinline, noclone, optimize("no-tree-vectorize")))
+udot_prodv8qi_scalar (unsigned char *a, unsigned char *b, int c)
+{
+ int i;
+ for (i = 0; i < 8; i++)
+ {
+ c += ((int) a[i] * (int) b[i]);
+ }
+ return c;
+}
+
+int __attribute__((noinline, noclone, optimize("no-tree-vectorize")))
+sdot_prodv8qi_scalar (char *a, char *b, int c)
+{
+ int i;
+ for (i = 0; i < 8; i++)
+ {
+ c += ((int) a[i] * (int) b[i]);
+ }
+ return c;
+}
+
+void init ()
+{
+ int i;
+
+ for (i = 0; i < 4; i++)
+ {
+ hs1[i] = -i + 2;
+ hs2[i] = -i * 2;
+ hu1[i] = i * 3;
+ hu2[i] = i * 4;
+ }
+
+ for (i = 0; i < 8; i++)
+ {
+ qs1[i] = -i + 2;
+ qs2[i] = -i * 2;
+ qu1[i] = i * 3;
+ qu2[i] = i * 4;
+ }
+
+}
+
+void
+TEST (void)
+{
+ init ();
+ int usdot_prodv8qi_ref;
+ int sdot_prodv8qi_ref;
+ int udot_prodv8qi_ref;
+ int usdot_prodv4hi_ref;
+ int sdot_prodv4hi_ref;
+ int udot_prodv4hi_ref;
+
+ int usdot_prodv8qi_exp;
+ int sdot_prodv8qi_exp;
+ int udot_prodv8qi_exp;
+ int usdot_prodv4hi_exp;
+ int sdot_prodv4hi_exp;
+ int udot_prodv4hi_exp;
+
+ usdot_prodv8qi_ref = usdot_prodv8qi (qu1, qs1, 1);
+ usdot_prodv8qi_exp = usdot_prodv8qi_scalar (qu1, qs1, 1);
+ if (usdot_prodv8qi_ref != usdot_prodv8qi_exp)
+ abort ();
+
+ udot_prodv8qi_ref = udot_prodv8qi (qu1, qu2, 2);
+ udot_prodv8qi_exp = udot_prodv8qi_scalar (qu1, qu2, 2);
+ if (udot_prodv8qi_ref != udot_prodv8qi_exp)
+ abort ();
+
+ sdot_prodv8qi_ref = sdot_prodv8qi (qs1, qs2, 3);
+ sdot_prodv8qi_exp = sdot_prodv8qi_scalar (qs1, qs2, 3);
+ if (sdot_prodv8qi_ref != sdot_prodv8qi_exp)
+ abort ();
+
+ usdot_prodv4hi_ref = usdot_prodv4hi (hu1, hs1, 4);
+ usdot_prodv4hi_exp = usdot_prodv4hi_scalar (hu1, hs1, 4);
+ if (usdot_prodv4hi_ref != usdot_prodv4hi_exp)
+ abort ();
+
+ udot_prodv4hi_ref = udot_prodv4hi (hu1, hu2, 5);
+ udot_prodv4hi_exp = udot_prodv4hi_scalar (hu1, hu2, 5);
+ if (udot_prodv4hi_ref != udot_prodv4hi_exp)
+ abort ();
+
+ sdot_prodv4hi_ref = sdot_prodv4hi (hs1, hs2, 6);
+ sdot_prodv4hi_exp = sdot_prodv4hi_scalar (hs1, hs2, 6);
+ if (sdot_prodv4hi_ref != sdot_prodv4hi_exp)
+ abort ();
+}
--
2.31.1
^ permalink raw reply [flat|nested] 2+ messages in thread
* [PATCH 2/2] Extend usdot_prodv*qi with vpmaddwd when AVXVNNI/AVX512VNNI is not available.
2024-04-28 5:58 [PATCH 1/2] [x86] Support dot_prod optabs for 64-bit vector liuhongt
@ 2024-04-28 5:58 ` liuhongt
0 siblings, 0 replies; 2+ messages in thread
From: liuhongt @ 2024-04-28 5:58 UTC (permalink / raw)
To: gcc-patches; +Cc: crazylht, hjl.tools
Bootstrapped and regtested on x86_64-pc-linux-gnu{-m32,}.
Ready to push to trunk.
gcc/ChangeLog:
* config/i386/sse.md (usdot_prodv*qi): Extend to VI1_AVX512
with vpmaddwd when avxvnni/avx512vnni is not available.
---
gcc/config/i386/sse.md | 55 +++++++++++++++++++++++++++++++-----------
1 file changed, 41 insertions(+), 14 deletions(-)
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index 1bf50726e83..f57f36ae380 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -29955,21 +29955,48 @@ (define_insn "vpshldv_<mode>_maskz_1"
(define_expand "usdot_prod<mode>"
[(match_operand:<ssedvecmode> 0 "register_operand")
- (match_operand:VI1_AVX512VNNI 1 "register_operand")
- (match_operand:VI1_AVX512VNNI 2 "register_operand")
+ (match_operand:VI1_AVX512 1 "register_operand")
+ (match_operand:VI1_AVX512 2 "register_operand")
(match_operand:<ssedvecmode> 3 "register_operand")]
- "((<MODE_SIZE> == 64 && TARGET_EVEX512)
- || ((TARGET_AVX512VNNI && TARGET_AVX512VL)
- || TARGET_AVXVNNI))"
-{
- operands[1] = lowpart_subreg (<ssedvecmode>mode,
- force_reg (<MODE>mode, operands[1]),
- <MODE>mode);
- operands[2] = lowpart_subreg (<ssedvecmode>mode,
- force_reg (<MODE>mode, operands[2]),
- <MODE>mode);
- emit_insn (gen_vpdpbusd_<ssedvecmodelower> (operands[0], operands[3],
- operands[1], operands[2]));
+ "TARGET_SSE2"
+{
+ if (<MODE_SIZE> == 64
+ ? TARGET_AVX512VNNI
+ : ((TARGET_AVX512VNNI && TARGET_AVX512VL) || TARGET_AVXVNNI))
+ {
+ operands[1] = lowpart_subreg (<ssedvecmode>mode,
+ force_reg (<MODE>mode, operands[1]),
+ <MODE>mode);
+ operands[2] = lowpart_subreg (<ssedvecmode>mode,
+ force_reg (<MODE>mode, operands[2]),
+ <MODE>mode);
+ emit_insn (gen_vpdpbusd_<ssedvecmodelower> (operands[0], operands[3],
+ operands[1], operands[2]));
+ }
+ else
+ {
+ /* Emulate with vpdpwssd. */
+ rtx op1_lo = gen_reg_rtx (<sseunpackmode>mode);
+ rtx op1_hi = gen_reg_rtx (<sseunpackmode>mode);
+ rtx op2_lo = gen_reg_rtx (<sseunpackmode>mode);
+ rtx op2_hi = gen_reg_rtx (<sseunpackmode>mode);
+
+ emit_insn (gen_vec_unpacku_lo_<mode> (op1_lo, operands[1]));
+ emit_insn (gen_vec_unpacks_lo_<mode> (op2_lo, operands[2]));
+ emit_insn (gen_vec_unpacku_hi_<mode> (op1_hi, operands[1]));
+ emit_insn (gen_vec_unpacks_hi_<mode> (op2_hi, operands[2]));
+
+ rtx res1 = gen_reg_rtx (<ssedvecmode>mode);
+ rtx res2 = gen_reg_rtx (<ssedvecmode>mode);
+ rtx sum = gen_reg_rtx (<ssedvecmode>mode);
+
+ emit_move_insn (sum, CONST0_RTX (<ssedvecmode>mode));
+ emit_insn (gen_sdot_prod<sseunpackmodelower> (res1, op1_lo,
+ op2_lo, sum));
+ emit_insn (gen_sdot_prod<sseunpackmodelower> (res2, op1_hi,
+ op2_hi, operands[3]));
+ emit_insn (gen_add<ssedvecmodelower>3 (operands[0], res1, res2));
+ }
DONE;
})
--
2.31.1
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2024-04-28 6:00 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-04-28 5:58 [PATCH 1/2] [x86] Support dot_prod optabs for 64-bit vector liuhongt
2024-04-28 5:58 ` [PATCH 2/2] Extend usdot_prodv*qi with vpmaddwd when AVXVNNI/AVX512VNNI is not available liuhongt
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).