public inbox for gcc-cvs@sourceware.org
* [gcc r13-4024] middle-end: Add optimized float addsub without needing VEC_PERM_EXPR.
@ 2022-11-14 17:46 Tamar Christina
From: Tamar Christina @ 2022-11-14 17:46 UTC
  To: gcc-cvs

https://gcc.gnu.org/g:b2bb611d90d01f64a2456c29de2a2ca1211ac134

commit r13-4024-gb2bb611d90d01f64a2456c29de2a2ca1211ac134
Author: Tamar Christina <tamar.christina@arm.com>
Date:   Mon Nov 14 15:42:42 2022 +0000

    middle-end: Add optimized float addsub without needing VEC_PERM_EXPR.
    
    For IEEE 754 floating point formats we can replace a sequence of alternating
    +/- with an fneg of a wider type followed by an fadd.  This eliminates the
    need for a permutation.  This patch adds a match.pd rule to recognize and do
    this rewriting.
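    
    A minimal sketch of the underlying bit trick (assuming a little-endian
    target with IEEE 754 binary32/binary64): negating the wider element only
    flips its top bit, which is the sign bit of the odd-indexed narrower
    element, so a plain vector add afterwards adds the even lanes and
    subtracts the odd lanes.
    
    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>
    
    int main (void)
    {
      /* Two floats viewed as the bits of one double-wide element.  */
      float pair[2] = { 1.0f, 2.0f };
      uint64_t bits;
      memcpy (&bits, pair, sizeof bits);
      /* Flipping the top bit is what fneg does on the wider element.  */
      bits ^= UINT64_C (1) << 63;
      memcpy (pair, &bits, sizeof bits);
      /* Prints 1.000000 -2.000000: only the odd-indexed float was negated.  */
      printf ("%f %f\n", pair[0], pair[1]);
      return 0;
    }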
    
    For
    
    void f (float *restrict a, float *restrict b, float *res, int n)
    {
       for (int i = 0; i < (n & -4); i+=2)
        {
          res[i+0] = a[i+0] + b[i+0];
          res[i+1] = a[i+1] - b[i+1];
        }
    }
    
    we generate:
    
    .L3:
            ldr     q1, [x1, x3]
            ldr     q0, [x0, x3]
            fneg    v1.2d, v1.2d
            fadd    v0.4s, v0.4s, v1.4s
            str     q0, [x2, x3]
            add     x3, x3, 16
            cmp     x3, x4
            bne     .L3
    
    now instead of:
    
    .L3:
            ldr     q1, [x0, x3]
            ldr     q2, [x1, x3]
            fadd    v0.4s, v1.4s, v2.4s
            fsub    v1.4s, v1.4s, v2.4s
            tbl     v0.16b, {v0.16b - v1.16b}, v3.16b
            str     q0, [x2, x3]
            add     x3, x3, 16
            cmp     x3, x4
            bne     .L3
    
    Thanks to George Steed for the idea.
    
    gcc/ChangeLog:
    
            * generic-match-head.cc: Include langhooks.h.
            * gimple-match-head.cc: Likewise.
            * match.pd: Add fneg/fadd rule.
    
    gcc/testsuite/ChangeLog:
    
            * gcc.target/aarch64/simd/addsub_1.c: New test.
            * gcc.target/aarch64/sve/addsub_1.c: New test.

Diff:
---
 gcc/generic-match-head.cc                        |  1 +
 gcc/gimple-match-head.cc                         |  1 +
 gcc/match.pd                                     | 59 ++++++++++++++++++++++++
 gcc/testsuite/gcc.target/aarch64/simd/addsub_1.c | 56 ++++++++++++++++++++++
 gcc/testsuite/gcc.target/aarch64/sve/addsub_1.c  | 52 +++++++++++++++++++++
 5 files changed, 169 insertions(+)

diff --git a/gcc/generic-match-head.cc b/gcc/generic-match-head.cc
index cb0fbd32fa6..aed4dcc8c3d 100644
--- a/gcc/generic-match-head.cc
+++ b/gcc/generic-match-head.cc
@@ -39,6 +39,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "dbgcnt.h"
 #include "tm.h"
 #include "tree-eh.h"
+#include "langhooks.h"
 
 /* Routine to determine if the types T1 and T2 are effectively
    the same for GENERIC.  If T1 or T2 is not a type, the test
diff --git a/gcc/gimple-match-head.cc b/gcc/gimple-match-head.cc
index 4c80d77f8ba..9986e3479f9 100644
--- a/gcc/gimple-match-head.cc
+++ b/gcc/gimple-match-head.cc
@@ -46,6 +46,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "dbgcnt.h"
 #include "tm.h"
 #include "gimple-range.h"
+#include "langhooks.h"
 
 /* Forward declarations of the private auto-generated matchers.
    They expect valueized operands in canonical order and do not
diff --git a/gcc/match.pd b/gcc/match.pd
index 4d0898ccdcb..421278df007 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -7918,6 +7918,65 @@ and,
   (simplify (reduc (op @0 VECTOR_CST@1))
     (op (reduc:type @0) (reduc:type @1))))
 
+/* Simplify vector floating point operations of alternating add/sub pairs
+   into an fneg of a wider element type followed by a normal add.
+   Under IEEE 754 the fneg of the wider type negates the second narrow
+   element of each wide element, so the following add gives an add of the
+   even-indexed elements and a sub of the odd-indexed elements.  */
+(simplify
+ (vec_perm (plus:c @0 @1) (minus @0 @1) VECTOR_CST@2)
+ (if (!VECTOR_INTEGER_TYPE_P (type)
+      && !FLOAT_WORDS_BIG_ENDIAN)
+  (with
+   {
+     /* Build a vector of integers from the tree mask.  */
+     vec_perm_builder builder;
+     if (!tree_to_vec_perm_builder (&builder, @2))
+       return NULL_TREE;
+
+     /* Create a vec_perm_indices for the integer vector.  */
+     poly_uint64 nelts = TYPE_VECTOR_SUBPARTS (type);
+     vec_perm_indices sel (builder, 2, nelts);
+   }
+   (if (sel.series_p (0, 2, 0, 2))
+    (with
+     {
+       machine_mode vec_mode = TYPE_MODE (type);
+       machine_mode wide_mode;
+       if (!GET_MODE_WIDER_MODE (vec_mode).exists (&wide_mode)
+	   || !VECTOR_MODE_P (wide_mode)
+	   || (GET_MODE_UNIT_BITSIZE (vec_mode) * 2
+		!= GET_MODE_UNIT_BITSIZE (wide_mode)))
+	 return NULL_TREE;
+
+       tree stype = lang_hooks.types.type_for_mode (GET_MODE_INNER (wide_mode),
+						    TYPE_UNSIGNED (type));
+       if (TYPE_MODE (stype) == BLKmode)
+	 return NULL_TREE;
+       tree ntype = build_vector_type_for_mode (stype, wide_mode);
+       if (!VECTOR_TYPE_P (ntype))
+	 return NULL_TREE;
+
+       /* The format has to be a non-extended ieee format.  */
+       const struct real_format *fmt_old = FLOAT_MODE_FORMAT (vec_mode);
+       const struct real_format *fmt_new = FLOAT_MODE_FORMAT (wide_mode);
+       if (fmt_old == NULL || fmt_new == NULL)
+	 return NULL_TREE;
+
+       /* If the target doesn't support v1xx vectors, try using scalar mode xx
+	  instead.  */
+       if (known_eq (GET_MODE_NUNITS (wide_mode), 1)
+	   && !target_supports_op_p (ntype, NEGATE_EXPR, optab_vector))
+	 ntype = stype;
+     }
+     (if (fmt_new->signbit_rw
+	     == fmt_old->signbit_rw + GET_MODE_UNIT_BITSIZE (vec_mode)
+	  && fmt_new->signbit_rw == fmt_new->signbit_ro
+	  && targetm.can_change_mode_class (TYPE_MODE (ntype), TYPE_MODE (type), ALL_REGS)
+	  && ((optimize_vectors_before_lowering_p () && VECTOR_TYPE_P (ntype))
+	      || target_supports_op_p (ntype, NEGATE_EXPR, optab_vector)))
+      (plus (view_convert:type (negate (view_convert:ntype @1))) @0)))))))
+
 (simplify
  (vec_perm @0 @1 VECTOR_CST@2)
  (with
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/addsub_1.c b/gcc/testsuite/gcc.target/aarch64/simd/addsub_1.c
new file mode 100644
index 00000000000..1fb91a34c42
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/addsub_1.c
@@ -0,0 +1,56 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_2a_fp16_neon_ok } */
+/* { dg-options "-Ofast" } */
+/* { dg-add-options arm_v8_2a_fp16_neon } */
+/* { dg-final { check-function-bodies "**" "" "" { target { le } } } } */
+
+#pragma GCC target "+nosve"
+
+/* 
+** f1:
+** ...
+**	fneg	v[0-9]+.2d, v[0-9]+.2d
+**	fadd	v[0-9]+.4s, v[0-9]+.4s, v[0-9]+.4s
+** ...
+*/
+void f1 (float *restrict a, float *restrict b, float *res, int n)
+{
+   for (int i = 0; i < (n & -4); i+=2)
+    {
+      res[i+0] = a[i+0] + b[i+0];
+      res[i+1] = a[i+1] - b[i+1];
+    }
+}
+
+/* 
+** d1:
+** ...
+** 	fneg	v[0-9]+.4s, v[0-9]+.4s
+** 	fadd	v[0-9]+.8h, v[0-9]+.8h, v[0-9]+.8h
+** ...
+*/
+void d1 (_Float16 *restrict a, _Float16 *restrict b, _Float16 *res, int n)
+{
+   for (int i = 0; i < (n & -8); i+=2)
+    {
+      res[i+0] = a[i+0] + b[i+0];
+      res[i+1] = a[i+1] - b[i+1];
+    }
+}
+
+/* 
+** e1:
+** ...
+** 	fadd	v[0-9]+.2d, v[0-9]+.2d, v[0-9]+.2d
+** 	fsub	v[0-9]+.2d, v[0-9]+.2d, v[0-9]+.2d
+** 	ins	v[0-9]+.d\[1\], v[0-9]+.d\[1\]
+** ...
+*/
+void e1 (double *restrict a, double *restrict b, double *res, int n)
+{
+   for (int i = 0; i < (n & -4); i+=2)
+    {
+      res[i+0] = a[i+0] + b[i+0];
+      res[i+1] = a[i+1] - b[i+1];
+    }
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/addsub_1.c b/gcc/testsuite/gcc.target/aarch64/sve/addsub_1.c
new file mode 100644
index 00000000000..ea7f9d9db2c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/addsub_1.c
@@ -0,0 +1,52 @@
+/* { dg-do compile } */
+/* { dg-options "-Ofast" } */
+/* { dg-final { check-function-bodies "**" "" "" { target { le } } } } */
+
+/*
+** f1:
+** ...
+** 	fneg	z[0-9]+.d, p[0-9]+/m, z[0-9]+.d
+** 	fadd	z[0-9]+.s, z[0-9]+.s, z[0-9]+.s
+** ...
+*/
+void f1 (float *restrict a, float *restrict b, float *res, int n)
+{
+   for (int i = 0; i < (n & -4); i+=2)
+    {
+      res[i+0] = a[i+0] + b[i+0];
+      res[i+1] = a[i+1] - b[i+1];
+    }
+}
+
+/* 
+** d1:
+** ...
+** 	fneg	z[0-9]+.s, p[0-9]+/m, z[0-9]+.s
+** 	fadd	z[0-9]+.h, z[0-9]+.h, z[0-9]+.h
+** ...
+*/ 
+void d1 (_Float16 *restrict a, _Float16 *restrict b, _Float16 *res, int n)
+{
+   for (int i = 0; i < (n & -8); i+=2)
+    {
+      res[i+0] = a[i+0] + b[i+0];
+      res[i+1] = a[i+1] - b[i+1];
+    }
+}
+
+/*
+** e1:
+** ...
+** 	fsub	z[0-9]+.d, z[0-9]+.d, z[0-9]+.d
+** 	movprfx	z[0-9]+.d, p[0-9]+/m, z[0-9]+.d
+** 	fadd	z[0-9]+.d, p[0-9]+/m, z[0-9]+.d, z[0-9]+.d
+** ...
+*/
+void e1 (double *restrict a, double *restrict b, double *res, int n)
+{
+   for (int i = 0; i < (n & -4); i+=2)
+    {
+      res[i+0] = a[i+0] + b[i+0];
+      res[i+1] = a[i+1] - b[i+1];
+    }
+}
