[gcc(refs/users/tnfchris/heads/gcc-14-early-break)] Add AArch32 Advanced SIMD cbranch implementation
From: Tamar Christina @ 2023-06-28 13:33 UTC
  To: gcc-cvs

https://gcc.gnu.org/g:c5214656ac4043f8611ec80985682ecf0c8697d4

commit c5214656ac4043f8611ec80985682ecf0c8697d4
Author: Tamar Christina <tamar.christina@arm.com>
Date:   Wed Jun 28 14:23:10 2023 +0100

    Add AArch32 Advanced SIMD cbranch implementation
    
    This adds an implementation of the conditional branch optab for AArch32.
    
    For example:
    
    void f1 ()
    {
      for (int i = 0; i < N; i++)
        {
          b[i] += a[i];
          if (a[i] > 0)
            break;
        }
    }
    
    For 128-bit vectors we generate:
    
            vcgt.s32        q8, q9, #0
            vpmax.u32       d7, d16, d17
            vpmax.u32       d7, d7, d7
            vmov    r3, s14 @ int
            cmp     r3, #0
    
    and for 64-bit vectors we can omit one vpmax, as we still need to compress
    to 32 bits.
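
    Schematically, for 64-bit vectors the expected sequence is then (an
    illustrative sketch only; the registers shown are hypothetical):

            vcgt.s32        d7, d6, #0
            vpmax.u32       d7, d7, d7
            vmov    r3, s14 @ int
            cmp     r3, #0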
    
    Bootstrapped and regtested on arm-none-linux-gnueabihf with no issues.
    
    gcc/ChangeLog:
    
            * config/arm/neon.md (cbranch<mode>4): New.
    
    gcc/testsuite/ChangeLog:
    
            * lib/target-supports.exp (vect_early_break): Add AArch32.
            * gcc.target/arm/vect-early-break-cbranch.c: New test.

Diff:
---
 gcc/config/arm/neon.md                             |  39 ++++++
 .../gcc.target/arm/vect-early-break-cbranch.c      | 137 +++++++++++++++++++++
 gcc/testsuite/lib/target-supports.exp              |   1 +
 3 files changed, 177 insertions(+)

diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
index d213369ffc3..130efbc37cf 100644
--- a/gcc/config/arm/neon.md
+++ b/gcc/config/arm/neon.md
@@ -408,6 +408,45 @@
   [(set_attr "type" "neon_store1_one_lane<q>,neon_to_gp<q>")]
 )
 
+;; Patterns comparing two vectors and conditionally jumping.
+;; Advanced SIMD lacks a vector != comparison, but this is a quite common
+;; operation.  To avoid paying the penalty for inverting == we can map our
+;; any comparisons to all, i.e. any(~x) => all(x).
+;;
+;; However, unlike the AArch64 version, we can't optimize this further: the
+;; chain is too long for combine because these are unspecs, so it doesn't
+;; fold the operation into something simpler.
+(define_expand "cbranch<mode>4"
+  [(set (pc) (if_then_else
+	      (match_operator 0 "expandable_comparison_operator"
+	       [(match_operand:VDQI 1 "register_operand")
+	        (match_operand:VDQI 2 "zero_operand")])
+	      (label_ref (match_operand 3 "" ""))
+	      (pc)))]
+  "TARGET_NEON"
+{
+  rtx mask = operands[1];
+
+  /* For 128-bit vectors we need an additional reduction.  */
+  if (known_eq (128, GET_MODE_BITSIZE (<MODE>mode)))
+    {
+      /* Always reduce the 128-bit vector as a V4SI.  */
+      mask = gen_reg_rtx (V2SImode);
+      rtx low = gen_reg_rtx (V2SImode);
+      rtx high = gen_reg_rtx (V2SImode);
+      emit_insn (gen_neon_vget_lowv4si (low, operands[1]));
+      emit_insn (gen_neon_vget_highv4si (high, operands[1]));
+      emit_insn (gen_neon_vpumaxv2si (mask, low, high));
+    }
+
+  emit_insn (gen_neon_vpumaxv2si (mask, mask, mask));
+
+  rtx val = gen_reg_rtx (SImode);
+  emit_move_insn (val, gen_lowpart (SImode, mask));
+  emit_jump_insn (gen_cbranch_cc (operands[0], val, const0_rtx, operands[3]));
+  DONE;
+})
+
 ;; This pattern is renamed from "vec_extract<mode><V_elem_l>" to
 ;; "neon_vec_extract<mode><V_elem_l>" and this pattern is called
 ;; by define_expand in vec-common.md file.
diff --git a/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c b/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c
new file mode 100644
index 00000000000..2c05aa10d26
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c
@@ -0,0 +1,137 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-require-effective-target arm32 } */
+/* { dg-options "-O3 -march=armv8-a+simd -mfpu=auto -mfloat-abi=hard" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#define N 640
+int a[N] = {0};
+int b[N] = {0};
+
+/*
+** f1:
+**	...
+**	vcgt.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f1 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] > 0)
+	break;
+    }
+}
+
+/*
+** f2:
+**	...
+**	vcge.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f2 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] >= 0)
+	break;
+    }
+}
+
+/*
+** f3:
+**	...
+**	vceq.i32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f3 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] == 0)
+	break;
+    }
+}
+
+/*
+** f4:
+**	...
+**	vceq.i32	q[0-9]+, q[0-9]+, #0
+**	vmvn	q[0-9]+, q[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f4 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] != 0)
+	break;
+    }
+}
+
+/*
+** f5:
+**	...
+**	vclt.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f5 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] < 0)
+	break;
+    }
+}
+
+/*
+** f6:
+**	...
+**	vcle.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f6 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] <= 0)
+	break;
+    }
+}
+
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index 5516188dc0a..8f58671e6cf 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -3784,6 +3784,7 @@ proc check_effective_target_vect_early_break { } {
     return [check_cached_effective_target_indexed vect_early_break {
       expr {
 	[istarget aarch64*-*-*]
+	|| [check_effective_target_arm_neon_ok]
 	}}]
 }
 # Return 1 if the target supports hardware vectorization of complex additions of
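
For readers tracing the any(~x) => all(x) comment in the neon.md hunk
above: the reduction that cbranch<mode>4 expands to corresponds roughly
to the following scalar arm_neon.h sequence for the 128-bit "any
element > 0" case.  This is an illustrative sketch only, not part of
the patch, and the helper name is hypothetical:

  #include <arm_neon.h>

  /* Illustrative equivalent of the expansion: compare against zero,
     then collapse the mask with two pairwise maxima so that lane 0
     holds the maximum of all four lanes, and test that lane.  */
  int
  any_gt_zero (int32x4_t v)
  {
    uint32x4_t mask = vcgtq_s32 (v, vdupq_n_s32 (0));  /* vcgt.s32  */
    uint32x2_t m = vpmax_u32 (vget_low_u32 (mask),     /* vpmax.u32 */
                              vget_high_u32 (mask));
    m = vpmax_u32 (m, m);                              /* vpmax.u32 */
    return vget_lane_u32 (m, 0) != 0;                  /* vmov; cmp */
  }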
