public inbox for gcc-patches@gcc.gnu.org
 help / color / mirror / Atom feed
* [PATCH 20/21]Arm: Add Advanced SIMD cbranch implementation
@ 2023-12-29 14:42 Tamar Christina
  2024-01-04 11:06 ` Tamar Christina
  0 siblings, 1 reply; 6+ messages in thread
From: Tamar Christina @ 2023-12-29 14:42 UTC (permalink / raw)
  To: gcc-patches
  Cc: nd, Ramana.Radhakrishnan, Richard.Earnshaw, nickc, Kyrylo.Tkachov

[-- Attachment #1: Type: text/plain, Size: 11342 bytes --]

Hi All,

This adds an implementation for conditional branch optab for AArch32.
The previous version only allowed operand 0 but it looks like cbranch
expansion does not check with the target and so we have to implement all.

I therefore did not commit it.  This is a larger version. 

For e.g.

void f1 ()
{
  for (int i = 0; i < N; i++)
    {
      b[i] += a[i];
      if (a[i] > 0)
	break;
    }
}

For 128-bit vectors we generate:

        vcgt.s32        q8, q9, #0
        vpmax.u32       d7, d16, d17
        vpmax.u32       d7, d7, d7
        vmov    r3, s14 @ int
        cmp     r3, #0

and for 64-bit vectors we can omit one vpmax as we still need to compress to
32-bits.

Bootstrapped Regtested on arm-none-linux-gnueabihf and no issues.

Ok for master?

Thanks,
Tamar

gcc/ChangeLog:

	* config/arm/neon.md (cbranch<mode>4): New.

gcc/testsuite/ChangeLog:

	* gcc.dg/vect/vect-early-break_2.c: Skip Arm.
	* gcc.dg/vect/vect-early-break_7.c: Likewise.
	* gcc.dg/vect/vect-early-break_75.c: Likewise.
	* gcc.dg/vect/vect-early-break_77.c: Likewise.
	* gcc.dg/vect/vect-early-break_82.c: Likewise.
	* gcc.dg/vect/vect-early-break_88.c: Likewise.
	* lib/target-supports.exp (add_options_for_vect_early_break,
	check_effective_target_vect_early_break_hw,
	check_effective_target_vect_early_break): Support AArch32.
	* gcc.target/arm/vect-early-break-cbranch.c: New test.

--- inline copy of patch -- 
diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
index d213369ffc38fb88ad0357d848cc7da5af73bab7..0f088a51d31e6882bc0fabbad99862b8b465dd22 100644
--- a/gcc/config/arm/neon.md
+++ b/gcc/config/arm/neon.md
@@ -408,6 +408,54 @@ (define_insn "vec_extract<mode><V_elem_l>"
   [(set_attr "type" "neon_store1_one_lane<q>,neon_to_gp<q>")]
 )
 
+;; Patterns comparing two vectors and conditionally jumping.
+;; Advanced SIMD lacks a vector != comparison, but this is a quite common
+;; operation.  To not pay the penalty for inverting == we can map our any
+;; comparisons to all i.e. any(~x) => all(x).
+;;
+;; However unlike the AArch64 version, we can't optimize this further as the
+;; chain is too long for combine due to these being unspecs so it doesn't fold
+;; the operation to something simpler.
+(define_expand "cbranch<mode>4"
+  [(set (pc) (if_then_else
+	      (match_operator 0 "expandable_comparison_operator"
+	       [(match_operand:VDQI 1 "register_operand")
+	        (match_operand:VDQI 2 "reg_or_zero_operand")])
+	      (label_ref (match_operand 3 "" ""))
+	      (pc)))]
+  "TARGET_NEON"
+{
+  rtx mask = operands[1];
+
+  /* If comparing against a non-zero vector we have to do a comparison first
+     so we can have a != 0 comparison with the result.  */
+  if (operands[2] != CONST0_RTX (<MODE>mode))
+    {
+      mask = gen_reg_rtx (<MODE>mode);
+      emit_insn (gen_xor<mode>3 (mask, operands[1], operands[2]));
+    }
+
+  /* For 128-bit vectors we need an additional reduction.  */
+  if (known_eq (128, GET_MODE_BITSIZE (<MODE>mode)))
+    {
+      /* Always reduce using a V4SI.  */
+      mask = gen_reg_rtx (V2SImode);
+      rtx low = gen_reg_rtx (V2SImode);
+      rtx high = gen_reg_rtx (V2SImode);
+      rtx op1 = simplify_gen_subreg (V4SImode, operands[1], <MODE>mode, 0);
+      emit_insn (gen_neon_vget_lowv4si (low, op1));
+      emit_insn (gen_neon_vget_highv4si (high, op1));
+      emit_insn (gen_neon_vpumaxv2si (mask, low, high));
+    }
+
+  emit_insn (gen_neon_vpumaxv2si (mask, mask, mask));
+
+  rtx val = gen_reg_rtx (SImode);
+  emit_move_insn (val, gen_lowpart (SImode, mask));
+  emit_jump_insn (gen_cbranch_cc (operands[0], val, const0_rtx, operands[3]));
+  DONE;
+})
+
 ;; This pattern is renamed from "vec_extract<mode><V_elem_l>" to
 ;; "neon_vec_extract<mode><V_elem_l>" and this pattern is called
 ;; by define_expand in vec-common.md file.
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c
index 5c32bf94409e9743e72429985ab3bf13aab8f2c1..dec0b492ab883de6e02944a95fd554a109a68a39 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c
@@ -5,7 +5,7 @@
 
 /* { dg-additional-options "-Ofast" } */
 
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
 
 #include <complex.h>
 
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c
index 8c86c5034d7522b3733543fb384a23c5d6ed0fcf..d218a0686719fee4c167684dcf26402851b53260 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c
@@ -5,7 +5,7 @@
 
 /* { dg-additional-options "-Ofast" } */
 
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
 
 #include <complex.h>
 
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c
index ed27f8635730ff0d8803517c72693625a2feddef..9dcc3372acd657458df8d94ce36c4bd96f02fd52 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c
@@ -3,7 +3,7 @@
 /* { dg-require-effective-target vect_int } */
 
 /* { dg-additional-options "-O3" } */
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "x86_64-*-* i?86-*-*" } } } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "x86_64-*-* i?86-*-* arm*-*-*" } } } } */
 
 #include <limits.h>
 #include <assert.h>
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c
index 225106aab0a3efc7536de6f6e45bc6ff16210ea8..9fa7e6948ebfb5f1723833653fd6ad1fc65f4e8e 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c
@@ -3,7 +3,7 @@
 /* { dg-require-effective-target vect_int } */
 
 /* { dg-additional-options "-O3" } */
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
 
 #include "tree-vect.h"
 
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c
index 0e9b2d8d385c556063a3c6fcb14383317b056a79..7cd21d33485f3abb823e1943c87e9481c41fd2c3 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c
@@ -5,7 +5,7 @@
 
 /* { dg-additional-options "-Ofast" } */
 
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
 
 #include <complex.h>
 
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c
index b392dd46553994d813761da41c42989a79b90119..59ed57c5fb5f3e8197fc20058eeb0a81a55815cc 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c
@@ -3,7 +3,7 @@
 /* { dg-require-effective-target vect_int } */
 
 /* { dg-additional-options "-Ofast --param vect-partial-vector-usage=2" } */
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
 
 #include "tree-vect.h"
 
diff --git a/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c b/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c
new file mode 100644
index 0000000000000000000000000000000000000000..0e9a39d231fdf4cb56590945e7cedfabd11d39b5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c
@@ -0,0 +1,138 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target vect_early_break } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-require-effective-target arm32 } */
+/* { dg-options "-O3 -march=armv8-a+simd -mfpu=auto -mfloat-abi=hard  -fno-schedule-insns -fno-reorder-blocks -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#define N 640
+int a[N] = {0};
+int b[N] = {0};
+
+/* 
+** f1:
+**	...
+**	vcgt.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f1 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] > 0)
+	break;
+    }
+}
+
+/*
+** f2:
+**	...
+**	vcge.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f2 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] >= 0)
+	break;
+    }
+}
+
+/*
+** f3:
+**	...
+**	vceq.i32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f3 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] == 0)
+	break;
+    }
+}
+
+/*
+** f4:
+**	...
+**	vceq.i32	q[0-9]+, q[0-9]+, #0
+**	vmvn	q[0-9]+, q[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f4 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] != 0)
+	break;
+    }
+}
+
+/*
+** f5:
+**	...
+**	vclt.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f5 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] < 0)
+	break;
+    }
+}
+
+/*
+** f6:
+**	...
+**	vcle.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f6 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] <= 0)
+	break;
+    }
+}
+
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index 05fc417877bcd658931061b7245eb8ba5abd2e09..24a937dbb59b5723af038bd9e0b89369595fcf87 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -4059,6 +4059,7 @@ proc check_effective_target_vect_early_break { } {
     return [check_cached_effective_target_indexed vect_early_break {
       expr {
 	[istarget aarch64*-*-*]
+	|| [check_effective_target_arm_v8_neon_ok]
 	|| [check_effective_target_sse4]
 	}}]
 }
@@ -4072,6 +4073,7 @@ proc check_effective_target_vect_early_break_hw { } {
     return [check_cached_effective_target_indexed vect_early_break_hw {
       expr {
 	[istarget aarch64*-*-*]
+	|| [check_effective_target_arm_v8_neon_hw]
 	|| [check_sse4_hw_available]
 	}}]
 }
@@ -4081,6 +4083,11 @@ proc add_options_for_vect_early_break { flags } {
 	return "$flags"
     }
 
+    if { [check_effective_target_arm_v8_neon_ok] } {
+	global et_arm_v8_neon_flags
+	return "$flags $et_arm_v8_neon_flags -march=armv8-a"
+    }
+
     if { [check_effective_target_sse4] } {
 	return "$flags -msse4.1"
     }




-- 

[-- Attachment #2: rb17512.patch --]
[-- Type: text/plain, Size: 9932 bytes --]

diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
index d213369ffc38fb88ad0357d848cc7da5af73bab7..0f088a51d31e6882bc0fabbad99862b8b465dd22 100644
--- a/gcc/config/arm/neon.md
+++ b/gcc/config/arm/neon.md
@@ -408,6 +408,54 @@ (define_insn "vec_extract<mode><V_elem_l>"
   [(set_attr "type" "neon_store1_one_lane<q>,neon_to_gp<q>")]
 )
 
+;; Patterns comparing two vectors and conditionally jumping.
+;; Advanced SIMD lacks a vector != comparison, but this is a quite common
+;; operation.  To not pay the penalty for inverting == we can map our any
+;; comparisons to all i.e. any(~x) => all(x).
+;;
+;; However unlike the AArch64 version, we can't optimize this further as the
+;; chain is too long for combine due to these being unspecs so it doesn't fold
+;; the operation to something simpler.
+(define_expand "cbranch<mode>4"
+  [(set (pc) (if_then_else
+	      (match_operator 0 "expandable_comparison_operator"
+	       [(match_operand:VDQI 1 "register_operand")
+	        (match_operand:VDQI 2 "reg_or_zero_operand")])
+	      (label_ref (match_operand 3 "" ""))
+	      (pc)))]
+  "TARGET_NEON"
+{
+  rtx mask = operands[1];
+
+  /* If comparing against a non-zero vector we have to do a comparison first
+     so we can have a != 0 comparison with the result.  */
+  if (operands[2] != CONST0_RTX (<MODE>mode))
+    {
+      mask = gen_reg_rtx (<MODE>mode);
+      emit_insn (gen_xor<mode>3 (mask, operands[1], operands[2]));
+    }
+
+  /* For 128-bit vectors we need an additional reduction.  */
+  if (known_eq (128, GET_MODE_BITSIZE (<MODE>mode)))
+    {
+      /* Always reduce using a V4SI.  */
+      mask = gen_reg_rtx (V2SImode);
+      rtx low = gen_reg_rtx (V2SImode);
+      rtx high = gen_reg_rtx (V2SImode);
+      rtx op1 = simplify_gen_subreg (V4SImode, operands[1], <MODE>mode, 0);
+      emit_insn (gen_neon_vget_lowv4si (low, op1));
+      emit_insn (gen_neon_vget_highv4si (high, op1));
+      emit_insn (gen_neon_vpumaxv2si (mask, low, high));
+    }
+
+  emit_insn (gen_neon_vpumaxv2si (mask, mask, mask));
+
+  rtx val = gen_reg_rtx (SImode);
+  emit_move_insn (val, gen_lowpart (SImode, mask));
+  emit_jump_insn (gen_cbranch_cc (operands[0], val, const0_rtx, operands[3]));
+  DONE;
+})
+
 ;; This pattern is renamed from "vec_extract<mode><V_elem_l>" to
 ;; "neon_vec_extract<mode><V_elem_l>" and this pattern is called
 ;; by define_expand in vec-common.md file.
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c
index 5c32bf94409e9743e72429985ab3bf13aab8f2c1..dec0b492ab883de6e02944a95fd554a109a68a39 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c
@@ -5,7 +5,7 @@
 
 /* { dg-additional-options "-Ofast" } */
 
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
 
 #include <complex.h>
 
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c
index 8c86c5034d7522b3733543fb384a23c5d6ed0fcf..d218a0686719fee4c167684dcf26402851b53260 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c
@@ -5,7 +5,7 @@
 
 /* { dg-additional-options "-Ofast" } */
 
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
 
 #include <complex.h>
 
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c
index ed27f8635730ff0d8803517c72693625a2feddef..9dcc3372acd657458df8d94ce36c4bd96f02fd52 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c
@@ -3,7 +3,7 @@
 /* { dg-require-effective-target vect_int } */
 
 /* { dg-additional-options "-O3" } */
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "x86_64-*-* i?86-*-*" } } } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "x86_64-*-* i?86-*-* arm*-*-*" } } } } */
 
 #include <limits.h>
 #include <assert.h>
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c
index 225106aab0a3efc7536de6f6e45bc6ff16210ea8..9fa7e6948ebfb5f1723833653fd6ad1fc65f4e8e 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c
@@ -3,7 +3,7 @@
 /* { dg-require-effective-target vect_int } */
 
 /* { dg-additional-options "-O3" } */
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
 
 #include "tree-vect.h"
 
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c
index 0e9b2d8d385c556063a3c6fcb14383317b056a79..7cd21d33485f3abb823e1943c87e9481c41fd2c3 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c
@@ -5,7 +5,7 @@
 
 /* { dg-additional-options "-Ofast" } */
 
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
 
 #include <complex.h>
 
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c
index b392dd46553994d813761da41c42989a79b90119..59ed57c5fb5f3e8197fc20058eeb0a81a55815cc 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c
@@ -3,7 +3,7 @@
 /* { dg-require-effective-target vect_int } */
 
 /* { dg-additional-options "-Ofast --param vect-partial-vector-usage=2" } */
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
 
 #include "tree-vect.h"
 
diff --git a/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c b/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c
new file mode 100644
index 0000000000000000000000000000000000000000..0e9a39d231fdf4cb56590945e7cedfabd11d39b5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c
@@ -0,0 +1,138 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target vect_early_break } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-require-effective-target arm32 } */
+/* { dg-options "-O3 -march=armv8-a+simd -mfpu=auto -mfloat-abi=hard  -fno-schedule-insns -fno-reorder-blocks -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#define N 640
+int a[N] = {0};
+int b[N] = {0};
+
+/* 
+** f1:
+**	...
+**	vcgt.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f1 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] > 0)
+	break;
+    }
+}
+
+/*
+** f2:
+**	...
+**	vcge.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f2 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] >= 0)
+	break;
+    }
+}
+
+/*
+** f3:
+**	...
+**	vceq.i32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f3 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] == 0)
+	break;
+    }
+}
+
+/*
+** f4:
+**	...
+**	vceq.i32	q[0-9]+, q[0-9]+, #0
+**	vmvn	q[0-9]+, q[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f4 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] != 0)
+	break;
+    }
+}
+
+/*
+** f5:
+**	...
+**	vclt.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f5 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] < 0)
+	break;
+    }
+}
+
+/*
+** f6:
+**	...
+**	vcle.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f6 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] <= 0)
+	break;
+    }
+}
+
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index 05fc417877bcd658931061b7245eb8ba5abd2e09..24a937dbb59b5723af038bd9e0b89369595fcf87 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -4059,6 +4059,7 @@ proc check_effective_target_vect_early_break { } {
     return [check_cached_effective_target_indexed vect_early_break {
       expr {
 	[istarget aarch64*-*-*]
+	|| [check_effective_target_arm_v8_neon_ok]
 	|| [check_effective_target_sse4]
 	}}]
 }
@@ -4072,6 +4073,7 @@ proc check_effective_target_vect_early_break_hw { } {
     return [check_cached_effective_target_indexed vect_early_break_hw {
       expr {
 	[istarget aarch64*-*-*]
+	|| [check_effective_target_arm_v8_neon_hw]
 	|| [check_sse4_hw_available]
 	}}]
 }
@@ -4081,6 +4083,11 @@ proc add_options_for_vect_early_break { flags } {
 	return "$flags"
     }
 
+    if { [check_effective_target_arm_v8_neon_ok] } {
+	global et_arm_v8_neon_flags
+	return "$flags $et_arm_v8_neon_flags -march=armv8-a"
+    }
+
     if { [check_effective_target_sse4] } {
 	return "$flags -msse4.1"
     }




^ permalink raw reply	[flat|nested] 6+ messages in thread

* RE: [PATCH 20/21]Arm: Add Advanced SIMD cbranch implementation
  2023-12-29 14:42 [PATCH 20/21]Arm: Add Advanced SIMD cbranch implementation Tamar Christina
@ 2024-01-04 11:06 ` Tamar Christina
  2024-01-04 11:12   ` Kyrylo Tkachov
  0 siblings, 1 reply; 6+ messages in thread
From: Tamar Christina @ 2024-01-04 11:06 UTC (permalink / raw)
  To: Tamar Christina, gcc-patches
  Cc: nd, Ramana Radhakrishnan, Richard Earnshaw, nickc, Kyrylo Tkachov

[-- Attachment #1: Type: text/plain, Size: 12232 bytes --]

Ping,

---

Hi All,

This adds an implementation for conditional branch optab for AArch32.
The previous version only allowed operand 0 but it looks like cbranch
expansion does not check with the target and so we have to implement all.

I therefore did not commit it.  This is a larger version. I've also dropped the MVE
version because the mid-end can rewrite the comparison into comparing two
predicates without checking with the backend.  Since MVE only has 1 predicate
register this would need to go through memory and two MRS calls.  It's unlikely
to be beneficial and so that's for GCC 15 when I can fix the middle-end.

The cases where AArch32 is skipped in the testsuite are all missed-optimizations
due to AArch32 missing some optabs.

For e.g.

void f1 ()
{
  for (int i = 0; i < N; i++)
    {
      b[i] += a[i];
      if (a[i] > 0)
	break;
    }
}

For 128-bit vectors we generate:

        vcgt.s32        q8, q9, #0
        vpmax.u32       d7, d16, d17
        vpmax.u32       d7, d7, d7
        vmov    r3, s14 @ int
        cmp     r3, #0

and for 64-bit vectors we can omit one vpmax as we still need to compress to
32-bits.

Bootstrapped Regtested on arm-none-linux-gnueabihf and no issues.

Ok for master?

Thanks,
Tamar

gcc/ChangeLog:

	* config/arm/neon.md (cbranch<mode>4): New.

gcc/testsuite/ChangeLog:

	* gcc.dg/vect/vect-early-break_2.c: Skip Arm.
	* gcc.dg/vect/vect-early-break_7.c: Likewise.
	* gcc.dg/vect/vect-early-break_75.c: Likewise.
	* gcc.dg/vect/vect-early-break_77.c: Likewise.
	* gcc.dg/vect/vect-early-break_82.c: Likewise.
	* gcc.dg/vect/vect-early-break_88.c: Likewise.
	* lib/target-supports.exp (add_options_for_vect_early_break,
	check_effective_target_vect_early_break_hw,
	check_effective_target_vect_early_break): Support AArch32.
	* gcc.target/arm/vect-early-break-cbranch.c: New test.

--- inline version of patch ---

diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
index d213369ffc38fb88ad0357d848cc7da5af73bab7..ed659ab736862da416d1ff6241d0d3e6c6b96ff1 100644
--- a/gcc/config/arm/neon.md
+++ b/gcc/config/arm/neon.md
@@ -408,6 +408,55 @@ (define_insn "vec_extract<mode><V_elem_l>"
   [(set_attr "type" "neon_store1_one_lane<q>,neon_to_gp<q>")]
 )
 
+;; Patterns comparing two vectors and conditionally jumping.
+;; Advanced SIMD lacks a vector != comparison, but this is a quite common
+;; operation.  To not pay the penalty for inverting == we can map our any
+;; comparisons to all i.e. any(~x) => all(x).
+;;
+;; However unlike the AArch64 version, we can't optimize this further as the
+;; chain is too long for combine due to these being unspecs so it doesn't fold
+;; the operation to something simpler.
+(define_expand "cbranch<mode>4"
+  [(set (pc) (if_then_else
+	      (match_operator 0 "expandable_comparison_operator"
+	       [(match_operand:VDQI 1 "register_operand")
+	        (match_operand:VDQI 2 "reg_or_zero_operand")])
+	      (label_ref (match_operand 3 "" ""))
+	      (pc)))]
+  "TARGET_NEON"
+{
+  rtx mask = operands[1];
+
+  /* If comparing against a non-zero vector we have to do a comparison first
+     so we can have a != 0 comparison with the result.  */
+  if (operands[2] != CONST0_RTX (<MODE>mode))
+    {
+      mask = gen_reg_rtx (<MODE>mode);
+      emit_insn (gen_xor<mode>3 (mask, operands[1], operands[2]));
+    }
+
+  /* For 128-bit vectors we need an additional reduction.  */
+  if (known_eq (128, GET_MODE_BITSIZE (<MODE>mode)))
+    {
+      /* Always reduce using a V4SI.  */
+      mask = gen_reg_rtx (V2SImode);
+      rtx low = gen_reg_rtx (V2SImode);
+      rtx high = gen_reg_rtx (V2SImode);
+      rtx op1 = lowpart_subreg (V4SImode, operands[1], <MODE>mode);
+      emit_insn (gen_neon_vget_lowv4si (low, op1));
+      emit_insn (gen_neon_vget_highv4si (high, op1));
+      emit_insn (gen_neon_vpumaxv2si (mask, low, high));
+    }
+
+  rtx op1 = lowpart_subreg (V2SImode, mask, GET_MODE (mask));
+  emit_insn (gen_neon_vpumaxv2si (op1, op1, op1));
+
+  rtx val = gen_reg_rtx (SImode);
+  emit_move_insn (val, gen_lowpart (SImode, mask));
+  emit_jump_insn (gen_cbranch_cc (operands[0], val, const0_rtx, operands[3]));
+  DONE;
+})
+
 ;; This pattern is renamed from "vec_extract<mode><V_elem_l>" to
 ;; "neon_vec_extract<mode><V_elem_l>" and this pattern is called
 ;; by define_expand in vec-common.md file.
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c
index 5c32bf94409e9743e72429985ab3bf13aab8f2c1..dec0b492ab883de6e02944a95fd554a109a68a39 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c
@@ -5,7 +5,7 @@
 
 /* { dg-additional-options "-Ofast" } */
 
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
 
 #include <complex.h>
 
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c
index 8c86c5034d7522b3733543fb384a23c5d6ed0fcf..d218a0686719fee4c167684dcf26402851b53260 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c
@@ -5,7 +5,7 @@
 
 /* { dg-additional-options "-Ofast" } */
 
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
 
 #include <complex.h>
 
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c
index ed27f8635730ff0d8803517c72693625a2feddef..9dcc3372acd657458df8d94ce36c4bd96f02fd52 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c
@@ -3,7 +3,7 @@
 /* { dg-require-effective-target vect_int } */
 
 /* { dg-additional-options "-O3" } */
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "x86_64-*-* i?86-*-*" } } } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "x86_64-*-* i?86-*-* arm*-*-*" } } } } */
 
 #include <limits.h>
 #include <assert.h>
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c
index 225106aab0a3efc7536de6f6e45bc6ff16210ea8..9fa7e6948ebfb5f1723833653fd6ad1fc65f4e8e 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c
@@ -3,7 +3,7 @@
 /* { dg-require-effective-target vect_int } */
 
 /* { dg-additional-options "-O3" } */
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
 
 #include "tree-vect.h"
 
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c
index 0e9b2d8d385c556063a3c6fcb14383317b056a79..7cd21d33485f3abb823e1943c87e9481c41fd2c3 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c
@@ -5,7 +5,7 @@
 
 /* { dg-additional-options "-Ofast" } */
 
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
 
 #include <complex.h>
 
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c
index b392dd46553994d813761da41c42989a79b90119..59ed57c5fb5f3e8197fc20058eeb0a81a55815cc 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c
@@ -3,7 +3,7 @@
 /* { dg-require-effective-target vect_int } */
 
 /* { dg-additional-options "-Ofast --param vect-partial-vector-usage=2" } */
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
 
 #include "tree-vect.h"
 
diff --git a/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c b/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c
new file mode 100644
index 0000000000000000000000000000000000000000..0e9a39d231fdf4cb56590945e7cedfabd11d39b5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c
@@ -0,0 +1,138 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target vect_early_break } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-require-effective-target arm32 } */
+/* { dg-options "-O3 -march=armv8-a+simd -mfpu=auto -mfloat-abi=hard  -fno-schedule-insns -fno-reorder-blocks -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#define N 640
+int a[N] = {0};
+int b[N] = {0};
+
+/* 
+** f1:
+**	...
+**	vcgt.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f1 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] > 0)
+	break;
+    }
+}
+
+/*
+** f2:
+**	...
+**	vcge.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f2 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] >= 0)
+	break;
+    }
+}
+
+/*
+** f3:
+**	...
+**	vceq.i32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f3 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] == 0)
+	break;
+    }
+}
+
+/*
+** f4:
+**	...
+**	vceq.i32	q[0-9]+, q[0-9]+, #0
+**	vmvn	q[0-9]+, q[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f4 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] != 0)
+	break;
+    }
+}
+
+/*
+** f5:
+**	...
+**	vclt.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f5 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] < 0)
+	break;
+    }
+}
+
+/*
+** f6:
+**	...
+**	vcle.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f6 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] <= 0)
+	break;
+    }
+}
+
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index 05fc417877bcd658931061b7245eb8ba5abd2e09..24a937dbb59b5723af038bd9e0b89369595fcf87 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -4059,6 +4059,7 @@ proc check_effective_target_vect_early_break { } {
     return [check_cached_effective_target_indexed vect_early_break {
       expr {
 	[istarget aarch64*-*-*]
+	|| [check_effective_target_arm_v8_neon_ok]
 	|| [check_effective_target_sse4]
 	}}]
 }
@@ -4072,6 +4073,7 @@ proc check_effective_target_vect_early_break_hw { } {
     return [check_cached_effective_target_indexed vect_early_break_hw {
       expr {
 	[istarget aarch64*-*-*]
+	|| [check_effective_target_arm_v8_neon_hw]
 	|| [check_sse4_hw_available]
 	}}]
 }
@@ -4081,6 +4083,11 @@ proc add_options_for_vect_early_break { flags } {
 	return "$flags"
     }
 
+    if { [check_effective_target_arm_v8_neon_ok] } {
+	global et_arm_v8_neon_flags
+	return "$flags $et_arm_v8_neon_flags -march=armv8-a"
+    }
+
     if { [check_effective_target_sse4] } {
 	return "$flags -msse4.1"
     }

[-- Attachment #2: rb17512.patch --]
[-- Type: application/octet-stream, Size: 9981 bytes --]

diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
index d213369ffc38fb88ad0357d848cc7da5af73bab7..ed659ab736862da416d1ff6241d0d3e6c6b96ff1 100644
--- a/gcc/config/arm/neon.md
+++ b/gcc/config/arm/neon.md
@@ -408,6 +408,55 @@ (define_insn "vec_extract<mode><V_elem_l>"
   [(set_attr "type" "neon_store1_one_lane<q>,neon_to_gp<q>")]
 )
 
+;; Patterns comparing two vectors and conditionally jump.
+;; Advanced SIMD lacks a vector != comparison, but this is quite a common
+;; operation.  To not pay the penalty for inverting == we can map our any
+;; comparisons to all i.e. any(~x) => all(x).
+;;
+;; However unlike the AArch64 version, we can't optimize this further as the
+;; chain is too long for combine due to these being unspecs so it doesn't fold
+;; the operation to something simpler.
+(define_expand "cbranch<mode>4"
+  [(set (pc) (if_then_else
+	      (match_operator 0 "expandable_comparison_operator"
+	       [(match_operand:VDQI 1 "register_operand")
+	        (match_operand:VDQI 2 "reg_or_zero_operand")])
+	      (label_ref (match_operand 3 "" ""))
+	      (pc)))]
+  "TARGET_NEON"
+{
+  rtx mask = operands[1];
+
+  /* If comparing against a non-zero vector we have to do a comparison first
+     so we can have a != 0 comparison with the result.  */
+  if (operands[2] != CONST0_RTX (<MODE>mode))
+    {
+      mask = gen_reg_rtx (<MODE>mode);
+      emit_insn (gen_xor<mode>3 (mask, operands[1], operands[2]));
+    }
+
+  /* For 128-bit vectors we need an additional reduction.  */
+  if (known_eq (128, GET_MODE_BITSIZE (<MODE>mode)))
+    {
+      /* Always reduce using a V4SI.  */
+      mask = gen_reg_rtx (V2SImode);
+      rtx low = gen_reg_rtx (V2SImode);
+      rtx high = gen_reg_rtx (V2SImode);
+      rtx op1 = lowpart_subreg (V4SImode, operands[1], <MODE>mode);
+      emit_insn (gen_neon_vget_lowv4si (low, op1));
+      emit_insn (gen_neon_vget_highv4si (high, op1));
+      emit_insn (gen_neon_vpumaxv2si (mask, low, high));
+    }
+
+  rtx op1 = lowpart_subreg (V2SImode, mask, GET_MODE (mask));
+  emit_insn (gen_neon_vpumaxv2si (op1, op1, op1));
+
+  rtx val = gen_reg_rtx (SImode);
+  emit_move_insn (val, gen_lowpart (SImode, mask));
+  emit_jump_insn (gen_cbranch_cc (operands[0], val, const0_rtx, operands[3]));
+  DONE;
+})
+
 ;; This pattern is renamed from "vec_extract<mode><V_elem_l>" to
 ;; "neon_vec_extract<mode><V_elem_l>" and this pattern is called
 ;; by define_expand in vec-common.md file.
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c
index 5c32bf94409e9743e72429985ab3bf13aab8f2c1..dec0b492ab883de6e02944a95fd554a109a68a39 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c
@@ -5,7 +5,7 @@
 
 /* { dg-additional-options "-Ofast" } */
 
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
 
 #include <complex.h>
 
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c
index 8c86c5034d7522b3733543fb384a23c5d6ed0fcf..d218a0686719fee4c167684dcf26402851b53260 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c
@@ -5,7 +5,7 @@
 
 /* { dg-additional-options "-Ofast" } */
 
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
 
 #include <complex.h>
 
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c
index ed27f8635730ff0d8803517c72693625a2feddef..9dcc3372acd657458df8d94ce36c4bd96f02fd52 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c
@@ -3,7 +3,7 @@
 /* { dg-require-effective-target vect_int } */
 
 /* { dg-additional-options "-O3" } */
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "x86_64-*-* i?86-*-*" } } } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "x86_64-*-* i?86-*-* arm*-*-*" } } } } */
 
 #include <limits.h>
 #include <assert.h>
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c
index 225106aab0a3efc7536de6f6e45bc6ff16210ea8..9fa7e6948ebfb5f1723833653fd6ad1fc65f4e8e 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c
@@ -3,7 +3,7 @@
 /* { dg-require-effective-target vect_int } */
 
 /* { dg-additional-options "-O3" } */
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
 
 #include "tree-vect.h"
 
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c
index 0e9b2d8d385c556063a3c6fcb14383317b056a79..7cd21d33485f3abb823e1943c87e9481c41fd2c3 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c
@@ -5,7 +5,7 @@
 
 /* { dg-additional-options "-Ofast" } */
 
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
 
 #include <complex.h>
 
diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c b/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c
index b392dd46553994d813761da41c42989a79b90119..59ed57c5fb5f3e8197fc20058eeb0a81a55815cc 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c
@@ -3,7 +3,7 @@
 /* { dg-require-effective-target vect_int } */
 
 /* { dg-additional-options "-Ofast --param vect-partial-vector-usage=2" } */
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
 
 #include "tree-vect.h"
 
diff --git a/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c b/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c
new file mode 100644
index 0000000000000000000000000000000000000000..0e9a39d231fdf4cb56590945e7cedfabd11d39b5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c
@@ -0,0 +1,138 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target vect_early_break } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-require-effective-target arm32 } */
+/* { dg-options "-O3 -march=armv8-a+simd -mfpu=auto -mfloat-abi=hard  -fno-schedule-insns -fno-reorder-blocks -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#define N 640
+int a[N] = {0};
+int b[N] = {0};
+
+/* 
+** f1:
+**	...
+**	vcgt.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f1 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] > 0)
+	break;
+    }
+}
+
+/*
+** f2:
+**	...
+**	vcge.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f2 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] >= 0)
+	break;
+    }
+}
+
+/*
+** f3:
+**	...
+**	vceq.i32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f3 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] == 0)
+	break;
+    }
+}
+
+/*
+** f4:
+**	...
+**	vceq.i32	q[0-9]+, q[0-9]+, #0
+**	vmvn	q[0-9]+, q[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f4 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] != 0)
+	break;
+    }
+}
+
+/*
+** f5:
+**	...
+**	vclt.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f5 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] < 0)
+	break;
+    }
+}
+
+/*
+** f6:
+**	...
+**	vcle.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f6 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] <= 0)
+	break;
+    }
+}
+
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index 05fc417877bcd658931061b7245eb8ba5abd2e09..24a937dbb59b5723af038bd9e0b89369595fcf87 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -4059,6 +4059,7 @@ proc check_effective_target_vect_early_break { } {
     return [check_cached_effective_target_indexed vect_early_break {
       expr {
 	[istarget aarch64*-*-*]
+	|| [check_effective_target_arm_v8_neon_ok]
 	|| [check_effective_target_sse4]
 	}}]
 }
@@ -4072,6 +4073,7 @@ proc check_effective_target_vect_early_break_hw { } {
     return [check_cached_effective_target_indexed vect_early_break_hw {
       expr {
 	[istarget aarch64*-*-*]
+	|| [check_effective_target_arm_v8_neon_hw]
 	|| [check_sse4_hw_available]
 	}}]
 }
@@ -4081,6 +4083,11 @@ proc add_options_for_vect_early_break { flags } {
 	return "$flags"
     }
 
+    if { [check_effective_target_arm_v8_neon_ok] } {
+	global et_arm_v8_neon_flags
+	return "$flags $et_arm_v8_neon_flags -march=armv8-a"
+    }
+
     if { [check_effective_target_sse4] } {
 	return "$flags -msse4.1"
     }

^ permalink raw reply	[flat|nested] 6+ messages in thread

* RE: [PATCH 20/21]Arm: Add Advanced SIMD cbranch implementation
  2024-01-04 11:06 ` Tamar Christina
@ 2024-01-04 11:12   ` Kyrylo Tkachov
  2024-01-04 11:26     ` Tamar Christina
  0 siblings, 1 reply; 6+ messages in thread
From: Kyrylo Tkachov @ 2024-01-04 11:12 UTC (permalink / raw)
  To: Tamar Christina, gcc-patches
  Cc: nd, Ramana Radhakrishnan, Richard Earnshaw, nickc

Hi Tamar,

> -----Original Message-----
> From: Tamar Christina <Tamar.Christina@arm.com>
> Sent: Thursday, January 4, 2024 11:06 AM
> To: Tamar Christina <Tamar.Christina@arm.com>; gcc-patches@gcc.gnu.org
> Cc: nd <nd@arm.com>; Ramana Radhakrishnan
> <Ramana.Radhakrishnan@arm.com>; Richard Earnshaw
> <Richard.Earnshaw@arm.com>; nickc@redhat.com; Kyrylo Tkachov
> <Kyrylo.Tkachov@arm.com>
> Subject: RE: [PATCH 20/21]Arm: Add Advanced SIMD cbranch implementation
> 
> Ping,
> 
> ---
> 
> Hi All,
> 
> This adds an implementation for conditional branch optab for AArch32.
> The previous version only allowed operand 0 but it looks like cbranch
> expansion does not check with the target and so we have to implement all.
> 
> I therefore did not commit it.  This is a larger version. I've also dropped the MVE
> version because the mid-end can rewrite the comparison into comparing two
> predicates without checking with the backend.  Since MVE only has 1 predicate
> register this would need to go through memory and two MRS calls.  It's unlikely
> to be beneficial and so that's for GCC 15 when I can fix the middle-end.
> 
> The cases where AArch32 is skipped in the testsuite are all missed-optimizations
> due to AArch32 missing some optabs.

Does the testsuite have vect_* checks that can be used instead of target arm*?
If so let's use those.
Otherwise it's okay as is.
Thanks,
Kyrill

> 
> For e.g.
> 
> void f1 ()
> {
>   for (int i = 0; i < N; i++)
>     {
>       b[i] += a[i];
>       if (a[i] > 0)
> 	break;
>     }
> }
> 
> For 128-bit vectors we generate:
> 
>         vcgt.s32        q8, q9, #0
>         vpmax.u32       d7, d16, d17
>         vpmax.u32       d7, d7, d7
>         vmov    r3, s14 @ int
>         cmp     r3, #0
> 
> and of 64-bit vector we can omit one vpmax as we still need to compress to
> 32-bits.
> 
> Bootstrapped Regtested on arm-none-linux-gnueabihf and no issues.
> 
> Ok for master?
> 
> Thanks,
> Tamar
> 
> gcc/ChangeLog:
> 
> 	* config/arm/neon.md (cbranch<mode>4): New.
> 
> gcc/testsuite/ChangeLog:
> 
> 	* gcc.dg/vect/vect-early-break_2.c: Skip Arm.
> 	* gcc.dg/vect/vect-early-break_7.c: Likewise.
> 	* gcc.dg/vect/vect-early-break_75.c: Likewise.
> 	* gcc.dg/vect/vect-early-break_77.c: Likewise.
> 	* gcc.dg/vect/vect-early-break_82.c: Likewise.
> 	* gcc.dg/vect/vect-early-break_88.c: Likewise.
> 	* lib/target-supports.exp (add_options_for_vect_early_break,
> 	check_effective_target_vect_early_break_hw,
> 	check_effective_target_vect_early_break): Support AArch32.
> 	* gcc.target/arm/vect-early-break-cbranch.c: New test.
> 
> --- inline version of patch ---
> 
> diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
> index
> d213369ffc38fb88ad0357d848cc7da5af73bab7..ed659ab736862da416d1ff6241d
> 0d3e6c6b96ff1 100644
> --- a/gcc/config/arm/neon.md
> +++ b/gcc/config/arm/neon.md
> @@ -408,6 +408,55 @@ (define_insn "vec_extract<mode><V_elem_l>"
>    [(set_attr "type" "neon_store1_one_lane<q>,neon_to_gp<q>")]
>  )
> 
> +;; Patterns comparing two vectors and conditionally jump.
> +;; Avdanced SIMD lacks a vector != comparison, but this is a quite common
> +;; operation.  To not pay the penalty for inverting == we can map our any
> +;; comparisons to all i.e. any(~x) => all(x).
> +;;
> +;; However unlike the AArch64 version, we can't optimize this further as the
> +;; chain is too long for combine due to these being unspecs so it doesn't fold
> +;; the operation to something simpler.
> +(define_expand "cbranch<mode>4"
> +  [(set (pc) (if_then_else
> +	      (match_operator 0 "expandable_comparison_operator"
> +	       [(match_operand:VDQI 1 "register_operand")
> +	        (match_operand:VDQI 2 "reg_or_zero_operand")])
> +	      (label_ref (match_operand 3 "" ""))
> +	      (pc)))]
> +  "TARGET_NEON"
> +{
> +  rtx mask = operands[1];
> +
> +  /* If comparing against a non-zero vector we have to do a comparison first
> +     so we can have a != 0 comparison with the result.  */
> +  if (operands[2] != CONST0_RTX (<MODE>mode))
> +    {
> +      mask = gen_reg_rtx (<MODE>mode);
> +      emit_insn (gen_xor<mode>3 (mask, operands[1], operands[2]));
> +    }
> +
> +  /* For 128-bit vectors we need an additional reduction.  */
> +  if (known_eq (128, GET_MODE_BITSIZE (<MODE>mode)))
> +    {
> +      /* Always reduce using a V4SI.  */
> +      mask = gen_reg_rtx (V2SImode);
> +      rtx low = gen_reg_rtx (V2SImode);
> +      rtx high = gen_reg_rtx (V2SImode);
> +      rtx op1 = lowpart_subreg (V4SImode, operands[1], <MODE>mode);
> +      emit_insn (gen_neon_vget_lowv4si (low, op1));
> +      emit_insn (gen_neon_vget_highv4si (high, op1));
> +      emit_insn (gen_neon_vpumaxv2si (mask, low, high));
> +    }
> +
> +  rtx op1 = lowpart_subreg (V2SImode, mask, GET_MODE (mask));
> +  emit_insn (gen_neon_vpumaxv2si (op1, op1, op1));
> +
> +  rtx val = gen_reg_rtx (SImode);
> +  emit_move_insn (val, gen_lowpart (SImode, mask));
> +  emit_jump_insn (gen_cbranch_cc (operands[0], val, const0_rtx, operands[3]));
> +  DONE;
> +})
> +
>  ;; This pattern is renamed from "vec_extract<mode><V_elem_l>" to
>  ;; "neon_vec_extract<mode><V_elem_l>" and this pattern is called
>  ;; by define_expand in vec-common.md file.
> diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c
> b/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c
> index
> 5c32bf94409e9743e72429985ab3bf13aab8f2c1..dec0b492ab883de6e02944a95f
> d554a109a68a39 100644
> --- a/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c
> +++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c
> @@ -5,7 +5,7 @@
> 
>  /* { dg-additional-options "-Ofast" } */
> 
> -/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
> +/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-
> *" } } } } */
> 
>  #include <complex.h>
> 
> diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c
> b/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c
> index
> 8c86c5034d7522b3733543fb384a23c5d6ed0fcf..d218a0686719fee4c167684dcf2
> 6402851b53260 100644
> --- a/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c
> +++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c
> @@ -5,7 +5,7 @@
> 
>  /* { dg-additional-options "-Ofast" } */
> 
> -/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
> +/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-
> *" } } } } */
> 
>  #include <complex.h>
> 
> diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c
> b/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c
> index
> ed27f8635730ff0d8803517c72693625a2feddef..9dcc3372acd657458df8d94ce36
> c4bd96f02fd52 100644
> --- a/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c
> +++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c
> @@ -3,7 +3,7 @@
>  /* { dg-require-effective-target vect_int } */
> 
>  /* { dg-additional-options "-O3" } */
> -/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "x86_64-*-
> * i?86-*-*" } } } } */
> +/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "x86_64-
> *-* i?86-*-* arm*-*-*" } } } } */
> 
>  #include <limits.h>
>  #include <assert.h>
> diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c
> b/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c
> index
> 225106aab0a3efc7536de6f6e45bc6ff16210ea8..9fa7e6948ebfb5f1723833653fd
> 6ad1fc65f4e8e 100644
> --- a/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c
> +++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c
> @@ -3,7 +3,7 @@
>  /* { dg-require-effective-target vect_int } */
> 
>  /* { dg-additional-options "-O3" } */
> -/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
> +/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-
> *" } } } } */
> 
>  #include "tree-vect.h"
> 
> diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c
> b/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c
> index
> 0e9b2d8d385c556063a3c6fcb14383317b056a79..7cd21d33485f3abb823e1943c
> 87e9481c41fd2c3 100644
> --- a/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c
> +++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c
> @@ -5,7 +5,7 @@
> 
>  /* { dg-additional-options "-Ofast" } */
> 
> -/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
> +/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-
> *" } } } } */
> 
>  #include <complex.h>
> 
> diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c
> b/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c
> index
> b392dd46553994d813761da41c42989a79b90119..59ed57c5fb5f3e8197fc20058
> eeb0a81a55815cc 100644
> --- a/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c
> +++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c
> @@ -3,7 +3,7 @@
>  /* { dg-require-effective-target vect_int } */
> 
>  /* { dg-additional-options "-Ofast --param vect-partial-vector-usage=2" } */
> -/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
> +/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-
> *" } } } } */
> 
>  #include "tree-vect.h"
> 
> diff --git a/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c
> b/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c
> new file mode 100644
> index
> 0000000000000000000000000000000000000000..0e9a39d231fdf4cb56590945e
> 7cedfabd11d39b5
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c
> @@ -0,0 +1,138 @@
> +/* { dg-do compile } */
> +/* { dg-require-effective-target vect_early_break } */
> +/* { dg-require-effective-target arm_neon_ok } */
> +/* { dg-require-effective-target arm32 } */
> +/* { dg-options "-O3 -march=armv8-a+simd -mfpu=auto -mfloat-abi=hard  -fno-
> schedule-insns -fno-reorder-blocks -fno-schedule-insns2" } */
> +/* { dg-final { check-function-bodies "**" "" "" } } */
> +
> +#define N 640
> +int a[N] = {0};
> +int b[N] = {0};
> +
> +/*
> +** f1:
> +**	...
> +**	vcgt.s32	q[0-9]+, q[0-9]+, #0
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vmov	r[0-9]+, s[0-9]+	@ int
> +**	cmp	r[0-9]+, #0
> +**	bne	\.L[0-9]+
> +**	...
> +*/
> +void f1 ()
> +{
> +  for (int i = 0; i < N; i++)
> +    {
> +      b[i] += a[i];
> +      if (a[i] > 0)
> +	break;
> +    }
> +}
> +
> +/*
> +** f2:
> +**	...
> +**	vcge.s32	q[0-9]+, q[0-9]+, #0
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vmov	r[0-9]+, s[0-9]+	@ int
> +**	cmp	r[0-9]+, #0
> +**	bne	\.L[0-9]+
> +**	...
> +*/
> +void f2 ()
> +{
> +  for (int i = 0; i < N; i++)
> +    {
> +      b[i] += a[i];
> +      if (a[i] >= 0)
> +	break;
> +    }
> +}
> +
> +/*
> +** f3:
> +**	...
> +**	vceq.i32	q[0-9]+, q[0-9]+, #0
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vmov	r[0-9]+, s[0-9]+	@ int
> +**	cmp	r[0-9]+, #0
> +**	bne	\.L[0-9]+
> +**	...
> +*/
> +void f3 ()
> +{
> +  for (int i = 0; i < N; i++)
> +    {
> +      b[i] += a[i];
> +      if (a[i] == 0)
> +	break;
> +    }
> +}
> +
> +/*
> +** f4:
> +**	...
> +**	vceq.i32	q[0-9]+, q[0-9]+, #0
> +**	vmvn	q[0-9]+, q[0-9]+
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vmov	r[0-9]+, s[0-9]+	@ int
> +**	cmp	r[0-9]+, #0
> +**	bne	\.L[0-9]+
> +**	...
> +*/
> +void f4 ()
> +{
> +  for (int i = 0; i < N; i++)
> +    {
> +      b[i] += a[i];
> +      if (a[i] != 0)
> +	break;
> +    }
> +}
> +
> +/*
> +** f5:
> +**	...
> +**	vclt.s32	q[0-9]+, q[0-9]+, #0
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vmov	r[0-9]+, s[0-9]+	@ int
> +**	cmp	r[0-9]+, #0
> +**	bne	\.L[0-9]+
> +**	...
> +*/
> +void f5 ()
> +{
> +  for (int i = 0; i < N; i++)
> +    {
> +      b[i] += a[i];
> +      if (a[i] < 0)
> +	break;
> +    }
> +}
> +
> +/*
> +** f6:
> +**	...
> +**	vcle.s32	q[0-9]+, q[0-9]+, #0
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vmov	r[0-9]+, s[0-9]+	@ int
> +**	cmp	r[0-9]+, #0
> +**	bne	\.L[0-9]+
> +**	...
> +*/
> +void f6 ()
> +{
> +  for (int i = 0; i < N; i++)
> +    {
> +      b[i] += a[i];
> +      if (a[i] <= 0)
> +	break;
> +    }
> +}
> +
> diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-
> supports.exp
> index
> 05fc417877bcd658931061b7245eb8ba5abd2e09..24a937dbb59b5723af038bd9e
> 0b89369595fcf87 100644
> --- a/gcc/testsuite/lib/target-supports.exp
> +++ b/gcc/testsuite/lib/target-supports.exp
> @@ -4059,6 +4059,7 @@ proc check_effective_target_vect_early_break { } {
>      return [check_cached_effective_target_indexed vect_early_break {
>        expr {
>  	[istarget aarch64*-*-*]
> +	|| [check_effective_target_arm_v8_neon_ok]
>  	|| [check_effective_target_sse4]
>  	}}]
>  }
> @@ -4072,6 +4073,7 @@ proc check_effective_target_vect_early_break_hw { }
> {
>      return [check_cached_effective_target_indexed vect_early_break_hw {
>        expr {
>  	[istarget aarch64*-*-*]
> +	|| [check_effective_target_arm_v8_neon_hw]
>  	|| [check_sse4_hw_available]
>  	}}]
>  }
> @@ -4081,6 +4083,11 @@ proc add_options_for_vect_early_break { flags } {
>  	return "$flags"
>      }
> 
> +    if { [check_effective_target_arm_v8_neon_ok] } {
> +	global et_arm_v8_neon_flags
> +	return "$flags $et_arm_v8_neon_flags -march=armv8-a"
> +    }
> +
>      if { [check_effective_target_sse4] } {
>  	return "$flags -msse4.1"
>      }


^ permalink raw reply	[flat|nested] 6+ messages in thread

* RE: [PATCH 20/21]Arm: Add Advanced SIMD cbranch implementation
  2024-01-04 11:12   ` Kyrylo Tkachov
@ 2024-01-04 11:26     ` Tamar Christina
  0 siblings, 0 replies; 6+ messages in thread
From: Tamar Christina @ 2024-01-04 11:26 UTC (permalink / raw)
  To: Kyrylo Tkachov, gcc-patches
  Cc: nd, Ramana Radhakrishnan, Richard Earnshaw, nickc

> -----Original Message-----
> From: Kyrylo Tkachov <Kyrylo.Tkachov@arm.com>
> Sent: Thursday, January 4, 2024 11:12 AM
> To: Tamar Christina <Tamar.Christina@arm.com>; gcc-patches@gcc.gnu.org
> Cc: nd <nd@arm.com>; Ramana Radhakrishnan
> <Ramana.Radhakrishnan@arm.com>; Richard Earnshaw
> <Richard.Earnshaw@arm.com>; nickc@redhat.com
> Subject: RE: [PATCH 20/21]Arm: Add Advanced SIMD cbranch implementation
> 
> Hi Tamar,
> 
> > -----Original Message-----
> > From: Tamar Christina <Tamar.Christina@arm.com>
> > Sent: Thursday, January 4, 2024 11:06 AM
> > To: Tamar Christina <Tamar.Christina@arm.com>; gcc-patches@gcc.gnu.org
> > Cc: nd <nd@arm.com>; Ramana Radhakrishnan
> > <Ramana.Radhakrishnan@arm.com>; Richard Earnshaw
> > <Richard.Earnshaw@arm.com>; nickc@redhat.com; Kyrylo Tkachov
> > <Kyrylo.Tkachov@arm.com>
> > Subject: RE: [PATCH 20/21]Arm: Add Advanced SIMD cbranch implementation
> >
> > Ping,
> >
> > ---
> >
> > Hi All,
> >
> > This adds an implementation for conditional branch optab for AArch32.
> > The previous version only allowed operand 0 but it looks like cbranch
> > expansion does not check with the target and so we have to implement all.
> >
> > I therefore did not commit it.  This is a larger version. I've also dropped the MVE
> > version because the mid-end can rewrite the comparison into comparing two
> > predicates without checking with the backend.  Since MVE only has 1 predicate
> > register this would need to go through memory and two MRS calls.  It's unlikely
> > to be beneficial and so that's for GCC 15 when I can fix the middle-end.
> >
> > The cases where AArch32 is skipped in the testsuite are all missed-optimizations
> > due to AArch32 missing some optabs.
> 
> Does the testsuite have vect_* checks that can be used instead of target arm*?
> If so let's use those.

Unfortunately not; a lot of them center around the handling of complex doubles.
Some tests work and some fail, which makes it hard to disable them based on an
effective-target test.  They look easy to fix, so I may file some tickets
for them.

Cheers,
Tamar

> Otherwise it's okay as is.
> Thanks,
> Kyrill
> 
> >
> > For e.g.
> >
> > void f1 ()
> > {
> >   for (int i = 0; i < N; i++)
> >     {
> >       b[i] += a[i];
> >       if (a[i] > 0)
> > 	break;
> >     }
> > }
> >
> > For 128-bit vectors we generate:
> >
> >         vcgt.s32        q8, q9, #0
> >         vpmax.u32       d7, d16, d17
> >         vpmax.u32       d7, d7, d7
> >         vmov    r3, s14 @ int
> >         cmp     r3, #0
> >
> > and of 64-bit vector we can omit one vpmax as we still need to compress to
> > 32-bits.
> >
> > Bootstrapped Regtested on arm-none-linux-gnueabihf and no issues.
> >
> > Ok for master?
> >
> > Thanks,
> > Tamar
> >
> > gcc/ChangeLog:
> >
> > 	* config/arm/neon.md (cbranch<mode>4): New.
> >
> > gcc/testsuite/ChangeLog:
> >
> > 	* gcc.dg/vect/vect-early-break_2.c: Skip Arm.
> > 	* gcc.dg/vect/vect-early-break_7.c: Likewise.
> > 	* gcc.dg/vect/vect-early-break_75.c: Likewise.
> > 	* gcc.dg/vect/vect-early-break_77.c: Likewise.
> > 	* gcc.dg/vect/vect-early-break_82.c: Likewise.
> > 	* gcc.dg/vect/vect-early-break_88.c: Likewise.
> > 	* lib/target-supports.exp (add_options_for_vect_early_break,
> > 	check_effective_target_vect_early_break_hw,
> > 	check_effective_target_vect_early_break): Support AArch32.
> > 	* gcc.target/arm/vect-early-break-cbranch.c: New test.
> >
> > --- inline version of patch ---
> >
> > diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
> > index
> >
> d213369ffc38fb88ad0357d848cc7da5af73bab7..ed659ab736862da416d1ff624
> 1d
> > 0d3e6c6b96ff1 100644
> > --- a/gcc/config/arm/neon.md
> > +++ b/gcc/config/arm/neon.md
> > @@ -408,6 +408,55 @@ (define_insn "vec_extract<mode><V_elem_l>"
> >    [(set_attr "type" "neon_store1_one_lane<q>,neon_to_gp<q>")]
> >  )
> >
> > +;; Patterns comparing two vectors and conditionally jump.
> > +;; Advanced SIMD lacks a vector != comparison, but this is a quite common
> > +;; operation.  To not pay the penalty for inverting == we can map our any
> > +;; comparisons to all i.e. any(~x) => all(x).
> > +;;
> > +;; However unlike the AArch64 version, we can't optimize this further as the
> > +;; chain is too long for combine due to these being unspecs so it doesn't fold
> > +;; the operation to something simpler.
> > +(define_expand "cbranch<mode>4"
> > +  [(set (pc) (if_then_else
> > +	      (match_operator 0 "expandable_comparison_operator"
> > +	       [(match_operand:VDQI 1 "register_operand")
> > +	        (match_operand:VDQI 2 "reg_or_zero_operand")])
> > +	      (label_ref (match_operand 3 "" ""))
> > +	      (pc)))]
> > +  "TARGET_NEON"
> > +{
> > +  rtx mask = operands[1];
> > +
> > +  /* If comparing against a non-zero vector we have to do a comparison first
> > +     so we can have a != 0 comparison with the result.  */
> > +  if (operands[2] != CONST0_RTX (<MODE>mode))
> > +    {
> > +      mask = gen_reg_rtx (<MODE>mode);
> > +      emit_insn (gen_xor<mode>3 (mask, operands[1], operands[2]));
> > +    }
> > +
> > +  /* For 128-bit vectors we need an additional reduction.  */
> > +  if (known_eq (128, GET_MODE_BITSIZE (<MODE>mode)))
> > +    {
> > +      /* Always reduce using a V4SI.  */
> > +      mask = gen_reg_rtx (V2SImode);
> > +      rtx low = gen_reg_rtx (V2SImode);
> > +      rtx high = gen_reg_rtx (V2SImode);
> > +      rtx op1 = lowpart_subreg (V4SImode, operands[1], <MODE>mode);
> > +      emit_insn (gen_neon_vget_lowv4si (low, op1));
> > +      emit_insn (gen_neon_vget_highv4si (high, op1));
> > +      emit_insn (gen_neon_vpumaxv2si (mask, low, high));
> > +    }
> > +
> > +  rtx op1 = lowpart_subreg (V2SImode, mask, GET_MODE (mask));
> > +  emit_insn (gen_neon_vpumaxv2si (op1, op1, op1));
> > +
> > +  rtx val = gen_reg_rtx (SImode);
> > +  emit_move_insn (val, gen_lowpart (SImode, mask));
> > +  emit_jump_insn (gen_cbranch_cc (operands[0], val, const0_rtx,
> operands[3]));
> > +  DONE;
> > +})
> > +
> >  ;; This pattern is renamed from "vec_extract<mode><V_elem_l>" to
> >  ;; "neon_vec_extract<mode><V_elem_l>" and this pattern is called
> >  ;; by define_expand in vec-common.md file.
> > diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c
> > b/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c
> > index
> >
> 5c32bf94409e9743e72429985ab3bf13aab8f2c1..dec0b492ab883de6e02944a9
> 5f
> > d554a109a68a39 100644
> > --- a/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c
> > +++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_2.c
> > @@ -5,7 +5,7 @@
> >
> >  /* { dg-additional-options "-Ofast" } */
> >
> > -/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
> > +/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-
> > *" } } } } */
> >
> >  #include <complex.h>
> >
> > diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c
> > b/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c
> > index
> >
> 8c86c5034d7522b3733543fb384a23c5d6ed0fcf..d218a0686719fee4c167684dc
> f2
> > 6402851b53260 100644
> > --- a/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c
> > +++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_7.c
> > @@ -5,7 +5,7 @@
> >
> >  /* { dg-additional-options "-Ofast" } */
> >
> > -/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
> > +/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-
> > *" } } } } */
> >
> >  #include <complex.h>
> >
> > diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c
> > b/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c
> > index
> >
> ed27f8635730ff0d8803517c72693625a2feddef..9dcc3372acd657458df8d94ce
> 36
> > c4bd96f02fd52 100644
> > --- a/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c
> > +++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_75.c
> > @@ -3,7 +3,7 @@
> >  /* { dg-require-effective-target vect_int } */
> >
> >  /* { dg-additional-options "-O3" } */
> > -/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "x86_64-*-
> > * i?86-*-*" } } } } */
> > +/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "x86_64-
> > *-* i?86-*-* arm*-*-*" } } } } */
> >
> >  #include <limits.h>
> >  #include <assert.h>
> > diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c
> > b/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c
> > index
> >
> 225106aab0a3efc7536de6f6e45bc6ff16210ea8..9fa7e6948ebfb5f1723833653f
> d
> > 6ad1fc65f4e8e 100644
> > --- a/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c
> > +++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_77.c
> > @@ -3,7 +3,7 @@
> >  /* { dg-require-effective-target vect_int } */
> >
> >  /* { dg-additional-options "-O3" } */
> > -/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
> > +/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-
> > *" } } } } */
> >
> >  #include "tree-vect.h"
> >
> > diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c
> > b/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c
> > index
> >
> 0e9b2d8d385c556063a3c6fcb14383317b056a79..7cd21d33485f3abb823e194
> 3c
> > 87e9481c41fd2c3 100644
> > --- a/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c
> > +++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_82.c
> > @@ -5,7 +5,7 @@
> >
> >  /* { dg-additional-options "-Ofast" } */
> >
> > -/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
> > +/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-
> > *" } } } } */
> >
> >  #include <complex.h>
> >
> > diff --git a/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c
> > b/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c
> > index
> >
> b392dd46553994d813761da41c42989a79b90119..59ed57c5fb5f3e8197fc200
> 58
> > eeb0a81a55815cc 100644
> > --- a/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c
> > +++ b/gcc/testsuite/gcc.dg/vect/vect-early-break_88.c
> > @@ -3,7 +3,7 @@
> >  /* { dg-require-effective-target vect_int } */
> >
> >  /* { dg-additional-options "-Ofast --param vect-partial-vector-usage=2" } */
> > -/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
> > +/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-
> > *" } } } } */
> >
> >  #include "tree-vect.h"
> >
> > diff --git a/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c
> > b/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c
> > new file mode 100644
> > index
> >
> 0000000000000000000000000000000000000000..0e9a39d231fdf4cb565909
> 45e
> > 7cedfabd11d39b5
> > --- /dev/null
> > +++ b/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c
> > @@ -0,0 +1,138 @@
> > +/* { dg-do compile } */
> > +/* { dg-require-effective-target vect_early_break } */
> > +/* { dg-require-effective-target arm_neon_ok } */
> > +/* { dg-require-effective-target arm32 } */
> > +/* { dg-options "-O3 -march=armv8-a+simd -mfpu=auto -mfloat-abi=hard  -
> fno-
> > schedule-insns -fno-reorder-blocks -fno-schedule-insns2" } */
> > +/* { dg-final { check-function-bodies "**" "" "" } } */
> > +
> > +#define N 640
> > +int a[N] = {0};
> > +int b[N] = {0};
> > +
> > +/*
> > +** f1:
> > +**	...
> > +**	vcgt.s32	q[0-9]+, q[0-9]+, #0
> > +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> > +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> > +**	vmov	r[0-9]+, s[0-9]+	@ int
> > +**	cmp	r[0-9]+, #0
> > +**	bne	\.L[0-9]+
> > +**	...
> > +*/
> > +void f1 ()
> > +{
> > +  for (int i = 0; i < N; i++)
> > +    {
> > +      b[i] += a[i];
> > +      if (a[i] > 0)
> > +	break;
> > +    }
> > +}
> > +
> > +/*
> > +** f2:
> > +**	...
> > +**	vcge.s32	q[0-9]+, q[0-9]+, #0
> > +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> > +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> > +**	vmov	r[0-9]+, s[0-9]+	@ int
> > +**	cmp	r[0-9]+, #0
> > +**	bne	\.L[0-9]+
> > +**	...
> > +*/
> > +void f2 ()
> > +{
> > +  for (int i = 0; i < N; i++)
> > +    {
> > +      b[i] += a[i];
> > +      if (a[i] >= 0)
> > +	break;
> > +    }
> > +}
> > +
> > +/*
> > +** f3:
> > +**	...
> > +**	vceq.i32	q[0-9]+, q[0-9]+, #0
> > +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> > +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> > +**	vmov	r[0-9]+, s[0-9]+	@ int
> > +**	cmp	r[0-9]+, #0
> > +**	bne	\.L[0-9]+
> > +**	...
> > +*/
> > +void f3 ()
> > +{
> > +  for (int i = 0; i < N; i++)
> > +    {
> > +      b[i] += a[i];
> > +      if (a[i] == 0)
> > +	break;
> > +    }
> > +}
> > +
> > +/*
> > +** f4:
> > +**	...
> > +**	vceq.i32	q[0-9]+, q[0-9]+, #0
> > +**	vmvn	q[0-9]+, q[0-9]+
> > +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> > +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> > +**	vmov	r[0-9]+, s[0-9]+	@ int
> > +**	cmp	r[0-9]+, #0
> > +**	bne	\.L[0-9]+
> > +**	...
> > +*/
> > +void f4 ()
> > +{
> > +  for (int i = 0; i < N; i++)
> > +    {
> > +      b[i] += a[i];
> > +      if (a[i] != 0)
> > +	break;
> > +    }
> > +}
> > +
> > +/*
> > +** f5:
> > +**	...
> > +**	vclt.s32	q[0-9]+, q[0-9]+, #0
> > +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> > +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> > +**	vmov	r[0-9]+, s[0-9]+	@ int
> > +**	cmp	r[0-9]+, #0
> > +**	bne	\.L[0-9]+
> > +**	...
> > +*/
> > +void f5 ()
> > +{
> > +  for (int i = 0; i < N; i++)
> > +    {
> > +      b[i] += a[i];
> > +      if (a[i] < 0)
> > +	break;
> > +    }
> > +}
> > +
> > +/*
> > +** f6:
> > +**	...
> > +**	vcle.s32	q[0-9]+, q[0-9]+, #0
> > +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> > +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> > +**	vmov	r[0-9]+, s[0-9]+	@ int
> > +**	cmp	r[0-9]+, #0
> > +**	bne	\.L[0-9]+
> > +**	...
> > +*/
> > +void f6 ()
> > +{
> > +  for (int i = 0; i < N; i++)
> > +    {
> > +      b[i] += a[i];
> > +      if (a[i] <= 0)
> > +	break;
> > +    }
> > +}
> > +
> > diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-
> > supports.exp
> > index
> >
> 05fc417877bcd658931061b7245eb8ba5abd2e09..24a937dbb59b5723af038bd
> 9e
> > 0b89369595fcf87 100644
> > --- a/gcc/testsuite/lib/target-supports.exp
> > +++ b/gcc/testsuite/lib/target-supports.exp
> > @@ -4059,6 +4059,7 @@ proc check_effective_target_vect_early_break { } {
> >      return [check_cached_effective_target_indexed vect_early_break {
> >        expr {
> >  	[istarget aarch64*-*-*]
> > +	|| [check_effective_target_arm_v8_neon_ok]
> >  	|| [check_effective_target_sse4]
> >  	}}]
> >  }
> > @@ -4072,6 +4073,7 @@ proc check_effective_target_vect_early_break_hw { }
> > {
> >      return [check_cached_effective_target_indexed vect_early_break_hw {
> >        expr {
> >  	[istarget aarch64*-*-*]
> > +	|| [check_effective_target_arm_v8_neon_hw]
> >  	|| [check_sse4_hw_available]
> >  	}}]
> >  }
> > @@ -4081,6 +4083,11 @@ proc add_options_for_vect_early_break { flags } {
> >  	return "$flags"
> >      }
> >
> > +    if { [check_effective_target_arm_v8_neon_ok] } {
> > +	global et_arm_v8_neon_flags
> > +	return "$flags $et_arm_v8_neon_flags -march=armv8-a"
> > +    }
> > +
> >      if { [check_effective_target_sse4] } {
> >  	return "$flags -msse4.1"
> >      }


^ permalink raw reply	[flat|nested] 6+ messages in thread

* RE: [PATCH 20/21]Arm: Add Advanced SIMD cbranch implementation
  2023-11-06  7:42 ` [PATCH 20/21]Arm: Add Advanced SIMD cbranch implementation Tamar Christina
@ 2023-11-27 12:48   ` Kyrylo Tkachov
  0 siblings, 0 replies; 6+ messages in thread
From: Kyrylo Tkachov @ 2023-11-27 12:48 UTC (permalink / raw)
  To: Tamar Christina, gcc-patches
  Cc: nd, Ramana Radhakrishnan, Richard Earnshaw, nickc

Hi Tamar,

> -----Original Message-----
> From: Tamar Christina <Tamar.Christina@arm.com>
> Sent: Monday, November 6, 2023 7:43 AM
> To: gcc-patches@gcc.gnu.org
> Cc: nd <nd@arm.com>; Ramana Radhakrishnan
> <Ramana.Radhakrishnan@arm.com>; Richard Earnshaw
> <Richard.Earnshaw@arm.com>; nickc@redhat.com; Kyrylo Tkachov
> <Kyrylo.Tkachov@arm.com>
> Subject: [PATCH 20/21]Arm: Add Advanced SIMD cbranch implementation
> 
> Hi All,
> 
> This adds an implementation for conditional branch optab for AArch32.
> 
> For e.g.
> 
> void f1 ()
> {
>   for (int i = 0; i < N; i++)
>     {
>       b[i] += a[i];
>       if (a[i] > 0)
> 	break;
>     }
> }
> 
> For 128-bit vectors we generate:
> 
>         vcgt.s32        q8, q9, #0
>         vpmax.u32       d7, d16, d17
>         vpmax.u32       d7, d7, d7
>         vmov    r3, s14 @ int
>         cmp     r3, #0
> 
> and of 64-bit vector we can omit one vpmax as we still need to compress to
> 32-bits.
> 
> Bootstrapped Regtested on arm-none-linux-gnueabihf and no issues.
> 
> Ok for master?
> 

This is okay once the prerequisites go in.
Thanks,
Kyrill

> Thanks,
> Tamar
> 
> gcc/ChangeLog:
> 
> 	* config/arm/neon.md (cbranch<mode>4): New.
> 
> gcc/testsuite/ChangeLog:
> 
> 	* lib/target-supports.exp (vect_early_break): Add AArch32.
> 	* gcc.target/arm/vect-early-break-cbranch.c: New test.
> 
> --- inline copy of patch --
> diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
> index
> d213369ffc38fb88ad0357d848cc7da5af73bab7..130efbc37cfe3128533599dfadc
> 344d2243dcb63 100644
> --- a/gcc/config/arm/neon.md
> +++ b/gcc/config/arm/neon.md
> @@ -408,6 +408,45 @@ (define_insn "vec_extract<mode><V_elem_l>"
>    [(set_attr "type" "neon_store1_one_lane<q>,neon_to_gp<q>")]
>  )
> 
> +;; Patterns comparing two vectors and conditionally jump.
> +;; Advanced SIMD lacks a vector != comparison, but this is a quite common
> +;; operation.  To not pay the penalty for inverting == we can map our any
> +;; comparisons to all i.e. any(~x) => all(x).
> +;;
> +;; However unlike the AArch64 version, we can't optimize this further as the
> +;; chain is too long for combine due to these being unspecs so it doesn't fold
> +;; the operation to something simpler.
> +(define_expand "cbranch<mode>4"
> +  [(set (pc) (if_then_else
> +	      (match_operator 0 "expandable_comparison_operator"
> +	       [(match_operand:VDQI 1 "register_operand")
> +	        (match_operand:VDQI 2 "zero_operand")])
> +	      (label_ref (match_operand 3 "" ""))
> +	      (pc)))]
> +  "TARGET_NEON"
> +{
> +  rtx mask = operands[1];
> +
> +  /* For 128-bit vectors we need an additional reduction.  */
> +  if (known_eq (128, GET_MODE_BITSIZE (<MODE>mode)))
> +    {
> +      /* Always reduce using a V4SI.  */
> +      mask = gen_reg_rtx (V2SImode);
> +      rtx low = gen_reg_rtx (V2SImode);
> +      rtx high = gen_reg_rtx (V2SImode);
> +      emit_insn (gen_neon_vget_lowv4si (low, operands[1]));
> +      emit_insn (gen_neon_vget_highv4si (high, operands[1]));
> +      emit_insn (gen_neon_vpumaxv2si (mask, low, high));
> +    }
> +
> +  emit_insn (gen_neon_vpumaxv2si (mask, mask, mask));
> +
> +  rtx val = gen_reg_rtx (SImode);
> +  emit_move_insn (val, gen_lowpart (SImode, mask));
> +  emit_jump_insn (gen_cbranch_cc (operands[0], val, const0_rtx, operands[3]));
> +  DONE;
> +})
> +
>  ;; This pattern is renamed from "vec_extract<mode><V_elem_l>" to
>  ;; "neon_vec_extract<mode><V_elem_l>" and this pattern is called
>  ;; by define_expand in vec-common.md file.
> diff --git a/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c
> b/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c
> new file mode 100644
> index
> 0000000000000000000000000000000000000000..2c05aa10d26ed4ac9785672e
> 6e3b4355cef046dc
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c
> @@ -0,0 +1,136 @@
> +/* { dg-do compile } */
> +/* { dg-require-effective-target arm_neon_ok } */
> +/* { dg-require-effective-target arm32 } */
> +/* { dg-options "-O3 -march=armv8-a+simd -mfpu=auto -mfloat-abi=hard" } */
> +/* { dg-final { check-function-bodies "**" "" "" } } */
> +
> +#define N 640
> +int a[N] = {0};
> +int b[N] = {0};
> +
> +/* f1:
> +**	...
> +**	vcgt.s32	q[0-9]+, q[0-9]+, #0
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vmov	r[0-9]+, s[0-9]+	@ int
> +**	cmp	r[0-9]+, #0
> +**	bne	\.L[0-9]+
> +**	...
> +*/
> +void f1 ()
> +{
> +  for (int i = 0; i < N; i++)
> +    {
> +      b[i] += a[i];
> +      if (a[i] > 0)
> +	break;
> +    }
> +}
> +
> +/*
> +** f2:
> +**	...
> +**	vcge.s32	q[0-9]+, q[0-9]+, #0
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vmov	r[0-9]+, s[0-9]+	@ int
> +**	cmp	r[0-9]+, #0
> +**	bne	\.L[0-9]+
> +**	...
> +*/
> +void f2 ()
> +{
> +  for (int i = 0; i < N; i++)
> +    {
> +      b[i] += a[i];
> +      if (a[i] >= 0)
> +	break;
> +    }
> +}
> +
> +/*
> +** f3:
> +**	...
> +**	vceq.i32	q[0-9]+, q[0-9]+, #0
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vmov	r[0-9]+, s[0-9]+	@ int
> +**	cmp	r[0-9]+, #0
> +**	bne	\.L[0-9]+
> +**	...
> +*/
> +void f3 ()
> +{
> +  for (int i = 0; i < N; i++)
> +    {
> +      b[i] += a[i];
> +      if (a[i] == 0)
> +	break;
> +    }
> +}
> +
> +/*
> +** f4:
> +**	...
> +**	vceq.i32	q[0-9]+, q[0-9]+, #0
> +**	vmvn	q[0-9]+, q[0-9]+
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vmov	r[0-9]+, s[0-9]+	@ int
> +**	cmp	r[0-9]+, #0
> +**	bne	\.L[0-9]+
> +**	...
> +*/
> +void f4 ()
> +{
> +  for (int i = 0; i < N; i++)
> +    {
> +      b[i] += a[i];
> +      if (a[i] != 0)
> +	break;
> +    }
> +}
> +
> +/*
> +** f5:
> +**	...
> +**	vclt.s32	q[0-9]+, q[0-9]+, #0
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vmov	r[0-9]+, s[0-9]+	@ int
> +**	cmp	r[0-9]+, #0
> +**	bne	\.L[0-9]+
> +**	...
> +*/
> +void f5 ()
> +{
> +  for (int i = 0; i < N; i++)
> +    {
> +      b[i] += a[i];
> +      if (a[i] < 0)
> +	break;
> +    }
> +}
> +
> +/*
> +** f6:
> +**	...
> +**	vcle.s32	q[0-9]+, q[0-9]+, #0
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
> +**	vmov	r[0-9]+, s[0-9]+	@ int
> +**	cmp	r[0-9]+, #0
> +**	bne	\.L[0-9]+
> +**	...
> +*/
> +void f6 ()
> +{
> +  for (int i = 0; i < N; i++)
> +    {
> +      b[i] += a[i];
> +      if (a[i] <= 0)
> +	break;
> +    }
> +}
> +
> diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-
> supports.exp
> index
> 5516188dc0aa86d161d67dea5a7769e3c3d72f85..8f58671e6cfd3546c6a98e4034
> 1fe31c6492594b 100644
> --- a/gcc/testsuite/lib/target-supports.exp
> +++ b/gcc/testsuite/lib/target-supports.exp
> @@ -3784,6 +3784,7 @@ proc check_effective_target_vect_early_break { } {
>      return [check_cached_effective_target_indexed vect_early_break {
>        expr {
>  	[istarget aarch64*-*-*]
> +	|| [check_effective_target_arm_neon_ok]
>  	}}]
>  }
>  # Return 1 if the target supports hardware vectorization of complex additions of
> 
> 
> 
> 
> --

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH 20/21]Arm: Add Advanced SIMD cbranch implementation
  2023-06-28 13:40 [PATCH v5 0/19] Support early break/return auto-vectorization Tamar Christina
@ 2023-11-06  7:42 ` Tamar Christina
  2023-11-27 12:48   ` Kyrylo Tkachov
  0 siblings, 1 reply; 6+ messages in thread
From: Tamar Christina @ 2023-11-06  7:42 UTC (permalink / raw)
  To: gcc-patches
  Cc: nd, Ramana.Radhakrishnan, Richard.Earnshaw, nickc, Kyrylo.Tkachov

[-- Attachment #1: Type: text/plain, Size: 6094 bytes --]

Hi All,

This adds an implementation for conditional branch optab for AArch32.

For e.g.

void f1 ()
{
  for (int i = 0; i < N; i++)
    {
      b[i] += a[i];
      if (a[i] > 0)
	break;
    }
}

For 128-bit vectors we generate:

        vcgt.s32        q8, q9, #0
        vpmax.u32       d7, d16, d17
        vpmax.u32       d7, d7, d7
        vmov    r3, s14 @ int
        cmp     r3, #0

and for 64-bit vectors we can omit one vpmax as we still need to compress to
32 bits.

Bootstrapped Regtested on arm-none-linux-gnueabihf and no issues.

Ok for master?

Thanks,
Tamar

gcc/ChangeLog:

	* config/arm/neon.md (cbranch<mode>4): New.

gcc/testsuite/ChangeLog:

	* lib/target-supports.exp (vect_early_break): Add AArch32.
	* gcc.target/arm/vect-early-break-cbranch.c: New test.

--- inline copy of patch -- 
diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
index d213369ffc38fb88ad0357d848cc7da5af73bab7..130efbc37cfe3128533599dfadc344d2243dcb63 100644
--- a/gcc/config/arm/neon.md
+++ b/gcc/config/arm/neon.md
@@ -408,6 +408,45 @@ (define_insn "vec_extract<mode><V_elem_l>"
   [(set_attr "type" "neon_store1_one_lane<q>,neon_to_gp<q>")]
 )
 
+;; Patterns comparing two vectors and conditionally jump.
+;; Advanced SIMD lacks a vector != comparison, but this is a quite common
+;; operation.  To not pay the penalty for inverting == we can map our any
+;; comparisons to all i.e. any(~x) => all(x).
+;;
+;; However unlike the AArch64 version, we can't optimize this further as the
+;; chain is too long for combine due to these being unspecs so it doesn't fold
+;; the operation to something simpler.
+(define_expand "cbranch<mode>4"
+  [(set (pc) (if_then_else
+	      (match_operator 0 "expandable_comparison_operator"
+	       [(match_operand:VDQI 1 "register_operand")
+	        (match_operand:VDQI 2 "zero_operand")])
+	      (label_ref (match_operand 3 "" ""))
+	      (pc)))]
+  "TARGET_NEON"
+{
+  rtx mask = operands[1];
+
+  /* For 128-bit vectors we need an additional reduction.  */
+  if (known_eq (128, GET_MODE_BITSIZE (<MODE>mode)))
+    {
+      /* Always reduce using a V4SI.  */
+      mask = gen_reg_rtx (V2SImode);
+      rtx low = gen_reg_rtx (V2SImode);
+      rtx high = gen_reg_rtx (V2SImode);
+      emit_insn (gen_neon_vget_lowv4si (low, operands[1]));
+      emit_insn (gen_neon_vget_highv4si (high, operands[1]));
+      emit_insn (gen_neon_vpumaxv2si (mask, low, high));
+    }
+
+  emit_insn (gen_neon_vpumaxv2si (mask, mask, mask));
+
+  rtx val = gen_reg_rtx (SImode);
+  emit_move_insn (val, gen_lowpart (SImode, mask));
+  emit_jump_insn (gen_cbranch_cc (operands[0], val, const0_rtx, operands[3]));
+  DONE;
+})
+
 ;; This pattern is renamed from "vec_extract<mode><V_elem_l>" to
 ;; "neon_vec_extract<mode><V_elem_l>" and this pattern is called
 ;; by define_expand in vec-common.md file.
diff --git a/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c b/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c
new file mode 100644
index 0000000000000000000000000000000000000000..2c05aa10d26ed4ac9785672e6e3b4355cef046dc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c
@@ -0,0 +1,136 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-require-effective-target arm32 } */
+/* { dg-options "-O3 -march=armv8-a+simd -mfpu=auto -mfloat-abi=hard" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#define N 640
+int a[N] = {0};
+int b[N] = {0};
+
+/* f1:
+**	...
+**	vcgt.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f1 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] > 0)
+	break;
+    }
+}
+
+/*
+** f2:
+**	...
+**	vcge.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f2 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] >= 0)
+	break;
+    }
+}
+
+/*
+** f3:
+**	...
+**	vceq.i32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f3 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] == 0)
+	break;
+    }
+}
+
+/*
+** f4:
+**	...
+**	vceq.i32	q[0-9]+, q[0-9]+, #0
+**	vmvn	q[0-9]+, q[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f4 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] != 0)
+	break;
+    }
+}
+
+/*
+** f5:
+**	...
+**	vclt.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f5 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] < 0)
+	break;
+    }
+}
+
+/*
+** f6:
+**	...
+**	vcle.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f6 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] <= 0)
+	break;
+    }
+}
+
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index 5516188dc0aa86d161d67dea5a7769e3c3d72f85..8f58671e6cfd3546c6a98e40341fe31c6492594b 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -3784,6 +3784,7 @@ proc check_effective_target_vect_early_break { } {
     return [check_cached_effective_target_indexed vect_early_break {
       expr {
 	[istarget aarch64*-*-*]
+	|| [check_effective_target_arm_neon_ok]
 	}}]
 }
 # Return 1 if the target supports hardware vectorization of complex additions of




-- 

[-- Attachment #2: rb17512.patch --]
[-- Type: text/plain, Size: 5281 bytes --]

diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
index d213369ffc38fb88ad0357d848cc7da5af73bab7..130efbc37cfe3128533599dfadc344d2243dcb63 100644
--- a/gcc/config/arm/neon.md
+++ b/gcc/config/arm/neon.md
@@ -408,6 +408,45 @@ (define_insn "vec_extract<mode><V_elem_l>"
   [(set_attr "type" "neon_store1_one_lane<q>,neon_to_gp<q>")]
 )
 
+;; Patterns comparing two vectors and conditionally jump.
+;; Advanced SIMD lacks a vector != comparison, but this is a quite common
+;; operation.  To not pay the penalty for inverting == we can map our any
+;; comparisons to all i.e. any(~x) => all(x).
+;;
+;; However unlike the AArch64 version, we can't optimize this further as the
+;; chain is too long for combine due to these being unspecs so it doesn't fold
+;; the operation to something simpler.
+(define_expand "cbranch<mode>4"
+  [(set (pc) (if_then_else
+	      (match_operator 0 "expandable_comparison_operator"
+	       [(match_operand:VDQI 1 "register_operand")
+	        (match_operand:VDQI 2 "zero_operand")])
+	      (label_ref (match_operand 3 "" ""))
+	      (pc)))]
+  "TARGET_NEON"
+{
+  rtx mask = operands[1];
+
+  /* For 128-bit vectors we need an additional reduction.  */
+  if (known_eq (128, GET_MODE_BITSIZE (<MODE>mode)))
+    {
+      /* Always reduce using a V4SI.  */
+      mask = gen_reg_rtx (V2SImode);
+      rtx low = gen_reg_rtx (V2SImode);
+      rtx high = gen_reg_rtx (V2SImode);
+      emit_insn (gen_neon_vget_lowv4si (low, operands[1]));
+      emit_insn (gen_neon_vget_highv4si (high, operands[1]));
+      emit_insn (gen_neon_vpumaxv2si (mask, low, high));
+    }
+
+  emit_insn (gen_neon_vpumaxv2si (mask, mask, mask));
+
+  rtx val = gen_reg_rtx (SImode);
+  emit_move_insn (val, gen_lowpart (SImode, mask));
+  emit_jump_insn (gen_cbranch_cc (operands[0], val, const0_rtx, operands[3]));
+  DONE;
+})
+
 ;; This pattern is renamed from "vec_extract<mode><V_elem_l>" to
 ;; "neon_vec_extract<mode><V_elem_l>" and this pattern is called
 ;; by define_expand in vec-common.md file.
diff --git a/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c b/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c
new file mode 100644
index 0000000000000000000000000000000000000000..2c05aa10d26ed4ac9785672e6e3b4355cef046dc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/vect-early-break-cbranch.c
@@ -0,0 +1,136 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-require-effective-target arm32 } */
+/* { dg-options "-O3 -march=armv8-a+simd -mfpu=auto -mfloat-abi=hard" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#define N 640
+int a[N] = {0};
+int b[N] = {0};
+
+/* f1:
+**	...
+**	vcgt.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f1 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] > 0)
+	break;
+    }
+}
+
+/*
+** f2:
+**	...
+**	vcge.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f2 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] >= 0)
+	break;
+    }
+}
+
+/*
+** f3:
+**	...
+**	vceq.i32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f3 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] == 0)
+	break;
+    }
+}
+
+/*
+** f4:
+**	...
+**	vceq.i32	q[0-9]+, q[0-9]+, #0
+**	vmvn	q[0-9]+, q[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f4 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] != 0)
+	break;
+    }
+}
+
+/*
+** f5:
+**	...
+**	vclt.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f5 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] < 0)
+	break;
+    }
+}
+
+/*
+** f6:
+**	...
+**	vcle.s32	q[0-9]+, q[0-9]+, #0
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vpmax.u32	d[0-9]+, d[0-9]+, d[0-9]+
+**	vmov	r[0-9]+, s[0-9]+	@ int
+**	cmp	r[0-9]+, #0
+**	bne	\.L[0-9]+
+**	...
+*/
+void f6 ()
+{
+  for (int i = 0; i < N; i++)
+    {
+      b[i] += a[i];
+      if (a[i] <= 0)
+	break;
+    }
+}
+
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index 5516188dc0aa86d161d67dea5a7769e3c3d72f85..8f58671e6cfd3546c6a98e40341fe31c6492594b 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -3784,6 +3784,7 @@ proc check_effective_target_vect_early_break { } {
     return [check_cached_effective_target_indexed vect_early_break {
       expr {
 	[istarget aarch64*-*-*]
+	|| [check_effective_target_arm_neon_ok]
 	}}]
 }
 # Return 1 if the target supports hardware vectorization of complex additions of




^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2024-01-04 11:27 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-12-29 14:42 [PATCH 20/21]Arm: Add Advanced SIMD cbranch implementation Tamar Christina
2024-01-04 11:06 ` Tamar Christina
2024-01-04 11:12   ` Kyrylo Tkachov
2024-01-04 11:26     ` Tamar Christina
  -- strict thread matches above, loose matches on Subject: below --
2023-06-28 13:40 [PATCH v5 0/19] Support early break/return auto-vectorization Tamar Christina
2023-11-06  7:42 ` [PATCH 20/21]Arm: Add Advanced SIMD cbranch implementation Tamar Christina
2023-11-27 12:48   ` Kyrylo Tkachov

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).