public inbox for gcc-cvs@sourceware.org
* [gcc r13-1149] arm: mve: Don't force trivial vector literals to the pool
@ 2022-06-17  9:34 Richard Earnshaw
From: Richard Earnshaw @ 2022-06-17  9:34 UTC (permalink / raw)
  To: gcc-cvs

https://gcc.gnu.org/g:94018fd2675190a4353cb199da4957538f070886

commit r13-1149-g94018fd2675190a4353cb199da4957538f070886
Author: Richard Earnshaw <rearnsha@arm.com>
Date:   Fri Jun 17 10:30:57 2022 +0100

    arm: mve: Don't force trivial vector literals to the pool
    
    A bug in the ordering of the operands in the mve_mov<mode> pattern
    meant that all literal values were being pushed to the literal pool.
    This patch fixes that and simplifies some of the logic slightly so
    that we can use a simple switch statement.
    
    For example:
    void f (uint32_t *a)
    {
      int i;
      for (i = 0; i < 100; i++)
        a[i] += 1;
    }
    
    Now compiles to:
            push    {lr}
            mov     lr, #25
            vmov.i32        q2, #0x1  @ v4si
            ...
    
    instead of:
    
            push    {lr}
            mov     lr, #25
            vldr.64 d4, .L6
            vldr.64 d5, .L6+8
            ...
    .L7:
            .align  3
    .L6:
            .word   1
            .word   1
            .word   1
            .word   1
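
    The same improvement applies at other element widths.  As a purely
    hypothetical sketch (not part of this commit), the analogous
    uint16_t loop:

    void g (uint16_t *a)
    {
      int i;
      for (i = 0; i < 100; i++)
        a[i] += 2;
    }

    should now get its vectorised literal materialised with a single
    vmov.i16 (or vmov.i32, depending on the mode the vectoriser picks)
    rather than a pair of vldr.64 pool loads.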
    
    gcc/ChangeLog:
            * config/arm/mve.md (*mve_mov<mode>): Re-order constraints
            to avoid spilling trivial literals to the constant pool.
    
    gcc/testsuite/ChangeLog:
            * gcc.target/arm/acle/cde-mve-full-assembly.c: Adjust expected
            output.

Diff:
---
 gcc/config/arm/mve.md                              |  99 ++--
 .../gcc.target/arm/acle/cde-mve-full-assembly.c    | 549 ++++++++++-----------
 2 files changed, 311 insertions(+), 337 deletions(-)

diff --git a/gcc/config/arm/mve.md b/gcc/config/arm/mve.md
index f16991c0a34..c4dec01baac 100644
--- a/gcc/config/arm/mve.md
+++ b/gcc/config/arm/mve.md
@@ -18,66 +18,73 @@
 ;; <http://www.gnu.org/licenses/>.
 
 (define_insn "*mve_mov<mode>"
-  [(set (match_operand:MVE_types 0 "nonimmediate_operand" "=w,w,r,w,w,r,w,Ux,w")
-	(match_operand:MVE_types 1 "general_operand" "w,r,w,Dn,UxUi,r,Dm,w,Ul"))]
+  [(set (match_operand:MVE_types 0 "nonimmediate_operand" "=w,w,r,w   , w,   r,Ux,w")
+	(match_operand:MVE_types 1 "general_operand"      " w,r,w,DnDm,UxUi,r,w, Ul"))]
   "TARGET_HAVE_MVE || TARGET_HAVE_MVE_FLOAT"
 {
-  if (which_alternative == 3 || which_alternative == 6)
+  switch (which_alternative)
     {
-      int width, is_valid;
-      static char templ[40];
+    case 0:  /* [w,w].  */
+      return "vmov\t%q0, %q1";
 
-      is_valid = simd_immediate_valid_for_move (operands[1], <MODE>mode,
-	&operands[1], &width);
+    case 1:  /* [w,r].  */
+      return "vmov\t%e0, %Q1, %R1  %@ <mode>\;vmov\t%f0, %J1, %K1";
+
+    case 2:  /* [r,w].  */
+      return "vmov\t%Q0, %R0, %e1  %@ <mode>\;vmov\t%J0, %K0, %f1";
+
+    case 3:  /* [w,DnDm].  */
+      {
+	int width, is_valid;
+
+	is_valid = simd_immediate_valid_for_move (operands[1], <MODE>mode,
+						  &operands[1], &width);
+
+	gcc_assert (is_valid);
+
+	if (width == 0)
+	  return "vmov.f32\t%q0, %1  %@ <mode>";
+	else
+	  {
+	    const int templ_size = 40;
+	    static char templ[templ_size];
+	    if (snprintf (templ, templ_size,
+			  "vmov.i%d\t%%q0, %%x1  %%@ <mode>", width)
+		> templ_size)
+	      abort ();
+	    return templ;
+	  }
+      }
+
+    case 4:  /* [w,UxUi].  */
+      if (<MODE>mode == V2DFmode || <MODE>mode == V2DImode
+	  || <MODE>mode == TImode)
+	return "vldrw.u32\t%q0, %E1";
+      else
+	return "vldr<V_sz_elem1>.<V_sz_elem>\t%q0, %E1";
 
-      gcc_assert (is_valid != 0);
+    case 5:  /* [r,r].  */
+      return output_move_quad (operands);
 
-      if (width == 0)
-	return "vmov.f32\t%q0, %1  @ <mode>";
+    case 6:  /* [Ux,w].  */
+      if (<MODE>mode == V2DFmode || <MODE>mode == V2DImode
+	  || <MODE>mode == TImode)
+	return "vstrw.32\t%q1, %E0";
       else
-	sprintf (templ, "vmov.i%d\t%%q0, %%x1  @ <mode>", width);
-      return templ;
-    }
+	return "vstr<V_sz_elem1>.<V_sz_elem>\t%q1, %E0";
 
-  if (which_alternative == 4 || which_alternative == 7)
-    {
-      if (<MODE>mode == V2DFmode || <MODE>mode == V2DImode || <MODE>mode == TImode)
-	{
-	  if (which_alternative == 7)
-	    output_asm_insn ("vstrw.32\t%q1, %E0", operands);
-	  else
-	    output_asm_insn ("vldrw.u32\t%q0, %E1",operands);
-	}
-      else
-	{
-	  if (which_alternative == 7)
-	    output_asm_insn ("vstr<V_sz_elem1>.<V_sz_elem>\t%q1, %E0", operands);
-	  else
-	    output_asm_insn ("vldr<V_sz_elem1>.<V_sz_elem>\t%q0, %E1", operands);
-	}
-      return "";
-    }
-  switch (which_alternative)
-    {
-    case 0:
-      return "vmov\t%q0, %q1";
-    case 1:
-      return "vmov\t%e0, %Q1, %R1  @ <mode>\;vmov\t%f0, %J1, %K1";
-    case 2:
-      return "vmov\t%Q0, %R0, %e1  @ <mode>\;vmov\t%J0, %K0, %f1";
-    case 5:
-      return output_move_quad (operands);
-    case 8:
+    case 7:  /* [w,Ul].  */
 	return output_move_neon (operands);
+
     default:
       gcc_unreachable ();
       return "";
     }
 }
-  [(set_attr "type" "mve_move,mve_move,mve_move,mve_move,mve_load,multiple,mve_move,mve_store,mve_load")
-   (set_attr "length" "4,8,8,4,8,8,4,4,4")
-   (set_attr "thumb2_pool_range" "*,*,*,*,1018,*,*,*,*")
-   (set_attr "neg_pool_range" "*,*,*,*,996,*,*,*,*")])
+  [(set_attr "type" "mve_move,mve_move,mve_move,mve_move,mve_load,multiple,mve_store,mve_load")
+   (set_attr "length" "4,8,8,4,4,8,4,8")
+   (set_attr "thumb2_pool_range" "*,*,*,*,1018,*,*,*")
+   (set_attr "neg_pool_range" "*,*,*,*,996,*,*,*")])
 
 (define_insn "*mve_vdup<mode>"
   [(set (match_operand:MVE_vecs 0 "s_register_operand" "=w")
diff --git a/gcc/testsuite/gcc.target/arm/acle/cde-mve-full-assembly.c b/gcc/testsuite/gcc.target/arm/acle/cde-mve-full-assembly.c
index 501cc84da10..d025c3391fb 100644
--- a/gcc/testsuite/gcc.target/arm/acle/cde-mve-full-assembly.c
+++ b/gcc/testsuite/gcc.target/arm/acle/cde-mve-full-assembly.c
@@ -73,71 +73,61 @@
 */
 /*
 ** test_cde_vcx1qafloat16x8_tintint:
-** 	vldr\.64	d0, \.L([0-9]*)
-** 	vldr\.64	d1, \.L\1\+8
+** 	vmov\.i32	q0, #0  @ v16qi
 ** 	vcx1a	p0, q0, #33
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx1qafloat32x4_tintint:
-** 	vldr\.64	d0, \.L([0-9]*)
-** 	vldr\.64	d1, \.L\1\+8
+** 	vmov\.i32	q0, #0  @ v16qi
 ** 	vcx1a	p0, q0, #33
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx1qauint8x16_tintint:
-** 	vldr\.64	d0, \.L([0-9]*)
-** 	vldr\.64	d1, \.L\1\+8
+** 	vmov\.i32	q0, #0  @ v16qi
 ** 	vcx1a	p0, q0, #33
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx1qauint16x8_tintint:
-** 	vldr\.64	d0, \.L([0-9]*)
-** 	vldr\.64	d1, \.L\1\+8
+** 	vmov\.i32	q0, #0  @ v16qi
 ** 	vcx1a	p0, q0, #33
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx1qauint32x4_tintint:
-** 	vldr\.64	d0, \.L([0-9]*)
-** 	vldr\.64	d1, \.L\1\+8
+** 	vmov\.i32	q0, #0  @ v16qi
 ** 	vcx1a	p0, q0, #33
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx1qauint64x2_tintint:
-** 	vldr\.64	d0, \.L([0-9]*)
-** 	vldr\.64	d1, \.L\1\+8
+** 	vmov\.i32	q0, #0  @ v16qi
 ** 	vcx1a	p0, q0, #33
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx1qaint8x16_tintint:
-** 	vldr\.64	d0, \.L([0-9]*)
-** 	vldr\.64	d1, \.L\1\+8
+** 	vmov\.i32	q0, #0  @ v16qi
 ** 	vcx1a	p0, q0, #33
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx1qaint16x8_tintint:
-** 	vldr\.64	d0, \.L([0-9]*)
-** 	vldr\.64	d1, \.L\1\+8
+** 	vmov\.i32	q0, #0  @ v16qi
 ** 	vcx1a	p0, q0, #33
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx1qaint32x4_tintint:
-** 	vldr\.64	d0, \.L([0-9]*)
-** 	vldr\.64	d1, \.L\1\+8
+** 	vmov\.i32	q0, #0  @ v16qi
 ** 	vcx1a	p0, q0, #33
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx1qaint64x2_tintint:
-** 	vldr\.64	d0, \.L([0-9]*)
-** 	vldr\.64	d1, \.L\1\+8
+** 	vmov\.i32	q0, #0  @ v16qi
 ** 	vcx1a	p0, q0, #33
 ** 	bx	lr
 */
@@ -243,82 +233,72 @@
 */
 /*
 ** test_cde_vcx2qafloat16x8_tuint16x8_tint:
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L([0-9]*)
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L\1\+8
-** 	vcx2a	p0, (q[0-7]), q0, #33
-** 	vmov	q0, \2
+** 	vmov\.i32	(q[1-7]), #0  @ v16qi
+** 	vcx2a	p0, \1, q0, #33
+** 	vmov	q0, \1
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx2qafloat16x8_tfloat32x4_tint:
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L([0-9]*)
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L\1\+8
-** 	vcx2a	p0, (q[0-7]), q0, #33
-** 	vmov	q0, \2
+** 	vmov\.i32	(q[1-7]), #0  @ v16qi
+** 	vcx2a	p0, \1, q0, #33
+** 	vmov	q0, \1
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx2qafloat32x4_tuint8x16_tint:
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L([0-9]*)
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L\1\+8
-** 	vcx2a	p0, (q[0-7]), q0, #33
-** 	vmov	q0, \2
+** 	vmov\.i32	(q[1-7]), #0  @ v16qi
+** 	vcx2a	p0, \1, q0, #33
+** 	vmov	q0, \1
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx2qaint64x2_tuint8x16_tint:
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L([0-9]*)
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L\1\+8
-** 	vcx2a	p0, (q[0-7]), q0, #33
-** 	vmov	q0, \2
+** 	vmov\.i32	(q[1-7]), #0  @ v16qi
+** 	vcx2a	p0, \1, q0, #33
+** 	vmov	q0, \1
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx2qaint8x16_tuint8x16_tint:
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L([0-9]*)
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L\1\+8
-** 	vcx2a	p0, (q[0-7]), q0, #33
-** 	vmov	q0, \2
+** 	vmov\.i32	(q[1-7]), #0  @ v16qi
+** 	vcx2a	p0, \1, q0, #33
+** 	vmov	q0, \1
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx2qauint16x8_tuint8x16_tint:
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L([0-9]*)
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L\1\+8
-** 	vcx2a	p0, (q[0-7]), q0, #33
-** 	vmov	q0, \2
+** 	vmov\.i32	(q[1-7]), #0  @ v16qi
+** 	vcx2a	p0, \1, q0, #33
+** 	vmov	q0, \1
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx2qauint8x16_tint64x2_tint:
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L([0-9]*)
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L\1\+8
-** 	vcx2a	p0, (q[0-7]), q0, #33
-** 	vmov	q0, \2
+** 	vmov\.i32	(q[1-7]), #0  @ v16qi
+** 	vcx2a	p0, \1, q0, #33
+** 	vmov	q0, \1
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx2qauint8x16_tint8x16_tint:
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L([0-9]*)
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L\1\+8
-** 	vcx2a	p0, (q[0-7]), q0, #33
-** 	vmov	q0, \2
+** 	vmov\.i32	(q[1-7]), #0  @ v16qi
+** 	vcx2a	p0, \1, q0, #33
+** 	vmov	q0, \1
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx2qauint8x16_tuint16x8_tint:
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L([0-9]*)
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L\1\+8
-** 	vcx2a	p0, (q[0-7]), q0, #33
-** 	vmov	q0, \2
+** 	vmov\.i32	(q[1-7]), #0  @ v16qi
+** 	vcx2a	p0, \1, q0, #33
+** 	vmov	q0, \1
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx2qauint8x16_tuint8x16_tint:
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L([0-9]*)
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L\1\+8
-** 	vcx2a	p0, (q[0-7]), q0, #33
-** 	vmov	q0, \2
+** 	vmov\.i32	(q[1-7]), #0  @ v16qi
+** 	vcx2a	p0, \1, q0, #33
+** 	vmov	q0, \1
 ** 	bx	lr
 */
 /*
@@ -453,112 +433,99 @@
 */
 /*
 ** test_cde_vcx3qauint8x16_tuint8x16_tuint8x16_t:
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L([0-9]*)
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L\1\+8
-** 	vcx3a	p0, (q[0-7]), q0, q1, #12
-** 	vmov	q0, \2
+** 	vmov\.i32	(q[2-7]), #0  @ v16qi
+** 	vcx3a	p0, \1, q0, q1, #12
+** 	vmov	q0, \1
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3qafloat16x8_tfloat16x8_tfloat16x8_t:
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L([0-9]*)
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L\1\+8
-** 	vcx3a	p0, (q[0-7]), q0, q1, #12
-** 	vmov	q0, \2
+** 	vmov\.i32	(q[2-7]), #0  @ v16qi
+** 	vcx3a	p0, \1, q0, q1, #12
+** 	vmov	q0, \1
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3qafloat32x4_tuint64x2_tfloat16x8_t:
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L([0-9]*)
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L\1\+8
-** 	vcx3a	p0, (q[0-7]), q0, q1, #12
-** 	vmov	q0, \2
+** 	vmov\.i32	(q[2-7]), #0  @ v16qi
+** 	vcx3a	p0, \1, q0, q1, #12
+** 	vmov	q0, \1
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3qauint16x8_tuint8x16_tuint8x16_t:
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L([0-9]*)
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L\1\+8
-** 	vcx3a	p0, (q[0-7]), q0, q1, #12
-** 	vmov	q0, \2
+** 	vmov\.i32	(q[2-7]), #0  @ v16qi
+** 	vcx3a	p0, \1, q0, q1, #12
+** 	vmov	q0, \1
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3qauint8x16_tuint16x8_tuint8x16_t:
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L([0-9]*)
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L\1\+8
-** 	vcx3a	p0, (q[0-7]), q0, q1, #12
-** 	vmov	q0, \2
+** 	vmov\.i32	(q[2-7]), #0  @ v16qi
+** 	vcx3a	p0, \1, q0, q1, #12
+** 	vmov	q0, \1
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3qauint8x16_tuint8x16_tuint16x8_t:
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L([0-9]*)
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L\1\+8
-** 	vcx3a	p0, (q[0-7]), q0, q1, #12
-** 	vmov	q0, \2
+** 	vmov\.i32	(q[2-7]), #0  @ v16qi
+** 	vcx3a	p0, \1, q0, q1, #12
+** 	vmov	q0, \1
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3qaint8x16_tuint8x16_tuint8x16_t:
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L([0-9]*)
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L\1\+8
-** 	vcx3a	p0, (q[0-7]), q0, q1, #12
-** 	vmov	q0, \2
+** 	vmov\.i32	(q[2-7]), #0  @ v16qi
+** 	vcx3a	p0, \1, q0, q1, #12
+** 	vmov	q0, \1
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3qauint8x16_tint8x16_tuint8x16_t:
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L([0-9]*)
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L\1\+8
-** 	vcx3a	p0, (q[0-7]), q0, q1, #12
-** 	vmov	q0, \2
+** 	vmov\.i32	(q[2-7]), #0  @ v16qi
+** 	vcx3a	p0, \1, q0, q1, #12
+** 	vmov	q0, \1
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3qauint8x16_tuint8x16_tint8x16_t:
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L([0-9]*)
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L\1\+8
-** 	vcx3a	p0, (q[0-7]), q0, q1, #12
-** 	vmov	q0, \2
+** 	vmov\.i32	(q[2-7]), #0  @ v16qi
+** 	vcx3a	p0, \1, q0, q1, #12
+** 	vmov	q0, \1
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3qaint64x2_tuint8x16_tuint8x16_t:
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L([0-9]*)
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L\1\+8
-** 	vcx3a	p0, (q[0-7]), q0, q1, #12
-** 	vmov	q0, \2
+** 	vmov\.i32	(q[2-7]), #0  @ v16qi
+** 	vcx3a	p0, \1, q0, q1, #12
+** 	vmov	q0, \1
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3qauint8x16_tint64x2_tuint8x16_t:
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L([0-9]*)
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L\1\+8
-** 	vcx3a	p0, (q[0-7]), q0, q1, #12
-** 	vmov	q0, \2
+** 	vmov\.i32	(q[2-7]), #0  @ v16qi
+** 	vcx3a	p0, \1, q0, q1, #12
+** 	vmov	q0, \1
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3qauint8x16_tuint8x16_tint64x2_t:
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L([0-9]*)
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L\1\+8
-** 	vcx3a	p0, (q[0-7]), q0, q1, #12
-** 	vmov	q0, \2
+** 	vmov\.i32	(q[2-7]), #0  @ v16qi
+** 	vcx3a	p0, \1, q0, q1, #12
+** 	vmov	q0, \1
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3qauint8x16_tint64x2_tint64x2_t:
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L([0-9]*)
-** 	vldr\.64	d(?:[01][0-4]|[0-9]), \.L\1\+8
-** 	vcx3a	p0, (q[0-7]), q0, q1, #12
-** 	vmov	q0, \2
+** 	vmov\.i32	(q[2-7]), #0  @ v16qi
+** 	vcx3a	p0, \1, q0, q1, #12
+** 	vmov	q0, \1
 ** 	bx	lr
 */
 
 /* Predicated MVE intrinsics.  */
 /* Merging lane predication types.
-   NOTE: Depending on the target, the setup instructions (vldr's and vmsr) can
+   NOTE: Depending on the target, the setup instructions (vmov's and vmsr) can
    be in a different order.  Here we just check that all the expected setup
    instructions are there.  We don't check that the setup instructions are
    different since the likelihood of the compiler generating repeated versions
@@ -567,80 +534,80 @@
    contain back references).  */
 /*
 ** test_cde_vcx1q_mfloat16x8_tintint:
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
 ** 	vpst
 ** 	vcx1t	p0, q0, #32
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx1q_mfloat32x4_tintint:
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
 ** 	vpst
 ** 	vcx1t	p0, q0, #32
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx1q_muint8x16_tintint:
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
 ** 	vpst
 ** 	vcx1t	p0, q0, #32
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx1q_muint16x8_tintint:
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
 ** 	vpst
 ** 	vcx1t	p0, q0, #32
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx1q_muint32x4_tintint:
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
 ** 	vpst
 ** 	vcx1t	p0, q0, #32
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx1q_muint64x2_tintint:
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
 ** 	vpst
 ** 	vcx1t	p0, q0, #32
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx1q_mint8x16_tintint:
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
 ** 	vpst
 ** 	vcx1t	p0, q0, #32
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx1q_mint16x8_tintint:
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
 ** 	vpst
 ** 	vcx1t	p0, q0, #32
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx1q_mint32x4_tintint:
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
 ** 	vpst
 ** 	vcx1t	p0, q0, #32
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx1q_mint64x2_tintint:
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
 ** 	vpst
 ** 	vcx1t	p0, q0, #32
 ** 	bx	lr
@@ -649,80 +616,80 @@
 
 /*
 ** test_cde_vcx1qa_mfloat16x8_tintint:
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
 ** 	vpst
 ** 	vcx1at	p0, q0, #32
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx1qa_mfloat32x4_tintint:
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
 ** 	vpst
 ** 	vcx1at	p0, q0, #32
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx1qa_muint8x16_tintint:
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
 ** 	vpst
 ** 	vcx1at	p0, q0, #32
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx1qa_muint16x8_tintint:
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
 ** 	vpst
 ** 	vcx1at	p0, q0, #32
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx1qa_muint32x4_tintint:
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
 ** 	vpst
 ** 	vcx1at	p0, q0, #32
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx1qa_muint64x2_tintint:
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
 ** 	vpst
 ** 	vcx1at	p0, q0, #32
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx1qa_mint8x16_tintint:
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
 ** 	vpst
 ** 	vcx1at	p0, q0, #32
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx1qa_mint16x8_tintint:
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
 ** 	vpst
 ** 	vcx1at	p0, q0, #32
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx1qa_mint32x4_tintint:
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
 ** 	vpst
 ** 	vcx1at	p0, q0, #32
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx1qa_mint64x2_tintint:
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
-** 	(?:vldr\.64	d0, \.L[0-9]*\n\tvldr\.64	d1, \.L[0-9]*\+8|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
+** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	 P0, r2	@ movhi)
 ** 	vpst
 ** 	vcx1at	p0, q0, #32
 ** 	bx	lr
@@ -731,91 +698,91 @@
 
 /*
 ** test_cde_vcx2q_mfloat16x8_tuint16x8_tint:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
 ** 	vpst
-** 	vcx2t	p0, (q[0-7]), q0, #32
+** 	vcx2t	p0, (q[1-7]), q0, #32
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx2q_mfloat16x8_tfloat32x4_tint:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
 ** 	vpst
-** 	vcx2t	p0, (q[0-7]), q0, #32
+** 	vcx2t	p0, (q[1-7]), q0, #32
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx2q_mfloat32x4_tuint8x16_tint:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
 ** 	vpst
-** 	vcx2t	p0, (q[0-7]), q0, #32
+** 	vcx2t	p0, (q[1-7]), q0, #32
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx2q_mint64x2_tuint8x16_tint:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
 ** 	vpst
-** 	vcx2t	p0, (q[0-7]), q0, #32
+** 	vcx2t	p0, (q[1-7]), q0, #32
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx2q_mint8x16_tuint8x16_tint:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
 ** 	vpst
-** 	vcx2t	p0, (q[0-7]), q0, #32
+** 	vcx2t	p0, (q[1-7]), q0, #32
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx2q_muint16x8_tuint8x16_tint:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
 ** 	vpst
-** 	vcx2t	p0, (q[0-7]), q0, #32
+** 	vcx2t	p0, (q[1-7]), q0, #32
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx2q_muint8x16_tint64x2_tint:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
 ** 	vpst
-** 	vcx2t	p0, (q[0-7]), q0, #32
+** 	vcx2t	p0, (q[1-7]), q0, #32
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx2q_muint8x16_tint8x16_tint:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
 ** 	vpst
-** 	vcx2t	p0, (q[0-7]), q0, #32
+** 	vcx2t	p0, (q[1-7]), q0, #32
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx2q_muint8x16_tuint16x8_tint:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
 ** 	vpst
-** 	vcx2t	p0, (q[0-7]), q0, #32
+** 	vcx2t	p0, (q[1-7]), q0, #32
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx2q_muint8x16_tuint8x16_tint:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
 ** 	vpst
-** 	vcx2t	p0, (q[0-7]), q0, #32
+** 	vcx2t	p0, (q[1-7]), q0, #32
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
@@ -823,91 +790,91 @@
 
 /*
 ** test_cde_vcx2qa_mfloat16x8_tuint16x8_tint:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
 ** 	vpst
-** 	vcx2at	p0, (q[0-7]), q0, #32
+** 	vcx2at	p0, (q[1-7]), q0, #32
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx2qa_mfloat16x8_tfloat32x4_tint:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
 ** 	vpst
-** 	vcx2at	p0, (q[0-7]), q0, #32
+** 	vcx2at	p0, (q[1-7]), q0, #32
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx2qa_mfloat32x4_tuint8x16_tint:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
 ** 	vpst
-** 	vcx2at	p0, (q[0-7]), q0, #32
+** 	vcx2at	p0, (q[1-7]), q0, #32
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx2qa_mint64x2_tuint8x16_tint:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
 ** 	vpst
-** 	vcx2at	p0, (q[0-7]), q0, #32
+** 	vcx2at	p0, (q[1-7]), q0, #32
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx2qa_mint8x16_tuint8x16_tint:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
 ** 	vpst
-** 	vcx2at	p0, (q[0-7]), q0, #32
+** 	vcx2at	p0, (q[1-7]), q0, #32
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx2qa_muint16x8_tuint8x16_tint:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
 ** 	vpst
-** 	vcx2at	p0, (q[0-7]), q0, #32
+** 	vcx2at	p0, (q[1-7]), q0, #32
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx2qa_muint8x16_tint64x2_tint:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
 ** 	vpst
-** 	vcx2at	p0, (q[0-7]), q0, #32
+** 	vcx2at	p0, (q[1-7]), q0, #32
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx2qa_muint8x16_tint8x16_tint:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
 ** 	vpst
-** 	vcx2at	p0, (q[0-7]), q0, #32
+** 	vcx2at	p0, (q[1-7]), q0, #32
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx2qa_muint8x16_tuint16x8_tint:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
 ** 	vpst
-** 	vcx2at	p0, (q[0-7]), q0, #32
+** 	vcx2at	p0, (q[1-7]), q0, #32
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx2qa_muint8x16_tuint8x16_tint:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
+** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	 P0, r1	@ movhi)
 ** 	vpst
-** 	vcx2at	p0, (q[0-7]), q0, #32
+** 	vcx2at	p0, (q[1-7]), q0, #32
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
@@ -915,118 +882,118 @@
 
 /*
 ** test_cde_vcx3q_muint8x16_tuint8x16_tuint8x16_t:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
 ** 	vpst
-** 	vcx3t	p0, (q[0-7]), q0, q1, #15
+** 	vcx3t	p0, (q[2-7]), q0, q1, #15
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3q_mfloat16x8_tfloat16x8_tfloat16x8_t:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
 ** 	vpst
-** 	vcx3t	p0, (q[0-7]), q0, q1, #15
+** 	vcx3t	p0, (q[2-7]), q0, q1, #15
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3q_mfloat32x4_tuint64x2_tfloat16x8_t:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
 ** 	vpst
-** 	vcx3t	p0, (q[0-7]), q0, q1, #15
+** 	vcx3t	p0, (q[2-7]), q0, q1, #15
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3q_muint16x8_tuint8x16_tuint8x16_t:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
 ** 	vpst
-** 	vcx3t	p0, (q[0-7]), q0, q1, #15
+** 	vcx3t	p0, (q[2-7]), q0, q1, #15
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3q_muint8x16_tuint16x8_tuint8x16_t:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
 ** 	vpst
-** 	vcx3t	p0, (q[0-7]), q0, q1, #15
+** 	vcx3t	p0, (q[2-7]), q0, q1, #15
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3q_muint8x16_tuint8x16_tuint16x8_t:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
 ** 	vpst
-** 	vcx3t	p0, (q[0-7]), q0, q1, #15
+** 	vcx3t	p0, (q[2-7]), q0, q1, #15
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3q_mint8x16_tuint8x16_tuint8x16_t:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
 ** 	vpst
-** 	vcx3t	p0, (q[0-7]), q0, q1, #15
+** 	vcx3t	p0, (q[2-7]), q0, q1, #15
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3q_muint8x16_tint8x16_tuint8x16_t:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
 ** 	vpst
-** 	vcx3t	p0, (q[0-7]), q0, q1, #15
+** 	vcx3t	p0, (q[2-7]), q0, q1, #15
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3q_muint8x16_tuint8x16_tint8x16_t:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
 ** 	vpst
-** 	vcx3t	p0, (q[0-7]), q0, q1, #15
+** 	vcx3t	p0, (q[2-7]), q0, q1, #15
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3q_mint64x2_tuint8x16_tuint8x16_t:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
 ** 	vpst
-** 	vcx3t	p0, (q[0-7]), q0, q1, #15
+** 	vcx3t	p0, (q[2-7]), q0, q1, #15
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3q_muint8x16_tint64x2_tuint8x16_t:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
 ** 	vpst
-** 	vcx3t	p0, (q[0-7]), q0, q1, #15
+** 	vcx3t	p0, (q[2-7]), q0, q1, #15
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3q_muint8x16_tuint8x16_tint64x2_t:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
 ** 	vpst
-** 	vcx3t	p0, (q[0-7]), q0, q1, #15
+** 	vcx3t	p0, (q[2-7]), q0, q1, #15
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3q_muint8x16_tint64x2_tint64x2_t:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
 ** 	vpst
-** 	vcx3t	p0, (q[0-7]), q0, q1, #15
+** 	vcx3t	p0, (q[2-7]), q0, q1, #15
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
@@ -1034,118 +1001,118 @@
 
 /*
 ** test_cde_vcx3qa_muint8x16_tuint8x16_tuint8x16_t:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
 ** 	vpst
-** 	vcx3at	p0, (q[0-7]), q0, q1, #15
+** 	vcx3at	p0, (q[2-7]), q0, q1, #15
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3qa_mfloat16x8_tfloat16x8_tfloat16x8_t:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
 ** 	vpst
-** 	vcx3at	p0, (q[0-7]), q0, q1, #15
+** 	vcx3at	p0, (q[2-7]), q0, q1, #15
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3qa_mfloat32x4_tuint64x2_tfloat16x8_t:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
 ** 	vpst
-** 	vcx3at	p0, (q[0-7]), q0, q1, #15
+** 	vcx3at	p0, (q[2-7]), q0, q1, #15
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3qa_muint16x8_tuint8x16_tuint8x16_t:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
 ** 	vpst
-** 	vcx3at	p0, (q[0-7]), q0, q1, #15
+** 	vcx3at	p0, (q[2-7]), q0, q1, #15
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3qa_muint8x16_tuint16x8_tuint8x16_t:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
 ** 	vpst
-** 	vcx3at	p0, (q[0-7]), q0, q1, #15
+** 	vcx3at	p0, (q[2-7]), q0, q1, #15
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3qa_muint8x16_tuint8x16_tuint16x8_t:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
 ** 	vpst
-** 	vcx3at	p0, (q[0-7]), q0, q1, #15
+** 	vcx3at	p0, (q[2-7]), q0, q1, #15
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3qa_mint8x16_tuint8x16_tuint8x16_t:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
 ** 	vpst
-** 	vcx3at	p0, (q[0-7]), q0, q1, #15
+** 	vcx3at	p0, (q[2-7]), q0, q1, #15
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3qa_muint8x16_tint8x16_tuint8x16_t:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
 ** 	vpst
-** 	vcx3at	p0, (q[0-7]), q0, q1, #15
+** 	vcx3at	p0, (q[2-7]), q0, q1, #15
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3qa_muint8x16_tuint8x16_tint8x16_t:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
 ** 	vpst
-** 	vcx3at	p0, (q[0-7]), q0, q1, #15
+** 	vcx3at	p0, (q[2-7]), q0, q1, #15
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3qa_mint64x2_tuint8x16_tuint8x16_t:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
 ** 	vpst
-** 	vcx3at	p0, (q[0-7]), q0, q1, #15
+** 	vcx3at	p0, (q[2-7]), q0, q1, #15
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3qa_muint8x16_tint64x2_tuint8x16_t:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
 ** 	vpst
-** 	vcx3at	p0, (q[0-7]), q0, q1, #15
+** 	vcx3at	p0, (q[2-7]), q0, q1, #15
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3qa_muint8x16_tuint8x16_tint64x2_t:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
 ** 	vpst
-** 	vcx3at	p0, (q[0-7]), q0, q1, #15
+** 	vcx3at	p0, (q[2-7]), q0, q1, #15
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
 /*
 ** test_cde_vcx3qa_muint8x16_tint64x2_tint64x2_t:
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
-** 	(?:vldr\.64	d(?:[02468]|1[024]), \.L[0-9]*\n\tvldr\.64	d(?:[13579]|1[135]), \.L[0-9]*\+8|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
+** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	 P0, r0	@ movhi)
 ** 	vpst
-** 	vcx3at	p0, (q[0-7]), q0, q1, #15
+** 	vcx3at	p0, (q[2-7]), q0, q1, #15
 ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
 ** 	bx	lr
 */
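
The rewritten test assertions above expect the zero accumulator to be
materialised with "vmov.i32 qN, #0" instead of a pair of vldr.64 pool
loads.  As a rough, hypothetical reconstruction (the actual source is
gcc/testsuite/gcc.target/arm/acle/cde-mve-full-assembly.c, and the exact
signatures come from arm_cde.h), one such test function looks
approximately like:

    #include <arm_cde.h>

    /* "tintint" in the test name suggests two trailing int arguments;
       they are unused here, and this sketch only approximates the real
       test body.  */
    uint8x16_t test_cde_vcx1qauint8x16_tintint (int a, int b)
    {
      /* The zero accumulator is a trivial vector literal; with this
         patch it is emitted as "vmov.i32 q0, #0" rather than loaded
         from the constant pool.  */
      return __arm_vcx1qa (0, (uint8x16_t) { 0 }, 33);
    }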

