public inbox for gcc-cvs@sourceware.org
* [gcc(refs/users/meissner/heads/work146-vpair)] Rename things so it can be combined with the vsize branch.
@ 2023-11-18  2:26 Michael Meissner
From: Michael Meissner @ 2023-11-18  2:26 UTC (permalink / raw)
  To: gcc-cvs

https://gcc.gnu.org/g:bab4c5e2d349fb0648691d1c91ccff864f279ee8

commit bab4c5e2d349fb0648691d1c91ccff864f279ee8
Author: Michael Meissner <meissner@linux.ibm.com>
Date:   Fri Nov 17 21:25:34 2023 -0500

    Rename things so it can be combined with the vsize branch.
    
    2023-11-17  Michael Meissner  <meissner@linux.ibm.com>
    
    gcc/
    
            * config/rs6000/rs6000-builtins.def (__builtin_vpair*): Rename all insn
            names from VPAIR... to VPAIR_FUNC... to allow building the combined
            vsubreg branch.
            * config/rs6000/rs6000-overload.def (__builtin_vpair*): Likewise.
            * config/rs6000/rs6000.md (toplevel): Include vpair-func.md instead of
            vector-pair.md.
            * config/rs6000/t-rs6000 (MD_INCLUDES): Change vector-pair.md to
            vpair-func.md.
            * config/rs6000/vpair-func.md: Rename from vector-pair.md.  Change
            all VPAIR names to VPAIR_FUNC.

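The rename is purely internal: the user-visible __builtin_vpair_* names are
untouched, so existing callers keep compiling.  As a hedged sketch (not part
of the commit; assumes -mcpu=power10 with MMA enabled and this branch's
mapping of v256 to GCC's __vector_pair type):

/* Hypothetical example function; only the built-in names come from the
   declarations in the diff below.  */
__vector_pair
fma_scaled (__vector_pair a, __vector_pair b, float s)
{
  __vector_pair vs = __builtin_vpair_f32_splat (s); /* 8 copies of s */
  return __builtin_vpair_f32_fma (a, vs, b);        /* a * vs + b    */
}
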
Diff:
---
 gcc/config/rs6000/rs6000-builtins.def              | 190 +++++-----
 gcc/config/rs6000/rs6000-overload.def              |  20 +-
 gcc/config/rs6000/rs6000.md                        |   2 +-
 gcc/config/rs6000/t-rs6000                         |   2 +-
 .../rs6000/{vector-pair.md => vpair-func.md}       | 414 ++++++++++-----------
 5 files changed, 314 insertions(+), 314 deletions(-)

diff --git a/gcc/config/rs6000/rs6000-builtins.def b/gcc/config/rs6000/rs6000-builtins.def
index b9a16c01420..c66923e3c50 100644
--- a/gcc/config/rs6000/rs6000-builtins.def
+++ b/gcc/config/rs6000/rs6000-builtins.def
@@ -4135,302 +4135,302 @@
 ;; General vector pair built-in functions
 
   v256 __builtin_vpair_zero ();
-    VPAIR_ZERO vpair_zero {mma}
+    VPAIR_FUNC_ZERO vpair_func_zero {mma}
 
 ;; vector pair built-in functions for 8 32-bit float values
 
   v256 __builtin_vpair_f32_abs (v256);
-    VPAIR_F32_ABS vpair_abs_v8sf2 {mma,pair}
+    VPAIR_FUNC_F32_ABS vpair_func_abs_v8sf2 {mma,pair}
 
   v256 __builtin_vpair_f32_add (v256, v256);
-    VPAIR_F32_ADD vpair_add_v8sf3 {mma,pair}
+    VPAIR_FUNC_F32_ADD vpair_func_add_v8sf3 {mma,pair}
 
   float __builtin_vpair_f32_add_elements (v256);
-    VPAIR_F32_ADD_ELEMENTS vpair_reduc_plus_scale_v8sf {mma,pair}
+    VPAIR_FUNC_F32_ADD_ELEMENTS vpair_func_reduc_plus_scale_v8sf {mma,pair}
 
   v256 __builtin_vpair_f32_assemble (vf, vf);
-    VPAIR_F32_ASSEMBLE vpair_assemble_v8sf {mma,pair}
+    VPAIR_FUNC_F32_ASSEMBLE vpair_func_assemble_v8sf {mma,pair}
 
   vf __builtin_vpair_f32_extract_vector (v256, const int<1>);
-    VPAIR_F32_EXTRACT_VECTOR vpair_extract_vector_v8sf {mma,pair}
+    VPAIR_FUNC_F32_EXTRACT_VECTOR vpair_func_extract_vector_v8sf {mma,pair}
 
   v256 __builtin_vpair_f32_fma (v256, v256, v256);
-    VPAIR_F32_FMA vpair_fma_v8sf4 {mma,pair}
+    VPAIR_FUNC_F32_FMA vpair_func_fma_v8sf4 {mma,pair}
 
   v256 __builtin_vpair_f32_max (v256, v256);
-    VPAIR_F32_MAX vpair_smax_v8sf3 {mma,pair}
+    VPAIR_FUNC_F32_MAX vpair_func_smax_v8sf3 {mma,pair}
 
   v256 __builtin_vpair_f32_min (v256, v256);
-    VPAIR_F32_MIN vpair_smin_v8sf3 {mma,pair}
+    VPAIR_FUNC_F32_MIN vpair_func_smin_v8sf3 {mma,pair}
 
   v256 __builtin_vpair_f32_mul (v256, v256);
-    VPAIR_F32_MUL vpair_mul_v8sf3 {mma,pair}
+    VPAIR_FUNC_F32_MUL vpair_func_mul_v8sf3 {mma,pair}
 
   v256 __builtin_vpair_f32_neg (v256);
-    VPAIR_F32_NEG vpair_neg_v8sf2 {mma,pair}
+    VPAIR_FUNC_F32_NEG vpair_func_neg_v8sf2 {mma,pair}
 
   v256 __builtin_vpair_f32_splat (float);
-    VPAIR_F32_SPLAT vpair_splat_v8sf {mma,pair}
+    VPAIR_FUNC_F32_SPLAT vpair_func_splat_v8sf {mma,pair}
 
   v256 __builtin_vpair_f32_sub (v256, v256);
-    VPAIR_F32_SUB vpair_sub_v8sf3 {mma,pair}
+    VPAIR_FUNC_F32_SUB vpair_func_sub_v8sf3 {mma,pair}
 
 ;; vector pair built-in functions for 4 64-bit double values
 
   v256 __builtin_vpair_f64_abs (v256);
-    VPAIR_F64_ABS vpair_abs_v4df2 {mma,pair}
+    VPAIR_FUNC_F64_ABS vpair_func_abs_v4df2 {mma,pair}
 
   v256 __builtin_vpair_f64_add (v256, v256);
-    VPAIR_F64_ADD vpair_add_v4df3 {mma,pair}
+    VPAIR_FUNC_F64_ADD vpair_func_add_v4df3 {mma,pair}
 
   double __builtin_vpair_f64_add_elements (v256);
-    VPAIR_F64_ADD_ELEMENTS vpair_reduc_plus_scale_v4df {mma,pair}
+    VPAIR_FUNC_F64_ADD_ELEMENTS vpair_func_reduc_plus_scale_v4df {mma,pair}
 
  v256 __builtin_vpair_f64_assemble (vd, vd);
-    VPAIR_F64_ASSEMBLE vpair_assemble_v4df {mma,pair}
+    VPAIR_FUNC_F64_ASSEMBLE vpair_func_assemble_v4df {mma,pair}
 
   vd __builtin_vpair_f64_extract_vector (v256, const int<1>);
-    VPAIR_F64_EXTRACT_VECTOR vpair_extract_vector_v4df {mma,pair}
+    VPAIR_FUNC_F64_EXTRACT_VECTOR vpair_func_extract_vector_v4df {mma,pair}
 
   v256 __builtin_vpair_f64_fma (v256, v256, v256);
-    VPAIR_F64_FMA vpair_fma_v4df4 {mma,pair}
+    VPAIR_FUNC_F64_FMA vpair_func_fma_v4df4 {mma,pair}
 
   v256 __builtin_vpair_f64_max (v256, v256);
-    VPAIR_F64_MAX vpair_smax_v4df3 {mma,pair}
+    VPAIR_FUNC_F64_MAX vpair_func_smax_v4df3 {mma,pair}
 
   v256 __builtin_vpair_f64_min (v256, v256);
-    VPAIR_F64_MIN vpair_smin_v4df3 {mma,pair}
+    VPAIR_FUNC_F64_MIN vpair_func_smin_v4df3 {mma,pair}
 
   v256 __builtin_vpair_f64_mul (v256, v256);
-    VPAIR_F64_MUL vpair_mul_v4df3 {mma,pair}
+    VPAIR_FUNC_F64_MUL vpair_func_mul_v4df3 {mma,pair}
 
   v256 __builtin_vpair_f64_neg (v256);
-    VPAIR_F64_NEG vpair_neg_v4df2 {mma,pair}
+    VPAIR_FUNC_F64_NEG vpair_func_neg_v4df2 {mma,pair}
 
   v256 __builtin_vpair_f64_splat (double);
-    VPAIR_F64_SPLAT vpair_splat_v4df {mma,pair}
+    VPAIR_FUNC_F64_SPLAT vpair_func_splat_v4df {mma,pair}
 
   v256 __builtin_vpair_f64_sub (v256, v256);
-    VPAIR_F64_SUB vpair_sub_v4df3 {mma,pair}
+    VPAIR_FUNC_F64_SUB vpair_func_sub_v4df3 {mma,pair}
 
 ;; vector pair built-in functions for 32 8-bit unsigned char or
 ;; signed char values
 
   v256 __builtin_vpair_i8_add (v256, v256);
-    VPAIR_I8_ADD vpair_add_v32qi3 {mma,pair}
+    VPAIR_FUNC_I8_ADD vpair_func_add_v32qi3 {mma,pair}
 
   v256 __builtin_vpair_i8_and (v256, v256);
-    VPAIR_I8_AND vpair_and_v32qi3 {mma,pair}
+    VPAIR_FUNC_I8_AND vpair_func_and_v32qi3 {mma,pair}
 
   v256 __builtin_vpair_i8_assemble (vsc, vsc);
-    VPAIR_I8_ASSEMBLE vpair_assemble_v32qi {mma,pair}
+    VPAIR_FUNC_I8_ASSEMBLE vpair_func_assemble_v32qi {mma,pair}
 
   vsc __builtin_vpair_i8_extract_vector (v256, const int<1>);
-    VPAIR_I8_EXTRACT_VECTOR vpair_extract_vector_v32qi {mma,pair}
+    VPAIR_FUNC_I8_EXTRACT_VECTOR vpair_func_extract_vector_v32qi {mma,pair}
 
   v256 __builtin_vpair_i8_ior (v256, v256);
-    VPAIR_I8_IOR vpair_ior_v32qi3 {mma,pair}
+    VPAIR_FUNC_I8_IOR vpair_func_ior_v32qi3 {mma,pair}
 
   v256 __builtin_vpair_i8_max (v256, v256);
-    VPAIR_I8_MAX vpair_smax_v32qi3 {mma,pair}
+    VPAIR_FUNC_I8_MAX vpair_func_smax_v32qi3 {mma,pair}
 
   v256 __builtin_vpair_i8_min (v256, v256);
-    VPAIR_I8_MIN vpair_smin_v32qi3 {mma,pair}
+    VPAIR_FUNC_I8_MIN vpair_func_smin_v32qi3 {mma,pair}
 
   v256 __builtin_vpair_i8_neg (v256);
-    VPAIR_I8_NEG vpair_neg_v32qi2 {mma,pair}
+    VPAIR_FUNC_I8_NEG vpair_func_neg_v32qi2 {mma,pair}
 
   v256 __builtin_vpair_i8_not (v256);
-    VPAIR_I8_NOT vpair_not_v32qi2 {mma,pair}
+    VPAIR_FUNC_I8_NOT vpair_func_not_v32qi2 {mma,pair}
 
   v256 __builtin_vpair_i8_splat (signed char);
-    VPAIR_I8_SPLAT vpair_splat_v32qi {mma,pair}
+    VPAIR_FUNC_I8_SPLAT vpair_func_splat_v32qi {mma,pair}
 
   v256 __builtin_vpair_i8_sub (v256, v256);
-    VPAIR_I8_SUB vpair_sub_v32qi3 {mma,pair}
+    VPAIR_FUNC_I8_SUB vpair_func_sub_v32qi3 {mma,pair}
 
   v256 __builtin_vpair_i8_xor (v256, v256);
-    VPAIR_I8_XOR vpair_xor_v32qi3 {mma,pair}
+    VPAIR_FUNC_I8_XOR vpair_func_xor_v32qi3 {mma,pair}
 
   v256 __builtin_vpair_i8u_assemble (vuc, vuc);
-    VPAIR_I8U_ASSEMBLE vpair_assemble_v32qi {mma,pair}
+    VPAIR_FUNC_I8U_ASSEMBLE vpair_func_assemble_v32qi {mma,pair}
 
   vuc __builtin_vpair_i8u_extract_vector (v256, const int<1>);
-    VPAIR_I8U_EXTRACT_VECTOR vpair_extract_vector_v32qi {mma,pair}
+    VPAIR_FUNC_I8U_EXTRACT_VECTOR vpair_func_extract_vector_v32qi {mma,pair}
 
   v256 __builtin_vpair_i8u_max (v256, v256);
-    VPAIR_I8U_MAX vpair_umax_v32qi3 {mma,pair}
+    VPAIR_FUNC_I8U_MAX vpair_func_umax_v32qi3 {mma,pair}
 
   v256 __builtin_vpair_i8u_min (v256, v256);
-    VPAIR_I8U_MIN vpair_umin_v32qi3 {mma,pair}
+    VPAIR_FUNC_I8U_MIN vpair_func_umin_v32qi3 {mma,pair}
 
   v256 __builtin_vpair_i8u_splat (unsigned char);
-    VPAIR_I8U_SPLAT vpair_splat_v32qi {mma,pair}
+    VPAIR_FUNC_I8U_SPLAT vpair_func_splat_v32qi {mma,pair}
 
 ;; vector pair built-in functions for 16 16-bit unsigned short or
 ;; signed short values
 
   v256 __builtin_vpair_i16_add (v256, v256);
-    VPAIR_I16_ADD vpair_add_v16hi3 {mma,pair}
+    VPAIR_FUNC_I16_ADD vpair_func_add_v16hi3 {mma,pair}
 
   v256 __builtin_vpair_i16_and (v256, v256);
-    VPAIR_I16_AND vpair_and_v16hi3 {mma,pair}
+    VPAIR_FUNC_I16_AND vpair_func_and_v16hi3 {mma,pair}
 
   v256 __builtin_vpair_i16_assemble (vss, vss);
-    VPAIR_I16_ASSEMBLE vpair_assemble_v16hi {mma,pair}
+    VPAIR_FUNC_I16_ASSEMBLE vpair_func_assemble_v16hi {mma,pair}
 
   vss __builtin_vpair_i16_extract_vector (v256, const int<1>);
-    VPAIR_I16_EXTRACT_VECTOR vpair_extract_vector_v16hi {mma,pair}
+    VPAIR_FUNC_I16_EXTRACT_VECTOR vpair_func_extract_vector_v16hi {mma,pair}
 
   v256 __builtin_vpair_i16_ior (v256, v256);
-    VPAIR_I16_IOR vpair_ior_v16hi3 {mma,pair}
+    VPAIR_FUNC_I16_IOR vpair_func_ior_v16hi3 {mma,pair}
 
   v256 __builtin_vpair_i16_max (v256, v256);
-    VPAIR_I16_MAX vpair_smax_v16hi3 {mma,pair}
+    VPAIR_FUNC_I16_MAX vpair_func_smax_v16hi3 {mma,pair}
 
   v256 __builtin_vpair_i16_min (v256, v256);
-    VPAIR_I16_MIN vpair_smin_v16hi3 {mma,pair}
+    VPAIR_FUNC_I16_MIN vpair_func_smin_v16hi3 {mma,pair}
 
   v256 __builtin_vpair_i16_neg (v256);
-    VPAIR_I16_NEG vpair_neg_v16hi2 {mma,pair}
+    VPAIR_FUNC_I16_NEG vpair_func_neg_v16hi2 {mma,pair}
 
   v256 __builtin_vpair_i16_not (v256);
-    VPAIR_I16_NOT vpair_not_v16hi2 {mma,pair}
+    VPAIR_FUNC_I16_NOT vpair_func_not_v16hi2 {mma,pair}
 
   v256 __builtin_vpair_i16_splat (short);
-    VPAIR_I16_SPLAT vpair_splat_v16hi {mma,pair}
+    VPAIR_FUNC_I16_SPLAT vpair_func_splat_v16hi {mma,pair}
 
   v256 __builtin_vpair_i16_sub (v256, v256);
-    VPAIR_I16_SUB vpair_sub_v16hi3 {mma,pair}
+    VPAIR_FUNC_I16_SUB vpair_func_sub_v16hi3 {mma,pair}
 
   v256 __builtin_vpair_i16_xor (v256, v256);
-    VPAIR_I16_XOR vpair_xor_v16hi3 {mma,pair}
+    VPAIR_FUNC_I16_XOR vpair_func_xor_v16hi3 {mma,pair}
 
   v256 __builtin_vpair_i16u_assemble (vus, vus);
-    VPAIR_I16U_ASSEMBLE vpair_assemble_v16hi {mma,pair}
+    VPAIR_FUNC_I16U_ASSEMBLE vpair_func_assemble_v16hi {mma,pair}
 
   vus __builtin_vpair_i16u_extract_vector (v256, const int<1>);
-    VPAIR_I16U_EXTRACT_VECTOR vpair_extract_vector_v16hi {mma,pair}
+    VPAIR_FUNC_I16U_EXTRACT_VECTOR vpair_func_extract_vector_v16hi {mma,pair}
 
   v256 __builtin_vpair_i16u_max (v256, v256);
-    VPAIR_I16U_MAX vpair_umax_v16hi3 {mma,pair}
+    VPAIR_FUNC_I16U_MAX vpair_func_umax_v16hi3 {mma,pair}
 
   v256 __builtin_vpair_i16u_min (v256, v256);
-    VPAIR_I16U_MIN vpair_umin_v16hi3 {mma,pair}
+    VPAIR_FUNC_I16U_MIN vpair_func_umin_v16hi3 {mma,pair}
 
   v256 __builtin_vpair_i16u_splat (unsigned short);
-    VPAIR_I16U_SPLAT vpair_splat_v16hi {mma,pair}
+    VPAIR_FUNC_I16U_SPLAT vpair_func_splat_v16hi {mma,pair}
 
 ;; vector pair built-in functions for 8 32-bit unsigned int or
 ;; signed int values
 
   v256 __builtin_vpair_i32_add (v256, v256);
-    VPAIR_I32_ADD vpair_add_v8si3 {mma,pair}
+    VPAIR_FUNC_I32_ADD vpair_func_add_v8si3 {mma,pair}
 
   v256 __builtin_vpair_i32_and (v256, v256);
-    VPAIR_I32_AND vpair_and_v8si3 {mma,pair}
+    VPAIR_FUNC_I32_AND vpair_func_and_v8si3 {mma,pair}
 
   v256 __builtin_vpair_i32_assemble (vsi, vsi);
-    VPAIR_I32_ASSEMBLE vpair_assemble_v8si {mma,pair}
+    VPAIR_FUNC_I32_ASSEMBLE vpair_func_assemble_v8si {mma,pair}
 
   vsi __builtin_vpair_i32_extract_vector (v256, const int<1>);
-    VPAIR_I32_EXTRACT_VECTOR vpair_extract_vector_v8si {mma,pair}
+    VPAIR_FUNC_I32_EXTRACT_VECTOR vpair_func_extract_vector_v8si {mma,pair}
 
   v256 __builtin_vpair_i32_ior (v256, v256);
-    VPAIR_I32_IOR vpair_ior_v8si3 {mma,pair}
+    VPAIR_FUNC_I32_IOR vpair_func_ior_v8si3 {mma,pair}
 
   v256 __builtin_vpair_i32_max (v256, v256);
-    VPAIR_I32_MAX vpair_smax_v8si3 {mma,pair}
+    VPAIR_FUNC_I32_MAX vpair_func_smax_v8si3 {mma,pair}
 
   v256 __builtin_vpair_i32_min (v256, v256);
-    VPAIR_I32_MIN vpair_smin_v8si3 {mma,pair}
+    VPAIR_FUNC_I32_MIN vpair_func_smin_v8si3 {mma,pair}
 
   v256 __builtin_vpair_i32_neg (v256);
-    VPAIR_I32_NEG vpair_neg_v8si2 {mma,pair}
+    VPAIR_FUNC_I32_NEG vpair_func_neg_v8si2 {mma,pair}
 
   v256 __builtin_vpair_i32_not (v256);
-    VPAIR_I32_NOT vpair_not_v8si2 {mma,pair}
+    VPAIR_FUNC_I32_NOT vpair_func_not_v8si2 {mma,pair}
 
   v256 __builtin_vpair_i32_splat (int);
-    VPAIR_I32_SPLAT vpair_splat_v8si {mma,pair}
+    VPAIR_FUNC_I32_SPLAT vpair_func_splat_v8si {mma,pair}
 
   v256 __builtin_vpair_i32_sub (v256, v256);
-    VPAIR_I32_SUB vpair_sub_v8si3 {mma,pair}
+    VPAIR_FUNC_I32_SUB vpair_func_sub_v8si3 {mma,pair}
 
   v256 __builtin_vpair_i32_xor (v256, v256);
-    VPAIR_I32_XOR vpair_xor_v8si3 {mma,pair}
+    VPAIR_FUNC_I32_XOR vpair_func_xor_v8si3 {mma,pair}
 
   v256 __builtin_vpair_i32u_assemble (vui, vui);
-    VPAIR_I32U_ASSEMBLE vpair_assemble_v8si {mma,pair}
+    VPAIR_FUNC_I32U_ASSEMBLE vpair_func_assemble_v8si {mma,pair}
 
   vui __builtin_vpair_i32u_extract_vector (v256, const int<1>);
-    VPAIR_I32U_EXTRACT_VECTOR vpair_extract_vector_v8si {mma,pair}
+    VPAIR_FUNC_I32U_EXTRACT_VECTOR vpair_func_extract_vector_v8si {mma,pair}
 
   v256 __builtin_vpair_i32u_max (v256, v256);
-    VPAIR_I32U_MAX vpair_umax_v8si3 {mma,pair}
+    VPAIR_FUNC_I32U_MAX vpair_func_umax_v8si3 {mma,pair}
 
   v256 __builtin_vpair_i32u_min (v256, v256);
-    VPAIR_I32U_MIN vpair_umin_v8si3 {mma,pair}
+    VPAIR_FUNC_I32U_MIN vpair_func_umin_v8si3 {mma,pair}
 
   v256 __builtin_vpair_i32u_splat (unsigned int);
-    VPAIR_I32U_SPLAT vpair_splat_v8si {mma,pair}
+    VPAIR_FUNC_I32U_SPLAT vpair_func_splat_v8si {mma,pair}
 
 ;; vector pair built-in functions for 4 64-bit unsigned long long or
 ;; signed long long values
 
   v256 __builtin_vpair_i64_add (v256, v256);
-    VPAIR_I64_ADD vpair_add_v4di3 {mma,pair}
+    VPAIR_FUNC_I64_ADD vpair_func_add_v4di3 {mma,pair}
 
   long long __builtin_vpair_i64_add_elements (v256);
-    VPAIR_I64_ADD_ELEMENTS vpair_reduc_plus_scale_v4di {mma,pair,no32bit}
+    VPAIR_FUNC_I64_ADD_ELEMENTS vpair_func_reduc_plus_scale_v4di {mma,pair,no32bit}
 
   v256 __builtin_vpair_i64_and (v256, v256);
-    VPAIR_I64_AND vpair_and_v4di3 {mma,pair}
+    VPAIR_FUNC_I64_AND vpair_func_and_v4di3 {mma,pair}
 
   v256 __builtin_vpair_i64_assemble (vsll, vsll);
-    VPAIR_I64_ASSEMBLE vpair_assemble_v4di {mma,pair}
+    VPAIR_FUNC_I64_ASSEMBLE vpair_func_assemble_v4di {mma,pair}
 
   vsll __builtin_vpair_i64_extract_vector (v256, const int<1>);
-    VPAIR_I64_EXTRACT_VECTOR vpair_extract_vector_v4di {mma,pair}
+    VPAIR_FUNC_I64_EXTRACT_VECTOR vpair_func_extract_vector_v4di {mma,pair}
 
   v256 __builtin_vpair_i64_ior (v256, v256);
-    VPAIR_I64_IOR vpair_ior_v4di3 {mma,pair}
+    VPAIR_FUNC_I64_IOR vpair_func_ior_v4di3 {mma,pair}
 
   v256 __builtin_vpair_i64_max (v256, v256);
-    VPAIR_I64_MAX vpair_smax_v4di3 {mma,pair}
+    VPAIR_FUNC_I64_MAX vpair_func_smax_v4di3 {mma,pair}
 
   v256 __builtin_vpair_i64_min (v256, v256);
-    VPAIR_I64_MIN vpair_smin_v4di3 {mma,pair}
+    VPAIR_FUNC_I64_MIN vpair_func_smin_v4di3 {mma,pair}
 
   v256 __builtin_vpair_i64_neg (v256);
-    VPAIR_I64_NEG vpair_neg_v4di2 {mma,pair}
+    VPAIR_FUNC_I64_NEG vpair_func_neg_v4di2 {mma,pair}
 
   v256 __builtin_vpair_i64_not (v256);
-    VPAIR_I64_NOT vpair_not_v4di2 {mma,pair}
+    VPAIR_FUNC_I64_NOT vpair_func_not_v4di2 {mma,pair}
 
   v256 __builtin_vpair_i64_splat (long long);
-    VPAIR_I64_SPLAT vpair_splat_v4di {mma,pair}
+    VPAIR_FUNC_I64_SPLAT vpair_func_splat_v4di {mma,pair}
 
   v256 __builtin_vpair_i64_sub (v256, v256);
-    VPAIR_I64_SUB vpair_sub_v4di3 {mma,pair}
+    VPAIR_FUNC_I64_SUB vpair_func_sub_v4di3 {mma,pair}
 
   v256 __builtin_vpair_i64_xor (v256, v256);
-    VPAIR_I64_XOR vpair_xor_v4di3 {mma,pair}
+    VPAIR_FUNC_I64_XOR vpair_func_xor_v4di3 {mma,pair}
 
   unsigned long long __builtin_vpair_i64u_add_elements (v256);
-    VPAIR_I64U_ADD_ELEMENTS vpair_reduc_plus_scale_v4di {mma,pair,no32bit}
+    VPAIR_FUNC_I64U_ADD_ELEMENTS vpair_func_reduc_plus_scale_v4di {mma,pair,no32bit}
 
   v256 __builtin_vpair_i64u_assemble (vull, vull);
-    VPAIR_I64U_ASSEMBLE vpair_assemble_v4di {mma,pair}
+    VPAIR_FUNC_I64U_ASSEMBLE vpair_func_assemble_v4di {mma,pair}
 
   vull __builtin_vpair_i64u_extract_vector (v256, const int<1>);
-    VPAIR_I64U_EXTRACT_VECTOR vpair_extract_vector_v4di {mma,pair}
+    VPAIR_FUNC_I64U_EXTRACT_VECTOR vpair_func_extract_vector_v4di {mma,pair}
 
   v256 __builtin_vpair_i64u_max (v256, v256);
-    VPAIR_I64U_MAX vpair_umax_v4di3 {mma,pair}
+    VPAIR_FUNC_I64U_MAX vpair_func_umax_v4di3 {mma,pair}
 
   v256 __builtin_vpair_i64u_min (v256, v256);
-    VPAIR_I64U_MIN vpair_umin_v4di3 {mma,pair}
+    VPAIR_FUNC_I64U_MIN vpair_func_umin_v4di3 {mma,pair}
 
   v256 __builtin_vpair_i64u_splat (unsigned long long);
-    VPAIR_I64U_SPLAT vpair_splat_v4di {mma,pair}
+    VPAIR_FUNC_I64U_SPLAT vpair_func_splat_v4di {mma,pair}
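
Each entry above pairs a built-in declaration with its enum identifier and
the insn (pattern) name it expands to; both identifiers must be renamed in
lock step or built-in expansion would look up a pattern that no longer
exists in the renamed .md file.  A hedged usage sketch of the f64 entries
(assuming vd is __vector double and v256 is __vector_pair, per the branch):

/* Hypothetical example functions; sketch only, not part of the commit.  */
__vector_pair
assemble_f64 (__vector double hi, __vector double lo)
{
  /* Expands via VPAIR_FUNC_F64_ASSEMBLE to vpair_func_assemble_v4df.  */
  return __builtin_vpair_f64_assemble (hi, lo);
}

double
sum_f64 (__vector_pair p)
{
  /* VPAIR_FUNC_F64_ADD_ELEMENTS -> vpair_func_reduc_plus_scale_v4df.  */
  return __builtin_vpair_f64_add_elements (p);
}
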
diff --git a/gcc/config/rs6000/rs6000-overload.def b/gcc/config/rs6000/rs6000-overload.def
index e3fb456e665..0f965b91a7c 100644
--- a/gcc/config/rs6000/rs6000-overload.def
+++ b/gcc/config/rs6000/rs6000-overload.def
@@ -6200,22 +6200,22 @@
 
 [VPAIR_ASSEMBLE, vpair_assemble, __builtin_vpair_assemble]
   v256 __builtin_vpair_assemble (vf, vf);
-    VPAIR_F32_ASSEMBLE
+    VPAIR_FUNC_F32_ASSEMBLE
   v256 __builtin_vpair_assemble (vd, vd);
-    VPAIR_F64_ASSEMBLE
+    VPAIR_FUNC_F64_ASSEMBLE
   v256 __builtin_vpair_assemble (vull, vull);
-    VPAIR_I64U_ASSEMBLE
+    VPAIR_FUNC_I64U_ASSEMBLE
   v256 __builtin_vpair_assemble (vsll, vsll);
-    VPAIR_I64_ASSEMBLE
+    VPAIR_FUNC_I64_ASSEMBLE
   v256 __builtin_vpair_assemble (vui, vui);
-    VPAIR_I32U_ASSEMBLE
+    VPAIR_FUNC_I32U_ASSEMBLE
   v256 __builtin_vpair_assemble (vsi, vsi);
-    VPAIR_I32_ASSEMBLE
+    VPAIR_FUNC_I32_ASSEMBLE
   v256 __builtin_vpair_assemble (vus, vus);
-    VPAIR_I16U_ASSEMBLE
+    VPAIR_FUNC_I16U_ASSEMBLE
   v256 __builtin_vpair_assemble (vss, vss);
-    VPAIR_I16_ASSEMBLE
+    VPAIR_FUNC_I16_ASSEMBLE
   v256 __builtin_vpair_assemble (vuc, vuc);
-    VPAIR_I8U_ASSEMBLE
+    VPAIR_FUNC_I8U_ASSEMBLE
   v256 __builtin_vpair_assemble (vsc, vsc);
-    VPAIR_I8_ASSEMBLE
+    VPAIR_FUNC_I8_ASSEMBLE
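
The overload table only names the VPAIR_FUNC_* instances; the type-generic
__builtin_vpair_assemble spelling is unchanged, and resolution is still by
argument type.  Roughly (hedged sketch, not from the commit):

/* Hypothetical example; the generic built-in picks the typed instance
   from the argument vector types.  */
void
build_pairs (__vector float f0, __vector float f1,
             __vector unsigned char c0, __vector unsigned char c1,
             __vector_pair *fp, __vector_pair *cp)
{
  *fp = __builtin_vpair_assemble (f0, f1); /* -> VPAIR_FUNC_F32_ASSEMBLE */
  *cp = __builtin_vpair_assemble (c0, c1); /* -> VPAIR_FUNC_I8U_ASSEMBLE */
}
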
diff --git a/gcc/config/rs6000/rs6000.md b/gcc/config/rs6000/rs6000.md
index 5a17adc1bc3..1243fad9753 100644
--- a/gcc/config/rs6000/rs6000.md
+++ b/gcc/config/rs6000/rs6000.md
@@ -15767,7 +15767,7 @@
 (include "vsx.md")
 (include "altivec.md")
 (include "mma.md")
-(include "vector-pair.md")
+(include "vpair-func.md")
 (include "dfp.md")
 (include "crypto.md")
 (include "htm.md")
diff --git a/gcc/config/rs6000/t-rs6000 b/gcc/config/rs6000/t-rs6000
index 5fc89499795..592e6cdf1e2 100644
--- a/gcc/config/rs6000/t-rs6000
+++ b/gcc/config/rs6000/t-rs6000
@@ -128,7 +128,7 @@ MD_INCLUDES = $(srcdir)/config/rs6000/rs64.md \
 	$(srcdir)/config/rs6000/vsx.md \
 	$(srcdir)/config/rs6000/altivec.md \
 	$(srcdir)/config/rs6000/mma.md \
-	$(srcdir)/config/rs6000/vector-pair.md \
+	$(srcdir)/config/rs6000/vpair-func.md \
 	$(srcdir)/config/rs6000/crypto.md \
 	$(srcdir)/config/rs6000/htm.md \
 	$(srcdir)/config/rs6000/dfp.md \
diff --git a/gcc/config/rs6000/vector-pair.md b/gcc/config/rs6000/vpair-func.md
similarity index 71%
rename from gcc/config/rs6000/vector-pair.md
rename to gcc/config/rs6000/vpair-func.md
index b5e9330e71f..0f967c8dab6 100644
--- a/gcc/config/rs6000/vector-pair.md
+++ b/gcc/config/rs6000/vpair-func.md
@@ -27,149 +27,149 @@
 ;; possible.
 
 (define_c_enum "unspec"
-  [UNSPEC_VPAIR_V4DF
-   UNSPEC_VPAIR_V8SF
-   UNSPEC_VPAIR_V32QI
-   UNSPEC_VPAIR_V16HI
-   UNSPEC_VPAIR_V8SI
-   UNSPEC_VPAIR_V4DI
-   UNSPEC_VPAIR_ZERO
-   UNSPEC_VPAIR_SPLAT
-   UNSPEC_VPAIR_REDUCE_PLUS_F32
-   UNSPEC_VPAIR_REDUCE_PLUS_F64
-   UNSPEC_VPAIR_REDUCE_PLUS_I64
+  [UNSPEC_VPAIR_FUNC_V4DF
+   UNSPEC_VPAIR_FUNC_V8SF
+   UNSPEC_VPAIR_FUNC_V32QI
+   UNSPEC_VPAIR_FUNC_V16HI
+   UNSPEC_VPAIR_FUNC_V8SI
+   UNSPEC_VPAIR_FUNC_V4DI
+   UNSPEC_VPAIR_FUNC_ZERO
+   UNSPEC_VPAIR_FUNC_SPLAT
+   UNSPEC_VPAIR_FUNC_REDUCE_PLUS_F32
+   UNSPEC_VPAIR_FUNC_REDUCE_PLUS_F64
+   UNSPEC_VPAIR_FUNC_REDUCE_PLUS_I64
    ])
 
 ;; Iterator doing unary/binary arithmetic on vector pairs
-(define_code_iterator VP_FP_UNARY  [abs neg])
-(define_code_iterator VP_FP_BINARY [minus mult plus smin smax])
+(define_code_iterator VP_FUNC_FP_UNARY  [abs neg])
+(define_code_iterator VP_FUNC_FP_BINARY [minus mult plus smin smax])
 
-(define_code_iterator VP_INT_BINARY  [and ior minus plus smax smin umax umin xor])
+(define_code_iterator VP_FUNC_INT_BINARY  [and ior minus plus smax smin umax umin xor])
 
 ;; Return the insn name from the VP_* code iterator
-(define_code_attr vp_insn [(abs      "abs")
-			   (and      "and")
-			   (ior      "ior")
-			   (minus    "sub")
-			   (mult     "mul")
-			   (not      "one_cmpl")
-			   (neg      "neg")
-			   (plus     "add")
-			   (smin     "smin")
-			   (smax     "smax")
-			   (umin     "umin")
-			   (umax     "umax")
-			   (xor      "xor")])
+(define_code_attr vp_func_insn [(abs      "abs")
+				(and      "and")
+				(ior      "ior")
+				(minus    "sub")
+				(mult     "mul")
+				(not      "one_cmpl")
+				(neg      "neg")
+				(plus     "add")
+				(smin     "smin")
+				(smax     "smax")
+				(umin     "umin")
+				(umax     "umax")
+				(xor      "xor")])
 
 ;; Return the register constraint ("v" or "wa") for the integer code iterator
 ;; used.  For arithmetic operations, we need to use "v" in order to use the
 ;; Altivec instruction.  For logical operations, we can use wa.
-(define_code_attr vp_ireg [(and   "wa")
-			   (ior   "wa")
-			   (minus "v")
-			   (not   "wa")
-			   (neg   "v")
-			   (plus  "v")
-			   (smax  "v")
-			   (smin  "v")
-			   (umax  "v")
-			   (umin  "v")
-			   (xor   "wa")])
+(define_code_attr vp_func_ireg [(and   "wa")
+				(ior   "wa")
+				(minus "v")
+				(not   "wa")
+				(neg   "v")
+				(plus  "v")
+				(smax  "v")
+				(smin  "v")
+				(umax  "v")
+				(umin  "v")
+				(xor   "wa")])
 
 ;; Return the register predicate for the integer code iterator used
-(define_code_attr vp_ipredicate [(and   "vsx_register_operand")
-				 (ior   "vsx_register_operand")
-				 (minus "altivec_register_operand")
-				 (not   "vsx_register_operand")
-				 (neg   "altivec_register_operand")
-				 (plus  "altivec_register_operand")
-				 (smax  "altivec_register_operand")
-				 (smin  "altivec_register_operand")
-				 (umax  "altivec_register_operand")
-				 (umin  "altivec_register_operand")
-				 (xor   "vsx_register_operand")])
+(define_code_attr vp_func_ipredicate [(and   "vsx_register_operand")
+				      (ior   "vsx_register_operand")
+				      (minus "altivec_register_operand")
+				      (not   "vsx_register_operand")
+				      (neg   "altivec_register_operand")
+				      (plus  "altivec_register_operand")
+				      (smax  "altivec_register_operand")
+				      (smin  "altivec_register_operand")
+				      (umax  "altivec_register_operand")
+				      (umin  "altivec_register_operand")
+				      (xor   "vsx_register_operand")])
 
 ;; Iterator for creating the unspecs for vector pair built-ins
-(define_int_iterator VP_FP [UNSPEC_VPAIR_V4DF
-			    UNSPEC_VPAIR_V8SF])
+(define_int_iterator VP_FUNC_FP [UNSPEC_VPAIR_FUNC_V4DF
+				 UNSPEC_VPAIR_FUNC_V8SF])
 
-(define_int_iterator VP_INT [UNSPEC_VPAIR_V4DI
-			     UNSPEC_VPAIR_V8SI
-			     UNSPEC_VPAIR_V16HI
-			     UNSPEC_VPAIR_V32QI])
+(define_int_iterator VP_FUNC_INT [UNSPEC_VPAIR_FUNC_V4DI
+				  UNSPEC_VPAIR_FUNC_V8SI
+				  UNSPEC_VPAIR_FUNC_V16HI
+				  UNSPEC_VPAIR_FUNC_V32QI])
 
-(define_int_iterator VP_ALL [UNSPEC_VPAIR_V4DF
-			     UNSPEC_VPAIR_V8SF
-			     UNSPEC_VPAIR_V4DI
-			     UNSPEC_VPAIR_V8SI
-			     UNSPEC_VPAIR_V16HI
-			     UNSPEC_VPAIR_V32QI])
+(define_int_iterator VP_FUNC_ALL [UNSPEC_VPAIR_FUNC_V4DF
+				  UNSPEC_VPAIR_FUNC_V8SF
+				  UNSPEC_VPAIR_FUNC_V4DI
+				  UNSPEC_VPAIR_FUNC_V8SI
+				  UNSPEC_VPAIR_FUNC_V16HI
+				  UNSPEC_VPAIR_FUNC_V32QI])
 
 ;; Map VP_* to vector mode of the arguments after they are split
-(define_int_attr VP_VEC_MODE [(UNSPEC_VPAIR_V4DF  "V2DF")
-			      (UNSPEC_VPAIR_V8SF  "V4SF")
-			      (UNSPEC_VPAIR_V32QI "V16QI")
-			      (UNSPEC_VPAIR_V16HI "V8HI")
-			      (UNSPEC_VPAIR_V8SI  "V4SI")
-			      (UNSPEC_VPAIR_V4DI  "V2DI")])
+(define_int_attr VP_VEC_MODE [(UNSPEC_VPAIR_FUNC_V4DF  "V2DF")
+			      (UNSPEC_VPAIR_FUNC_V8SF  "V4SF")
+			      (UNSPEC_VPAIR_FUNC_V32QI "V16QI")
+			      (UNSPEC_VPAIR_FUNC_V16HI "V8HI")
+			      (UNSPEC_VPAIR_FUNC_V8SI  "V4SI")
+			      (UNSPEC_VPAIR_FUNC_V4DI  "V2DI")])
 
 ;; Map VP_* to a lower case name to identify the vector pair.
-(define_int_attr vp_pmode [(UNSPEC_VPAIR_V4DF  "v4df")
-			   (UNSPEC_VPAIR_V8SF  "v8sf")
-			   (UNSPEC_VPAIR_V32QI "v32qi")
-			   (UNSPEC_VPAIR_V16HI "v16hi")
-			   (UNSPEC_VPAIR_V8SI  "v8si")
-			   (UNSPEC_VPAIR_V4DI  "v4di")])
+(define_int_attr vp_pmode [(UNSPEC_VPAIR_FUNC_V4DF  "v4df")
+			   (UNSPEC_VPAIR_FUNC_V8SF  "v8sf")
+			   (UNSPEC_VPAIR_FUNC_V32QI "v32qi")
+			   (UNSPEC_VPAIR_FUNC_V16HI "v16hi")
+			   (UNSPEC_VPAIR_FUNC_V8SI  "v8si")
+			   (UNSPEC_VPAIR_FUNC_V4DI  "v4di")])
 
 ;; Map VP_* to a lower case name to identify the vector after the vector pair
 ;; has been split.
-(define_int_attr vp_vmode [(UNSPEC_VPAIR_V4DF  "v2df")
-			   (UNSPEC_VPAIR_V8SF  "v4sf")
-			   (UNSPEC_VPAIR_V32QI "v16qi")
-			   (UNSPEC_VPAIR_V16HI "v8hi")
-			   (UNSPEC_VPAIR_V8SI  "v4si")
-			   (UNSPEC_VPAIR_V4DI  "v2di")])
-
-;; Map VP_INT to constraints used for the negate scratch register.  For vectors
+(define_int_attr vp_vmode [(UNSPEC_VPAIR_FUNC_V4DF  "v2df")
+			   (UNSPEC_VPAIR_FUNC_V8SF  "v4sf")
+			   (UNSPEC_VPAIR_FUNC_V32QI "v16qi")
+			   (UNSPEC_VPAIR_FUNC_V16HI "v8hi")
+			   (UNSPEC_VPAIR_FUNC_V8SI  "v4si")
+			   (UNSPEC_VPAIR_FUNC_V4DI  "v2di")])
+
+;; Map VP_FUNC_INT to constraints used for the negate scratch register.  For vectors
 ;; of QI and HI, we need to change -a into 0 - a since we don't have a negate
 ;; operation.  We do have a vnegw/vnegd operation for SI and DI modes.
-(define_int_attr vp_neg_reg [(UNSPEC_VPAIR_V32QI "&v")
-			     (UNSPEC_VPAIR_V16HI "&v")
-			     (UNSPEC_VPAIR_V8SI  "X")
-			     (UNSPEC_VPAIR_V4DI  "X")])
+(define_int_attr vp_neg_reg [(UNSPEC_VPAIR_FUNC_V32QI "&v")
+			     (UNSPEC_VPAIR_FUNC_V16HI "&v")
+			     (UNSPEC_VPAIR_FUNC_V8SI  "X")
+			     (UNSPEC_VPAIR_FUNC_V4DI  "X")])
 
 ;; Modes of the vector element to splat to vector pair
-(define_mode_iterator VP_SPLAT [DF SF DI SI HI QI])
+(define_mode_iterator VP_FUNC_SPLAT [DF SF DI SI HI QI])
 
 ;; Modes of the vector to splat to vector pair
-(define_mode_iterator VP_SPLAT_VEC [V2DF V4SF V2DI V4SI V8HI V16QI])
-
-;; MAP VP_SPLAT and VP_SPLAT_VEC to the mode of the vector pair operation
-(define_mode_attr vp_splat_pmode [(DF    "v4df")
-				  (V2DF  "v4df")
-				  (SF    "v8sf")
-				  (V4SF  "v8sf")
-				  (DI    "v4di")
-				  (V2DI  "v4di")
-				  (SI    "v8si")
-				  (V4SI  "v8si")
-				  (HI    "v16hi")
-				  (V8HI  "v16hi")
-				  (QI    "v32qi")
-				  (V16QI "v32qi")])
-
-;; MAP VP_SPLAT to the mode of the vector containing the element
-(define_mode_attr VP_SPLAT_VMODE [(DF "V2DF")
-				  (SF "V4SF")
-				  (DI "V2DI")
-				  (SI "V4SI")
-				  (HI "V8HI")
-				  (QI "V16QI")])
+(define_mode_iterator VP_FUNC_SPLAT_VEC [V2DF V4SF V2DI V4SI V8HI V16QI])
+
+;; MAP VP_FUNC_SPLAT and VP_FUNC_SPLAT_VEC to the mode of the vector pair operation
+(define_mode_attr vp_func_splat_pmode [(DF    "v4df")
+				       (V2DF  "v4df")
+				       (SF    "v8sf")
+				       (V4SF  "v8sf")
+				       (DI    "v4di")
+				       (V2DI  "v4di")
+				       (SI    "v8si")
+				       (V4SI  "v8si")
+				       (HI    "v16hi")
+				       (V8HI  "v16hi")
+				       (QI    "v32qi")
+				       (V16QI "v32qi")])
+
+;; MAP VP_FUNC_SPLAT to the mode of the vector containing the element
+(define_mode_attr VP_FUNC_SPLAT_VMODE [(DF "V2DF")
+				       (SF "V4SF")
+				       (DI "V2DI")
+				       (SI "V4SI")
+				       (HI "V8HI")
+				       (QI "V16QI")])
 
 ;; Initialize a vector pair to 0
-(define_insn_and_split "vpair_zero"
+(define_insn_and_split "vpair_func_zero"
   [(set (match_operand:OO 0 "vsx_register_operand" "=wa")
-	(unspec:OO [(const_int 0)] UNSPEC_VPAIR_ZERO))]
+	(unspec:OO [(const_int 0)] UNSPEC_VPAIR_FUNC_ZERO))]
   "TARGET_MMA"
   "#"
   "&& reload_completed"
@@ -193,12 +193,12 @@
 ;; We cannot update the two output registers atomically, so mark the output as
 ;; an early clobber so we don't accidentally clobber the input operands.
 
-(define_insn_and_split "vpair_assemble_<vp_pmode>"
+(define_insn_and_split "vpair_func_assemble_<vp_pmode>"
   [(set (match_operand:OO 0 "vsx_register_operand" "=&wa")
 	(unspec:OO
 	 [(match_operand:<VP_VEC_MODE> 1 "mma_assemble_input_operand" "mwa")
 	  (match_operand:<VP_VEC_MODE> 2 "mma_assemble_input_operand" "mwa")]
-	 VP_ALL))]
+	 VP_FUNC_ALL))]
   "TARGET_MMA"
   "#"
   "&& reload_completed"
@@ -213,12 +213,12 @@
   [(set_attr "length" "8")])
 
 ;; Extract one of the two 128-bit vectors from a vector pair.
-(define_insn_and_split "vpair_extract_vector_<vp_pmode>"
+(define_insn_and_split "vpair_func_extract_vector_<vp_pmode>"
   [(set (match_operand:<VP_VEC_MODE> 0 "vsx_register_operand" "=wa")
 	(unspec:<VP_VEC_MODE>
 	 [(match_operand:OO 1 "vsx_register_operand" "wa")
 	  (match_operand 2 "const_0_to_1_operand" "n")]
-	 VP_ALL))]
+	 VP_FUNC_ALL))]
   "TARGET_MMA"
   "#"
   "&& reload_completed"
@@ -233,12 +233,12 @@
 })
 
 ;; Optimize extracting a 128-bit vector from a vector pair in memory.
-(define_insn_and_split "*vpair_extract_vector_<vp_pmode>_mem"
+(define_insn_and_split "*vpair_func_extract_vector_<vp_pmode>_mem"
   [(set (match_operand:<VP_VEC_MODE> 0 "vsx_register_operand" "=wa")
 	(unspec:<VP_VEC_MODE>
 	 [(match_operand:OO 1 "memory_operand" "o")
 	  (match_operand 2 "const_0_to_1_operand" "n")]
-	 VP_ALL))]
+	 VP_FUNC_ALL))]
   "TARGET_MMA"
   "#"
   "&& reload_completed"
@@ -251,19 +251,19 @@
 
 ;; Create a vector pair with a value splat'ed (duplicated) to all of the
 ;; elements.
-(define_expand "vpair_splat_<vp_splat_pmode>"
+(define_expand "vpair_func_splat_<vp_func_splat_pmode>"
   [(use (match_operand:OO 0 "vsx_register_operand"))
-   (use (match_operand:VP_SPLAT 1 "input_operand"))]
+   (use (match_operand:VP_FUNC_SPLAT 1 "input_operand"))]
   "TARGET_MMA"
 {
   rtx op0 = operands[0];
   rtx op1 = operands[1];
   machine_mode element_mode = <MODE>mode;
-  machine_mode vector_mode = <VP_SPLAT_VMODE>mode;
+  machine_mode vector_mode = <VP_FUNC_SPLAT_VMODE>mode;
 
   if (op1 == CONST0_RTX (element_mode))
     {
-      emit_insn (gen_vpair_zero (op0));
+      emit_insn (gen_vpair_func_zero (op0));
       DONE;
     }
 
@@ -274,18 +274,18 @@
     RTVEC_ELT (elements, i) = copy_rtx (op1);
 
   rs6000_expand_vector_init (vec, gen_rtx_PARALLEL (vector_mode, elements));
-  emit_insn (gen_vpair_splat_<vp_splat_pmode>_internal (op0, vec));
+  emit_insn (gen_vpair_func_splat_<vp_func_splat_pmode>_internal (op0, vec));
   DONE;
 })
 
 ;; Inner splat support.  Operand1 is the vector splat created above.  Allow
 ;; operand 1 to overlap with the output registers to eliminate one move
 ;; instruction.
-(define_insn_and_split "vpair_splat_<vp_splat_pmode>_internal"
+(define_insn_and_split "vpair_func_splat_<vp_func_splat_pmode>_internal"
   [(set (match_operand:OO 0 "vsx_register_operand" "=wa,wa")
 	(unspec:OO
-	 [(match_operand:VP_SPLAT_VEC 1 "vsx_register_operand" "0,wa")]
-	 UNSPEC_VPAIR_SPLAT))]
+	 [(match_operand:VP_FUNC_SPLAT_VEC 1 "vsx_register_operand" "0,wa")]
+	 UNSPEC_VPAIR_FUNC_SPLAT))]
   "TARGET_MMA"
   "#"
   "&& reload_completed"
@@ -316,31 +316,31 @@
 
 \f
 ;; Vector pair floating point unary operations
-(define_insn_and_split "vpair_<vp_insn>_<vp_pmode>2"
+(define_insn_and_split "vpair_func_<vp_func_insn>_<vp_pmode>2"
   [(set (match_operand:OO 0 "vsx_register_operand" "=wa")
-	(unspec:OO [(VP_FP_UNARY:OO
+	(unspec:OO [(VP_FUNC_FP_UNARY:OO
 		     (match_operand:OO 1 "vsx_register_operand" "wa"))]
-		   VP_FP))]
+		   VP_FUNC_FP))]
   "TARGET_MMA"
   "#"
   "&& reload_completed"
   [(const_int 0)]
 {
   split_unary_vector_pair (<VP_VEC_MODE>mode, operands,
-			   gen_<vp_insn><vp_vmode>2);
+			   gen_<vp_func_insn><vp_vmode>2);
   DONE;
 }
   [(set_attr "length" "8")])
 
 ;; Optimize vector pair negate of absolute value
-(define_insn_and_split "vpair_nabs_<vp_pmode>2"
+(define_insn_and_split "vpair_func_nabs_<vp_pmode>2"
   [(set (match_operand:OO 0 "vsx_register_operand" "=wa")
 	(unspec:OO
 	 [(neg:OO
 	   (unspec:OO
 	    [(abs:OO (match_operand:OO 1 "vsx_register_operand" "ww"))]
-	    VP_FP))]
-	 VP_FP))]
+	    VP_FUNC_FP))]
+	 VP_FUNC_FP))]
   "TARGET_MMA"
   "#"
   "&& reload_completed"
@@ -353,32 +353,32 @@
   [(set_attr "length" "8")])
 
 ;; Vector pair floating binary operations
-(define_insn_and_split "vpair_<vp_insn>_<vp_pmode>3"
+(define_insn_and_split "vpair_func_<vp_func_insn>_<vp_pmode>3"
   [(set (match_operand:OO 0 "vsx_register_operand" "=wa")
-	(unspec:OO [(VP_FP_BINARY:OO
+	(unspec:OO [(VP_FUNC_FP_BINARY:OO
 		     (match_operand:OO 1 "vsx_register_operand" "wa")
 		     (match_operand:OO 2 "vsx_register_operand" "wa"))]
-		   VP_FP))]
+		   VP_FUNC_FP))]
   "TARGET_MMA"
   "#"
   "&& reload_completed"
   [(const_int 0)]
 {
   split_binary_vector_pair (<VP_VEC_MODE>mode, operands,
-			    gen_<vp_insn><vp_vmode>3);
+			    gen_<vp_func_insn><vp_vmode>3);
   DONE;
 }
   [(set_attr "length" "8")])
 
 ;; Vector pair fused multiply-add floating point operations
-(define_insn_and_split "vpair_fma_<vp_pmode>4"
+(define_insn_and_split "vpair_func_fma_<vp_pmode>4"
   [(set (match_operand:OO 0 "vsx_register_operand" "=wa,wa")
 	(unspec:OO
 	 [(fma:OO
 	   (match_operand:OO 1 "vsx_register_operand" "%wa,wa")
 	   (match_operand:OO 2 "vsx_register_operand" "wa,0")
 	   (match_operand:OO 3 "vsx_register_operand" "0,wa"))]
-	 VP_FP))]
+	 VP_FUNC_FP))]
   "TARGET_MMA"
   "#"
   "&& reload_completed"
@@ -390,7 +390,7 @@
 }
   [(set_attr "length" "8")])
 
-(define_insn_and_split "vpair_fms_<vp_pmode>4"
+(define_insn_and_split "vpair_func_fms_<vp_pmode>4"
   [(set (match_operand:OO 0 "vsx_register_operand" "=wa,wa")
 	(unspec:OO
 	 [(fma:OO
@@ -398,8 +398,8 @@
 	   (match_operand:OO 2 "vsx_register_operand" "wa,0")
 	   (unspec:OO
 	    [(neg:OO (match_operand:OO 3 "vsx_register_operand" "0,wa"))]
-	     VP_FP))]
-	 VP_FP))]
+	     VP_FUNC_FP))]
+	 VP_FUNC_FP))]
   "TARGET_MMA"
   "#"
   "&& reload_completed"
@@ -411,7 +411,7 @@
 }
   [(set_attr "length" "8")])
 
-(define_insn_and_split "vpair_nfma_<vp_pmode>4"
+(define_insn_and_split "vpair_func_nfma_<vp_pmode>4"
   [(set (match_operand:OO 0 "vsx_register_operand" "=wa,wa")
 	(unspec:OO
 	 [(neg:OO
@@ -420,8 +420,8 @@
 	      (match_operand:OO 1 "vsx_register_operand" "%wa,wa")
 	      (match_operand:OO 2 "vsx_register_operand" "wa,0")
 	      (match_operand:OO 3 "vsx_register_operand" "0,wa"))]
-	    VP_FP))]
-	 VP_FP))]
+	    VP_FUNC_FP))]
+	 VP_FUNC_FP))]
   "TARGET_MMA"
   "#"
   "&& reload_completed"
@@ -433,7 +433,7 @@
 }
   [(set_attr "length" "8")])
 
-(define_insn_and_split "vpair_nfms_<vp_pmode>4"
+(define_insn_and_split "vpair_func_nfms_<vp_pmode>4"
   [(set (match_operand:OO 0 "vsx_register_operand" "=wa,wa")
 	(unspec:OO
 	 [(neg:OO
@@ -443,9 +443,9 @@
 	      (match_operand:OO 2 "vsx_register_operand" "wa,0")
 	      (unspec:OO
 	       [(neg:OO (match_operand:OO 3 "vsx_register_operand" "0,wa"))]
-	       VP_FP))]
-	   VP_FP))]
-	 VP_FP))]
+	       VP_FUNC_FP))]
+	   VP_FUNC_FP))]
+	 VP_FUNC_FP))]
   "TARGET_MMA"
   "#"
   "&& reload_completed"
@@ -458,7 +458,7 @@
   [(set_attr "length" "8")])
 
 ;; Optimize vector pair (a * b) + c into vector pair fma (a, b, c).
-(define_insn_and_split "*vpair_fma_fpcontract_<vp_pmode>4"
+(define_insn_and_split "*vpair_func_fma_fpcontract_<vp_pmode>4"
   [(set (match_operand:OO 0 "vsx_register_operand" "=wa,wa")
 	(unspec:OO
 	 [(plus:OO
@@ -466,9 +466,9 @@
 	    [(mult:OO
 	      (match_operand:OO 1 "vsx_register_operand" "%wa,wa")
 	      (match_operand:OO 2 "vsx_register_operand" "wa,0"))]
-	    VP_FP)
+	    VP_FUNC_FP)
 	   (match_operand:OO 3 "vsx_register_operand" "0,wa"))]
-	 VP_FP))]
+	 VP_FUNC_FP))]
   "TARGET_MMA && flag_fp_contract_mode == FP_CONTRACT_FAST"
   "#"
   "&& 1"
@@ -478,13 +478,13 @@
 	   (match_dup 1)
 	   (match_dup 2)
 	   (match_dup 3))]
-	 VP_FP))]
+	 VP_FUNC_FP))]
 {
 }
   [(set_attr "length" "8")])
 
 ;; Optimize vector pair (a * b) - c into vector pair fma (a, b, -c)
-(define_insn_and_split "*vpair_fms_fpcontract_<vp_pmode>4"
+(define_insn_and_split "*vpair_func_fms_fpcontract_<vp_pmode>4"
   [(set (match_operand:OO 0 "vsx_register_operand" "=wa,wa")
 	(unspec:OO
 	 [(minus:OO
@@ -492,9 +492,9 @@
 	    [(mult:OO
 	      (match_operand:OO 1 "vsx_register_operand" "%wa,wa")
 	      (match_operand:OO 2 "vsx_register_operand" "wa,0"))]
-	    VP_FP)
+	    VP_FUNC_FP)
 	   (match_operand:OO 3 "vsx_register_operand" "0,wa"))]
-	 VP_FP))]
+	 VP_FUNC_FP))]
   "TARGET_MMA && flag_fp_contract_mode == FP_CONTRACT_FAST"
   "#"
   "&& 1"
@@ -506,15 +506,15 @@
 	   (unspec:OO
 	    [(neg:OO
 	      (match_dup 3))]
-	    VP_FP))]
-	 VP_FP))]
+	    VP_FUNC_FP))]
+	 VP_FUNC_FP))]
 {
 }
   [(set_attr "length" "8")])
 
 
 ;; Optimize vector pair -((a * b) + c) into vector pair -fma (a, b, c).
-(define_insn_and_split "*vpair_nfma_fpcontract_<vp_pmode>4"
+(define_insn_and_split "*vpair_func_nfma_fpcontract_<vp_pmode>4"
   [(set (match_operand:OO 0 "vsx_register_operand" "=wa,wa")
 	(unspec:OO
 	 [(neg:OO
@@ -524,10 +524,10 @@
 	       [(mult:OO
 		 (match_operand:OO 1 "vsx_register_operand" "%wa,wa")
 		 (match_operand:OO 2 "vsx_register_operand" "wa,0"))]
-	       VP_FP)
+	       VP_FUNC_FP)
 	      (match_operand:OO 3 "vsx_register_operand" "0,wa"))]
-	    VP_FP))]
-	 VP_FP))]
+	    VP_FUNC_FP))]
+	 VP_FUNC_FP))]
   "TARGET_MMA && flag_fp_contract_mode == FP_CONTRACT_FAST"
   "#"
   "&& 1"
@@ -539,14 +539,14 @@
 	      (match_dup 1)
 	      (match_dup 2)
 	      (match_dup 3))]
-	    VP_FP))]
-	 VP_FP))]
+	    VP_FUNC_FP))]
+	 VP_FUNC_FP))]
 {
 }
   [(set_attr "length" "8")])
 
 ;; Optimize vector pair -((a * b) - c) into vector pair -fma (a, b, -c)
-(define_insn_and_split "*vpair_nfms_fpcontract_<vp_pmode>4"
+(define_insn_and_split "*vpair_func_nfms_fpcontract_<vp_pmode>4"
   [(set (match_operand:OO 0 "vsx_register_operand" "=wa,wa")
 	(unspec:OO
 	 [(neg:OO
@@ -556,10 +556,10 @@
 	       [(mult:OO
 		 (match_operand:OO 1 "vsx_register_operand" "%wa,wa")
 		 (match_operand:OO 2 "vsx_register_operand" "wa,0"))]
-	       VP_FP)
+	       VP_FUNC_FP)
 	      (match_operand:OO 3 "vsx_register_operand" "0,wa"))]
-	    VP_FP))]
-	 VP_FP))]
+	    VP_FUNC_FP))]
+	 VP_FUNC_FP))]
   "TARGET_MMA && flag_fp_contract_mode == FP_CONTRACT_FAST"
   "#"
   "&& 1"
@@ -573,19 +573,19 @@
 	      (unspec:OO
 	       [(neg:OO
 		 (match_dup 3))]
-	       VP_FP))]
-	    VP_FP))]
-	 VP_FP))]
+	       VP_FUNC_FP))]
+	    VP_FUNC_FP))]
+	 VP_FUNC_FP))]
 {
 }
   [(set_attr "length" "8")])
 
 \f
 ;; Add all elements in a pair of V4SF vectors.
-(define_insn_and_split "vpair_reduc_plus_scale_v8sf"
+(define_insn_and_split "vpair_func_reduc_plus_scale_v8sf"
   [(set (match_operand:SF 0 "vsx_register_operand" "=wa")
 	(unspec:SF [(match_operand:OO 1 "vsx_register_operand" "v")]
-		   UNSPEC_VPAIR_REDUCE_PLUS_F32))
+		   UNSPEC_VPAIR_FUNC_REDUCE_PLUS_F32))
    (clobber (match_scratch:V4SF 2 "=&v"))
    (clobber (match_scratch:V4SF 3 "=&v"))]
   "TARGET_MMA"
@@ -612,10 +612,10 @@
   [(set_attr "length" "24")])
 
 ;; Add all elements in a pair of V2DF vectors
-(define_insn_and_split "vpair_reduc_plus_scale_v4df"
+(define_insn_and_split "vpair_func_reduc_plus_scale_v4df"
   [(set (match_operand:DF 0 "vsx_register_operand" "=&wa")
 	(unspec:DF [(match_operand:OO 1 "vsx_register_operand" "wa")]
-		   UNSPEC_VPAIR_REDUCE_PLUS_F64))
+		   UNSPEC_VPAIR_FUNC_REDUCE_PLUS_F64))
    (clobber (match_scratch:DF 2 "=&wa"))
    (clobber (match_scratch:V2DF 3 "=&wa"))]
   "TARGET_MMA"
@@ -642,11 +642,11 @@
 
 \f
 ;; Vector pair integer negate support.
-(define_insn_and_split "vpair_neg_<vp_pmode>2"
+(define_insn_and_split "vpair_func_neg_<vp_pmode>2"
   [(set (match_operand:OO 0 "altivec_register_operand" "=v")
 	(unspec:OO [(neg:OO
 		     (match_operand:OO 1 "altivec_register_operand" "v"))]
-		   VP_INT))
+		   VP_FUNC_INT))
    (clobber (match_scratch:<VP_VEC_MODE> 2 "=<vp_neg_reg>"))]
   "TARGET_MMA"
   "#"
@@ -687,10 +687,10 @@
   [(set_attr "length" "8")])
 
 ;; Vector pair integer not support.
-(define_insn_and_split "vpair_not_<vp_pmode>2"
+(define_insn_and_split "vpair_func_not_<vp_pmode>2"
   [(set (match_operand:OO 0 "vsx_register_operand" "=wa")
 	(unspec:OO [(not:OO (match_operand:OO 1 "vsx_register_operand" "wa"))]
-		   VP_INT))]
+		   VP_FUNC_INT))]
   "TARGET_MMA"
   "#"
   "&& reload_completed"
@@ -703,33 +703,33 @@
   [(set_attr "length" "8")])
 
 ;; Vector pair integer binary operations.
-(define_insn_and_split "vpair_<vp_insn>_<vp_pmode>3"
-  [(set (match_operand:OO 0 "<vp_ipredicate>" "=<vp_ireg>")
-	(unspec:OO [(VP_INT_BINARY:OO
-		     (match_operand:OO 1 "<vp_ipredicate>" "<vp_ireg>")
-		     (match_operand:OO 2 "<vp_ipredicate>" "<vp_ireg>"))]
-		   VP_INT))]
+(define_insn_and_split "vpair_func_<vp_func_insn>_<vp_pmode>3"
+  [(set (match_operand:OO 0 "<vp_func_ipredicate>" "=<vp_func_ireg>")
+	(unspec:OO [(VP_FUNC_INT_BINARY:OO
+		     (match_operand:OO 1 "<vp_func_ipredicate>" "<vp_func_ireg>")
+		     (match_operand:OO 2 "<vp_func_ipredicate>" "<vp_func_ireg>"))]
+		   VP_FUNC_INT))]
   "TARGET_MMA"
   "#"
   "&& reload_completed"
   [(const_int 0)]
 {
   split_binary_vector_pair (<VP_VEC_MODE>mode, operands,
-			    gen_<vp_insn><vp_vmode>3);
+			    gen_<vp_func_insn><vp_vmode>3);
   DONE;
 }
   [(set_attr "length" "8")])
 
 ;; Optimize vector pair a & ~b
-(define_insn_and_split "*vpair_andc_<vp_pmode>"
+(define_insn_and_split "*vpair_func_andc_<vp_pmode>"
   [(set (match_operand:OO 0 "vsx_register_operand" "=wa")
 	(unspec:OO [(and:OO
 		     (unspec:OO
 		      [(not:OO
 			(match_operand:OO 1 "vsx_register_operand" "wa"))]
-		      VP_INT)
+		      VP_FUNC_INT)
 		     (match_operand:OO 2 "vsx_register_operand" "wa"))]
-		   VP_INT))]
+		   VP_FUNC_INT))]
   "TARGET_MMA"
   "#"
   "&& reload_completed"
@@ -742,15 +742,15 @@
   [(set_attr "length" "8")])
 
 ;; Optimize vector pair a | ~b
-(define_insn_and_split "*vpair_iorc_<vp_pmode>"
+(define_insn_and_split "*vpair_func_iorc_<vp_pmode>"
   [(set (match_operand:OO 0 "vsx_register_operand" "=wa")
 	(unspec:OO [(ior:OO
 		     (unspec:OO
 		      [(not:OO
 			(match_operand:OO 1 "vsx_register_operand" "wa"))]
-		      VP_INT)
+		      VP_FUNC_INT)
 		     (match_operand:OO 2 "vsx_register_operand" "wa"))]
-		   VP_INT))]
+		   VP_FUNC_INT))]
   "TARGET_MMA"
   "#"
   "&& reload_completed"
@@ -763,15 +763,15 @@
   [(set_attr "length" "8")])
 
 ;; Optimize vector pair ~(a & b) or ((~a) | (~b))
-(define_insn_and_split "*vpair_nand_<vp_pmode>_1"
+(define_insn_and_split "*vpair_func_nand_<vp_pmode>_1"
   [(set (match_operand:OO 0 "vsx_register_operand" "=wa")
 	(unspec:OO
 	 [(not:OO
 	   (unspec:OO [(and:OO
 			(match_operand:OO 1 "vsx_register_operand" "wa")
 			(match_operand:OO 2 "vsx_register_operand" "wa"))]
-		      VP_INT))]
-	 VP_INT))]
+		      VP_FUNC_INT))]
+	 VP_FUNC_INT))]
   "TARGET_MMA"
   "#"
   "&& reload_completed"
@@ -783,19 +783,19 @@
 }
   [(set_attr "length" "8")])
 
-(define_insn_and_split "*vpair_nand_<vp_pmode>_2"
+(define_insn_and_split "*vpair_func_nand_<vp_pmode>_2"
   [(set (match_operand:OO 0 "vsx_register_operand" "=wa")
 	(unspec:OO
 	 [(ior:OO
 	   (unspec:OO
 	    [(not:OO
 	      (match_operand:OO 1 "vsx_register_operand" "wa"))]
-	    VP_INT)
+	    VP_FUNC_INT)
 	   (unspec:OO
 	    [(not:OO
 	      (match_operand:OO 2 "vsx_register_operand" "wa"))]
-	    VP_INT))]
-	 VP_INT))]
+	    VP_FUNC_INT))]
+	 VP_FUNC_INT))]
   "TARGET_MMA"
   "#"
   "&& reload_completed"
@@ -808,15 +808,15 @@
   [(set_attr "length" "8")])
 
 ;; Optimize vector pair ~(a | b) or ((~a) & (~b))
-(define_insn_and_split "*vpair_nor_<vp_pmode>_1"
+(define_insn_and_split "*vpair_func_nor_<vp_pmode>_1"
   [(set (match_operand:OO 0 "vsx_register_operand" "=wa")
 	(unspec:OO
 	 [(not:OO
 	   (unspec:OO [(ior:OO
 			(match_operand:OO 1 "vsx_register_operand" "wa")
 			(match_operand:OO 2 "vsx_register_operand" "wa"))]
-		      VP_INT))]
-	 VP_INT))]
+		      VP_FUNC_INT))]
+	 VP_FUNC_INT))]
   "TARGET_MMA"
   "#"
   "&& reload_completed"
@@ -828,17 +828,17 @@
 }
   [(set_attr "length" "8")])
 
-(define_insn_and_split "*vpair_nor_<vp_pmode>_2"
+(define_insn_and_split "*vpair_func_nor_<vp_pmode>_2"
   [(set (match_operand:OO 0 "vsx_register_operand" "=wa")
 	(unspec:OO
 	 [(ior:OO
 	   (unspec:OO
 	    [(not:OO (match_operand:OO 1 "vsx_register_operand" "wa"))]
-	    VP_INT)
+	    VP_FUNC_INT)
 	   (unspec:OO
 	    [(not:OO (match_operand:OO 2 "vsx_register_operand" "wa"))]
-	    VP_INT))]
-	 VP_INT))]
+	    VP_FUNC_INT))]
+	 VP_FUNC_INT))]
   "TARGET_MMA"
   "#"
   "&& reload_completed"
@@ -851,10 +851,10 @@
   [(set_attr "length" "8")])
 
 ;; Add all elements in a pair of V2DI vectors
-(define_insn_and_split "vpair_reduc_plus_scale_v4di"
+(define_insn_and_split "vpair_func_reduc_plus_scale_v4di"
   [(set (match_operand:DI 0 "gpc_reg_operand" "=&r")
 	(unspec:DI [(match_operand:OO 1 "altivec_register_operand" "v")]
-		   UNSPEC_VPAIR_REDUCE_PLUS_I64))
+		   UNSPEC_VPAIR_FUNC_REDUCE_PLUS_I64))
    (clobber (match_scratch:V2DI 2 "=&v"))
    (clobber (match_scratch:DI 3 "=&r"))]
   "TARGET_MMA && TARGET_POWERPC64"

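
The final hunk keeps the TARGET_POWERPC64 condition on the V2DI reduction,
which lines up with the {no32bit} attribute on
__builtin_vpair_i64_add_elements in rs6000-builtins.def.  Hedged sketch of
that 64-bit-only built-in:

/* Hypothetical example; only valid on 64-bit targets, matching the
   {no32bit} attribute and the TARGET_POWERPC64 insn condition above.  */
long long
sum_i64 (__vector_pair p)
{
  return __builtin_vpair_i64_add_elements (p);
}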