public inbox for gcc-cvs@sourceware.org
help / color / mirror / Atom feed
* [gcc(refs/users/wschmidt/heads/builtins3)] rs6000: Add Power8 vector builtins
@ 2020-08-28 20:08 William Schmidt
0 siblings, 0 replies; 8+ messages in thread
From: William Schmidt @ 2020-08-28 20:08 UTC (permalink / raw)
To: gcc-cvs
https://gcc.gnu.org/g:5b46843304616970f0527646ea9df995b3604f71
commit 5b46843304616970f0527646ea9df995b3604f71
Author: Bill Schmidt <wschmidt@linux.ibm.com>
Date: Wed Jun 17 11:40:50 2020 -0500
rs6000: Add Power8 vector builtins
2020-07-26 Bill Schmidt <wschmidt@linux.ibm.com>
* config/rs6000/rs6000-builtin-new.def: Add power8-vector
builtins.
Diff:
---
gcc/config/rs6000/rs6000-builtin-new.def | 417 +++++++++++++++++++++++++++++++
1 file changed, 417 insertions(+)
diff --git a/gcc/config/rs6000/rs6000-builtin-new.def b/gcc/config/rs6000/rs6000-builtin-new.def
index 0a17cad446c..2f918c1d69e 100644
--- a/gcc/config/rs6000/rs6000-builtin-new.def
+++ b/gcc/config/rs6000/rs6000-builtin-new.def
@@ -1977,3 +1977,420 @@
DIVDEU diveu_di {}
+; Power8 vector built-ins.
+[power8-vector]
+ const vsll __builtin_altivec_abs_v2di (vsll);
+ ABS_V2DI absv2di2 {}
+
+ const vsc __builtin_altivec_eqv_v16qi (vsc, vsc);
+ EQV_V16QI eqvv16qi3 {}
+
+ const vuc __builtin_altivec_eqv_v16qi_uns (vuc, vuc);
+ EQV_V16QI_UNS eqvv16qi3 {}
+
+ const vsq __builtin_altivec_eqv_v1ti (vsq, vsq);
+ EQV_V1TI eqvv1ti3 {}
+
+ const vuq __builtin_altivec_eqv_v1ti_uns (vuq, vuq);
+ EQV_V1TI_UNS eqvv1ti3 {}
+
+ const vd __builtin_altivec_eqv_v2df (vd, vd);
+ EQV_V2DF eqvv2df3 {}
+
+ const vsll __builtin_altivec_eqv_v2di (vsll, vsll);
+ EQV_V2DI eqvv2di3 {}
+
+ const vull __builtin_altivec_eqv_v2di_uns (vull, vull);
+ EQV_V2DI_UNS eqvv2di3 {}
+
+ const vf __builtin_altivec_eqv_v4sf (vf, vf);
+ EQV_V4SF eqvv4sf3 {}
+
+ const vsi __builtin_altivec_eqv_v4si (vsi, vsi);
+ EQV_V4SI eqvv4si3 {}
+
+ const vui __builtin_altivec_eqv_v4si_uns (vui, vui);
+ EQV_V4SI_UNS eqvv4si3 {}
+
+ const vss __builtin_altivec_eqv_v8hi (vss, vss);
+ EQV_V8HI eqvv8hi3 {}
+
+ const vus __builtin_altivec_eqv_v8hi_uns (vus, vus);
+ EQV_V8HI_UNS eqvv8hi3 {}
+
+ const vsc __builtin_altivec_nand_v16qi (vsc, vsc);
+ NAND_V16QI nandv16qi3 {}
+
+ const vuc __builtin_altivec_nand_v16qi_uns (vuc, vuc);
+ NAND_V16QI_UNS nandv16qi3 {}
+
+ const vsq __builtin_altivec_nand_v1ti (vsq, vsq);
+ NAND_V1TI nandv1ti3 {}
+
+ const vuq __builtin_altivec_nand_v1ti_uns (vuq, vuq);
+ NAND_V1TI_UNS nandv1ti3 {}
+
+ const vd __builtin_altivec_nand_v2df (vd, vd);
+ NAND_V2DF nandv2df3 {}
+
+ const vsll __builtin_altivec_nand_v2di (vsll, vsll);
+ NAND_V2DI nandv2di3 {}
+
+ const vull __builtin_altivec_nand_v2di_uns (vull, vull);
+ NAND_V2DI_UNS nandv2di3 {}
+
+ const vf __builtin_altivec_nand_v4sf (vf, vf);
+ NAND_V4SF nandv4sf3 {}
+
+ const vsi __builtin_altivec_nand_v4si (vsi, vsi);
+ NAND_V4SI nandv4si3 {}
+
+ const vui __builtin_altivec_nand_v4si_uns (vui, vui);
+ NAND_V4SI_UNS nandv4si3 {}
+
+ const vss __builtin_altivec_nand_v8hi (vss, vss);
+ NAND_V8HI nandv8hi3 {}
+
+ const vus __builtin_altivec_nand_v8hi_uns (vus, vus);
+ NAND_V8HI_UNS nandv8hi3 {}
+
+ const vsc __builtin_altivec_neg_v16qi (vsc);
+ NEG_V16QI negv16qi2 {}
+
+ const vd __builtin_altivec_neg_v2df (vd);
+ NEG_V2DF negv2df2 {}
+
+ const vsll __builtin_altivec_neg_v2di (vsll);
+ NEG_V2DI negv2di2 {}
+
+ const vf __builtin_altivec_neg_v4sf (vf);
+ NEG_V4SF negv4sf2 {}
+
+ const vsi __builtin_altivec_neg_v4si (vsi);
+ NEG_V4SI negv4si2 {}
+
+ const vss __builtin_altivec_neg_v8hi (vss);
+ NEG_V8HI negv8hi2 {}
+
+ const vsc __builtin_altivec_orc_v16qi (vsc, vsc);
+ ORC_V16QI orcv16qi3 {}
+
+ const vuc __builtin_altivec_orc_v16qi_uns (vuc, vuc);
+ ORC_V16QI_UNS orcv16qi3 {}
+
+ const vsq __builtin_altivec_orc_v1ti (vsq, vsq);
+ ORC_V1TI orcv1ti3 {}
+
+ const vuq __builtin_altivec_orc_v1ti_uns (vuq, vuq);
+ ORC_V1TI_UNS orcv1ti3 {}
+
+ const vd __builtin_altivec_orc_v2df (vd, vd);
+ ORC_V2DF orcv2df3 {}
+
+ const vsll __builtin_altivec_orc_v2di (vsll, vsll);
+ ORC_V2DI orcv2di3 {}
+
+ const vull __builtin_altivec_orc_v2di_uns (vull, vull);
+ ORC_V2DI_UNS orcv2di3 {}
+
+ const vf __builtin_altivec_orc_v4sf (vf, vf);
+ ORC_V4SF orcv4sf3 {}
+
+ const vsi __builtin_altivec_orc_v4si (vsi, vsi);
+ ORC_V4SI orcv4si3 {}
+
+ const vui __builtin_altivec_orc_v4si_uns (vui, vui);
+ ORC_V4SI_UNS orcv4si3 {}
+
+ const vss __builtin_altivec_orc_v8hi (vss, vss);
+ ORC_V8HI orcv8hi3 {}
+
+ const vus __builtin_altivec_orc_v8hi_uns (vus, vus);
+ ORC_V8HI_UNS orcv8hi3 {}
+
+ const vsc __builtin_altivec_vclzb (vsc);
+ VCLZB clzv16qi2 {}
+
+ const vsll __builtin_altivec_vclzd (vsll);
+ VCLZD clzv2di2 {}
+
+ const vss __builtin_altivec_vclzh (vss);
+ VCLZH clzv8hi2 {}
+
+ const vsi __builtin_altivec_vclzw (vsi);
+ VCLZW clzv4si2 {}
+
+ const vsc __builtin_altivec_vgbbd (vsc);
+ VGBBD p8v_vgbbd {}
+
+ const vsq __builtin_altivec_vaddcuq (vsq, vsq);
+ VADDCUQ altivec_vaddcuq {}
+
+ const vsq __builtin_altivec_vaddecuq (vsq, vsq, vsq);
+ VADDECUQ altivec_vaddecuq {}
+
+ const vuq __builtin_altivec_vaddeuqm (vuq, vuq, vuq);
+ VADDEUQM altivec_vaddeuqm {}
+
+ const vsll __builtin_altivec_vaddudm (vsll, vsll);
+ VADDUDM addv2di3 {}
+
+ const vsq __builtin_altivec_vadduqm (vsq, vsq);
+ VADDUQM altivec_vadduqm {}
+
+ const vsll __builtin_altivec_vbpermq (vsll, vsc);
+ VBPERMQ altivec_vbpermq {}
+
+ const vuc __builtin_altivec_vbpermq2 (vuc, vuc);
+ VBPERMQ2 altivec_vbpermq2 {}
+
+ const vbll __builtin_altivec_vcmpequd (vsll, vsll);
+ VCMPEQUD vector_eqv2di {}
+
+ const int __builtin_altivec_vcmpequd_p (int, vsll, vsll);
+ VCMPEQUD_P vector_eq_v2di_p {pred}
+
+ const vbll __builtin_altivec_vcmpgtsd (vsll, vsll);
+ VCMPGTSD vector_gtv2di {}
+
+ const int __builtin_altivec_vcmpgtsd_p (int, vsll, vsll);
+ VCMPGTSD_P vector_gt_v2di_p {pred}
+
+ const vbll __builtin_altivec_vcmpgtud (vull, vull);
+ VCMPGTUD vector_gtuv2di {}
+
+ const int __builtin_altivec_vcmpgtud_p (int, vull, vull);
+ VCMPGTUD_P vector_gtu_v2di_p {pred}
+
+ const vsll __builtin_altivec_vmaxsd (vsll, vsll);
+ VMAXSD smaxv2di3 {}
+
+ const vull __builtin_altivec_vmaxud (vull, vull);
+ VMAXUD umaxv2di3 {}
+
+ const vsll __builtin_altivec_vminsd (vsll, vsll);
+ VMINSD sminv2di3 {}
+
+ const vull __builtin_altivec_vminud (vull, vull);
+ VMINUD uminv2di3 {}
+
+ const vd __builtin_altivec_vmrgew_v2df (vd, vd);
+ VMRGEW_V2DF p8_vmrgew_v2df {}
+
+ const vsll __builtin_altivec_vmrgew_v2di (vsll, vsll);
+ VMRGEW_V2DI p8_vmrgew_v2di {}
+
+ const vf __builtin_altivec_vmrgew_v4sf (vf, vf);
+ VMRGEW_V4SF p8_vmrgew_v4sf {}
+
+ const vsi __builtin_altivec_vmrgew_v4si (vsi, vsi);
+ VMRGEW_V4SI p8_vmrgew_v4si {}
+
+ const vd __builtin_altivec_vmrgow_v2df (vd, vd);
+ VMRGOW_V2DF p8_vmrgow_v2df {}
+
+ const vsll __builtin_altivec_vmrgow_v2di (vsll, vsll);
+ VMRGOW_V2DI p8_vmrgow_v2di {}
+
+ const vf __builtin_altivec_vmrgow_v4sf (vf, vf);
+ VMRGOW_V4SF p8_vmrgow_v4sf {}
+
+ const vsi __builtin_altivec_vmrgow_v4si (vsi, vsi);
+ VMRGOW_V4SI p8_vmrgow_v4si {}
+
+ const vsll __builtin_altivec_vmulesw (vsi, vsi);
+ VMULESW vec_widen_smult_even_v4si {}
+
+ const vull __builtin_altivec_vmuleuw (vui, vui);
+ VMULEUW vec_widen_umult_even_v4si {}
+
+ const vsll __builtin_altivec_vmulosw (vsi, vsi);
+ VMULOSW vec_widen_smult_odd_v4si {}
+
+ const vull __builtin_altivec_vmulouw (vui, vui);
+ VMULOUW vec_widen_umult_odd_v4si {}
+
+ const vsc __builtin_altivec_vpermxor (vsc, vsc, vsc);
+ VPERMXOR altivec_vpermxor {}
+
+ const vsi __builtin_altivec_vpksdss (vsll, vsll);
+ VPKSDSS altivec_vpksdss {}
+
+ const vui __builtin_altivec_vpksdus (vsll, vsll);
+ VPKSDUS altivec_vpksdus {}
+
+ const vui __builtin_altivec_vpkudum (vull, vull);
+ VPKUDUM altivec_vpkudum {}
+
+ const vui __builtin_altivec_vpkudus (vull, vull);
+ VPKUDUS altivec_vpkudus {}
+
+; #### Following are duplicates of __builtin_crypto_vpmsum*. This
+; can't have ever worked properly!
+;
+; const vus __builtin_altivec_vpmsumb (vuc, vuc);
+; VPMSUMB crypto_vpmsumb {}
+;
+; const vuq __builtin_altivec_vpmsumd (vull, vull);
+; VPMSUMD crypto_vpmsumd {}
+;
+; const vui __builtin_altivec_vpmsumh (vus, vus);
+; VPMSUMH crypto_vpmsumh {}
+;
+; const vull __builtin_altivec_vpmsumw (vui, vui);
+; VPMSUMW crypto_vpmsumw {}
+
+ const vuc __builtin_altivec_vpopcntb (vsc);
+ VPOPCNTB popcountv16qi2 {}
+
+ const vull __builtin_altivec_vpopcntd (vsll);
+ VPOPCNTD popcountv2di2 {}
+
+ const vus __builtin_altivec_vpopcnth (vss);
+ VPOPCNTH popcountv8hi2 {}
+
+ const vuc __builtin_altivec_vpopcntub (vuc);
+ VPOPCNTUB popcountv16qi2 {}
+
+ const vull __builtin_altivec_vpopcntud (vull);
+ VPOPCNTUD popcountv2di2 {}
+
+ const vus __builtin_altivec_vpopcntuh (vus);
+ VPOPCNTUH popcountv8hi2 {}
+
+ const vui __builtin_altivec_vpopcntuw (vui);
+ VPOPCNTUW popcountv4si2 {}
+
+ const vui __builtin_altivec_vpopcntw (vsi);
+ VPOPCNTW popcountv4si2 {}
+
+ const vsll __builtin_altivec_vrld (vsll, vull);
+ VRLD vrotlv2di3 {}
+
+ const vsll __builtin_altivec_vsld (vsll, vull);
+ VSLD vashlv2di3 {}
+
+ const vsll __builtin_altivec_vsrad (vsll, vull);
+ VSRAD vashrv2di3 {}
+
+ const vsll __builtin_altivec_vsrd (vsll, vull);
+ VSRD vlshrv2di3 {}
+
+ const vuq __builtin_altivec_vsubcuq (vuq, vuq);
+ VSUBCUQ altivec_vsubcuq {}
+
+ const vsq __builtin_altivec_vsubecuq (vsq, vsq, vsq);
+ VSUBECUQ altivec_vsubecuq {}
+
+ const vuq __builtin_altivec_vsubeuqm (vuq, vuq, vuq);
+ VSUBEUQM altivec_vsubeuqm {}
+
+ const vull __builtin_altivec_vsubudm (vull, vull);
+ VSUBUDM subv2di3 {}
+
+ const vuq __builtin_altivec_vsubuqm (vuq, vuq);
+ VSUBUQM altivec_vsubuqm {}
+
+ const vsll __builtin_altivec_vupkhsw (vsi);
+ VUPKHSW altivec_vupkhsw {}
+
+ const vsll __builtin_altivec_vupklsw (vsi);
+ VUPKLSW altivec_vupklsw {}
+
+ const vsq __builtin_bcdadd (vsq, vsq, const int<1>);
+ BCDADD bcdadd {}
+
+ const unsigned int __builtin_bcdadd_eq (vsq, vsq, const int<1>);
+ BCDADD_EQ bcdadd_eq {}
+
+ const unsigned int __builtin_bcdadd_gt (vsq, vsq, const int<1>);
+ BCDADD_GT bcdadd_gt {}
+
+ const unsigned int __builtin_bcdadd_lt (vsq, vsq, const int<1>);
+ BCDADD_LT bcdadd_lt {}
+
+ const unsigned int __builtin_bcdadd_ov (vsq, vsq, const int<1>);
+ BCDADD_OV bcdadd_unordered {}
+
+ const vsq __builtin_bcdsub (vsq, vsq, const int<1>);
+ BCDSUB bcdsub {}
+
+ const unsigned int __builtin_bcdsub_eq (vsq, vsq, const int<1>);
+ BCDSUB_EQ bcdsub_eq {}
+
+ const unsigned int __builtin_bcdsub_gt (vsq, vsq, const int<1>);
+ BCDSUB_GT bcdsub_gt {}
+
+ const unsigned int __builtin_bcdsub_lt (vsq, vsq, const int<1>);
+ BCDSUB_LT bcdsub_lt {}
+
+ const unsigned int __builtin_bcdsub_ov (vsq, vsq, const int<1>);
+ BCDSUB_OV bcdsub_unordered {}
+
+ const vuc __builtin_crypto_vpermxor_v16qi (vuc, vuc, vuc);
+ VPERMXOR_V16QI crypto_vpermxor_v16qi {}
+
+ const vull __builtin_crypto_vpermxor_v2di (vull, vull, vull);
+ VPERMXOR_V2DI crypto_vpermxor_v2di {}
+
+ const vui __builtin_crypto_vpermxor_v4si (vui, vui, vui);
+ VPERMXOR_V4SI crypto_vpermxor_v4si {}
+
+ const vus __builtin_crypto_vpermxor_v8hi (vus, vus, vus);
+ VPERMXOR_V8HI crypto_vpermxor_v8hi {}
+
+ const vus __builtin_crypto_vpmsumb (vuc, vuc);
+ VPMSUMB crypto_vpmsumb {}
+
+ const vuq __builtin_crypto_vpmsumd (vull, vull);
+ VPMSUMD crypto_vpmsumd {}
+
+ const vui __builtin_crypto_vpmsumh (vus, vus);
+ VPMSUMH crypto_vpmsumh {}
+
+ const vull __builtin_crypto_vpmsumw (vui, vui);
+ VPMSUMW crypto_vpmsumw {}
+
+ const vf __builtin_vsx_float2_v2df (vd, vd);
+ FLOAT2_V2DF float2_v2df {}
+
+ const vf __builtin_vsx_float2_v2di (vsll, vsll);
+ FLOAT2_V2DI float2_v2di {}
+
+ const vsc __builtin_vsx_revb_v16qi (vsc);
+ REVB_V16QI revb_v16qi {}
+
+ const vsq __builtin_vsx_revb_v1ti (vsq);
+ REVB_V1TI revb_v1ti {}
+
+ const vd __builtin_vsx_revb_v2df (vd);
+ REVB_V2DF revb_v2df {}
+
+ const vsll __builtin_vsx_revb_v2di (vsll);
+ REVB_V2DI revb_v2di {}
+
+ const vf __builtin_vsx_revb_v4sf (vf);
+ REVB_V4SF revb_v4sf {}
+
+ const vsi __builtin_vsx_revb_v4si (vsi);
+ REVB_V4SI revb_v4si {}
+
+ const vss __builtin_vsx_revb_v8hi (vss);
+ REVB_V8HI revb_v8hi {}
+
+ const vf __builtin_vsx_uns_float2_v2di (vull, vull);
+ UNS_FLOAT2_V2DI uns_float2_v2di {}
+
+ const vsi __builtin_vsx_vsigned2_v2df (vd, vd);
+ VEC_VSIGNED2_V2DF vsigned2_v2df {}
+
+ const vui __builtin_vsx_vunsigned2_v2df (vd, vd);
+ VEC_VUNSIGNED2_V2DF vunsigned2_v2df {}
+
+ const vf __builtin_vsx_xscvdpspn (double);
+ XSCVDPSPN vsx_xscvdpspn {}
+
+ const double __builtin_vsx_xscvspdpn (vf);
+ XSCVSPDPN vsx_xscvspdpn {}
+
+
^ permalink raw reply [flat|nested] 8+ messages in thread
* [gcc(refs/users/wschmidt/heads/builtins3)] rs6000: Add Power8 vector builtins
@ 2020-10-29 19:51 William Schmidt
0 siblings, 0 replies; 8+ messages in thread
From: William Schmidt @ 2020-10-29 19:51 UTC (permalink / raw)
To: gcc-cvs
https://gcc.gnu.org/g:27bca083ed48b8933213c21a0253335e58a18dd9
commit 27bca083ed48b8933213c21a0253335e58a18dd9
Author: Bill Schmidt <wschmidt@linux.ibm.com>
Date: Wed Jun 17 11:40:50 2020 -0500
rs6000: Add Power8 vector builtins
2020-07-26 Bill Schmidt <wschmidt@linux.ibm.com>
* config/rs6000/rs6000-builtin-new.def: Add power8-vector
builtins.
Diff:
---
gcc/config/rs6000/rs6000-builtin-new.def | 417 +++++++++++++++++++++++++++++++
1 file changed, 417 insertions(+)
diff --git a/gcc/config/rs6000/rs6000-builtin-new.def b/gcc/config/rs6000/rs6000-builtin-new.def
index 0a17cad446c..2f918c1d69e 100644
--- a/gcc/config/rs6000/rs6000-builtin-new.def
+++ b/gcc/config/rs6000/rs6000-builtin-new.def
@@ -1977,3 +1977,420 @@
DIVDEU diveu_di {}
+; Power8 vector built-ins.
+[power8-vector]
+ const vsll __builtin_altivec_abs_v2di (vsll);
+ ABS_V2DI absv2di2 {}
+
+ const vsc __builtin_altivec_eqv_v16qi (vsc, vsc);
+ EQV_V16QI eqvv16qi3 {}
+
+ const vuc __builtin_altivec_eqv_v16qi_uns (vuc, vuc);
+ EQV_V16QI_UNS eqvv16qi3 {}
+
+ const vsq __builtin_altivec_eqv_v1ti (vsq, vsq);
+ EQV_V1TI eqvv1ti3 {}
+
+ const vuq __builtin_altivec_eqv_v1ti_uns (vuq, vuq);
+ EQV_V1TI_UNS eqvv1ti3 {}
+
+ const vd __builtin_altivec_eqv_v2df (vd, vd);
+ EQV_V2DF eqvv2df3 {}
+
+ const vsll __builtin_altivec_eqv_v2di (vsll, vsll);
+ EQV_V2DI eqvv2di3 {}
+
+ const vull __builtin_altivec_eqv_v2di_uns (vull, vull);
+ EQV_V2DI_UNS eqvv2di3 {}
+
+ const vf __builtin_altivec_eqv_v4sf (vf, vf);
+ EQV_V4SF eqvv4sf3 {}
+
+ const vsi __builtin_altivec_eqv_v4si (vsi, vsi);
+ EQV_V4SI eqvv4si3 {}
+
+ const vui __builtin_altivec_eqv_v4si_uns (vui, vui);
+ EQV_V4SI_UNS eqvv4si3 {}
+
+ const vss __builtin_altivec_eqv_v8hi (vss, vss);
+ EQV_V8HI eqvv8hi3 {}
+
+ const vus __builtin_altivec_eqv_v8hi_uns (vus, vus);
+ EQV_V8HI_UNS eqvv8hi3 {}
+
+ const vsc __builtin_altivec_nand_v16qi (vsc, vsc);
+ NAND_V16QI nandv16qi3 {}
+
+ const vuc __builtin_altivec_nand_v16qi_uns (vuc, vuc);
+ NAND_V16QI_UNS nandv16qi3 {}
+
+ const vsq __builtin_altivec_nand_v1ti (vsq, vsq);
+ NAND_V1TI nandv1ti3 {}
+
+ const vuq __builtin_altivec_nand_v1ti_uns (vuq, vuq);
+ NAND_V1TI_UNS nandv1ti3 {}
+
+ const vd __builtin_altivec_nand_v2df (vd, vd);
+ NAND_V2DF nandv2df3 {}
+
+ const vsll __builtin_altivec_nand_v2di (vsll, vsll);
+ NAND_V2DI nandv2di3 {}
+
+ const vull __builtin_altivec_nand_v2di_uns (vull, vull);
+ NAND_V2DI_UNS nandv2di3 {}
+
+ const vf __builtin_altivec_nand_v4sf (vf, vf);
+ NAND_V4SF nandv4sf3 {}
+
+ const vsi __builtin_altivec_nand_v4si (vsi, vsi);
+ NAND_V4SI nandv4si3 {}
+
+ const vui __builtin_altivec_nand_v4si_uns (vui, vui);
+ NAND_V4SI_UNS nandv4si3 {}
+
+ const vss __builtin_altivec_nand_v8hi (vss, vss);
+ NAND_V8HI nandv8hi3 {}
+
+ const vus __builtin_altivec_nand_v8hi_uns (vus, vus);
+ NAND_V8HI_UNS nandv8hi3 {}
+
+ const vsc __builtin_altivec_neg_v16qi (vsc);
+ NEG_V16QI negv16qi2 {}
+
+ const vd __builtin_altivec_neg_v2df (vd);
+ NEG_V2DF negv2df2 {}
+
+ const vsll __builtin_altivec_neg_v2di (vsll);
+ NEG_V2DI negv2di2 {}
+
+ const vf __builtin_altivec_neg_v4sf (vf);
+ NEG_V4SF negv4sf2 {}
+
+ const vsi __builtin_altivec_neg_v4si (vsi);
+ NEG_V4SI negv4si2 {}
+
+ const vss __builtin_altivec_neg_v8hi (vss);
+ NEG_V8HI negv8hi2 {}
+
+ const vsc __builtin_altivec_orc_v16qi (vsc, vsc);
+ ORC_V16QI orcv16qi3 {}
+
+ const vuc __builtin_altivec_orc_v16qi_uns (vuc, vuc);
+ ORC_V16QI_UNS orcv16qi3 {}
+
+ const vsq __builtin_altivec_orc_v1ti (vsq, vsq);
+ ORC_V1TI orcv1ti3 {}
+
+ const vuq __builtin_altivec_orc_v1ti_uns (vuq, vuq);
+ ORC_V1TI_UNS orcv1ti3 {}
+
+ const vd __builtin_altivec_orc_v2df (vd, vd);
+ ORC_V2DF orcv2df3 {}
+
+ const vsll __builtin_altivec_orc_v2di (vsll, vsll);
+ ORC_V2DI orcv2di3 {}
+
+ const vull __builtin_altivec_orc_v2di_uns (vull, vull);
+ ORC_V2DI_UNS orcv2di3 {}
+
+ const vf __builtin_altivec_orc_v4sf (vf, vf);
+ ORC_V4SF orcv4sf3 {}
+
+ const vsi __builtin_altivec_orc_v4si (vsi, vsi);
+ ORC_V4SI orcv4si3 {}
+
+ const vui __builtin_altivec_orc_v4si_uns (vui, vui);
+ ORC_V4SI_UNS orcv4si3 {}
+
+ const vss __builtin_altivec_orc_v8hi (vss, vss);
+ ORC_V8HI orcv8hi3 {}
+
+ const vus __builtin_altivec_orc_v8hi_uns (vus, vus);
+ ORC_V8HI_UNS orcv8hi3 {}
+
+ const vsc __builtin_altivec_vclzb (vsc);
+ VCLZB clzv16qi2 {}
+
+ const vsll __builtin_altivec_vclzd (vsll);
+ VCLZD clzv2di2 {}
+
+ const vss __builtin_altivec_vclzh (vss);
+ VCLZH clzv8hi2 {}
+
+ const vsi __builtin_altivec_vclzw (vsi);
+ VCLZW clzv4si2 {}
+
+ const vsc __builtin_altivec_vgbbd (vsc);
+ VGBBD p8v_vgbbd {}
+
+ const vsq __builtin_altivec_vaddcuq (vsq, vsq);
+ VADDCUQ altivec_vaddcuq {}
+
+ const vsq __builtin_altivec_vaddecuq (vsq, vsq, vsq);
+ VADDECUQ altivec_vaddecuq {}
+
+ const vuq __builtin_altivec_vaddeuqm (vuq, vuq, vuq);
+ VADDEUQM altivec_vaddeuqm {}
+
+ const vsll __builtin_altivec_vaddudm (vsll, vsll);
+ VADDUDM addv2di3 {}
+
+ const vsq __builtin_altivec_vadduqm (vsq, vsq);
+ VADDUQM altivec_vadduqm {}
+
+ const vsll __builtin_altivec_vbpermq (vsll, vsc);
+ VBPERMQ altivec_vbpermq {}
+
+ const vuc __builtin_altivec_vbpermq2 (vuc, vuc);
+ VBPERMQ2 altivec_vbpermq2 {}
+
+ const vbll __builtin_altivec_vcmpequd (vsll, vsll);
+ VCMPEQUD vector_eqv2di {}
+
+ const int __builtin_altivec_vcmpequd_p (int, vsll, vsll);
+ VCMPEQUD_P vector_eq_v2di_p {pred}
+
+ const vbll __builtin_altivec_vcmpgtsd (vsll, vsll);
+ VCMPGTSD vector_gtv2di {}
+
+ const int __builtin_altivec_vcmpgtsd_p (int, vsll, vsll);
+ VCMPGTSD_P vector_gt_v2di_p {pred}
+
+ const vbll __builtin_altivec_vcmpgtud (vull, vull);
+ VCMPGTUD vector_gtuv2di {}
+
+ const int __builtin_altivec_vcmpgtud_p (int, vull, vull);
+ VCMPGTUD_P vector_gtu_v2di_p {pred}
+
+ const vsll __builtin_altivec_vmaxsd (vsll, vsll);
+ VMAXSD smaxv2di3 {}
+
+ const vull __builtin_altivec_vmaxud (vull, vull);
+ VMAXUD umaxv2di3 {}
+
+ const vsll __builtin_altivec_vminsd (vsll, vsll);
+ VMINSD sminv2di3 {}
+
+ const vull __builtin_altivec_vminud (vull, vull);
+ VMINUD uminv2di3 {}
+
+ const vd __builtin_altivec_vmrgew_v2df (vd, vd);
+ VMRGEW_V2DF p8_vmrgew_v2df {}
+
+ const vsll __builtin_altivec_vmrgew_v2di (vsll, vsll);
+ VMRGEW_V2DI p8_vmrgew_v2di {}
+
+ const vf __builtin_altivec_vmrgew_v4sf (vf, vf);
+ VMRGEW_V4SF p8_vmrgew_v4sf {}
+
+ const vsi __builtin_altivec_vmrgew_v4si (vsi, vsi);
+ VMRGEW_V4SI p8_vmrgew_v4si {}
+
+ const vd __builtin_altivec_vmrgow_v2df (vd, vd);
+ VMRGOW_V2DF p8_vmrgow_v2df {}
+
+ const vsll __builtin_altivec_vmrgow_v2di (vsll, vsll);
+ VMRGOW_V2DI p8_vmrgow_v2di {}
+
+ const vf __builtin_altivec_vmrgow_v4sf (vf, vf);
+ VMRGOW_V4SF p8_vmrgow_v4sf {}
+
+ const vsi __builtin_altivec_vmrgow_v4si (vsi, vsi);
+ VMRGOW_V4SI p8_vmrgow_v4si {}
+
+ const vsll __builtin_altivec_vmulesw (vsi, vsi);
+ VMULESW vec_widen_smult_even_v4si {}
+
+ const vull __builtin_altivec_vmuleuw (vui, vui);
+ VMULEUW vec_widen_umult_even_v4si {}
+
+ const vsll __builtin_altivec_vmulosw (vsi, vsi);
+ VMULOSW vec_widen_smult_odd_v4si {}
+
+ const vull __builtin_altivec_vmulouw (vui, vui);
+ VMULOUW vec_widen_umult_odd_v4si {}
+
+ const vsc __builtin_altivec_vpermxor (vsc, vsc, vsc);
+ VPERMXOR altivec_vpermxor {}
+
+ const vsi __builtin_altivec_vpksdss (vsll, vsll);
+ VPKSDSS altivec_vpksdss {}
+
+ const vui __builtin_altivec_vpksdus (vsll, vsll);
+ VPKSDUS altivec_vpksdus {}
+
+ const vui __builtin_altivec_vpkudum (vull, vull);
+ VPKUDUM altivec_vpkudum {}
+
+ const vui __builtin_altivec_vpkudus (vull, vull);
+ VPKUDUS altivec_vpkudus {}
+
+; #### Following are duplicates of __builtin_crypto_vpmsum*. This
+; can't have ever worked properly!
+;
+; const vus __builtin_altivec_vpmsumb (vuc, vuc);
+; VPMSUMB crypto_vpmsumb {}
+;
+; const vuq __builtin_altivec_vpmsumd (vull, vull);
+; VPMSUMD crypto_vpmsumd {}
+;
+; const vui __builtin_altivec_vpmsumh (vus, vus);
+; VPMSUMH crypto_vpmsumh {}
+;
+; const vull __builtin_altivec_vpmsumw (vui, vui);
+; VPMSUMW crypto_vpmsumw {}
+
+ const vuc __builtin_altivec_vpopcntb (vsc);
+ VPOPCNTB popcountv16qi2 {}
+
+ const vull __builtin_altivec_vpopcntd (vsll);
+ VPOPCNTD popcountv2di2 {}
+
+ const vus __builtin_altivec_vpopcnth (vss);
+ VPOPCNTH popcountv8hi2 {}
+
+ const vuc __builtin_altivec_vpopcntub (vuc);
+ VPOPCNTUB popcountv16qi2 {}
+
+ const vull __builtin_altivec_vpopcntud (vull);
+ VPOPCNTUD popcountv2di2 {}
+
+ const vus __builtin_altivec_vpopcntuh (vus);
+ VPOPCNTUH popcountv8hi2 {}
+
+ const vui __builtin_altivec_vpopcntuw (vui);
+ VPOPCNTUW popcountv4si2 {}
+
+ const vui __builtin_altivec_vpopcntw (vsi);
+ VPOPCNTW popcountv4si2 {}
+
+ const vsll __builtin_altivec_vrld (vsll, vull);
+ VRLD vrotlv2di3 {}
+
+ const vsll __builtin_altivec_vsld (vsll, vull);
+ VSLD vashlv2di3 {}
+
+ const vsll __builtin_altivec_vsrad (vsll, vull);
+ VSRAD vashrv2di3 {}
+
+ const vsll __builtin_altivec_vsrd (vsll, vull);
+ VSRD vlshrv2di3 {}
+
+ const vuq __builtin_altivec_vsubcuq (vuq, vuq);
+ VSUBCUQ altivec_vsubcuq {}
+
+ const vsq __builtin_altivec_vsubecuq (vsq, vsq, vsq);
+ VSUBECUQ altivec_vsubecuq {}
+
+ const vuq __builtin_altivec_vsubeuqm (vuq, vuq, vuq);
+ VSUBEUQM altivec_vsubeuqm {}
+
+ const vull __builtin_altivec_vsubudm (vull, vull);
+ VSUBUDM subv2di3 {}
+
+ const vuq __builtin_altivec_vsubuqm (vuq, vuq);
+ VSUBUQM altivec_vsubuqm {}
+
+ const vsll __builtin_altivec_vupkhsw (vsi);
+ VUPKHSW altivec_vupkhsw {}
+
+ const vsll __builtin_altivec_vupklsw (vsi);
+ VUPKLSW altivec_vupklsw {}
+
+ const vsq __builtin_bcdadd (vsq, vsq, const int<1>);
+ BCDADD bcdadd {}
+
+ const unsigned int __builtin_bcdadd_eq (vsq, vsq, const int<1>);
+ BCDADD_EQ bcdadd_eq {}
+
+ const unsigned int __builtin_bcdadd_gt (vsq, vsq, const int<1>);
+ BCDADD_GT bcdadd_gt {}
+
+ const unsigned int __builtin_bcdadd_lt (vsq, vsq, const int<1>);
+ BCDADD_LT bcdadd_lt {}
+
+ const unsigned int __builtin_bcdadd_ov (vsq, vsq, const int<1>);
+ BCDADD_OV bcdadd_unordered {}
+
+ const vsq __builtin_bcdsub (vsq, vsq, const int<1>);
+ BCDSUB bcdsub {}
+
+ const unsigned int __builtin_bcdsub_eq (vsq, vsq, const int<1>);
+ BCDSUB_EQ bcdsub_eq {}
+
+ const unsigned int __builtin_bcdsub_gt (vsq, vsq, const int<1>);
+ BCDSUB_GT bcdsub_gt {}
+
+ const unsigned int __builtin_bcdsub_lt (vsq, vsq, const int<1>);
+ BCDSUB_LT bcdsub_lt {}
+
+ const unsigned int __builtin_bcdsub_ov (vsq, vsq, const int<1>);
+ BCDSUB_OV bcdsub_unordered {}
+
+ const vuc __builtin_crypto_vpermxor_v16qi (vuc, vuc, vuc);
+ VPERMXOR_V16QI crypto_vpermxor_v16qi {}
+
+ const vull __builtin_crypto_vpermxor_v2di (vull, vull, vull);
+ VPERMXOR_V2DI crypto_vpermxor_v2di {}
+
+ const vui __builtin_crypto_vpermxor_v4si (vui, vui, vui);
+ VPERMXOR_V4SI crypto_vpermxor_v4si {}
+
+ const vus __builtin_crypto_vpermxor_v8hi (vus, vus, vus);
+ VPERMXOR_V8HI crypto_vpermxor_v8hi {}
+
+ const vus __builtin_crypto_vpmsumb (vuc, vuc);
+ VPMSUMB crypto_vpmsumb {}
+
+ const vuq __builtin_crypto_vpmsumd (vull, vull);
+ VPMSUMD crypto_vpmsumd {}
+
+ const vui __builtin_crypto_vpmsumh (vus, vus);
+ VPMSUMH crypto_vpmsumh {}
+
+ const vull __builtin_crypto_vpmsumw (vui, vui);
+ VPMSUMW crypto_vpmsumw {}
+
+ const vf __builtin_vsx_float2_v2df (vd, vd);
+ FLOAT2_V2DF float2_v2df {}
+
+ const vf __builtin_vsx_float2_v2di (vsll, vsll);
+ FLOAT2_V2DI float2_v2di {}
+
+ const vsc __builtin_vsx_revb_v16qi (vsc);
+ REVB_V16QI revb_v16qi {}
+
+ const vsq __builtin_vsx_revb_v1ti (vsq);
+ REVB_V1TI revb_v1ti {}
+
+ const vd __builtin_vsx_revb_v2df (vd);
+ REVB_V2DF revb_v2df {}
+
+ const vsll __builtin_vsx_revb_v2di (vsll);
+ REVB_V2DI revb_v2di {}
+
+ const vf __builtin_vsx_revb_v4sf (vf);
+ REVB_V4SF revb_v4sf {}
+
+ const vsi __builtin_vsx_revb_v4si (vsi);
+ REVB_V4SI revb_v4si {}
+
+ const vss __builtin_vsx_revb_v8hi (vss);
+ REVB_V8HI revb_v8hi {}
+
+ const vf __builtin_vsx_uns_float2_v2di (vull, vull);
+ UNS_FLOAT2_V2DI uns_float2_v2di {}
+
+ const vsi __builtin_vsx_vsigned2_v2df (vd, vd);
+ VEC_VSIGNED2_V2DF vsigned2_v2df {}
+
+ const vui __builtin_vsx_vunsigned2_v2df (vd, vd);
+ VEC_VUNSIGNED2_V2DF vunsigned2_v2df {}
+
+ const vf __builtin_vsx_xscvdpspn (double);
+ XSCVDPSPN vsx_xscvdpspn {}
+
+ const double __builtin_vsx_xscvspdpn (vf);
+ XSCVSPDPN vsx_xscvspdpn {}
+
+
^ permalink raw reply [flat|nested] 8+ messages in thread
* [gcc(refs/users/wschmidt/heads/builtins3)] rs6000: Add Power8 vector builtins
@ 2020-10-27 16:29 William Schmidt
0 siblings, 0 replies; 8+ messages in thread
From: William Schmidt @ 2020-10-27 16:29 UTC (permalink / raw)
To: gcc-cvs
https://gcc.gnu.org/g:deb057c7a19c957a5f7779d81223a70861ee7faf
commit deb057c7a19c957a5f7779d81223a70861ee7faf
Author: Bill Schmidt <wschmidt@linux.ibm.com>
Date: Wed Jun 17 11:40:50 2020 -0500
rs6000: Add Power8 vector builtins
2020-07-26 Bill Schmidt <wschmidt@linux.ibm.com>
* config/rs6000/rs6000-builtin-new.def: Add power8-vector
builtins.
Diff:
---
gcc/config/rs6000/rs6000-builtin-new.def | 417 +++++++++++++++++++++++++++++++
1 file changed, 417 insertions(+)
diff --git a/gcc/config/rs6000/rs6000-builtin-new.def b/gcc/config/rs6000/rs6000-builtin-new.def
index 0a17cad446c..2f918c1d69e 100644
--- a/gcc/config/rs6000/rs6000-builtin-new.def
+++ b/gcc/config/rs6000/rs6000-builtin-new.def
@@ -1977,3 +1977,420 @@
DIVDEU diveu_di {}
+; Power8 vector built-ins.
+[power8-vector]
+ const vsll __builtin_altivec_abs_v2di (vsll);
+ ABS_V2DI absv2di2 {}
+
+ const vsc __builtin_altivec_eqv_v16qi (vsc, vsc);
+ EQV_V16QI eqvv16qi3 {}
+
+ const vuc __builtin_altivec_eqv_v16qi_uns (vuc, vuc);
+ EQV_V16QI_UNS eqvv16qi3 {}
+
+ const vsq __builtin_altivec_eqv_v1ti (vsq, vsq);
+ EQV_V1TI eqvv1ti3 {}
+
+ const vuq __builtin_altivec_eqv_v1ti_uns (vuq, vuq);
+ EQV_V1TI_UNS eqvv1ti3 {}
+
+ const vd __builtin_altivec_eqv_v2df (vd, vd);
+ EQV_V2DF eqvv2df3 {}
+
+ const vsll __builtin_altivec_eqv_v2di (vsll, vsll);
+ EQV_V2DI eqvv2di3 {}
+
+ const vull __builtin_altivec_eqv_v2di_uns (vull, vull);
+ EQV_V2DI_UNS eqvv2di3 {}
+
+ const vf __builtin_altivec_eqv_v4sf (vf, vf);
+ EQV_V4SF eqvv4sf3 {}
+
+ const vsi __builtin_altivec_eqv_v4si (vsi, vsi);
+ EQV_V4SI eqvv4si3 {}
+
+ const vui __builtin_altivec_eqv_v4si_uns (vui, vui);
+ EQV_V4SI_UNS eqvv4si3 {}
+
+ const vss __builtin_altivec_eqv_v8hi (vss, vss);
+ EQV_V8HI eqvv8hi3 {}
+
+ const vus __builtin_altivec_eqv_v8hi_uns (vus, vus);
+ EQV_V8HI_UNS eqvv8hi3 {}
+
+ const vsc __builtin_altivec_nand_v16qi (vsc, vsc);
+ NAND_V16QI nandv16qi3 {}
+
+ const vuc __builtin_altivec_nand_v16qi_uns (vuc, vuc);
+ NAND_V16QI_UNS nandv16qi3 {}
+
+ const vsq __builtin_altivec_nand_v1ti (vsq, vsq);
+ NAND_V1TI nandv1ti3 {}
+
+ const vuq __builtin_altivec_nand_v1ti_uns (vuq, vuq);
+ NAND_V1TI_UNS nandv1ti3 {}
+
+ const vd __builtin_altivec_nand_v2df (vd, vd);
+ NAND_V2DF nandv2df3 {}
+
+ const vsll __builtin_altivec_nand_v2di (vsll, vsll);
+ NAND_V2DI nandv2di3 {}
+
+ const vull __builtin_altivec_nand_v2di_uns (vull, vull);
+ NAND_V2DI_UNS nandv2di3 {}
+
+ const vf __builtin_altivec_nand_v4sf (vf, vf);
+ NAND_V4SF nandv4sf3 {}
+
+ const vsi __builtin_altivec_nand_v4si (vsi, vsi);
+ NAND_V4SI nandv4si3 {}
+
+ const vui __builtin_altivec_nand_v4si_uns (vui, vui);
+ NAND_V4SI_UNS nandv4si3 {}
+
+ const vss __builtin_altivec_nand_v8hi (vss, vss);
+ NAND_V8HI nandv8hi3 {}
+
+ const vus __builtin_altivec_nand_v8hi_uns (vus, vus);
+ NAND_V8HI_UNS nandv8hi3 {}
+
+ const vsc __builtin_altivec_neg_v16qi (vsc);
+ NEG_V16QI negv16qi2 {}
+
+ const vd __builtin_altivec_neg_v2df (vd);
+ NEG_V2DF negv2df2 {}
+
+ const vsll __builtin_altivec_neg_v2di (vsll);
+ NEG_V2DI negv2di2 {}
+
+ const vf __builtin_altivec_neg_v4sf (vf);
+ NEG_V4SF negv4sf2 {}
+
+ const vsi __builtin_altivec_neg_v4si (vsi);
+ NEG_V4SI negv4si2 {}
+
+ const vss __builtin_altivec_neg_v8hi (vss);
+ NEG_V8HI negv8hi2 {}
+
+ const vsc __builtin_altivec_orc_v16qi (vsc, vsc);
+ ORC_V16QI orcv16qi3 {}
+
+ const vuc __builtin_altivec_orc_v16qi_uns (vuc, vuc);
+ ORC_V16QI_UNS orcv16qi3 {}
+
+ const vsq __builtin_altivec_orc_v1ti (vsq, vsq);
+ ORC_V1TI orcv1ti3 {}
+
+ const vuq __builtin_altivec_orc_v1ti_uns (vuq, vuq);
+ ORC_V1TI_UNS orcv1ti3 {}
+
+ const vd __builtin_altivec_orc_v2df (vd, vd);
+ ORC_V2DF orcv2df3 {}
+
+ const vsll __builtin_altivec_orc_v2di (vsll, vsll);
+ ORC_V2DI orcv2di3 {}
+
+ const vull __builtin_altivec_orc_v2di_uns (vull, vull);
+ ORC_V2DI_UNS orcv2di3 {}
+
+ const vf __builtin_altivec_orc_v4sf (vf, vf);
+ ORC_V4SF orcv4sf3 {}
+
+ const vsi __builtin_altivec_orc_v4si (vsi, vsi);
+ ORC_V4SI orcv4si3 {}
+
+ const vui __builtin_altivec_orc_v4si_uns (vui, vui);
+ ORC_V4SI_UNS orcv4si3 {}
+
+ const vss __builtin_altivec_orc_v8hi (vss, vss);
+ ORC_V8HI orcv8hi3 {}
+
+ const vus __builtin_altivec_orc_v8hi_uns (vus, vus);
+ ORC_V8HI_UNS orcv8hi3 {}
+
+ const vsc __builtin_altivec_vclzb (vsc);
+ VCLZB clzv16qi2 {}
+
+ const vsll __builtin_altivec_vclzd (vsll);
+ VCLZD clzv2di2 {}
+
+ const vss __builtin_altivec_vclzh (vss);
+ VCLZH clzv8hi2 {}
+
+ const vsi __builtin_altivec_vclzw (vsi);
+ VCLZW clzv4si2 {}
+
+ const vsc __builtin_altivec_vgbbd (vsc);
+ VGBBD p8v_vgbbd {}
+
+ const vsq __builtin_altivec_vaddcuq (vsq, vsq);
+ VADDCUQ altivec_vaddcuq {}
+
+ const vsq __builtin_altivec_vaddecuq (vsq, vsq, vsq);
+ VADDECUQ altivec_vaddecuq {}
+
+ const vuq __builtin_altivec_vaddeuqm (vuq, vuq, vuq);
+ VADDEUQM altivec_vaddeuqm {}
+
+ const vsll __builtin_altivec_vaddudm (vsll, vsll);
+ VADDUDM addv2di3 {}
+
+ const vsq __builtin_altivec_vadduqm (vsq, vsq);
+ VADDUQM altivec_vadduqm {}
+
+ const vsll __builtin_altivec_vbpermq (vsll, vsc);
+ VBPERMQ altivec_vbpermq {}
+
+ const vuc __builtin_altivec_vbpermq2 (vuc, vuc);
+ VBPERMQ2 altivec_vbpermq2 {}
+
+ const vbll __builtin_altivec_vcmpequd (vsll, vsll);
+ VCMPEQUD vector_eqv2di {}
+
+ const int __builtin_altivec_vcmpequd_p (int, vsll, vsll);
+ VCMPEQUD_P vector_eq_v2di_p {pred}
+
+ const vbll __builtin_altivec_vcmpgtsd (vsll, vsll);
+ VCMPGTSD vector_gtv2di {}
+
+ const int __builtin_altivec_vcmpgtsd_p (int, vsll, vsll);
+ VCMPGTSD_P vector_gt_v2di_p {pred}
+
+ const vbll __builtin_altivec_vcmpgtud (vull, vull);
+ VCMPGTUD vector_gtuv2di {}
+
+ const int __builtin_altivec_vcmpgtud_p (int, vull, vull);
+ VCMPGTUD_P vector_gtu_v2di_p {pred}
+
+ const vsll __builtin_altivec_vmaxsd (vsll, vsll);
+ VMAXSD smaxv2di3 {}
+
+ const vull __builtin_altivec_vmaxud (vull, vull);
+ VMAXUD umaxv2di3 {}
+
+ const vsll __builtin_altivec_vminsd (vsll, vsll);
+ VMINSD sminv2di3 {}
+
+ const vull __builtin_altivec_vminud (vull, vull);
+ VMINUD uminv2di3 {}
+
+ const vd __builtin_altivec_vmrgew_v2df (vd, vd);
+ VMRGEW_V2DF p8_vmrgew_v2df {}
+
+ const vsll __builtin_altivec_vmrgew_v2di (vsll, vsll);
+ VMRGEW_V2DI p8_vmrgew_v2di {}
+
+ const vf __builtin_altivec_vmrgew_v4sf (vf, vf);
+ VMRGEW_V4SF p8_vmrgew_v4sf {}
+
+ const vsi __builtin_altivec_vmrgew_v4si (vsi, vsi);
+ VMRGEW_V4SI p8_vmrgew_v4si {}
+
+ const vd __builtin_altivec_vmrgow_v2df (vd, vd);
+ VMRGOW_V2DF p8_vmrgow_v2df {}
+
+ const vsll __builtin_altivec_vmrgow_v2di (vsll, vsll);
+ VMRGOW_V2DI p8_vmrgow_v2di {}
+
+ const vf __builtin_altivec_vmrgow_v4sf (vf, vf);
+ VMRGOW_V4SF p8_vmrgow_v4sf {}
+
+ const vsi __builtin_altivec_vmrgow_v4si (vsi, vsi);
+ VMRGOW_V4SI p8_vmrgow_v4si {}
+
+ const vsll __builtin_altivec_vmulesw (vsi, vsi);
+ VMULESW vec_widen_smult_even_v4si {}
+
+ const vull __builtin_altivec_vmuleuw (vui, vui);
+ VMULEUW vec_widen_umult_even_v4si {}
+
+ const vsll __builtin_altivec_vmulosw (vsi, vsi);
+ VMULOSW vec_widen_smult_odd_v4si {}
+
+ const vull __builtin_altivec_vmulouw (vui, vui);
+ VMULOUW vec_widen_umult_odd_v4si {}
+
+ const vsc __builtin_altivec_vpermxor (vsc, vsc, vsc);
+ VPERMXOR altivec_vpermxor {}
+
+ const vsi __builtin_altivec_vpksdss (vsll, vsll);
+ VPKSDSS altivec_vpksdss {}
+
+ const vui __builtin_altivec_vpksdus (vsll, vsll);
+ VPKSDUS altivec_vpksdus {}
+
+ const vui __builtin_altivec_vpkudum (vull, vull);
+ VPKUDUM altivec_vpkudum {}
+
+ const vui __builtin_altivec_vpkudus (vull, vull);
+ VPKUDUS altivec_vpkudus {}
+
+; #### Following are duplicates of __builtin_crypto_vpmsum*. This
+; can't have ever worked properly!
+;
+; const vus __builtin_altivec_vpmsumb (vuc, vuc);
+; VPMSUMB crypto_vpmsumb {}
+;
+; const vuq __builtin_altivec_vpmsumd (vull, vull);
+; VPMSUMD crypto_vpmsumd {}
+;
+; const vui __builtin_altivec_vpmsumh (vus, vus);
+; VPMSUMH crypto_vpmsumh {}
+;
+; const vull __builtin_altivec_vpmsumw (vui, vui);
+; VPMSUMW crypto_vpmsumw {}
+
+ const vuc __builtin_altivec_vpopcntb (vsc);
+ VPOPCNTB popcountv16qi2 {}
+
+ const vull __builtin_altivec_vpopcntd (vsll);
+ VPOPCNTD popcountv2di2 {}
+
+ const vus __builtin_altivec_vpopcnth (vss);
+ VPOPCNTH popcountv8hi2 {}
+
+ const vuc __builtin_altivec_vpopcntub (vuc);
+ VPOPCNTUB popcountv16qi2 {}
+
+ const vull __builtin_altivec_vpopcntud (vull);
+ VPOPCNTUD popcountv2di2 {}
+
+ const vus __builtin_altivec_vpopcntuh (vus);
+ VPOPCNTUH popcountv8hi2 {}
+
+ const vui __builtin_altivec_vpopcntuw (vui);
+ VPOPCNTUW popcountv4si2 {}
+
+ const vui __builtin_altivec_vpopcntw (vsi);
+ VPOPCNTW popcountv4si2 {}
+
+ const vsll __builtin_altivec_vrld (vsll, vull);
+ VRLD vrotlv2di3 {}
+
+ const vsll __builtin_altivec_vsld (vsll, vull);
+ VSLD vashlv2di3 {}
+
+ const vsll __builtin_altivec_vsrad (vsll, vull);
+ VSRAD vashrv2di3 {}
+
+ const vsll __builtin_altivec_vsrd (vsll, vull);
+ VSRD vlshrv2di3 {}
+
+ const vuq __builtin_altivec_vsubcuq (vuq, vuq);
+ VSUBCUQ altivec_vsubcuq {}
+
+ const vsq __builtin_altivec_vsubecuq (vsq, vsq, vsq);
+ VSUBECUQ altivec_vsubecuq {}
+
+ const vuq __builtin_altivec_vsubeuqm (vuq, vuq, vuq);
+ VSUBEUQM altivec_vsubeuqm {}
+
+ const vull __builtin_altivec_vsubudm (vull, vull);
+ VSUBUDM subv2di3 {}
+
+ const vuq __builtin_altivec_vsubuqm (vuq, vuq);
+ VSUBUQM altivec_vsubuqm {}
+
+ const vsll __builtin_altivec_vupkhsw (vsi);
+ VUPKHSW altivec_vupkhsw {}
+
+ const vsll __builtin_altivec_vupklsw (vsi);
+ VUPKLSW altivec_vupklsw {}
+
+ const vsq __builtin_bcdadd (vsq, vsq, const int<1>);
+ BCDADD bcdadd {}
+
+ const unsigned int __builtin_bcdadd_eq (vsq, vsq, const int<1>);
+ BCDADD_EQ bcdadd_eq {}
+
+ const unsigned int __builtin_bcdadd_gt (vsq, vsq, const int<1>);
+ BCDADD_GT bcdadd_gt {}
+
+ const unsigned int __builtin_bcdadd_lt (vsq, vsq, const int<1>);
+ BCDADD_LT bcdadd_lt {}
+
+ const unsigned int __builtin_bcdadd_ov (vsq, vsq, const int<1>);
+ BCDADD_OV bcdadd_unordered {}
+
+ const vsq __builtin_bcdsub (vsq, vsq, const int<1>);
+ BCDSUB bcdsub {}
+
+ const unsigned int __builtin_bcdsub_eq (vsq, vsq, const int<1>);
+ BCDSUB_EQ bcdsub_eq {}
+
+ const unsigned int __builtin_bcdsub_gt (vsq, vsq, const int<1>);
+ BCDSUB_GT bcdsub_gt {}
+
+ const unsigned int __builtin_bcdsub_lt (vsq, vsq, const int<1>);
+ BCDSUB_LT bcdsub_lt {}
+
+ const unsigned int __builtin_bcdsub_ov (vsq, vsq, const int<1>);
+ BCDSUB_OV bcdsub_unordered {}
+
+ const vuc __builtin_crypto_vpermxor_v16qi (vuc, vuc, vuc);
+ VPERMXOR_V16QI crypto_vpermxor_v16qi {}
+
+ const vull __builtin_crypto_vpermxor_v2di (vull, vull, vull);
+ VPERMXOR_V2DI crypto_vpermxor_v2di {}
+
+ const vui __builtin_crypto_vpermxor_v4si (vui, vui, vui);
+ VPERMXOR_V4SI crypto_vpermxor_v4si {}
+
+ const vus __builtin_crypto_vpermxor_v8hi (vus, vus, vus);
+ VPERMXOR_V8HI crypto_vpermxor_v8hi {}
+
+ const vus __builtin_crypto_vpmsumb (vuc, vuc);
+ VPMSUMB crypto_vpmsumb {}
+
+ const vuq __builtin_crypto_vpmsumd (vull, vull);
+ VPMSUMD crypto_vpmsumd {}
+
+ const vui __builtin_crypto_vpmsumh (vus, vus);
+ VPMSUMH crypto_vpmsumh {}
+
+ const vull __builtin_crypto_vpmsumw (vui, vui);
+ VPMSUMW crypto_vpmsumw {}
+
+ const vf __builtin_vsx_float2_v2df (vd, vd);
+ FLOAT2_V2DF float2_v2df {}
+
+ const vf __builtin_vsx_float2_v2di (vsll, vsll);
+ FLOAT2_V2DI float2_v2di {}
+
+ const vsc __builtin_vsx_revb_v16qi (vsc);
+ REVB_V16QI revb_v16qi {}
+
+ const vsq __builtin_vsx_revb_v1ti (vsq);
+ REVB_V1TI revb_v1ti {}
+
+ const vd __builtin_vsx_revb_v2df (vd);
+ REVB_V2DF revb_v2df {}
+
+ const vsll __builtin_vsx_revb_v2di (vsll);
+ REVB_V2DI revb_v2di {}
+
+ const vf __builtin_vsx_revb_v4sf (vf);
+ REVB_V4SF revb_v4sf {}
+
+ const vsi __builtin_vsx_revb_v4si (vsi);
+ REVB_V4SI revb_v4si {}
+
+ const vss __builtin_vsx_revb_v8hi (vss);
+ REVB_V8HI revb_v8hi {}
+
+ const vf __builtin_vsx_uns_float2_v2di (vull, vull);
+ UNS_FLOAT2_V2DI uns_float2_v2di {}
+
+ const vsi __builtin_vsx_vsigned2_v2df (vd, vd);
+ VEC_VSIGNED2_V2DF vsigned2_v2df {}
+
+ const vui __builtin_vsx_vunsigned2_v2df (vd, vd);
+ VEC_VUNSIGNED2_V2DF vunsigned2_v2df {}
+
+ const vf __builtin_vsx_xscvdpspn (double);
+ XSCVDPSPN vsx_xscvdpspn {}
+
+ const double __builtin_vsx_xscvspdpn (vf);
+ XSCVSPDPN vsx_xscvspdpn {}
+
+
^ permalink raw reply [flat|nested] 8+ messages in thread
* [gcc(refs/users/wschmidt/heads/builtins3)] rs6000: Add Power8 vector builtins
@ 2020-09-16 21:30 William Schmidt
0 siblings, 0 replies; 8+ messages in thread
From: William Schmidt @ 2020-09-16 21:30 UTC (permalink / raw)
To: gcc-cvs
https://gcc.gnu.org/g:a9b9c00e855561c6db10c944d1c7c72ec5bc6bb6
commit a9b9c00e855561c6db10c944d1c7c72ec5bc6bb6
Author: Bill Schmidt <wschmidt@linux.ibm.com>
Date: Wed Jun 17 11:40:50 2020 -0500
rs6000: Add Power8 vector builtins
2020-07-26 Bill Schmidt <wschmidt@linux.ibm.com>
* config/rs6000/rs6000-builtin-new.def: Add power8-vector
builtins.
Diff:
---
gcc/config/rs6000/rs6000-builtin-new.def | 417 +++++++++++++++++++++++++++++++
1 file changed, 417 insertions(+)
diff --git a/gcc/config/rs6000/rs6000-builtin-new.def b/gcc/config/rs6000/rs6000-builtin-new.def
index 0a17cad446c..2f918c1d69e 100644
--- a/gcc/config/rs6000/rs6000-builtin-new.def
+++ b/gcc/config/rs6000/rs6000-builtin-new.def
@@ -1977,3 +1977,420 @@
DIVDEU diveu_di {}
+; Power8 vector built-ins.
+[power8-vector]
+ const vsll __builtin_altivec_abs_v2di (vsll);
+ ABS_V2DI absv2di2 {}
+
+ const vsc __builtin_altivec_eqv_v16qi (vsc, vsc);
+ EQV_V16QI eqvv16qi3 {}
+
+ const vuc __builtin_altivec_eqv_v16qi_uns (vuc, vuc);
+ EQV_V16QI_UNS eqvv16qi3 {}
+
+ const vsq __builtin_altivec_eqv_v1ti (vsq, vsq);
+ EQV_V1TI eqvv1ti3 {}
+
+ const vuq __builtin_altivec_eqv_v1ti_uns (vuq, vuq);
+ EQV_V1TI_UNS eqvv1ti3 {}
+
+ const vd __builtin_altivec_eqv_v2df (vd, vd);
+ EQV_V2DF eqvv2df3 {}
+
+ const vsll __builtin_altivec_eqv_v2di (vsll, vsll);
+ EQV_V2DI eqvv2di3 {}
+
+ const vull __builtin_altivec_eqv_v2di_uns (vull, vull);
+ EQV_V2DI_UNS eqvv2di3 {}
+
+ const vf __builtin_altivec_eqv_v4sf (vf, vf);
+ EQV_V4SF eqvv4sf3 {}
+
+ const vsi __builtin_altivec_eqv_v4si (vsi, vsi);
+ EQV_V4SI eqvv4si3 {}
+
+ const vui __builtin_altivec_eqv_v4si_uns (vui, vui);
+ EQV_V4SI_UNS eqvv4si3 {}
+
+ const vss __builtin_altivec_eqv_v8hi (vss, vss);
+ EQV_V8HI eqvv8hi3 {}
+
+ const vus __builtin_altivec_eqv_v8hi_uns (vus, vus);
+ EQV_V8HI_UNS eqvv8hi3 {}
+
+ const vsc __builtin_altivec_nand_v16qi (vsc, vsc);
+ NAND_V16QI nandv16qi3 {}
+
+ const vuc __builtin_altivec_nand_v16qi_uns (vuc, vuc);
+ NAND_V16QI_UNS nandv16qi3 {}
+
+ const vsq __builtin_altivec_nand_v1ti (vsq, vsq);
+ NAND_V1TI nandv1ti3 {}
+
+ const vuq __builtin_altivec_nand_v1ti_uns (vuq, vuq);
+ NAND_V1TI_UNS nandv1ti3 {}
+
+ const vd __builtin_altivec_nand_v2df (vd, vd);
+ NAND_V2DF nandv2df3 {}
+
+ const vsll __builtin_altivec_nand_v2di (vsll, vsll);
+ NAND_V2DI nandv2di3 {}
+
+ const vull __builtin_altivec_nand_v2di_uns (vull, vull);
+ NAND_V2DI_UNS nandv2di3 {}
+
+ const vf __builtin_altivec_nand_v4sf (vf, vf);
+ NAND_V4SF nandv4sf3 {}
+
+ const vsi __builtin_altivec_nand_v4si (vsi, vsi);
+ NAND_V4SI nandv4si3 {}
+
+ const vui __builtin_altivec_nand_v4si_uns (vui, vui);
+ NAND_V4SI_UNS nandv4si3 {}
+
+ const vss __builtin_altivec_nand_v8hi (vss, vss);
+ NAND_V8HI nandv8hi3 {}
+
+ const vus __builtin_altivec_nand_v8hi_uns (vus, vus);
+ NAND_V8HI_UNS nandv8hi3 {}
+
+ const vsc __builtin_altivec_neg_v16qi (vsc);
+ NEG_V16QI negv16qi2 {}
+
+ const vd __builtin_altivec_neg_v2df (vd);
+ NEG_V2DF negv2df2 {}
+
+ const vsll __builtin_altivec_neg_v2di (vsll);
+ NEG_V2DI negv2di2 {}
+
+ const vf __builtin_altivec_neg_v4sf (vf);
+ NEG_V4SF negv4sf2 {}
+
+ const vsi __builtin_altivec_neg_v4si (vsi);
+ NEG_V4SI negv4si2 {}
+
+ const vss __builtin_altivec_neg_v8hi (vss);
+ NEG_V8HI negv8hi2 {}
+
+ const vsc __builtin_altivec_orc_v16qi (vsc, vsc);
+ ORC_V16QI orcv16qi3 {}
+
+ const vuc __builtin_altivec_orc_v16qi_uns (vuc, vuc);
+ ORC_V16QI_UNS orcv16qi3 {}
+
+ const vsq __builtin_altivec_orc_v1ti (vsq, vsq);
+ ORC_V1TI orcv1ti3 {}
+
+ const vuq __builtin_altivec_orc_v1ti_uns (vuq, vuq);
+ ORC_V1TI_UNS orcv1ti3 {}
+
+ const vd __builtin_altivec_orc_v2df (vd, vd);
+ ORC_V2DF orcv2df3 {}
+
+ const vsll __builtin_altivec_orc_v2di (vsll, vsll);
+ ORC_V2DI orcv2di3 {}
+
+ const vull __builtin_altivec_orc_v2di_uns (vull, vull);
+ ORC_V2DI_UNS orcv2di3 {}
+
+ const vf __builtin_altivec_orc_v4sf (vf, vf);
+ ORC_V4SF orcv4sf3 {}
+
+ const vsi __builtin_altivec_orc_v4si (vsi, vsi);
+ ORC_V4SI orcv4si3 {}
+
+ const vui __builtin_altivec_orc_v4si_uns (vui, vui);
+ ORC_V4SI_UNS orcv4si3 {}
+
+ const vss __builtin_altivec_orc_v8hi (vss, vss);
+ ORC_V8HI orcv8hi3 {}
+
+ const vus __builtin_altivec_orc_v8hi_uns (vus, vus);
+ ORC_V8HI_UNS orcv8hi3 {}
+
+ const vsc __builtin_altivec_vclzb (vsc);
+ VCLZB clzv16qi2 {}
+
+ const vsll __builtin_altivec_vclzd (vsll);
+ VCLZD clzv2di2 {}
+
+ const vss __builtin_altivec_vclzh (vss);
+ VCLZH clzv8hi2 {}
+
+ const vsi __builtin_altivec_vclzw (vsi);
+ VCLZW clzv4si2 {}
+
+ const vsc __builtin_altivec_vgbbd (vsc);
+ VGBBD p8v_vgbbd {}
+
+ const vsq __builtin_altivec_vaddcuq (vsq, vsq);
+ VADDCUQ altivec_vaddcuq {}
+
+ const vsq __builtin_altivec_vaddecuq (vsq, vsq, vsq);
+ VADDECUQ altivec_vaddecuq {}
+
+ const vuq __builtin_altivec_vaddeuqm (vuq, vuq, vuq);
+ VADDEUQM altivec_vaddeuqm {}
+
+ const vsll __builtin_altivec_vaddudm (vsll, vsll);
+ VADDUDM addv2di3 {}
+
+ const vsq __builtin_altivec_vadduqm (vsq, vsq);
+ VADDUQM altivec_vadduqm {}
+
+ const vsll __builtin_altivec_vbpermq (vull, vsc);
+ VBPERMQ altivec_vbpermq {}
+
+ const vuc __builtin_altivec_vbpermq2 (vuc, vuc);
+ VBPERMQ2 altivec_vbpermq2 {}
+
+ const vbll __builtin_altivec_vcmpequd (vsll, vsll);
+ VCMPEQUD vector_eqv2di {}
+
+ const int __builtin_altivec_vcmpequd_p (int, vsll, vsll);
+ VCMPEQUD_P vector_eq_v2di_p {pred}
+
+ const vbll __builtin_altivec_vcmpgtsd (vsll, vsll);
+ VCMPGTSD vector_gtv2di {}
+
+ const int __builtin_altivec_vcmpgtsd_p (int, vsll, vsll);
+ VCMPGTSD_P vector_gt_v2di_p {pred}
+
+ const vbll __builtin_altivec_vcmpgtud (vull, vull);
+ VCMPGTUD vector_gtuv2di {}
+
+ const int __builtin_altivec_vcmpgtud_p (int, vull, vull);
+ VCMPGTUD_P vector_gtu_v2di_p {pred}
+
+ const vsll __builtin_altivec_vmaxsd (vsll, vsll);
+ VMAXSD smaxv2di3 {}
+
+ const vull __builtin_altivec_vmaxud (vull, vull);
+ VMAXUD umaxv2di3 {}
+
+ const vsll __builtin_altivec_vminsd (vsll, vsll);
+ VMINSD sminv2di3 {}
+
+ const vull __builtin_altivec_vminud (vull, vull);
+ VMINUD uminv2di3 {}
+
+ const vd __builtin_altivec_vmrgew_v2df (vd, vd);
+ VMRGEW_V2DF p8_vmrgew_v2df {}
+
+ const vsll __builtin_altivec_vmrgew_v2di (vsll, vsll);
+ VMRGEW_V2DI p8_vmrgew_v2di {}
+
+ const vf __builtin_altivec_vmrgew_v4sf (vf, vf);
+ VMRGEW_V4SF p8_vmrgew_v4sf {}
+
+ const vsi __builtin_altivec_vmrgew_v4si (vsi, vsi);
+ VMRGEW_V4SI p8_vmrgew_v4si {}
+
+ const vd __builtin_altivec_vmrgow_v2df (vd, vd);
+ VMRGOW_V2DF p8_vmrgow_v2df {}
+
+ const vsll __builtin_altivec_vmrgow_v2di (vsll, vsll);
+ VMRGOW_V2DI p8_vmrgow_v2di {}
+
+ const vf __builtin_altivec_vmrgow_v4sf (vf, vf);
+ VMRGOW_V4SF p8_vmrgow_v4sf {}
+
+ const vsi __builtin_altivec_vmrgow_v4si (vsi, vsi);
+ VMRGOW_V4SI p8_vmrgow_v4si {}
+
+ const vsll __builtin_altivec_vmulesw (vsi, vsi);
+ VMULESW vec_widen_smult_even_v4si {}
+
+ const vull __builtin_altivec_vmuleuw (vui, vui);
+ VMULEUW vec_widen_umult_even_v4si {}
+
+ const vsll __builtin_altivec_vmulosw (vsi, vsi);
+ VMULOSW vec_widen_smult_odd_v4si {}
+
+ const vull __builtin_altivec_vmulouw (vui, vui);
+ VMULOUW vec_widen_umult_odd_v4si {}
+
+ const vsc __builtin_altivec_vpermxor (vsc, vsc, vsc);
+ VPERMXOR altivec_vpermxor {}
+
+ const vsi __builtin_altivec_vpksdss (vsll, vsll);
+ VPKSDSS altivec_vpksdss {}
+
+ const vui __builtin_altivec_vpksdus (vsll, vsll);
+ VPKSDUS altivec_vpksdus {}
+
+ const vui __builtin_altivec_vpkudum (vull, vull);
+ VPKUDUM altivec_vpkudum {}
+
+ const vui __builtin_altivec_vpkudus (vull, vull);
+ VPKUDUS altivec_vpkudus {}
+
+; #### Following are duplicates of __builtin_crypto_vpmsum*. This
+; can't have ever worked properly!
+;
+; const vus __builtin_altivec_vpmsumb (vuc, vuc);
+; VPMSUMB crypto_vpmsumb {}
+;
+; const vuq __builtin_altivec_vpmsumd (vull, vull);
+; VPMSUMD crypto_vpmsumd {}
+;
+; const vui __builtin_altivec_vpmsumh (vus, vus);
+; VPMSUMH crypto_vpmsumh {}
+;
+; const vull __builtin_altivec_vpmsumw (vui, vui);
+; VPMSUMW crypto_vpmsumw {}
+
+ const vuc __builtin_altivec_vpopcntb (vsc);
+ VPOPCNTB popcountv16qi2 {}
+
+ const vull __builtin_altivec_vpopcntd (vsll);
+ VPOPCNTD popcountv2di2 {}
+
+ const vus __builtin_altivec_vpopcnth (vss);
+ VPOPCNTH popcountv8hi2 {}
+
+ const vuc __builtin_altivec_vpopcntub (vuc);
+ VPOPCNTUB popcountv16qi2 {}
+
+ const vull __builtin_altivec_vpopcntud (vull);
+ VPOPCNTUD popcountv2di2 {}
+
+ const vus __builtin_altivec_vpopcntuh (vus);
+ VPOPCNTUH popcountv8hi2 {}
+
+ const vui __builtin_altivec_vpopcntuw (vui);
+ VPOPCNTUW popcountv4si2 {}
+
+ const vui __builtin_altivec_vpopcntw (vsi);
+ VPOPCNTW popcountv4si2 {}
+
+ const vsll __builtin_altivec_vrld (vsll, vull);
+ VRLD vrotlv2di3 {}
+
+ const vsll __builtin_altivec_vsld (vsll, vull);
+ VSLD vashlv2di3 {}
+
+ const vsll __builtin_altivec_vsrad (vsll, vull);
+ VSRAD vashrv2di3 {}
+
+ const vsll __builtin_altivec_vsrd (vsll, vull);
+ VSRD vlshrv2di3 {}
+
+ const vuq __builtin_altivec_vsubcuq (vuq, vuq);
+ VSUBCUQ altivec_vsubcuq {}
+
+ const vsq __builtin_altivec_vsubecuq (vsq, vsq, vsq);
+ VSUBECUQ altivec_vsubecuq {}
+
+ const vuq __builtin_altivec_vsubeuqm (vuq, vuq, vuq);
+ VSUBEUQM altivec_vsubeuqm {}
+
+ const vull __builtin_altivec_vsubudm (vull, vull);
+ VSUBUDM subv2di3 {}
+
+ const vuq __builtin_altivec_vsubuqm (vuq, vuq);
+ VSUBUQM altivec_vsubuqm {}
+
+ const vsll __builtin_altivec_vupkhsw (vsi);
+ VUPKHSW altivec_vupkhsw {}
+
+ const vsll __builtin_altivec_vupklsw (vsi);
+ VUPKLSW altivec_vupklsw {}
+
+ const vsq __builtin_bcdadd (vsq, vsq, const int<1>);
+ BCDADD bcdadd {}
+
+ const unsigned int __builtin_bcdadd_eq (vsq, vsq, const int<1>);
+ BCDADD_EQ bcdadd_eq {}
+
+ const unsigned int __builtin_bcdadd_gt (vsq, vsq, const int<1>);
+ BCDADD_GT bcdadd_gt {}
+
+ const unsigned int __builtin_bcdadd_lt (vsq, vsq, const int<1>);
+ BCDADD_LT bcdadd_lt {}
+
+ const unsigned int __builtin_bcdadd_ov (vsq, vsq, const int<1>);
+ BCDADD_OV bcdadd_unordered {}
+
+ const vsq __builtin_bcdsub (vsq, vsq, const int<1>);
+ BCDSUB bcdsub {}
+
+ const unsigned int __builtin_bcdsub_eq (vsq, vsq, const int<1>);
+ BCDSUB_EQ bcdsub_eq {}
+
+ const unsigned int __builtin_bcdsub_gt (vsq, vsq, const int<1>);
+ BCDSUB_GT bcdsub_gt {}
+
+ const unsigned int __builtin_bcdsub_lt (vsq, vsq, const int<1>);
+ BCDSUB_LT bcdsub_lt {}
+
+ const unsigned int __builtin_bcdsub_ov (vsq, vsq, const int<1>);
+ BCDSUB_OV bcdsub_unordered {}
+
+ const vuc __builtin_crypto_vpermxor_v16qi (vuc, vuc, vuc);
+ VPERMXOR_V16QI crypto_vpermxor_v16qi {}
+
+ const vull __builtin_crypto_vpermxor_v2di (vull, vull, vull);
+ VPERMXOR_V2DI crypto_vpermxor_v2di {}
+
+ const vui __builtin_crypto_vpermxor_v4si (vui, vui, vui);
+ VPERMXOR_V4SI crypto_vpermxor_v4si {}
+
+ const vus __builtin_crypto_vpermxor_v8hi (vus, vus, vus);
+ VPERMXOR_V8HI crypto_vpermxor_v8hi {}
+
+ const vus __builtin_crypto_vpmsumb (vuc, vuc);
+ VPMSUMB crypto_vpmsumb {}
+
+ const vuq __builtin_crypto_vpmsumd (vull, vull);
+ VPMSUMD crypto_vpmsumd {}
+
+ const vui __builtin_crypto_vpmsumh (vus, vus);
+ VPMSUMH crypto_vpmsumh {}
+
+ const vull __builtin_crypto_vpmsumw (vui, vui);
+ VPMSUMW crypto_vpmsumw {}
+
+ const vf __builtin_vsx_float2_v2df (vd, vd);
+ FLOAT2_V2DF float2_v2df {}
+
+ const vf __builtin_vsx_float2_v2di (vsll, vsll);
+ FLOAT2_V2DI float2_v2di {}
+
+ const vsc __builtin_vsx_revb_v16qi (vsc);
+ REVB_V16QI revb_v16qi {}
+
+ const vsq __builtin_vsx_revb_v1ti (vsq);
+ REVB_V1TI revb_v1ti {}
+
+ const vd __builtin_vsx_revb_v2df (vd);
+ REVB_V2DF revb_v2df {}
+
+ const vsll __builtin_vsx_revb_v2di (vsll);
+ REVB_V2DI revb_v2di {}
+
+ const vf __builtin_vsx_revb_v4sf (vf);
+ REVB_V4SF revb_v4sf {}
+
+ const vsi __builtin_vsx_revb_v4si (vsi);
+ REVB_V4SI revb_v4si {}
+
+ const vss __builtin_vsx_revb_v8hi (vss);
+ REVB_V8HI revb_v8hi {}
+
+ const vf __builtin_vsx_uns_float2_v2di (vull, vull);
+ UNS_FLOAT2_V2DI uns_float2_v2di {}
+
+ const vsi __builtin_vsx_vsigned2_v2df (vd, vd);
+ VEC_VSIGNED2_V2DF vsigned2_v2df {}
+
+ const vui __builtin_vsx_vunsigned2_v2df (vd, vd);
+ VEC_VUNSIGNED2_V2DF vunsigned2_v2df {}
+
+ const vf __builtin_vsx_xscvdpspn (double);
+ XSCVDPSPN vsx_xscvdpspn {}
+
+ const double __builtin_vsx_xscvspdpn (vf);
+ XSCVSPDPN vsx_xscvspdpn {}
+
+
^ permalink raw reply [flat|nested] 8+ messages in thread
* [gcc(refs/users/wschmidt/heads/builtins3)] rs6000: Add Power8 vector builtins
@ 2020-09-14 13:58 William Schmidt
0 siblings, 0 replies; 8+ messages in thread
From: William Schmidt @ 2020-09-14 13:58 UTC (permalink / raw)
To: gcc-cvs
https://gcc.gnu.org/g:66470994d64fb743551ed1e5cbcb2c84144f872a
commit 66470994d64fb743551ed1e5cbcb2c84144f872a
Author: Bill Schmidt <wschmidt@linux.ibm.com>
Date: Wed Jun 17 11:40:50 2020 -0500
rs6000: Add Power8 vector builtins
2020-07-26 Bill Schmidt <wschmidt@linux.ibm.com>
* config/rs6000/rs6000-builtin-new.def: Add power8-vector
builtins.
Diff:
---
gcc/config/rs6000/rs6000-builtin-new.def | 417 +++++++++++++++++++++++++++++++
1 file changed, 417 insertions(+)
diff --git a/gcc/config/rs6000/rs6000-builtin-new.def b/gcc/config/rs6000/rs6000-builtin-new.def
index 0a17cad446c..2f918c1d69e 100644
--- a/gcc/config/rs6000/rs6000-builtin-new.def
+++ b/gcc/config/rs6000/rs6000-builtin-new.def
@@ -1977,3 +1977,420 @@
DIVDEU diveu_di {}
+; Power8 vector built-ins.
+[power8-vector]
+ const vsll __builtin_altivec_abs_v2di (vsll);
+ ABS_V2DI absv2di2 {}
+
+ const vsc __builtin_altivec_eqv_v16qi (vsc, vsc);
+ EQV_V16QI eqvv16qi3 {}
+
+ const vuc __builtin_altivec_eqv_v16qi_uns (vuc, vuc);
+ EQV_V16QI_UNS eqvv16qi3 {}
+
+ const vsq __builtin_altivec_eqv_v1ti (vsq, vsq);
+ EQV_V1TI eqvv1ti3 {}
+
+ const vuq __builtin_altivec_eqv_v1ti_uns (vuq, vuq);
+ EQV_V1TI_UNS eqvv1ti3 {}
+
+ const vd __builtin_altivec_eqv_v2df (vd, vd);
+ EQV_V2DF eqvv2df3 {}
+
+ const vsll __builtin_altivec_eqv_v2di (vsll, vsll);
+ EQV_V2DI eqvv2di3 {}
+
+ const vull __builtin_altivec_eqv_v2di_uns (vull, vull);
+ EQV_V2DI_UNS eqvv2di3 {}
+
+ const vf __builtin_altivec_eqv_v4sf (vf, vf);
+ EQV_V4SF eqvv4sf3 {}
+
+ const vsi __builtin_altivec_eqv_v4si (vsi, vsi);
+ EQV_V4SI eqvv4si3 {}
+
+ const vui __builtin_altivec_eqv_v4si_uns (vui, vui);
+ EQV_V4SI_UNS eqvv4si3 {}
+
+ const vss __builtin_altivec_eqv_v8hi (vss, vss);
+ EQV_V8HI eqvv8hi3 {}
+
+ const vus __builtin_altivec_eqv_v8hi_uns (vus, vus);
+ EQV_V8HI_UNS eqvv8hi3 {}
+
+ const vsc __builtin_altivec_nand_v16qi (vsc, vsc);
+ NAND_V16QI nandv16qi3 {}
+
+ const vuc __builtin_altivec_nand_v16qi_uns (vuc, vuc);
+ NAND_V16QI_UNS nandv16qi3 {}
+
+ const vsq __builtin_altivec_nand_v1ti (vsq, vsq);
+ NAND_V1TI nandv1ti3 {}
+
+ const vuq __builtin_altivec_nand_v1ti_uns (vuq, vuq);
+ NAND_V1TI_UNS nandv1ti3 {}
+
+ const vd __builtin_altivec_nand_v2df (vd, vd);
+ NAND_V2DF nandv2df3 {}
+
+ const vsll __builtin_altivec_nand_v2di (vsll, vsll);
+ NAND_V2DI nandv2di3 {}
+
+ const vull __builtin_altivec_nand_v2di_uns (vull, vull);
+ NAND_V2DI_UNS nandv2di3 {}
+
+ const vf __builtin_altivec_nand_v4sf (vf, vf);
+ NAND_V4SF nandv4sf3 {}
+
+ const vsi __builtin_altivec_nand_v4si (vsi, vsi);
+ NAND_V4SI nandv4si3 {}
+
+ const vui __builtin_altivec_nand_v4si_uns (vui, vui);
+ NAND_V4SI_UNS nandv4si3 {}
+
+ const vss __builtin_altivec_nand_v8hi (vss, vss);
+ NAND_V8HI nandv8hi3 {}
+
+ const vus __builtin_altivec_nand_v8hi_uns (vus, vus);
+ NAND_V8HI_UNS nandv8hi3 {}
+
+ const vsc __builtin_altivec_neg_v16qi (vsc);
+ NEG_V16QI negv16qi2 {}
+
+ const vd __builtin_altivec_neg_v2df (vd);
+ NEG_V2DF negv2df2 {}
+
+ const vsll __builtin_altivec_neg_v2di (vsll);
+ NEG_V2DI negv2di2 {}
+
+ const vf __builtin_altivec_neg_v4sf (vf);
+ NEG_V4SF negv4sf2 {}
+
+ const vsi __builtin_altivec_neg_v4si (vsi);
+ NEG_V4SI negv4si2 {}
+
+ const vss __builtin_altivec_neg_v8hi (vss);
+ NEG_V8HI negv8hi2 {}
+
+ const vsc __builtin_altivec_orc_v16qi (vsc, vsc);
+ ORC_V16QI orcv16qi3 {}
+
+ const vuc __builtin_altivec_orc_v16qi_uns (vuc, vuc);
+ ORC_V16QI_UNS orcv16qi3 {}
+
+ const vsq __builtin_altivec_orc_v1ti (vsq, vsq);
+ ORC_V1TI orcv1ti3 {}
+
+ const vuq __builtin_altivec_orc_v1ti_uns (vuq, vuq);
+ ORC_V1TI_UNS orcv1ti3 {}
+
+ const vd __builtin_altivec_orc_v2df (vd, vd);
+ ORC_V2DF orcv2df3 {}
+
+ const vsll __builtin_altivec_orc_v2di (vsll, vsll);
+ ORC_V2DI orcv2di3 {}
+
+ const vull __builtin_altivec_orc_v2di_uns (vull, vull);
+ ORC_V2DI_UNS orcv2di3 {}
+
+ const vf __builtin_altivec_orc_v4sf (vf, vf);
+ ORC_V4SF orcv4sf3 {}
+
+ const vsi __builtin_altivec_orc_v4si (vsi, vsi);
+ ORC_V4SI orcv4si3 {}
+
+ const vui __builtin_altivec_orc_v4si_uns (vui, vui);
+ ORC_V4SI_UNS orcv4si3 {}
+
+ const vss __builtin_altivec_orc_v8hi (vss, vss);
+ ORC_V8HI orcv8hi3 {}
+
+ const vus __builtin_altivec_orc_v8hi_uns (vus, vus);
+ ORC_V8HI_UNS orcv8hi3 {}
+
+ const vsc __builtin_altivec_vclzb (vsc);
+ VCLZB clzv16qi2 {}
+
+ const vsll __builtin_altivec_vclzd (vsll);
+ VCLZD clzv2di2 {}
+
+ const vss __builtin_altivec_vclzh (vss);
+ VCLZH clzv8hi2 {}
+
+ const vsi __builtin_altivec_vclzw (vsi);
+ VCLZW clzv4si2 {}
+
+ const vsc __builtin_altivec_vgbbd (vsc);
+ VGBBD p8v_vgbbd {}
+
+ const vsq __builtin_altivec_vaddcuq (vsq, vsq);
+ VADDCUQ altivec_vaddcuq {}
+
+ const vsq __builtin_altivec_vaddecuq (vsq, vsq, vsq);
+ VADDECUQ altivec_vaddecuq {}
+
+ const vuq __builtin_altivec_vaddeuqm (vuq, vuq, vuq);
+ VADDEUQM altivec_vaddeuqm {}
+
+ const vsll __builtin_altivec_vaddudm (vsll, vsll);
+ VADDUDM addv2di3 {}
+
+ const vsq __builtin_altivec_vadduqm (vsq, vsq);
+ VADDUQM altivec_vadduqm {}
+
+ const vsll __builtin_altivec_vbpermq (vull, vsc);
+ VBPERMQ altivec_vbpermq {}
+
+ const vuc __builtin_altivec_vbpermq2 (vuc, vuc);
+ VBPERMQ2 altivec_vbpermq2 {}
+
+ const vbll __builtin_altivec_vcmpequd (vsll, vsll);
+ VCMPEQUD vector_eqv2di {}
+
+ const int __builtin_altivec_vcmpequd_p (int, vsll, vsll);
+ VCMPEQUD_P vector_eq_v2di_p {pred}
+
+ const vbll __builtin_altivec_vcmpgtsd (vsll, vsll);
+ VCMPGTSD vector_gtv2di {}
+
+ const int __builtin_altivec_vcmpgtsd_p (int, vsll, vsll);
+ VCMPGTSD_P vector_gt_v2di_p {pred}
+
+ const vbll __builtin_altivec_vcmpgtud (vull, vull);
+ VCMPGTUD vector_gtuv2di {}
+
+ const int __builtin_altivec_vcmpgtud_p (int, vull, vull);
+ VCMPGTUD_P vector_gtu_v2di_p {pred}
+
+ const vsll __builtin_altivec_vmaxsd (vsll, vsll);
+ VMAXSD smaxv2di3 {}
+
+ const vull __builtin_altivec_vmaxud (vull, vull);
+ VMAXUD umaxv2di3 {}
+
+ const vsll __builtin_altivec_vminsd (vsll, vsll);
+ VMINSD sminv2di3 {}
+
+ const vull __builtin_altivec_vminud (vull, vull);
+ VMINUD uminv2di3 {}
+
+ const vd __builtin_altivec_vmrgew_v2df (vd, vd);
+ VMRGEW_V2DF p8_vmrgew_v2df {}
+
+ const vsll __builtin_altivec_vmrgew_v2di (vsll, vsll);
+ VMRGEW_V2DI p8_vmrgew_v2di {}
+
+ const vf __builtin_altivec_vmrgew_v4sf (vf, vf);
+ VMRGEW_V4SF p8_vmrgew_v4sf {}
+
+ const vsi __builtin_altivec_vmrgew_v4si (vsi, vsi);
+ VMRGEW_V4SI p8_vmrgew_v4si {}
+
+ const vd __builtin_altivec_vmrgow_v2df (vd, vd);
+ VMRGOW_V2DF p8_vmrgow_v2df {}
+
+ const vsll __builtin_altivec_vmrgow_v2di (vsll, vsll);
+ VMRGOW_V2DI p8_vmrgow_v2di {}
+
+ const vf __builtin_altivec_vmrgow_v4sf (vf, vf);
+ VMRGOW_V4SF p8_vmrgow_v4sf {}
+
+ const vsi __builtin_altivec_vmrgow_v4si (vsi, vsi);
+ VMRGOW_V4SI p8_vmrgow_v4si {}
+
+ const vsll __builtin_altivec_vmulesw (vsi, vsi);
+ VMULESW vec_widen_smult_even_v4si {}
+
+ const vull __builtin_altivec_vmuleuw (vui, vui);
+ VMULEUW vec_widen_umult_even_v4si {}
+
+ const vsll __builtin_altivec_vmulosw (vsi, vsi);
+ VMULOSW vec_widen_smult_odd_v4si {}
+
+ const vull __builtin_altivec_vmulouw (vui, vui);
+ VMULOUW vec_widen_umult_odd_v4si {}
+
+ const vsc __builtin_altivec_vpermxor (vsc, vsc, vsc);
+ VPERMXOR altivec_vpermxor {}
+
+ const vsi __builtin_altivec_vpksdss (vsll, vsll);
+ VPKSDSS altivec_vpksdss {}
+
+ const vui __builtin_altivec_vpksdus (vsll, vsll);
+ VPKSDUS altivec_vpksdus {}
+
+ const vui __builtin_altivec_vpkudum (vull, vull);
+ VPKUDUM altivec_vpkudum {}
+
+ const vui __builtin_altivec_vpkudus (vull, vull);
+ VPKUDUS altivec_vpkudus {}
+
+; #### Following are duplicates of __builtin_crypto_vpmsum*. This
+; can't have ever worked properly!
+;
+; const vus __builtin_altivec_vpmsumb (vuc, vuc);
+; VPMSUMB crypto_vpmsumb {}
+;
+; const vuq __builtin_altivec_vpmsumd (vull, vull);
+; VPMSUMD crypto_vpmsumd {}
+;
+; const vui __builtin_altivec_vpmsumh (vus, vus);
+; VPMSUMH crypto_vpmsumh {}
+;
+; const vull __builtin_altivec_vpmsumw (vui, vui);
+; VPMSUMW crypto_vpmsumw {}
+
+ const vuc __builtin_altivec_vpopcntb (vsc);
+ VPOPCNTB popcountv16qi2 {}
+
+ const vull __builtin_altivec_vpopcntd (vsll);
+ VPOPCNTD popcountv2di2 {}
+
+ const vus __builtin_altivec_vpopcnth (vss);
+ VPOPCNTH popcountv8hi2 {}
+
+ const vuc __builtin_altivec_vpopcntub (vuc);
+ VPOPCNTUB popcountv16qi2 {}
+
+ const vull __builtin_altivec_vpopcntud (vull);
+ VPOPCNTUD popcountv2di2 {}
+
+ const vus __builtin_altivec_vpopcntuh (vus);
+ VPOPCNTUH popcountv8hi2 {}
+
+ const vui __builtin_altivec_vpopcntuw (vui);
+ VPOPCNTUW popcountv4si2 {}
+
+ const vui __builtin_altivec_vpopcntw (vsi);
+ VPOPCNTW popcountv4si2 {}
+
+ const vsll __builtin_altivec_vrld (vsll, vull);
+ VRLD vrotlv2di3 {}
+
+ const vsll __builtin_altivec_vsld (vsll, vull);
+ VSLD vashlv2di3 {}
+
+ const vsll __builtin_altivec_vsrad (vsll, vull);
+ VSRAD vashrv2di3 {}
+
+ const vsll __builtin_altivec_vsrd (vsll, vull);
+ VSRD vlshrv2di3 {}
+
+ const vuq __builtin_altivec_vsubcuq (vuq, vuq);
+ VSUBCUQ altivec_vsubcuq {}
+
+ const vsq __builtin_altivec_vsubecuq (vsq, vsq, vsq);
+ VSUBECUQ altivec_vsubecuq {}
+
+ const vuq __builtin_altivec_vsubeuqm (vuq, vuq, vuq);
+ VSUBEUQM altivec_vsubeuqm {}
+
+ const vull __builtin_altivec_vsubudm (vull, vull);
+ VSUBUDM subv2di3 {}
+
+ const vuq __builtin_altivec_vsubuqm (vuq, vuq);
+ VSUBUQM altivec_vsubuqm {}
+
+ const vsll __builtin_altivec_vupkhsw (vsi);
+ VUPKHSW altivec_vupkhsw {}
+
+ const vsll __builtin_altivec_vupklsw (vsi);
+ VUPKLSW altivec_vupklsw {}
+
+ const vsq __builtin_bcdadd (vsq, vsq, const int<1>);
+ BCDADD bcdadd {}
+
+ const unsigned int __builtin_bcdadd_eq (vsq, vsq, const int<1>);
+ BCDADD_EQ bcdadd_eq {}
+
+ const unsigned int __builtin_bcdadd_gt (vsq, vsq, const int<1>);
+ BCDADD_GT bcdadd_gt {}
+
+ const unsigned int __builtin_bcdadd_lt (vsq, vsq, const int<1>);
+ BCDADD_LT bcdadd_lt {}
+
+ const unsigned int __builtin_bcdadd_ov (vsq, vsq, const int<1>);
+ BCDADD_OV bcdadd_unordered {}
+
+ const vsq __builtin_bcdsub (vsq, vsq, const int<1>);
+ BCDSUB bcdsub {}
+
+ const unsigned int __builtin_bcdsub_eq (vsq, vsq, const int<1>);
+ BCDSUB_EQ bcdsub_eq {}
+
+ const unsigned int __builtin_bcdsub_gt (vsq, vsq, const int<1>);
+ BCDSUB_GT bcdsub_gt {}
+
+ const unsigned int __builtin_bcdsub_lt (vsq, vsq, const int<1>);
+ BCDSUB_LT bcdsub_lt {}
+
+ const unsigned int __builtin_bcdsub_ov (vsq, vsq, const int<1>);
+ BCDSUB_OV bcdsub_unordered {}
+
+ const vuc __builtin_crypto_vpermxor_v16qi (vuc, vuc, vuc);
+ VPERMXOR_V16QI crypto_vpermxor_v16qi {}
+
+ const vull __builtin_crypto_vpermxor_v2di (vull, vull, vull);
+ VPERMXOR_V2DI crypto_vpermxor_v2di {}
+
+ const vui __builtin_crypto_vpermxor_v4si (vui, vui, vui);
+ VPERMXOR_V4SI crypto_vpermxor_v4si {}
+
+ const vus __builtin_crypto_vpermxor_v8hi (vus, vus, vus);
+ VPERMXOR_V8HI crypto_vpermxor_v8hi {}
+
+ const vus __builtin_crypto_vpmsumb (vuc, vuc);
+ VPMSUMB crypto_vpmsumb {}
+
+ const vuq __builtin_crypto_vpmsumd (vull, vull);
+ VPMSUMD crypto_vpmsumd {}
+
+ const vui __builtin_crypto_vpmsumh (vus, vus);
+ VPMSUMH crypto_vpmsumh {}
+
+ const vull __builtin_crypto_vpmsumw (vui, vui);
+ VPMSUMW crypto_vpmsumw {}
+
+ const vf __builtin_vsx_float2_v2df (vd, vd);
+ FLOAT2_V2DF float2_v2df {}
+
+ const vf __builtin_vsx_float2_v2di (vsll, vsll);
+ FLOAT2_V2DI float2_v2di {}
+
+ const vsc __builtin_vsx_revb_v16qi (vsc);
+ REVB_V16QI revb_v16qi {}
+
+ const vsq __builtin_vsx_revb_v1ti (vsq);
+ REVB_V1TI revb_v1ti {}
+
+ const vd __builtin_vsx_revb_v2df (vd);
+ REVB_V2DF revb_v2df {}
+
+ const vsll __builtin_vsx_revb_v2di (vsll);
+ REVB_V2DI revb_v2di {}
+
+ const vf __builtin_vsx_revb_v4sf (vf);
+ REVB_V4SF revb_v4sf {}
+
+ const vsi __builtin_vsx_revb_v4si (vsi);
+ REVB_V4SI revb_v4si {}
+
+ const vss __builtin_vsx_revb_v8hi (vss);
+ REVB_V8HI revb_v8hi {}
+
+ const vf __builtin_vsx_uns_float2_v2di (vull, vull);
+ UNS_FLOAT2_V2DI uns_float2_v2di {}
+
+ const vsi __builtin_vsx_vsigned2_v2df (vd, vd);
+ VEC_VSIGNED2_V2DF vsigned2_v2df {}
+
+ const vui __builtin_vsx_vunsigned2_v2df (vd, vd);
+ VEC_VUNSIGNED2_V2DF vunsigned2_v2df {}
+
+ const vf __builtin_vsx_xscvdpspn (double);
+ XSCVDPSPN vsx_xscvdpspn {}
+
+ const double __builtin_vsx_xscvspdpn (vf);
+ XSCVSPDPN vsx_xscvspdpn {}
+
+
^ permalink raw reply [flat|nested] 8+ messages in thread
* [gcc(refs/users/wschmidt/heads/builtins3)] rs6000: Add Power8 vector builtins
@ 2020-08-20 16:39 William Schmidt
0 siblings, 0 replies; 8+ messages in thread
From: William Schmidt @ 2020-08-20 16:39 UTC (permalink / raw)
To: gcc-cvs
https://gcc.gnu.org/g:19c8d1b379de3ad4187619971db4fb6af22a9b45
commit 19c8d1b379de3ad4187619971db4fb6af22a9b45
Author: Bill Schmidt <wschmidt@linux.ibm.com>
Date: Wed Jun 17 11:40:50 2020 -0500
rs6000: Add Power8 vector builtins
2020-07-26 Bill Schmidt <wschmidt@linux.ibm.com>
* config/rs6000/rs6000-builtin-new.def: Add power8-vector
builtins.
Diff:
---
gcc/config/rs6000/rs6000-builtin-new.def | 417 +++++++++++++++++++++++++++++++
1 file changed, 417 insertions(+)
diff --git a/gcc/config/rs6000/rs6000-builtin-new.def b/gcc/config/rs6000/rs6000-builtin-new.def
index 0a17cad446c..2f918c1d69e 100644
--- a/gcc/config/rs6000/rs6000-builtin-new.def
+++ b/gcc/config/rs6000/rs6000-builtin-new.def
@@ -1977,3 +1977,420 @@
DIVDEU diveu_di {}
+; Power8 vector built-ins.
+[power8-vector]
+ const vsll __builtin_altivec_abs_v2di (vsll);
+ ABS_V2DI absv2di2 {}
+
+ const vsc __builtin_altivec_eqv_v16qi (vsc, vsc);
+ EQV_V16QI eqvv16qi3 {}
+
+ const vuc __builtin_altivec_eqv_v16qi_uns (vuc, vuc);
+ EQV_V16QI_UNS eqvv16qi3 {}
+
+ const vsq __builtin_altivec_eqv_v1ti (vsq, vsq);
+ EQV_V1TI eqvv1ti3 {}
+
+ const vuq __builtin_altivec_eqv_v1ti_uns (vuq, vuq);
+ EQV_V1TI_UNS eqvv1ti3 {}
+
+ const vd __builtin_altivec_eqv_v2df (vd, vd);
+ EQV_V2DF eqvv2df3 {}
+
+ const vsll __builtin_altivec_eqv_v2di (vsll, vsll);
+ EQV_V2DI eqvv2di3 {}
+
+ const vull __builtin_altivec_eqv_v2di_uns (vull, vull);
+ EQV_V2DI_UNS eqvv2di3 {}
+
+ const vf __builtin_altivec_eqv_v4sf (vf, vf);
+ EQV_V4SF eqvv4sf3 {}
+
+ const vsi __builtin_altivec_eqv_v4si (vsi, vsi);
+ EQV_V4SI eqvv4si3 {}
+
+ const vui __builtin_altivec_eqv_v4si_uns (vui, vui);
+ EQV_V4SI_UNS eqvv4si3 {}
+
+ const vss __builtin_altivec_eqv_v8hi (vss, vss);
+ EQV_V8HI eqvv8hi3 {}
+
+ const vus __builtin_altivec_eqv_v8hi_uns (vus, vus);
+ EQV_V8HI_UNS eqvv8hi3 {}
+
+ const vsc __builtin_altivec_nand_v16qi (vsc, vsc);
+ NAND_V16QI nandv16qi3 {}
+
+ const vuc __builtin_altivec_nand_v16qi_uns (vuc, vuc);
+ NAND_V16QI_UNS nandv16qi3 {}
+
+ const vsq __builtin_altivec_nand_v1ti (vsq, vsq);
+ NAND_V1TI nandv1ti3 {}
+
+ const vuq __builtin_altivec_nand_v1ti_uns (vuq, vuq);
+ NAND_V1TI_UNS nandv1ti3 {}
+
+ const vd __builtin_altivec_nand_v2df (vd, vd);
+ NAND_V2DF nandv2df3 {}
+
+ const vsll __builtin_altivec_nand_v2di (vsll, vsll);
+ NAND_V2DI nandv2di3 {}
+
+ const vull __builtin_altivec_nand_v2di_uns (vull, vull);
+ NAND_V2DI_UNS nandv2di3 {}
+
+ const vf __builtin_altivec_nand_v4sf (vf, vf);
+ NAND_V4SF nandv4sf3 {}
+
+ const vsi __builtin_altivec_nand_v4si (vsi, vsi);
+ NAND_V4SI nandv4si3 {}
+
+ const vui __builtin_altivec_nand_v4si_uns (vui, vui);
+ NAND_V4SI_UNS nandv4si3 {}
+
+ const vss __builtin_altivec_nand_v8hi (vss, vss);
+ NAND_V8HI nandv8hi3 {}
+
+ const vus __builtin_altivec_nand_v8hi_uns (vus, vus);
+ NAND_V8HI_UNS nandv8hi3 {}
+
+ const vsc __builtin_altivec_neg_v16qi (vsc);
+ NEG_V16QI negv16qi2 {}
+
+ const vd __builtin_altivec_neg_v2df (vd);
+ NEG_V2DF negv2df2 {}
+
+ const vsll __builtin_altivec_neg_v2di (vsll);
+ NEG_V2DI negv2di2 {}
+
+ const vf __builtin_altivec_neg_v4sf (vf);
+ NEG_V4SF negv4sf2 {}
+
+ const vsi __builtin_altivec_neg_v4si (vsi);
+ NEG_V4SI negv4si2 {}
+
+ const vss __builtin_altivec_neg_v8hi (vss);
+ NEG_V8HI negv8hi2 {}
+
+ const vsc __builtin_altivec_orc_v16qi (vsc, vsc);
+ ORC_V16QI orcv16qi3 {}
+
+ const vuc __builtin_altivec_orc_v16qi_uns (vuc, vuc);
+ ORC_V16QI_UNS orcv16qi3 {}
+
+ const vsq __builtin_altivec_orc_v1ti (vsq, vsq);
+ ORC_V1TI orcv1ti3 {}
+
+ const vuq __builtin_altivec_orc_v1ti_uns (vuq, vuq);
+ ORC_V1TI_UNS orcv1ti3 {}
+
+ const vd __builtin_altivec_orc_v2df (vd, vd);
+ ORC_V2DF orcv2df3 {}
+
+ const vsll __builtin_altivec_orc_v2di (vsll, vsll);
+ ORC_V2DI orcv2di3 {}
+
+ const vull __builtin_altivec_orc_v2di_uns (vull, vull);
+ ORC_V2DI_UNS orcv2di3 {}
+
+ const vf __builtin_altivec_orc_v4sf (vf, vf);
+ ORC_V4SF orcv4sf3 {}
+
+ const vsi __builtin_altivec_orc_v4si (vsi, vsi);
+ ORC_V4SI orcv4si3 {}
+
+ const vui __builtin_altivec_orc_v4si_uns (vui, vui);
+ ORC_V4SI_UNS orcv4si3 {}
+
+ const vss __builtin_altivec_orc_v8hi (vss, vss);
+ ORC_V8HI orcv8hi3 {}
+
+ const vus __builtin_altivec_orc_v8hi_uns (vus, vus);
+ ORC_V8HI_UNS orcv8hi3 {}
+
+ const vsc __builtin_altivec_vclzb (vsc);
+ VCLZB clzv16qi2 {}
+
+ const vsll __builtin_altivec_vclzd (vsll);
+ VCLZD clzv2di2 {}
+
+ const vss __builtin_altivec_vclzh (vss);
+ VCLZH clzv8hi2 {}
+
+ const vsi __builtin_altivec_vclzw (vsi);
+ VCLZW clzv4si2 {}
+
+ const vsc __builtin_altivec_vgbbd (vsc);
+ VGBBD p8v_vgbbd {}
+
+ const vsq __builtin_altivec_vaddcuq (vsq, vsq);
+ VADDCUQ altivec_vaddcuq {}
+
+ const vsq __builtin_altivec_vaddecuq (vsq, vsq, vsq);
+ VADDECUQ altivec_vaddecuq {}
+
+ const vuq __builtin_altivec_vaddeuqm (vuq, vuq, vuq);
+ VADDEUQM altivec_vaddeuqm {}
+
+ const vsll __builtin_altivec_vaddudm (vsll, vsll);
+ VADDUDM addv2di3 {}
+
+ const vsq __builtin_altivec_vadduqm (vsq, vsq);
+ VADDUQM altivec_vadduqm {}
+
+ const vsll __builtin_altivec_vbpermq (vop, vsc);
+ VBPERMQ altivec_vbpermq {}
+
+ const vuc __builtin_altivec_vbpermq2 (vuc, vuc);
+ VBPERMQ2 altivec_vbpermq2 {}
+
+ const vbll __builtin_altivec_vcmpequd (vsll, vsll);
+ VCMPEQUD vector_eqv2di {}
+
+ const int __builtin_altivec_vcmpequd_p (int, vsll, vsll);
+ VCMPEQUD_P vector_eq_v2di_p {pred}
+
+ const vbll __builtin_altivec_vcmpgtsd (vsll, vsll);
+ VCMPGTSD vector_gtv2di {}
+
+ const int __builtin_altivec_vcmpgtsd_p (int, vsll, vsll);
+ VCMPGTSD_P vector_gt_v2di_p {pred}
+
+ const vbll __builtin_altivec_vcmpgtud (vull, vull);
+ VCMPGTUD vector_gtuv2di {}
+
+ const int __builtin_altivec_vcmpgtud_p (vull, vull);
+ VCMPGTUD_P vector_gtu_v2di_p {pred}
+
+ const vsll __builtin_altivec_vmaxsd (vsll, vsll);
+ VMAXSD smaxv2di3 {}
+
+ const vull __builtin_altivec_vmaxud (vull, vull);
+ VMAXUD umaxv2di3 {}
+
+ const vsll __builtin_altivec_vminsd (vsll, vsll);
+ VMINSD sminv2di3 {}
+
+ const vull __builtin_altivec_vminud (vull, vull);
+ VMINUD uminv2di3 {}
+
+ const vd __builtin_altivec_vmrgew_v2df (vd, vd);
+ VMRGEW_V2DF p8_vmrgew_v2df {}
+
+ const vsll __builtin_altivec_vmrgew_v2di (vsll, vsll);
+ VMRGEW_V2DI p8_vmrgew_v2di {}
+
+ const vf __builtin_altivec_vmrgew_v4sf (vf, vf);
+ VMRGEW_V4SF p8_vmrgew_v4sf {}
+
+ const vsi __builtin_altivec_vmrgew_v4si (vsi, vsi);
+ VMRGEW_V4SI p8_vmrgew_v4si {}
+
+ const vd __builtin_altivec_vmrgow_v2df (vd, vd);
+ VMRGOW_V2DF p8_vmrgow_v2df {}
+
+ const vsll __builtin_altivec_vmrgow_v2di (vsll, vsll);
+ VMRGOW_V2DI p8_vmrgow_v2di {}
+
+ const vf __builtin_altivec_vmrgow_v4sf (vf, vf);
+ VMRGOW_V4SF p8_vmrgow_v4sf {}
+
+ const vsi __builtin_altivec_vmrgow_v4si (vsi, vsi);
+ VMRGOW_V4SI p8_vmrgow_v4si {}
+
+ const vsll __builtin_altivec_vmulesw (vsi, vsi);
+ VMULESW vec_widen_smult_even_v4si {}
+
+ const vull __builtin_altivec_vmuleuw (vui, vui);
+ VMULEUW vec_widen_umult_even_v4si {}
+
+ const vsll __builtin_altivec_vmulosw (vsi, vsi);
+ VMULOSW vec_widen_smult_odd_v4si {}
+
+ const vull __builtin_altivec_vmulouw (vui, vui);
+ VMULOUW vec_widen_umult_odd_v4si {}
+
+ const vsc __builtin_altivec_vpermxor (vsc, vsc, vsc);
+ VPERMXOR altivec_vpermxor {}
+
+ const vsi __builtin_altivec_vpksdss (vsll, vsll);
+ VPKSDSS altivec_vpksdss {}
+
+ const vui __builtin_altivec_vpksdus (vsll, vsll);
+ VPKSDUS altivec_vpksdus {}
+
+ const vui __builtin_altivec_vpkudum (vull, vull);
+ VPKUDUM altivec_vpkudum {}
+
+ const vui __builtin_altivec_vpkudus (vull, vull);
+ VPKUDUS altivec_vpkudus {}
+
+; #### Following are duplicates of __builtin_crypto_vpmsum*. This
+; can't have ever worked properly!
+;
+; const vus __builtin_altivec_vpmsumb (vuc, vuc);
+; VPMSUMB crypto_vpmsumb {}
+;
+; const vuq __builtin_altivec_vpmsumd (vull, vull);
+; VPMSUMD crypto_vpmsumd {}
+;
+; const vui __builtin_altivec_vpmsumh (vus, vus);
+; VPMSUMH crypto_vpmsumh {}
+;
+; const vull __builtin_altivec_vpmsumw (vui, vui);
+; VPMSUMW crypto_vpmsumw {}
+
+ const vuc __builtin_altivec_vpopcntb (vsc);
+ VPOPCNTB popcountv16qi2 {}
+
+ const vull __builtin_altivec_vpopcntd (vsll);
+ VPOPCNTD popcountv2di2 {}
+
+ const vus __builtin_altivec_vpopcnth (vss);
+ VPOPCNTH popcountv8hi2 {}
+
+ const vuc __builtin_altivec_vpopcntub (vuc);
+ VPOPCNTUB popcountv16qi2 {}
+
+ const vull __builtin_altivec_vpopcntud (vull);
+ VPOPCNTUD popcountv2di2 {}
+
+ const vus __builtin_altivec_vpopcntuh (vus);
+ VPOPCNTUH popcountv8hi2 {}
+
+ const vui __builtin_altivec_vpopcntuw (vui);
+ VPOPCNTUW popcountv4si2 {}
+
+ const vui __builtin_altivec_vpopcntw (vsi);
+ VPOPCNTW popcountv4si2 {}
+
+ const vsll __builtin_altivec_vrld (vsll, vull);
+ VRLD vrotlv2di3 {}
+
+ const vsll __builtin_altivec_vsld (vsll, vull);
+ VSLD vashlv2di3 {}
+
+ const vsll __builtin_altivec_vsrad (vsll, vull);
+ VSRAD vashrv2di3 {}
+
+ const vsll __builtin_altivec_vsrd (vsll, vull);
+ VSRD vlshrv2di3 {}
+
+ const vuq __builtin_altivec_vsubcuq (vuq, vuq);
+ VSUBCUQ altivec_vsubcuq {}
+
+ const vsq __builtin_altivec_vsubecuq (vsq, vsq, vsq);
+ VSUBECUQ altivec_vsubecuq {}
+
+ const vuq __builtin_altivec_vsubeuqm (vuq, vuq, vuq);
+ VSUBEUQM altivec_vsubeuqm {}
+
+ const vull __builtin_altivec_vsubudm (vull, vull);
+ VSUBUDM subv2di3 {}
+
+ const vuq __builtin_altivec_vsubuqm (vuq, vuq);
+ VSUBUQM altivec_vsubuqm {}
+
+ const vsll __builtin_altivec_vupkhsw (vsi);
+ VUPKHSW altivec_vupkhsw {}
+
+ const vsll __builtin_altivec_vupklsw (vsi);
+ VUPKLSW altivec_vupklsw {}
+
+ const vsq __builtin_bcdadd (vsq, vsq, const int<1>);
+ BCDADD bcdadd {}
+
+ const unsigned int __builtin_bcdadd_eq (vsq, vsq, const int<1>);
+ BCDADD_EQ bcdadd_eq {}
+
+ const unsigned int __builtin_bcdadd_gt (vsq, vsq, const int<1>);
+ BCDADD_GT bcdadd_gt {}
+
+ const unsigned int __builtin_bcdadd_lt (vsq, vsq, const int<1>);
+ BCDADD_LT bcdadd_lt {}
+
+ const unsigned int __builtin_bcdadd_ov (vsq, vsq, const int<1>);
+ BCDADD_OV bcdadd_unordered {}
+
+ const vsq __builtin_bcdsub (vsq, vsq, const int<1>);
+ BCDSUB bcdsub {}
+
+ const unsigned int __builtin_bcdsub_eq (vsq, vsq, const int<1>);
+ BCDSUB_EQ bcdsub_eq {}
+
+ const unsigned int __builtin_bcdsub_gt (vsq, vsq, const int<1>);
+ BCDSUB_GT bcdsub_gt {}
+
+ const unsigned int __builtin_bcdsub_lt (vsq, vsq, const int<1>);
+ BCDSUB_LT bcdsub_lt {}
+
+ const unsigned int __builtin_bcdsub_ov (vsq, vsq, const int<1>);
+ BCDSUB_OV bcdsub_unordered {}
+
+ const vuc __builtin_crypto_vpermxor_v16qi (vuc, vuc, vuc);
+ VPERMXOR_V16QI crypto_vpermxor_v16qi {}
+
+ const vull __builtin_crypto_vpermxor_v2di (vull, vull, vull);
+ VPERMXOR_V2DI crypto_vpermxor_v2di {}
+
+ const vui __builtin_crypto_vpermxor_v4si (vui, vui, vui);
+ VPERMXOR_V4SI crypto_vpermxor_v4si {}
+
+ const vus __builtin_crypto_vpermxor_v8hi (vus, vus, vus);
+ VPERMXOR_V8HI crypto_vpermxor_v8hi {}
+
+ const vus __builtin_crypto_vpmsumb (vuc, vuc);
+ VPMSUMB crypto_vpmsumb {}
+
+ const vuq __builtin_crypto_vpmsumd (vull, vull);
+ VPMSUMD crypto_vpmsumd {}
+
+ const vui __builtin_crypto_vpmsumh (vus, vus);
+ VPMSUMH crypto_vpmsumh {}
+
+ const vull __builtin_crypto_vpmsumw (vui, vui);
+ VPMSUMW crypto_vpmsumw {}
+
+ const vf __builtin_vsx_float2_v2df (vd, vd);
+ FLOAT2_V2DF float2_v2df {}
+
+ const vf __builtin_vsx_float2_v2di (vsll, vsll);
+ FLOAT2_V2DI float2_v2di {}
+
+ const vsc __builtin_vsx_revb_v16qi (vsc);
+ REVB_V16QI revb_v16qi {}
+
+ const vsq __builtin_vsx_revb_v1ti (vsq);
+ REVB_V1TI revb_v1ti {}
+
+ const vd __builtin_vsx_revb_v2df (vd);
+ REVB_V2DF revb_v2df {}
+
+ const vsll __builtin_vsx_revb_v2di (vsll);
+ REVB_V2DI revb_v2di {}
+
+ const vf __builtin_vsx_revb_v4sf (vf);
+ REVB_V4SF revb_v4sf {}
+
+ const vsi __builtin_vsx_revb_v4si (vsi);
+ REVB_V4SI revb_v4si {}
+
+ const vss __builtin_vsx_revb_v8hi (vss);
+ REVB_V8HI revb_v8hi {}
+
+ const vf __builtin_vsx_uns_float2_v2di (vull, vull);
+ UNS_FLOAT2_V2DI uns_float2_v2di {}
+
+ const vsi __builtin_vsx_vsigned2_v2df (vd, vd);
+ VEC_VSIGNED2_V2DF vsigned2_v2df {}
+
+ const vui __builtin_vsx_vunsigned2_v2df (vd, vd);
+ VEC_VUNSIGNED2_V2DF vunsigned2_v2df {}
+
+ const vf __builtin_vsx_xscvdpspn (double);
+ XSCVDPSPN vsx_xscvdpspn {}
+
+ const double __builtin_vsx_xscvspdpn (vf);
+ XSCVSPDPN vsx_xscvspdpn {}
+
+
^ permalink raw reply [flat|nested] 8+ messages in thread
* [gcc(refs/users/wschmidt/heads/builtins3)] rs6000: Add Power8 vector builtins
@ 2020-08-18 18:45 William Schmidt
0 siblings, 0 replies; 8+ messages in thread
From: William Schmidt @ 2020-08-18 18:45 UTC (permalink / raw)
To: gcc-cvs
https://gcc.gnu.org/g:bc7598256549fffea93338c7c4de5f7545f88e73
commit bc7598256549fffea93338c7c4de5f7545f88e73
Author: Bill Schmidt <wschmidt@linux.ibm.com>
Date: Wed Jun 17 11:40:50 2020 -0500
rs6000: Add Power8 vector builtins
2020-07-26 Bill Schmidt <wschmidt@linux.ibm.com>
* config/rs6000/rs6000-builtin-new.def: Add power8-vector
builtins.
Diff:
---
gcc/config/rs6000/rs6000-builtin-new.def | 417 +++++++++++++++++++++++++++++++
1 file changed, 417 insertions(+)
diff --git a/gcc/config/rs6000/rs6000-builtin-new.def b/gcc/config/rs6000/rs6000-builtin-new.def
index 0a17cad446c..2f918c1d69e 100644
--- a/gcc/config/rs6000/rs6000-builtin-new.def
+++ b/gcc/config/rs6000/rs6000-builtin-new.def
@@ -1977,3 +1977,420 @@
DIVDEU diveu_di {}
+; Power8 vector built-ins.
+[power8-vector]
+ const vsll __builtin_altivec_abs_v2di (vsll);
+ ABS_V2DI absv2di2 {}
+
+ const vsc __builtin_altivec_eqv_v16qi (vsc, vsc);
+ EQV_V16QI eqvv16qi3 {}
+
+ const vuc __builtin_altivec_eqv_v16qi_uns (vuc, vuc);
+ EQV_V16QI_UNS eqvv16qi3 {}
+
+ const vsq __builtin_altivec_eqv_v1ti (vsq, vsq);
+ EQV_V1TI eqvv1ti3 {}
+
+ const vuq __builtin_altivec_eqv_v1ti_uns (vuq, vuq);
+ EQV_V1TI_UNS eqvv1ti3 {}
+
+ const vd __builtin_altivec_eqv_v2df (vd, vd);
+ EQV_V2DF eqvv2df3 {}
+
+ const vsll __builtin_altivec_eqv_v2di (vsll, vsll);
+ EQV_V2DI eqvv2di3 {}
+
+ const vull __builtin_altivec_eqv_v2di_uns (vull, vull);
+ EQV_V2DI_UNS eqvv2di3 {}
+
+ const vf __builtin_altivec_eqv_v4sf (vf, vf);
+ EQV_V4SF eqvv4sf3 {}
+
+ const vsi __builtin_altivec_eqv_v4si (vsi, vsi);
+ EQV_V4SI eqvv4si3 {}
+
+ const vui __builtin_altivec_eqv_v4si_uns (vui, vui);
+ EQV_V4SI_UNS eqvv4si3 {}
+
+ const vss __builtin_altivec_eqv_v8hi (vss, vss);
+ EQV_V8HI eqvv8hi3 {}
+
+ const vus __builtin_altivec_eqv_v8hi_uns (vus, vus);
+ EQV_V8HI_UNS eqvv8hi3 {}
+
+ const vsc __builtin_altivec_nand_v16qi (vsc, vsc);
+ NAND_V16QI nandv16qi3 {}
+
+ const vuc __builtin_altivec_nand_v16qi_uns (vuc, vuc);
+ NAND_V16QI_UNS nandv16qi3 {}
+
+ const vsq __builtin_altivec_nand_v1ti (vsq, vsq);
+ NAND_V1TI nandv1ti3 {}
+
+ const vuq __builtin_altivec_nand_v1ti_uns (vuq, vuq);
+ NAND_V1TI_UNS nandv1ti3 {}
+
+ const vd __builtin_altivec_nand_v2df (vd, vd);
+ NAND_V2DF nandv2df3 {}
+
+ const vsll __builtin_altivec_nand_v2di (vsll, vsll);
+ NAND_V2DI nandv2di3 {}
+
+ const vull __builtin_altivec_nand_v2di_uns (vull, vull);
+ NAND_V2DI_UNS nandv2di3 {}
+
+ const vf __builtin_altivec_nand_v4sf (vf, vf);
+ NAND_V4SF nandv4sf3 {}
+
+ const vsi __builtin_altivec_nand_v4si (vsi, vsi);
+ NAND_V4SI nandv4si3 {}
+
+ const vui __builtin_altivec_nand_v4si_uns (vui, vui);
+ NAND_V4SI_UNS nandv4si3 {}
+
+ const vss __builtin_altivec_nand_v8hi (vss, vss);
+ NAND_V8HI nandv8hi3 {}
+
+ const vus __builtin_altivec_nand_v8hi_uns (vus, vus);
+ NAND_V8HI_UNS nandv8hi3 {}
+
+ const vsc __builtin_altivec_neg_v16qi (vsc);
+ NEG_V16QI negv16qi2 {}
+
+ const vd __builtin_altivec_neg_v2df (vd);
+ NEG_V2DF negv2df2 {}
+
+ const vsll __builtin_altivec_neg_v2di (vsll);
+ NEG_V2DI negv2di2 {}
+
+ const vf __builtin_altivec_neg_v4sf (vf);
+ NEG_V4SF negv4sf2 {}
+
+ const vsi __builtin_altivec_neg_v4si (vsi);
+ NEG_V4SI negv4si2 {}
+
+ const vss __builtin_altivec_neg_v8hi (vss);
+ NEG_V8HI negv8hi2 {}
+
+ const vsc __builtin_altivec_orc_v16qi (vsc, vsc);
+ ORC_V16QI orcv16qi3 {}
+
+ const vuc __builtin_altivec_orc_v16qi_uns (vuc, vuc);
+ ORC_V16QI_UNS orcv16qi3 {}
+
+ const vsq __builtin_altivec_orc_v1ti (vsq, vsq);
+ ORC_V1TI orcv1ti3 {}
+
+ const vuq __builtin_altivec_orc_v1ti_uns (vuq, vuq);
+ ORC_V1TI_UNS orcv1ti3 {}
+
+ const vd __builtin_altivec_orc_v2df (vd, vd);
+ ORC_V2DF orcv2df3 {}
+
+ const vsll __builtin_altivec_orc_v2di (vsll, vsll);
+ ORC_V2DI orcv2di3 {}
+
+ const vull __builtin_altivec_orc_v2di_uns (vull, vull);
+ ORC_V2DI_UNS orcv2di3 {}
+
+ const vf __builtin_altivec_orc_v4sf (vf, vf);
+ ORC_V4SF orcv4sf3 {}
+
+ const vsi __builtin_altivec_orc_v4si (vsi, vsi);
+ ORC_V4SI orcv4si3 {}
+
+ const vui __builtin_altivec_orc_v4si_uns (vui, vui);
+ ORC_V4SI_UNS orcv4si3 {}
+
+ const vss __builtin_altivec_orc_v8hi (vss, vss);
+ ORC_V8HI orcv8hi3 {}
+
+ const vus __builtin_altivec_orc_v8hi_uns (vus, vus);
+ ORC_V8HI_UNS orcv8hi3 {}
+
+ const vsc __builtin_altivec_vclzb (vsc);
+ VCLZB clzv16qi2 {}
+
+ const vsll __builtin_altivec_vclzd (vsll);
+ VCLZD clzv2di2 {}
+
+ const vss __builtin_altivec_vclzh (vss);
+ VCLZH clzv8hi2 {}
+
+ const vsi __builtin_altivec_vclzw (vsi);
+ VCLZW clzv4si2 {}
+
+ const vsc __builtin_altivec_vgbbd (vsc);
+ VGBBD p8v_vgbbd {}
+
+ const vsq __builtin_altivec_vaddcuq (vsq, vsq);
+ VADDCUQ altivec_vaddcuq {}
+
+ const vsq __builtin_altivec_vaddecuq (vsq, vsq, vsq);
+ VADDECUQ altivec_vaddecuq {}
+
+ const vuq __builtin_altivec_vaddeuqm (vuq, vuq, vuq);
+ VADDEUQM altivec_vaddeuqm {}
+
+ const vsll __builtin_altivec_vaddudm (vsll, vsll);
+ VADDUDM addv2di3 {}
+
+ const vsq __builtin_altivec_vadduqm (vsq, vsq);
+ VADDUQM altivec_vadduqm {}
+
+ const vsll __builtin_altivec_vbpermq (vop, vsc);
+ VBPERMQ altivec_vbpermq {}
+
+ const vuc __builtin_altivec_vbpermq2 (vuc, vuc);
+ VBPERMQ2 altivec_vbpermq2 {}
+
+ const vbll __builtin_altivec_vcmpequd (vsll, vsll);
+ VCMPEQUD vector_eqv2di {}
+
+ const int __builtin_altivec_vcmpequd_p (int, vsll, vsll);
+ VCMPEQUD_P vector_eq_v2di_p {pred}
+
+ const vbll __builtin_altivec_vcmpgtsd (vsll, vsll);
+ VCMPGTSD vector_gtv2di {}
+
+ const int __builtin_altivec_vcmpgtsd_p (int, vsll, vsll);
+ VCMPGTSD_P vector_gt_v2di_p {pred}
+
+ const vbll __builtin_altivec_vcmpgtud (vull, vull);
+ VCMPGTUD vector_gtuv2di {}
+
+ const int __builtin_altivec_vcmpgtud_p (vull, vull);
+ VCMPGTUD_P vector_gtu_v2di_p {pred}
+
+ const vsll __builtin_altivec_vmaxsd (vsll, vsll);
+ VMAXSD smaxv2di3 {}
+
+ const vull __builtin_altivec_vmaxud (vull, vull);
+ VMAXUD umaxv2di3 {}
+
+ const vsll __builtin_altivec_vminsd (vsll, vsll);
+ VMINSD sminv2di3 {}
+
+ const vull __builtin_altivec_vminud (vull, vull);
+ VMINUD uminv2di3 {}
+
+ const vd __builtin_altivec_vmrgew_v2df (vd, vd);
+ VMRGEW_V2DF p8_vmrgew_v2df {}
+
+ const vsll __builtin_altivec_vmrgew_v2di (vsll, vsll);
+ VMRGEW_V2DI p8_vmrgew_v2di {}
+
+ const vf __builtin_altivec_vmrgew_v4sf (vf, vf);
+ VMRGEW_V4SF p8_vmrgew_v4sf {}
+
+ const vsi __builtin_altivec_vmrgew_v4si (vsi, vsi);
+ VMRGEW_V4SI p8_vmrgew_v4si {}
+
+ const vd __builtin_altivec_vmrgow_v2df (vd, vd);
+ VMRGOW_V2DF p8_vmrgow_v2df {}
+
+ const vsll __builtin_altivec_vmrgow_v2di (vsll, vsll);
+ VMRGOW_V2DI p8_vmrgow_v2di {}
+
+ const vf __builtin_altivec_vmrgow_v4sf (vf, vf);
+ VMRGOW_V4SF p8_vmrgow_v4sf {}
+
+ const vsi __builtin_altivec_vmrgow_v4si (vsi, vsi);
+ VMRGOW_V4SI p8_vmrgow_v4si {}
+
+ const vsll __builtin_altivec_vmulesw (vsi, vsi);
+ VMULESW vec_widen_smult_even_v4si {}
+
+ const vull __builtin_altivec_vmuleuw (vui, vui);
+ VMULEUW vec_widen_umult_even_v4si {}
+
+ const vsll __builtin_altivec_vmulosw (vsi, vsi);
+ VMULOSW vec_widen_smult_odd_v4si {}
+
+ const vull __builtin_altivec_vmulouw (vui, vui);
+ VMULOUW vec_widen_umult_odd_v4si {}
+
+ const vsc __builtin_altivec_vpermxor (vsc, vsc, vsc);
+ VPERMXOR altivec_vpermxor {}
+
+ const vsi __builtin_altivec_vpksdss (vsll, vsll);
+ VPKSDSS altivec_vpksdss {}
+
+ const vui __builtin_altivec_vpksdus (vsll, vsll);
+ VPKSDUS altivec_vpksdus {}
+
+ const vui __builtin_altivec_vpkudum (vull, vull);
+ VPKUDUM altivec_vpkudum {}
+
+ const vui __builtin_altivec_vpkudus (vull, vull);
+ VPKUDUS altivec_vpkudus {}
+
+; #### Following are duplicates of __builtin_crypto_vpmsum*. This
+; can't have ever worked properly!
+;
+; const vus __builtin_altivec_vpmsumb (vuc, vuc);
+; VPMSUMB crypto_vpmsumb {}
+;
+; const vuq __builtin_altivec_vpmsumd (vull, vull);
+; VPMSUMD crypto_vpmsumd {}
+;
+; const vui __builtin_altivec_vpmsumh (vus, vus);
+; VPMSUMH crypto_vpmsumh {}
+;
+; const vull __builtin_altivec_vpmsumw (vui, vui);
+; VPMSUMW crypto_vpmsumw {}
+
+ const vuc __builtin_altivec_vpopcntb (vsc);
+ VPOPCNTB popcountv16qi2 {}
+
+ const vull __builtin_altivec_vpopcntd (vsll);
+ VPOPCNTD popcountv2di2 {}
+
+ const vus __builtin_altivec_vpopcnth (vss);
+ VPOPCNTH popcountv8hi2 {}
+
+ const vuc __builtin_altivec_vpopcntub (vuc);
+ VPOPCNTUB popcountv16qi2 {}
+
+ const vull __builtin_altivec_vpopcntud (vull);
+ VPOPCNTUD popcountv2di2 {}
+
+ const vus __builtin_altivec_vpopcntuh (vus);
+ VPOPCNTUH popcountv8hi2 {}
+
+ const vui __builtin_altivec_vpopcntuw (vui);
+ VPOPCNTUW popcountv4si2 {}
+
+ const vui __builtin_altivec_vpopcntw (vsi);
+ VPOPCNTW popcountv4si2 {}
+
+ const vsll __builtin_altivec_vrld (vsll, vull);
+ VRLD vrotlv2di3 {}
+
+ const vsll __builtin_altivec_vsld (vsll, vull);
+ VSLD vashlv2di3 {}
+
+ const vsll __builtin_altivec_vsrad (vsll, vull);
+ VSRAD vashrv2di3 {}
+
+ const vsll __builtin_altivec_vsrd (vsll, vull);
+ VSRD vlshrv2di3 {}
+
+ const vuq __builtin_altivec_vsubcuq (vuq, vuq);
+ VSUBCUQ altivec_vsubcuq {}
+
+ const vsq __builtin_altivec_vsubecuq (vsq, vsq, vsq);
+ VSUBECUQ altivec_vsubecuq {}
+
+ const vuq __builtin_altivec_vsubeuqm (vuq, vuq, vuq);
+ VSUBEUQM altivec_vsubeuqm {}
+
+ const vull __builtin_altivec_vsubudm (vull, vull);
+ VSUBUDM subv2di3 {}
+
+ const vuq __builtin_altivec_vsubuqm (vuq, vuq);
+ VSUBUQM altivec_vsubuqm {}
+
+ const vsll __builtin_altivec_vupkhsw (vsi);
+ VUPKHSW altivec_vupkhsw {}
+
+ const vsll __builtin_altivec_vupklsw (vsi);
+ VUPKLSW altivec_vupklsw {}
+
+ const vsq __builtin_bcdadd (vsq, vsq, const int<1>);
+ BCDADD bcdadd {}
+
+ const unsigned int __builtin_bcdadd_eq (vsq, vsq, const int<1>);
+ BCDADD_EQ bcdadd_eq {}
+
+ const unsigned int __builtin_bcdadd_gt (vsq, vsq, const int<1>);
+ BCDADD_GT bcdadd_gt {}
+
+ const unsigned int __builtin_bcdadd_lt (vsq, vsq, const int<1>);
+ BCDADD_LT bcdadd_lt {}
+
+ const unsigned int __builtin_bcdadd_ov (vsq, vsq, const int<1>);
+ BCDADD_OV bcdadd_unordered {}
+
+ const vsq __builtin_bcdsub (vsq, vsq, const int<1>);
+ BCDSUB bcdsub {}
+
+ const unsigned int __builtin_bcdsub_eq (vsq, vsq, const int<1>);
+ BCDSUB_EQ bcdsub_eq {}
+
+ const unsigned int __builtin_bcdsub_gt (vsq, vsq, const int<1>);
+ BCDSUB_GT bcdsub_gt {}
+
+ const unsigned int __builtin_bcdsub_lt (vsq, vsq, const int<1>);
+ BCDSUB_LT bcdsub_lt {}
+
+ const unsigned int __builtin_bcdsub_ov (vsq, vsq, const int<1>);
+ BCDSUB_OV bcdsub_unordered {}
+
+ const vuc __builtin_crypto_vpermxor_v16qi (vuc, vuc, vuc);
+ VPERMXOR_V16QI crypto_vpermxor_v16qi {}
+
+ const vull __builtin_crypto_vpermxor_v2di (vull, vull, vull);
+ VPERMXOR_V2DI crypto_vpermxor_v2di {}
+
+ const vui __builtin_crypto_vpermxor_v4si (vui, vui, vui);
+ VPERMXOR_V4SI crypto_vpermxor_v4si {}
+
+ const vus __builtin_crypto_vpermxor_v8hi (vus, vus, vus);
+ VPERMXOR_V8HI crypto_vpermxor_v8hi {}
+
+ const vus __builtin_crypto_vpmsumb (vuc, vuc);
+ VPMSUMB crypto_vpmsumb {}
+
+ const vuq __builtin_crypto_vpmsumd (vull, vull);
+ VPMSUMD crypto_vpmsumd {}
+
+ const vui __builtin_crypto_vpmsumh (vus, vus);
+ VPMSUMH crypto_vpmsumh {}
+
+ const vull __builtin_crypto_vpmsumw (vui, vui);
+ VPMSUMW crypto_vpmsumw {}
+
+ const vf __builtin_vsx_float2_v2df (vd, vd);
+ FLOAT2_V2DF float2_v2df {}
+
+ const vf __builtin_vsx_float2_v2di (vsll, vsll);
+ FLOAT2_V2DI float2_v2di {}
+
+ const vsc __builtin_vsx_revb_v16qi (vsc);
+ REVB_V16QI revb_v16qi {}
+
+ const vsq __builtin_vsx_revb_v1ti (vsq);
+ REVB_V1TI revb_v1ti {}
+
+ const vd __builtin_vsx_revb_v2df (vd);
+ REVB_V2DF revb_v2df {}
+
+ const vsll __builtin_vsx_revb_v2di (vsll);
+ REVB_V2DI revb_v2di {}
+
+ const vf __builtin_vsx_revb_v4sf (vf);
+ REVB_V4SF revb_v4sf {}
+
+ const vsi __builtin_vsx_revb_v4si (vsi);
+ REVB_V4SI revb_v4si {}
+
+ const vss __builtin_vsx_revb_v8hi (vss);
+ REVB_V8HI revb_v8hi {}
+
+ const vf __builtin_vsx_uns_float2_v2di (vull, vull);
+ UNS_FLOAT2_V2DI uns_float2_v2di {}
+
+ const vsi __builtin_vsx_vsigned2_v2df (vd, vd);
+ VEC_VSIGNED2_V2DF vsigned2_v2df {}
+
+ const vui __builtin_vsx_vunsigned2_v2df (vd, vd);
+ VEC_VUNSIGNED2_V2DF vunsigned2_v2df {}
+
+ const vf __builtin_vsx_xscvdpspn (double);
+ XSCVDPSPN vsx_xscvdpspn {}
+
+ const double __builtin_vsx_xscvspdpn (vf);
+ XSCVSPDPN vsx_xscvspdpn {}
+
+
^ permalink raw reply [flat|nested] 8+ messages in thread
* [gcc(refs/users/wschmidt/heads/builtins3)] rs6000: Add Power8 vector builtins
@ 2020-07-27 18:48 William Schmidt
0 siblings, 0 replies; 8+ messages in thread
From: William Schmidt @ 2020-07-27 18:48 UTC (permalink / raw)
To: gcc-cvs
https://gcc.gnu.org/g:b45f99586cb0f77038579e4eb7f59d3df8dabbf1
commit b45f99586cb0f77038579e4eb7f59d3df8dabbf1
Author: Bill Schmidt <wschmidt@linux.ibm.com>
Date: Wed Jun 17 11:40:50 2020 -0500
rs6000: Add Power8 vector builtins
2020-07-26 Bill Schmidt <wschmidt@linux.ibm.com>
* config/rs6000/rs6000-builtin-new.def: Add power8-vector
builtins.
Diff:
---
gcc/config/rs6000/rs6000-builtin-new.def | 417 +++++++++++++++++++++++++++++++
1 file changed, 417 insertions(+)
diff --git a/gcc/config/rs6000/rs6000-builtin-new.def b/gcc/config/rs6000/rs6000-builtin-new.def
index 0a17cad446c..2f918c1d69e 100644
--- a/gcc/config/rs6000/rs6000-builtin-new.def
+++ b/gcc/config/rs6000/rs6000-builtin-new.def
@@ -1977,3 +1977,420 @@
DIVDEU diveu_di {}
+; Power8 vector built-ins.
+[power8-vector]
+ const vsll __builtin_altivec_abs_v2di (vsll);
+ ABS_V2DI absv2di2 {}
+
+ const vsc __builtin_altivec_eqv_v16qi (vsc, vsc);
+ EQV_V16QI eqvv16qi3 {}
+
+ const vuc __builtin_altivec_eqv_v16qi_uns (vuc, vuc);
+ EQV_V16QI_UNS eqvv16qi3 {}
+
+ const vsq __builtin_altivec_eqv_v1ti (vsq, vsq);
+ EQV_V1TI eqvv1ti3 {}
+
+ const vuq __builtin_altivec_eqv_v1ti_uns (vuq, vuq);
+ EQV_V1TI_UNS eqvv1ti3 {}
+
+ const vd __builtin_altivec_eqv_v2df (vd, vd);
+ EQV_V2DF eqvv2df3 {}
+
+ const vsll __builtin_altivec_eqv_v2di (vsll, vsll);
+ EQV_V2DI eqvv2di3 {}
+
+ const vull __builtin_altivec_eqv_v2di_uns (vull, vull);
+ EQV_V2DI_UNS eqvv2di3 {}
+
+ const vf __builtin_altivec_eqv_v4sf (vf, vf);
+ EQV_V4SF eqvv4sf3 {}
+
+ const vsi __builtin_altivec_eqv_v4si (vsi, vsi);
+ EQV_V4SI eqvv4si3 {}
+
+ const vui __builtin_altivec_eqv_v4si_uns (vui, vui);
+ EQV_V4SI_UNS eqvv4si3 {}
+
+ const vss __builtin_altivec_eqv_v8hi (vss, vss);
+ EQV_V8HI eqvv8hi3 {}
+
+ const vus __builtin_altivec_eqv_v8hi_uns (vus, vus);
+ EQV_V8HI_UNS eqvv8hi3 {}
+
+ const vsc __builtin_altivec_nand_v16qi (vsc, vsc);
+ NAND_V16QI nandv16qi3 {}
+
+ const vuc __builtin_altivec_nand_v16qi_uns (vuc, vuc);
+ NAND_V16QI_UNS nandv16qi3 {}
+
+ const vsq __builtin_altivec_nand_v1ti (vsq, vsq);
+ NAND_V1TI nandv1ti3 {}
+
+ const vuq __builtin_altivec_nand_v1ti_uns (vuq, vuq);
+ NAND_V1TI_UNS nandv1ti3 {}
+
+ const vd __builtin_altivec_nand_v2df (vd, vd);
+ NAND_V2DF nandv2df3 {}
+
+ const vsll __builtin_altivec_nand_v2di (vsll, vsll);
+ NAND_V2DI nandv2di3 {}
+
+ const vull __builtin_altivec_nand_v2di_uns (vull, vull);
+ NAND_V2DI_UNS nandv2di3 {}
+
+ const vf __builtin_altivec_nand_v4sf (vf, vf);
+ NAND_V4SF nandv4sf3 {}
+
+ const vsi __builtin_altivec_nand_v4si (vsi, vsi);
+ NAND_V4SI nandv4si3 {}
+
+ const vui __builtin_altivec_nand_v4si_uns (vui, vui);
+ NAND_V4SI_UNS nandv4si3 {}
+
+ const vss __builtin_altivec_nand_v8hi (vss, vss);
+ NAND_V8HI nandv8hi3 {}
+
+ const vus __builtin_altivec_nand_v8hi_uns (vus, vus);
+ NAND_V8HI_UNS nandv8hi3 {}
+
+ const vsc __builtin_altivec_neg_v16qi (vsc);
+ NEG_V16QI negv16qi2 {}
+
+ const vd __builtin_altivec_neg_v2df (vd);
+ NEG_V2DF negv2df2 {}
+
+ const vsll __builtin_altivec_neg_v2di (vsll);
+ NEG_V2DI negv2di2 {}
+
+ const vf __builtin_altivec_neg_v4sf (vf);
+ NEG_V4SF negv4sf2 {}
+
+ const vsi __builtin_altivec_neg_v4si (vsi);
+ NEG_V4SI negv4si2 {}
+
+ const vss __builtin_altivec_neg_v8hi (vss);
+ NEG_V8HI negv8hi2 {}
+
+ const vsc __builtin_altivec_orc_v16qi (vsc, vsc);
+ ORC_V16QI orcv16qi3 {}
+
+ const vuc __builtin_altivec_orc_v16qi_uns (vuc, vuc);
+ ORC_V16QI_UNS orcv16qi3 {}
+
+ const vsq __builtin_altivec_orc_v1ti (vsq, vsq);
+ ORC_V1TI orcv1ti3 {}
+
+ const vuq __builtin_altivec_orc_v1ti_uns (vuq, vuq);
+ ORC_V1TI_UNS orcv1ti3 {}
+
+ const vd __builtin_altivec_orc_v2df (vd, vd);
+ ORC_V2DF orcv2df3 {}
+
+ const vsll __builtin_altivec_orc_v2di (vsll, vsll);
+ ORC_V2DI orcv2di3 {}
+
+ const vull __builtin_altivec_orc_v2di_uns (vull, vull);
+ ORC_V2DI_UNS orcv2di3 {}
+
+ const vf __builtin_altivec_orc_v4sf (vf, vf);
+ ORC_V4SF orcv4sf3 {}
+
+ const vsi __builtin_altivec_orc_v4si (vsi, vsi);
+ ORC_V4SI orcv4si3 {}
+
+ const vui __builtin_altivec_orc_v4si_uns (vui, vui);
+ ORC_V4SI_UNS orcv4si3 {}
+
+ const vss __builtin_altivec_orc_v8hi (vss, vss);
+ ORC_V8HI orcv8hi3 {}
+
+ const vus __builtin_altivec_orc_v8hi_uns (vus, vus);
+ ORC_V8HI_UNS orcv8hi3 {}
+
+ const vsc __builtin_altivec_vclzb (vsc);
+ VCLZB clzv16qi2 {}
+
+ const vsll __builtin_altivec_vclzd (vsll);
+ VCLZD clzv2di2 {}
+
+ const vss __builtin_altivec_vclzh (vss);
+ VCLZH clzv8hi2 {}
+
+ const vsi __builtin_altivec_vclzw (vsi);
+ VCLZW clzv4si2 {}
+
+ const vsc __builtin_altivec_vgbbd (vsc);
+ VGBBD p8v_vgbbd {}
+
+ const vsq __builtin_altivec_vaddcuq (vsq, vsq);
+ VADDCUQ altivec_vaddcuq {}
+
+ const vsq __builtin_altivec_vaddecuq (vsq, vsq, vsq);
+ VADDECUQ altivec_vaddecuq {}
+
+ const vuq __builtin_altivec_vaddeuqm (vuq, vuq, vuq);
+ VADDEUQM altivec_vaddeuqm {}
+
+ const vsll __builtin_altivec_vaddudm (vsll, vsll);
+ VADDUDM addv2di3 {}
+
+ const vsq __builtin_altivec_vadduqm (vsq, vsq);
+ VADDUQM altivec_vadduqm {}
+
+ const vsll __builtin_altivec_vbpermq (vop, vsc);
+ VBPERMQ altivec_vbpermq {}
+
+ const vuc __builtin_altivec_vbpermq2 (vuc, vuc);
+ VBPERMQ2 altivec_vbpermq2 {}
+
+ const vbll __builtin_altivec_vcmpequd (vsll, vsll);
+ VCMPEQUD vector_eqv2di {}
+
+ const int __builtin_altivec_vcmpequd_p (int, vsll, vsll);
+ VCMPEQUD_P vector_eq_v2di_p {pred}
+
+ const vbll __builtin_altivec_vcmpgtsd (vsll, vsll);
+ VCMPGTSD vector_gtv2di {}
+
+ const int __builtin_altivec_vcmpgtsd_p (int, vsll, vsll);
+ VCMPGTSD_P vector_gt_v2di_p {pred}
+
+ const vbll __builtin_altivec_vcmpgtud (vull, vull);
+ VCMPGTUD vector_gtuv2di {}
+
+ const int __builtin_altivec_vcmpgtud_p (vull, vull);
+ VCMPGTUD_P vector_gtu_v2di_p {pred}
+
+ const vsll __builtin_altivec_vmaxsd (vsll, vsll);
+ VMAXSD smaxv2di3 {}
+
+ const vull __builtin_altivec_vmaxud (vull, vull);
+ VMAXUD umaxv2di3 {}
+
+ const vsll __builtin_altivec_vminsd (vsll, vsll);
+ VMINSD sminv2di3 {}
+
+ const vull __builtin_altivec_vminud (vull, vull);
+ VMINUD uminv2di3 {}
+
+ const vd __builtin_altivec_vmrgew_v2df (vd, vd);
+ VMRGEW_V2DF p8_vmrgew_v2df {}
+
+ const vsll __builtin_altivec_vmrgew_v2di (vsll, vsll);
+ VMRGEW_V2DI p8_vmrgew_v2di {}
+
+ const vf __builtin_altivec_vmrgew_v4sf (vf, vf);
+ VMRGEW_V4SF p8_vmrgew_v4sf {}
+
+ const vsi __builtin_altivec_vmrgew_v4si (vsi, vsi);
+ VMRGEW_V4SI p8_vmrgew_v4si {}
+
+ const vd __builtin_altivec_vmrgow_v2df (vd, vd);
+ VMRGOW_V2DF p8_vmrgow_v2df {}
+
+ const vsll __builtin_altivec_vmrgow_v2di (vsll, vsll);
+ VMRGOW_V2DI p8_vmrgow_v2di {}
+
+ const vf __builtin_altivec_vmrgow_v4sf (vf, vf);
+ VMRGOW_V4SF p8_vmrgow_v4sf {}
+
+ const vsi __builtin_altivec_vmrgow_v4si (vsi, vsi);
+ VMRGOW_V4SI p8_vmrgow_v4si {}
+
+ const vsll __builtin_altivec_vmulesw (vsi, vsi);
+ VMULESW vec_widen_smult_even_v4si {}
+
+ const vull __builtin_altivec_vmuleuw (vui, vui);
+ VMULEUW vec_widen_umult_even_v4si {}
+
+ const vsll __builtin_altivec_vmulosw (vsi, vsi);
+ VMULOSW vec_widen_smult_odd_v4si {}
+
+ const vull __builtin_altivec_vmulouw (vui, vui);
+ VMULOUW vec_widen_umult_odd_v4si {}
+
+ const vsc __builtin_altivec_vpermxor (vsc, vsc, vsc);
+ VPERMXOR altivec_vpermxor {}
+
+ const vsi __builtin_altivec_vpksdss (vsll, vsll);
+ VPKSDSS altivec_vpksdss {}
+
+ const vui __builtin_altivec_vpksdus (vsll, vsll);
+ VPKSDUS altivec_vpksdus {}
+
+ const vui __builtin_altivec_vpkudum (vull, vull);
+ VPKUDUM altivec_vpkudum {}
+
+ const vui __builtin_altivec_vpkudus (vull, vull);
+ VPKUDUS altivec_vpkudus {}
+
+; #### Following are duplicates of __builtin_crypto_vpmsum*. This
+; can't have ever worked properly!
+;
+; const vus __builtin_altivec_vpmsumb (vuc, vuc);
+; VPMSUMB crypto_vpmsumb {}
+;
+; const vuq __builtin_altivec_vpmsumd (vull, vull);
+; VPMSUMD crypto_vpmsumd {}
+;
+; const vui __builtin_altivec_vpmsumh (vus, vus);
+; VPMSUMH crypto_vpmsumh {}
+;
+; const vull __builtin_altivec_vpmsumw (vui, vui);
+; VPMSUMW crypto_vpmsumw {}
+
+ const vuc __builtin_altivec_vpopcntb (vsc);
+ VPOPCNTB popcountv16qi2 {}
+
+ const vull __builtin_altivec_vpopcntd (vsll);
+ VPOPCNTD popcountv2di2 {}
+
+ const vus __builtin_altivec_vpopcnth (vss);
+ VPOPCNTH popcountv8hi2 {}
+
+ const vuc __builtin_altivec_vpopcntub (vuc);
+ VPOPCNTUB popcountv16qi2 {}
+
+ const vull __builtin_altivec_vpopcntud (vull);
+ VPOPCNTUD popcountv2di2 {}
+
+ const vus __builtin_altivec_vpopcntuh (vus);
+ VPOPCNTUH popcountv8hi2 {}
+
+ const vui __builtin_altivec_vpopcntuw (vui);
+ VPOPCNTUW popcountv4si2 {}
+
+ const vui __builtin_altivec_vpopcntw (vsi);
+ VPOPCNTW popcountv4si2 {}
+
+ const vsll __builtin_altivec_vrld (vsll, vull);
+ VRLD vrotlv2di3 {}
+
+ const vsll __builtin_altivec_vsld (vsll, vull);
+ VSLD vashlv2di3 {}
+
+ const vsll __builtin_altivec_vsrad (vsll, vull);
+ VSRAD vashrv2di3 {}
+
+ const vsll __builtin_altivec_vsrd (vsll, vull);
+ VSRD vlshrv2di3 {}
+
+ const vuq __builtin_altivec_vsubcuq (vuq, vuq);
+ VSUBCUQ altivec_vsubcuq {}
+
+ const vsq __builtin_altivec_vsubecuq (vsq, vsq, vsq);
+ VSUBECUQ altivec_vsubecuq {}
+
+ const vuq __builtin_altivec_vsubeuqm (vuq, vuq, vuq);
+ VSUBEUQM altivec_vsubeuqm {}
+
+ const vull __builtin_altivec_vsubudm (vull, vull);
+ VSUBUDM subv2di3 {}
+
+ const vuq __builtin_altivec_vsubuqm (vuq, vuq);
+ VSUBUQM altivec_vsubuqm {}
+
+ const vsll __builtin_altivec_vupkhsw (vsi);
+ VUPKHSW altivec_vupkhsw {}
+
+ const vsll __builtin_altivec_vupklsw (vsi);
+ VUPKLSW altivec_vupklsw {}
+
+ const vsq __builtin_bcdadd (vsq, vsq, const int<1>);
+ BCDADD bcdadd {}
+
+ const unsigned int __builtin_bcdadd_eq (vsq, vsq, const int<1>);
+ BCDADD_EQ bcdadd_eq {}
+
+ const unsigned int __builtin_bcdadd_gt (vsq, vsq, const int<1>);
+ BCDADD_GT bcdadd_gt {}
+
+ const unsigned int __builtin_bcdadd_lt (vsq, vsq, const int<1>);
+ BCDADD_LT bcdadd_lt {}
+
+ const unsigned int __builtin_bcdadd_ov (vsq, vsq, const int<1>);
+ BCDADD_OV bcdadd_unordered {}
+
+ const vsq __builtin_bcdsub (vsq, vsq, const int<1>);
+ BCDSUB bcdsub {}
+
+ const unsigned int __builtin_bcdsub_eq (vsq, vsq, const int<1>);
+ BCDSUB_EQ bcdsub_eq {}
+
+ const unsigned int __builtin_bcdsub_gt (vsq, vsq, const int<1>);
+ BCDSUB_GT bcdsub_gt {}
+
+ const unsigned int __builtin_bcdsub_lt (vsq, vsq, const int<1>);
+ BCDSUB_LT bcdsub_lt {}
+
+ const unsigned int __builtin_bcdsub_ov (vsq, vsq, const int<1>);
+ BCDSUB_OV bcdsub_unordered {}
+
+ const vuc __builtin_crypto_vpermxor_v16qi (vuc, vuc, vuc);
+ VPERMXOR_V16QI crypto_vpermxor_v16qi {}
+
+ const vull __builtin_crypto_vpermxor_v2di (vull, vull, vull);
+ VPERMXOR_V2DI crypto_vpermxor_v2di {}
+
+ const vui __builtin_crypto_vpermxor_v4si (vui, vui, vui);
+ VPERMXOR_V4SI crypto_vpermxor_v4si {}
+
+ const vus __builtin_crypto_vpermxor_v8hi (vus, vus, vus);
+ VPERMXOR_V8HI crypto_vpermxor_v8hi {}
+
+ const vus __builtin_crypto_vpmsumb (vuc, vuc);
+ VPMSUMB crypto_vpmsumb {}
+
+ const vuq __builtin_crypto_vpmsumd (vull, vull);
+ VPMSUMD crypto_vpmsumd {}
+
+ const vui __builtin_crypto_vpmsumh (vus, vus);
+ VPMSUMH crypto_vpmsumh {}
+
+ const vull __builtin_crypto_vpmsumw (vui, vui);
+ VPMSUMW crypto_vpmsumw {}
+
+ const vf __builtin_vsx_float2_v2df (vd, vd);
+ FLOAT2_V2DF float2_v2df {}
+
+ const vf __builtin_vsx_float2_v2di (vsll, vsll);
+ FLOAT2_V2DI float2_v2di {}
+
+ const vsc __builtin_vsx_revb_v16qi (vsc);
+ REVB_V16QI revb_v16qi {}
+
+ const vsq __builtin_vsx_revb_v1ti (vsq);
+ REVB_V1TI revb_v1ti {}
+
+ const vd __builtin_vsx_revb_v2df (vd);
+ REVB_V2DF revb_v2df {}
+
+ const vsll __builtin_vsx_revb_v2di (vsll);
+ REVB_V2DI revb_v2di {}
+
+ const vf __builtin_vsx_revb_v4sf (vf);
+ REVB_V4SF revb_v4sf {}
+
+ const vsi __builtin_vsx_revb_v4si (vsi);
+ REVB_V4SI revb_v4si {}
+
+ const vss __builtin_vsx_revb_v8hi (vss);
+ REVB_V8HI revb_v8hi {}
+
+ const vf __builtin_vsx_uns_float2_v2di (vull, vull);
+ UNS_FLOAT2_V2DI uns_float2_v2di {}
+
+ const vsi __builtin_vsx_vsigned2_v2df (vd, vd);
+ VEC_VSIGNED2_V2DF vsigned2_v2df {}
+
+ const vui __builtin_vsx_vunsigned2_v2df (vd, vd);
+ VEC_VUNSIGNED2_V2DF vunsigned2_v2df {}
+
+ const vf __builtin_vsx_xscvdpspn (double);
+ XSCVDPSPN vsx_xscvdpspn {}
+
+ const double __builtin_vsx_xscvspdpn (vf);
+ XSCVSPDPN vsx_xscvspdpn {}
+
+
^ permalink raw reply [flat|nested] 8+ messages in thread
end of thread, other threads:[~2020-10-29 19:51 UTC | newest]
Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-08-28 20:08 [gcc(refs/users/wschmidt/heads/builtins3)] rs6000: Add Power8 vector builtins William Schmidt
-- strict thread matches above, loose matches on Subject: below --
2020-10-29 19:51 William Schmidt
2020-10-27 16:29 William Schmidt
2020-09-16 21:30 William Schmidt
2020-09-14 13:58 William Schmidt
2020-08-20 16:39 William Schmidt
2020-08-18 18:45 William Schmidt
2020-07-27 18:48 William Schmidt
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).