From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path:
Received: by sourceware.org (Postfix, from userid 1816) id 3F5623858D28; Wed, 21 Jun 2023 12:39:21 +0000 (GMT)
DKIM-Filter: OpenDKIM Filter v2.11.0 sourceware.org 3F5623858D28
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gcc.gnu.org; s=default; t=1687351161; bh=6/InavfYgdilM+Ame/IOXFoji+AnJRZW6/CkOPnOBp0=; h=From:To:Subject:Date:From; b=nDMeFDfTe5n+eSThjKBaSb7BUe0MNbYi1jK5Q6l0ZdKiWIblRcRicJukJssnDede5 o5MBivUfhWs7yOVRL5H5y1LxIQlAzd0DDm44k0fHS8GnH5kKgADu0cNDB8PUv/nBAA nnKpaxBqU7WzyHjJxHjA38ENge/a41I7iEYWmeT0=
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Content-Type: text/plain; charset="utf-8"
From: Kyrylo Tkachov
To: gcc-cvs@gcc.gnu.org
Subject: [gcc r14-2017] Revert "aarch64: Convert SVE gather patterns to compact syntax"
X-Act-Checkin: gcc
X-Git-Author: Kyrylo Tkachov
X-Git-Refname: refs/heads/master
X-Git-Oldrev: 4b23d10ce8c7015c19e911324602633c36a2b828
X-Git-Newrev: 31cd5f9ae4d34765e593acd83c447006d4e8791c
Message-Id: <20230621123921.3F5623858D28@sourceware.org>
Date: Wed, 21 Jun 2023 12:39:21 +0000 (GMT)
List-Id:

https://gcc.gnu.org/g:31cd5f9ae4d34765e593acd83c447006d4e8791c

commit r14-2017-g31cd5f9ae4d34765e593acd83c447006d4e8791c
Author: Kyrylo Tkachov
Date:   Wed Jun 21 13:38:56 2023 +0100

    Revert "aarch64: Convert SVE gather patterns to compact syntax"

    This reverts commit bb3c69058a5fb874ea3c5c26bfb331d33d0497c3.

Diff:
---
 gcc/config/aarch64/aarch64-sve.md  | 430 +++++++++++++++----------------------
 gcc/config/aarch64/aarch64-sve2.md |  36 ++--
 2 files changed, 191 insertions(+), 275 deletions(-)

diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
index da5534c3e32..2de651a1989 100644
--- a/gcc/config/aarch64/aarch64-sve.md
+++ b/gcc/config/aarch64/aarch64-sve.md
@@ -1418,79 +1418,64 @@
 ;; Predicated gather loads for 32-bit elements. Operand 3 is true for
 ;; unsigned extension and false for signed extension.
(define_insn "mask_gather_load" - [(set (match_operand:SVE_4 0 "register_operand") + [(set (match_operand:SVE_4 0 "register_operand" "=w, w, w, w, w, w") (unspec:SVE_4 - [(match_operand:VNx4BI 5 "register_operand") - (match_operand:DI 1 "aarch64_sve_gather_offset_") - (match_operand:VNx4SI 2 "register_operand") - (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_") + [(match_operand:VNx4BI 5 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl") + (match_operand:DI 1 "aarch64_sve_gather_offset_" "Z, vgw, rk, rk, rk, rk") + (match_operand:VNx4SI 2 "register_operand" "w, w, w, w, w, w") + (match_operand:DI 3 "const_int_operand" "Ui1, Ui1, Z, Ui1, Z, Ui1") + (match_operand:DI 4 "aarch64_gather_scale_operand_" "Ui1, Ui1, Ui1, Ui1, i, i") (mem:BLK (scratch))] UNSPEC_LD1_GATHER))] "TARGET_SVE" - {@ [cons: =0, 1, 2, 3, 4, 5 ] - [&w, Z, w, Ui1, Ui1, Upl] ld1\t%0.s, %5/z, [%2.s] - [?w, Z, 0, Ui1, Ui1, Upl] ^ - [&w, vgw, w, Ui1, Ui1, Upl] ld1\t%0.s, %5/z, [%2.s, #%1] - [?w, vgw, 0, Ui1, Ui1, Upl] ^ - [&w, rk, w, Z, Ui1, Upl] ld1\t%0.s, %5/z, [%1, %2.s, sxtw] - [?w, rk, 0, Z, Ui1, Upl] ^ - [&w, rk, w, Ui1, Ui1, Upl] ld1\t%0.s, %5/z, [%1, %2.s, uxtw] - [?w, rk, 0, Ui1, Ui1, Upl] ^ - [&w, rk, w, Z, i, Upl] ld1\t%0.s, %5/z, [%1, %2.s, sxtw %p4] - [?w, rk, 0, Z, i, Upl] ^ - [&w, rk, w, Ui1, i, Upl] ld1\t%0.s, %5/z, [%1, %2.s, uxtw %p4] - [?w, rk, 0, Ui1, i, Upl] ^ - } + "@ + ld1\t%0.s, %5/z, [%2.s] + ld1\t%0.s, %5/z, [%2.s, #%1] + ld1\t%0.s, %5/z, [%1, %2.s, sxtw] + ld1\t%0.s, %5/z, [%1, %2.s, uxtw] + ld1\t%0.s, %5/z, [%1, %2.s, sxtw %p4] + ld1\t%0.s, %5/z, [%1, %2.s, uxtw %p4]" ) ;; Predicated gather loads for 64-bit elements. The value of operand 3 ;; doesn't matter in this case. (define_insn "mask_gather_load" - [(set (match_operand:SVE_2 0 "register_operand") + [(set (match_operand:SVE_2 0 "register_operand" "=w, w, w, w") (unspec:SVE_2 - [(match_operand:VNx2BI 5 "register_operand") - (match_operand:DI 1 "aarch64_sve_gather_offset_") - (match_operand:VNx2DI 2 "register_operand") + [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl, Upl, Upl") + (match_operand:DI 1 "aarch64_sve_gather_offset_" "Z, vgd, rk, rk") + (match_operand:VNx2DI 2 "register_operand" "w, w, w, w") (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_") + (match_operand:DI 4 "aarch64_gather_scale_operand_" "Ui1, Ui1, Ui1, i") (mem:BLK (scratch))] UNSPEC_LD1_GATHER))] "TARGET_SVE" - {@ [cons: =0, 1, 2, 3, 4, 5] - [&w, Z, w, i, Ui1, Upl] ld1\t%0.d, %5/z, [%2.d] - [?w, Z, 0, i, Ui1, Upl] ^ - [&w, vgd, w, i, Ui1, Upl] ld1\t%0.d, %5/z, [%2.d, #%1] - [?w, vgd, 0, i, Ui1, Upl] ^ - [&w, rk, w, i, Ui1, Upl] ld1\t%0.d, %5/z, [%1, %2.d] - [?w, rk, 0, i, Ui1, Upl] ^ - [&w, rk, w, i, i, Upl] ld1\t%0.d, %5/z, [%1, %2.d, lsl %p4] - [?w, rk, 0, i, i, Upl] ^ - } + "@ + ld1\t%0.d, %5/z, [%2.d] + ld1\t%0.d, %5/z, [%2.d, #%1] + ld1\t%0.d, %5/z, [%1, %2.d] + ld1\t%0.d, %5/z, [%1, %2.d, lsl %p4]" ) ;; Likewise, but with the offset being extended from 32 bits. 
(define_insn_and_rewrite "*mask_gather_load_xtw_unpacked" - [(set (match_operand:SVE_2 0 "register_operand") + [(set (match_operand:SVE_2 0 "register_operand" "=w, w") (unspec:SVE_2 - [(match_operand:VNx2BI 5 "register_operand") - (match_operand:DI 1 "register_operand") + [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl") + (match_operand:DI 1 "register_operand" "rk, rk") (unspec:VNx2DI [(match_operand 6) (ANY_EXTEND:VNx2DI - (match_operand:VNx2SI 2 "register_operand"))] + (match_operand:VNx2SI 2 "register_operand" "w, w"))] UNSPEC_PRED_X) (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_") + (match_operand:DI 4 "aarch64_gather_scale_operand_" "Ui1, i") (mem:BLK (scratch))] UNSPEC_LD1_GATHER))] "TARGET_SVE" - {@ [cons: =0, 1, 2, 3, 4, 5] - [&w, rk, w, i, Ui1, Upl ] ld1\t%0.d, %5/z, [%1, %2.d, xtw] - [?w, rk, 0, i, Ui1, Upl ] ^ - [&w, rk, w, i, i, Upl ] ld1\t%0.d, %5/z, [%1, %2.d, xtw %p4] - [?w, rk, 0, i, i, Upl ] ^ - } + "@ + ld1\t%0.d, %5/z, [%1, %2.d, xtw] + ld1\t%0.d, %5/z, [%1, %2.d, xtw %p4]" "&& !CONSTANT_P (operands[6])" { operands[6] = CONSTM1_RTX (VNx2BImode); @@ -1500,27 +1485,24 @@ ;; Likewise, but with the offset being truncated to 32 bits and then ;; sign-extended. (define_insn_and_rewrite "*mask_gather_load_sxtw" - [(set (match_operand:SVE_2 0 "register_operand") + [(set (match_operand:SVE_2 0 "register_operand" "=w, w") (unspec:SVE_2 - [(match_operand:VNx2BI 5 "register_operand") - (match_operand:DI 1 "register_operand") + [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl") + (match_operand:DI 1 "register_operand" "rk, rk") (unspec:VNx2DI [(match_operand 6) (sign_extend:VNx2DI (truncate:VNx2SI - (match_operand:VNx2DI 2 "register_operand")))] + (match_operand:VNx2DI 2 "register_operand" "w, w")))] UNSPEC_PRED_X) (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_") + (match_operand:DI 4 "aarch64_gather_scale_operand_" "Ui1, i") (mem:BLK (scratch))] UNSPEC_LD1_GATHER))] "TARGET_SVE" - {@ [cons: =0, 1, 2, 3, 4, 5] - [&w, rk, w, i, Ui1, Upl ] ld1\t%0.d, %5/z, [%1, %2.d, sxtw] - [?w, rk, 0, i, Ui1, Upl ] ^ - [&w, rk, w, i, i, Upl ] ld1\t%0.d, %5/z, [%1, %2.d, sxtw %p4] - [?w, rk, 0, i, i, Upl ] ^ - } + "@ + ld1\t%0.d, %5/z, [%1, %2.d, sxtw] + ld1\t%0.d, %5/z, [%1, %2.d, sxtw %p4]" "&& !CONSTANT_P (operands[6])" { operands[6] = CONSTM1_RTX (VNx2BImode); @@ -1530,24 +1512,21 @@ ;; Likewise, but with the offset being truncated to 32 bits and then ;; zero-extended. 
(define_insn "*mask_gather_load_uxtw" - [(set (match_operand:SVE_2 0 "register_operand") + [(set (match_operand:SVE_2 0 "register_operand" "=w, w") (unspec:SVE_2 - [(match_operand:VNx2BI 5 "register_operand") - (match_operand:DI 1 "register_operand") + [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl") + (match_operand:DI 1 "register_operand" "rk, rk") (and:VNx2DI - (match_operand:VNx2DI 2 "register_operand") + (match_operand:VNx2DI 2 "register_operand" "w, w") (match_operand:VNx2DI 6 "aarch64_sve_uxtw_immediate")) (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_") + (match_operand:DI 4 "aarch64_gather_scale_operand_" "Ui1, i") (mem:BLK (scratch))] UNSPEC_LD1_GATHER))] "TARGET_SVE" - {@ [cons: =0, 1, 2, 3, 4, 5] - [&w, rk, w, i, Ui1, Upl ] ld1\t%0.d, %5/z, [%1, %2.d, uxtw] - [?w, rk, 0, i, Ui1, Upl ] ^ - [&w, rk, w, i, i, Upl ] ld1\t%0.d, %5/z, [%1, %2.d, uxtw %p4] - [?w, rk, 0, i, i, Upl ] ^ - } + "@ + ld1\t%0.d, %5/z, [%1, %2.d, uxtw] + ld1\t%0.d, %5/z, [%1, %2.d, uxtw %p4]" ) ;; ------------------------------------------------------------------------- @@ -1565,34 +1544,27 @@ ;; Predicated extending gather loads for 32-bit elements. Operand 3 is ;; true for unsigned extension and false for signed extension. (define_insn_and_rewrite "@aarch64_gather_load_" - [(set (match_operand:SVE_4HSI 0 "register_operand") + [(set (match_operand:SVE_4HSI 0 "register_operand" "=w, w, w, w, w, w") (unspec:SVE_4HSI - [(match_operand:VNx4BI 6 "general_operand") + [(match_operand:VNx4BI 6 "general_operand" "UplDnm, UplDnm, UplDnm, UplDnm, UplDnm, UplDnm") (ANY_EXTEND:SVE_4HSI (unspec:SVE_4BHI - [(match_operand:VNx4BI 5 "register_operand") - (match_operand:DI 1 "aarch64_sve_gather_offset_") - (match_operand:VNx4SI 2 "register_operand") - (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_") + [(match_operand:VNx4BI 5 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl") + (match_operand:DI 1 "aarch64_sve_gather_offset_" "Z, vg, rk, rk, rk, rk") + (match_operand:VNx4SI 2 "register_operand" "w, w, w, w, w, w") + (match_operand:DI 3 "const_int_operand" "Ui1, Ui1, Z, Ui1, Z, Ui1") + (match_operand:DI 4 "aarch64_gather_scale_operand_" "Ui1, Ui1, Ui1, Ui1, i, i") (mem:BLK (scratch))] UNSPEC_LD1_GATHER))] UNSPEC_PRED_X))] "TARGET_SVE && (~ & ) == 0" - {@ [cons: =0, 1, 2, 3, 4, 5, 6] - [&w, Z, w, Ui1, Ui1, Upl, UplDnm] ld1\t%0.s, %5/z, [%2.s] - [?w, Z, 0, Ui1, Ui1, Upl, UplDnm] ^ - [&w, vg, w, Ui1, Ui1, Upl, UplDnm] ld1\t%0.s, %5/z, [%2.s, #%1] - [?w, vg, 0, Ui1, Ui1, Upl, UplDnm] ^ - [&w, rk, w, Z, Ui1, Upl, UplDnm] ld1\t%0.s, %5/z, [%1, %2.s, sxtw] - [?w, rk, 0, Z, Ui1, Upl, UplDnm] ^ - [&w, rk, w, Ui1, Ui1, Upl, UplDnm] ld1\t%0.s, %5/z, [%1, %2.s, uxtw] - [?w, rk, 0, Ui1, Ui1, Upl, UplDnm] ^ - [&w, rk, w, Z, i, Upl, UplDnm] ld1\t%0.s, %5/z, [%1, %2.s, sxtw %p4] - [?w, rk, 0, Z, i, Upl, UplDnm] ^ - [&w, rk, w, Ui1, i, Upl, UplDnm] ld1\t%0.s, %5/z, [%1, %2.s, uxtw %p4] - [?w, rk, 0, Ui1, i, Upl, UplDnm] ^ - } + "@ + ld1\t%0.s, %5/z, [%2.s] + ld1\t%0.s, %5/z, [%2.s, #%1] + ld1\t%0.s, %5/z, [%1, %2.s, sxtw] + ld1\t%0.s, %5/z, [%1, %2.s, uxtw] + ld1\t%0.s, %5/z, [%1, %2.s, sxtw %p4] + ld1\t%0.s, %5/z, [%1, %2.s, uxtw %p4]" "&& !CONSTANT_P (operands[6])" { operands[6] = CONSTM1_RTX (VNx4BImode); @@ -1602,30 +1574,25 @@ ;; Predicated extending gather loads for 64-bit elements. The value of ;; operand 3 doesn't matter in this case. 
(define_insn_and_rewrite "@aarch64_gather_load_" - [(set (match_operand:SVE_2HSDI 0 "register_operand") + [(set (match_operand:SVE_2HSDI 0 "register_operand" "=w, w, w, w") (unspec:SVE_2HSDI - [(match_operand:VNx2BI 6 "general_operand") + [(match_operand:VNx2BI 6 "general_operand" "UplDnm, UplDnm, UplDnm, UplDnm") (ANY_EXTEND:SVE_2HSDI (unspec:SVE_2BHSI - [(match_operand:VNx2BI 5 "register_operand") - (match_operand:DI 1 "aarch64_sve_gather_offset_") - (match_operand:VNx2DI 2 "register_operand") + [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl, Upl, Upl") + (match_operand:DI 1 "aarch64_sve_gather_offset_" "Z, vg, rk, rk") + (match_operand:VNx2DI 2 "register_operand" "w, w, w, w") (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_") + (match_operand:DI 4 "aarch64_gather_scale_operand_" "Ui1, Ui1, Ui1, i") (mem:BLK (scratch))] UNSPEC_LD1_GATHER))] UNSPEC_PRED_X))] "TARGET_SVE && (~ & ) == 0" - {@ [cons: =0, 1, 2, 3, 4, 5, 6] - [&w, Z, w, i, Ui1, Upl, UplDnm] ld1\t%0.d, %5/z, [%2.d] - [?w, Z, 0, i, Ui1, Upl, UplDnm] ^ - [&w, vg, w, i, Ui1, Upl, UplDnm] ld1\t%0.d, %5/z, [%2.d, #%1] - [?w, vg, 0, i, Ui1, Upl, UplDnm] ^ - [&w, rk, w, i, Ui1, Upl, UplDnm] ld1\t%0.d, %5/z, [%1, %2.d] - [?w, rk, 0, i, Ui1, Upl, UplDnm] ^ - [&w, rk, w, i, i, Upl, UplDnm] ld1\t%0.d, %5/z, [%1, %2.d, lsl %p4] - [?w, rk, 0, i, i, Upl, UplDnm] ^ - } + "@ + ld1\t%0.d, %5/z, [%2.d] + ld1\t%0.d, %5/z, [%2.d, #%1] + ld1\t%0.d, %5/z, [%1, %2.d] + ld1\t%0.d, %5/z, [%1, %2.d, lsl %p4]" "&& !CONSTANT_P (operands[6])" { operands[6] = CONSTM1_RTX (VNx2BImode); @@ -1634,30 +1601,27 @@ ;; Likewise, but with the offset being extended from 32 bits. (define_insn_and_rewrite "*aarch64_gather_load__xtw_unpacked" - [(set (match_operand:SVE_2HSDI 0 "register_operand") + [(set (match_operand:SVE_2HSDI 0 "register_operand" "=w, w") (unspec:SVE_2HSDI [(match_operand 6) (ANY_EXTEND:SVE_2HSDI (unspec:SVE_2BHSI - [(match_operand:VNx2BI 5 "register_operand") - (match_operand:DI 1 "aarch64_reg_or_zero") + [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl") + (match_operand:DI 1 "aarch64_reg_or_zero" "rk, rk") (unspec:VNx2DI [(match_operand 7) (ANY_EXTEND2:VNx2DI - (match_operand:VNx2SI 2 "register_operand"))] + (match_operand:VNx2SI 2 "register_operand" "w, w"))] UNSPEC_PRED_X) (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_") + (match_operand:DI 4 "aarch64_gather_scale_operand_" "Ui1, i") (mem:BLK (scratch))] UNSPEC_LD1_GATHER))] UNSPEC_PRED_X))] "TARGET_SVE && (~ & ) == 0" - {@ [cons: =0, 1, 2, 3, 4, 5] - [&w, rk, w, i, Ui1, Upl ] ld1\t%0.d, %5/z, [%1, %2.d, xtw] - [?w, rk, 0, i, Ui1, Upl ] ^ - [&w, rk, w, i, i, Upl ] ld1\t%0.d, %5/z, [%1, %2.d, xtw %p4] - [?w, rk, 0, i, i, Upl ] ^ - } + "@ + ld1\t%0.d, %5/z, [%1, %2.d, xtw] + ld1\t%0.d, %5/z, [%1, %2.d, xtw %p4]" "&& (!CONSTANT_P (operands[6]) || !CONSTANT_P (operands[7]))" { operands[6] = CONSTM1_RTX (VNx2BImode); @@ -1668,31 +1632,28 @@ ;; Likewise, but with the offset being truncated to 32 bits and then ;; sign-extended. 
(define_insn_and_rewrite "*aarch64_gather_load__sxtw" - [(set (match_operand:SVE_2HSDI 0 "register_operand") + [(set (match_operand:SVE_2HSDI 0 "register_operand" "=w, w") (unspec:SVE_2HSDI [(match_operand 6) (ANY_EXTEND:SVE_2HSDI (unspec:SVE_2BHSI - [(match_operand:VNx2BI 5 "register_operand") - (match_operand:DI 1 "aarch64_reg_or_zero") + [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl") + (match_operand:DI 1 "aarch64_reg_or_zero" "rk, rk") (unspec:VNx2DI [(match_operand 7) (sign_extend:VNx2DI (truncate:VNx2SI - (match_operand:VNx2DI 2 "register_operand")))] + (match_operand:VNx2DI 2 "register_operand" "w, w")))] UNSPEC_PRED_X) (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_") + (match_operand:DI 4 "aarch64_gather_scale_operand_" "Ui1, i") (mem:BLK (scratch))] UNSPEC_LD1_GATHER))] UNSPEC_PRED_X))] "TARGET_SVE && (~ & ) == 0" - {@ [cons: =0, 1, 2, 3, 4, 5] - [&w, rk, w, i, Ui1, Upl ] ld1\t%0.d, %5/z, [%1, %2.d, sxtw] - [?w, rk, 0, i, Ui1, Upl ] ^ - [&w, rk, w, i, i, Upl ] ld1\t%0.d, %5/z, [%1, %2.d, sxtw %p4] - [?w, rk, 0, i, i, Upl ] ^ - } + "@ + ld1\t%0.d, %5/z, [%1, %2.d, sxtw] + ld1\t%0.d, %5/z, [%1, %2.d, sxtw %p4]" "&& (!CONSTANT_P (operands[6]) || !CONSTANT_P (operands[7]))" { operands[6] = CONSTM1_RTX (VNx2BImode); @@ -1703,28 +1664,25 @@ ;; Likewise, but with the offset being truncated to 32 bits and then ;; zero-extended. (define_insn_and_rewrite "*aarch64_gather_load__uxtw" - [(set (match_operand:SVE_2HSDI 0 "register_operand") + [(set (match_operand:SVE_2HSDI 0 "register_operand" "=w, w") (unspec:SVE_2HSDI [(match_operand 7) (ANY_EXTEND:SVE_2HSDI (unspec:SVE_2BHSI - [(match_operand:VNx2BI 5 "register_operand") - (match_operand:DI 1 "aarch64_reg_or_zero") + [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl") + (match_operand:DI 1 "aarch64_reg_or_zero" "rk, rk") (and:VNx2DI - (match_operand:VNx2DI 2 "register_operand") + (match_operand:VNx2DI 2 "register_operand" "w, w") (match_operand:VNx2DI 6 "aarch64_sve_uxtw_immediate")) (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_") + (match_operand:DI 4 "aarch64_gather_scale_operand_" "Ui1, i") (mem:BLK (scratch))] UNSPEC_LD1_GATHER))] UNSPEC_PRED_X))] "TARGET_SVE && (~ & ) == 0" - {@ [cons: =0, 1, 2, 3, 4, 5] - [&w, rk, w, i, Ui1, Upl ] ld1\t%0.d, %5/z, [%1, %2.d, uxtw] - [?w, rk, 0, i, Ui1, Upl ] ^ - [&w, rk, w, i, i, Upl ] ld1\t%0.d, %5/z, [%1, %2.d, uxtw %p4] - [?w, rk, 0, i, i, Upl ] ^ - } + "@ + ld1\t%0.d, %5/z, [%1, %2.d, uxtw] + ld1\t%0.d, %5/z, [%1, %2.d, uxtw %p4]" "&& !CONSTANT_P (operands[7])" { operands[7] = CONSTM1_RTX (VNx2BImode); @@ -1742,83 +1700,68 @@ ;; Predicated first-faulting gather loads for 32-bit elements. Operand ;; 3 is true for unsigned extension and false for signed extension. 
(define_insn "@aarch64_ldff1_gather" - [(set (match_operand:SVE_FULL_S 0 "register_operand") + [(set (match_operand:SVE_FULL_S 0 "register_operand" "=w, w, w, w, w, w") (unspec:SVE_FULL_S - [(match_operand:VNx4BI 5 "register_operand") - (match_operand:DI 1 "aarch64_sve_gather_offset_w") - (match_operand:VNx4SI 2 "register_operand") - (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_w") + [(match_operand:VNx4BI 5 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl") + (match_operand:DI 1 "aarch64_sve_gather_offset_w" "Z, vgw, rk, rk, rk, rk") + (match_operand:VNx4SI 2 "register_operand" "w, w, w, w, w, w") + (match_operand:DI 3 "const_int_operand" "i, i, Z, Ui1, Z, Ui1") + (match_operand:DI 4 "aarch64_gather_scale_operand_w" "Ui1, Ui1, Ui1, Ui1, i, i") (mem:BLK (scratch)) (reg:VNx16BI FFRT_REGNUM)] UNSPEC_LDFF1_GATHER))] "TARGET_SVE" - {@ [cons: =0, 1, 2, 3, 4, 5 ] - [&w, Z, w, i, Ui1, Upl] ldff1w\t%0.s, %5/z, [%2.s] - [?w, Z, 0, i, Ui1, Upl] ^ - [&w, vgw, w, i, Ui1, Upl] ldff1w\t%0.s, %5/z, [%2.s, #%1] - [?w, vgw, 0, i, Ui1, Upl] ^ - [&w, rk, w, Z, Ui1, Upl] ldff1w\t%0.s, %5/z, [%1, %2.s, sxtw] - [?w, rk, 0, Z, Ui1, Upl] ^ - [&w, rk, w, Ui1, Ui1, Upl] ldff1w\t%0.s, %5/z, [%1, %2.s, uxtw] - [?w, rk, 0, Ui1, Ui1, Upl] ^ - [&w, rk, w, Z, i, Upl] ldff1w\t%0.s, %5/z, [%1, %2.s, sxtw %p4] - [?w, rk, 0, Z, i, Upl] ^ - [&w, rk, w, Ui1, i, Upl] ldff1w\t%0.s, %5/z, [%1, %2.s, uxtw %p4] - [?w, rk, 0, Ui1, i, Upl] ^ - } + "@ + ldff1w\t%0.s, %5/z, [%2.s] + ldff1w\t%0.s, %5/z, [%2.s, #%1] + ldff1w\t%0.s, %5/z, [%1, %2.s, sxtw] + ldff1w\t%0.s, %5/z, [%1, %2.s, uxtw] + ldff1w\t%0.s, %5/z, [%1, %2.s, sxtw %p4] + ldff1w\t%0.s, %5/z, [%1, %2.s, uxtw %p4]" ) ;; Predicated first-faulting gather loads for 64-bit elements. The value ;; of operand 3 doesn't matter in this case. (define_insn "@aarch64_ldff1_gather" - [(set (match_operand:SVE_FULL_D 0 "register_operand") + [(set (match_operand:SVE_FULL_D 0 "register_operand" "=w, w, w, w") (unspec:SVE_FULL_D - [(match_operand:VNx2BI 5 "register_operand") - (match_operand:DI 1 "aarch64_sve_gather_offset_d") - (match_operand:VNx2DI 2 "register_operand") + [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl, Upl, Upl") + (match_operand:DI 1 "aarch64_sve_gather_offset_d" "Z, vgd, rk, rk") + (match_operand:VNx2DI 2 "register_operand" "w, w, w, w") (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_d") + (match_operand:DI 4 "aarch64_gather_scale_operand_d" "Ui1, Ui1, Ui1, i") (mem:BLK (scratch)) (reg:VNx16BI FFRT_REGNUM)] UNSPEC_LDFF1_GATHER))] "TARGET_SVE" - {@ [cons: =0, 1, 2, 3, 4, 5 ] - [&w, Z, w, i, Ui1, Upl ] ldff1d\t%0.d, %5/z, [%2.d] - [?w, Z, 0, i, Ui1, Upl ] ^ - [&w, vgd, w, i, Ui1, Upl ] ldff1d\t%0.d, %5/z, [%2.d, #%1] - [?w, vgd, 0, i, Ui1, Upl ] ^ - [&w, rk, w, i, Ui1, Upl ] ldff1d\t%0.d, %5/z, [%1, %2.d] - [?w, rk, 0, i, Ui1, Upl ] ^ - [&w, rk, w, i, i, Upl ] ldff1d\t%0.d, %5/z, [%1, %2.d, lsl %p4] - [?w, rk, 0, i, i, Upl ] ^ - } + "@ + ldff1d\t%0.d, %5/z, [%2.d] + ldff1d\t%0.d, %5/z, [%2.d, #%1] + ldff1d\t%0.d, %5/z, [%1, %2.d] + ldff1d\t%0.d, %5/z, [%1, %2.d, lsl %p4]" ) ;; Likewise, but with the offset being sign-extended from 32 bits. 
(define_insn_and_rewrite "*aarch64_ldff1_gather_sxtw" - [(set (match_operand:SVE_FULL_D 0 "register_operand") + [(set (match_operand:SVE_FULL_D 0 "register_operand" "=w, w") (unspec:SVE_FULL_D - [(match_operand:VNx2BI 5 "register_operand") - (match_operand:DI 1 "register_operand") + [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl") + (match_operand:DI 1 "register_operand" "rk, rk") (unspec:VNx2DI [(match_operand 6) (sign_extend:VNx2DI (truncate:VNx2SI - (match_operand:VNx2DI 2 "register_operand")))] + (match_operand:VNx2DI 2 "register_operand" "w, w")))] UNSPEC_PRED_X) (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_d") + (match_operand:DI 4 "aarch64_gather_scale_operand_d" "Ui1, i") (mem:BLK (scratch)) (reg:VNx16BI FFRT_REGNUM)] UNSPEC_LDFF1_GATHER))] "TARGET_SVE" - {@ [cons: =0, 1, 2, 3, 4, 5] - [&w, rk, w, i, Ui1, Upl ] ldff1d\t%0.d, %5/z, [%1, %2.d, sxtw] - [?w, rk, 0, i, Ui1, Upl ] ^ - [&w, rk, w, i, i, Upl ] ldff1d\t%0.d, %5/z, [%1, %2.d, sxtw %p4] - [?w, rk, 0, i, i, Upl ] ^ - } + "@ + ldff1d\t%0.d, %5/z, [%1, %2.d, sxtw] + ldff1d\t%0.d, %5/z, [%1, %2.d, sxtw %p4]" "&& !CONSTANT_P (operands[6])" { operands[6] = CONSTM1_RTX (VNx2BImode); @@ -1827,25 +1770,22 @@ ;; Likewise, but with the offset being zero-extended from 32 bits. (define_insn "*aarch64_ldff1_gather_uxtw" - [(set (match_operand:SVE_FULL_D 0 "register_operand") + [(set (match_operand:SVE_FULL_D 0 "register_operand" "=w, w") (unspec:SVE_FULL_D - [(match_operand:VNx2BI 5 "register_operand") - (match_operand:DI 1 "register_operand") + [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl") + (match_operand:DI 1 "register_operand" "rk, rk") (and:VNx2DI - (match_operand:VNx2DI 2 "register_operand") + (match_operand:VNx2DI 2 "register_operand" "w, w") (match_operand:VNx2DI 6 "aarch64_sve_uxtw_immediate")) (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_d") + (match_operand:DI 4 "aarch64_gather_scale_operand_d" "Ui1, i") (mem:BLK (scratch)) (reg:VNx16BI FFRT_REGNUM)] UNSPEC_LDFF1_GATHER))] "TARGET_SVE" - {@ [cons: =0, 1, 2, 3, 4, 5] - [&w, rk, w, i, Ui1, Upl ] ldff1d\t%0.d, %5/z, [%1, %2.d, uxtw] - [?w, rk, 0, i, Ui1, Upl ] ^ - [&w, rk, w, i, i, Upl ] ldff1d\t%0.d, %5/z, [%1, %2.d, uxtw %p4] - [?w, rk, 0, i, i, Upl ] ^ - } + "@ + ldff1d\t%0.d, %5/z, [%1, %2.d, uxtw] + ldff1d\t%0.d, %5/z, [%1, %2.d, uxtw %p4]" ) ;; ------------------------------------------------------------------------- @@ -1863,35 +1803,28 @@ ;; Predicated extending first-faulting gather loads for 32-bit elements. ;; Operand 3 is true for unsigned extension and false for signed extension. 
(define_insn_and_rewrite "@aarch64_ldff1_gather_" - [(set (match_operand:VNx4_WIDE 0 "register_operand") + [(set (match_operand:VNx4_WIDE 0 "register_operand" "=w, w, w, w, w, w") (unspec:VNx4_WIDE - [(match_operand:VNx4BI 6 "general_operand") + [(match_operand:VNx4BI 6 "general_operand" "UplDnm, UplDnm, UplDnm, UplDnm, UplDnm, UplDnm") (ANY_EXTEND:VNx4_WIDE (unspec:VNx4_NARROW - [(match_operand:VNx4BI 5 "register_operand") - (match_operand:DI 1 "aarch64_sve_gather_offset_") - (match_operand:VNx4_WIDE 2 "register_operand") - (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_") + [(match_operand:VNx4BI 5 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl") + (match_operand:DI 1 "aarch64_sve_gather_offset_" "Z, vg, rk, rk, rk, rk") + (match_operand:VNx4_WIDE 2 "register_operand" "w, w, w, w, w, w") + (match_operand:DI 3 "const_int_operand" "i, i, Z, Ui1, Z, Ui1") + (match_operand:DI 4 "aarch64_gather_scale_operand_" "Ui1, Ui1, Ui1, Ui1, i, i") (mem:BLK (scratch)) (reg:VNx16BI FFRT_REGNUM)] UNSPEC_LDFF1_GATHER))] UNSPEC_PRED_X))] "TARGET_SVE" - {@ [cons: =0, 1, 2, 3, 4, 5, 6] - [&w, Z, w, i, Ui1, Upl, UplDnm] ldff1\t%0.s, %5/z, [%2.s] - [?w, Z, 0, i, Ui1, Upl, UplDnm] ^ - [&w, vg, w, i, Ui1, Upl, UplDnm] ldff1\t%0.s, %5/z, [%2.s, #%1] - [?w, vg, 0, i, Ui1, Upl, UplDnm] ^ - [&w, rk, w, Z, Ui1, Upl, UplDnm] ldff1\t%0.s, %5/z, [%1, %2.s, sxtw] - [?w, rk, 0, Z, Ui1, Upl, UplDnm] ^ - [&w, rk, w, Ui1, Ui1, Upl, UplDnm] ldff1\t%0.s, %5/z, [%1, %2.s, uxtw] - [?w, rk, 0, Ui1, Ui1, Upl, UplDnm] ^ - [&w, rk, w, Z, i, Upl, UplDnm] ldff1\t%0.s, %5/z, [%1, %2.s, sxtw %p4] - [?w, rk, 0, Z, i, Upl, UplDnm] ^ - [&w, rk, w, Ui1, i, Upl, UplDnm] ldff1\t%0.s, %5/z, [%1, %2.s, uxtw %p4] - [?w, rk, 0, Ui1, i, Upl, UplDnm] ^ - } + "@ + ldff1\t%0.s, %5/z, [%2.s] + ldff1\t%0.s, %5/z, [%2.s, #%1] + ldff1\t%0.s, %5/z, [%1, %2.s, sxtw] + ldff1\t%0.s, %5/z, [%1, %2.s, uxtw] + ldff1\t%0.s, %5/z, [%1, %2.s, sxtw %p4] + ldff1\t%0.s, %5/z, [%1, %2.s, uxtw %p4]" "&& !CONSTANT_P (operands[6])" { operands[6] = CONSTM1_RTX (VNx4BImode); @@ -1901,31 +1834,26 @@ ;; Predicated extending first-faulting gather loads for 64-bit elements. ;; The value of operand 3 doesn't matter in this case. 
(define_insn_and_rewrite "@aarch64_ldff1_gather_" - [(set (match_operand:VNx2_WIDE 0 "register_operand") + [(set (match_operand:VNx2_WIDE 0 "register_operand" "=w, w, w, w") (unspec:VNx2_WIDE - [(match_operand:VNx2BI 6 "general_operand") + [(match_operand:VNx2BI 6 "general_operand" "UplDnm, UplDnm, UplDnm, UplDnm") (ANY_EXTEND:VNx2_WIDE (unspec:VNx2_NARROW - [(match_operand:VNx2BI 5 "register_operand") - (match_operand:DI 1 "aarch64_sve_gather_offset_") - (match_operand:VNx2_WIDE 2 "register_operand") + [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl, Upl, Upl") + (match_operand:DI 1 "aarch64_sve_gather_offset_" "Z, vg, rk, rk") + (match_operand:VNx2_WIDE 2 "register_operand" "w, w, w, w") (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_") + (match_operand:DI 4 "aarch64_gather_scale_operand_" "Ui1, Ui1, Ui1, i") (mem:BLK (scratch)) (reg:VNx16BI FFRT_REGNUM)] UNSPEC_LDFF1_GATHER))] UNSPEC_PRED_X))] "TARGET_SVE" - {@ [cons: =0, 1, 2, 3, 4, 5, 6] - [&w, Z, w, i, Ui1, Upl, UplDnm] ldff1\t%0.d, %5/z, [%2.d] - [?w, Z, 0, i, Ui1, Upl, UplDnm] ^ - [&w, vg, w, i, Ui1, Upl, UplDnm] ldff1\t%0.d, %5/z, [%2.d, #%1] - [?w, vg, 0, i, Ui1, Upl, UplDnm] ^ - [&w, rk, w, i, Ui1, Upl, UplDnm] ldff1\t%0.d, %5/z, [%1, %2.d] - [?w, rk, 0, i, Ui1, Upl, UplDnm] ^ - [&w, rk, w, i, i, Upl, UplDnm] ldff1\t%0.d, %5/z, [%1, %2.d, lsl %p4] - [?w, rk, w, i, i, Upl, UplDnm] ^ - } + "@ + ldff1\t%0.d, %5/z, [%2.d] + ldff1\t%0.d, %5/z, [%2.d, #%1] + ldff1\t%0.d, %5/z, [%1, %2.d] + ldff1\t%0.d, %5/z, [%1, %2.d, lsl %p4]" "&& !CONSTANT_P (operands[6])" { operands[6] = CONSTM1_RTX (VNx2BImode); @@ -1934,32 +1862,29 @@ ;; Likewise, but with the offset being sign-extended from 32 bits. (define_insn_and_rewrite "*aarch64_ldff1_gather__sxtw" - [(set (match_operand:VNx2_WIDE 0 "register_operand") + [(set (match_operand:VNx2_WIDE 0 "register_operand" "=w, w") (unspec:VNx2_WIDE [(match_operand 6) (ANY_EXTEND:VNx2_WIDE (unspec:VNx2_NARROW - [(match_operand:VNx2BI 5 "register_operand") - (match_operand:DI 1 "aarch64_reg_or_zero") + [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl") + (match_operand:DI 1 "aarch64_reg_or_zero" "rk, rk") (unspec:VNx2DI [(match_operand 7) (sign_extend:VNx2DI (truncate:VNx2SI - (match_operand:VNx2DI 2 "register_operand")))] + (match_operand:VNx2DI 2 "register_operand" "w, w")))] UNSPEC_PRED_X) (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_") + (match_operand:DI 4 "aarch64_gather_scale_operand_" "Ui1, i") (mem:BLK (scratch)) (reg:VNx16BI FFRT_REGNUM)] UNSPEC_LDFF1_GATHER))] UNSPEC_PRED_X))] "TARGET_SVE" - {@ [cons: =0, 1, 2, 3, 4, 5] - [&w, rk, w, i, Ui1, Upl ] ldff1\t%0.d, %5/z, [%1, %2.d, sxtw] - [?w, rk, 0, i, Ui1, Upl ] ^ - [&w, rk, w, i, i, Upl ] ldff1\t%0.d, %5/z, [%1, %2.d, sxtw %p4] - [?w, rk, 0, i, i, Upl ] ^ - } + "@ + ldff1\t%0.d, %5/z, [%1, %2.d, sxtw] + ldff1\t%0.d, %5/z, [%1, %2.d, sxtw %p4]" "&& (!CONSTANT_P (operands[6]) || !CONSTANT_P (operands[7]))" { operands[6] = CONSTM1_RTX (VNx2BImode); @@ -1969,29 +1894,26 @@ ;; Likewise, but with the offset being zero-extended from 32 bits. 
(define_insn_and_rewrite "*aarch64_ldff1_gather__uxtw" - [(set (match_operand:VNx2_WIDE 0 "register_operand") + [(set (match_operand:VNx2_WIDE 0 "register_operand" "=w, w") (unspec:VNx2_WIDE [(match_operand 7) (ANY_EXTEND:VNx2_WIDE (unspec:VNx2_NARROW - [(match_operand:VNx2BI 5 "register_operand") - (match_operand:DI 1 "aarch64_reg_or_zero") + [(match_operand:VNx2BI 5 "register_operand" "Upl, Upl") + (match_operand:DI 1 "aarch64_reg_or_zero" "rk, rk") (and:VNx2DI - (match_operand:VNx2DI 2 "register_operand") + (match_operand:VNx2DI 2 "register_operand" "w, w") (match_operand:VNx2DI 6 "aarch64_sve_uxtw_immediate")) (match_operand:DI 3 "const_int_operand") - (match_operand:DI 4 "aarch64_gather_scale_operand_") + (match_operand:DI 4 "aarch64_gather_scale_operand_" "Ui1, i") (mem:BLK (scratch)) (reg:VNx16BI FFRT_REGNUM)] UNSPEC_LDFF1_GATHER))] UNSPEC_PRED_X))] "TARGET_SVE" - {@ [cons: =0, 1, 2, 3, 4, 5] - [&w, rk, w, i, Ui1, Upl ] ldff1\t%0.d, %5/z, [%1, %2.d, uxtw] - [?w, rk, 0, i, Ui1, Upl ] ^ - [&w, rk, w, i, i, Upl ] ldff1\t%0.d, %5/z, [%1, %2.d, uxtw %p4] - [?w, rk, 0, i, i, Upl ] ^ - } + "@ + ldff1\t%0.d, %5/z, [%1, %2.d, uxtw] + ldff1\t%0.d, %5/z, [%1, %2.d, uxtw %p4]" "&& !CONSTANT_P (operands[7])" { operands[7] = CONSTM1_RTX (VNx2BImode); diff --git a/gcc/config/aarch64/aarch64-sve2.md b/gcc/config/aarch64/aarch64-sve2.md index 7a77e9b7502..da8a424dd57 100644 --- a/gcc/config/aarch64/aarch64-sve2.md +++ b/gcc/config/aarch64/aarch64-sve2.md @@ -102,43 +102,37 @@ ;; Non-extending loads. (define_insn "@aarch64_gather_ldnt" - [(set (match_operand:SVE_FULL_SD 0 "register_operand") + [(set (match_operand:SVE_FULL_SD 0 "register_operand" "=w, w") (unspec:SVE_FULL_SD - [(match_operand: 1 "register_operand") - (match_operand:DI 2 "aarch64_reg_or_zero") - (match_operand: 3 "register_operand") + [(match_operand: 1 "register_operand" "Upl, Upl") + (match_operand:DI 2 "aarch64_reg_or_zero" "Z, r") + (match_operand: 3 "register_operand" "w, w") (mem:BLK (scratch))] UNSPEC_LDNT1_GATHER))] "TARGET_SVE2" - {@ [cons: =0, 1, 2, 3] - [&w, Upl, Z, w ] ldnt1\t%0., %1/z, [%3.] - [?w, Upl, Z, 0 ] ^ - [&w, Upl, r, w ] ldnt1\t%0., %1/z, [%3., %2] - [?w, Upl, r, 0 ] ^ - } + "@ + ldnt1\t%0., %1/z, [%3.] + ldnt1\t%0., %1/z, [%3., %2]" ) ;; Extending loads. (define_insn_and_rewrite "@aarch64_gather_ldnt_" - [(set (match_operand:SVE_FULL_SDI 0 "register_operand") + [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, w") (unspec:SVE_FULL_SDI - [(match_operand: 4 "general_operand") + [(match_operand: 4 "general_operand" "UplDnm, UplDnm") (ANY_EXTEND:SVE_FULL_SDI (unspec:SVE_PARTIAL_I - [(match_operand: 1 "register_operand") - (match_operand:DI 2 "aarch64_reg_or_zero") - (match_operand: 3 "register_operand") + [(match_operand: 1 "register_operand" "Upl, Upl") + (match_operand:DI 2 "aarch64_reg_or_zero" "Z, r") + (match_operand: 3 "register_operand" "w, w") (mem:BLK (scratch))] UNSPEC_LDNT1_GATHER))] UNSPEC_PRED_X))] "TARGET_SVE2 && (~ & ) == 0" - {@ [cons: =0, 1, 2, 3, 4] - [&w, Upl, Z, w, UplDnm] ldnt1\t%0., %1/z, [%3.] - [?w, Upl, Z, 0, UplDnm] ^ - [&w, Upl, r, w, UplDnm] ldnt1\t%0., %1/z, [%3., %2] - [?w, Upl, r, 0, UplDnm] ^ - } + "@ + ldnt1\t%0., %1/z, [%3.] + ldnt1\t%0., %1/z, [%3., %2]" "&& !CONSTANT_P (operands[4])" { operands[4] = CONSTM1_RTX (mode);