From: Richard Sandiford
To: Prathamesh Kulkarni
Cc: gcc Patches
Subject: Re: [1/2] PR96463 - aarch64 specific changes
Date: Wed, 01 Jun 2022 09:42:40 +0100
In-Reply-To: (Prathamesh Kulkarni's message of "Tue, 31 May 2022 17:02:19 +0530")

Prathamesh Kulkarni writes:
> On Thu, 12 May 2022 at 16:15, Richard Sandiford
> wrote:
>>
>> Prathamesh Kulkarni writes:
>> > On Wed, 11 May 2022 at 12:44, Richard Sandiford
>> > wrote:
>> >>
>> >> Prathamesh Kulkarni writes:
>> >> > On Fri, 6 May 2022 at 16:00, Richard Sandiford
>> >> > wrote:
>> >> >>
>> >> >> Prathamesh Kulkarni writes:
>> >> >> > diff --git a/gcc/config/aarch64/aarch64-sve-builtins-base.cc b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
>> >> >> > index c24c0548724..1ef4ea2087b 100644
>> >> >> > --- a/gcc/config/aarch64/aarch64-sve-builtins-base.cc
>> >> >> > +++ b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
>> >> >> > @@ -44,6 +44,14 @@
>> >> >> >  #include "aarch64-sve-builtins-shapes.h"
>> >> >> >  #include "aarch64-sve-builtins-base.h"
>> >> >> >  #include "aarch64-sve-builtins-functions.h"
>> >> >> > +#include "aarch64-builtins.h"
>> >> >> > +#include "gimple-ssa.h"
>> >> >> > +#include "tree-phinodes.h"
>> >> >> > +#include "tree-ssa-operands.h"
>> >> >> > +#include "ssa-iterators.h"
>> >> >> > +#include "stringpool.h"
>> >> >> > +#include "value-range.h"
>> >> >> > +#include "tree-ssanames.h"
>> >> >>
>> >> >> Minor, but: I think the preferred approach is to include "ssa.h"
>> >> >> rather than include some of these headers directly.
>> >> >>
>> >> >> >
>> >> >> >  using namespace aarch64_sve;
>> >> >> >
>> >> >> > @@ -1207,6 +1215,56 @@ public:
>> >> >> >      insn_code icode = code_for_aarch64_sve_ld1rq (e.vector_mode (0));
>> >> >> >      return e.use_contiguous_load_insn (icode);
>> >> >> >    }
>> >> >> > +
>> >> >> > +  gimple *
>> >> >> > +  fold (gimple_folder &f) const OVERRIDE
>> >> >> > +  {
>> >> >> > +    tree arg0 = gimple_call_arg (f.call, 0);
>> >> >> > +    tree arg1 = gimple_call_arg (f.call, 1);
>> >> >> > +
>> >> >> > +    /* Transform:
>> >> >> > +       lhs = svld1rq ({-1, -1, ... }, arg1)
>> >> >> > +       into:
>> >> >> > +       tmp = mem_ref [(int * {ref-all}) arg1]
>> >> >> > +       lhs = vec_perm_expr.
>> >> >> > +       on little endian target.  */
>> >> >> > +
>> >> >> > +    if (!BYTES_BIG_ENDIAN
>> >> >> > +        && integer_all_onesp (arg0))
>> >> >> > +      {
>> >> >> > +        tree lhs = gimple_call_lhs (f.call);
>> >> >> > +        auto simd_type = aarch64_get_simd_info_for_type (Int32x4_t);
>> >> >>
>> >> >> Does this work for other element sizes?  I would have expected it
>> >> >> to be the (128-bit) Advanced SIMD vector associated with the same
>> >> >> element type as the SVE vector.
>> >> >>
>> >> >> The testcase should cover more than just int32x4_t -> svint32_t,
>> >> >> just to be sure.
>> >> > In the attached patch, it obtains corresponding advsimd type with:
>> >> >
>> >> > tree eltype = TREE_TYPE (lhs_type);
>> >> > unsigned nunits = 128 / TREE_INT_CST_LOW (TYPE_SIZE (eltype));
>> >> > tree vectype = build_vector_type (eltype, nunits);
>> >> >
>> >> > While this seems to work with different element sizes, I am not sure if it's
>> >> > the correct approach ?
>> >>
>> >> Yeah, that looks correct.  Other SVE code uses aarch64_vq_mode
>> >> to get the vector mode associated with a .Q “element”, so an
>> >> alternative would be:
>> >>
>> >>     machine_mode vq_mode = aarch64_vq_mode (TYPE_MODE (eltype)).require ();
>> >>     tree vectype = build_vector_type_for_mode (eltype, vq_mode);
>> >>
>> >> which is more explicit about wanting an Advanced SIMD vector.
>> >>
>> >> >> > +
>> >> >> > +        tree elt_ptr_type
>> >> >> > +          = build_pointer_type_for_mode (simd_type.eltype, VOIDmode, true);
>> >> >> > +        tree zero = build_zero_cst (elt_ptr_type);
>> >> >> > +
>> >> >> > +        /* Use element type alignment.  */
>> >> >> > +        tree access_type
>> >> >> > +          = build_aligned_type (simd_type.itype, TYPE_ALIGN (simd_type.eltype));
>> >> >> > +
>> >> >> > +        tree tmp = make_ssa_name_fn (cfun, access_type, 0);
>> >> >> > +        gimple *mem_ref_stmt
>> >> >> > +          = gimple_build_assign (tmp, fold_build2 (MEM_REF, access_type, arg1, zero));
>> >> >>
>> >> >> Long line.  Might be easier to format by assigning the fold_build2 result
>> >> >> to a temporary variable.
>> >> >>
>> >> >> > +        gsi_insert_before (f.gsi, mem_ref_stmt, GSI_SAME_STMT);
>> >> >> > +
>> >> >> > +        tree mem_ref_lhs = gimple_get_lhs (mem_ref_stmt);
>> >> >> > +        tree vectype = TREE_TYPE (mem_ref_lhs);
>> >> >> > +        tree lhs_type = TREE_TYPE (lhs);
>> >> >>
>> >> >> Is this necessary?  The code above supplied the types and I wouldn't
>> >> >> have expected them to change during the build process.
>> >> >>
>> >> >> > +
>> >> >> > +        int source_nelts = TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
>> >> >> > +        vec_perm_builder sel (TYPE_VECTOR_SUBPARTS (lhs_type), source_nelts, 1);
>> >> >> > +        for (int i = 0; i < source_nelts; i++)
>> >> >> > +          sel.quick_push (i);
>> >> >> > +
>> >> >> > +        vec_perm_indices indices (sel, 1, source_nelts);
>> >> >> > +        gcc_checking_assert (can_vec_perm_const_p (TYPE_MODE (lhs_type), indices));
>> >> >> > +        tree mask = vec_perm_indices_to_tree (lhs_type, indices);
>> >> >> > +        return gimple_build_assign (lhs, VEC_PERM_EXPR, mem_ref_lhs, mem_ref_lhs, mask);
>> >> >>
>> >> >> Nit: long line.
>> >> >>
>> >> >> > +      }
>> >> >> > +
>> >> >> > +    return NULL;
>> >> >> > +  }
>> >> >> > };
>> >> >> >
>> >> >> >  class svld1ro_impl : public load_replicate
>> >> >> > diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
>> >> >> > index f650abbc4ce..47810fec804 100644
>> >> >> > --- a/gcc/config/aarch64/aarch64.cc
>> >> >> > +++ b/gcc/config/aarch64/aarch64.cc
>> >> >> > @@ -23969,6 +23969,35 @@ aarch64_evpc_sve_tbl (struct expand_vec_perm_d *d)
>> >> >> >    return true;
>> >> >> >  }
>> >> >> >
>> >> >> > +/* Try to implement D using SVE dup instruction.  */
>> >> >> > +
>> >> >> > +static bool
>> >> >> > +aarch64_evpc_sve_dup (struct expand_vec_perm_d *d)
>> >> >> > +{
>> >> >> > +  if (BYTES_BIG_ENDIAN
>> >> >> > +      || d->perm.length ().is_constant ()
>> >> >> > +      || !d->one_vector_p
>> >> >> > +      || d->target == NULL
>> >> >> > +      || d->op0 == NULL
>> >> >>
>> >> >> These last two lines mean that we always return false for d->testing.
>> >> >> The idea instead is that the return value should be the same for both
>> >> >> d->testing and !d->testing.  The difference is that for !d->testing we
>> >> >> also emit code to do the permute.
>> >>
>> >> It doesn't look like the new patch addresses this.  There should be
>> >> no checks for/uses of “d->target” and “d->op0” until after:
>> >>
>> >>   if (d->testing_p)
>> >>     return true;
>> >>
>> >> This...
>> >>
>> >> >> > +      || GET_MODE_NUNITS (GET_MODE (d->target)).is_constant ()
>> >> >>
>> >> >> Sorry, I've forgotten the context now, but: these positive tests
>> >> >> for is_constant surprised me.  Do we really only want to do this
>> >> >> for variable-length SVE code generation, rather than fixed-length?
>> >> >>
>> >> >> > +      || !GET_MODE_NUNITS (GET_MODE (d->op0)).is_constant ())
>> >> >> > +    return false;
>> >> >> > +
>> >> >> > +  if (d->testing_p)
>> >> >> > +    return true;
>> >> >>
>> >> >> This should happen after the later tests, once we're sure that the
>> >> >> permute vector has the right form.  If the issue is that op0 isn't
>> >> >> provided for testing then I think the hook needs to be passed the
>> >> >> input mode alongside the result mode.
>> >>
>> >> ...was my guess about why the checks were there.
>> > Ah right sorry. IIUC, if d->testing is true, then d->op0 could be NULL ?
>> > In that case, how do we obtain input mode ?
>>
>> Well, like I say, I think we might need to extend the vec_perm_const
>> hook interface so that it gets passed the input mode, now that that
>> isn't necessarily the same as the output mode.
>>
>> It would be good to do that as a separate prepatch, since it would
>> affect other targets too.  And for safety, that patch should make all
>> existing implementations of the hook return false if the modes aren't
>> equal, including for aarch64.  The current patch can then make the
>> aarch64 hook treat the dup case as an exception.
> Hi Richard,
> I have attached updated patch, which tries to address above suggestions.
> I had a question about couple of things:
> (1) The patch resulted in ICE for float operands, because we were
> using lhs_type to build mask, which is float vector type.
> So I adjusted the patch to make mask vector of integer_type_node with
> length == length(lhs_type) if lhs has float vector type.
> Does that look OK ?

Let's use:

  build_vector_type (ssizetype, lhs_len)

unconditionally, even for integers.
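Something like this (untested sketch, reusing the names from the updated
patch below; the VEC_PERM_EXPR selector only needs to be an integer vector
with the same number of elements as the result):

	tree mask_type = build_vector_type (ssizetype, lhs_len);
	tree mask = vec_perm_indices_to_tree (mask_type, indices);
	return gimple_build_assign (lhs, VEC_PERM_EXPR,
				    mem_ref_lhs, mem_ref_lhs, mask);

so the float and integer cases don't need to be distinguished at all.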
> (2) Moved check for d->vmode != op_mode (and only checking for dup in
> that case), inside vec_perm_const_1,
> since it does some initial bookkeeping (like swapping operands),
> before calling respective functions.
> Does that look OK ?
>
> Thanks,
> Prathamesh
>>
>> Thanks,
>> Richard
>
> diff --git a/gcc/config/aarch64/aarch64-sve-builtins-base.cc b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
> index bee410929bd..48e849bec34 100644
> --- a/gcc/config/aarch64/aarch64-sve-builtins-base.cc
> +++ b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
> @@ -44,6 +44,7 @@
>  #include "aarch64-sve-builtins-shapes.h"
>  #include "aarch64-sve-builtins-base.h"
>  #include "aarch64-sve-builtins-functions.h"
> +#include "ssa.h"
>
>  using namespace aarch64_sve;
>
> @@ -1207,6 +1208,66 @@ public:
>      insn_code icode = code_for_aarch64_sve_ld1rq (e.vector_mode (0));
>      return e.use_contiguous_load_insn (icode);
>    }
> +
> +  gimple *
> +  fold (gimple_folder &f) const override
> +  {
> +    tree arg0 = gimple_call_arg (f.call, 0);
> +    tree arg1 = gimple_call_arg (f.call, 1);
> +
> +    /* Transform:
> +       lhs = svld1rq ({-1, -1, ... }, arg1)
> +       into:
> +       tmp = mem_ref [(int * {ref-all}) arg1]
> +       lhs = vec_perm_expr.
> +       on little endian target.
> +       vectype is the corresponding ADVSIMD type.  */
> +
> +    if (!BYTES_BIG_ENDIAN
> +        && integer_all_onesp (arg0))
> +      {
> +        tree lhs = gimple_call_lhs (f.call);
> +        tree lhs_type = TREE_TYPE (lhs);
> +        poly_uint64 lhs_len = TYPE_VECTOR_SUBPARTS (lhs_type);
> +        tree eltype = TREE_TYPE (lhs_type);
> +
> +        scalar_mode elmode = GET_MODE_INNER (TYPE_MODE (lhs_type));
> +        machine_mode vq_mode = aarch64_vq_mode (elmode).require ();
> +        tree vectype = build_vector_type_for_mode (eltype, vq_mode);
> +
> +        tree elt_ptr_type
> +          = build_pointer_type_for_mode (eltype, VOIDmode, true);
> +        tree zero = build_zero_cst (elt_ptr_type);
> +
> +        /* Use element type alignment.  */
> +        tree access_type
> +          = build_aligned_type (vectype, TYPE_ALIGN (eltype));
> +
> +        tree mem_ref_lhs = make_ssa_name_fn (cfun, access_type, 0);
> +        tree mem_ref_op = fold_build2 (MEM_REF, access_type, arg1, zero);
> +        gimple *mem_ref_stmt
> +          = gimple_build_assign (mem_ref_lhs, mem_ref_op);
> +        gsi_insert_before (f.gsi, mem_ref_stmt, GSI_SAME_STMT);
> +
> +        int source_nelts = TYPE_VECTOR_SUBPARTS (access_type).to_constant ();
> +        vec_perm_builder sel (lhs_len, source_nelts, 1);
> +        for (int i = 0; i < source_nelts; i++)
> +          sel.quick_push (i);
> +
> +        vec_perm_indices indices (sel, 1, source_nelts);
> +        gcc_checking_assert (can_vec_perm_const_p (TYPE_MODE (lhs_type),
> +                                                   TYPE_MODE (access_type),
> +                                                   indices));
> +        tree mask_type = (FLOAT_TYPE_P (eltype))
> +                         ? build_vector_type (integer_type_node, lhs_len)
> +                         : lhs_type;
> +        tree mask = vec_perm_indices_to_tree (mask_type, indices);
> +        return gimple_build_assign (lhs, VEC_PERM_EXPR,
> +                                    mem_ref_lhs, mem_ref_lhs, mask);
> +      }
> +
> +    return NULL;
> +  }
>  };
>
>  class svld1ro_impl : public load_replicate
> diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
> index d4c575ce976..ae8e913d525 100644
> --- a/gcc/config/aarch64/aarch64.cc
> +++ b/gcc/config/aarch64/aarch64.cc
> @@ -23401,7 +23401,8 @@ struct expand_vec_perm_d
>    bool testing_p;
>  };
>
> -static bool aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d);
> +static bool aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d,
> +                                             machine_mode op_mode);
>
>  /* Generate a variable permutation.  */
>
> @@ -23638,7 +23639,7 @@ aarch64_evpc_reencode (struct expand_vec_perm_d *d)
>    newd.one_vector_p = d->one_vector_p;
>
>    newd.perm.new_vector (newpermconst, newd.one_vector_p ? 1 : 2, nelt / 2);
> -  return aarch64_expand_vec_perm_const_1 (&newd);
> +  return aarch64_expand_vec_perm_const_1 (&newd, newd.vmode);
>  }
>
>  /* Recognize patterns suitable for the UZP instructions.  */
> @@ -23945,6 +23946,32 @@ aarch64_evpc_sve_tbl (struct expand_vec_perm_d *d)
>    return true;
>  }
>
> +/* Try to implement D using SVE dup instruction.  */
> +
> +static bool
> +aarch64_evpc_sve_dup (struct expand_vec_perm_d *d, machine_mode op_mode)
> +{
> +  if (BYTES_BIG_ENDIAN
> +      || d->perm.length ().is_constant ()

Sorry, I've forgotten: why do we need this is_constant check?

> +      || !d->one_vector_p
> +      || aarch64_classify_vector_mode (op_mode) != VEC_ADVSIMD)
> +    return false;

We need to check that nelts_per_pattern is 1 as well.

> +  int npatterns = d->perm.encoding ().npatterns ();
> +  if (!known_eq (npatterns, GET_MODE_NUNITS (op_mode)))
> +    return false;
> +
> +  for (int i = 0; i < npatterns; i++)
> +    if (!known_eq (d->perm[i], i))
> +      return false;
> +
> +  if (d->testing_p)
> +    return true;
> +
> +  aarch64_expand_sve_dupq (d->target, GET_MODE (d->target), d->op0);
> +  return true;
> +}
> +
>  /* Try to implement D using SVE SEL instruction.  */
>
>  static bool
> @@ -24066,7 +24093,8 @@ aarch64_evpc_ins (struct expand_vec_perm_d *d)
>  }
>
>  static bool
> -aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
> +aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d,
> +                                 machine_mode op_mode)

I think we should add op_mode to expand_vec_perm_d instead.
Let's also add an op_vec_flags to cache the aarch64_classify_vector_mode
result.

>  {
>    /* The pattern matching functions above are written to look for a small
>       number to begin the sequence (0, 1, N/2).  If we begin with an index
> @@ -24084,6 +24112,12 @@ aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
>        || d->vec_flags == VEC_SVE_PRED)
>        && known_gt (nelt, 1))
>      {
> +      /* If operand and result modes differ, then only check
> +         for dup case.  */
> +      if (d->vmode != op_mode)
> +        return (d->vec_flags == VEC_SVE_DATA)
> +                ? aarch64_evpc_sve_dup (d, op_mode) : false;
> +

I think it'd be more future-proof to format this as:

  if (d->vmode == d->op_mode)
    {
      …existing code…
    }
  else
    {
      if (aarch64_evpc_sve_dup (d))
	return true;
    }

with the d->vec_flags == VEC_SVE_DATA check being in aarch64_evpc_sve_dup,
alongside the op_mode check.  I think we'll be adding more checks here
over time.

>        if (aarch64_evpc_rev_local (d))
>          return true;
>        else if (aarch64_evpc_rev_global (d))
>          return true;
> @@ -24105,7 +24139,12 @@
>        else if (aarch64_evpc_reencode (d))
>          return true;
>        if (d->vec_flags == VEC_SVE_DATA)
> -        return aarch64_evpc_sve_tbl (d);
> +        {
> +          if (aarch64_evpc_sve_tbl (d))
> +            return true;
> +          else if (aarch64_evpc_sve_dup (d, op_mode))
> +            return true;
> +        }
>        else if (d->vec_flags == VEC_ADVSIMD)
>          return aarch64_evpc_tbl (d);
>      }

Is this part still needed, given the above?

Thanks,
Richard

> @@ -24119,9 +24158,6 @@ aarch64_vectorize_vec_perm_const (machine_mode vmode, machine_mode op_mode,
>                                    rtx target, rtx op0, rtx op1,
>                                    const vec_perm_indices &sel)
>  {
> -  if (vmode != op_mode)
> -    return false;
> -
>    struct expand_vec_perm_d d;
>
>    /* Check whether the mask can be applied to a single vector.  */
> @@ -24154,10 +24190,10 @@ aarch64_vectorize_vec_perm_const (machine_mode vmode, machine_mode op_mode,
>    d.testing_p = !target;
>
>    if (!d.testing_p)
> -    return aarch64_expand_vec_perm_const_1 (&d);
> +    return aarch64_expand_vec_perm_const_1 (&d, op_mode);
>
>    rtx_insn *last = get_last_insn ();
> -  bool ret = aarch64_expand_vec_perm_const_1 (&d);
> +  bool ret = aarch64_expand_vec_perm_const_1 (&d, op_mode);
>    gcc_assert (last == get_last_insn ());
>
>    return ret;
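
For the record, the kind of thing I mean by caching the extra information
in expand_vec_perm_d (untested sketch; the new field names are only
suggestions):

  struct expand_vec_perm_d
  {
    rtx target, op0, op1;
    vec_perm_indices perm;
    machine_mode vmode;
    machine_mode op_mode;
    unsigned int vec_flags;
    unsigned int op_vec_flags;
    bool one_vector_p;
    bool testing_p;
  };

with aarch64_vectorize_vec_perm_const filling the new fields in:

  d.vmode = vmode;
  d.vec_flags = aarch64_classify_vector_mode (d.vmode);
  d.op_mode = op_mode;
  d.op_vec_flags = aarch64_classify_vector_mode (d.op_mode);

so that aarch64_evpc_sve_dup can test d->vec_flags and d->op_vec_flags
itself instead of taking op_mode as a separate argument.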