diff --git a/gcc/config/aarch64/aarch64-sve-builtins-base.cc b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
index bee410929bd..48e849bec34 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-base.cc
+++ b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
@@ -44,6 +44,7 @@
 #include "aarch64-sve-builtins-shapes.h"
 #include "aarch64-sve-builtins-base.h"
 #include "aarch64-sve-builtins-functions.h"
+#include "ssa.h"
 
 using namespace aarch64_sve;
 
@@ -1207,6 +1208,66 @@ public:
     insn_code icode = code_for_aarch64_sve_ld1rq (e.vector_mode (0));
     return e.use_contiguous_load_insn (icode);
   }
+
+  gimple *
+  fold (gimple_folder &f) const override
+  {
+    tree arg0 = gimple_call_arg (f.call, 0);
+    tree arg1 = gimple_call_arg (f.call, 1);
+
+    /* Transform:
+       lhs = svld1rq ({-1, -1, ... }, arg1)
+       into:
+       tmp = mem_ref<vectype> [(int * {ref-all}) arg1]
+       lhs = vec_perm_expr<tmp, tmp, {0, 1, 2, 3, ...}>.
+       on little endian target.
+       vectype is the corresponding ADVSIMD type.  */
+
+    if (!BYTES_BIG_ENDIAN
+        && integer_all_onesp (arg0))
+      {
+        tree lhs = gimple_call_lhs (f.call);
+        tree lhs_type = TREE_TYPE (lhs);
+        poly_uint64 lhs_len = TYPE_VECTOR_SUBPARTS (lhs_type);
+        tree eltype = TREE_TYPE (lhs_type);
+
+        scalar_mode elmode = GET_MODE_INNER (TYPE_MODE (lhs_type));
+        machine_mode vq_mode = aarch64_vq_mode (elmode).require ();
+        tree vectype = build_vector_type_for_mode (eltype, vq_mode);
+
+        tree elt_ptr_type
+          = build_pointer_type_for_mode (eltype, VOIDmode, true);
+        tree zero = build_zero_cst (elt_ptr_type);
+
+        /* Use element type alignment.  */
+        tree access_type
+          = build_aligned_type (vectype, TYPE_ALIGN (eltype));
+
+        tree mem_ref_lhs = make_ssa_name_fn (cfun, access_type, 0);
+        tree mem_ref_op = fold_build2 (MEM_REF, access_type, arg1, zero);
+        gimple *mem_ref_stmt
+          = gimple_build_assign (mem_ref_lhs, mem_ref_op);
+        gsi_insert_before (f.gsi, mem_ref_stmt, GSI_SAME_STMT);
+
+        int source_nelts = TYPE_VECTOR_SUBPARTS (access_type).to_constant ();
+        vec_perm_builder sel (lhs_len, source_nelts, 1);
+        for (int i = 0; i < source_nelts; i++)
+          sel.quick_push (i);
+
+        vec_perm_indices indices (sel, 1, source_nelts);
+        gcc_checking_assert (can_vec_perm_const_p (TYPE_MODE (lhs_type),
+                                                   TYPE_MODE (access_type),
+                                                   indices));
+        tree mask_type = (FLOAT_TYPE_P (eltype))
+                         ? build_vector_type (integer_type_node, lhs_len)
+                         : lhs_type;
+        tree mask = vec_perm_indices_to_tree (mask_type, indices);
+        return gimple_build_assign (lhs, VEC_PERM_EXPR,
+                                    mem_ref_lhs, mem_ref_lhs, mask);
+      }
+
+    return NULL;
+  }
 };
 
 class svld1ro_impl : public load_replicate
diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index d4c575ce976..ae8e913d525 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -23401,7 +23401,8 @@ struct expand_vec_perm_d
   bool testing_p;
 };
 
-static bool aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d);
+static bool aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d,
+                                             machine_mode op_mode);
 
 /* Generate a variable permutation.  */
 
@@ -23638,7 +23639,7 @@ aarch64_evpc_reencode (struct expand_vec_perm_d *d)
   newd.one_vector_p = d->one_vector_p;
   newd.perm.new_vector (newpermconst, newd.one_vector_p ? 1 : 2, nelt / 2);
 
-  return aarch64_expand_vec_perm_const_1 (&newd);
+  return aarch64_expand_vec_perm_const_1 (&newd, newd.vmode);
 }
 
 /* Recognize patterns suitable for the UZP instructions.  */
@@ -23945,6 +23946,32 @@ aarch64_evpc_sve_tbl (struct expand_vec_perm_d *d)
   return true;
 }
 
+/* Try to implement D using SVE dup instruction.  */
+
+static bool
+aarch64_evpc_sve_dup (struct expand_vec_perm_d *d, machine_mode op_mode)
+{
+  if (BYTES_BIG_ENDIAN
+      || d->perm.length ().is_constant ()
+      || !d->one_vector_p
+      || aarch64_classify_vector_mode (op_mode) != VEC_ADVSIMD)
+    return false;
+
+  int npatterns = d->perm.encoding ().npatterns ();
+  if (!known_eq (npatterns, GET_MODE_NUNITS (op_mode)))
+    return false;
+
+  for (int i = 0; i < npatterns; i++)
+    if (!known_eq (d->perm[i], i))
+      return false;
+
+  if (d->testing_p)
+    return true;
+
+  aarch64_expand_sve_dupq (d->target, GET_MODE (d->target), d->op0);
+  return true;
+}
+
 /* Try to implement D using SVE SEL instruction.  */
 
 static bool
@@ -24066,7 +24093,8 @@ aarch64_evpc_ins (struct expand_vec_perm_d *d)
 }
 
 static bool
-aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
+aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d,
+                                 machine_mode op_mode)
 {
   /* The pattern matching functions above are written to look for a small
      number to begin the sequence (0, 1, N/2).  If we begin with an index
@@ -24084,6 +24112,12 @@ aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
        || d->vec_flags == VEC_SVE_PRED)
       && known_gt (nelt, 1))
     {
+      /* If operand and result modes differ, then only check
+         for dup case.  */
+      if (d->vmode != op_mode)
+        return (d->vec_flags == VEC_SVE_DATA)
+                ? aarch64_evpc_sve_dup (d, op_mode) : false;
+
       if (aarch64_evpc_rev_local (d))
         return true;
       else if (aarch64_evpc_rev_global (d))
@@ -24105,7 +24139,12 @@ aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
       else if (aarch64_evpc_reencode (d))
         return true;
       if (d->vec_flags == VEC_SVE_DATA)
-        return aarch64_evpc_sve_tbl (d);
+        {
+          if (aarch64_evpc_sve_tbl (d))
+            return true;
+          else if (aarch64_evpc_sve_dup (d, op_mode))
+            return true;
+        }
       else if (d->vec_flags == VEC_ADVSIMD)
         return aarch64_evpc_tbl (d);
     }
@@ -24119,9 +24158,6 @@ aarch64_vectorize_vec_perm_const (machine_mode vmode, machine_mode op_mode,
                                   rtx target, rtx op0, rtx op1,
                                   const vec_perm_indices &sel)
 {
-  if (vmode != op_mode)
-    return false;
-
   struct expand_vec_perm_d d;
 
   /* Check whether the mask can be applied to a single vector.  */
@@ -24154,10 +24190,10 @@ aarch64_vectorize_vec_perm_const (machine_mode vmode, machine_mode op_mode,
   d.testing_p = !target;
 
   if (!d.testing_p)
-    return aarch64_expand_vec_perm_const_1 (&d);
+    return aarch64_expand_vec_perm_const_1 (&d, op_mode);
 
   rtx_insn *last = get_last_insn ();
-  bool ret = aarch64_expand_vec_perm_const_1 (&d);
+  bool ret = aarch64_expand_vec_perm_const_1 (&d, op_mode);
   gcc_assert (last == get_last_insn ());
 
   return ret;
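
As a rough source-level illustration of what the svld1rq_impl::fold change
above targets (my own example, not part of the patch or its testsuite): on a
little-endian target, a call whose governing predicate is all-ones, e.g.

  #include <arm_sve.h>

  svint32_t
  foo (int32_t *x)
  {
    /* All-ones predicate, so the new fold applies on little endian.  */
    return svld1rq_s32 (svptrue_b8 (), x);
  }

is now folded at gimple level into a fixed-length MEM_REF of the
corresponding Advanced SIMD type followed by a VEC_PERM_EXPR that repeats
the loaded quadword across the SVE result, rather than being kept as the
builtin call until expansion.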
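On the aarch64.cc side, aarch64_evpc_sve_dup recognizes the selector that
such a fold produces: a single fixed-length Advanced SIMD operand whose
elements 0 .. n-1 are repeated across a variable-length SVE result.  As a
simplified standalone sketch of that shape check (illustrative only; the
patch itself expresses it through the vec_perm encoding, i.e. npatterns must
equal GET_MODE_NUNITS (op_mode) with pattern i selecting element i):

  #include <cstddef>
  #include <vector>

  /* Return true if SEL is 0, 1, ..., SOURCE_NELTS-1 repeated, i.e. a
     "dup the whole fixed-length source" permutation.  Hypothetical helper,
     not GCC code.  */
  static bool
  looks_like_dupq_select (const std::vector<std::size_t> &sel,
                          std::size_t source_nelts)
  {
    if (source_nelts == 0 || sel.size () % source_nelts != 0)
      return false;
    for (std::size_t i = 0; i < sel.size (); ++i)
      if (sel[i] != i % source_nelts)
        return false;
    return true;
  }

Since the other aarch64_evpc_* routines assume the operand and result modes
match, aarch64_expand_vec_perm_const_1 only tries the dup case when
d->vmode != op_mode.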