Hi All,

This patch series adds recognition of pairwise operations (reductions) in
match.pd so that we can benefit from them even at -O1, when the vectorizer
isn't enabled.

Using them allows for much simpler codegen on AArch64 and lets us avoid
quite a few codegen warts.

As an example, a simple:

typedef float v4sf __attribute__((vector_size (16)));

float
foo3 (v4sf x)
{
  return x[1] + x[2];
}

currently generates:

foo3:
        dup     s1, v0.s[1]
        dup     s0, v0.s[2]
        fadd    s0, s1, s0
        ret

while with this patch series it now generates:

foo3:
        ext     v0.16b, v0.16b, v0.16b, #4
        faddp   s0, v0.2s
        ret

(A sketch of the conditional min/max form that the new patterns also handle
is shown after the patch below.)

This patch will not perform the transformation if the source is not a gimple
register, and leaves memory sources to the vectorizer, which is able to deal
correctly with clobbers.

The use of these instructions makes a significant difference in codegen
quality for AArch64 and Arm.

NOTE: The last entry in the series contains the tests for all of the previous
patches, as it's a bit of an all-or-nothing thing.

Bootstrapped and regtested on aarch64-none-linux-gnu and x86_64-pc-linux-gnu
with no issues.

Ok for master?

Thanks,
Tamar

gcc/ChangeLog:

	* match.pd (adjacent_data_access_p): Import.
	Add new pattern for pairwise plus, min, max, fmax, fmin.
	* tree-cfg.cc (verify_gimple_call): Allow function arguments in
	IFNs.
	* tree.cc (adjacent_data_access_p): New.
	* tree.h (adjacent_data_access_p): New.

--- inline copy of patch --
diff --git a/gcc/match.pd b/gcc/match.pd
index 2617d56091dfbd41ae49f980ee0af3757f5ec1cf..aecaa3520b36e770d11ea9a10eb18db23c0cd9f7 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -39,7 +39,8 @@ along with GCC; see the file COPYING3.  If not see
    HONOR_NANS
    uniform_vector_p
    expand_vec_cmp_expr_p
-   bitmask_inv_cst_vector_p)
+   bitmask_inv_cst_vector_p
+   adjacent_data_access_p)
 
 /* Operator lists.  */
 (define_operator_list tcc_comparison
@@ -7195,6 +7196,47 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
 
 /* Canonicalizations of BIT_FIELD_REFs.  */
 
+/* Canonicalize BIT_FIELD_REFS to pairwise operations.
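+   E.g., given a V4SF argument x as in the foo3 example, the plus pattern
+   rewrites
+     BIT_FIELD_REF <x, 32, 32> + BIT_FIELD_REF <x, 32, 64>
+   into
+     .REDUC_PLUS (BIT_FIELD_REF <x, 64, 32>)
+   when both references read adjacent elements of the same register.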
+   */
+(for op (plus min max FMIN_ALL FMAX_ALL)
+     ifn (IFN_REDUC_PLUS IFN_REDUC_MIN IFN_REDUC_MAX
+	  IFN_REDUC_FMIN IFN_REDUC_FMAX)
+ (simplify
+  (op @0 @1)
+   (if (INTEGRAL_TYPE_P (type) || SCALAR_FLOAT_TYPE_P (type))
+    (with { poly_uint64 nloc = 0;
+	    tree src = adjacent_data_access_p (@0, @1, &nloc, true);
+	    tree ntype = build_vector_type (type, 2);
+	    tree size = TYPE_SIZE (ntype);
+	    tree pos = build_int_cst (TREE_TYPE (size), nloc);
+	    poly_uint64 _sz;
+	    poly_uint64 _total; }
+     (if (src && is_gimple_reg (src) && ntype
+	  && poly_int_tree_p (size, &_sz)
+	  && poly_int_tree_p (TYPE_SIZE (TREE_TYPE (src)), &_total)
+	  && known_ge (_total, _sz + nloc))
+      (ifn (BIT_FIELD_REF:ntype { src; } { size; } { pos; })))))))
+
+(for op (lt gt)
+     ifni (IFN_REDUC_MIN IFN_REDUC_MAX)
+     ifnf (IFN_REDUC_FMIN IFN_REDUC_FMAX)
+ (simplify
+  (cond (op @0 @1) @0 @1)
+   (if (INTEGRAL_TYPE_P (type) || SCALAR_FLOAT_TYPE_P (type))
+    (with { poly_uint64 nloc = 0;
+	    tree src = adjacent_data_access_p (@0, @1, &nloc, false);
+	    tree ntype = build_vector_type (type, 2);
+	    tree size = TYPE_SIZE (ntype);
+	    tree pos = build_int_cst (TREE_TYPE (size), nloc);
+	    poly_uint64 _sz;
+	    poly_uint64 _total; }
+     (if (src && is_gimple_reg (src) && ntype
+	  && poly_int_tree_p (size, &_sz)
+	  && poly_int_tree_p (TYPE_SIZE (TREE_TYPE (src)), &_total)
+	  && known_ge (_total, _sz + nloc))
+      (if (SCALAR_FLOAT_MODE_P (TYPE_MODE (type)))
+       (ifnf (BIT_FIELD_REF:ntype { src; } { size; } { pos; }))
+       (ifni (BIT_FIELD_REF:ntype { src; } { size; } { pos; })))))))))
+
 (simplify
  (BIT_FIELD_REF (BIT_FIELD_REF @0 @1 @2) @3 @4)
  (BIT_FIELD_REF @0 @3 { const_binop (PLUS_EXPR, bitsizetype, @2, @4); }))
diff --git a/gcc/tree-cfg.cc b/gcc/tree-cfg.cc
index 91ec33c80a41e1e0cc6224e137dd42144724a168..b19710392940cf469de52d006603ae1e3deb6b76 100644
--- a/gcc/tree-cfg.cc
+++ b/gcc/tree-cfg.cc
@@ -3492,6 +3492,7 @@ verify_gimple_call (gcall *stmt)
     {
       tree arg = gimple_call_arg (stmt, i);
       if ((is_gimple_reg_type (TREE_TYPE (arg))
+	   && !is_gimple_variable (arg)
 	   && !is_gimple_val (arg))
 	  || (!is_gimple_reg_type (TREE_TYPE (arg))
 	      && !is_gimple_lvalue (arg)))
diff --git a/gcc/tree.h b/gcc/tree.h
index e6564aaccb7b69cd938ff60b6121aec41b7e8a59..8f8a9660c9e0605eb516de194640b8c1b531b798 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -5006,6 +5006,11 @@ extern bool integer_pow2p (const_tree);
 
 extern tree bitmask_inv_cst_vector_p (tree);
 
+/* TRUE if the two operands represent adjacent access of data such that a
+   pairwise operation can be used.  */
+
+extern tree adjacent_data_access_p (tree, tree, poly_uint64*, bool);
+
 /* integer_nonzerop (tree x) is nonzero if X is an integer constant with a
    nonzero value.  */
 
diff --git a/gcc/tree.cc b/gcc/tree.cc
index 007c9325b17076f474e6681c49966c59cf6b91c7..5315af38a1ead89ca5f75dc4b19de9841e29d311 100644
--- a/gcc/tree.cc
+++ b/gcc/tree.cc
@@ -10457,6 +10457,90 @@ bitmask_inv_cst_vector_p (tree t)
   return builder.build ();
 }
 
+/* Returns base address if the two operands represent adjacent access of data
+   such that a pairwise operation can be used.  OP1 must be a lower subpart
+   than OP2.  If POS is not NULL then on return if a value is returned POS
+   will indicate the position of the lower address.  If COMMUTATIVE_P then
+   the operation is also tried by flipping op1 and op2.
+   */
+
+tree adjacent_data_access_p (tree op1, tree op2, poly_uint64 *pos,
+			     bool commutative_p)
+{
+  gcc_assert (op1);
+  gcc_assert (op2);
+  if (TREE_CODE (op1) != TREE_CODE (op2)
+      || TREE_TYPE (op1) != TREE_TYPE (op2))
+    return NULL;
+
+  tree type = TREE_TYPE (op1);
+  gimple *stmt1 = NULL, *stmt2 = NULL;
+  unsigned int bits = GET_MODE_BITSIZE (GET_MODE_INNER (TYPE_MODE (type)));
+
+  if (TREE_CODE (op1) == BIT_FIELD_REF
+      && operand_equal_p (TREE_OPERAND (op1, 0), TREE_OPERAND (op2, 0), 0)
+      && operand_equal_p (TREE_OPERAND (op1, 1), TREE_OPERAND (op2, 1), 0)
+      && known_eq (bit_field_size (op1), bits))
+    {
+      poly_uint64 offset1 = bit_field_offset (op1);
+      poly_uint64 offset2 = bit_field_offset (op2);
+      if (known_eq (offset2 - offset1, bits))
+	{
+	  if (pos)
+	    *pos = offset1;
+	  return TREE_OPERAND (op1, 0);
+	}
+      else if (commutative_p && known_eq (offset1 - offset2, bits))
+	{
+	  if (pos)
+	    *pos = offset2;
+	  return TREE_OPERAND (op1, 0);
+	}
+    }
+  else if (TREE_CODE (op1) == ARRAY_REF
+	   && operand_equal_p (get_base_address (op1), get_base_address (op2)))
+    {
+      wide_int size1 = wi::to_wide (array_ref_element_size (op1));
+      wide_int size2 = wi::to_wide (array_ref_element_size (op2));
+      if (wi::ne_p (size1, size2) || wi::ne_p (size1, bits / 8)
+	  || !tree_fits_poly_uint64_p (TREE_OPERAND (op1, 1))
+	  || !tree_fits_poly_uint64_p (TREE_OPERAND (op2, 1)))
+	return NULL;
+
+      poly_uint64 offset1 = tree_to_poly_uint64 (TREE_OPERAND (op1, 1));
+      poly_uint64 offset2 = tree_to_poly_uint64 (TREE_OPERAND (op2, 1));
+      if (known_eq (offset2 - offset1, 1UL))
+	{
+	  if (pos)
+	    *pos = offset1 * bits;
+	  return TREE_OPERAND (op1, 0);
+	}
+      else if (commutative_p && known_eq (offset1 - offset2, 1UL))
+	{
+	  if (pos)
+	    *pos = offset2 * bits;
+	  return TREE_OPERAND (op1, 0);
+	}
+    }
+  else if (TREE_CODE (op1) == SSA_NAME
+	   && (stmt1 = SSA_NAME_DEF_STMT (op1)) != NULL
+	   && (stmt2 = SSA_NAME_DEF_STMT (op2)) != NULL
+	   && is_gimple_assign (stmt1)
+	   && is_gimple_assign (stmt2))
+    {
+      if (gimple_assign_rhs_code (stmt1) != ARRAY_REF
+	  && gimple_assign_rhs_code (stmt1) != BIT_FIELD_REF
+	  && gimple_assign_rhs_code (stmt2) != ARRAY_REF
+	  && gimple_assign_rhs_code (stmt2) != BIT_FIELD_REF)
+	return NULL;
+
+      return adjacent_data_access_p (gimple_assign_rhs1 (stmt1),
+				     gimple_assign_rhs1 (stmt2), pos,
+				     commutative_p);
+    }
+
+  return NULL;
+}
+
 /* If VECTOR_CST T has a single nonzero element, return the index of
    that element, otherwise return -1.  */
 
--
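
As a further illustration, here is a minimal sketch of the conditional
min/max form that the second pattern is intended to match (the function
name below is made up for illustration; the actual testcases are in the
last patch of the series):

typedef float v4sf __attribute__((vector_size (16)));

/* A COND_EXPR selecting between two adjacent lanes.  The
   (cond (lt @0 @1) @0 @1) pattern rewrites this, for floating-point types,
   into .REDUC_FMIN over a two-element BIT_FIELD_REF of x, analogous to the
   faddp example above.  */
float
foo_min (v4sf x)
{
  return x[0] < x[1] ? x[0] : x[1];
}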