commit 846d5932041e04bbf386efbc739aee9749051bc7
Author: Alan Lawrence
Date:   Wed Aug 13 17:25:13 2014 +0100

    AArch64: Reintroduce gimple_fold for min+max+plus

diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c
index a49da89..283469b 100644
--- a/gcc/config/aarch64/aarch64-builtins.c
+++ b/gcc/config/aarch64/aarch64-builtins.c
@@ -1188,9 +1188,6 @@ aarch64_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *args,
       return NULL_TREE;
 }
 
-/* Handling of reduction operations temporarily removed so as to decouple
-   changes to tree codes from AArch64 NEON Intrinsics.  */
-#if 0
 bool
 aarch64_gimple_fold_builtin (gimple_stmt_iterator *gsi)
 {
@@ -1200,19 +1197,6 @@ aarch64_gimple_fold_builtin (gimple_stmt_iterator *gsi)
   tree fndecl;
   gimple new_stmt = NULL;
 
-  /* The operations folded below are reduction operations.  These are
-     defined to leave their result in the 0'th element (from the perspective
-     of GCC).  The architectural instruction we are folding will leave the
-     result in the 0'th element (from the perspective of the architecture).
-     For big-endian systems, these perspectives are not aligned.
-
-     It is therefore wrong to perform this fold on big-endian.  There
-     are some tricks we could play with shuffling, but the mid-end is
-     inconsistent in the way it treats reduction operations, so we will
-     end up in difficulty.  Until we fix the ambiguity - just bail out.  */
-  if (BYTES_BIG_ENDIAN)
-    return false;
-
   if (call)
     {
       fndecl = gimple_call_fndecl (stmt);
@@ -1224,23 +1208,28 @@ aarch64_gimple_fold_builtin (gimple_stmt_iterator *gsi)
                         ? gimple_call_arg_ptr (stmt, 0)
                         : &error_mark_node);
 
+          /* We use gimple's REDUC_(PLUS|MIN|MAX)_EXPRs for float, signed int
+             and unsigned int; it will distinguish according to the types of
+             the arguments to the __builtin.  */
           switch (fcode)
             {
-              BUILTIN_VALL (UNOP, reduc_splus_, 10)
-                new_stmt = gimple_build_assign_with_ops (
+              BUILTIN_VALL (UNOP, reduc_plus_scal_, 10)
+                new_stmt = gimple_build_assign_with_ops (
                                               REDUC_PLUS_EXPR,
                                               gimple_call_lhs (stmt),
                                               args[0],
                                               NULL_TREE);
                 break;
-              BUILTIN_VDQIF (UNOP, reduc_smax_, 10)
+              BUILTIN_VDQIF (UNOP, reduc_smax_scal_, 10)
+              BUILTIN_VDQ_BHSI (UNOPU, reduc_umax_scal_, 10)
                 new_stmt = gimple_build_assign_with_ops (
                                               REDUC_MAX_EXPR,
                                               gimple_call_lhs (stmt),
                                               args[0],
                                               NULL_TREE);
                 break;
-              BUILTIN_VDQIF (UNOP, reduc_smin_, 10)
+              BUILTIN_VDQIF (UNOP, reduc_smin_scal_, 10)
+              BUILTIN_VDQ_BHSI (UNOPU, reduc_umin_scal_, 10)
                 new_stmt = gimple_build_assign_with_ops (
                                               REDUC_MIN_EXPR,
                                               gimple_call_lhs (stmt),
@@ -1262,7 +1251,6 @@ aarch64_gimple_fold_builtin (gimple_stmt_iterator *gsi)
   return changed;
 }
-#endif
 
 void
 aarch64_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 27d82f3..db5ff59 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -10015,8 +10015,8 @@ aarch64_asan_shadow_offset (void)
 #undef TARGET_FRAME_POINTER_REQUIRED
 #define TARGET_FRAME_POINTER_REQUIRED aarch64_frame_pointer_required
 
-//#undef TARGET_GIMPLE_FOLD_BUILTIN
-//#define TARGET_GIMPLE_FOLD_BUILTIN aarch64_gimple_fold_builtin
+#undef TARGET_GIMPLE_FOLD_BUILTIN
+#define TARGET_GIMPLE_FOLD_BUILTIN aarch64_gimple_fold_builtin
 
 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
 #define TARGET_GIMPLIFY_VA_ARG_EXPR aarch64_gimplify_va_arg_expr
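
For context, and not part of the commit above: a minimal sketch of the kind of source this fold targets, assuming that the arm_neon.h across-vector reduction intrinsics (vaddvq_f32, vmaxvq_s32, vminv_u16, ...) expand to the __builtin_aarch64_reduc_*_scal_* builtins matched in the switch above. With TARGET_GIMPLE_FOLD_BUILTIN re-enabled, calls like these should reach the mid-end as REDUC_PLUS_EXPR / REDUC_MAX_EXPR / REDUC_MIN_EXPR assignments instead of opaque builtin calls; the file name, function names and compile command below are illustrative only.

    /* reduc_fold_sketch.c -- hypothetical example, not part of the patch.
       Try on a little-endian aarch64 target with something like:
         gcc -O2 -fdump-tree-optimized -c reduc_fold_sketch.c
       and look for the REDUC_*_EXPR forms in the dump.  */
    #include <arm_neon.h>

    float
    sum_across (float32x4_t v)
    {
      /* Assumed to go via reduc_plus_scal_ -> REDUC_PLUS_EXPR.  */
      return vaddvq_f32 (v);
    }

    int32_t
    max_across (int32x4_t v)
    {
      /* Signed max reduction: reduc_smax_scal_ -> REDUC_MAX_EXPR.  */
      return vmaxvq_s32 (v);
    }

    uint16_t
    min_across (uint16x4_t v)
    {
      /* Unsigned min reduction: reduc_umin_scal_ -> REDUC_MIN_EXPR.  */
      return vminv_u16 (v);
    }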