Date: Wed, 15 Jan 2014 16:39:00 -0000
From: Martin Jambor
To: Richard Biener
Cc: Eric Botcazou, GCC Patches
Subject: Re: [RFC] Move ehcleanup pass to before early SRA
Message-ID: <20140115163923.GF23848@virgil.suse>
References: <1931947.uvSAzT6pjY@polaris> <1516660.2kBhdnQEvf@polaris>

Hi,

On Wed, Jan 15, 2014 at 03:39:09PM +0100, Richard Biener wrote:
> On Wed, Jan 15, 2014 at 1:06 PM, Eric Botcazou wrote:
> > Yes, it's scalarized during SRA but not ESRA because there is an ehcleanup
> > pass in-between.  It used to be scalarized during ESRA up to 4.6.x.
>
> I'm saying even ESRA should be able to scalarize it just fine.  It just
> needs to be careful where to insert the loads from the aggregate result
> (on the single outgoing non-EH edge).
>
> Wouldn't that fix your issue?

Eric, more specifically, would the (untested proof of concept) patch
below help?

Martin

diff --git a/gcc/tree-sra.c b/gcc/tree-sra.c
index 4992b4c..a4c0e21 100644
--- a/gcc/tree-sra.c
+++ b/gcc/tree-sra.c
@@ -1142,6 +1142,26 @@ build_access_from_expr (tree expr, gimple stmt, bool write)
   return false;
 }
 
+/* Return the single non-EH successor edge of BB or NULL if there is none or
+   more than one.  */
+
+static edge
+single_non_eh_succ (basic_block bb)
+{
+  edge e, res = NULL;
+  edge_iterator ei;
+
+  FOR_EACH_EDGE (e, ei, bb->succs)
+    if (!(e->flags & EDGE_EH))
+      {
+        if (res)
+          return NULL;
+        res = e;
+      }
+
+  return res;
+}
+
 /* Disqualify LHS and RHS for scalarization if STMT must end its basic block
    in modes in which it matters, return true iff they have been disqualified.
    RHS may be NULL, in that case ignore it.  If we scalarize an aggregate in
@@ -1153,6 +1173,9 @@ disqualify_ops_if_throwing_stmt (gimple stmt, tree lhs, tree rhs)
   if ((sra_mode == SRA_MODE_EARLY_INTRA || sra_mode == SRA_MODE_INTRA)
       && (stmt_can_throw_internal (stmt) || stmt_ends_bb_p (stmt)))
     {
+      if (single_non_eh_succ (gimple_bb (stmt)))
+        return false;
+
       disqualify_base_of_expr (lhs, "LHS of a throwing stmt.");
       if (rhs)
         disqualify_base_of_expr (rhs, "RHS of a throwing stmt.");
@@ -2720,6 +2743,19 @@ get_access_for_expr (tree expr)
   return get_var_base_offset_size_access (base, offset, max_size);
 }
 
+/* Split the single non-EH successor edge from BB (there must be exactly one)
+   and return a gsi to the new block.  */
+
+static gimple_stmt_iterator
+gsi_for_eh_followups (basic_block bb)
+{
+  edge e = single_non_eh_succ (bb);
+  gcc_assert (e);
+
+  basic_block new_bb = split_edge (e);
+  return gsi_start_bb (new_bb);
+}
+
 /* Replace the expression EXPR with a scalar replacement if there is one and
    generate other statements to do type conversion or subtree copying if
    necessary.  GSI is used to place newly created statements, WRITE is true if
@@ -2749,6 +2785,14 @@ sra_modify_expr (tree *expr, gimple_stmt_iterator *gsi, bool write)
   type = TREE_TYPE (*expr);
 
   loc = gimple_location (gsi_stmt (*gsi));
+  gimple_stmt_iterator alt_gsi = gsi_none ();
+  if (write && (stmt_can_throw_internal (gsi_stmt (*gsi))
+                || stmt_ends_bb_p (gsi_stmt (*gsi))))
+    {
+      alt_gsi = gsi_for_eh_followups (gsi_bb (*gsi));
+      gsi = &alt_gsi;
+    }
+
   if (access->grp_to_be_replaced)
     {
       tree repl = get_access_replacement (access);
@@ -3208,14 +3252,25 @@ sra_modify_assign (gimple *stmt, gimple_stmt_iterator *gsi)
   if (modify_this_stmt
       || gimple_has_volatile_ops (*stmt)
       || contains_vce_or_bfcref_p (rhs)
-      || contains_vce_or_bfcref_p (lhs))
+      || contains_vce_or_bfcref_p (lhs)
+      || stmt_can_throw_internal (*stmt)
+      || stmt_ends_bb_p (*stmt))
     {
       if (access_has_children_p (racc))
         generate_subtree_copies (racc->first_child, racc->base, 0, 0, 0,
                                  gsi, false, false, loc);
       if (access_has_children_p (lacc))
-        generate_subtree_copies (lacc->first_child, lacc->base, 0, 0, 0,
-                                 gsi, true, true, loc);
+        {
+          gimple_stmt_iterator alt_gsi = gsi_none ();
+          if (stmt_can_throw_internal (*stmt)
+              || stmt_ends_bb_p (*stmt))
+            {
+              alt_gsi = gsi_for_eh_followups (gsi_bb (*gsi));
+              gsi = &alt_gsi;
+            }
+          generate_subtree_copies (lacc->first_child, lacc->base, 0, 0, 0,
+                                   gsi, true, true, loc);
+        }
       sra_stats.separate_lhs_rhs_handling++;
 
       /* This gimplification must be done after generate_subtree_copies,
@@ -3309,8 +3364,13 @@ sra_modify_function_body (void)
 {
   bool cfg_changed = false;
   basic_block bb;
+  vec<basic_block> bb_list = vNULL;
 
   FOR_EACH_BB_FN (bb, cfun)
+    bb_list.safe_push (bb);
+
+  unsigned bb_vec_index;
+  FOR_EACH_VEC_ELT (bb_list, bb_vec_index, bb)
     {
       gimple_stmt_iterator gsi = gsi_start_bb (bb);
       while (!gsi_end_p (gsi))
@@ -3379,6 +3439,7 @@
         }
     }
 
+  bb_list.release ();
   return cfg_changed;
 }
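
For illustration, a minimal reduced testcase of the kind of situation being
discussed might look like the following; this sketch is not from the
original mail, and the type A and the functions build and consume are
made-up names.  The assignment from the call ends its basic block with an
EH edge to the handler and a single non-EH fallthrough edge, and it is on
that edge that the loads of the scalarized pieces have to be placed.

struct A { int x; int y; };

A build (int i);		/* May throw; defined elsewhere.  */

int
consume (int i)
{
  int res = 0;
  try
    {
      A a = build (i);		/* Aggregate result of a call that can
				   throw internally.  */
      res = a.x + a.y;		/* Accesses SRA would like to scalarize.  */
    }
  catch (...)
    {
      res = -1;
    }
  return res;
}

As I read the patch, without it disqualify_ops_if_throwing_stmt would
disqualify 'a' during early SRA in a case like this; with it, the single
non-EH fallthrough edge after the call is split by gsi_for_eh_followups and
the replacement loads are emitted in the new block.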