Date: Mon, 13 Jun 2011 17:52:00 -0000
From: Jan Hubicka
To: gcc-patches@gcc.gnu.org
Subject: Cgraph alias reorg 21/14 (topological sorting for early opts)
Message-ID: <20110613153354.GA12283@kam.mff.cuni.cz>
MIME-Version: 1.0
Content-Type: text/plain; charset=us-ascii

Hi,
this patch makes ipa_reverse_postorder walk alias references as well,
improving the effectiveness of early opts on units with aliases.

Bootstrapped/regtested x86_64-linux, committed.

Honza

	* ipa-utils.c (postorder_stack): New structure.
	(ipa_reverse_postorder): Handle aliases.

Index: ipa-utils.c
===================================================================
--- ipa-utils.c	(revision 174958)
+++ ipa-utils.c	(working copy)
@@ -233,6 +233,13 @@ ipa_free_postorder_info (void)
     }
 }

+struct postorder_stack
+{
+  struct cgraph_node *node;
+  struct cgraph_edge *edge;
+  int ref;
+};
+
 /* Fill array order with all nodes with output flag set in the reverse
    topological order.  Return the number of elements in the array.
    FIXME: While walking, consider aliases, too.  */
@@ -243,11 +250,12 @@ ipa_reverse_postorder (struct cgraph_nod
   struct cgraph_node *node, *node2;
   int stack_size = 0;
   int order_pos = 0;
-  struct cgraph_edge *edge, last;
+  struct cgraph_edge *edge;
   int pass;
+  struct ipa_ref *ref;

-  struct cgraph_node **stack =
-    XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
+  struct postorder_stack *stack =
+    XCNEWVEC (struct postorder_stack, cgraph_n_nodes);

   /* We have to deal with cycles nicely, so use a depth first traversal
      output algorithm.  Ignore the fact that some functions won't need
@@ -261,47 +269,51 @@ ipa_reverse_postorder (struct cgraph_nod
           && (pass
               || (!node->address_taken
                   && !node->global.inlined_to
-                  && !cgraph_only_called_directly_or_aliased_p (node))))
+                  && !node->alias && !node->thunk.thunk_p
+                  && !cgraph_only_called_directly_p (node))))
         {
-          node2 = node;
-          if (!node->callers)
-            node->aux = &last;
-          else
-            node->aux = node->callers;
-          while (node2)
+          stack_size = 0;
+          stack[stack_size].node = node;
+          stack[stack_size].edge = node->callers;
+          stack[stack_size].ref = 0;
+          node->aux = (void *)(size_t)1;
+          while (stack_size >= 0)
             {
-              while (node2->aux != &last)
+              while (true)
                 {
-                  edge = (struct cgraph_edge *) node2->aux;
-                  if (edge->next_caller)
-                    node2->aux = edge->next_caller;
-                  else
-                    node2->aux = &last;
-                  /* Break possible cycles involving always-inline
-                     functions by ignoring edges from always-inline
-                     functions to non-always-inline functions.  */
-                  if (DECL_DISREGARD_INLINE_LIMITS (edge->caller->decl)
-                      && !DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl))
-                    continue;
-                  if (!edge->caller->aux)
+                  node2 = NULL;
+                  while (stack[stack_size].edge && !node2)
                     {
-                      if (!edge->caller->callers)
-                        edge->caller->aux = &last;
-                      else
-                        edge->caller->aux = edge->caller->callers;
-                      stack[stack_size++] = node2;
+                      edge = stack[stack_size].edge;
                       node2 = edge->caller;
-                      break;
+                      stack[stack_size].edge = edge->next_caller;
+                      /* Break possible cycles involving always-inline
+                         functions by ignoring edges from always-inline
+                         functions to non-always-inline functions.  */
+                      if (DECL_DISREGARD_INLINE_LIMITS (edge->caller->decl)
+                          && !DECL_DISREGARD_INLINE_LIMITS
+                                (cgraph_function_node (edge->callee, NULL)->decl))
+                        node2 = NULL;
+                    }
+                  for (;ipa_ref_list_refering_iterate (&stack[stack_size].node->ref_list,
+                                                       stack[stack_size].ref,
+                                                       ref) && !node2;
+                       stack[stack_size].ref++)
+                    {
+                      if (ref->use == IPA_REF_ALIAS)
+                        node2 = ipa_ref_refering_node (ref);
+                    }
+                  if (!node2)
+                    break;
+                  if (!node2->aux)
+                    {
+                      stack[++stack_size].node = node2;
+                      stack[stack_size].edge = node2->callers;
+                      stack[stack_size].ref = 0;
+                      node2->aux = (void *)(size_t)1;
                     }
                 }
-              if (node2->aux == &last)
-                {
-                  order[order_pos++] = node2;
-                  if (stack_size)
-                    node2 = stack[--stack_size];
-                  else
-                    node2 = NULL;
-                }
+              order[order_pos++] = stack[stack_size--].node;
             }
         }
   free (stack);
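
For illustration, here is a minimal standalone sketch of the walk the new code
performs: an explicit-stack DFS that descends through caller edges and alias
references and emits a node only once everything referring to it has been
emitted.  Each stack entry keeps the node, the next caller edge and the next
reference index, which is what lets the walk continue into IPA_REF_ALIAS style
references.  The node/edge/alias types below are simplified stand-ins, not the
real cgraph_node/cgraph_edge/ipa_ref API:

#include <stdio.h>

/* Simplified stand-ins for cgraph_node, cgraph_edge and ipa_ref.  */
struct node;
struct edge
{
  struct node *caller;        /* function containing the call */
  struct edge *next_caller;   /* next edge on the callee's caller list */
};
struct node
{
  const char *name;
  struct edge *callers;       /* edges calling this node */
  struct node **aliases;      /* nodes that are aliases of this node */
  int n_aliases;
  int visited;                /* plays the role of node->aux */
};
struct stack_entry
{
  struct node *node;          /* node being expanded */
  struct edge *edge;          /* next caller edge to look at */
  int ref;                    /* next alias reference to look at */
};

/* Append ROOT and everything referring to it (callers and aliases,
   recursively) to ORDER, emitting a node only after all nodes that refer
   to it.  Returns the new number of entries in ORDER.  */
static int
walk (struct node *root, struct node **order, int order_pos,
      struct stack_entry *stack)
{
  int sp = 0;
  stack[0].node = root;
  stack[0].edge = root->callers;
  stack[0].ref = 0;
  root->visited = 1;
  while (sp >= 0)
    {
      struct node *next = NULL;
      /* Prefer an unvisited caller of the node on top of the stack...  */
      while (stack[sp].edge && !next)
        {
          next = stack[sp].edge->caller;
          stack[sp].edge = stack[sp].edge->next_caller;
          if (next->visited)
            next = NULL;
        }
      /* ...otherwise an unvisited alias of it.  */
      while (!next && stack[sp].ref < stack[sp].node->n_aliases)
        {
          next = stack[sp].node->aliases[stack[sp].ref++];
          if (next->visited)
            next = NULL;
        }
      if (next)
        {
          /* Descend into the referring node.  */
          ++sp;
          stack[sp].node = next;
          stack[sp].edge = next->callers;
          stack[sp].ref = 0;
          next->visited = 1;
        }
      else
        /* Nothing left that refers to it; pop and emit.  */
        order[order_pos++] = stack[sp--].node;
    }
  return order_pos;
}

int
main (void)
{
  /* bar calls foo; foo_alias is an alias of foo.  */
  struct node foo = { "foo", 0, 0, 0, 0 };
  struct node bar = { "bar", 0, 0, 0, 0 };
  struct node foo_alias = { "foo_alias", 0, 0, 0, 0 };
  struct edge bar_calls_foo = { &bar, 0 };
  struct node *foo_aliases[1] = { &foo_alias };
  struct node *order[3];
  struct stack_entry stack[3];
  int i, n;

  foo.callers = &bar_calls_foo;
  foo.aliases = foo_aliases;
  foo.n_aliases = 1;
  n = walk (&foo, order, 0, stack);
  for (i = 0; i < n; i++)
    printf ("%s\n", order[i]->name);   /* bar, foo_alias, foo */
  return 0;
}

The explicit stack replaces the old scheme of threading the traversal state
through node->aux with the &last sentinel; once the per-node state lives in a
stack entry, adding the extra reference index needed for aliases is cheap.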