From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path:
Received: by sourceware.org (Postfix, from userid 1666)
	id 449943857735; Fri, 21 Apr 2023 07:48:52 +0000 (GMT)
DKIM-Filter: OpenDKIM Filter v2.11.0 sourceware.org 449943857735
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gcc.gnu.org;
	s=default; t=1682063332;
	bh=ng16SuVDmpXadyb9NRKho+HDBbB2fRtuK+ak3eNnLd0=;
	h=From:To:Subject:Date:From;
	b=eVDiYRBs4CjyTqUEGVVNpYBjt9ijQRGLTo6Y3OO2KA5BsGLNRPgYthgclvOqyzYKR
	 0tJECiGvBItq03zLt2Hrj219INJQM6c7dZkkanGzMAyNnYbdCsw6DegecEx/Cwzkv7
	 7lxiGyKi4bH+PSATIxrUx8A2tvh8wiOn0qxfIM4A=
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Content-Type: text/plain; charset="utf-8"
From: Richard Biener
To: gcc-cvs@gcc.gnu.org
Subject: [gcc r14-131] Fix LCM dataflow CFG order
X-Act-Checkin: gcc
X-Git-Author: Richard Biener
X-Git-Refname: refs/heads/master
X-Git-Oldrev: a80c68a08604b0ac625ac7fc59eae40b551b1176
X-Git-Newrev: a322f37a57bc164d9ab8445079655afc533ddae9
Message-Id: <20230421074852.449943857735@sourceware.org>
Date: Fri, 21 Apr 2023 07:48:52 +0000 (GMT)
List-Id:

https://gcc.gnu.org/g:a322f37a57bc164d9ab8445079655afc533ddae9

commit r14-131-ga322f37a57bc164d9ab8445079655afc533ddae9
Author: Richard Biener
Date:   Thu Apr 20 13:56:21 2023 +0200

    Fix LCM dataflow CFG order

    The following fixes the initial order in which the LCM dataflow
    routines process BBs.  For a forward problem you want reverse
    postorder; for a backward problem you want reverse postorder on
    the inverted graph.

    The LCM iteration has many other issues, but this change makes it
    easier to turn inverted_post_order_compute into a function that
    computes a reverse postorder.

            * lcm.cc (compute_antinout_edge): Use RPO on the inverted graph.
            (compute_laterin): Use RPO.
            (compute_available): Likewise.

Diff:
---
 gcc/lcm.cc | 47 ++++++++++++++++++++++++-----------------------
 1 file changed, 24 insertions(+), 23 deletions(-)

diff --git a/gcc/lcm.cc b/gcc/lcm.cc
index d7a86c75cd9..5adb4eb1a11 100644
--- a/gcc/lcm.cc
+++ b/gcc/lcm.cc
@@ -99,16 +99,20 @@ compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin,
   bitmap_vector_ones (antin, last_basic_block_for_fn (cfun));
 
   /* Put every block on the worklist; this is necessary because of the
-     optimistic initialization of ANTIN above.  */
-  int *postorder = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
-  int postorder_num = post_order_compute (postorder, false, false);
-  for (int i = 0; i < postorder_num; ++i)
+     optimistic initialization of ANTIN above.  Use reverse postorder
+     on the inverted graph to make the backward dataflow problem require
+     less iterations.  */
+  auto_vec<int> postorder;
+  inverted_post_order_compute (&postorder);
+  for (int i = postorder.length () - 1; i >= 0; --i)
     {
       bb = BASIC_BLOCK_FOR_FN (cfun, postorder[i]);
+      if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
+	  || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
+	continue;
       *qin++ = bb;
       bb->aux = bb;
     }
-  free (postorder);
 
   qin = worklist;
   qend = &worklist[n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS];
@@ -270,17 +274,15 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest,
   /* Add all the blocks to the worklist.  This prevents an early exit from
      the loop given our optimistic initialization of LATER above.  */
-  auto_vec<int> postorder;
-  inverted_post_order_compute (&postorder);
-  for (unsigned int i = 0; i < postorder.length (); ++i)
+  int *rpo = XNEWVEC (int, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
+  int n = pre_and_rev_post_order_compute_fn (cfun, NULL, rpo, false);
+  for (int i = 0; i < n; ++i)
     {
-      bb = BASIC_BLOCK_FOR_FN (cfun, postorder[i]);
-      if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
-	  || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
-	continue;
+      bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
       *qin++ = bb;
       bb->aux = bb;
     }
+  free (rpo);
 
   /* Note that we do not use the last allocated element for our queue,
      as EXIT_BLOCK is never inserted into it.  */
@@ -298,13 +300,14 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest,
 	  if (qout >= qend)
 	    qout = worklist;
 
-      /* Compute the intersection of LATERIN for each incoming edge to B.  */
+      /* Compute LATERIN as the intersection of LATER for each incoming
+	 edge to BB.  */
       bitmap_ones (laterin[bb->index]);
       FOR_EACH_EDGE (e, ei, bb->preds)
 	bitmap_and (laterin[bb->index], laterin[bb->index],
 		    later[(size_t)e->aux]);
 
-      /* Calculate LATER for all outgoing edges.  */
+      /* Calculate LATER for all outgoing edges of BB.  */
       FOR_EACH_EDGE (e, ei, bb->succs)
 	if (bitmap_ior_and_compl (later[(size_t) e->aux],
 				  earliest[(size_t) e->aux],
@@ -509,19 +512,17 @@ compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout,
   bitmap_vector_ones (avout, last_basic_block_for_fn (cfun));
 
   /* Put every block on the worklist; this is necessary because of the
-     optimistic initialization of AVOUT above.  Use inverted postorder
-     to make the dataflow problem require less iterations.  */
-  auto_vec<int> postorder;
-  inverted_post_order_compute (&postorder);
-  for (unsigned int i = 0; i < postorder.length (); ++i)
+     optimistic initialization of AVOUT above.  Use reverse postorder
+     to make the forward dataflow problem require less iterations.  */
+  int *rpo = XNEWVEC (int, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
+  int n = pre_and_rev_post_order_compute_fn (cfun, NULL, rpo, false);
+  for (int i = 0; i < n; ++i)
     {
-      bb = BASIC_BLOCK_FOR_FN (cfun, postorder[i]);
-      if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
-	  || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
-	continue;
+      bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
       *qin++ = bb;
       bb->aux = bb;
     }
+  free (rpo);
 
   qin = worklist;
   qend = &worklist[n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS];
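
To see the ordering rule from the commit message in isolation: for a
forward problem the worklist is seeded in reverse postorder (RPO) of the
CFG, so each block is first processed after its predecessors; for a
backward problem it is seeded in RPO of the inverted graph, so each block
is first processed after its successors.  The standalone C++ sketch below
illustrates this under stated assumptions: Graph, dfs_postorder and the
example diamond CFG are hypothetical and are not GCC's CFG API, and the
RPO here is computed by reversing a DFS postorder rather than by
pre_and_rev_post_order_compute_fn.

// Minimal sketch of RPO vs. RPO-on-the-inverted-graph; not gcc/lcm.cc code.
#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

struct Graph
{
  std::vector<std::vector<int>> succs;	// successor lists
  std::vector<std::vector<int>> preds;	// predecessor lists
};

/* Iterative DFS postorder from ROOT following EDGES.  Pass succs for the
   normal graph, preds for the inverted graph.  */
static std::vector<int>
dfs_postorder (const std::vector<std::vector<int>> &edges, int root)
{
  std::vector<int> post;
  std::vector<char> seen (edges.size (), 0);
  std::vector<std::pair<int, size_t>> stack{{root, 0}};
  seen[root] = 1;
  while (!stack.empty ())
    {
      auto &[n, i] = stack.back ();
      if (i < edges[n].size ())
	{
	  int s = edges[n][i++];
	  if (!seen[s])
	    {
	      seen[s] = 1;
	      stack.push_back ({s, 0});
	    }
	}
      else
	{
	  /* All successors done: emit N in postorder.  */
	  post.push_back (n);
	  stack.pop_back ();
	}
    }
  return post;
}

int
main ()
{
  /* Diamond CFG with a back edge: 0 -> 1 -> {2,3} -> 4, 4 -> 1.  */
  Graph g;
  g.succs = {{1}, {2, 3}, {4}, {4}, {1}};
  g.preds = {{}, {0, 4}, {1}, {1}, {2, 3}};

  /* Forward problem: RPO of the normal graph, i.e. reversed DFS
     postorder from the entry block.  */
  std::vector<int> rpo = dfs_postorder (g.succs, 0);
  std::reverse (rpo.begin (), rpo.end ());

  /* Backward problem: RPO of the inverted graph, i.e. reversed DFS
     postorder from the exit block following predecessor edges.  */
  std::vector<int> inv_rpo = dfs_postorder (g.preds, 4);
  std::reverse (inv_rpo.begin (), inv_rpo.end ());

  for (int b : rpo) printf ("%d ", b);		// prints: 0 1 3 2 4
  printf ("(forward seeding order)\n");
  for (int b : inv_rpo) printf ("%d ", b);	// prints: 4 3 2 1 0
  printf ("(backward seeding order)\n");
}

With this seeding, every block's inputs (predecessor outputs for the
forward problem, successor inputs for the backward one) are already
updated before the block is first visited, so on a reducible CFG the
worklist typically needs re-queuing only around loop back edges rather
than rippling changes one block per pass.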