From: David Malcolm
To: gcc-patches@gcc.gnu.org
Cc: David Malcolm
Subject: [PATCH 080/236] haifa-sched.c: Use rtx_insn
Date: Wed, 06 Aug 2014 17:19:00 -0000
Message-Id: <1407345815-14551-81-git-send-email-dmalcolm@redhat.com>
In-Reply-To: <1407345815-14551-1-git-send-email-dmalcolm@redhat.com>
References: <1407345815-14551-1-git-send-email-dmalcolm@redhat.com>

gcc/
        * haifa-sched.c (bb_header): Strengthen from rtx * to rtx_insn **.
        (add_delay_dependencies): Strengthen local "pro" from rtx to
        rtx_insn *.
        (recompute_todo_spec): Likewise.
        (dep_cost_1): Likewise for locals "insn", "used".
        (schedule_insn): Likewise for local "dbg".
        (schedule_insn): Likewise for locals "pro", "next".
        (unschedule_insns_until): Likewise for local "con".
        (restore_pattern): Likewise for local "next".
        (estimate_insn_tick): Likewise for local "pro".
        (resolve_dependencies): Likewise for local "next".
        (fix_inter_tick): Likewise.
        (fix_tick_ready): Likewise for local "pro".
        (add_to_speculative_block): Likewise for locals "check", "twin",
        "pro".
        (sched_extend_bb): Likewise for locals "end", "insn".
        (init_before_recovery): Likewise for local "x".
        (sched_create_recovery_block): Likewise for local "barrier".
        (create_check_block_twin): Likewise for local "pro".
        (fix_recovery_deps): Likewise for locals "note", "insn", "jump",
        "consumer".
        (unlink_bb_notes): Update for change to type of bb_header.
        Strengthen locals "prev", "label", "note", "next" from rtx to
        rtx_insn *.
        (clear_priorities): Likewise for local "pro".
---
 gcc/haifa-sched.c | 60 ++++++++++++++++++++++++++++---------------------------
 1 file changed, 31 insertions(+), 29 deletions(-)

diff --git a/gcc/haifa-sched.c b/gcc/haifa-sched.c
index 04a3576..fd46977 100644
--- a/gcc/haifa-sched.c
+++ b/gcc/haifa-sched.c
@@ -261,7 +261,7 @@ bool haifa_recovery_bb_ever_added_p;
 static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
 
 /* Array used in {unlink, restore}_bb_notes.  */
-static rtx *bb_header = 0;
+static rtx_insn **bb_header = 0;
 
 /* Basic block after which recovery blocks will be created.  */
 static basic_block before_recovery;
@@ -798,7 +798,7 @@ add_delay_dependencies (rtx insn)
 
   FOR_EACH_DEP (pair->i2, SD_LIST_BACK, sd_it, dep)
     {
-      rtx pro = DEP_PRO (dep);
+      rtx_insn *pro = DEP_PRO (dep);
       struct delay_pair *other_pair
         = delay_htab_i2.find_with_hash (pro, htab_hash_pointer (pro));
       if (!other_pair || other_pair->stages)
@@ -1208,7 +1208,7 @@ recompute_todo_spec (rtx next, bool for_backtrack)
 
   FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
     {
-      rtx pro = DEP_PRO (dep);
+      rtx_insn *pro = DEP_PRO (dep);
       ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
 
       if (DEBUG_INSN_P (pro) && !DEBUG_INSN_P (next))
@@ -1414,8 +1414,8 @@ insn_cost (rtx insn)
 int
 dep_cost_1 (dep_t link, dw_t dw)
 {
-  rtx insn = DEP_PRO (link);
-  rtx used = DEP_CON (link);
+  rtx_insn *insn = DEP_PRO (link);
+  rtx_insn *used = DEP_CON (link);
   int cost;
 
   if (DEP_COST (link) != UNKNOWN_DEP_COST)
@@ -3787,7 +3787,7 @@ schedule_insn (rtx insn)
   for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
        sd_iterator_cond (&sd_it, &dep);)
     {
-      rtx dbg = DEP_PRO (dep);
+      rtx_insn *dbg = DEP_PRO (dep);
       struct reg_use_data *use, *next;
 
       if (DEP_STATUS (dep) & DEP_CANCELLED)
@@ -3876,7 +3876,7 @@ schedule_insn (rtx insn)
        sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
     {
       struct dep_replacement *desc = DEP_REPLACE (dep);
-      rtx pro = DEP_PRO (dep);
+      rtx_insn *pro = DEP_PRO (dep);
       if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED && desc != NULL
           && desc->insn == pro)
         apply_replacement (dep, false);
@@ -3886,7 +3886,7 @@ schedule_insn (rtx insn)
   for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
       sd_iterator_cond (&sd_it, &dep);)
     {
-      rtx next = DEP_CON (dep);
+      rtx_insn *next = DEP_CON (dep);
      bool cancelled = (DEP_STATUS (dep) & DEP_CANCELLED) != 0;
 
       /* Resolve the dependence between INSN and NEXT.
@@ -4251,7 +4251,7 @@ unschedule_insns_until (rtx insn)
       for (sd_it = sd_iterator_start (last, SD_LIST_RES_FORW);
            sd_iterator_cond (&sd_it, &dep);)
         {
-          rtx con = DEP_CON (dep);
+          rtx_insn *con = DEP_CON (dep);
           sd_unresolve_dep (sd_it);
           if (!MUST_RECOMPUTE_SPEC_P (con))
             {
@@ -4496,7 +4496,7 @@ apply_replacement (dep_t dep, bool immediately)
 static void
 restore_pattern (dep_t dep, bool immediately)
 {
-  rtx next = DEP_CON (dep);
+  rtx_insn *next = DEP_CON (dep);
   int tick = INSN_TICK (next);
 
   /* If we already scheduled the insn, the modified version is
@@ -4581,7 +4581,7 @@ estimate_insn_tick (bitmap processed, rtx insn, int budget)
 
   FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
     {
-      rtx pro = DEP_PRO (dep);
+      rtx_insn *pro = DEP_PRO (dep);
       int t;
 
       if (DEP_STATUS (dep) & DEP_CANCELLED)
@@ -4658,7 +4658,7 @@ resolve_dependencies (rtx insn)
   for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
       sd_iterator_cond (&sd_it, &dep);)
     {
-      rtx next = DEP_CON (dep);
+      rtx_insn *next = DEP_CON (dep);
 
       if (sched_verbose >= 4)
         fprintf (sched_dump, ";;\t\tdep %d against %d\n", INSN_UID (insn),
@@ -6926,7 +6926,7 @@ fix_inter_tick (rtx head, rtx tail)
 
       FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
         {
-          rtx next;
+          rtx_insn *next;
 
           next = DEP_CON (dep);
           tick = INSN_TICK (next);
@@ -7112,7 +7112,7 @@ fix_tick_ready (rtx next)
 
       FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
         {
-          rtx pro = DEP_PRO (dep);
+          rtx_insn *pro = DEP_PRO (dep);
           int tick1;
 
           gcc_assert (INSN_TICK (pro) >= MIN_TICK);
@@ -7380,7 +7380,7 @@ add_to_speculative_block (rtx insn)
   for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
        sd_iterator_cond (&sd_it, &dep);)
     {
-      rtx check = DEP_PRO (dep);
+      rtx_insn *check = DEP_PRO (dep);
 
       if (IS_SPECULATION_SIMPLE_CHECK_P (check))
         {
@@ -7399,7 +7399,7 @@ add_to_speculative_block (rtx insn)
 
   while (1)
     {
-      rtx check, twin;
+      rtx_insn *check, *twin;
       basic_block rec;
 
       /* Get the first backward dependency of INSN.  */
@@ -7436,7 +7436,7 @@ add_to_speculative_block (rtx insn)
          instructions from REC.  */
       FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
         {
-          rtx pro = DEP_PRO (dep);
+          rtx_insn *pro = DEP_PRO (dep);
 
           gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
 
@@ -7458,7 +7458,7 @@ add_to_speculative_block (rtx insn)
       for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
            sd_iterator_cond (&sd_it, &dep);)
         {
-          rtx pro = DEP_PRO (dep);
+          rtx_insn *pro = DEP_PRO (dep);
 
           if (BLOCK_FOR_INSN (pro) == rec)
             sd_delete_dep (sd_it);
@@ -7541,8 +7541,8 @@ static void
 sched_extend_bb (void)
 {
   /* The following is done to keep current_sched_info->next_tail non null.  */
-  rtx end = BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
-  rtx insn = DEBUG_INSN_P (end) ? prev_nondebug_insn (end) : end;
+  rtx_insn *end = BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
+  rtx_insn *insn = DEBUG_INSN_P (end) ? prev_nondebug_insn (end) : end;
   if (NEXT_INSN (end) == 0
       || (!NOTE_P (insn)
           && !LABEL_P (insn)
@@ -7582,7 +7582,8 @@ init_before_recovery (basic_block *before_recovery_ptr)
          Between these two blocks recovery blocks will be emitted.  */
 
       basic_block single, empty;
-      rtx x, label;
+      rtx_insn *x;
+      rtx label;
 
       /* If the fallthrough edge to exit we've found is from the block we've
          created before, don't do anything more.  */
@@ -7646,7 +7647,7 @@ basic_block
 sched_create_recovery_block (basic_block *before_recovery_ptr)
 {
   rtx label;
-  rtx barrier;
+  rtx_insn *barrier;
   basic_block rec;
 
   haifa_recovery_bb_recently_added_p = true;
@@ -7855,7 +7856,7 @@ create_check_block_twin (rtx insn, bool mutate_p)
   /* First, create dependencies between INSN's producers and CHECK & TWIN.  */
   FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
     {
-      rtx pro = DEP_PRO (dep);
+      rtx_insn *pro = DEP_PRO (dep);
       ds_t ds;
 
       /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
@@ -7999,7 +8000,8 @@ create_check_block_twin (rtx insn, bool mutate_p)
 static void
 fix_recovery_deps (basic_block rec)
 {
-  rtx note, insn, jump, ready_list = 0;
+  rtx_insn *note, *insn, *jump;
+  rtx ready_list = 0;
   bitmap_head in_ready;
   rtx link;
 
@@ -8020,7 +8022,7 @@ fix_recovery_deps (basic_block rec)
       for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
            sd_iterator_cond (&sd_it, &dep);)
         {
-          rtx consumer = DEP_CON (dep);
+          rtx_insn *consumer = DEP_CON (dep);
 
           if (BLOCK_FOR_INSN (consumer) != rec)
             {
@@ -8144,7 +8146,7 @@ unlink_bb_notes (basic_block first, basic_block last)
   if (first == last)
     return;
 
-  bb_header = XNEWVEC (rtx, last_basic_block_for_fn (cfun));
+  bb_header = XNEWVEC (rtx_insn *, last_basic_block_for_fn (cfun));
 
   /* Make a sentinel.  */
   if (last->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
@@ -8153,7 +8155,7 @@ unlink_bb_notes (basic_block first, basic_block last)
   first = first->next_bb;
   do
     {
-      rtx prev, label, note, next;
+      rtx_insn *prev, *label, *note, *next;
 
       label = BB_HEAD (last);
       if (LABEL_P (label))
@@ -8194,7 +8196,7 @@ restore_bb_notes (basic_block first)
   while (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
          && bb_header[first->index])
     {
-      rtx prev, label, note, next;
+      rtx_insn *prev, *label, *note, *next;
 
       label = bb_header[first->index];
       prev = PREV_INSN (label);
@@ -8328,7 +8330,7 @@ clear_priorities (rtx insn, rtx_vec_t *roots_ptr)
 
   FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
     {
-      rtx pro = DEP_PRO (dep);
+      rtx_insn *pro = DEP_PRO (dep);
 
       if (INSN_PRIORITY_STATUS (pro) >= 0
           && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
-- 
1.8.5.3
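
The conversions above are mechanical, but the pattern they rely on is easy to show in
isolation.  In this series rtx_insn derives from rtx_def (and rtx is a typedef for
rtx_def *), so a local can be tightened from the base pointer typedef to the derived
pointer type wherever its initializer already yields the derived type, as DEP_PRO and
DEP_CON evidently do by the point this patch applies.  The sketch below is a minimal
standalone illustration, not GCC code: rtx_base, insn_node, dep_edge, dep_pro and
dep_con are invented stand-ins for rtx_def, rtx_insn, dep_t, DEP_PRO and DEP_CON.

/* Standalone illustration only; none of these names are GCC's.  */

#include <cassert>
#include <cstdio>

struct rtx_base { };                /* plays the role of rtx_def */
typedef rtx_base *xrtx;             /* plays the role of the rtx typedef */

struct insn_node : rtx_base         /* plays the role of rtx_insn */
{
  int uid;
  explicit insn_node (int uid_) : uid (uid_) {}
};

struct dep_edge                     /* a producer/consumer dependence */
{
  insn_node *pro;
  insn_node *con;
};

static insn_node *dep_pro (const dep_edge &d) { return d.pro; }
static insn_node *dep_con (const dep_edge &d) { return d.con; }

int
main ()
{
  insn_node a (1), b (2);
  dep_edge d = { &a, &b };

  /* Weak form: compiles, but the value's insn-ness is lost, so
     insn-only data (uid here) is unreachable without a cast.  */
  xrtx weak = dep_pro (d);

  /* Strengthened form: same object, stronger static type.  */
  insn_node *pro = dep_pro (d);
  insn_node *con = dep_con (d);
  std::printf ("dep: insn %d -> insn %d\n", pro->uid, con->uid);

  /* The derived pointer still converts implicitly to the base type,
     so callees that expect the weaker type keep working unchanged.  */
  assert (weak == pro);
  return 0;
}

Compiled with any C++98-or-later compiler this prints "dep: insn 1 -> insn 2"; the
strengthening only changes declarations, which is why the diff above touches nothing
but local types and bb_header's element type.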