Subject: [PATCH draft v2] sched: Don't skip empty block in scheduling

This draft stops skipping empty blocks (those for which no_real_insns_p
returns true, i.e. blocks containing only NOTEs and LABELs) during
scheduling: no_real_insns_p is stubbed to always return false, the haifa
scheduler is taught to tolerate a lone NOTE insn on the paths that can
now see one, SCHED_GROUP_P is allowed on NOTEs, and sched-rgn records
which blocks were empty initially so that rgn_n_insns stays consistent
when a block only becomes empty during scheduling.
---
 gcc/haifa-sched.cc | 172 ++++++++++++++++++++++++++-------------------
 gcc/rtl.h          |   4 +-
 gcc/sched-ebb.cc   |   7 +-
 gcc/sched-rgn.cc   |  23 +++++-
 4 files changed, 126 insertions(+), 80 deletions(-)

diff --git a/gcc/haifa-sched.cc b/gcc/haifa-sched.cc
index 8e8add709b3..7b4c4a92bb0 100644
--- a/gcc/haifa-sched.cc
+++ b/gcc/haifa-sched.cc
@@ -1207,6 +1207,11 @@ recompute_todo_spec (rtx_insn *next, bool for_backtrack)
   int n_replace = 0;
   bool first_p = true;
 
+  /* Since we don't skip no_real_insns_p blocks any more, it's
+     possible to meet a NOTE insn now; return early if so.  */
+  if (NOTE_P (next))
+    return 0;
+
   if (sd_lists_empty_p (next, SD_LIST_BACK))
     /* NEXT has all its dependencies resolved.  */
     return 0;
@@ -1726,6 +1731,11 @@ setup_insn_reg_pressure_info (rtx_insn *insn)
   int *max_reg_pressure;
   static int death[N_REG_CLASSES];
 
+  /* Since we don't skip no_real_insns_p blocks any more, it's possible
+     to schedule a NOTE insn now; check for it first.  */
+  if (NOTE_P (insn))
+    return;
+
   gcc_checking_assert (!DEBUG_INSN_P (insn));
 
   excess_cost_change = 0;
@@ -4017,10 +4027,10 @@ schedule_insn (rtx_insn *insn)
 
   /* Scheduling instruction should have all its dependencies resolved and
      should have been removed from the ready list.  */
-  gcc_assert (sd_lists_empty_p (insn, SD_LIST_HARD_BACK));
+  gcc_assert (NOTE_P (insn) || sd_lists_empty_p (insn, SD_LIST_HARD_BACK));
 
   /* Reset debug insns invalidated by moving this insn.  */
-  if (MAY_HAVE_DEBUG_BIND_INSNS && !DEBUG_INSN_P (insn))
+  if (MAY_HAVE_DEBUG_BIND_INSNS && NONDEBUG_INSN_P (insn))
     for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
          sd_iterator_cond (&sd_it, &dep);)
       {
@@ -4106,61 +4116,66 @@ schedule_insn (rtx_insn *insn)
 
   check_clobbered_conditions (insn);
 
-  /* Update dependent instructions.  First, see if by scheduling this insn
-     now we broke a dependence in a way that requires us to change another
-     insn.  */
-  for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
-       sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
+  /* Since we don't skip no_real_insns_p blocks any more, it's possible
+     to schedule a NOTE insn now; check for it first.  */
+  if (!NOTE_P (insn))
     {
-      struct dep_replacement *desc = DEP_REPLACE (dep);
-      rtx_insn *pro = DEP_PRO (dep);
-      if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED
-          && desc != NULL && desc->insn == pro)
-        apply_replacement (dep, false);
-    }
+      /* Update dependent instructions.  First, see if by scheduling this insn
+         now we broke a dependence in a way that requires us to change another
+         insn.  */
+      for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
+           sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
+        {
+          struct dep_replacement *desc = DEP_REPLACE (dep);
+          rtx_insn *pro = DEP_PRO (dep);
+          if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED && desc != NULL
+              && desc->insn == pro)
+            apply_replacement (dep, false);
+        }
 
-  /* Go through and resolve forward dependencies.  */
-  for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
-       sd_iterator_cond (&sd_it, &dep);)
-    {
-      rtx_insn *next = DEP_CON (dep);
-      bool cancelled = (DEP_STATUS (dep) & DEP_CANCELLED) != 0;
+      /* Go through and resolve forward dependencies.  */
+      for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
+           sd_iterator_cond (&sd_it, &dep);)
+        {
+          rtx_insn *next = DEP_CON (dep);
+          bool cancelled = (DEP_STATUS (dep) & DEP_CANCELLED) != 0;
 
-      /* Resolve the dependence between INSN and NEXT.
-         sd_resolve_dep () moves current dep to another list thus
-         advancing the iterator.  */
-      sd_resolve_dep (sd_it);
+          /* Resolve the dependence between INSN and NEXT.
+             sd_resolve_dep () moves current dep to another list thus
+             advancing the iterator.  */
+          sd_resolve_dep (sd_it);
 
-      if (cancelled)
-        {
-          if (must_restore_pattern_p (next, dep))
-            restore_pattern (dep, false);
-          continue;
-        }
+          if (cancelled)
+            {
+              if (must_restore_pattern_p (next, dep))
+                restore_pattern (dep, false);
+              continue;
+            }
 
-      /* Don't bother trying to mark next as ready if insn is a debug
-         insn.  If insn is the last hard dependency, it will have
-         already been discounted.  */
-      if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
-        continue;
+          /* Don't bother trying to mark next as ready if insn is a debug
+             insn.  If insn is the last hard dependency, it will have
+             already been discounted.  */
+          if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
+            continue;
 
-      if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
-        {
-          int effective_cost;
+          if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
+            {
+              int effective_cost;
 
-          effective_cost = try_ready (next);
+              effective_cost = try_ready (next);
 
-          if (effective_cost >= 0
-              && SCHED_GROUP_P (next)
-              && advance < effective_cost)
-            advance = effective_cost;
-        }
-      else
-        /* Check always has only one forward dependence (to the first insn in
-           the recovery block), therefore, this will be executed only once.  */
-        {
-          gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
-          fix_recovery_deps (RECOVERY_BLOCK (insn));
+              if (effective_cost >= 0 && SCHED_GROUP_P (next)
+                  && advance < effective_cost)
+                advance = effective_cost;
+            }
+          else
+            /* Check always has only one forward dependence (to the first insn
+               in the recovery block), therefore, this will be executed only
+               once.  */
+            {
+              gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
+              fix_recovery_deps (RECOVERY_BLOCK (insn));
+            }
         }
     }
 
@@ -4170,9 +4185,9 @@ schedule_insn (rtx_insn *insn)
      may use this information to decide how the instruction should
      be aligned.  */
   if (issue_rate > 1
+      && NONDEBUG_INSN_P (insn)
       && GET_CODE (PATTERN (insn)) != USE
-      && GET_CODE (PATTERN (insn)) != CLOBBER
-      && !DEBUG_INSN_P (insn))
+      && GET_CODE (PATTERN (insn)) != CLOBBER)
     {
       if (reload_completed)
         PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
@@ -5036,8 +5051,11 @@ get_ebb_head_tail (basic_block beg, basic_block end,
 /* Return true if there are no real insns in the range [ HEAD, TAIL ].  */
 
 bool
-no_real_insns_p (const rtx_insn *head, const rtx_insn *tail)
+no_real_insns_p (const rtx_insn *head ATTRIBUTE_UNUSED,
+                 const rtx_insn *tail ATTRIBUTE_UNUSED)
 {
+  return false;
+#if 0
   while (head != NEXT_INSN (tail))
     {
       if (!NOTE_P (head) && !LABEL_P (head))
@@ -5045,6 +5063,7 @@ no_real_insns_p (const rtx_insn *head, const rtx_insn *tail)
       head = NEXT_INSN (head);
     }
   return true;
+#endif
 }
 
 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
@@ -6224,8 +6243,12 @@ commit_schedule (rtx_insn *prev_head, rtx_insn *tail, basic_block *target_bb)
        scheduled_insns.iterate (i, &insn);
        i++)
     {
-      if (control_flow_insn_p (last_scheduled_insn)
-          || current_sched_info->advance_target_bb (*target_bb, insn))
+      /* Since we don't skip no_real_insns_p blocks any more, it's possible
+         to schedule a NOTE insn now; check for it here to avoid an
+         unexpected target bb advance.  */
+      if ((control_flow_insn_p (last_scheduled_insn)
+           || current_sched_info->advance_target_bb (*target_bb, insn))
+          && !NOTE_P (insn))
         {
           *target_bb = current_sched_info->advance_target_bb
             (*target_bb, 0);
@@ -6245,7 +6268,7 @@ commit_schedule (rtx_insn *prev_head, rtx_insn *tail, basic_block *target_bb)
       (*current_sched_info->begin_move_insn) (insn, last_scheduled_insn);
       move_insn (insn, last_scheduled_insn,
                  current_sched_info->next_tail);
-      if (!DEBUG_INSN_P (insn))
+      if (NONDEBUG_INSN_P (insn))
         reemit_notes (insn);
       last_scheduled_insn = insn;
     }
@@ -6296,7 +6319,7 @@ prune_ready_list (state_t temp_state, bool first_cycle_insn_p,
       int cost = 0;
       const char *reason = "resource conflict";
 
-      if (DEBUG_INSN_P (insn))
+      if (DEBUG_INSN_P (insn) || NOTE_P (insn))
         continue;
 
       if (sched_group_found && !SCHED_GROUP_P (insn)
@@ -6504,7 +6527,7 @@ schedule_block (basic_block *target_bb, state_t init_state)
      and caused problems because schedule_block and
      compute_forward_dependences had different notions of what the
      "head" insn was.  */
-  gcc_assert (head != tail || INSN_P (head));
+  gcc_assert (head != tail || INSN_P (head) || NOTE_P (head));
 
   haifa_recovery_bb_recently_added_p = false;
@@ -6539,15 +6562,15 @@ schedule_block (basic_block *target_bb, state_t init_state)
   if (targetm.sched.init)
     targetm.sched.init (sched_dump, sched_verbose, ready.veclen);
 
+  gcc_assert (((NOTE_P (prev_head) || DEBUG_INSN_P (prev_head))
+               && BLOCK_FOR_INSN (prev_head) == *target_bb)
+              || (head == tail && NOTE_P (head)));
+
   /* We start inserting insns after PREV_HEAD.  */
   last_scheduled_insn = prev_head;
   last_nondebug_scheduled_insn = NULL;
   nonscheduled_insns_begin = NULL;
 
-  gcc_assert ((NOTE_P (last_scheduled_insn)
-               || DEBUG_INSN_P (last_scheduled_insn))
-              && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
-
   /* Initialize INSN_QUEUE.  Q_SIZE is the total number of insns in the
      queue.  */
   q_ptr = 0;
@@ -6725,15 +6748,16 @@ schedule_block (basic_block *target_bb, state_t init_state)
             }
         }
 
-      /* We don't want md sched reorder to even see debug isns, so put
-         them out right away.  */
-      if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
+      /* We don't want md sched reorder to even see debug and note insns,
+         so put them out right away.  */
+      if (ready.n_ready
+          && !NONDEBUG_INSN_P (ready_element (&ready, 0))
           && (*current_sched_info->schedule_more_p) ())
         {
-          while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
+          while (ready.n_ready && !NONDEBUG_INSN_P (ready_element (&ready, 0)))
             {
               rtx_insn *insn = ready_remove_first (&ready);
-              gcc_assert (DEBUG_INSN_P (insn));
+              gcc_assert (DEBUG_INSN_P (insn) || NOTE_P (insn));
               (*current_sched_info->begin_schedule_ready) (insn);
               scheduled_insns.safe_push (insn);
               last_scheduled_insn = insn;
@@ -7145,17 +7169,18 @@ schedule_block (basic_block *target_bb, state_t init_state)
 int
 set_priorities (rtx_insn *head, rtx_insn *tail)
 {
+  /* Since we don't skip no_real_insns_p blocks any more, it's
+     possible to meet a NOTE insn now; we don't need to compute
+     priority for such a block, so return early.  */
+  if (head == tail && !INSN_P (head))
+    return 1;
+
   rtx_insn *insn;
-  int n_insn;
+  int n_insn = 0;
   int sched_max_insns_priority =
         current_sched_info->sched_max_insns_priority;
   rtx_insn *prev_head;
 
-  if (head == tail && ! INSN_P (head))
-    gcc_unreachable ();
-
-  n_insn = 0;
-
   prev_head = PREV_INSN (head);
   for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
     {
@@ -7688,7 +7713,8 @@ fix_tick_ready (rtx_insn *next)
 {
   int tick, delay;
 
-  if (!DEBUG_INSN_P (next) && !sd_lists_empty_p (next, SD_LIST_RES_BACK))
+  if (NONDEBUG_INSN_P (next)
+      && !sd_lists_empty_p (next, SD_LIST_RES_BACK))
     {
       int full_p;
       sd_iterator_def sd_it;
diff --git a/gcc/rtl.h b/gcc/rtl.h
index e4b6cc0dbb5..34b3f31d1ee 100644
--- a/gcc/rtl.h
+++ b/gcc/rtl.h
@@ -2695,8 +2695,8 @@ do {				\
 /* During sched, 1 if RTX is an insn that must be scheduled together
    with the preceding insn.  */
 #define SCHED_GROUP_P(RTX)					\
-  (RTL_FLAG_CHECK4 ("SCHED_GROUP_P", (RTX), DEBUG_INSN, INSN,	\
-		    JUMP_INSN, CALL_INSN)->in_struct)
+  (RTL_FLAG_CHECK5 ("SCHED_GROUP_P", (RTX), DEBUG_INSN, INSN,	\
+		    JUMP_INSN, CALL_INSN, NOTE)->in_struct)
 
 /* For a SET rtx, SET_DEST is the place that is set
    and SET_SRC is the value it is set to.  */
diff --git a/gcc/sched-ebb.cc b/gcc/sched-ebb.cc
index 110fcdbca4d..c07e65696b9 100644
--- a/gcc/sched-ebb.cc
+++ b/gcc/sched-ebb.cc
@@ -478,12 +478,10 @@ schedule_ebb (rtx_insn *head, rtx_insn *tail, bool modulo_scheduling)
      a note or two.  */
   while (head != tail)
     {
-      if (NOTE_P (head) || DEBUG_INSN_P (head))
+      if (LABEL_P (head) || NOTE_P (head) || DEBUG_INSN_P (head))
         head = NEXT_INSN (head);
       else if (NOTE_P (tail) || DEBUG_INSN_P (tail))
         tail = PREV_INSN (tail);
-      else if (LABEL_P (head))
-        head = NEXT_INSN (head);
       else
         break;
     }
@@ -494,7 +492,8 @@ schedule_ebb (rtx_insn *head, rtx_insn *tail, bool modulo_scheduling)
   if (no_real_insns_p (head, tail))
     return BLOCK_FOR_INSN (tail);
 
-  gcc_assert (INSN_P (head) && INSN_P (tail));
+  gcc_assert ((NOTE_P (head) && head == tail)
+              || (INSN_P (head) && INSN_P (tail)));
 
   if (!bitmap_bit_p (&dont_calc_deps, first_bb->index))
     {
diff --git a/gcc/sched-rgn.cc b/gcc/sched-rgn.cc
index e5964f54ead..658349ba2b6 100644
--- a/gcc/sched-rgn.cc
+++ b/gcc/sched-rgn.cc
@@ -228,6 +228,9 @@ static edgeset *pot_split;
 /* For every bb, a set of its ancestor edges.  */
 static edgeset *ancestor_edges;
 
+/* A bb's bit is set here if the bb was empty initially.  */
+static bitmap rgn_init_empty_bb;
+
 #define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN))))
 
 /* Speculative scheduling functions.  */
@@ -3216,6 +3219,14 @@ schedule_region (int rgn)
       /* Clean up.  */
       if (current_nr_blocks > 1)
         free_trg_info ();
+
+      /* If this empty block wasn't empty initially, the lone NOTE
+         inside was not counted when computing rgn_n_insns, so fix
+         it up now.  */
+      if (head == tail
+          && NOTE_P (head)
+          && !bitmap_bit_p (rgn_init_empty_bb, bb))
+        rgn_n_insns++;
     }
 
   /* Sanity check: verify that all region insns were scheduled.  */
@@ -3448,7 +3459,16 @@ sched_rgn_local_init (int rgn)
             continue;
           FOR_EACH_EDGE (e, ei, block->succs)
             e->aux = NULL;
-        }
+        }
+    }
+
+  rgn_init_empty_bb = BITMAP_ALLOC (NULL);
+  for (bb = 0; bb < current_nr_blocks; bb++)
+    {
+      rtx_insn *head, *tail;
+      get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
+      if (head == tail && NOTE_P (head))
+        bitmap_set_bit (rgn_init_empty_bb, bb);
     }
 }
 
@@ -3461,6 +3481,7 @@ sched_rgn_local_free (void)
   sbitmap_vector_free (pot_split);
   sbitmap_vector_free (ancestor_edges);
   free (rgn_edges);
+  BITMAP_FREE (rgn_init_empty_bb);
 }
 
 /* Free data computed for the finished region.  */
-- 
2.39.3
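
P.S. For anyone reading along, here is a minimal standalone sketch of the
invariant the changes above lean on.  It is not part of the patch:
stub_insn and block_has_no_real_insns are made-up stand-ins for rtx_insn
and the now-stubbed no_real_insns_p.  The point is only that an empty
block degenerates to head == tail with a lone bb-header NOTE, which is
exactly the shape the new NOTE_P early returns and the rgn_init_empty_bb
bitmap key off.

// Standalone model only; none of these types come from GCC.
#include <cassert>

enum insn_kind { REAL_INSN, NOTE_KIND, LABEL_KIND };

struct stub_insn
{
  insn_kind kind;
  stub_insn *next;
};

// Mirrors what the disabled no_real_insns_p used to compute: true iff
// every insn in [head, tail] is a NOTE or a LABEL.
static bool
block_has_no_real_insns (const stub_insn *head, const stub_insn *tail)
{
  for (const stub_insn *i = head; i != tail->next; i = i->next)
    if (i->kind != NOTE_KIND && i->kind != LABEL_KIND)
      return false;
  return true;
}

int
main ()
{
  // An initially-empty block: a lone NOTE with head == tail.  After
  // this patch such blocks are scheduled rather than skipped, and the
  // scheduler paths bail out early when handed the NOTE.
  stub_insn note = { NOTE_KIND, nullptr };
  assert (block_has_no_real_insns (&note, &note));

  // A block with one real insn is never considered empty.
  stub_insn real = { REAL_INSN, nullptr };
  stub_insn head = { NOTE_KIND, &real };
  assert (!block_has_no_real_insns (&head, &real));
  return 0;
}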