> > > +	      && LOOP_VINFO_LOOP_IV_COND (loop_vinfo) != cond)
> > > +	    *relevant = vect_used_in_scope;
> >
> > but why not simply mark all gconds as vect_used_in_scope?
>

We break outer-loop vectorization since doing so would pull the inner
loop's exit into scope for the outer loop.  Also we can't force the
loop's main IV exit to be in scope, since it will be replaced by the
vectorizer.

I've updated the code to remove the quadratic lookup.

> > > +    }
> > >
> > >    /* changing memory. */
> > >    if (gimple_code (stmt_info->stmt) != GIMPLE_PHI)
> > > @@ -374,6 +379,11 @@ vect_stmt_relevant_p (stmt_vec_info stmt_info, loop_vec_info loop_vinfo,
> > >        *relevant = vect_used_in_scope;
> > >      }
> > >
> > > +  auto_vec<edge> exits = get_loop_exit_edges (loop);
> > > +  auto_bitmap exit_bbs;
> > > +  for (edge exit : exits)
> >
> > is it your mail client messing patches up?  missing line-break
> > again.
>

Yeah, seems it was, hopefully fixed now.

> > > +    bitmap_set_bit (exit_bbs, exit->dest->index);
> > > +
> >
> > you don't seem to use the bitmap?
> >
> > >    /* uses outside the loop. */
> > >    FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt_info->stmt, op_iter, SSA_OP_DEF)
> > >      {
> > > @@ -392,7 +402,6 @@ vect_stmt_relevant_p (stmt_vec_info stmt_info, loop_vec_info loop_vinfo,
> > > 	      /* We expect all such uses to be in the loop exit phis
> > > 		 (because of loop closed form)   */
> > > 	      gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
> > > -	      gcc_assert (bb == single_exit (loop)->dest);
> > >
> > > 	      *live_p = true;
> > > 	    }
> > > @@ -793,6 +802,20 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo, bool *fatal)
> > > 		      return res;
> > > 		    }
> > > 		}
> > > +	    }
> > > +	  else if (gcond *cond = dyn_cast <gcond *> (stmt_vinfo->stmt))
> > > +	    {
> > > +	      enum tree_code rhs_code = gimple_cond_code (cond);
> > > +	      gcc_assert (TREE_CODE_CLASS (rhs_code) == tcc_comparison);
> > > +	      opt_result res
> > > +		= process_use (stmt_vinfo, gimple_cond_lhs (cond),
> > > +			       loop_vinfo, relevant, &worklist, false);
> > > +	      if (!res)
> > > +		return res;
> > > +	      res = process_use (stmt_vinfo, gimple_cond_rhs (cond),
> > > +				 loop_vinfo, relevant, &worklist, false);
> > > +	      if (!res)
> > > +		return res;
> > > 	    }
> >
> > I guess we're missing an
> >
> >   else
> >     gcc_unreachable ();
> >
> > to catch not handled stmt kinds (do we have gcond patterns yet?)
> >
> > > 	  else if (gcall *call = dyn_cast <gcall *> (stmt_vinfo->stmt))
> > > 	    {
> > > @@ -13043,11 +13066,15 @@ vect_analyze_stmt (vec_info *vinfo,
> > > 					     node_instance, cost_vec);
> > >        if (!res)
> > > 	return res;
> > > -    }
> > > +    }
> > > +
> > > +  if (is_ctrl_stmt (stmt_info->stmt))
> > > +    STMT_VINFO_DEF_TYPE (stmt_info) = vect_early_exit_def;
> >
> > I think it should rather be vect_condition_def.  It's also not this
> > function's business to set STMT_VINFO_DEF_TYPE.  If we ever get to
> > handle not if-converted code (or BB vectorization of that) then a
> > gcond would define the mask stmts are under.
>

Hmm sure, I've had to place it in multiple other places but moved it
away from here.  The main ones are set during dataflow analysis when we
determine which statements need to be moved.
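To make the relevancy point above concrete, here is an illustrative
sketch (not taken from the patch or its testsuite; function and variable
names are made up) of the two situations the new check distinguishes:

  /* f1: the early break `a[i] == x' is a gcond other than the loop's
     main IV exit, so it is marked vect_used_in_scope and its operands
     are processed for vectorization.  */
  int f1 (int *a, int x, int n)
  {
    for (int i = 0; i < n; i++)
      if (a[i] == x)
        return i;
    return -1;
  }

  /* f2: when the outer loop is the one being vectorized, the inner
     loop's exit test `j < m' is also a gcond inside the outer loop's
     body.  Marking every gcond vect_used_in_scope would pull that
     inner exit into the outer loop's scope, hence the
     !loop->inner || gimple_bb (stmt)->loop_father == loop guard.  */
  void f2 (int *out, int *in, int n, int m)
  {
    for (int i = 0; i < n; i++)
      {
        int s = 0;
        for (int j = 0; j < m; j++)
          s += in[i * m + j];
        out[i] = s;
      }
  }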
> > >    switch (STMT_VINFO_DEF_TYPE (stmt_info))
> > >      {
> > >      case vect_internal_def:
> > > +    case vect_early_exit_def:
> > >        break;
> > >
> > >      case vect_reduction_def:
> > > @@ -13080,6 +13107,7 @@ vect_analyze_stmt (vec_info *vinfo,
> > >      {
> > >        gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
> > >        gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
> > > +		  || gimple_code (stmt_info->stmt) == GIMPLE_COND
> > > 		  || (call && gimple_call_lhs (call) == NULL_TREE));
> > >        *need_to_vectorize = true;
> > >      }
> > > @@ -13835,6 +13863,14 @@ vect_is_simple_use (vec_info *vinfo, stmt_vec_info stmt, slp_tree slp_node,
> > >        else
> > > 	*op = gimple_op (ass, operand + 1);
> > >      }
> > > +  else if (gcond *cond = dyn_cast <gcond *> (stmt->stmt))
> > > +    {
> > > +      gimple_match_op m_op;
> > > +      if (!gimple_extract_op (cond, &m_op))
> > > +	return false;
> > > +      gcc_assert (m_op.code.is_tree_code ());
> > > +      *op = m_op.ops[operand];
> > > +    }
> >
> > Please do not use gimple_extract_op, use
> >
> >   *op = gimple_op (cond, operand);
> >
> > >    else if (gcall *call = dyn_cast <gcall *> (stmt->stmt))
> > >      *op = gimple_call_arg (call, operand);
> > >    else
> > > @@ -14445,6 +14481,8 @@ vect_get_vector_types_for_stmt (vec_info *vinfo, stmt_vec_info stmt_info,
> > >    *nunits_vectype_out = NULL_TREE;
> > >
> > >    if (gimple_get_lhs (stmt) == NULL_TREE
> > > +      /* Allow vector conditionals through here. */
> > > +      && !is_ctrl_stmt (stmt)
> >
> > !is_a <gcond *> (stmt)
> >
> > >        /* MASK_STORE has no lhs, but is ok. */
> > >        && !gimple_call_internal_p (stmt, IFN_MASK_STORE))
> > >      {
> > > @@ -14461,7 +14499,7 @@ vect_get_vector_types_for_stmt (vec_info *vinfo, stmt_vec_info stmt_info,
> > >      }
> > >
> > >    return opt_result::failure_at (stmt,
> > > -				 "not vectorized: irregular stmt.%G", stmt);
> > > +				 "not vectorized: irregular stmt: %G", stmt);
> > >      }
> > >
> > >    tree vectype;
> > > @@ -14490,6 +14528,14 @@ vect_get_vector_types_for_stmt (vec_info *vinfo, stmt_vec_info stmt_info,
> > >      scalar_type = TREE_TYPE (DR_REF (dr));
> > >    else if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
> > >      scalar_type = TREE_TYPE (gimple_call_arg (stmt, 3));
> > > +  else if (is_ctrl_stmt (stmt))
> >
> > else if (gcond *cond = dyn_cast <...>)
> >
> > > +    {
> > > +      gcond *cond = dyn_cast <gcond *> (stmt);
> > > +      if (!cond)
> > > +	return opt_result::failure_at (stmt, "not vectorized: unsupported"
> > > +				       " control flow statement.\n");
> > > +      scalar_type = TREE_TYPE (gimple_cond_rhs (stmt));
> >
> > As said in the other patch, STMT_VINFO_VECTYPE of the gcond should
> > be the _mask_ type the compare produces, not the vector type of
> > the inputs (the nunits_vectype might be that one though).
> > You possibly need to adjust vect_get_smallest_scalar_type for this.
>

Fixed, but it's in the other patch now.

Ok for master?

Thanks,
Tamar

gcc/ChangeLog:

	* tree-vect-patterns.cc (vect_mark_pattern_stmts): Support gcond
	patterns.
	* tree-vect-stmts.cc (vect_stmt_relevant_p,
	vect_mark_stmts_to_be_vectorized, vect_analyze_stmt,
	vect_is_simple_use, vect_get_vector_types_for_stmt): Support early
	breaks.

--- inline copy of patch ---

diff --git a/gcc/tree-vect-patterns.cc b/gcc/tree-vect-patterns.cc
index c6cedf4fe7c1f1e1126ce166a059a4b2a2b49cbd..ea59ad337f14d802607850e8a7cf0125777ce2bc 100644
--- a/gcc/tree-vect-patterns.cc
+++ b/gcc/tree-vect-patterns.cc
@@ -6987,6 +6987,10 @@ vect_mark_pattern_stmts (vec_info *vinfo,
   vect_set_pattern_stmt (vinfo,
 			 pattern_stmt, orig_stmt_info, pattern_vectype);
 
+  /* For any conditionals mark them as vect_condition_def.  */
+  if (is_a <gcond *> (pattern_stmt))
+    STMT_VINFO_DEF_TYPE (STMT_VINFO_RELATED_STMT (orig_stmt_info)) = vect_condition_def;
+
   /* Transfer reduction path info to the pattern.  */
   if (STMT_VINFO_REDUC_IDX (orig_stmt_info_saved) != -1)
     {
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index d801b72a149ebe6aa4d1f2942324b042d07be530..1e2698fcb7e95ae7f0009d10a79ba8c891a8227d 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -361,7 +361,9 @@ vect_stmt_relevant_p (stmt_vec_info stmt_info, loop_vec_info loop_vinfo,
 
   /* cond stmt other than loop exit cond.  */
   gimple *stmt = STMT_VINFO_STMT (stmt_info);
-  if (dyn_cast <gcond *> (stmt))
+  if (is_a <gcond *> (stmt)
+      && LOOP_VINFO_LOOP_IV_COND (loop_vinfo) != stmt
+      && (!loop->inner || gimple_bb (stmt)->loop_father == loop))
     *relevant = vect_used_in_scope;
 
   /* changing memory.  */
@@ -393,7 +395,6 @@ vect_stmt_relevant_p (stmt_vec_info stmt_info, loop_vec_info loop_vinfo,
 	      /* We expect all such uses to be in the loop exit phis
 		 (because of loop closed form)   */
 	      gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
-	      gcc_assert (bb == single_exit (loop)->dest);
 
 	      *live_p = true;
 	    }
@@ -807,6 +808,20 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo, bool *fatal)
 		      return res;
 		    }
 		}
+	    }
+	  else if (gcond *cond = dyn_cast <gcond *> (stmt_vinfo->stmt))
+	    {
+	      enum tree_code rhs_code = gimple_cond_code (cond);
+	      gcc_assert (TREE_CODE_CLASS (rhs_code) == tcc_comparison);
+	      opt_result res
+		= process_use (stmt_vinfo, gimple_cond_lhs (cond),
+			       loop_vinfo, relevant, &worklist, false);
+	      if (!res)
+		return res;
+	      res = process_use (stmt_vinfo, gimple_cond_rhs (cond),
+				 loop_vinfo, relevant, &worklist, false);
+	      if (!res)
+		return res;
 	    }
 	  else if (gcall *call = dyn_cast <gcall *> (stmt_vinfo->stmt))
 	    {
@@ -820,6 +835,8 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo, bool *fatal)
 		    return res;
 		}
 	    }
+	  else
+	    gcc_unreachable ();
 	}
       else
 	FOR_EACH_PHI_OR_STMT_USE (use_p, stmt_vinfo->stmt, iter, SSA_OP_USE)
@@ -13044,11 +13061,12 @@ vect_analyze_stmt (vec_info *vinfo,
 					     node_instance, cost_vec);
       if (!res)
 	return res;
-    }
+    }
 
   switch (STMT_VINFO_DEF_TYPE (stmt_info))
     {
     case vect_internal_def:
+    case vect_condition_def:
       break;
 
     case vect_reduction_def:
@@ -13081,6 +13099,7 @@ vect_analyze_stmt (vec_info *vinfo,
     {
       gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
       gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
+		  || gimple_code (stmt_info->stmt) == GIMPLE_COND
 		  || (call && gimple_call_lhs (call) == NULL_TREE));
       *need_to_vectorize = true;
     }
@@ -13855,6 +13874,8 @@ vect_is_simple_use (vec_info *vinfo, stmt_vec_info stmt, slp_tree slp_node,
       else
 	*op = gimple_op (ass, operand + 1);
     }
+  else if (gcond *cond = dyn_cast <gcond *> (stmt->stmt))
+    *op = gimple_op (cond, operand);
   else if (gcall *call = dyn_cast <gcall *> (stmt->stmt))
     *op = gimple_call_arg (call, operand);
   else
@@ -14465,6 +14486,8 @@ vect_get_vector_types_for_stmt (vec_info *vinfo, stmt_vec_info stmt_info,
   *nunits_vectype_out = NULL_TREE;
 
   if (gimple_get_lhs (stmt) == NULL_TREE
+      /* Allow vector conditionals through here.  */
+      && !is_a <gcond *> (stmt)
       /* MASK_STORE has no lhs, but is ok.  */
       && !gimple_call_internal_p (stmt, IFN_MASK_STORE))
     {
@@ -14481,7 +14504,7 @@ vect_get_vector_types_for_stmt (vec_info *vinfo, stmt_vec_info stmt_info,
 	}
 
       return opt_result::failure_at (stmt,
-				     "not vectorized: irregular stmt.%G", stmt);
+				     "not vectorized: irregular stmt: %G", stmt);
     }
 
   tree vectype;