diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index 08ce5f1..931c8b8 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -162,12 +162,20 @@ struct cpu_vector_cost
   const int cond_not_taken_branch_cost;  /* Cost of not taken branch.  */
 };
 
+/* Branch costs.  */
+struct cpu_branch_cost
+{
+  const int predictable;    /* Predictable branch or optimizing for size.  */
+  const int unpredictable;  /* Unpredictable branch or optimizing for speed.  */
+};
+
 struct tune_params
 {
   const struct cpu_cost_table *const insn_extra_cost;
   const struct cpu_addrcost_table *const addr_cost;
   const struct cpu_regmove_cost *const regmove_cost;
   const struct cpu_vector_cost *const vec_costs;
+  const struct cpu_branch_cost *const branch_costs;
   const int memmov_cost;
   const int issue_rate;
   const unsigned int fuseable_ops;
@@ -184,6 +192,7 @@ struct tune_params
 HOST_WIDE_INT aarch64_initial_elimination_offset (unsigned, unsigned);
 int aarch64_get_condition_code (rtx);
 bool aarch64_bitmask_imm (HOST_WIDE_INT val, machine_mode);
+int aarch64_branch_cost (bool, bool);
 enum aarch64_symbol_type aarch64_classify_symbolic_expression (rtx,
 						      enum aarch64_symbol_context);
 bool aarch64_const_vec_all_same_int_p (rtx, HOST_WIDE_INT);
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 374b0a9..7bc28ae 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -340,12 +340,20 @@ static const struct cpu_vector_cost xgene1_vector_cost =
 #define AARCH64_FUSE_ADRP_LDR (1 << 3)
 #define AARCH64_FUSE_CMP_BRANCH (1 << 4)
 
+/* Generic costs for branch instructions.  */
+static const struct cpu_branch_cost generic_branch_cost =
+{
+  2,  /* Predictable.  */
+  2   /* Unpredictable.  */
+};
+
 static const struct tune_params generic_tunings =
 {
   &cortexa57_extra_costs,
   &generic_addrcost_table,
   &generic_regmove_cost,
   &generic_vector_cost,
+  &generic_branch_cost,
   4, /* memmov_cost  */
   2, /* issue_rate  */
   AARCH64_FUSE_NOTHING, /* fuseable_ops  */
@@ -365,6 +373,7 @@ static const struct tune_params cortexa53_tunings =
   &generic_addrcost_table,
   &cortexa53_regmove_cost,
   &generic_vector_cost,
+  &generic_branch_cost,
   4, /* memmov_cost  */
   2, /* issue_rate  */
   (AARCH64_FUSE_MOV_MOVK | AARCH64_FUSE_ADRP_ADD
@@ -385,6 +394,7 @@ static const struct tune_params cortexa57_tunings =
   &cortexa57_addrcost_table,
   &cortexa57_regmove_cost,
   &cortexa57_vector_cost,
+  &generic_branch_cost,
   4, /* memmov_cost  */
   3, /* issue_rate  */
   (AARCH64_FUSE_MOV_MOVK | AARCH64_FUSE_ADRP_ADD
@@ -405,6 +415,7 @@ static const struct tune_params thunderx_tunings =
   &generic_addrcost_table,
   &thunderx_regmove_cost,
   &generic_vector_cost,
+  &generic_branch_cost,
   6, /* memmov_cost  */
   2, /* issue_rate  */
   AARCH64_FUSE_CMP_BRANCH, /* fuseable_ops  */
@@ -424,6 +435,7 @@ static const struct tune_params xgene1_tunings =
   &xgene1_addrcost_table,
   &xgene1_regmove_cost,
   &xgene1_vector_cost,
+  &generic_branch_cost,
   6, /* memmov_cost  */
   4, /* issue_rate  */
   AARCH64_FUSE_NOTHING, /* fuseable_ops  */
@@ -5409,6 +5421,23 @@ aarch64_address_cost (rtx x,
   return cost;
 }
 
+/* Return the cost of a branch.  If SPEED_P is true then the compiler is
+   optimizing for speed.  If PREDICTABLE_P is true then the branch is predicted
+   to be taken.  */
+
+int
+aarch64_branch_cost (bool speed_p, bool predictable_p)
+{
+  /* When optimizing for speed, use the cost of unpredictable branches.  */
+  const struct cpu_branch_cost *branch_costs =
+    aarch64_tune_params->branch_costs;
+
+  if (!speed_p || predictable_p)
+    return branch_costs->predictable;
+  else
+    return branch_costs->unpredictable;
+}
+
 /* Return true if the RTX X in mode MODE is a zero or sign extract
    usable in an ADD or SUB (extended register) instruction.  */
 static bool
diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
index 3fd1b3f..c85d279 100644
--- a/gcc/config/aarch64/aarch64.h
+++ b/gcc/config/aarch64/aarch64.h
@@ -827,7 +827,8 @@ do { \
 #define TRAMPOLINE_SECTION text_section
 
 /* To start with.  */
-#define BRANCH_COST(SPEED_P, PREDICTABLE_P) 2
+#define BRANCH_COST(SPEED_P, PREDICTABLE_P) \
+  (aarch64_branch_cost (SPEED_P, PREDICTABLE_P))
 
 /* Assembly output.  */
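
Note: every tuning structure in this patch points at the same generic_branch_cost table, so BRANCH_COST still evaluates to 2 in all cases and code generation should be unchanged; the constant simply moves behind the new per-CPU hook.  As a rough sketch of how a later, core-specific tuning could diverge (the name examplecpu_branch_cost and its values below are hypothetical and not part of this patch), a core that benefits from if-converting poorly predicted branches might define:

    /* Hypothetical branch costs for an example core (illustration only):
       keep well-predicted branches cheap, but make mispredict-prone
       branches expensive so the middle-end is more willing to replace
       them with conditional execution.  */
    static const struct cpu_branch_cost examplecpu_branch_cost =
    {
      1,  /* Predictable.  */
      3   /* Unpredictable.  */
    };

and reference &examplecpu_branch_cost in the branch_costs slot of its tune_params initializer, i.e. between vec_costs and memmov_cost as in the hunks above.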