From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: by sourceware.org (Postfix, from userid 2140) id 055BD3858006; Wed, 15 Sep 2021 17:03:17 +0000 (GMT) DKIM-Filter: OpenDKIM Filter v2.11.0 sourceware.org 055BD3858006 Content-Type: text/plain; charset="us-ascii" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit From: Alexandre Oliva To: gcc-cvs@gcc.gnu.org Subject: [gcc(refs/users/aoliva/heads/testme)] harden conditional branches X-Act-Checkin: gcc X-Git-Author: Alexandre Oliva X-Git-Refname: refs/users/aoliva/heads/testme X-Git-Oldrev: 2709337c1181510e3d4f74e86bf75aa83f7b18aa X-Git-Newrev: 0a13d419fa294fdf8ca09cd3c163cb617dfb2383 Message-Id: <20210915170317.055BD3858006@sourceware.org> Date: Wed, 15 Sep 2021 17:03:17 +0000 (GMT) X-BeenThere: gcc-cvs@gcc.gnu.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: Gcc-cvs mailing list List-Unsubscribe: , List-Archive: List-Help: List-Subscribe: , X-List-Received-Date: Wed, 15 Sep 2021 17:03:17 -0000 https://gcc.gnu.org/g:0a13d419fa294fdf8ca09cd3c163cb617dfb2383 commit 0a13d419fa294fdf8ca09cd3c163cb617dfb2383 Author: Alexandre Oliva Date: Wed Sep 15 14:00:47 2021 -0300 harden conditional branches Diff: --- gcc/Makefile.in | 1 + gcc/common.opt | 10 + gcc/doc/invoke.texi | 19 ++ gcc/gimple-harden-conditionals.cc | 393 ++++++++++++++++++++++++++++++++++++++ gcc/passes.def | 2 + gcc/tree-pass.h | 3 + 6 files changed, 428 insertions(+) diff --git a/gcc/Makefile.in b/gcc/Makefile.in index b8229adf580..911d095601f 100644 --- a/gcc/Makefile.in +++ b/gcc/Makefile.in @@ -1389,6 +1389,7 @@ OBJS = \ gimple-if-to-switch.o \ gimple-iterator.o \ gimple-fold.o \ + gimple-harden-conditionals.o \ gimple-laddress.o \ gimple-loop-interchange.o \ gimple-loop-jam.o \ diff --git a/gcc/common.opt b/gcc/common.opt index b921f5e3b25..6c0fa691063 100644 --- a/gcc/common.opt +++ b/gcc/common.opt @@ -1719,6 +1719,16 @@ fguess-branch-probability Common Var(flag_guess_branch_prob) Optimization Enable guessing of branch probabilities. 
+; ??? Do NOT enable by default +fharden-compares +Common Var(flag_harden_compares) Optimization Init(1) +Harden conditionals not used in branches, checking reversed conditions. + +; ??? Do NOT enable by default +fharden-conditional-branches +Common Var(flag_harden_conditional_branches) Optimization Init(1) +Harden conditional branches by checking reversed conditions. + ; Nonzero means ignore `#ident' directives. 0 means handle them. ; Generate position-independent code for executables if possible ; On SVR4 targets, it also controls whether or not to emit a diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi index 78cfc100ac2..84bbf1c7113 100644 --- a/gcc/doc/invoke.texi +++ b/gcc/doc/invoke.texi @@ -595,6 +595,7 @@ Objective-C and Objective-C++ Dialects}. -fasan-shadow-offset=@var{number} -fsanitize-sections=@var{s1},@var{s2},... @gol -fsanitize-undefined-trap-on-error -fbounds-check @gol -fcf-protection=@r{[}full@r{|}branch@r{|}return@r{|}none@r{|}check@r{]} @gol +-fharden-compares -fharden-conditional-branches @gol -fstack-protector -fstack-protector-all -fstack-protector-strong @gol -fstack-protector-explicit -fstack-check @gol -fstack-limit-register=@var{reg} -fstack-limit-symbol=@var{sym} @gol @@ -15487,6 +15488,24 @@ which functions and calls should be skipped from instrumentation Currently the x86 GNU/Linux target provides an implementation based on Intel Control-flow Enforcement Technology (CET). +@item -fharden-compares +@opindex fharden-compares +For every logical test that survives gimple optimizations and is +@emph{not} the condition in a conditional branch (for example, +conditions tested for conditional moves, or to store in boolean +variables), emit extra code to compute and verify the reversed +condition, and to call @code{__builtin_trap} if the results do not +match. Use with @samp{-fharden-conditional-branches} to cover all +conditionals. 
+ +@item -fharden-conditional-branches +@opindex fharden-conditional-branches +For every non-vectorized conditional branch that survives gimple +optimizations, emit extra code to compute and verify the reversed +condition, and to call @code{__builtin_trap} if the result is +unexpected. Use with @samp{-fharden-compares} to cover all +conditionals. + @item -fstack-protector @opindex fstack-protector Emit extra code to check for buffer overflows, such as stack smashing diff --git a/gcc/gimple-harden-conditionals.cc b/gcc/gimple-harden-conditionals.cc new file mode 100644 index 00000000000..5f3cf845d73 --- /dev/null +++ b/gcc/gimple-harden-conditionals.cc @@ -0,0 +1,393 @@ +/* Harden conditionals. + Copyright (C) 2021 Free Software Foundation, Inc. + Contributed by Alexandre Oliva . + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#include "config.h" +#include "system.h" +#include "coretypes.h" +#include "backend.h" +#include "tree.h" +#include "fold-const.h" +#include "gimple.h" +#include "gimplify.h" +#include "tree-pass.h" +#include "ssa.h" +#include "gimple-iterator.h" +#include "tree-cfg.h" +#include "basic-block.h" +#include "cfghooks.h" +#include "cfgloop.h" +#include "diagnostic.h" +#include "intl.h" + +namespace { + +/* This pass introduces redundant, but reversed conditionals at every + compare, be it used for conditional branches, other conditional + operations, or for boolean computation. 
This doesn't make sense + for abstract CPUs, but this kind of hardening may avoid undesirable + execution paths on CPUs under such attacks as of power + deprivation. */ + +/* Define a pass to harden conditionals other than branches. */ +const pass_data pass_data_harden_compares = { + GIMPLE_PASS, + "hardcmp", + OPTGROUP_NONE, + TV_NONE, + PROP_cfg | PROP_ssa, // properties_required + 0, // properties_provided + 0, // properties_destroyed + 0, // properties_start + TODO_update_ssa + | TODO_cleanup_cfg + | TODO_verify_il, // properties_finish +}; + +class pass_harden_compares : public gimple_opt_pass +{ +public: + pass_harden_compares (gcc::context *ctxt) + : gimple_opt_pass (pass_data_harden_compares, ctxt) + {} + opt_pass *clone () { return new pass_harden_compares (m_ctxt); } + virtual bool gate (function *) { + return flag_harden_compares; + } + virtual unsigned int execute (function *); +}; + +/* This pass introduces redundant, but reversed conditionals at every + conditional branch. This doesn't make sense for abstract CPUs, but + this kind of hardening may avoid undesirable execution paths on + CPUs under such attacks as of power deprivation. This pass must + run after the above, otherwise it will re-harden the checks + introduced by the above. */ + +/* Define a pass to harden conditionals in branches. 
*/ +const pass_data pass_data_harden_conditional_branches = { + GIMPLE_PASS, + "hardcbr", + OPTGROUP_NONE, + TV_NONE, + PROP_cfg | PROP_ssa, // properties_required + 0, // properties_provided + 0, // properties_destroyed + 0, // properties_start + TODO_update_ssa + | TODO_cleanup_cfg + | TODO_verify_il, // properties_finish +}; + +class pass_harden_conditional_branches : public gimple_opt_pass +{ +public: + pass_harden_conditional_branches (gcc::context *ctxt) + : gimple_opt_pass (pass_data_harden_conditional_branches, ctxt) + {} + opt_pass *clone () { return new pass_harden_conditional_branches (m_ctxt); } + virtual bool gate (function *) { + return flag_harden_conditional_branches; + } + virtual unsigned int execute (function *); +}; + +} + +static inline tree +detach_value (gimple_stmt_iterator *gsip, tree val) +{ + if (TREE_CONSTANT (val) || TREE_CODE (val) != SSA_NAME) + { + gcc_checking_assert (tree_invariant_p (val)); + return val; + } + + tree ret = copy_ssa_name (val); + + vec<tree, va_gc> *inputs = NULL; + vec<tree, va_gc> *outputs = NULL; + vec_safe_push (outputs, + build_tree_list + (build_tree_list + (NULL_TREE, build_string (2, "=g")), + ret)); + vec_safe_push (inputs, + build_tree_list + (build_tree_list + (NULL_TREE, build_string (1, "0")), + val)); + gasm *detach = gimple_build_asm_vec ("", inputs, outputs, + NULL, NULL); + gsi_insert_before (gsip, detach, GSI_SAME_STMT); + + SSA_NAME_DEF_STMT (ret) = detach; + + return ret; +} + +static inline void +insert_check_and_trap (gimple_stmt_iterator *gsip, int flags, + enum tree_code cop, tree lhs, tree rhs) +{ + basic_block chk = gsi_bb (*gsip); + + gcond *cond = gimple_build_cond (cop, lhs, rhs, NULL, NULL); + gsi_insert_before (gsip, cond, GSI_SAME_STMT); + + basic_block trp = create_empty_bb (chk); + + gimple_stmt_iterator gsit = gsi_after_labels (trp); + gcall *trap = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0); + gsi_insert_before (&gsit, trap, GSI_SAME_STMT); + + if (BB_PARTITION (chk)) + BB_SET_PARTITION
(trp, BB_COLD_PARTITION); + + int true_false_flag = flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE); + gcc_assert (true_false_flag); + int neg_true_false_flag = (~flags) & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE); + + /* Make the edge to the trap block the fallthru one, since we don't + know where the other block is placed. */ + single_succ_edge (chk)->flags |= neg_true_false_flag; + single_succ_edge (chk)->flags &= ~EDGE_FALLTHRU; + make_edge (chk, trp, true_false_flag | EDGE_FALLTHRU); + + if (dom_info_available_p (CDI_DOMINATORS)) + set_immediate_dominator (CDI_DOMINATORS, trp, chk); + if (current_loops) + add_bb_to_loop (trp, current_loops->tree_root); +} + +static inline void +insert_edge_check_and_trap (edge e, bool fallthru ATTRIBUTE_UNUSED, + enum tree_code cop, tree lhs, tree rhs) +{ + int flags = e->flags; + + basic_block chk = split_edge (e); + e = NULL; + + gimple_stmt_iterator gsik = gsi_after_labels (chk); + + bool same_p = (lhs == rhs); + lhs = detach_value (&gsik, lhs); + rhs = same_p ? lhs : detach_value (&gsik, rhs); + + insert_check_and_trap (&gsik, flags, cop, lhs, rhs); + +#if 0 + /* ??? Attempt to improve block placement. This is fragile, leaving + a cond branch without a fallthru edge is prone to breaking the + expander. 
*/ + if (fallthru) + { + if (chk->prev_bb != single_pred (chk)) + move_block_after (chk, single_pred (chk)); + if (chk->next_bb != FALLTHRU_EDGE (chk)->dest) + split_edge (FALLTHRU_EDGE (chk)); + } + else + { + if (single_pred_p (FALLTHRU_EDGE (chk)->dest)) + move_block_after (chk, FALLTHRU_EDGE (chk)->dest->prev_bb); + else if (chk->next_bb != FALLTHRU_EDGE (chk)->dest) + split_edge (FALLTHRU_EDGE (chk)); + } +#endif + + gcc_checking_assert (!fallthru || !(flags & EDGE_FALLTHRU) + || chk->prev_bb == single_pred (chk)); +} + +unsigned int +pass_harden_conditional_branches::execute (function *fun) +{ + basic_block bb; + FOR_EACH_BB_REVERSE_FN (bb, fun) + { + gimple_stmt_iterator gsi = gsi_last_bb (bb); + + if (gsi_end_p (gsi)) + continue; + + gcond *cond = dyn_cast <gcond *> (gsi_stmt (gsi)); + if (!cond) + continue; + + /* Turn: + + if (x op y) goto l1; else goto l2; + + into: + + if (x op y) goto l1'; else goto l2'; + l1': if (x' cop y') goto l1'trap; else goto l1; + l1'trap: __builtin_trap (); + l2': if (x' cop y') goto l2; else goto l2'trap; + l2'trap: __builtin_trap (); + + where cop is a complementary boolean operation to op; l1', l1'trap, + l2' and l2'trap are newly-created labels; and x' and y' hold the same + value as x and y, but in a way that does not enable the compiler to + optimize the redundant compare away. + */ + + enum tree_code op = gimple_cond_code (cond); + tree lhs = gimple_cond_lhs (cond); + tree rhs = gimple_cond_rhs (cond); + + enum tree_code cop = invert_tree_comparison (op, HONOR_NANS (lhs)); + + if (cop == ERROR_MARK) + /* ??? Can we do better? */ + continue; + + /* Each insertion splits one of the edges. Split the fallthru + edge last, so that it remains fallthru.
*/ + edge branch = BRANCH_EDGE (bb), fallthru = FALLTHRU_EDGE (bb); + insert_edge_check_and_trap (branch, false, cop, lhs, rhs); + insert_edge_check_and_trap (fallthru, true, cop, lhs, rhs); + } + + return 0; +} + +gimple_opt_pass * +make_pass_harden_conditional_branches (gcc::context *ctxt) +{ + return new pass_harden_conditional_branches (ctxt); +} + +unsigned int +pass_harden_compares::execute (function *fun) +{ + basic_block bb; + FOR_EACH_BB_REVERSE_FN (bb, fun) + for (gimple_stmt_iterator gsi = gsi_last_bb (bb); + !gsi_end_p (gsi); gsi_prev (&gsi)) + { + gassign *asgn = dyn_cast <gassign *> (gsi_stmt (gsi)); + if (!asgn) + continue; + + /* Turn: + + z = x op y; + + into: + + z = x op y; + z' = x' cop y'; + if (z == z') __builtin_trap (); + + where cop is a complementary boolean operation to op; and x' + and y' hold the same value as x and y, but in a way that does + not enable the compiler to optimize the redundant compare + away. + */ + + enum tree_code op = gimple_assign_rhs_code (asgn); + + enum tree_code cop; + + switch (op) + { + case EQ_EXPR: + case NE_EXPR: + case GT_EXPR: + case GE_EXPR: + case LT_EXPR: + case LE_EXPR: + case LTGT_EXPR: + case UNEQ_EXPR: + case UNGT_EXPR: + case UNGE_EXPR: + case UNLT_EXPR: + case UNLE_EXPR: + case ORDERED_EXPR: + case UNORDERED_EXPR: + cop = invert_tree_comparison (op, + HONOR_NANS + (gimple_assign_rhs1 (asgn))); + + if (cop == ERROR_MARK) + /* ??? Can we do better? */ + continue; + + break; + + /* ??? Maybe handle these too? */ + case TRUTH_NOT_EXPR: /* Unary! */ + case TRUTH_ANDIF_EXPR: + case TRUTH_ORIF_EXPR: + case TRUTH_AND_EXPR: + case TRUTH_OR_EXPR: + case TRUTH_XOR_EXPR: + default: + continue; + } + + /* These are the operands for the verification. */ + tree lhs = gimple_assign_lhs (asgn); + tree op1 = gimple_assign_rhs1 (asgn); + tree op2 = gimple_assign_rhs2 (asgn); + + gcc_checking_assert (TREE_TYPE (lhs) == boolean_type_node); + + /* Vector booleans can't be used in conditional branches. + ??? Can we do better?
*/ + if (VECTOR_TYPE_P (TREE_TYPE (op1))) + continue; + + tree rhs = copy_ssa_name (lhs); + + gimple_stmt_iterator gsi_split = gsi; + gsi_next (&gsi_split); + + bool same_p = (op1 == op2); + op1 = detach_value (&gsi_split, op1); + op2 = same_p ? op1 : detach_value (&gsi_split, op2); + + gassign *asgnck = gimple_build_assign (rhs, cop, op1, op2); + gsi_insert_before (&gsi_split, asgnck, GSI_SAME_STMT); + + if (!gsi_end_p (gsi_split)) + { + gsi_prev (&gsi_split); + split_block (bb, gsi_stmt (gsi_split)); + gsi_next (&gsi_split); + gcc_checking_assert (gsi_end_p (gsi_split)); + } + + gcc_checking_assert (single_succ_p (bb)); + + insert_check_and_trap (&gsi_split, EDGE_TRUE_VALUE, + EQ_EXPR, lhs, rhs); + } + + return 0; +} + +gimple_opt_pass * +make_pass_harden_compares (gcc::context *ctxt) +{ + return new pass_harden_compares (ctxt); +} diff --git a/gcc/passes.def b/gcc/passes.def index d7a1f8c97a6..a3d854ca5a6 100644 --- a/gcc/passes.def +++ b/gcc/passes.def @@ -419,6 +419,8 @@ along with GCC; see the file COPYING3. If not see NEXT_PASS (pass_lower_resx); NEXT_PASS (pass_nrv); NEXT_PASS (pass_gimple_isel); + NEXT_PASS (pass_harden_conditional_branches); + NEXT_PASS (pass_harden_compares); NEXT_PASS (pass_cleanup_cfg_post_optimizing); NEXT_PASS (pass_warn_function_noreturn); NEXT_PASS (pass_warn_access); diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h index eb75eb17951..5df71fd0c9e 100644 --- a/gcc/tree-pass.h +++ b/gcc/tree-pass.h @@ -642,6 +642,9 @@ extern gimple_opt_pass *make_pass_update_address_taken (gcc::context *ctxt); extern gimple_opt_pass *make_pass_convert_switch (gcc::context *ctxt); extern gimple_opt_pass *make_pass_lower_vaarg (gcc::context *ctxt); extern gimple_opt_pass *make_pass_gimple_isel (gcc::context *ctxt); +extern gimple_opt_pass *make_pass_harden_compares (gcc::context *ctxt); +extern gimple_opt_pass *make_pass_harden_conditional_branches (gcc::context + *ctxt); /* Current optimization pass. */ extern opt_pass *current_pass;