From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: by sourceware.org (Postfix, from userid 2049) id BF2F2385842A; Mon, 28 Feb 2022 12:07:59 +0000 (GMT) DKIM-Filter: OpenDKIM Filter v2.11.0 sourceware.org BF2F2385842A Content-Type: text/plain; charset="us-ascii" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit From: Matthew Malcomson To: gcc-cvs@gcc.gnu.org Subject: [gcc(refs/vendors/ARM/heads/morello)] Allow some more block moves if we know it's safe according to the alignment X-Act-Checkin: gcc X-Git-Author: Stam Markianos-Wright X-Git-Refname: refs/vendors/ARM/heads/morello X-Git-Oldrev: 3fb5b3645dd5d39d634b5bb7236349315f21b8cd X-Git-Newrev: cee9cc1574ab3c867315fb053c180267892beb91 Message-Id: <20220228120759.BF2F2385842A@sourceware.org> Date: Mon, 28 Feb 2022 12:07:59 +0000 (GMT) X-BeenThere: gcc-cvs@gcc.gnu.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: Gcc-cvs mailing list List-Unsubscribe: , List-Archive: List-Help: List-Subscribe: , X-List-Received-Date: Mon, 28 Feb 2022 12:07:59 -0000 https://gcc.gnu.org/g:cee9cc1574ab3c867315fb053c180267892beb91 commit cee9cc1574ab3c867315fb053c180267892beb91 Author: Stam Markianos-Wright Date: Fri Dec 10 16:37:06 2021 +0000 Allow some more block moves if we know it's safe according to the alignment This fixes the second bug found in the compat.exp part of the testsuite. The compiler uses the block move mechanics for some parts of parameter passing, like copying values from one memory location to the stack when passing them by value on the stack. There was an edge case within this that would stop the copy from happening completely, because `emit_block_move` did not have any checking against `is_move_done` and it would always assume that the required insn had been emitted. It also had no other fall-back method of doing the copy. 
Here I have extended the block move logic to allow calls to `emit_block_move_via_loop` if the alignment allows for it, which means that we now do succeed in copying the required data. I have also added some checking to `emit_block_move` and have added an assert so that we ICE if this ever happens again. Diff: --- gcc/expr.c | 39 +++++++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 14 deletions(-) diff --git a/gcc/expr.c b/gcc/expr.c index cffc02f692f..c19e5c560f3 100644 --- a/gcc/expr.c +++ b/gcc/expr.c @@ -1718,11 +1718,17 @@ emit_block_move_hints (rtx x, rtx y, rtx size, enum block_op_methods method, } else if (might_overlap) *is_move_done = false; - else if (targetm.capability_mode().exists()) + /* For targets that use capabilities and if the alignment is greater than or + equal to the alignment of capability_mode(), stop here and do not attempt + the below call to emit_block_move_via_loop, because it is not safe if any + capabilities are within the BLK data. + This is still safe for smaller alignments, because an alignment less than + natural capability alignment means that the struct does not contain any + capabilities, or, if it does, these are forcibly underaligned (in e.g. + a packed struct) and thus invalid and not dereferenceable. */ + else if (targetm.capability_mode().exists() + && align >= get_mode_alignment (targetm.capability_mode().require())) { - /* For targets that use capabilities stop here and do not attempt the - below call to emit_block_move_via_loop, because it is not safe if - capabilities are within the BLK data. 
*/ if (is_move_done) *is_move_done = false; return retval; @@ -1744,8 +1750,11 @@ emit_block_move (rtx x, rtx y, rtx size, enum block_op_methods method) min = max = UINTVAL (size); else max = GET_MODE_MASK (GET_MODE (size)); - return emit_block_move_hints (x, y, size, method, 0, -1, - min, max, max); + bool done = false; + rtx tmp = emit_block_move_hints (x, y, size, method, 0, -1, + min, max, max, false, &done, false); + gcc_assert (done); + return tmp; } /* A subroutine of emit_block_move. Returns true if calling the @@ -1917,8 +1926,8 @@ emit_block_move_via_loop (rtx x, rtx y, rtx size, { rtx_code_label *cmp_label, *top_label; rtx iter, x_addr, y_addr, tmp; - machine_mode x_addr_mode = get_address_mode (x); - machine_mode y_addr_mode = get_address_mode (y); + scalar_addr_mode x_addr_mode = get_address_mode (x); + scalar_addr_mode y_addr_mode = get_address_mode (y); machine_mode iter_mode; iter_mode = GET_MODE (size); @@ -1926,8 +1935,8 @@ emit_block_move_via_loop (rtx x, rtx y, rtx size, iter_mode = word_mode; gcc_assert (!CAPABILITY_MODE_P (iter_mode) - && !CAPABILITY_MODE_P (x_addr_mode) - && !CAPABILITY_MODE_P (y_addr_mode) + && !CAPABILITY_MODE_P (GET_MODE (x)) + && !CAPABILITY_MODE_P (GET_MODE (y)) && !CAPABILITY_MODE_P (GET_MODE (size))); top_label = gen_label_rtx (); @@ -1943,12 +1952,14 @@ emit_block_move_via_loop (rtx x, rtx y, rtx size, emit_jump (cmp_label); emit_label (top_label); - tmp = convert_modes (x_addr_mode, iter_mode, iter, true); - x_addr = simplify_gen_binary (PLUS, x_addr_mode, x_addr, tmp); + tmp = convert_modes (noncapability_mode (x_addr_mode), + iter_mode, iter, true); + x_addr = gen_pointer_plus (x_addr_mode, x_addr, tmp); if (x_addr_mode != y_addr_mode) - tmp = convert_modes (y_addr_mode, iter_mode, iter, true); - y_addr = simplify_gen_binary (PLUS, y_addr_mode, y_addr, tmp); + tmp = convert_modes (noncapability_mode (y_addr_mode), + iter_mode, iter, true); + y_addr = gen_pointer_plus (y_addr_mode, y_addr, tmp); x = change_address 
(x, QImode, x_addr); y = change_address (y, QImode, y_addr);