From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: (qmail 128720 invoked by alias); 9 Dec 2016 13:20:15 -0000 Mailing-List: contact gcc-patches-help@gcc.gnu.org; run by ezmlm Precedence: bulk List-Id: List-Archive: List-Post: List-Help: Sender: gcc-patches-owner@gcc.gnu.org Received: (qmail 128696 invoked by uid 89); 9 Dec 2016 13:20:14 -0000 Authentication-Results: sourceware.org; auth=none X-Virus-Found: No X-Spam-SWARE-Status: No, score=-4.9 required=5.0 tests=BAYES_00,RP_MATCHES_RCVD,SPF_PASS autolearn=ham version=3.3.2 spammy=MASK X-HELO: foss.arm.com Received: from foss.arm.com (HELO foss.arm.com) (217.140.101.70) by sourceware.org (qpsmtpd/0.93/v0.84-503-g423c35a) with ESMTP; Fri, 09 Dec 2016 13:20:04 +0000 Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.72.51.249]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id 62CEE707; Fri, 9 Dec 2016 05:20:02 -0800 (PST) Received: from localhost (e105548-lin.manchester.arm.com [10.45.32.67]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPSA id 3F5093F477 for ; Fri, 9 Dec 2016 05:20:01 -0800 (PST) From: Richard Sandiford To: gcc-patches@gcc.gnu.org Mail-Followup-To: gcc-patches@gcc.gnu.org, richard.sandiford@arm.com Subject: [41/67] Split scalar integer handling out of force_to_mode References: <87h96dp8u6.fsf@e105548-lin.cambridge.arm.com> Date: Fri, 09 Dec 2016 13:20:00 -0000 In-Reply-To: <87h96dp8u6.fsf@e105548-lin.cambridge.arm.com> (Richard Sandiford's message of "Fri, 09 Dec 2016 12:48:01 +0000") Message-ID: <87k2b9i6io.fsf@e105548-lin.cambridge.arm.com> User-Agent: Gnus/5.130012 (Ma Gnus v0.12) Emacs/24.3 (gnu/linux) MIME-Version: 1.0 Content-Type: text/plain X-SW-Source: 2016-12/txt/msg00813.txt.bz2 force_to_mode exits partway through for modes that aren't scalar integers. This patch splits the remainder of the function out into a subroutine, force_int_to_mode, so that the modes from that point on can have type scalar_int_mode. 
The patch also makes sure that xmode is kept up-to-date with x and uses xmode instead of GET_MODE (x) throughout. gcc/ 2016-11-24 Richard Sandiford Alan Hayward David Sherwood * combine.c (force_int_to_mode): New function, split out from... (force_to_mode): ...here. Keep xmode up-to-date and use it instead of GET_MODE (x). diff --git a/gcc/combine.c b/gcc/combine.c index 449f371..0fcd428 100644 --- a/gcc/combine.c +++ b/gcc/combine.c @@ -446,6 +446,8 @@ static rtx extract_left_shift (rtx, int); static int get_pos_from_mask (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT *); static rtx canon_reg_for_combine (rtx, rtx); +static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode, + scalar_int_mode, unsigned HOST_WIDE_INT, int); static rtx force_to_mode (rtx, machine_mode, unsigned HOST_WIDE_INT, int); static rtx if_then_else_cond (rtx, rtx *, rtx *); @@ -8490,8 +8492,7 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, enum rtx_code code = GET_CODE (x); int next_select = just_select || code == XOR || code == NOT || code == NEG; machine_mode op_mode; - unsigned HOST_WIDE_INT fuller_mask, nonzero; - rtx op0, op1, temp; + unsigned HOST_WIDE_INT nonzero; /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the code below will do the wrong thing since the mode of such an @@ -8519,15 +8520,6 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, if (op_mode) mask &= GET_MODE_MASK (op_mode); - /* When we have an arithmetic operation, or a shift whose count we - do not know, we need to assume that all bits up to the highest-order - bit in MASK will be needed. This is how we form such a mask. */ - if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1))) - fuller_mask = HOST_WIDE_INT_M1U; - else - fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1)) - - 1); - /* Determine what bits of X are guaranteed to be (non)zero. 
*/ nonzero = nonzero_bits (x, mode); @@ -8565,9 +8557,42 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x))))))) return force_to_mode (SUBREG_REG (x), mode, mask, next_select); - /* The arithmetic simplifications here only work for scalar integer modes. */ - if (!SCALAR_INT_MODE_P (mode) || !SCALAR_INT_MODE_P (GET_MODE (x))) - return gen_lowpart_or_truncate (mode, x); + scalar_int_mode int_mode, xmode; + if (is_a <scalar_int_mode> (mode, &int_mode) + && is_a <scalar_int_mode> (GET_MODE (x), &xmode)) + /* OP_MODE is either MODE or XMODE, so it must be a scalar + integer too. */ + return force_int_to_mode (x, int_mode, xmode, + as_a <scalar_int_mode> (op_mode), + mask, just_select); + + return gen_lowpart_or_truncate (mode, x); +} + +/* Subroutine of force_to_mode that handles cases where both X and + the result are scalar integers. MODE is the mode of the result, + XMODE is the mode of X, and OP_MODE says which of MODE or XMODE + is preferred for simplified versions of X. The other arguments + are as for force_to_mode. */ + +static rtx +force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode, + scalar_int_mode op_mode, unsigned HOST_WIDE_INT mask, + int just_select) +{ + enum rtx_code code = GET_CODE (x); + int next_select = just_select || code == XOR || code == NOT || code == NEG; + unsigned HOST_WIDE_INT fuller_mask; + rtx op0, op1, temp; + + /* When we have an arithmetic operation, or a shift whose count we + do not know, we need to assume that all bits up to the highest-order + bit in MASK will be needed. This is how we form such a mask. 
*/ + if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1))) + fuller_mask = HOST_WIDE_INT_M1U; + else + fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1)) + - 1); switch (code) { @@ -8598,14 +8623,14 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, { x = simplify_and_const_int (x, op_mode, XEXP (x, 0), mask & INTVAL (XEXP (x, 1))); + xmode = op_mode; /* If X is still an AND, see if it is an AND with a mask that is just some low-order bits. If so, and it is MASK, we don't need it. */ if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1)) - && ((INTVAL (XEXP (x, 1)) & GET_MODE_MASK (GET_MODE (x))) - == mask)) + && (INTVAL (XEXP (x, 1)) & GET_MODE_MASK (xmode)) == mask) x = XEXP (x, 0); /* If it remains an AND, try making another AND with the bits @@ -8614,18 +8639,17 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, cheaper constant. */ if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1)) - && GET_MODE_MASK (GET_MODE (x)) != mask - && HWI_COMPUTABLE_MODE_P (GET_MODE (x))) + && GET_MODE_MASK (xmode) != mask + && HWI_COMPUTABLE_MODE_P (xmode)) { unsigned HOST_WIDE_INT cval - = UINTVAL (XEXP (x, 1)) - | (GET_MODE_MASK (GET_MODE (x)) & ~mask); + = UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (xmode) & ~mask); rtx y; - y = simplify_gen_binary (AND, GET_MODE (x), XEXP (x, 0), - gen_int_mode (cval, GET_MODE (x))); - if (set_src_cost (y, GET_MODE (x), optimize_this_for_speed_p) - < set_src_cost (x, GET_MODE (x), optimize_this_for_speed_p)) + y = simplify_gen_binary (AND, xmode, XEXP (x, 0), + gen_int_mode (cval, xmode)); + if (set_src_cost (y, xmode, optimize_this_for_speed_p) + < set_src_cost (x, xmode, optimize_this_for_speed_p)) x = y; } @@ -8655,7 +8679,7 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, && pow2p_hwi (- smask) && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0 && (INTVAL (XEXP (x, 1)) & ~smask) != 0) - return force_to_mode (plus_constant (GET_MODE (x), XEXP (x, 0), + return 
force_to_mode (plus_constant (xmode, XEXP (x, 0), (INTVAL (XEXP (x, 1)) & smask)), mode, smask, next_select); } @@ -8686,8 +8710,7 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, if (CONST_INT_P (XEXP (x, 0)) && least_bit_hwi (UINTVAL (XEXP (x, 0))) > mask) { - x = simplify_gen_unary (NEG, GET_MODE (x), XEXP (x, 1), - GET_MODE (x)); + x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode); return force_to_mode (x, mode, mask, next_select); } @@ -8696,8 +8719,7 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, if (CONST_INT_P (XEXP (x, 0)) && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0)))) { - x = simplify_gen_unary (NOT, GET_MODE (x), - XEXP (x, 1), GET_MODE (x)); + x = simplify_gen_unary (NOT, xmode, XEXP (x, 1), xmode); return force_to_mode (x, mode, mask, next_select); } @@ -8718,16 +8740,16 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, && CONST_INT_P (XEXP (x, 1)) && ((INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (INTVAL (XEXP (x, 1)))) - < GET_MODE_PRECISION (GET_MODE (x))) + < GET_MODE_PRECISION (xmode)) && (UINTVAL (XEXP (x, 1)) - & ~nonzero_bits (XEXP (x, 0), GET_MODE (x))) == 0) + & ~nonzero_bits (XEXP (x, 0), xmode)) == 0) { temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask) << INTVAL (XEXP (XEXP (x, 0), 1)), - GET_MODE (x)); - temp = simplify_gen_binary (GET_CODE (x), GET_MODE (x), + xmode); + temp = simplify_gen_binary (GET_CODE (x), xmode, XEXP (XEXP (x, 0), 0), temp); - x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), temp, + x = simplify_gen_binary (LSHIFTRT, xmode, temp, XEXP (XEXP (x, 0), 1)); return force_to_mode (x, mode, mask, next_select); } @@ -8751,8 +8773,11 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, op0 = gen_lowpart_or_truncate (op_mode, op0); op1 = gen_lowpart_or_truncate (op_mode, op1); - if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0) || op1 != XEXP (x, 1)) - x = simplify_gen_binary (code, op_mode, op0, op1); + if 
(op_mode != xmode || op0 != XEXP (x, 0) || op1 != XEXP (x, 1)) + { + x = simplify_gen_binary (code, op_mode, op0, op1); + xmode = op_mode; + } break; case ASHIFT: @@ -8785,8 +8810,11 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, force_to_mode (XEXP (x, 0), op_mode, mask, next_select)); - if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0)) - x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1)); + if (op_mode != xmode || op0 != XEXP (x, 0)) + { + x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1)); + xmode = op_mode; + } break; case LSHIFTRT: @@ -8808,13 +8836,16 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, /* We can only change the mode of the shift if we can do arithmetic in the mode of the shift and INNER_MASK is no wider than the width of X's mode. */ - if ((inner_mask & ~GET_MODE_MASK (GET_MODE (x))) != 0) - op_mode = GET_MODE (x); + if ((inner_mask & ~GET_MODE_MASK (xmode)) != 0) + op_mode = xmode; inner = force_to_mode (inner, op_mode, inner_mask, next_select); - if (GET_MODE (x) != op_mode || inner != XEXP (x, 0)) - x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1)); + if (xmode != op_mode || inner != XEXP (x, 0)) + { + x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1)); + xmode = op_mode; + } } /* If we have (and (lshiftrt FOO C1) C2) where the combination of the @@ -8827,17 +8858,17 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, bit. */ && ((INTVAL (XEXP (x, 1)) + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))) - >= GET_MODE_PRECISION (GET_MODE (x))) + >= GET_MODE_PRECISION (xmode)) && pow2p_hwi (mask + 1) /* Number of bits left after the shift must be more than the mask needs. */ && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1)) - <= GET_MODE_PRECISION (GET_MODE (x))) + <= GET_MODE_PRECISION (xmode)) /* Must be more sign bit copies than the mask needs. 
*/ && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))) >= exact_log2 (mask + 1))) - x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0), - GEN_INT (GET_MODE_PRECISION (GET_MODE (x)) + x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), + GEN_INT (GET_MODE_PRECISION (xmode) - exact_log2 (mask + 1))); goto shiftrt; @@ -8845,7 +8876,7 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, case ASHIFTRT: /* If we are just looking for the sign bit, we don't need this shift at all, even if it has a variable count. */ - if (val_signbit_p (GET_MODE (x), mask)) + if (val_signbit_p (xmode, mask)) return force_to_mode (XEXP (x, 0), mode, mask, next_select); /* If this is a shift by a constant, get a mask that contains those bits @@ -8858,13 +8889,14 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT) { + unsigned HOST_WIDE_INT nonzero; int i; /* If the considered data is wider than HOST_WIDE_INT, we can't represent a mask for all its bits in a single scalar. But we only care about the lower bits, so calculate these. */ - if (GET_MODE_PRECISION (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT) + if (GET_MODE_PRECISION (xmode) > HOST_BITS_PER_WIDE_INT) { nonzero = HOST_WIDE_INT_M1U; @@ -8873,21 +8905,21 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, We need only shift if these are fewer than nonzero can hold. If not, we must keep all bits set in nonzero. 
*/ - if (GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1)) + if (GET_MODE_PRECISION (xmode) - INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT) nonzero >>= INTVAL (XEXP (x, 1)) + HOST_BITS_PER_WIDE_INT - - GET_MODE_PRECISION (GET_MODE (x)) ; + - GET_MODE_PRECISION (xmode); } else { - nonzero = GET_MODE_MASK (GET_MODE (x)); + nonzero = GET_MODE_MASK (xmode); nonzero >>= INTVAL (XEXP (x, 1)); } if ((mask & ~nonzero) == 0) { - x = simplify_shift_const (NULL_RTX, LSHIFTRT, GET_MODE (x), + x = simplify_shift_const (NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0), INTVAL (XEXP (x, 1))); if (GET_CODE (x) != ASHIFTRT) return force_to_mode (x, mode, mask, next_select); @@ -8896,8 +8928,8 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, else if ((i = exact_log2 (mask)) >= 0) { x = simplify_shift_const - (NULL_RTX, LSHIFTRT, GET_MODE (x), XEXP (x, 0), - GET_MODE_PRECISION (GET_MODE (x)) - 1 - i); + (NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0), + GET_MODE_PRECISION (xmode) - 1 - i); if (GET_CODE (x) != ASHIFTRT) return force_to_mode (x, mode, mask, next_select); @@ -8907,8 +8939,7 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, /* If MASK is 1, convert this to an LSHIFTRT. This can be done even if the shift count isn't a constant. 
*/ if (mask == 1) - x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), - XEXP (x, 0), XEXP (x, 1)); + x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), XEXP (x, 1)); shiftrt: @@ -8920,7 +8951,7 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, && CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0 && (INTVAL (XEXP (x, 1)) - <= GET_MODE_PRECISION (GET_MODE (x)) - (floor_log2 (mask) + 1)) + <= GET_MODE_PRECISION (xmode) - (floor_log2 (mask) + 1)) && GET_CODE (XEXP (x, 0)) == ASHIFT && XEXP (XEXP (x, 0), 1) == XEXP (x, 1)) return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask, @@ -8938,12 +8969,11 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, && INTVAL (XEXP (x, 1)) >= 0) { temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE, - GET_MODE (x), - gen_int_mode (mask, GET_MODE (x)), + xmode, gen_int_mode (mask, xmode), XEXP (x, 1)); if (temp && CONST_INT_P (temp)) - x = simplify_gen_binary (code, GET_MODE (x), - force_to_mode (XEXP (x, 0), GET_MODE (x), + x = simplify_gen_binary (code, xmode, + force_to_mode (XEXP (x, 0), xmode, INTVAL (temp), next_select), XEXP (x, 1)); } @@ -8970,14 +9000,12 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, && CONST_INT_P (XEXP (XEXP (x, 0), 1)) && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0 && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask) - < GET_MODE_PRECISION (GET_MODE (x))) + < GET_MODE_PRECISION (xmode)) && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT) { - temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), - GET_MODE (x)); - temp = simplify_gen_binary (XOR, GET_MODE (x), - XEXP (XEXP (x, 0), 0), temp); - x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), + temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), xmode); + temp = simplify_gen_binary (XOR, xmode, XEXP (XEXP (x, 0), 0), temp); + x = simplify_gen_binary (LSHIFTRT, xmode, temp, XEXP (XEXP (x, 0), 1)); return force_to_mode (x, mode, mask, next_select); @@ 
-8991,8 +9019,11 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, op0 = gen_lowpart_or_truncate (op_mode, force_to_mode (XEXP (x, 0), mode, mask, next_select)); - if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0)) - x = simplify_gen_unary (code, op_mode, op0, op_mode); + if (op_mode != xmode || op0 != XEXP (x, 0)) + { + x = simplify_gen_unary (code, op_mode, op0, op_mode); + xmode = op_mode; + } break; case NE: @@ -9013,14 +9044,14 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, /* We have no way of knowing if the IF_THEN_ELSE can itself be written in a narrower mode. We play it safe and do not do so. */ - op0 = gen_lowpart_or_truncate (GET_MODE (x), + op0 = gen_lowpart_or_truncate (xmode, force_to_mode (XEXP (x, 1), mode, mask, next_select)); - op1 = gen_lowpart_or_truncate (GET_MODE (x), + op1 = gen_lowpart_or_truncate (xmode, force_to_mode (XEXP (x, 2), mode, mask, next_select)); if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2)) - x = simplify_gen_ternary (IF_THEN_ELSE, GET_MODE (x), + x = simplify_gen_ternary (IF_THEN_ELSE, xmode, GET_MODE (XEXP (x, 0)), XEXP (x, 0), op0, op1); break;