From: Richard Sandiford <richard.sandiford@linaro.org>
To: gcc-patches@gcc.gnu.org
Subject: [037/nnn] poly_int: get_bit_range
References: <871sltvm7r.fsf@linaro.org>
Date: Mon, 23 Oct 2017 17:16:00 -0000
In-Reply-To: <871sltvm7r.fsf@linaro.org> (Richard Sandiford's message of "Mon, 23 Oct 2017 17:54:32 +0100")
Message-ID: <87fua9okfu.fsf@linaro.org>

This patch makes get_bit_range return the range and position as poly_ints.

2017-10-23  Richard Sandiford
	    Alan Hayward
	    David Sherwood

gcc/
	* expr.h (get_bit_range): Return the bitstart and bitend as
	poly_uint64s rather than unsigned HOST_WIDE_INTs.  Return the
	bitpos as a poly_int64 rather than a HOST_WIDE_INT.
	* expr.c (get_bit_range): Likewise.
	(expand_assignment): Update call accordingly.
	* fold-const.c (optimize_bit_field_compare): Likewise.

Index: gcc/expr.h
===================================================================
--- gcc/expr.h	2017-10-23 17:07:40.476203026 +0100
+++ gcc/expr.h	2017-10-23 17:18:43.842393134 +0100
@@ -240,8 +240,8 @@ extern bool emit_push_insn (rtx, machine
 			    int, rtx, int, rtx, rtx, int, rtx, bool);
 
 /* Extract the accessible bit-range from a COMPONENT_REF.  */
-extern void get_bit_range (unsigned HOST_WIDE_INT *, unsigned HOST_WIDE_INT *,
-			   tree, HOST_WIDE_INT *, tree *);
+extern void get_bit_range (poly_uint64_pod *, poly_uint64_pod *, tree,
+			   poly_int64_pod *, tree *);
 
 /* Expand an assignment that stores the value of FROM into TO.  */
 extern void expand_assignment (tree, tree, bool);
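For reference, here is a minimal sketch of how a caller uses the new
interface, modelled on the expand_assignment code changed below.  The
snippet is illustrative rather than part of the patch and assumes the
usual GCC-internal types; get_inner_reference already takes poly_int64s
at this point in the series:

  poly_int64 bitsize, bitpos;
  poly_uint64 bitregion_start = 0, bitregion_end = 0;
  tree offset;
  machine_mode mode1;
  int unsignedp, reversep, volatilep = 0;
  tree tem = get_inner_reference (to, &bitsize, &bitpos, &offset, &mode1,
				  &unsignedp, &reversep, &volatilep);
  if (TREE_CODE (to) == COMPONENT_REF
      && DECL_BIT_FIELD_TYPE (TREE_OPERAND (to, 1)))
    /* get_bit_range may adjust bitpos and offset as well as setting
       the bounds of the accessible bit region.  */
    get_bit_range (&bitregion_start, &bitregion_end, to, &bitpos, &offset);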
Index: gcc/expr.c
===================================================================
--- gcc/expr.c	2017-10-23 17:16:50.364529087 +0100
+++ gcc/expr.c	2017-10-23 17:18:43.842393134 +0100
@@ -4804,13 +4804,10 @@ optimize_bitfield_assignment_op (poly_ui
    *BITSTART and *BITEND.  */
 
 void
-get_bit_range (unsigned HOST_WIDE_INT *bitstart,
-	       unsigned HOST_WIDE_INT *bitend,
-	       tree exp,
-	       HOST_WIDE_INT *bitpos,
-	       tree *offset)
+get_bit_range (poly_uint64_pod *bitstart, poly_uint64_pod *bitend, tree exp,
+	       poly_int64_pod *bitpos, tree *offset)
 {
-  HOST_WIDE_INT bitoffset;
+  poly_int64 bitoffset;
   tree field, repr;
 
   gcc_assert (TREE_CODE (exp) == COMPONENT_REF);
@@ -4831,13 +4828,13 @@ get_bit_range (unsigned HOST_WIDE_INT *b
   if (handled_component_p (TREE_OPERAND (exp, 0)))
     {
       machine_mode rmode;
-      HOST_WIDE_INT rbitsize, rbitpos;
+      poly_int64 rbitsize, rbitpos;
       tree roffset;
       int unsignedp, reversep, volatilep = 0;
       get_inner_reference (TREE_OPERAND (exp, 0), &rbitsize, &rbitpos,
			    &roffset, &rmode, &unsignedp, &reversep,
			    &volatilep);
-      if ((rbitpos % BITS_PER_UNIT) != 0)
+      if (!multiple_p (rbitpos, BITS_PER_UNIT))
	{
	  *bitstart = *bitend = 0;
	  return;
	}
@@ -4848,10 +4845,10 @@ get_bit_range (unsigned HOST_WIDE_INT *b
      relative to the representative.  DECL_FIELD_OFFSET of field and
      repr are the same by construction if they are not constants,
      see finish_bitfield_layout.  */
-  if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))
-      && tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr)))
-    bitoffset = (tree_to_uhwi (DECL_FIELD_OFFSET (field))
-		 - tree_to_uhwi (DECL_FIELD_OFFSET (repr))) * BITS_PER_UNIT;
+  poly_uint64 field_offset, repr_offset;
+  if (poly_int_tree_p (DECL_FIELD_OFFSET (field), &field_offset)
+      && poly_int_tree_p (DECL_FIELD_OFFSET (repr), &repr_offset))
+    bitoffset = (field_offset - repr_offset) * BITS_PER_UNIT;
   else
     bitoffset = 0;
   bitoffset += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
@@ -4860,17 +4857,16 @@ get_bit_range (unsigned HOST_WIDE_INT *b
   /* If the adjustment is larger than bitpos, we would have a negative bit
      position for the lower bound and this may wreak havoc later.  Adjust
      offset and bitpos to make the lower bound non-negative in that case.  */
-  if (bitoffset > *bitpos)
+  if (may_gt (bitoffset, *bitpos))
     {
-      HOST_WIDE_INT adjust = bitoffset - *bitpos;
-      gcc_assert ((adjust % BITS_PER_UNIT) == 0);
+      poly_int64 adjust_bits = upper_bound (bitoffset, *bitpos) - *bitpos;
+      poly_int64 adjust_bytes = exact_div (adjust_bits, BITS_PER_UNIT);
 
-      *bitpos += adjust;
+      *bitpos += adjust_bits;
       if (*offset == NULL_TREE)
-	*offset = size_int (-adjust / BITS_PER_UNIT);
+	*offset = size_int (-adjust_bytes);
       else
-	*offset
-	  = size_binop (MINUS_EXPR, *offset, size_int (adjust / BITS_PER_UNIT));
+	*offset = size_binop (MINUS_EXPR, *offset, size_int (adjust_bytes));
       *bitstart = 0;
     }
   else
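A note on the clamping logic in the hunk above: may_gt fires whenever
bitoffset *might* exceed *bitpos for some runtime parameter, and
upper_bound yields a value at least as big as both operands for every
runtime parameter, so the new *bitpos is known to be >= bitoffset and
the lower bound of the bit region stays non-negative.  A hypothetical
worked example, not part of the patch, assuming a target with two
poly_int coefficients (e.g. AArch64 with SVE), where "x" is the runtime
vector-length parameter:

  poly_int64 bitoffset (32, 8);		/* 32 + 8x bits.  */
  poly_int64 bitpos = 64;		/* 64 bits.  */
  /* may_gt is true: 32 + 8x > 64 once x >= 5.  */
  if (may_gt (bitoffset, bitpos))
    {
      /* upper_bound (32 + 8x, 64) == 64 + 8x, so adjust_bits == 8x,
	 which exact_div confirms is a whole number of bytes.  */
      poly_int64 adjust_bits = upper_bound (bitoffset, bitpos) - bitpos;
      poly_int64 adjust_bytes = exact_div (adjust_bits, 8);	/* == x.  */
    }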
@@ -4983,9 +4979,9 @@ expand_assignment (tree to, tree from, b
       || TREE_CODE (TREE_TYPE (to)) == ARRAY_TYPE)
     {
       machine_mode mode1;
-      HOST_WIDE_INT bitsize, bitpos;
-      unsigned HOST_WIDE_INT bitregion_start = 0;
-      unsigned HOST_WIDE_INT bitregion_end = 0;
+      poly_int64 bitsize, bitpos;
+      poly_uint64 bitregion_start = 0;
+      poly_uint64 bitregion_end = 0;
       tree offset;
       int unsignedp, reversep, volatilep = 0;
       tree tem;
@@ -4995,11 +4991,11 @@ expand_assignment (tree to, tree from, b
			       &unsignedp, &reversep, &volatilep);
 
       /* Make sure bitpos is not negative, it can wreak havoc later.  */
-      if (bitpos < 0)
+      if (may_lt (bitpos, 0))
	{
	  gcc_assert (offset == NULL_TREE);
-	  offset = size_int (bitpos >> LOG2_BITS_PER_UNIT);
-	  bitpos &= BITS_PER_UNIT - 1;
+	  offset = size_int (bits_to_bytes_round_down (bitpos));
+	  bitpos = num_trailing_bits (bitpos);
	}
 
       if (TREE_CODE (to) == COMPONENT_REF
@@ -5009,9 +5005,9 @@ expand_assignment (tree to, tree from, b
	 However, if we do not have a DECL_BIT_FIELD_TYPE but BITPOS or
	 BITSIZE are not byte-aligned, there is no need to limit the range
	 we can access.  This can occur with packed structures in Ada.  */
-      else if (bitsize > 0
-	       && bitsize % BITS_PER_UNIT == 0
-	       && bitpos % BITS_PER_UNIT == 0)
+      else if (may_gt (bitsize, 0)
+	       && multiple_p (bitsize, BITS_PER_UNIT)
+	       && multiple_p (bitpos, BITS_PER_UNIT))
	{
	  bitregion_start = bitpos;
	  bitregion_end = bitpos + bitsize - 1;
@@ -5073,16 +5069,18 @@ expand_assignment (tree to, tree from, b
	 This is only done for aligned data values, as these can
	 be expected to result in single move instructions.  */
+      poly_int64 bytepos;
       if (mode1 != VOIDmode
-	  && bitpos != 0
-	  && bitsize > 0
-	  && (bitpos % bitsize) == 0
-	  && (bitsize % GET_MODE_ALIGNMENT (mode1)) == 0
+	  && maybe_nonzero (bitpos)
+	  && may_gt (bitsize, 0)
+	  && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
+	  && multiple_p (bitpos, bitsize)
+	  && multiple_p (bitsize, GET_MODE_ALIGNMENT (mode1))
	  && MEM_ALIGN (to_rtx) >= GET_MODE_ALIGNMENT (mode1))
	{
-	  to_rtx = adjust_address (to_rtx, mode1, bitpos / BITS_PER_UNIT);
+	  to_rtx = adjust_address (to_rtx, mode1, bytepos);
	  bitregion_start = 0;
-	  if (bitregion_end >= (unsigned HOST_WIDE_INT) bitpos)
+	  if (must_ge (bitregion_end, poly_uint64 (bitpos)))
	    bitregion_end -= bitpos;
	  bitpos = 0;
	}
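The three-argument multiple_p in the hunk above is worth noting: it
tests divisibility and, when the test succeeds, stores the quotient,
replacing the separate "% ... == 0" test and "/" of the old code.
A minimal sketch of the idiom, not part of the patch:

  poly_int64 bytepos;
  if (multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
    /* bitpos is a whole number of bytes for all runtime parameters;
       bytepos now holds bitpos / BITS_PER_UNIT.  */
    to_rtx = adjust_address (to_rtx, mode1, bytepos);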
@@ -5097,8 +5095,7 @@ expand_assignment (tree to, tree from, b
	 code contains an out-of-bounds access to a small array.  */
       if (!MEM_P (to_rtx)
	  && GET_MODE (to_rtx) != BLKmode
-	  && (unsigned HOST_WIDE_INT) bitpos
-	     >= GET_MODE_PRECISION (GET_MODE (to_rtx)))
+	  && must_ge (bitpos, GET_MODE_PRECISION (GET_MODE (to_rtx))))
	{
	  expand_normal (from);
	  result = NULL;
@@ -5108,25 +5105,26 @@ expand_assignment (tree to, tree from, b
	{
	  unsigned short mode_bitsize = GET_MODE_BITSIZE (GET_MODE (to_rtx));
	  if (COMPLEX_MODE_P (TYPE_MODE (TREE_TYPE (from)))
-	      && bitpos == 0
-	      && bitsize == mode_bitsize)
+	      && known_zero (bitpos)
+	      && must_eq (bitsize, mode_bitsize))
	    result = store_expr (from, to_rtx, false, nontemporal, reversep);
-	  else if (bitsize == mode_bitsize / 2
-		   && (bitpos == 0 || bitpos == mode_bitsize / 2))
-	    result = store_expr (from, XEXP (to_rtx, bitpos != 0), false,
-				 nontemporal, reversep);
-	  else if (bitpos + bitsize <= mode_bitsize / 2)
+	  else if (must_eq (bitsize, mode_bitsize / 2)
+		   && (known_zero (bitpos)
+		       || must_eq (bitpos, mode_bitsize / 2)))
+	    result = store_expr (from, XEXP (to_rtx, maybe_nonzero (bitpos)),
+				 false, nontemporal, reversep);
+	  else if (must_le (bitpos + bitsize, mode_bitsize / 2))
	    result = store_field (XEXP (to_rtx, 0), bitsize, bitpos,
				  bitregion_start, bitregion_end,
				  mode1, from, get_alias_set (to),
				  nontemporal, reversep);
-	  else if (bitpos >= mode_bitsize / 2)
+	  else if (must_ge (bitpos, mode_bitsize / 2))
	    result = store_field (XEXP (to_rtx, 1), bitsize,
				  bitpos - mode_bitsize / 2,
				  bitregion_start, bitregion_end,
				  mode1, from, get_alias_set (to),
				  nontemporal, reversep);
-	  else if (bitpos == 0 && bitsize == mode_bitsize)
+	  else if (known_zero (bitpos) && must_eq (bitsize, mode_bitsize))
	    {
	      rtx from_rtx;
	      result = expand_normal (from);
Index: gcc/fold-const.c
===================================================================
--- gcc/fold-const.c	2017-10-23 17:17:05.755450644 +0100
+++ gcc/fold-const.c	2017-10-23 17:18:43.843393046 +0100
@@ -4168,12 +4168,13 @@ optimize_bit_field_compare (location_t l
     }
 
   /* Honor the C++ memory model and mimic what RTL expansion does.  */
-  unsigned HOST_WIDE_INT bitstart = 0;
-  unsigned HOST_WIDE_INT bitend = 0;
+  poly_uint64 bitstart = 0;
+  poly_uint64 bitend = 0;
   if (TREE_CODE (lhs) == COMPONENT_REF)
     {
-      get_bit_range (&bitstart, &bitend, lhs, &lbitpos, &offset);
-      if (offset != NULL_TREE)
+      poly_int64 plbitpos;
+      get_bit_range (&bitstart, &bitend, lhs, &plbitpos, &offset);
+      if (!plbitpos.is_constant (&lbitpos) || offset != NULL_TREE)
	return 0;
     }
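The fold-const.c change keeps optimize_bit_field_compare restricted to
compile-time-constant bit positions: is_constant only succeeds when the
poly_int64 has no runtime component, extracting the value into lbitpos
on success.  A minimal sketch of the idiom, not part of the patch:

  HOST_WIDE_INT lbitpos;
  if (!plbitpos.is_constant (&lbitpos))
    /* The position depends on a runtime parameter; punt.  */
    return 0;
  /* lbitpos now holds the constant bit position.  */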