From: Richard Henderson <rth@twiddle.net>
To: libc-ports@sourceware.org
Subject: [PATCH 4/5] alpha: Fix [BZ #13718]
Date: Wed, 06 Jun 2012 21:52:00 -0000
Message-Id: <1339019524-32478-4-git-send-email-rth@twiddle.net>
In-Reply-To: <1339019524-32478-1-git-send-email-rth@twiddle.net>
References: <1339019524-32478-1-git-send-email-rth@twiddle.net>

The routines expect to be able to bias the count by a small number.
If the count is near -1ull, the count will overflow.  Since we cannot
use the whole 64-bit address space, bound the count to LONG_MAX.
---
 ChangeLog.alpha                  |    4 ++++
 sysdeps/alpha/alphaev6/stxncpy.S |   19 +++++++++++++------
 sysdeps/alpha/stxncpy.S          |   23 +++++++++++++----------
 3 files changed, 30 insertions(+), 16 deletions(-)

diff --git a/ChangeLog.alpha b/ChangeLog.alpha
index 0598d7e..d291df9 100644
--- a/ChangeLog.alpha
+++ b/ChangeLog.alpha
@@ -1,5 +1,9 @@
 2012-06-06  Richard Henderson  <rth@twiddle.net>
 
+	[BZ #13718]
+	* sysdeps/alpha/stxncpy.S: Bound count to LONG_MAX at startup.
+	* sysdeps/alpha/alphaev6/stxncpy.S: Likewise.
+
 	* sysdeps/alpha/fpu/e_sqrt.c: Include <math.h> before redefining
 	__ieee754_sqrt.
 
diff --git a/sysdeps/alpha/alphaev6/stxncpy.S b/sysdeps/alpha/alphaev6/stxncpy.S
index d134eb8..28495df 100644
--- a/sysdeps/alpha/alphaev6/stxncpy.S
+++ b/sysdeps/alpha/alphaev6/stxncpy.S
@@ -143,18 +143,25 @@ $a_eoc:
 	.align 4
 __stxncpy:
 	/* Are source and destination co-aligned?  */
+	lda	t2, -1			# E :
 	xor	a0, a1, t1		# E :
 	and	a0, 7, t0		# E : find dest misalignment
-	and	t1, 7, t1		# E : (stall)
-	addq	a2, t0, a2		# E : bias count by dest misalignment (stall)
+	nop				# E :
 
-	subq	a2, 1, a2		# E :
+	srl	t2, 1, t2		# U :
+	and	t1, 7, t1		# E :
+	cmovlt	a2, t2, a2		# E : bound count to LONG_MAX (stall)
+	nop				# E :
+
+	addq	a2, t0, a2		# E : bias count by dest misalignment
+	subq	a2, 1, a2		# E : (stall)
 	and	a2, 7, t2		# E : (stall)
-	srl	a2, 3, a2		# U : a2 = loop counter = (count - 1)/8 (stall)
-	addq	zero, 1, t10		# E :
+	lda	t10, 1			# E :
 
+	srl	a2, 3, a2		# U : a2 = loop counter = (count - 1)/8
 	sll	t10, t2, t10		# U : t10 = bitmask of last count byte
-	bne	t1, $unaligned		# U :
+	nop				# E :
+	bne	t1, $unaligned		# U : (stall)
 
 	/* We are co-aligned; take care of a partial first word.  */
 	ldq_u	t1, 0(a1)	# L : load first src word
diff --git a/sysdeps/alpha/stxncpy.S b/sysdeps/alpha/stxncpy.S
index f8b494a..d2cb9c3 100644
--- a/sysdeps/alpha/stxncpy.S
+++ b/sysdeps/alpha/stxncpy.S
@@ -123,16 +123,19 @@ $a_eoc:
 	.align 3
 __stxncpy:
 	/* Are source and destination co-aligned?  */
-	xor	a0, a1, t1	# e0    :
-	and	a0, 7, t0	# .. e1 : find dest misalignment
-	and	t1, 7, t1	# e0    :
-	addq	a2, t0, a2	# .. e1 : bias count by dest misalignment
-	subq	a2, 1, a2	# e0    :
-	and	a2, 7, t2	# e1    :
-	srl	a2, 3, a2	# e0    : a2 = loop counter = (count - 1)/8
-	addq	zero, 1, t10	# .. e1 :
-	sll	t10, t2, t10	# e0    : t10 = bitmask of last count byte
-	bne	t1, $unaligned	# .. e1 :
+	lda	t2, -1
+	xor	a0, a1, t1
+	srl	t2, 1, t2
+	and	a0, 7, t0	# find dest misalignment
+	cmovlt	a2, t2, a2	# bound neg count to LONG_MAX
+	and	t1, 7, t1
+	addq	a2, t0, a2	# bias count by dest misalignment
+	subq	a2, 1, a2
+	and	a2, 7, t2
+	srl	a2, 3, a2	# a2 = loop counter = (count - 1)/8
+	addq	zero, 1, t10
+	sll	t10, t2, t10	# t10 = bitmask of last count byte
+	bne	t1, $unaligned
 
 	/* We are co-aligned; take care of a partial first word.  */
 
-- 
1.7.7.6
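
[Editor's note: the following is not part of the patch.  It is a rough C
sketch of the arithmetic the new prologue performs, with made-up variable
names.  `lda t2, -1` followed by `srl t2, 1, t2` builds LONG_MAX (all-ones
shifted right by one), and `cmovlt a2, t2, a2` replaces the count with that
value whenever the count is negative as a signed 64-bit number.  Clamping is
harmless because no object can span more than LONG_MAX bytes of the address
space, so a larger count can never be meant literally.]

/* Illustrative only -- not from the patch.  Shows why biasing a count
   near -1ull overflows and how clamping it to LONG_MAX avoids that.  */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint64_t count = UINT64_MAX;        /* e.g. strncpy (dst, src, -1)      */
  uint64_t misalign = 3;              /* destination address & 7          */

  /* Without the fix: count + misalign wraps around, so the loop counter
     (count - 1) / 8 comes out tiny instead of huge.  */
  uint64_t biased = count + misalign; /* wraps to 2                       */
  printf ("broken loop counter:  %llu\n",
          (unsigned long long) ((biased - 1) / 8));

  /* With the fix: any count whose sign bit is set (>= 2^63) is clamped to
     LONG_MAX first, mirroring cmovlt a2, t2, a2 with t2 = LONG_MAX, so the
     bias below can no longer overflow.  */
  uint64_t clamped = ((int64_t) count < 0) ? (uint64_t) INT64_MAX : count;
  biased = clamped + misalign;
  printf ("clamped loop counter: %llu\n",
          (unsigned long long) ((biased - 1) / 8));
  return 0;
}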