From: Aurelien Jarno
To: libc-stable@sourceware.org
Cc: Andrew Senkevich
Subject: [2.24 COMMITTED 1/4] Fix i386 memmove issue (bug 22644).
Date: Mon, 01 Jan 2018 00:00:00 -0000
Message-Id: <20181220233902.20796-1-aurelien@aurel32.net>
X-Mailer: git-send-email 2.19.2
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit

From: Andrew Senkevich

	[BZ #22644]
	* sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S: Fixed
	branch conditions.
	* string/test-memmove.c (do_test2): New testcase.

(cherry picked from commit cd66c0e584c6d692bc8347b5e72723d02b8a8ada)
---
 ChangeLog                                  |  8 +++
 NEWS                                       |  2 +
 string/test-memmove.c                      | 57 +++++++++++++++++++
 .../i686/multiarch/memcpy-sse2-unaligned.S | 12 ++--
 4 files changed, 73 insertions(+), 6 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 3305107bca..63813de2d5 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,11 @@
+2018-03-23  Andrew Senkevich
+	    Max Horn
+
+	[BZ #22644]
+	* sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S: Fixed
+	branch conditions.
+	* string/test-memmove.c (do_test2): New testcase.
+
 2018-09-06  Stefan Liebler
 
 	* sysdeps/unix/sysv/linux/spawni.c (maybe_script_execute):
diff --git a/NEWS b/NEWS
index d11dfe1429..77f7f1ad0e 100644
--- a/NEWS
+++ b/NEWS
@@ -60,6 +60,8 @@ The following bugs are resolved with this release:
   [21609] x86-64: Align the stack in __tls_get_addr
   [21624] Unsafe alloca allows local attackers to alias stack and heap (CVE-2017-1000366)
   [21654] nss: Fix invalid cast in group merging
+  [22644] string: memmove-sse2-unaligned on 32bit x86 produces garbage when
+    crossing 2GB threshold (CVE-2017-18269)
   [22715] x86-64: Properly align La_x86_64_retval to VEC_SIZE
 
 Version 2.24
diff --git a/string/test-memmove.c b/string/test-memmove.c
index 43433297e5..f44c05d669 100644
--- a/string/test-memmove.c
+++ b/string/test-memmove.c
@@ -245,6 +245,60 @@ do_random_tests (void)
     }
 }
 
+static void
+do_test2 (void)
+{
+  size_t size = 0x20000000;
+  uint32_t * large_buf;
+
+  large_buf = mmap ((void*) 0x70000000, size, PROT_READ | PROT_WRITE,
+		    MAP_PRIVATE | MAP_ANON, -1, 0);
+
+  if (large_buf == MAP_FAILED)
+    error (77, errno, "Large mmap failed");
+
+  if ((uintptr_t) large_buf > 0x80000000 - 128
+      || 0x80000000 - (uintptr_t) large_buf > 0x20000000)
+    {
+      error (0, 0, "Large mmap allocated improperly");
+      ret = 77;
+      munmap ((void *) large_buf, size);
+      return;
+    }
+
+  size_t bytes_move = 0x80000000 - (uintptr_t) large_buf;
+  size_t arr_size = bytes_move / sizeof (uint32_t);
+  size_t i;
+
+  FOR_EACH_IMPL (impl, 0)
+    {
+      for (i = 0; i < arr_size; i++)
+	large_buf[i] = (uint32_t) i;
+
+      uint32_t * dst = &large_buf[33];
+
+#ifdef TEST_BCOPY
+      CALL (impl, (char *) large_buf, (char *) dst, bytes_move);
+#else
+      CALL (impl, (char *) dst, (char *) large_buf, bytes_move);
+#endif
+
+      for (i = 0; i < arr_size; i++)
+	{
+	  if (dst[i] != (uint32_t) i)
+	    {
+	      error (0, 0,
+		     "Wrong result in function %s dst \"%p\" src \"%p\" offset \"%zd\"",
+		     impl->name, dst, large_buf, i);
+	      ret = 1;
+	      break;
+	    }
+	}
+    }
+
+  munmap ((void *) large_buf, size);
+}
+
 int
 test_main (void)
 {
@@ -284,6 +338,9 @@ test_main (void)
     }
 
   do_random_tests ();
+
+  do_test2 ();
+
   return ret;
 }
 
diff --git a/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S b/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S
index 76f34291a3..bb26708d67 100644
--- a/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S
+++ b/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S
@@ -72,7 +72,7 @@ ENTRY (MEMCPY)
 	cmp	%edx, %eax
 
 # ifdef USE_AS_MEMMOVE
-	jg	L(check_forward)
+	ja	L(check_forward)
 
 L(mm_len_0_or_more_backward):
 /* Now do checks for lengths. We do [0..16], [16..32], [32..64], [64..128]
@@ -81,7 +81,7 @@ L(mm_len_0_or_more_backward):
 	jbe	L(mm_len_0_16_bytes_backward)
 
 	cmpl	$32, %ecx
-	jg	L(mm_len_32_or_more_backward)
+	ja	L(mm_len_32_or_more_backward)
 
 /* Copy [0..32] and return.  */
 	movdqu	(%eax), %xmm0
@@ -92,7 +92,7 @@ L(mm_len_0_or_more_backward):
 
 L(mm_len_32_or_more_backward):
 	cmpl	$64, %ecx
-	jg	L(mm_len_64_or_more_backward)
+	ja	L(mm_len_64_or_more_backward)
 
 /* Copy [0..64] and return.  */
 	movdqu	(%eax), %xmm0
@@ -107,7 +107,7 @@ L(mm_len_32_or_more_backward):
 
 L(mm_len_64_or_more_backward):
 	cmpl	$128, %ecx
-	jg	L(mm_len_128_or_more_backward)
+	ja	L(mm_len_128_or_more_backward)
 
 /* Copy [0..128] and return.  */
 	movdqu	(%eax), %xmm0
@@ -132,7 +132,7 @@ L(mm_len_128_or_more_backward):
 	add	%ecx, %eax
 	cmp	%edx, %eax
 	movl	SRC(%esp), %eax
-	jle	L(forward)
+	jbe	L(forward)
 	PUSH (%esi)
 	PUSH (%edi)
 	PUSH (%ebx)
@@ -269,7 +269,7 @@ L(check_forward):
 	add	%edx, %ecx
 	cmp	%eax, %ecx
 	movl	LEN(%esp), %ecx
-	jle	L(forward)
+	jbe	L(forward)
 
 /* Now do checks for lengths. We do [0..16], [0..32], [0..64], [0..128]
    separately.  */
-- 
2.19.2
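
For context on the fix: jg/jle branch on a signed comparison while ja/jbe
branch on an unsigned one, so once the addresses being compared sit on
opposite sides of 0x80000000 on 32-bit x86 the signed branches order them
the wrong way round, which is the situation do_test2 sets up by mapping a
buffer that ends at the 2GB boundary. Below is a minimal standalone C
sketch of that effect; the program, its variable names (src, dst) and the
example addresses are made up for illustration and are not part of the
patch or of the glibc test suite. The cast relies on the usual
two's-complement wrap of out-of-range conversions.

#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  uint32_t src = 0x80000010u;   /* hypothetical source just above 2GB */
  uint32_t dst = 0x7ffffff0u;   /* hypothetical destination just below 2GB */

  /* Like `cmp %edx, %eax` followed by jg: signed src > dst.  The cast
     wraps src to a negative value, so the test is false and the code
     would fall through to the backward-copy path instead of the
     intended forward check.  */
  int signed_src_above_dst = (int32_t) src > (int32_t) dst;

  /* Like `cmp %edx, %eax` followed by ja: unsigned src > dst.  True,
     so the intended path is taken.  */
  int unsigned_src_above_dst = src > dst;

  printf ("signed   (jg): src > dst -> %d\n", signed_src_above_dst);
  printf ("unsigned (ja): src > dst -> %d\n", unsigned_src_above_dst);
  return 0;
}

On mainstream compilers this prints 0 for the signed comparison and 1 for
the unsigned one, mirroring why the buffer in do_test2 is mapped right up
against 0x80000000.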