public inbox for libc-stable@sourceware.org
 help / color / mirror / Atom feed
* [2.27 COMMITTED] arm: CVE-2020-6096: Fix multiarch memcpy for negative length [BZ #25620]
@ 2020-11-16 21:01 Dmitry V. Levin
  0 siblings, 0 replies; only message in thread
From: Dmitry V. Levin @ 2020-11-16 21:01 UTC (permalink / raw)
  To: libc-stable

From: Alexander Anisimov <a.anisimov@omprussia.ru>

Unsigned branch instructions must be used for comparisons on r2 (the
length) to fix the wrong behavior when a negative length is passed to
memcpy: with signed branches (bge/blt/bmi/bpl), a negative count takes
the wrong path, whereas unsigned branches (bhs/blo) correctly treat it
as a very large unsigned count.
This commit fixes the armv7 version.

(cherry picked from commit beea361050728138b82c57dda0c4810402d342b9)
---
 sysdeps/arm/armv7/multiarch/memcpy_impl.S | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/sysdeps/arm/armv7/multiarch/memcpy_impl.S b/sysdeps/arm/armv7/multiarch/memcpy_impl.S
index 2de172635c..802c310f3e 100644
--- a/sysdeps/arm/armv7/multiarch/memcpy_impl.S
+++ b/sysdeps/arm/armv7/multiarch/memcpy_impl.S
@@ -268,7 +268,7 @@ ENTRY(memcpy)
 
 	mov	dst, dstin	/* Preserve dstin, we need to return it.  */
 	cmp	count, #64
-	bge	.Lcpy_not_short
+	bhs	.Lcpy_not_short
 	/* Deal with small copies quickly by dropping straight into the
 	   exit block.  */
 
@@ -351,10 +351,10 @@ ENTRY(memcpy)
 
 1:
 	subs	tmp2, count, #64	/* Use tmp2 for count.  */
-	blt	.Ltail63aligned
+	blo	.Ltail63aligned
 
 	cmp	tmp2, #512
-	bge	.Lcpy_body_long
+	bhs	.Lcpy_body_long
 
 .Lcpy_body_medium:			/* Count in tmp2.  */
 #ifdef USE_VFP
@@ -378,7 +378,7 @@ ENTRY(memcpy)
 	add	src, src, #64
 	vstr	d1, [dst, #56]
 	add	dst, dst, #64
-	bge	1b
+	bhs	1b
 	tst	tmp2, #0x3f
 	beq	.Ldone
 
@@ -412,7 +412,7 @@ ENTRY(memcpy)
 	ldrd	A_l, A_h, [src, #64]!
 	strd	A_l, A_h, [dst, #64]!
 	subs	tmp2, tmp2, #64
-	bge	1b
+	bhs	1b
 	tst	tmp2, #0x3f
 	bne	1f
 	ldr	tmp2,[sp], #FRAME_SIZE
@@ -482,7 +482,7 @@ ENTRY(memcpy)
 	add	src, src, #32
 
 	subs	tmp2, tmp2, #prefetch_lines * 64 * 2
-	blt	2f
+	blo	2f
 1:
 	cpy_line_vfp	d3, 0
 	cpy_line_vfp	d4, 64
@@ -494,7 +494,7 @@ ENTRY(memcpy)
 	add	dst, dst, #2 * 64
 	add	src, src, #2 * 64
 	subs	tmp2, tmp2, #prefetch_lines * 64
-	bge	1b
+	bhs	1b
 
 2:
 	cpy_tail_vfp	d3, 0
@@ -615,8 +615,8 @@ ENTRY(memcpy)
 1:
 	pld	[src, #(3 * 64)]
 	subs	count, count, #64
-	ldrmi	tmp2, [sp], #FRAME_SIZE
-	bmi	.Ltail63unaligned
+	ldrlo	tmp2, [sp], #FRAME_SIZE
+	blo	.Ltail63unaligned
 	pld	[src, #(4 * 64)]
 
 #ifdef USE_NEON
@@ -633,7 +633,7 @@ ENTRY(memcpy)
 	neon_load_multi d0-d3, src
 	neon_load_multi d4-d7, src
 	subs	count, count, #64
-	bmi	2f
+	blo	2f
 1:
 	pld	[src, #(4 * 64)]
 	neon_store_multi d0-d3, dst
@@ -641,7 +641,7 @@ ENTRY(memcpy)
 	neon_store_multi d4-d7, dst
 	neon_load_multi d4-d7, src
 	subs	count, count, #64
-	bpl	1b
+	bhs	1b
 2:
 	neon_store_multi d0-d3, dst
 	neon_store_multi d4-d7, dst
-- 
ldv


^ permalink raw reply	[flat|nested] only message in thread

only message in thread, other threads: [~2020-11-16 21:01 UTC | newest]

Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-11-16 21:01 [2.27 COMMITTED] arm: CVE-2020-6096: Fix multiarch memcpy for negative length [BZ #25620] Dmitry V. Levin

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).