This is the mail archive of the libc-ports@sources.redhat.com mailing list for the libc-ports project.



[PATCH 4/5] alpha: Fix [BZ #13718]


The routines expect to be able to bias the count by a small number
(the destination misalignment).  If the count is near -1ull, that bias
overflows and the count wraps around to a small value, so the copy
terminates far too early.  Since we cannot use the whole 64-bit address
space anyway, bound the count to LONG_MAX.
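
As an illustration only (not part of the patch; the constants are made
up), the stand-alone C sketch below mimics what the prologue computes
before and after the clamp.  INT64_MAX stands in for Alpha's 64-bit
LONG_MAX so the example also runs on 32-bit hosts.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint64_t count = UINT64_MAX - 2;  /* n close to -1ull from the caller */
  uint64_t misalign = 5;            /* dest address & 7, for example */

  /* Unpatched prologue: bias the count, then derive the word-loop
     counter.  The addition wraps, so the loop counter is tiny.  */
  uint64_t biased = count + misalign;       /* wraps around to 2 */
  uint64_t loops = (biased - 1) >> 3;       /* 0 iterations: wrong */
  printf ("unpatched loop counter: %" PRIu64 "\n", loops);

  /* Patched prologue: first clamp a "negative" (signed) count to
     LONG_MAX, mirroring the lda/srl/cmovlt sequence in the patch.  */
  uint64_t clamped = ((int64_t) count < 0) ? (uint64_t) INT64_MAX : count;
  biased = clamped + misalign;
  loops = (biased - 1) >> 3;
  printf ("patched loop counter:   %" PRIu64 "\n", loops);
  return 0;
}

The truncation is harmless for the reason the commit message gives: no
object can occupy the whole 64-bit address space, so any count above
LONG_MAX already exceeds anything that could actually be copied.
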
---
 ChangeLog.alpha                  |    4 ++++
 sysdeps/alpha/alphaev6/stxncpy.S |   19 +++++++++++++------
 sysdeps/alpha/stxncpy.S          |   23 +++++++++++++----------
 3 files changed, 30 insertions(+), 16 deletions(-)

diff --git a/ChangeLog.alpha b/ChangeLog.alpha
index 0598d7e..d291df9 100644
--- a/ChangeLog.alpha
+++ b/ChangeLog.alpha
@@ -1,5 +1,9 @@
 2012-06-06  Richard Henderson  <rth@twiddle.net>
 
+	[BZ #13718]
+	* sysdeps/alpha/stxncpy.S: Bound count to LONG_MAX at startup.
+	* sysdeps/alpha/alphaev6/stxncpy.S: Likewise.
+
 	* sysdeps/alpha/fpu/e_sqrt.c: Include <math_private.h> before
 	redefining __ieee754_sqrt.
 
diff --git a/sysdeps/alpha/alphaev6/stxncpy.S b/sysdeps/alpha/alphaev6/stxncpy.S
index d134eb8..28495df 100644
--- a/sysdeps/alpha/alphaev6/stxncpy.S
+++ b/sysdeps/alpha/alphaev6/stxncpy.S
@@ -143,18 +143,25 @@ $a_eoc:
 	.align 4
 __stxncpy:
 	/* Are source and destination co-aligned?  */
+	lda	t2, -1		# E :
 	xor	a0, a1, t1	# E :
 	and	a0, 7, t0	# E : find dest misalignment
-	and	t1, 7, t1	# E : (stall)
-	addq	a2, t0, a2	# E : bias count by dest misalignment (stall)
+	nop			# E :
 
-	subq	a2, 1, a2	# E :
+	srl	t2, 1, t2	# U :
+	and	t1, 7, t1	# E :
+	cmovlt	a2, t2, a2	# E : bound count to LONG_MAX (stall)
+	nop			# E :
+
+	addq	a2, t0, a2	# E : bias count by dest misalignment
+	subq	a2, 1, a2	# E : (stall)
 	and	a2, 7, t2	# E : (stall)
-	srl	a2, 3, a2	# U : a2 = loop counter = (count - 1)/8 (stall)
-	addq	zero, 1, t10	# E :
+	lda	t10, 1		# E :
 
+	srl	a2, 3, a2	# U : a2 = loop counter = (count - 1)/8
 	sll	t10, t2, t10	# U : t10 = bitmask of last count byte
-	bne	t1, $unaligned	# U :
+	nop			# E :
+	bne	t1, $unaligned	# U : (stall)
 
 	/* We are co-aligned; take care of a partial first word.  */
 	ldq_u	t1, 0(a1)	# L : load first src word
diff --git a/sysdeps/alpha/stxncpy.S b/sysdeps/alpha/stxncpy.S
index f8b494a..d2cb9c3 100644
--- a/sysdeps/alpha/stxncpy.S
+++ b/sysdeps/alpha/stxncpy.S
@@ -123,16 +123,19 @@ $a_eoc:
 	.align 3
 __stxncpy:
 	/* Are source and destination co-aligned?  */
-	xor	a0, a1, t1	# e0    :
-	and	a0, 7, t0	# .. e1 : find dest misalignment
-	and	t1, 7, t1	# e0    :
-	addq	a2, t0, a2	# .. e1 : bias count by dest misalignment
-	subq	a2, 1, a2	# e0    :
-	and	a2, 7, t2	# e1    :
-	srl	a2, 3, a2	# e0    : a2 = loop counter = (count - 1)/8
-	addq	zero, 1, t10	# .. e1 :
-	sll	t10, t2, t10	# e0    : t10 = bitmask of last count byte
-	bne	t1, $unaligned	# .. e1 :
+	lda	t2, -1
+	xor	a0, a1, t1
+	srl	t2, 1, t2
+	and	a0, 7, t0		# find dest misalignment
+	cmovlt	a2, t2, a2		# bound neg count to LONG_MAX
+	and	t1, 7, t1
+	addq	a2, t0, a2		# bias count by dest misalignment
+	subq	a2, 1, a2
+	and	a2, 7, t2
+	srl	a2, 3, a2		# a2 = loop counter = (count - 1)/8
+	addq	zero, 1, t10
+	sll	t10, t2, t10		# t10 = bitmask of last count byte
+	bne	t1, $unaligned
 
 	/* We are co-aligned; take care of a partial first word.  */
 
-- 
1.7.7.6

