public inbox for gcc-patches@gcc.gnu.org
From: Ramana Radhakrishnan <ramana.radhakrishnan@arm.com>
To: Martin Galvan <martin.galvan@tallertechnologies.com>,
	 Marek Polacek <polacek@redhat.com>
Cc: "gcc-patches@gcc.gnu.org" <gcc-patches@gcc.gnu.org>
Subject: Re: Fwd: [PING 2][PATCH] libgcc: Add CFI directives to the soft floating point support code for ARM
Date: Fri, 15 May 2015 17:01:00 -0000	[thread overview]
Message-ID: <555625BC.70708@arm.com> (raw)
In-Reply-To: <CAOKbPbY0OWCkqw5dKk1A--Bc07NY=FWT4KCn_3jNpkY4uW4onw@mail.gmail.com>

[-- Attachment #1: Type: text/plain, Size: 1394 bytes --]



On 13/05/15 19:11, Martin Galvan wrote:
> Here's the new patch. I downloaded the gcc sources from the SVN
> repository, removed the extra semicolon from my version of the files
> and re-generated the patch using svn diff, making sure the context
> info had all the tabs from the original. I then e-mailed the patch to
> myself as an attachment, applied it to the fresh SVN sources by doing
> patch --dry-run -p0 < cfi-svn.patch and checked that the compilation
> and tests were successful.
>

Thanks for doing this - the patch now looks much better and I can apply it!
Unfortunately there are still problems with it, and I'm not sure about your
testing procedures.

>
>
>  	mov	ip, r0
>  	mov	r0, r2
> @@ -1149,12 +1250,15 @@
>  	mov	r3, ip
>  	b	6f
>  	
> -ARM_FUNC_START aeabi_cdcmpeq
>  ARM_FUNC_ALIAS aeabi_cdcmple aeabi_cdcmpeq

How did this even build?

I've cleaned up some lines that were more than 80 characters long, re-added 
the definition of aeabi_cdcmpeq, and applied the attached patch after *doing* 
a full bootstrap and test run on arm-none-linux-gnueabihf along with a test 
run on arm-none-eabi.
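
As an aside on the aeabi_cdcmpeq hunk: ARM_FUNC_ALIAS only creates an alias
symbol for an existing entry point, so the ARM_FUNC_START for the alias target
still has to be emitted. A minimal sketch of the corrected shape (not lifted
from the patch, and assuming the usual lib1funcs.S macro definitions):

	ARM_FUNC_START aeabi_cdcmpeq                @ real entry point
	ARM_FUNC_ALIAS aeabi_cdcmple aeabi_cdcmpeq  @ second name for the same code
		CFI_START_FUNCTION
		@ ... body shared by both names ...
		CFI_END_FUNCTION
		FUNC_END aeabi_cdcmple
		FUNC_END aeabi_cdcmpeq

Dropping the ARM_FUNC_START line leaves the alias with nothing to attach to,
hence the question above.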


regards
Ramana

2015-05-15  Martin Galvan  <martin.galvan@tallertechnologies.com>

         * config/arm/lib1funcs.S (CFI_START_FUNCTION, CFI_END_FUNCTION):
         New macros.
         * config/arm/ieee754-df.S: Add CFI directives.
         * config/arm/ieee754-sf.S: Add CFI directives.
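
For anyone reading the patch without the tree handy: the new macro pair just
brackets each routine with .cfi_startproc/.cfi_endproc and a matching
.cfi_remember_state/.cfi_restore_state, so the per-routine CFA and
register-save adjustments are dropped again when the routine ends. A rough
sketch of the pattern (the macro bodies are the ones from the attached patch;
"somefunc" is a made-up name used only for illustration):

	.macro CFI_START_FUNCTION
		.cfi_startproc
		.cfi_remember_state      @ snapshot the entry-point unwind state
	.endm

	.macro CFI_END_FUNCTION
		.cfi_restore_state       @ return to the remembered entry state
		.cfi_endproc
	.endm

	ARM_FUNC_START somefunc              @ hypothetical routine
		CFI_START_FUNCTION
		do_push {r4, lr}             @ sp -= 8
		.cfi_adjust_cfa_offset 8     @ CFA is now sp + previousOffset + 8
		.cfi_rel_offset r4, 0        @ r4 saved at sp
		.cfi_rel_offset lr, 4        @ lr saved at sp + 4
		@ ... body ...
		RETLDM "r4"
		CFI_END_FUNCTION
		FUNC_END somefunc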


[-- Attachment #2: cfi-committed.txt --]
[-- Type: text/plain, Size: 22462 bytes --]

Index: libgcc/config/arm/ieee754-df.S
===================================================================
--- libgcc/config/arm/ieee754-df.S	(revision 223219)
+++ libgcc/config/arm/ieee754-df.S	(working copy)
@@ -33,8 +33,12 @@
  * Only the default rounding mode is intended for best performances.
  * Exceptions aren't supported yet, but that can be added quite easily
  * if necessary without impacting performances.
+ *
+ * In the CFI related comments, 'previousOffset' refers to the previous offset
+ * from sp used to compute the CFA.
  */
 
+	.cfi_sections .debug_frame
 
 #ifndef __ARMEB__
 #define xl r0
@@ -53,11 +57,13 @@
 
 ARM_FUNC_START negdf2
 ARM_FUNC_ALIAS aeabi_dneg negdf2
+	CFI_START_FUNCTION
 
 	@ flip sign bit
 	eor	xh, xh, #0x80000000
 	RET
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_dneg
 	FUNC_END negdf2
 
@@ -66,6 +72,7 @@
 #ifdef L_arm_addsubdf3
 
 ARM_FUNC_START aeabi_drsub
+	CFI_START_FUNCTION
 
 	eor	xh, xh, #0x80000000	@ flip sign bit of first arg
 	b	1f	
@@ -81,7 +88,11 @@
 ARM_FUNC_START adddf3
 ARM_FUNC_ALIAS aeabi_dadd adddf3
 
-1:	do_push	{r4, r5, lr}
+1:  do_push {r4, r5, lr}        @ sp -= 12
+	.cfi_adjust_cfa_offset 12   @ CFA is now sp + previousOffset + 12
+	.cfi_rel_offset r4, 0       @ Registers are saved from sp to sp + 8
+	.cfi_rel_offset r5, 4
+	.cfi_rel_offset lr, 8
 
 	@ Look for zeroes, equal values, INF, or NAN.
 	shift1	lsl, r4, xh, #1
@@ -148,6 +159,11 @@
 	@ Since this is not common case, rescale them off line.
 	teq	r4, r5
 	beq	LSYM(Lad_d)
+
+@ CFI note: we're lucky that the branches to Lad_* that appear after this function
+@ have a CFI state that's exactly the same as the one we're in at this
+@ point. Otherwise the CFI would change to a different state after the branch,
+@ which would be disastrous for backtracing.
 LSYM(Lad_x):
 
 	@ Compensate for the exponent overlapping the mantissa MSB added later
@@ -413,6 +429,7 @@
 	orrne	xh, xh, #0x00080000	@ quiet NAN
 	RETLDM	"r4, r5"
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_dsub
 	FUNC_END subdf3
 	FUNC_END aeabi_dadd
@@ -420,12 +437,19 @@
 
 ARM_FUNC_START floatunsidf
 ARM_FUNC_ALIAS aeabi_ui2d floatunsidf
+	CFI_START_FUNCTION
 
 	teq	r0, #0
 	do_it	eq, t
 	moveq	r1, #0
 	RETc(eq)
-	do_push	{r4, r5, lr}
+
+	do_push {r4, r5, lr}        @ sp -= 12
+	.cfi_adjust_cfa_offset 12   @ CFA is now sp + previousOffset + 12
+	.cfi_rel_offset r4, 0       @ Registers are saved from sp + 0 to sp + 8.
+	.cfi_rel_offset r5, 4
+	.cfi_rel_offset lr, 8
+
 	mov	r4, #0x400		@ initial exponent
 	add	r4, r4, #(52-1 - 1)
 	mov	r5, #0			@ sign bit is 0
@@ -435,17 +459,25 @@
 	mov	xh, #0
 	b	LSYM(Lad_l)
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_ui2d
 	FUNC_END floatunsidf
 
 ARM_FUNC_START floatsidf
 ARM_FUNC_ALIAS aeabi_i2d floatsidf
+	CFI_START_FUNCTION
 
 	teq	r0, #0
 	do_it	eq, t
 	moveq	r1, #0
 	RETc(eq)
-	do_push	{r4, r5, lr}
+
+	do_push {r4, r5, lr}        @ sp -= 12
+	.cfi_adjust_cfa_offset 12   @ CFA is now sp + previousOffset + 12
+	.cfi_rel_offset r4, 0       @ Registers are saved from sp + 0 to sp + 8.
+	.cfi_rel_offset r5, 4
+	.cfi_rel_offset lr, 8
+
 	mov	r4, #0x400		@ initial exponent
 	add	r4, r4, #(52-1 - 1)
 	ands	r5, r0, #0x80000000	@ sign bit in r5
@@ -457,11 +489,13 @@
 	mov	xh, #0
 	b	LSYM(Lad_l)
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_i2d
 	FUNC_END floatsidf
 
 ARM_FUNC_START extendsfdf2
 ARM_FUNC_ALIAS aeabi_f2d extendsfdf2
+	CFI_START_FUNCTION
 
 	movs	r2, r0, lsl #1		@ toss sign bit
 	mov	xh, r2, asr #3		@ stretch exponent
@@ -480,22 +514,34 @@
 
 	@ value was denormalized.  We can normalize it now.
 	do_push	{r4, r5, lr}
+	.cfi_adjust_cfa_offset 12   @ CFA is now sp + previousOffset + 12
+	.cfi_rel_offset r4, 0       @ Registers are saved from sp + 0 to sp + 8.
+	.cfi_rel_offset r5, 4
+	.cfi_rel_offset lr, 8
+
 	mov	r4, #0x380		@ setup corresponding exponent
 	and	r5, xh, #0x80000000	@ move sign bit in r5
 	bic	xh, xh, #0x80000000
 	b	LSYM(Lad_l)
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_f2d
 	FUNC_END extendsfdf2
 
 ARM_FUNC_START floatundidf
 ARM_FUNC_ALIAS aeabi_ul2d floatundidf
+	CFI_START_FUNCTION
+	.cfi_remember_state        @ Save the current CFA state.
 
 	orrs	r2, r0, r1
 	do_it	eq
 	RETc(eq)
 
-	do_push	{r4, r5, lr}
+	do_push {r4, r5, lr}       @ sp -= 12
+	.cfi_adjust_cfa_offset 12  @ CFA is now sp + previousOffset + 12
+	.cfi_rel_offset r4, 0      @ Registers are saved from sp + 0 to sp + 8
+	.cfi_rel_offset r5, 4
+	.cfi_rel_offset lr, 8
 
 	mov	r5, #0
 	b	2f
@@ -502,12 +548,20 @@
 
 ARM_FUNC_START floatdidf
 ARM_FUNC_ALIAS aeabi_l2d floatdidf
+	.cfi_restore_state
+	@ Restore the CFI state we saved above. If we didn't do this then the
+	@ following instructions would have the CFI state that was set by the
+	@ offset adjustments made in floatundidf.
 
 	orrs	r2, r0, r1
 	do_it	eq
 	RETc(eq)
 
-	do_push	{r4, r5, lr}
+	do_push {r4, r5, lr}       @ sp -= 12
+	.cfi_adjust_cfa_offset 12  @ CFA is now sp + previousOffset + 12
+	.cfi_rel_offset r4, 0      @ Registers are saved from sp to sp + 8
+	.cfi_rel_offset r5, 4
+	.cfi_rel_offset lr, 8
 
 	ands	r5, ah, #0x80000000	@ sign bit in r5
 	bpl	2f
@@ -550,6 +604,7 @@
 	add	r4, r4, r2
 	b	LSYM(Lad_p)
 
+	CFI_END_FUNCTION
 	FUNC_END floatdidf
 	FUNC_END aeabi_l2d
 	FUNC_END floatundidf
@@ -561,8 +616,15 @@
 
 ARM_FUNC_START muldf3
 ARM_FUNC_ALIAS aeabi_dmul muldf3
-	do_push	{r4, r5, r6, lr}
+	CFI_START_FUNCTION
 
+	do_push {r4, r5, r6, lr}    @ sp -= 16
+	.cfi_adjust_cfa_offset 16   @ CFA is now sp + previousOffset + 16
+	.cfi_rel_offset r4, 0       @ Registers are saved from sp to sp + 12.
+	.cfi_rel_offset r5, 4
+	.cfi_rel_offset r6, 8
+	.cfi_rel_offset lr, 12
+
 	@ Mask out exponents, trap any zero/denormal/INF/NAN.
 	mov	ip, #0xff
 	orr	ip, ip, #0x700
@@ -596,7 +658,16 @@
 	and   r6, r6, #0x80000000
 
 	@ Well, no way to make it shorter without the umull instruction.
-	stmfd	sp!, {r6, r7, r8, r9, sl, fp}
+	stmfd   sp!, {r6, r7, r8, r9, sl, fp}   @ sp -= 24
+	.cfi_remember_state         @ Save the current CFI state.
+	.cfi_adjust_cfa_offset 24   @ CFA is now sp + previousOffset + 24.
+	.cfi_rel_offset r6, 0       @ Registers are saved from sp to sp + 20.
+	.cfi_rel_offset r7, 4
+	.cfi_rel_offset r8, 8
+	.cfi_rel_offset r9, 12
+	.cfi_rel_offset sl, 16
+	.cfi_rel_offset fp, 20
+
 	mov	r7, xl, lsr #16
 	mov	r8, yl, lsr #16
 	mov	r9, xh, lsr #16
@@ -648,8 +719,8 @@
 	mul	fp, xh, yh
 	adcs	r5, r5, fp
 	adc	r6, r6, #0
-	ldmfd	sp!, {yl, r7, r8, r9, sl, fp}
-
+	ldmfd   sp!, {yl, r7, r8, r9, sl, fp}   @ sp += 24
+	.cfi_restore_state   @ Restore the previous CFI state.
 #else
 
 	@ Here is the actual multiplication.
@@ -715,7 +786,6 @@
 	orr	xh, xh, #0x00100000
 	mov	lr, #0
 	subs	r4, r4, #1
-
 LSYM(Lml_u):
 	@ Overflow?
 	bgt	LSYM(Lml_o)
@@ -863,13 +933,20 @@
 	orr	xh, xh, #0x00f80000
 	RETLDM	"r4, r5, r6"
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_dmul
 	FUNC_END muldf3
 
 ARM_FUNC_START divdf3
 ARM_FUNC_ALIAS aeabi_ddiv divdf3
+	CFI_START_FUNCTION
 	
 	do_push	{r4, r5, r6, lr}
+	.cfi_adjust_cfa_offset 16
+	.cfi_rel_offset r4, 0
+	.cfi_rel_offset r5, 4
+	.cfi_rel_offset r6, 8
+	.cfi_rel_offset lr, 12
 
 	@ Mask out exponents, trap any zero/denormal/INF/NAN.
 	mov	ip, #0xff
@@ -1052,6 +1129,7 @@
 	bne	LSYM(Lml_z)		@ 0 / <non_zero> -> 0
 	b	LSYM(Lml_n)		@ 0 / 0 -> NAN
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_ddiv
 	FUNC_END divdf3
 
@@ -1063,6 +1141,7 @@
 
 ARM_FUNC_START gtdf2
 ARM_FUNC_ALIAS gedf2 gtdf2
+	CFI_START_FUNCTION
 	mov	ip, #-1
 	b	1f
 
@@ -1077,6 +1156,10 @@
 	mov	ip, #1			@ how should we specify unordered here?
 
 1:	str	ip, [sp, #-4]!
+	.cfi_adjust_cfa_offset 4        @ CFA is now sp + previousOffset + 4.
+	@ We're not adding CFI for ip as it's pushed into the stack
+	@ only because it may be popped off later as a return value
+	@ (i.e. we're not preserving it anyways).
 
 	@ Trap any INF/NAN first.
 	mov	ip, xh, lsl #1
@@ -1085,10 +1168,18 @@
 	do_it	ne
 	COND(mvn,s,ne)	ip, ip, asr #21
 	beq	3f
+	.cfi_remember_state
+	@ Save the current CFI state. This is done because the branch
+	@ is conditional, and if we don't take it we'll issue a
+	@ .cfi_adjust_cfa_offset and return.  If we do take it,
+	@ however, the .cfi_adjust_cfa_offset from the non-branch code
+	@ will affect the branch code as well. To avoid this we'll
+	@ restore the current state before executing the branch code.
 
-	@ Test for equality.
-	@ Note that 0.0 is equal to -0.0.
+	@ Test for equality.  Note that 0.0 is equal to -0.0.
 2:	add	sp, sp, #4
+	.cfi_adjust_cfa_offset -4       @ CFA is now sp + previousOffset.
+
 	orrs	ip, xl, xh, lsl #1	@ if x == 0.0 or -0.0
 	do_it	eq, e
 	COND(orr,s,eq)	ip, yl, yh, lsl #1	@ and y == 0.0 or -0.0
@@ -1117,8 +1208,13 @@
 	orr	r0, r0, #1
 	RET
 
-	@ Look for a NAN.
-3:	mov	ip, xh, lsl #1
+3:  @ Look for a NAN.
+
+	@ Restore the previous CFI state (i.e. keep the CFI state as it was
+	@ before the branch).
+	.cfi_restore_state
+
+	mov ip, xh, lsl #1
 	mvns	ip, ip, asr #21
 	bne	4f
 	orrs	ip, xl, xh, lsl #12
@@ -1128,9 +1224,13 @@
 	bne	2b
 	orrs	ip, yl, yh, lsl #12
 	beq	2b			@ y is not NAN
+
 5:	ldr	r0, [sp], #4		@ unordered return code
+	.cfi_adjust_cfa_offset -4       @ CFA is now sp + previousOffset.
+
 	RET
 
+	CFI_END_FUNCTION
 	FUNC_END gedf2
 	FUNC_END gtdf2
 	FUNC_END ledf2
@@ -1140,6 +1240,7 @@
 	FUNC_END cmpdf2
 
 ARM_FUNC_START aeabi_cdrcmple
+	CFI_START_FUNCTION
 
 	mov	ip, r0
 	mov	r0, r2
@@ -1148,7 +1249,7 @@
 	mov	r1, r3
 	mov	r3, ip
 	b	6f
-	
+
 ARM_FUNC_START aeabi_cdcmpeq
 ARM_FUNC_ALIAS aeabi_cdcmple aeabi_cdcmpeq
 
@@ -1155,6 +1256,10 @@
 	@ The status-returning routines are required to preserve all
 	@ registers except ip, lr, and cpsr.
 6:	do_push	{r0, lr}
+	.cfi_adjust_cfa_offset 8  @ CFA is now sp + previousOffset + 8.
+	.cfi_rel_offset r0, 0     @ Previous r0 is saved at sp.
+	.cfi_rel_offset lr, 4     @ Previous lr is saved at sp + 4.
+
 	ARM_CALL cmpdf2
 	@ Set the Z flag correctly, and the C flag unconditionally.
 	cmp	r0, #0
@@ -1162,26 +1267,38 @@
 	@ that the first operand was smaller than the second.
 	do_it	mi
 	cmnmi	r0, #0
+
 	RETLDM	"r0"
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_cdcmple
 	FUNC_END aeabi_cdcmpeq
 	FUNC_END aeabi_cdrcmple
 	
 ARM_FUNC_START	aeabi_dcmpeq
+	CFI_START_FUNCTION
 
-	str	lr, [sp, #-8]!
+	str lr, [sp, #-8]!        @ sp -= 8
+	.cfi_adjust_cfa_offset 8  @ CFA is now sp + previousOffset + 8
+	.cfi_rel_offset lr, 0     @ lr is at sp
+
 	ARM_CALL aeabi_cdcmple
 	do_it	eq, e
 	moveq	r0, #1	@ Equal to.
 	movne	r0, #0	@ Less than, greater than, or unordered.
+
 	RETLDM
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_dcmpeq
 
 ARM_FUNC_START	aeabi_dcmplt
+	CFI_START_FUNCTION
 
-	str	lr, [sp, #-8]!
+	str lr, [sp, #-8]!        @ sp -= 8
+	.cfi_adjust_cfa_offset 8  @ CFA is now sp + previousOffset + 8
+	.cfi_rel_offset lr, 0     @ lr is at sp
+
 	ARM_CALL aeabi_cdcmple
 	do_it	cc, e
 	movcc	r0, #1	@ Less than.
@@ -1188,11 +1305,16 @@
 	movcs	r0, #0	@ Equal to, greater than, or unordered.
 	RETLDM
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_dcmplt
 
 ARM_FUNC_START	aeabi_dcmple
+	CFI_START_FUNCTION
 
-	str	lr, [sp, #-8]!
+	str lr, [sp, #-8]!        @ sp -= 8
+	.cfi_adjust_cfa_offset 8  @ CFA is now sp + previousOffset + 8
+	.cfi_rel_offset lr, 0     @ lr is at sp
+
 	ARM_CALL aeabi_cdcmple
 	do_it	ls, e
 	movls	r0, #1  @ Less than or equal to.
@@ -1199,11 +1321,16 @@
 	movhi	r0, #0	@ Greater than or unordered.
 	RETLDM
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_dcmple
 
 ARM_FUNC_START	aeabi_dcmpge
+	CFI_START_FUNCTION
 
-	str	lr, [sp, #-8]!
+	str lr, [sp, #-8]!        @ sp -= 8
+	.cfi_adjust_cfa_offset 8  @ CFA is now sp + previousOffset + 8
+	.cfi_rel_offset lr, 0     @ lr is at sp
+
 	ARM_CALL aeabi_cdrcmple
 	do_it	ls, e
 	movls	r0, #1	@ Operand 2 is less than or equal to operand 1.
@@ -1210,11 +1337,16 @@
 	movhi	r0, #0	@ Operand 2 greater than operand 1, or unordered.
 	RETLDM
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_dcmpge
 
 ARM_FUNC_START	aeabi_dcmpgt
+	CFI_START_FUNCTION
 
-	str	lr, [sp, #-8]!
+	str lr, [sp, #-8]!        @ sp -= 8
+	.cfi_adjust_cfa_offset 8  @ CFA is now sp + previousOffset + 8
+	.cfi_rel_offset lr, 0     @ lr is at sp
+
 	ARM_CALL aeabi_cdrcmple
 	do_it	cc, e
 	movcc	r0, #1	@ Operand 2 is less than operand 1.
@@ -1222,6 +1354,7 @@
 			@ or they are unordered.
 	RETLDM
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_dcmpgt
 
 #endif /* L_cmpdf2 */
@@ -1230,6 +1363,7 @@
 
 ARM_FUNC_START unorddf2
 ARM_FUNC_ALIAS aeabi_dcmpun unorddf2
+	.cfi_startproc
 
 	mov	ip, xh, lsl #1
 	mvns	ip, ip, asr #21
@@ -1247,6 +1381,7 @@
 3:	mov	r0, #1			@ arguments are unordered.
 	RET
 
+	.cfi_endproc
 	FUNC_END aeabi_dcmpun
 	FUNC_END unorddf2
 
@@ -1256,6 +1391,7 @@
 
 ARM_FUNC_START fixdfsi
 ARM_FUNC_ALIAS aeabi_d2iz fixdfsi
+	CFI_START_FUNCTION
 
 	@ check exponent range.
 	mov	r2, xh, lsl #1
@@ -1289,6 +1425,7 @@
 4:	mov	r0, #0			@ How should we convert NAN?
 	RET
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_d2iz
 	FUNC_END fixdfsi
 
@@ -1298,6 +1435,7 @@
 
 ARM_FUNC_START fixunsdfsi
 ARM_FUNC_ALIAS aeabi_d2uiz fixunsdfsi
+	CFI_START_FUNCTION
 
 	@ check exponent range.
 	movs	r2, xh, lsl #1
@@ -1327,6 +1465,7 @@
 4:	mov	r0, #0			@ How should we convert NAN?
 	RET
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_d2uiz
 	FUNC_END fixunsdfsi
 
@@ -1336,6 +1475,7 @@
 
 ARM_FUNC_START truncdfsf2
 ARM_FUNC_ALIAS aeabi_d2f truncdfsf2
+	CFI_START_FUNCTION
 
 	@ check exponent range.
 	mov	r2, xh, lsl #1
@@ -1400,6 +1540,7 @@
 	orr	r0, r0, #0x00800000
 	RET
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_d2f
 	FUNC_END truncdfsf2
 
Index: libgcc/config/arm/ieee754-sf.S
===================================================================
--- libgcc/config/arm/ieee754-sf.S	(revision 223219)
+++ libgcc/config/arm/ieee754-sf.S	(working copy)
@@ -31,6 +31,9 @@
  * Only the default rounding mode is intended for best performances.
  * Exceptions aren't supported yet, but that can be added quite easily
  * if necessary without impacting performances.
+ *
+ * In the CFI related comments, 'previousOffset' refers to the previous offset
+ * from sp used to compute the CFA.
  */
 
 #ifdef L_arm_negsf2
@@ -37,10 +40,12 @@
 	
 ARM_FUNC_START negsf2
 ARM_FUNC_ALIAS aeabi_fneg negsf2
+	CFI_START_FUNCTION
 
 	eor	r0, r0, #0x80000000	@ flip sign bit
 	RET
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_fneg
 	FUNC_END negsf2
 
@@ -49,6 +54,7 @@
 #ifdef L_arm_addsubsf3
 
 ARM_FUNC_START aeabi_frsub
+	CFI_START_FUNCTION
 
 	eor	r0, r0, #0x80000000	@ flip sign bit of first arg
 	b	1f
@@ -284,6 +290,7 @@
 	orrne	r0, r0, #0x00400000	@ quiet NAN
 	RET
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_frsub
 	FUNC_END aeabi_fadd
 	FUNC_END addsf3
@@ -292,6 +299,7 @@
 
 ARM_FUNC_START floatunsisf
 ARM_FUNC_ALIAS aeabi_ui2f floatunsisf
+	CFI_START_FUNCTION
 		
 	mov	r3, #0
 	b	1f
@@ -316,6 +324,7 @@
 	mov	al, #0
 	b	2f
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_i2f
 	FUNC_END floatsisf
 	FUNC_END aeabi_ui2f
@@ -323,6 +332,7 @@
 
 ARM_FUNC_START floatundisf
 ARM_FUNC_ALIAS aeabi_ul2f floatundisf
+	CFI_START_FUNCTION
 
 	orrs	r2, r0, r1
 	do_it	eq
@@ -409,6 +419,7 @@
 	biceq	r0, r0, ip, lsr #31
 	RET
 
+	CFI_END_FUNCTION
 	FUNC_END floatdisf
 	FUNC_END aeabi_l2f
 	FUNC_END floatundisf
@@ -420,6 +431,7 @@
 
 ARM_FUNC_START mulsf3
 ARM_FUNC_ALIAS aeabi_fmul mulsf3
+	CFI_START_FUNCTION
 
 	@ Mask out exponents, trap any zero/denormal/INF/NAN.
 	mov	ip, #0xff
@@ -454,7 +466,13 @@
 	and	r3, ip, #0x80000000
 
 	@ Well, no way to make it shorter without the umull instruction.
-	do_push	{r3, r4, r5}
+	do_push	{r3, r4, r5}       @ sp -= 12
+	.cfi_remember_state        @ Save the current CFI state
+	.cfi_adjust_cfa_offset 12  @ CFA is now sp + previousOffset + 12
+	.cfi_rel_offset r3, 0      @ Registers are saved from sp to sp + 8
+	.cfi_rel_offset r4, 4
+	.cfi_rel_offset r5, 8
+
 	mov	r4, r0, lsr #16
 	mov	r5, r1, lsr #16
 	bic	r0, r0, r4, lsl #16
@@ -465,7 +483,8 @@
 	mla	r0, r4, r1, r0
 	adds	r3, r3, r0, lsl #16
 	adc	r1, ip, r0, lsr #16
-	do_pop	{r0, r4, r5}
+	do_pop	{r0, r4, r5}       @ sp += 12
+	.cfi_restore_state         @ Restore the previous CFI state
 
 #else
 
@@ -618,11 +637,13 @@
 	orr	r0, r0, #0x00c00000
 	RET
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_fmul
 	FUNC_END mulsf3
 
 ARM_FUNC_START divsf3
 ARM_FUNC_ALIAS aeabi_fdiv divsf3
+	CFI_START_FUNCTION
 
 	@ Mask out exponents, trap any zero/denormal/INF/NAN.
 	mov	ip, #0xff
@@ -758,6 +779,7 @@
 	bne	LSYM(Lml_z)		@ 0 / <non_zero> -> 0
 	b	LSYM(Lml_n)		@ 0 / 0 -> NAN
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_fdiv
 	FUNC_END divsf3
 
@@ -782,6 +804,7 @@
 
 ARM_FUNC_START gtsf2
 ARM_FUNC_ALIAS gesf2 gtsf2
+	CFI_START_FUNCTION
 	mov	ip, #-1
 	b	1f
 
@@ -796,6 +819,10 @@
 	mov	ip, #1			@ how should we specify unordered here?
 
 1:	str	ip, [sp, #-4]!
+	.cfi_adjust_cfa_offset 4  @ CFA is now sp + previousOffset + 4.
+	@ We're not adding CFI for ip as it's pushed into the stack only because
+	@ it may be popped off later as a return value (i.e. we're not preserving
+	@ it anyways).
 
 	@ Trap any INF/NAN first.
 	mov	r2, r0, lsl #1
@@ -804,10 +831,18 @@
 	do_it	ne
 	COND(mvn,s,ne)	ip, r3, asr #24
 	beq	3f
+	.cfi_remember_state
+	@ Save the current CFI state. This is done because the branch is conditional,
+	@ and if we don't take it we'll issue a .cfi_adjust_cfa_offset and return.
+	@ If we do take it, however, the .cfi_adjust_cfa_offset from the non-branch
+	@ code will affect the branch code as well. To avoid this we'll restore
+	@ the current state before executing the branch code.
 
 	@ Compare values.
 	@ Note that 0.0 is equal to -0.0.
 2:	add	sp, sp, #4
+	.cfi_adjust_cfa_offset -4       @ CFA is now sp + previousOffset.
+
 	orrs	ip, r2, r3, lsr #1	@ test if both are 0, clear C flag
 	do_it	ne
 	teqne	r0, r1			@ if not 0 compare sign
@@ -823,8 +858,13 @@
 	orrne	r0, r0, #1
 	RET
 
-	@ Look for a NAN. 
-3:	mvns	ip, r2, asr #24
+3:	@ Look for a NAN.
+
+	@ Restore the previous CFI state (i.e. keep the CFI state as it was
+	@ before the branch).
+	.cfi_restore_state
+
+	mvns	ip, r2, asr #24
 	bne	4f
 	movs	ip, r0, lsl #9
 	bne	5f			@ r0 is NAN
@@ -832,9 +872,12 @@
 	bne	2b
 	movs	ip, r1, lsl #9
 	beq	2b			@ r1 is not NAN
+
 5:	ldr	r0, [sp], #4		@ return unordered code.
+	.cfi_adjust_cfa_offset -4       @ CFA is now sp + previousOffset.
 	RET
 
+	CFI_END_FUNCTION
 	FUNC_END gesf2
 	FUNC_END gtsf2
 	FUNC_END lesf2
@@ -844,6 +887,7 @@
 	FUNC_END cmpsf2
 
 ARM_FUNC_START aeabi_cfrcmple
+	CFI_START_FUNCTION
 
 	mov	ip, r0
 	mov	r0, r1
@@ -856,6 +900,13 @@
 	@ The status-returning routines are required to preserve all
 	@ registers except ip, lr, and cpsr.
 6:	do_push	{r0, r1, r2, r3, lr}
+	.cfi_adjust_cfa_offset 20  @ CFA is at sp + previousOffset + 20
+	.cfi_rel_offset r0, 0      @ Registers are saved from sp to sp + 16
+	.cfi_rel_offset r1, 4
+	.cfi_rel_offset r2, 8
+	.cfi_rel_offset r3, 12
+	.cfi_rel_offset lr, 16
+
 	ARM_CALL cmpsf2
 	@ Set the Z flag correctly, and the C flag unconditionally.
 	cmp	r0, #0
@@ -865,13 +916,18 @@
 	cmnmi	r0, #0
 	RETLDM	"r0, r1, r2, r3"
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_cfcmple
 	FUNC_END aeabi_cfcmpeq
 	FUNC_END aeabi_cfrcmple
 
 ARM_FUNC_START	aeabi_fcmpeq
+	CFI_START_FUNCTION
 
-	str	lr, [sp, #-8]!
+	str	lr, [sp, #-8]!    @ sp -= 8
+	.cfi_adjust_cfa_offset 8  @ CFA is now sp + previousOffset + 8
+	.cfi_rel_offset lr, 0     @ lr is at sp
+
 	ARM_CALL aeabi_cfcmple
 	do_it	eq, e
 	moveq	r0, #1	@ Equal to.
@@ -878,11 +934,16 @@
 	movne	r0, #0	@ Less than, greater than, or unordered.
 	RETLDM
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_fcmpeq
 
 ARM_FUNC_START	aeabi_fcmplt
+	CFI_START_FUNCTION
 
-	str	lr, [sp, #-8]!
+	str	lr, [sp, #-8]!    @ sp -= 8
+	.cfi_adjust_cfa_offset 8  @ CFA is now sp + previousOffset + 8
+	.cfi_rel_offset lr, 0     @ lr is at sp
+
 	ARM_CALL aeabi_cfcmple
 	do_it	cc, e
 	movcc	r0, #1	@ Less than.
@@ -889,11 +950,16 @@
 	movcs	r0, #0	@ Equal to, greater than, or unordered.
 	RETLDM
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_fcmplt
 
 ARM_FUNC_START	aeabi_fcmple
+	CFI_START_FUNCTION
 
-	str	lr, [sp, #-8]!
+	str	lr, [sp, #-8]!    @ sp -= 8
+	.cfi_adjust_cfa_offset 8  @ CFA is now sp + previousOffset + 8
+	.cfi_rel_offset lr, 0     @ lr is at sp
+
 	ARM_CALL aeabi_cfcmple
 	do_it	ls, e
 	movls	r0, #1  @ Less than or equal to.
@@ -900,11 +966,16 @@
 	movhi	r0, #0	@ Greater than or unordered.
 	RETLDM
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_fcmple
 
 ARM_FUNC_START	aeabi_fcmpge
+	CFI_START_FUNCTION
 
-	str	lr, [sp, #-8]!
+	str	lr, [sp, #-8]!    @ sp -= 8
+	.cfi_adjust_cfa_offset 8  @ CFA is now sp + previousOffset + 8
+	.cfi_rel_offset lr, 0     @ lr is at sp
+
 	ARM_CALL aeabi_cfrcmple
 	do_it	ls, e
 	movls	r0, #1	@ Operand 2 is less than or equal to operand 1.
@@ -911,11 +982,16 @@
 	movhi	r0, #0	@ Operand 2 greater than operand 1, or unordered.
 	RETLDM
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_fcmpge
 
 ARM_FUNC_START	aeabi_fcmpgt
+	CFI_START_FUNCTION
 
-	str	lr, [sp, #-8]!
+	str	lr, [sp, #-8]!    @ sp -= 8
+	.cfi_adjust_cfa_offset 8  @ CFA is now sp + previousOffset + 8
+	.cfi_rel_offset lr, 0     @ lr is at sp
+
 	ARM_CALL aeabi_cfrcmple
 	do_it	cc, e
 	movcc	r0, #1	@ Operand 2 is less than operand 1.
@@ -923,6 +999,7 @@
 			@ or they are unordered.
 	RETLDM
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_fcmpgt
 
 #endif /* L_cmpsf2 */
@@ -931,6 +1008,7 @@
 
 ARM_FUNC_START unordsf2
 ARM_FUNC_ALIAS aeabi_fcmpun unordsf2
+	CFI_START_FUNCTION
 
 	mov	r2, r0, lsl #1
 	mov	r3, r1, lsl #1
@@ -947,6 +1025,7 @@
 3:	mov	r0, #1			@ arguments are unordered.
 	RET
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_fcmpun
 	FUNC_END unordsf2
 
@@ -956,6 +1035,7 @@
 
 ARM_FUNC_START fixsfsi
 ARM_FUNC_ALIAS aeabi_f2iz fixsfsi
+	CFI_START_FUNCTION
 
 	@ check exponent range.
 	mov	r2, r0, lsl #1
@@ -989,6 +1069,7 @@
 4:	mov	r0, #0			@ What should we convert NAN to?
 	RET
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_f2iz
 	FUNC_END fixsfsi
 
@@ -998,6 +1079,7 @@
 
 ARM_FUNC_START fixunssfsi
 ARM_FUNC_ALIAS aeabi_f2uiz fixunssfsi
+	CFI_START_FUNCTION
 
 	@ check exponent range.
 	movs	r2, r0, lsl #1
@@ -1027,6 +1109,7 @@
 4:	mov	r0, #0			@ What should we convert NAN to?
 	RET
 
+	CFI_END_FUNCTION
 	FUNC_END aeabi_f2uiz
 	FUNC_END fixunssfsi
 
Index: libgcc/config/arm/lib1funcs.S
===================================================================
--- libgcc/config/arm/lib1funcs.S	(revision 223219)
+++ libgcc/config/arm/lib1funcs.S	(working copy)
@@ -1965,6 +1965,16 @@
 
 #endif /* Arch supports thumb.  */
 
+.macro CFI_START_FUNCTION
+	.cfi_startproc
+	.cfi_remember_state
+.endm
+
+.macro CFI_END_FUNCTION
+	.cfi_restore_state
+	.cfi_endproc
+.endm
+
 #ifndef __symbian__
 #ifndef __ARM_ARCH_6M__
 #include "ieee754-df.S"

Thread overview: 13+ messages
2015-05-11 19:44 Martin Galvan
2015-05-12  8:55 ` Ramana Radhakrishnan
2015-05-12 13:24   ` Martin Galvan
2015-05-13 16:43     ` Ramana Radhakrishnan
2015-05-13 17:04       ` Ramana Radhakrishnan
2015-05-13 17:13         ` Martin Galvan
2015-05-13 17:15           ` Marek Polacek
2015-05-13 18:13             ` Martin Galvan
2015-05-13 18:31               ` Martin Galvan
2015-05-15 17:01               ` Ramana Radhakrishnan [this message]
2015-05-15 17:35                 ` Martin Galvan
  -- strict thread matches above, loose matches on Subject: below --
2015-04-28 15:51 Martin Galvan
2015-04-28 16:22 ` Ramana Radhakrishnan
2015-04-28 17:10   ` Martin Galvan
2015-05-04 20:51     ` Martin Galvan
2015-05-10 22:16       ` Fwd: " Martin Galvan
2015-05-11  7:45         ` Ramana Radhakrishnan
