Browse Source

Joseph S. Myers writes:
This patch fixes some of the ARM EABI code to be interworking-safe, using
bx where appropriate. (This code went in around the same time as the
Thumb patches, hence not being fixed by those patches.)

Mike Frysinger 19 years ago
parent
commit
8bb7fcac75

+ 4 - 0
libc/sysdeps/linux/arm/__longjmp.S

@@ -61,7 +61,11 @@ __longjmp:
 # endif
 #endif	
 
+#if defined(__USE_BX__)
+	bx	lr
+#else
 	mov pc, lr
+#endif
 
 .size __longjmp,.-__longjmp
 libc_hidden_def(__longjmp)

+ 4 - 0
libc/sysdeps/linux/arm/mmap64.S

@@ -52,7 +52,11 @@ mmap64:
 	DO_CALL (mmap2)
 	cmn	r0, $4096
 	ldmfd	sp!, {r4, r5}
+#if defined(__USE_BX__)
+	bxcc	lr
+#else
 	movcc	pc, lr
+#endif
 	b	__syscall_error
 .Linval:
 	mov	r0, $-EINVAL

+ 4 - 0
libc/sysdeps/linux/arm/syscall-eabi.S

@@ -37,7 +37,11 @@ syscall:
 	swi	0x0
 	ldmfd	sp!, {r4, r5, r6, r7}
 	cmn	r0, #4096
+#if defined(__USE_BX__)
+	bxcc	lr
+#else
 	movcc	pc, lr
+#endif
 	b	__syscall_error
 
 .size syscall,.-syscall