فهرست منبع

Khem Raj says:
Add support for using the BX instruction for THUMB-aware architectures.

Mike Frysinger 20 سال پیش
والد
کامیت
ded219589f
7 فایل‌های تغییر یافته به همراه 59 افزوده شده و 12 حذف شده
  1. 7 0
      extra/Configs/Config.arm
  2. 14 5
      libc/string/arm/_memcpy.S
  3. 9 2
      libc/string/arm/memcmp.S
  4. 10 2
      libc/string/arm/memset.S
  5. 5 1
      libc/string/arm/strcmp.S
  6. 5 1
      libc/string/arm/strlen.S
  7. 9 1
      libc/string/arm/strncmp.S

+ 7 - 0
extra/Configs/Config.arm

@@ -34,6 +34,13 @@ config CONFIG_ARM_EABI
 	  need a kernel supporting the EABI system call interface, or "N"
 	  for a compiler using the old Linux ABI.
 
+config USE_BX
+	bool "Use BX in function return"
+	default y
+	depends on !CONFIG_GENERIC_ARM && !CONFIG_ARM610 && !CONFIG_ARM710
+	help
+	  Use BX instruction for THUMB aware architectures.
+
 choice
 	prompt "Target Processor Type"
 	default CONFIG_GENERIC_ARM

+ 14 - 5
libc/string/arm/_memcpy.S

@@ -37,7 +37,7 @@
  * by Erik Andersen <andersen@codepoet.org>
  */
 
-
+#include <features.h>
 #include <endian.h>
 
 /*
@@ -83,8 +83,11 @@ _memcpy:
 	bcc	.Lmemcpy_backwards
 
 	moveq	r0, #0			/* Quick abort for len=0 */
-	moveq	pc, lr
-
+#if defined(__USE_BX__)
+        bxeq    lr
+#else
+        moveq   pc, lr
+#endif
 	stmdb	sp!, {r0, lr}		/* memcpy() returns dest addr */
 	subs	r2, r2, #4
 	blt	.Lmemcpy_fl4		/* less than 4 bytes */
@@ -389,8 +392,11 @@ _memcpy:
 .Lmemcpy_bl4:
 	/* less than 4 bytes to go */
 	adds	r2, r2, #4
+#if defined(__USE_BX__)
+        bxeq    lr
+#else
 	moveq	pc, lr			/* done */
-
+#endif
 	/* copy the crud byte at a time */
 	cmp	r2, #2
 	ldrb	r3, [r1, #-1]!
@@ -399,8 +405,11 @@ _memcpy:
 	strgeb	r3, [r0, #-1]!
 	ldrgtb	r3, [r1, #-1]!
 	strgtb	r3, [r0, #-1]!
+#if defined(__USE_BX__)
+        bx      lr
+#else
 	mov	pc, lr
-
+#endif
 	/* erg - unaligned destination */
 .Lmemcpy_bdestul:
 	cmp	r12, #2

+ 9 - 2
libc/string/arm/memcmp.S

@@ -40,8 +40,11 @@ memcmp:
 	/* if ((len - 1) < 0) return 0 */
 	subs	r2, r2, #1
 	movmi	r0, #0
+#if defined(__USE_BX__)
+        bxmi    lr
+#else
 	movmi	pc, lr
-
+#endif
 	/* ip == last src address to compare */
 	add	ip, r0, r2
 1:
@@ -51,7 +54,11 @@ memcmp:
 	cmpcs	r2, r3
 	beq	1b
 	sub	r0, r2, r3
-	mov	pc, lr
+#if defined(__USE_BX__)
+        bx      lr
+#else
+ 	mov	pc, lr
+#endif
 
 .size memcmp,.-memcmp
 

+ 10 - 2
libc/string/arm/memset.S

@@ -56,7 +56,11 @@ memset:
 	bge	1b
 2:
 	movs	a3, a3		@ anything left?
-	moveq	pc, lr		@ nope
+#if defined(__USE_BX__)
+        bxeq    lr
+#else
+        moveq	pc, lr		@ nope
+#endif
 	rsb	a3, a3, $7
 	add	pc, pc, a3, lsl $2
 	mov	r0, r0
@@ -67,7 +71,11 @@ memset:
 	strb	a2, [a4], $1
 	strb	a2, [a4], $1
 	strb	a2, [a4], $1
-	mov	pc, lr
+#if defined(__USE_BX__)
+        bx      lr
+#else
+ 	mov	pc, lr
+#endif
 
 .size memset,.-memset
 

+ 5 - 1
libc/string/arm/strcmp.S

@@ -44,7 +44,11 @@ strcmp:
 	cmpcs	r2, r3
 	beq	1b
 	sub	r0, r2, r3
-	mov	pc, lr
+#if defined(__USE_BX__)
+        bx      lr
+#else
+  	mov	pc, lr
+#endif
 
 .size strcmp,.-strcmp
 

+ 5 - 1
libc/string/arm/strlen.S

@@ -75,7 +75,11 @@ Llastword:				@ drop through to here once we find a
 	tstne   r2, $0x00ff0000         @ (if first three all non-zero, 4th
 	addne   r0, r0, $1              @  must be zero)
 #endif
-	mov	pc,lr
+#if defined(__USE_BX__)
+        bx      lr
+#else
+  	mov	pc,lr
+#endif
 
 .size strlen,.-strlen
 

+ 9 - 1
libc/string/arm/strncmp.S

@@ -40,7 +40,11 @@ strncmp:
 	/* if (len == 0) return 0 */
 	cmp	r2, #0
 	moveq	r0, #0
+#if defined(__USE_BX__)
+        bxeq    lr
+#else
 	moveq	pc, lr
+#endif
 	subs	r2, r2, #1
 
 	/* ip == last src address to compare */
@@ -53,7 +57,11 @@ strncmp:
 	cmpcs	r2, r3
 	beq	1b
 	sub	r0, r2, r3
-	mov	pc, lr
+#if defined(__USE_BX__)
+        bx      lr
+#else
+  	mov	pc, lr
+#endif
 
 .size strncmp,.-strncmp