_memcpy.S

/*-
 * Copyright (c) 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Neil A. Carson and Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Adapted for uClibc from NetBSD _memcpy.S,v 1.6 2003/10/09
 * by Erik Andersen <andersen@codepoet.org>
 */
#include <features.h>
#include <endian.h>
#include <bits/arm_asm.h>

#if !defined(THUMB1_ONLY)
/*
 * This is one fun bit of code ...
 * Some easy listening music is suggested while trying to understand this
 * code e.g. Iron Maiden
 *
 * For anyone attempting to understand it:
 *
 * The core code is implemented here with simple stubs for memcpy(),
 * memmove() and bcopy().
 *
 * All local labels are prefixed with Lmemcpy_.
 * Following the prefix, labels starting with f are used in the forward
 * copy code while labels starting with b are used in the backwards copy
 * code.  The source and destination addresses determine whether a forward
 * or backward copy is performed.
 * Separate bits of code are used to deal with the following situations
 * for both the forward and backwards copy:
 *   - unaligned source address
 *   - unaligned destination address
 * Separate copy routines are used to produce an optimised result for
 * each of these cases.
 * The copy code will use LDM/STM instructions to copy up to 32 bytes at
 * a time where possible.
 *
 * Note: r12 (aka ip) can be trashed during the function along with
 * r0-r3, although r0-r2 have defined uses (dest, src and len) throughout.
 * Additional registers (r4, r5 and lr) are preserved prior to use.
 * The return value in r0 must be the destination address.
 *
 * Apologies for the state of the comments ;-)
 */
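
/*
 * As a rough guide, the dispatch below corresponds to this C sketch
 * (an illustration only -- it ignores the 32/12-byte unrolled loops and
 * the shift-and-merge paths used for misaligned sources):
 *
 *	void *_memcpy(void *dst, const void *src, size_t len)
 *	{
 *		char *d = dst;
 *		const char *s = src;
 *		if (s < d)			// copy backwards from the end,
 *			while (len--)		// so overlapping regions are
 *				d[len] = s[len];    // handled like memmove()
 *		else if (s > d)			// src == dst needs no work
 *			while (len--)
 *				*d++ = *s++;
 *		return dst;
 *	}
 */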
.text
.global _memcpy
.hidden _memcpy
.type _memcpy, %function
.align 4

/* XXX: The Thumb-2 conditionals can be removed if/when we require an
   assembler that supports unified syntax. */
.macro copy regs
#if defined(__thumb2__)
	ittt	ge
	ldmiage	r1!, \regs
	stmiage	r0!, \regs
#else
	ldmgeia	r1!, \regs
	stmgeia	r0!, \regs
#endif
.endm

.macro copydb regs
#if defined(__thumb2__)
	ittt	ge
	ldmdbge	r1!, \regs
	stmdbge	r0!, \regs
#else
	ldmgedb	r1!, \regs
	stmgedb	r0!, \regs
#endif
.endm
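
/* The copy/copydb macros conditionally (on GE) transfer a block of
   registers from [r1] to [r0], post-incrementing (copy) or
   pre-decrementing (copydb) both pointers.  Note the Thumb-2 "ittt" has
   three slots: the third covers the subge/subsge that follows each
   macro invocation at its call site. */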
_memcpy:
	/* Determine copy direction */
	cmp	r1, r0
	bcc	.Lmemcpy_backwards

	IT(t, eq)			/* Quick abort for src=dst */
#if defined(__USE_BX__)
	bxeq	lr
#else
	moveq	pc, lr
#endif
	stmdb	sp!, {r0, lr}		/* memcpy() returns dest addr */
	subs	r2, r2, #4
	blt	.Lmemcpy_fl4		/* less than 4 bytes */
	ands	r12, r0, #3
	bne	.Lmemcpy_fdestul	/* oh unaligned destination addr */
	ands	r12, r1, #3
	bne	.Lmemcpy_fsrcul		/* oh unaligned source addr */

.Lmemcpy_ft8:
	/* We have aligned source and destination */
	subs	r2, r2, #8
	blt	.Lmemcpy_fl12		/* less than 12 bytes (4 from above) */
	subs	r2, r2, #0x14
	blt	.Lmemcpy_fl32		/* less than 32 bytes (12 from above) */
	str	r4, [sp, #-4]!		/* borrow r4 */

	/* blat 32 bytes at a time */
	/* XXX for really big copies perhaps we should use more registers */
.Lmemcpy_floop32:
	ldmia	r1!, {r3, r4, r12, lr}
	stmia	r0!, {r3, r4, r12, lr}
	ldmia	r1!, {r3, r4, r12, lr}
	stmia	r0!, {r3, r4, r12, lr}
	subs	r2, r2, #0x20
	bge	.Lmemcpy_floop32

	cmn	r2, #0x10
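	/* r2 is the remaining count biased by -32 here; cmn sets flags for
	   r2 + 16, so GE means at least 16 bytes are still left */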
	/* blat a remaining 16 bytes */
	copy	"{r3, r4, r12, lr}"
	subge	r2, r2, #0x10
	ldr	r4, [sp], #4		/* restore r4 */

.Lmemcpy_fl32:
	adds	r2, r2, #0x14

	/* blat 12 bytes at a time */
.Lmemcpy_floop12:
	copy	"{r3, r12, lr}"
#if defined(__thumb2__)
	subsge	r2, r2, #0x0c
#else
	subges	r2, r2, #0x0c
#endif
	bge	.Lmemcpy_floop12

.Lmemcpy_fl12:
	adds	r2, r2, #8
	blt	.Lmemcpy_fl4
	subs	r2, r2, #4
	IT(tt, lt)
	ldrlt	r3, [r1], #4
	strlt	r3, [r0], #4
	copy	"{r3, r12}"
	subge	r2, r2, #4

.Lmemcpy_fl4:
	/* less than 4 bytes to go */
	adds	r2, r2, #4
#if defined(__thumb2__)
	it	eq
	popeq	{r0, pc}		/* done */
#elif defined(__ARM_ARCH_4T__)
	ldmeqia	sp!, {r0, r3}		/* done */
	bxeq	r3
#else
	ldmeqia	sp!, {r0, pc}		/* done */
#endif
	/* copy the crud byte at a time */
	cmp	r2, #2
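	/* r2 is 1, 2 or 3 here; after cmp r2, #2 the unconditional pair
	   copies the first byte, GE copies a second and GT a third */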
	ldrb	r3, [r1], #1
	strb	r3, [r0], #1
#if defined(__thumb2__)
	itt	ge
	ldrbge	r3, [r1], #1
	strbge	r3, [r0], #1
	itt	gt
	ldrbgt	r3, [r1], #1
	strbgt	r3, [r0], #1
#else
	ldrgeb	r3, [r1], #1
	strgeb	r3, [r0], #1
	ldrgtb	r3, [r1], #1
	strgtb	r3, [r0], #1
#endif
#if defined(__ARM_ARCH_4T__)
	ldmia	sp!, {r0, r3}
	bx	r3
#else
	ldmia	sp!, {r0, pc}
#endif

/* erg - unaligned destination */
.Lmemcpy_fdestul:
	rsb	r12, r12, #4
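	/* r12 = 4 - (dest & 3): the number of bytes needed to align dest */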
	cmp	r12, #2

	/* align destination with byte copies */
	ldrb	r3, [r1], #1
	strb	r3, [r0], #1
#if defined(__thumb2__)
	itt	ge
	ldrbge	r3, [r1], #1
	strbge	r3, [r0], #1
	itt	gt
	ldrbgt	r3, [r1], #1
	strbgt	r3, [r0], #1
#else
	ldrgeb	r3, [r1], #1
	strgeb	r3, [r0], #1
	ldrgtb	r3, [r1], #1
	strgtb	r3, [r0], #1
#endif
	subs	r2, r2, r12
	blt	.Lmemcpy_fl4		/* less than 4 bytes */
	ands	r12, r1, #3
	beq	.Lmemcpy_ft8		/* we have an aligned source */

/* erg - unaligned source */
/* This is where it gets nasty ... */
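/*
 * Sketch of the technique (little-endian shown; the big-endian variants
 * below swap the shift directions): round r1 down to a word boundary,
 * load whole aligned words, and build each output word from the tail of
 * the previous load and the head of the next.  For a source 1 byte past
 * alignment, roughly:
 *
 *	uint32_t prev = *wsrc++;	// aligned word holding bytes 0..2
 *	while (len >= 4) {
 *		uint32_t next = *wsrc++;
 *		*wdst++ = (prev >> 8) | (next << 24);
 *		prev = next;
 *		len -= 4;
 *	}
 *
 * fsrcul1/2/3 below are this loop for source offsets 1, 2 and 3,
 * unrolled to 16 bytes per iteration.
 */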
.Lmemcpy_fsrcul:
	bic	r1, r1, #3
	ldr	lr, [r1], #4
	cmp	r12, #2
	bgt	.Lmemcpy_fsrcul3
	beq	.Lmemcpy_fsrcul2
	cmp	r2, #0x0c
	blt	.Lmemcpy_fsrcul1loop4
	sub	r2, r2, #0x0c
	stmdb	sp!, {r4, r5}

.Lmemcpy_fsrcul1loop16:
#if __BYTE_ORDER == __BIG_ENDIAN
	mov	r3, lr, lsl #8
	ldmia	r1!, {r4, r5, r12, lr}
	orr	r3, r3, r4, lsr #24
	mov	r4, r4, lsl #8
	orr	r4, r4, r5, lsr #24
	mov	r5, r5, lsl #8
	orr	r5, r5, r12, lsr #24
	mov	r12, r12, lsl #8
	orr	r12, r12, lr, lsr #24
#else
	mov	r3, lr, lsr #8
	ldmia	r1!, {r4, r5, r12, lr}
	orr	r3, r3, r4, lsl #24
	mov	r4, r4, lsr #8
	orr	r4, r4, r5, lsl #24
	mov	r5, r5, lsr #8
	orr	r5, r5, r12, lsl #24
	mov	r12, r12, lsr #8
	orr	r12, r12, lr, lsl #24
#endif
	stmia	r0!, {r3-r5, r12}
	subs	r2, r2, #0x10
	bge	.Lmemcpy_fsrcul1loop16
	ldmia	sp!, {r4, r5}
	adds	r2, r2, #0x0c
	blt	.Lmemcpy_fsrcul1l4

.Lmemcpy_fsrcul1loop4:
#if __BYTE_ORDER == __BIG_ENDIAN
	mov	r12, lr, lsl #8
	ldr	lr, [r1], #4
	orr	r12, r12, lr, lsr #24
#else
	mov	r12, lr, lsr #8
	ldr	lr, [r1], #4
	orr	r12, r12, lr, lsl #24
#endif
	str	r12, [r0], #4
	subs	r2, r2, #4
	bge	.Lmemcpy_fsrcul1loop4

.Lmemcpy_fsrcul1l4:
	sub	r1, r1, #3
	b	.Lmemcpy_fl4

.Lmemcpy_fsrcul2:
	cmp	r2, #0x0c
	blt	.Lmemcpy_fsrcul2loop4
	sub	r2, r2, #0x0c
	stmdb	sp!, {r4, r5}

.Lmemcpy_fsrcul2loop16:
#if __BYTE_ORDER == __BIG_ENDIAN
	mov	r3, lr, lsl #16
	ldmia	r1!, {r4, r5, r12, lr}
	orr	r3, r3, r4, lsr #16
	mov	r4, r4, lsl #16
	orr	r4, r4, r5, lsr #16
	mov	r5, r5, lsl #16
	orr	r5, r5, r12, lsr #16
	mov	r12, r12, lsl #16
	orr	r12, r12, lr, lsr #16
#else
	mov	r3, lr, lsr #16
	ldmia	r1!, {r4, r5, r12, lr}
	orr	r3, r3, r4, lsl #16
	mov	r4, r4, lsr #16
	orr	r4, r4, r5, lsl #16
	mov	r5, r5, lsr #16
	orr	r5, r5, r12, lsl #16
	mov	r12, r12, lsr #16
	orr	r12, r12, lr, lsl #16
#endif
	stmia	r0!, {r3-r5, r12}
	subs	r2, r2, #0x10
	bge	.Lmemcpy_fsrcul2loop16
	ldmia	sp!, {r4, r5}
	adds	r2, r2, #0x0c
	blt	.Lmemcpy_fsrcul2l4

.Lmemcpy_fsrcul2loop4:
#if __BYTE_ORDER == __BIG_ENDIAN
	mov	r12, lr, lsl #16
	ldr	lr, [r1], #4
	orr	r12, r12, lr, lsr #16
#else
	mov	r12, lr, lsr #16
	ldr	lr, [r1], #4
	orr	r12, r12, lr, lsl #16
#endif
	str	r12, [r0], #4
	subs	r2, r2, #4
	bge	.Lmemcpy_fsrcul2loop4

.Lmemcpy_fsrcul2l4:
	sub	r1, r1, #2
	b	.Lmemcpy_fl4

.Lmemcpy_fsrcul3:
	cmp	r2, #0x0c
	blt	.Lmemcpy_fsrcul3loop4
	sub	r2, r2, #0x0c
	stmdb	sp!, {r4, r5}

.Lmemcpy_fsrcul3loop16:
#if __BYTE_ORDER == __BIG_ENDIAN
	mov	r3, lr, lsl #24
	ldmia	r1!, {r4, r5, r12, lr}
	orr	r3, r3, r4, lsr #8
	mov	r4, r4, lsl #24
	orr	r4, r4, r5, lsr #8
	mov	r5, r5, lsl #24
	orr	r5, r5, r12, lsr #8
	mov	r12, r12, lsl #24
	orr	r12, r12, lr, lsr #8
#else
	mov	r3, lr, lsr #24
	ldmia	r1!, {r4, r5, r12, lr}
	orr	r3, r3, r4, lsl #8
	mov	r4, r4, lsr #24
	orr	r4, r4, r5, lsl #8
	mov	r5, r5, lsr #24
	orr	r5, r5, r12, lsl #8
	mov	r12, r12, lsr #24
	orr	r12, r12, lr, lsl #8
#endif
	stmia	r0!, {r3-r5, r12}
	subs	r2, r2, #0x10
	bge	.Lmemcpy_fsrcul3loop16
	ldmia	sp!, {r4, r5}
	adds	r2, r2, #0x0c
	blt	.Lmemcpy_fsrcul3l4

.Lmemcpy_fsrcul3loop4:
#if __BYTE_ORDER == __BIG_ENDIAN
	mov	r12, lr, lsl #24
	ldr	lr, [r1], #4
	orr	r12, r12, lr, lsr #8
#else
	mov	r12, lr, lsr #24
	ldr	lr, [r1], #4
	orr	r12, r12, lr, lsl #8
#endif
	str	r12, [r0], #4
	subs	r2, r2, #4
	bge	.Lmemcpy_fsrcul3loop4

.Lmemcpy_fsrcul3l4:
	sub	r1, r1, #1
	b	.Lmemcpy_fl4

.Lmemcpy_backwards:
	add	r1, r1, r2
	add	r0, r0, r2
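	/* point src and dest one past their ends; the copy runs downwards */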
	subs	r2, r2, #4
	blt	.Lmemcpy_bl4		/* less than 4 bytes */
	ands	r12, r0, #3
	bne	.Lmemcpy_bdestul	/* oh unaligned destination addr */
	ands	r12, r1, #3
	bne	.Lmemcpy_bsrcul		/* oh unaligned source addr */

.Lmemcpy_bt8:
	/* We have aligned source and destination */
	subs	r2, r2, #8
	blt	.Lmemcpy_bl12		/* less than 12 bytes (4 from above) */
	stmdb	sp!, {r4, lr}
	subs	r2, r2, #0x14		/* less than 32 bytes (12 from above) */
	blt	.Lmemcpy_bl32

	/* blat 32 bytes at a time */
	/* XXX for really big copies perhaps we should use more registers */
.Lmemcpy_bloop32:
	ldmdb	r1!, {r3, r4, r12, lr}
	stmdb	r0!, {r3, r4, r12, lr}
	ldmdb	r1!, {r3, r4, r12, lr}
	stmdb	r0!, {r3, r4, r12, lr}
	subs	r2, r2, #0x20
	bge	.Lmemcpy_bloop32

.Lmemcpy_bl32:
	cmn	r2, #0x10
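	/* as in the forward path: r2 is biased by -32, so GE after the cmn
	   means at least 16 bytes remain */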
	/* blat a remaining 16 bytes */
	copydb	"{r3, r4, r12, lr}"
	subge	r2, r2, #0x10
	adds	r2, r2, #0x14

	/* blat a remaining 12 bytes */
	copydb	"{r3, r12, lr}"
	subge	r2, r2, #0x0c
	ldmia	sp!, {r4, lr}

.Lmemcpy_bl12:
	adds	r2, r2, #8
	blt	.Lmemcpy_bl4
	subs	r2, r2, #4
	IT(tt, lt)
	ldrlt	r3, [r1, #-4]!
	strlt	r3, [r0, #-4]!
	copydb	"{r3, r12}"
	subge	r2, r2, #4

.Lmemcpy_bl4:
	/* less than 4 bytes to go */
	adds	r2, r2, #4
	IT(t, eq)
#if defined(__USE_BX__)
	bxeq	lr
#else
	moveq	pc, lr			/* done */
#endif
	/* copy the crud byte at a time */
	cmp	r2, #2
	ldrb	r3, [r1, #-1]!
	strb	r3, [r0, #-1]!
#ifdef __thumb2__
	itt	ge
	ldrbge	r3, [r1, #-1]!
	strbge	r3, [r0, #-1]!
	itt	gt
	ldrbgt	r3, [r1, #-1]!
	strbgt	r3, [r0, #-1]!
#else
	ldrgeb	r3, [r1, #-1]!
	strgeb	r3, [r0, #-1]!
	ldrgtb	r3, [r1, #-1]!
	strgtb	r3, [r0, #-1]!
#endif
#if defined(__USE_BX__)
	bx	lr
#else
	mov	pc, lr
#endif

/* erg - unaligned destination */
.Lmemcpy_bdestul:
	cmp	r12, #2

	/* align destination with byte copies */
	ldrb	r3, [r1, #-1]!
	strb	r3, [r0, #-1]!
#ifdef __thumb2__
	itt	ge
	ldrbge	r3, [r1, #-1]!
	strbge	r3, [r0, #-1]!
	itt	gt
	ldrbgt	r3, [r1, #-1]!
	strbgt	r3, [r0, #-1]!
#else
	ldrgeb	r3, [r1, #-1]!
	strgeb	r3, [r0, #-1]!
	ldrgtb	r3, [r1, #-1]!
	strgtb	r3, [r0, #-1]!
#endif
	subs	r2, r2, r12
	blt	.Lmemcpy_bl4		/* less than 4 bytes to go */
	ands	r12, r1, #3
	beq	.Lmemcpy_bt8		/* we have an aligned source */

/* erg - unaligned source */
/* This is where it gets nasty ... */
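/* Same shift-and-merge trick as the forward unaligned-source code,
   mirrored: words are loaded downwards and each output word combines
   the head of one aligned word with the tail of the one above it. */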
.Lmemcpy_bsrcul:
	bic	r1, r1, #3
	ldr	r3, [r1, #0]
	cmp	r12, #2
	blt	.Lmemcpy_bsrcul1
	beq	.Lmemcpy_bsrcul2
	cmp	r2, #0x0c
	blt	.Lmemcpy_bsrcul3loop4
	sub	r2, r2, #0x0c
	stmdb	sp!, {r4, r5, lr}

.Lmemcpy_bsrcul3loop16:
#if __BYTE_ORDER == __BIG_ENDIAN
	mov	lr, r3, lsr #8
	ldmdb	r1!, {r3-r5, r12}
	orr	lr, lr, r12, lsl #24
	mov	r12, r12, lsr #8
	orr	r12, r12, r5, lsl #24
	mov	r5, r5, lsr #8
	orr	r5, r5, r4, lsl #24
	mov	r4, r4, lsr #8
	orr	r4, r4, r3, lsl #24
#else
	mov	lr, r3, lsl #8
	ldmdb	r1!, {r3-r5, r12}
	orr	lr, lr, r12, lsr #24
	mov	r12, r12, lsl #8
	orr	r12, r12, r5, lsr #24
	mov	r5, r5, lsl #8
	orr	r5, r5, r4, lsr #24
	mov	r4, r4, lsl #8
	orr	r4, r4, r3, lsr #24
#endif
	stmdb	r0!, {r4, r5, r12, lr}
	subs	r2, r2, #0x10
	bge	.Lmemcpy_bsrcul3loop16
	ldmia	sp!, {r4, r5, lr}
	adds	r2, r2, #0x0c
	blt	.Lmemcpy_bsrcul3l4

.Lmemcpy_bsrcul3loop4:
#if __BYTE_ORDER == __BIG_ENDIAN
	mov	r12, r3, lsr #8
	ldr	r3, [r1, #-4]!
	orr	r12, r12, r3, lsl #24
#else
	mov	r12, r3, lsl #8
	ldr	r3, [r1, #-4]!
	orr	r12, r12, r3, lsr #24
#endif
	str	r12, [r0, #-4]!
	subs	r2, r2, #4
	bge	.Lmemcpy_bsrcul3loop4

.Lmemcpy_bsrcul3l4:
	add	r1, r1, #3
	b	.Lmemcpy_bl4

.Lmemcpy_bsrcul2:
	cmp	r2, #0x0c
	blt	.Lmemcpy_bsrcul2loop4
	sub	r2, r2, #0x0c
	stmdb	sp!, {r4, r5, lr}

.Lmemcpy_bsrcul2loop16:
#if __BYTE_ORDER == __BIG_ENDIAN
	mov	lr, r3, lsr #16
	ldmdb	r1!, {r3-r5, r12}
	orr	lr, lr, r12, lsl #16
	mov	r12, r12, lsr #16
	orr	r12, r12, r5, lsl #16
	mov	r5, r5, lsr #16
	orr	r5, r5, r4, lsl #16
	mov	r4, r4, lsr #16
	orr	r4, r4, r3, lsl #16
#else
	mov	lr, r3, lsl #16
	ldmdb	r1!, {r3-r5, r12}
	orr	lr, lr, r12, lsr #16
	mov	r12, r12, lsl #16
	orr	r12, r12, r5, lsr #16
	mov	r5, r5, lsl #16
	orr	r5, r5, r4, lsr #16
	mov	r4, r4, lsl #16
	orr	r4, r4, r3, lsr #16
#endif
	stmdb	r0!, {r4, r5, r12, lr}
	subs	r2, r2, #0x10
	bge	.Lmemcpy_bsrcul2loop16
	ldmia	sp!, {r4, r5, lr}
	adds	r2, r2, #0x0c
	blt	.Lmemcpy_bsrcul2l4

.Lmemcpy_bsrcul2loop4:
#if __BYTE_ORDER == __BIG_ENDIAN
	mov	r12, r3, lsr #16
	ldr	r3, [r1, #-4]!
	orr	r12, r12, r3, lsl #16
#else
	mov	r12, r3, lsl #16
	ldr	r3, [r1, #-4]!
	orr	r12, r12, r3, lsr #16
#endif
	str	r12, [r0, #-4]!
	subs	r2, r2, #4
	bge	.Lmemcpy_bsrcul2loop4

.Lmemcpy_bsrcul2l4:
	add	r1, r1, #2
	b	.Lmemcpy_bl4

.Lmemcpy_bsrcul1:
	cmp	r2, #0x0c
	blt	.Lmemcpy_bsrcul1loop4
	sub	r2, r2, #0x0c
	stmdb	sp!, {r4, r5, lr}

.Lmemcpy_bsrcul1loop32:
#if __BYTE_ORDER == __BIG_ENDIAN
	mov	lr, r3, lsr #24
	ldmdb	r1!, {r3-r5, r12}
	orr	lr, lr, r12, lsl #8
	mov	r12, r12, lsr #24
	orr	r12, r12, r5, lsl #8
	mov	r5, r5, lsr #24
	orr	r5, r5, r4, lsl #8
	mov	r4, r4, lsr #24
	orr	r4, r4, r3, lsl #8
#else
	mov	lr, r3, lsl #24
	ldmdb	r1!, {r3-r5, r12}
	orr	lr, lr, r12, lsr #8
	mov	r12, r12, lsl #24
	orr	r12, r12, r5, lsr #8
	mov	r5, r5, lsl #24
	orr	r5, r5, r4, lsr #8
	mov	r4, r4, lsl #24
	orr	r4, r4, r3, lsr #8
#endif
	stmdb	r0!, {r4, r5, r12, lr}
	subs	r2, r2, #0x10
	bge	.Lmemcpy_bsrcul1loop32
	ldmia	sp!, {r4, r5, lr}
	adds	r2, r2, #0x0c
	blt	.Lmemcpy_bsrcul1l4

.Lmemcpy_bsrcul1loop4:
#if __BYTE_ORDER == __BIG_ENDIAN
	mov	r12, r3, lsr #24
	ldr	r3, [r1, #-4]!
	orr	r12, r12, r3, lsl #8
#else
	mov	r12, r3, lsl #24
	ldr	r3, [r1, #-4]!
	orr	r12, r12, r3, lsr #8
#endif
	str	r12, [r0, #-4]!
	subs	r2, r2, #4
	bge	.Lmemcpy_bsrcul1loop4

.Lmemcpy_bsrcul1l4:
	add	r1, r1, #1
	b	.Lmemcpy_bl4

#else /* THUMB1_ONLY */

/* This is a fairly dumb implementation for when we can't use the 32-bit
   code above. */
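
/*
 * Roughly equivalent C (a sketch; the real code below also switches to
 * word-at-a-time copies in the forward case once dst is word aligned,
 * provided src and dst share alignment and at least 8 bytes remain):
 *
 *	void *_memcpy(void *dst, const void *src, size_t n)
 *	{
 *		char *d = dst;
 *		const char *s = src;
 *		if (d > s && d < s + n)		// destructive overlap:
 *			while (n--)		// copy from the end
 *				d[n] = s[n];
 *		else
 *			while (n--)
 *				*d++ = *s++;
 *		return dst;
 *	}
 */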
.text
.global _memcpy
.hidden _memcpy
.type _memcpy, %function
.align 4
.thumb
_memcpy:
	push	{r0, r4}
	cmp	r2, #0
	beq	.Lmemcpy_exit
	@ See if we have overlapping regions, and need to reverse the
	@ direction of the copy
	cmp	r0, r1
	bls	.Lmemcpy_forwards
	add	r4, r1, r2
	cmp	r0, r4
	bcc	.Lmemcpy_backwards

.Lmemcpy_forwards:
	/* Forwards. */
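	@ If src and dst differ in alignment mod 4, or fewer than 8 bytes
	@ remain, fall back to a byte-at-a-time copy.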
	mov	r3, r0
	eor	r3, r1
	mov	r4, #3
	tst	r3, r4
	bne	.Lmemcpy_funaligned
	cmp	r2, #8
	bcc	.Lmemcpy_funaligned
1:	@ copy up to the first word boundary.
	tst	r0, r4
	beq	1f
	ldrb	r3, [r1]
	add	r1, r1, #1
	strb	r3, [r0]
	add	r0, r0, #1
	sub	r2, r2, #1
	b	1b
1:	@ Copy aligned words
	ldr	r3, [r1]
	add	r1, r1, #4
	str	r3, [r0]
	add	r0, r0, #4
	sub	r2, r2, #4
	cmp	r2, #4
	bcs	1b
	cmp	r2, #0
	beq	.Lmemcpy_exit
.Lmemcpy_funaligned:
1:
	ldrb	r3, [r1]
	add	r1, r1, #1
	strb	r3, [r0]
	add	r0, r0, #1
	sub	r2, r2, #1
	bne	1b
.Lmemcpy_exit:
	pop	{r0, r4}
	bx	lr

.Lmemcpy_backwards:
	add	r0, r0, r2
	add	r1, r1, r2
1:
	sub	r0, r0, #1
	sub	r1, r1, #1
	ldrb	r3, [r1]
	strb	r3, [r0]
	sub	r2, r2, #1
	bne	1b
	b	.Lmemcpy_exit
#endif