/*
 * "memcpy" implementation for SuperH
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (c) 2002 STMicroelectronics Ltd
 *   Modified from memcpy.S and micro-optimised for SH4
 *   Stuart Menefy (stuart.menefy@st.com)
 *
 * Copyright (c) 2009 STMicroelectronics Ltd
 *   Optimised using prefetching and 64-bit data transfer via the FPU
 *   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 */

/*
 * void *memcpy(void *dst, const void *src, size_t n);
 *
 * It is assumed that there is no overlap between src and dst.
 * If there is an overlap, then the results are undefined.
 */
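
/*
 * Overview of the approach (informal): the routine works backwards through
 * both buffers.  On entry it computes r0 = dst + n (one past the end of the
 * destination) and r5 = src - dst, so r0 + r5 is always the source address
 * that corresponds to the current write pointer r0; the loops then bias r5
 * by the access size so that @(r0,r5) reads the element about to be stored
 * at @-r0.  As a rough C sketch (illustrative only, not part of the build),
 * the byte-at-a-time fallback behaves like:
 *
 *	void *memcpy_bytewise(void *dst, const void *src, size_t n)
 *	{
 *		char *d = (char *)dst + n;
 *		const char *s = (const char *)src + n;
 *		while (d != (char *)dst)
 *			*--d = *--s;
 *		return dst;
 *	}
 */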
#include <sysdep.h>
#include <endian.h>

#if defined (__LITTLE_ENDIAN__) && defined (__SH_FPU_ANY__)
#define MEMCPY_USES_FPU
/* Use paired single precision load or store mode for 64-bit transfers.
 * FPSCR.SZ=1, FPSCR.PR=0 is well defined on both SH4-200 and SH4-300.
 * Currently this has only been implemented and tested for little endian mode. */
.macro FPU_SET_PAIRED_PREC
	sts	fpscr, r7
	mov	#0x10, r0	! PR=0 SZ=1
	shll16	r0
	lds	r0, fpscr
.endm
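	! Note: 0x10 << 16 = 0x00100000 loaded into FPSCR sets FPSCR.SZ (bit 20)
	! and clears FPSCR.PR (bit 19), so each fmov of a DRn/XDn pair moves
	! 64 bits at a time; the caller's FPSCR is saved in r7 and put back by
	! RESTORE_FPSCR.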

.macro RESTORE_FPSCR
	lds	r7, fpscr
.endm

.macro DALLOC
	! Cache allocate + store on dst-32.
	add	#-32, r1
	movca.l	r0, @r1
	add	#32, r1
.endm
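	! movca.l stores r0 to @r1 and allocates that cache line without first
	! fetching it from memory; since the stores that follow each DALLOC
	! overwrite the whole 32-byte line, this avoids a useless read of the
	! destination.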
#endif

	!
	!	GHIJ KLMN OPQR -->  ...G HIJK LMNO PQR.
	!
	! Size is 16 or greater, and may have trailing bytes
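	! ((src - dst) & 3) == 1 for this case: each destination long word is
	! assembled from two consecutive aligned source loads.  In little
	! endian terms every iteration of the loop below computes
	!	dst_long = (prev_long << 24) | (cur_long >> 8)
	! so that every store is a full, aligned long word.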
	.balign	32
.Lcase1:
	! Read a long word and write a long word at once
	! At the start of each iteration, r7 contains last long load
	add	#-1,r5		! 79 EX
	mov	r4,r2		! 5 MT (0 cycles latency)
	mov.l	@(r0,r5),r7	! 21 LS (2 cycles latency)
	add	#-4,r5		! 50 EX
	add	#7,r2		! 79 EX
	!
#ifdef __LITTLE_ENDIAN__
	! 6 cycles, 4 bytes per iteration
3:	mov.l	@(r0,r5),r1	! 21 LS (latency=2)	! NMLK
	mov	r7, r3		! 5 MT (latency=0)	! RQPO
	cmp/hi	r2,r0		! 57 MT
	shll16	r3		! 103 EX
	mov	r1,r6		! 5 MT (latency=0)
	shll8	r3		! 102 EX		! Oxxx
	shlr8	r6		! 106 EX		! xNML
	mov	r1, r7		! 5 MT (latency=0)
	or	r6,r3		! 82 EX			! ONML
	bt/s	3b		! 109 BR
	mov.l	r3,@-r0		! 30 LS
#else
3:	mov.l	@(r0,r5),r1	! 21 LS (latency=2)	! KLMN
	mov	r7,r3		! 5 MT (latency=0)	! OPQR
	cmp/hi	r2,r0		! 57 MT
	shlr16	r3		! 107 EX
	shlr8	r3		! 106 EX		! xxxO
	mov	r1,r6		! 5 MT (latency=0)
	shll8	r6		! 102 EX		! LMNx
	mov	r1,r7		! 5 MT (latency=0)
	or	r6,r3		! 82 EX			! LMNO
	bt/s	3b		! 109 BR
	mov.l	r3,@-r0		! 30 LS
#endif

	! Finally, copy a byte at once, if necessary
	add	#4,r5		! 50 EX
	cmp/eq	r4,r0		! 54 MT
	add	#-6,r2		! 50 EX
	bt	9f		! 109 BR
8:	cmp/hi	r2,r0		! 57 MT
	mov.b	@(r0,r5),r1	! 20 LS (latency=2)
	bt/s	8b		! 109 BR
	mov.b	r1,@-r0		! 29 LS
9:	rts
	nop

	!
	!	GHIJ KLMN OPQR -->  .GHI JKLM NOPQ R...
	!
	! Size is 16 or greater, and may have trailing bytes
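	! ((src - dst) & 3) == 3 for this case: the same two-load recombination
	! as .Lcase1 but with the complementary shift counts; in little endian
	! terms each iteration computes
	!	dst_long = (prev_long << 8) | (cur_long >> 24)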
	.balign	32
.Lcase3:
	! Read a long word and write a long word at once
	! At the start of each iteration, r7 contains last long load
	add	#-3,r5		! 79 EX
	mov	r4,r2		! 5 MT (0 cycles latency)
	mov.l	@(r0,r5),r7	! 21 LS (2 cycles latency)
	add	#-4,r5		! 50 EX
	add	#7,r2		! 79 EX
	!
#ifdef __LITTLE_ENDIAN__
	! 6 cycles, 4 bytes per iteration
3:	mov.l	@(r0,r5),r1	! 21 LS (latency=2)	! NMLK
	mov	r7, r3		! 5 MT (latency=0)	! RQPO
	cmp/hi	r2,r0		! 57 MT
	shll8	r3		! 102 EX		! QPOx
	mov	r1,r6		! 5 MT (latency=0)
	shlr16	r6		! 107 EX
	shlr8	r6		! 106 EX		! xxxN
	mov	r1, r7		! 5 MT (latency=0)
	or	r6,r3		! 82 EX			! QPON
	bt/s	3b		! 109 BR
	mov.l	r3,@-r0		! 30 LS
#else
3:	mov	r7,r3		! OPQR
	shlr8	r3		! xOPQ
	mov.l	@(r0,r5),r7	! KLMN
	mov	r7,r6
	shll16	r6
	shll8	r6		! Nxxx
	or	r6,r3		! NOPQ
	cmp/hi	r2,r0
	bt/s	3b
	mov.l	r3,@-r0
#endif

	! Finally, copy a byte at once, if necessary
	add	#6,r5		! 50 EX
	cmp/eq	r4,r0		! 54 MT
	add	#-6,r2		! 50 EX
	bt	9f		! 109 BR
8:	cmp/hi	r2,r0		! 57 MT
	mov.b	@(r0,r5),r1	! 20 LS (latency=2)
	bt/s	8b		! 109 BR
	mov.b	r1,@-r0		! 29 LS
9:	rts
	nop

ENTRY(memcpy)

	! Calculate the invariants which will be used in the remainder
	! of the code:
	!
	!	r4   -->  [ ... ] DST		[ ... ] SRC
	!	          [ ... ]		[ ... ]
	!	            :			  :
	!	r0   -->  [ ... ]	r0+r5 --> [ ... ]
	!
	!
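	! In other words, r0 starts at dst + len (one past the end of the
	! destination) and r5 holds src - dst, so r0 + r5 is always the source
	! address that corresponds to the current write pointer r0; the copy
	! then proceeds downwards until r0 reaches r4 (the original dst, which
	! is also the return value).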
	! Short circuit the common case of src, dst and len being 32-bit aligned
	! and test for zero length move

	mov	r6, r0		! 5 MT (0 cycle latency)
	or	r4, r0		! 82 EX
	or	r5, r0		! 82 EX
	tst	r6, r6		! 86 MT
	bt/s	99f		! 111 BR (zero len)
	tst	#3, r0		! 87 MT
	mov	r4, r0		! 5 MT (0 cycle latency)
	add	r6, r0		! 49 EX
	bt/s	.Lcase00	! 111 BR (aligned)
	sub	r4, r5		! 75 EX

	! Arguments are not nicely long word aligned or zero len.
	! Check for small copies, and if so do a simple byte at a time copy.
	!
	! Deciding on an exact value of 'small' is not easy, as the point at which
	! using the optimised routines becomes worthwhile varies (these are the
	! cycle counts for different sizes using byte-at-a-time vs. optimised):
	!
	!	size	byte-at-time	long	word	byte
	!	16	42		39-40	46-50	50-55
	!	24	58		43-44	54-58	62-67
	!	36	82		49-50	66-70	80-85
	!
	! However the penalty for getting it 'wrong' is much higher for long word
	! aligned data (and this is more common), so use a value of 16.
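	! (For example, at size 16 the byte-at-a-time loop costs 42 cycles:
	! already slower than the 39-40 cycles of the long word aligned path,
	! though still ahead of the 50-55 cycle byte-offset case.)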
	mov	#16, r1		! 6 EX
	cmp/gt	r6,r1		! 56 MT
	add	#-1,r5		! 50 EX
	bf/s	6f		! 108 BR (not small)
	mov	r5, r3		! 5 MT (latency=0)
	shlr	r6		! 104 EX
	mov.b	@(r0,r5),r1	! 20 LS (latency=2)
	bf/s	4f		! 111 BR
	add	#-1,r3		! 50 EX
	tst	r6, r6		! 86 MT
	bt/s	98f		! 110 BR
	mov.b	r1,@-r0		! 29 LS

	! 4 cycles, 2 bytes per iteration
3:	mov.b	@(r0,r5),r1	! 20 LS (latency=2)
4:	mov.b	@(r0,r3),r2	! 20 LS (latency=2)
	dt	r6		! 67 EX
	mov.b	r1,@-r0		! 29 LS
	bf/s	3b		! 111 BR
	mov.b	r2,@-r0		! 29 LS
98:
	rts
	nop

99:	rts
	mov	r4, r0

	! Size is not small, so it's worthwhile looking for optimisations.
	! First align destination to a long word boundary.
	!
	! r5 = normal value - 1
6:	tst	#3, r0		! 87 MT
	mov	#3, r3		! 6 EX
	bt/s	2f		! 111 BR
	and	r0,r3		! 78 EX

	! 3 cycles, 1 byte per iteration
1:	dt	r3		! 67 EX
	mov.b	@(r0,r5),r1	! 19 LS (latency=2)
	add	#-1, r6		! 79 EX
	bf/s	1b		! 109 BR
	mov.b	r1,@-r0		! 28 LS

2:	add	#1, r5		! 79 EX

	! Now select the appropriate bulk transfer code based on relative
	! alignment of src and dst.
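	!
	! With dst long word aligned and r5 = src - dst, the dispatch below is:
	!
	!	(r5 & 3) == 0, remaining len <  64  ->  .Lcase0   (long word copy)
	!	(r5 & 3) == 0, remaining len >= 64  ->  .Lcase0b  (cache line copy)
	!	(r5 & 3) == 2, remaining len <  64  ->  .Lcase2   (word copy)
	!	(r5 & 3) == 2, remaining len >= 64  ->  .Lcase2b  (cache line copy)
	!	(r5 & 3) == 1                       ->  .Lcase1   (shift/or, skew 1)
	!	(r5 & 3) == 3                       ->  .Lcase3   (shift/or, skew 3)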
	mov	r0, r3		! 5 MT (latency=0)
	mov	r5, r0		! 5 MT (latency=0)
	tst	#1, r0		! 87 MT
	bf/s	1f		! 111 BR
	mov	#64, r7		! 6 EX

	! bit 0 clear
	cmp/ge	r7, r6		! 55 MT
	bt/s	2f		! 111 BR
	tst	#2, r0		! 87 MT

	! small
	bt/s	.Lcase0
	mov	r3, r0
	bra	.Lcase2
	nop

	! big
2:	bt/s	.Lcase0b
	mov	r3, r0
	bra	.Lcase2b
	nop

	! bit 0 set
1:	tst	#2, r0		! 87 MT
	bt/s	.Lcase1
	mov	r3, r0
	bra	.Lcase3
	nop

	!
	!	GHIJ KLMN OPQR -->  GHIJ KLMN OPQR
	!
	! src, dst and size are all long word aligned
	! size is non-zero
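	! (The entry test OR-ed dst, src and len together before tst #3, so
	! here len is also a multiple of 4: r6 >> 3 below is the number of long
	! word pairs, and the T bit left by the final shlr says whether one odd
	! long word has to be copied outside the paired loop.)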
	.balign	32
.Lcase00:
	mov	#64, r1		! 6 EX
	mov	r5, r3		! 5 MT (latency=0)
	cmp/gt	r6, r1		! 56 MT
	add	#-4, r5		! 50 EX
	bf	.Lcase00b	! 108 BR (big loop)
	shlr2	r6		! 105 EX
	shlr	r6		! 104 EX
	mov.l	@(r0, r5), r1	! 21 LS (latency=2)
	bf/s	4f		! 111 BR
	add	#-8, r3		! 50 EX
	tst	r6, r6		! 86 MT
	bt/s	5f		! 110 BR
	mov.l	r1,@-r0		! 30 LS

	! 4 cycles, 2 long words per iteration
3:	mov.l	@(r0, r5), r1	! 21 LS (latency=2)
4:	mov.l	@(r0, r3), r2	! 21 LS (latency=2)
	dt	r6		! 67 EX
	mov.l	r1, @-r0	! 30 LS
	bf/s	3b		! 109 BR
	mov.l	r2, @-r0	! 30 LS
5:	rts
	nop

	! Size is 16 or greater and less than 64, but may have trailing bytes
	.balign	32
.Lcase0:
	add	#-4, r5		! 50 EX
	mov	r4, r7		! 5 MT (latency=0)
	mov.l	@(r0, r5), r1	! 21 LS (latency=2)
	mov	#4, r2		! 6 EX
	add	#11, r7		! 50 EX
	tst	r2, r6		! 86 MT
	mov	r5, r3		! 5 MT (latency=0)
	bt/s	4f		! 111 BR
	add	#-4, r3		! 50 EX
	mov.l	r1,@-r0		! 30 LS

	! 4 cycles, 2 long words per iteration
3:	mov.l	@(r0, r5), r1	! 21 LS (latency=2)
4:	mov.l	@(r0, r3), r2	! 21 LS (latency=2)
	cmp/hi	r7, r0
	mov.l	r1, @-r0	! 30 LS
	bt/s	3b		! 109 BR
	mov.l	r2, @-r0	! 30 LS

	! Copy the final 0-3 bytes
	add	#3,r5		! 50 EX
	cmp/eq	r0, r4		! 54 MT
	add	#-10, r7	! 50 EX
	bt	9f		! 110 BR

	! 3 cycles, 1 byte per iteration
1:	mov.b	@(r0,r5),r1	! 19 LS
	cmp/hi	r7,r0		! 57 MT
	bt/s	1b		! 111 BR
	mov.b	r1,@-r0		! 28 LS
9:	rts
	nop

	! Size is at least 64 bytes, so will be going round the big loop at least once.
	!
	! r2 = rounded up r4
	! r3 = rounded down r0
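	! That is: r2 = (dst + 31) & ~31, the first cache line boundary at or
	! above dst (where the 32-byte loop stops), and r3 = r0 & ~31, the
	! cache line boundary at or below the current write pointer.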
	.balign	32
.Lcase0b:
	add	#-4, r5		! 50 EX
.Lcase00b:
	mov	r0, r3		! 5 MT (latency=0)
	mov	#(~0x1f), r1	! 6 EX
	and	r1, r3		! 78 EX
	mov	r4, r2		! 5 MT (latency=0)
	cmp/eq	r3, r0		! 54 MT
	add	#0x1f, r2	! 50 EX
	bt/s	1f		! 110 BR
	and	r1, r2		! 78 EX

	! copy initial words until cache line aligned
	mov.l	@(r0, r5), r1	! 21 LS (latency=2)
	tst	#4, r0		! 87 MT
	mov	r5, r6		! 5 MT (latency=0)
	add	#-4, r6		! 50 EX
	bt/s	4f		! 111 BR
	add	#8, r3		! 50 EX
	tst	#0x18, r0	! 87 MT
	bt/s	1f		! 109 BR
	mov.l	r1,@-r0		! 30 LS

	! 4 cycles, 2 long words per iteration
3:	mov.l	@(r0, r5), r1	! 21 LS (latency=2)
4:	mov.l	@(r0, r6), r7	! 21 LS (latency=2)
	cmp/eq	r3, r0		! 54 MT
	mov.l	r1, @-r0	! 30 LS
	bf/s	3b		! 109 BR
	mov.l	r7, @-r0	! 30 LS

#ifdef MEMCPY_USES_FPU
	! Copy the cache line aligned blocks using the FPU registers.
	! If src and dst are well aligned, use 64-bit data transfers.
	! We also need r0 as a temporary (for movca), so 'undo' the invariant:
	!	r5: src (was r0+r5)
	!	r1: dest (was r0)
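	! Each pass of the prefetching loop at 67: below moves four cache lines
	! (128 bytes): sixteen 64-bit fmov loads fill dr0-dr14 and xd0-xd14,
	! then four DALLOC + store groups write them back 32 bytes at a time,
	! while pref @r6 fetches the block that will be copied next (the copy
	! runs downwards through memory, so r6 trails r5 by 0x80).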
1:
	add	r0, r5
	mov	r0, r1
	mov	r1, r3		! MT
	sub	r2, r3		! EX (r3 - r2 -> r3)
	mov	#-5, r0
	shld	r0, r3		! number of cache lines
	mov	#8, r0
	cmp/ge	r0, r3		! Check if there are many cache lines to copy.
	bf	45f		! Copy cache line aligned blocks without pref.
	mov	r5, r0
	add	#-0x7c, r0
	tst	#7, r0		! is src 8-byte aligned?
	bf	45f

	! Many cache lines have to be copied and the buffers are well aligned.
	! Aggressive prefetching and FPU in single paired precision.
	mov	r0, r5
	mov	r5, r6
	add	#-0x80, r6	! prefetch head

	! Save the FPU callee-saved registers (still in single precision mode,
	! so no 8-byte alignment of r15 is required).
	fmov	fr12, @-r15
	fmov	fr13, @-r15
	fmov	fr14, @-r15
	fmov	fr15, @-r15
	FPU_SET_PAIRED_PREC
	mov	#4, r0
67:
	add	#-0x20, r6
	pref	@r6
	add	#-0x20, r6
	pref	@r6
	fmov	@r5+, dr0
	fmov	@r5+, dr2
	fmov	@r5+, dr4
	fmov	@r5+, dr6
	fmov	@r5+, dr8
	fmov	@r5+, dr10
	fmov	@r5+, dr12
	fmov	@r5+, dr14
	fmov	@r5+, xd0
	fmov	@r5+, xd2
	fmov	@r5+, xd4
	fmov	@r5+, xd6
	fmov	@r5+, xd8
	fmov	@r5+, xd10
	fmov	@r5+, xd12
	fmov	@r5+, xd14
	DALLOC
	fmov	xd14, @-r1
	fmov	xd12, @-r1
	fmov	xd10, @-r1
	fmov	xd8, @-r1
	DALLOC
	fmov	xd6, @-r1
	fmov	xd4, @-r1
	fmov	xd2, @-r1
	fmov	xd0, @-r1
	DALLOC
	fmov	dr14, @-r1
	fmov	dr12, @-r1
	fmov	dr10, @-r1
	fmov	dr8, @-r1
	DALLOC
	fmov	dr6, @-r1
	add	#-0x80, r5
	fmov	dr4, @-r1
	add	#-0x80, r5
	fmov	dr2, @-r1
	add	#-0x20, r6
	fmov	dr0, @-r1
	add	#-4, r3
	pref	@r6
	add	#-0x20, r6
	cmp/ge	r0, r3
	bt/s	67b
	pref	@r6
	RESTORE_FPSCR

	! Restore the FPU callee-saved registers
	fmov	@r15+, fr15
	fmov	@r15+, fr14
	fmov	@r15+, fr13
	fmov	@r15+, fr12

	! There may be further cache lines to copy: use the FPU in single
	! paired precision without prefetching. No alignment check is needed.
	mov	#1, r0
	cmp/ge	r0, r3
	bt/s	3f
	add	#0x60, r5
	bra	5f
	nop

	! No prefetch and FPU in single precision.
45:
	add	#-0x1c, r5
	mov	r5, r0
	tst	#7, r0
	bt	3f
2:	fmov.s	@r5+, fr0
	fmov.s	@r5+, fr1
	fmov.s	@r5+, fr2
	fmov.s	@r5+, fr3
	fmov.s	@r5+, fr4
	fmov.s	@r5+, fr5
	fmov.s	@r5+, fr6
	fmov.s	@r5+, fr7
	DALLOC
	fmov.s	fr7, @-r1
	fmov.s	fr6, @-r1
	fmov.s	fr5, @-r1
	fmov.s	fr4, @-r1
	fmov.s	fr3, @-r1
	fmov.s	fr2, @-r1
	fmov.s	fr1, @-r1
	fmov.s	fr0, @-r1
	cmp/eq	r2,r1
	bf/s	2b
	add	#-0x40, r5
	bra	5f
	nop

	! No prefetch and FPU in single paired precision.
3:	FPU_SET_PAIRED_PREC
4:	fmov	@r5+, dr0
	fmov	@r5+, dr2
	fmov	@r5+, dr4
	fmov	@r5+, dr6
	DALLOC
	fmov	dr6, @-r1
	fmov	dr4, @-r1
	fmov	dr2, @-r1
	fmov	dr0, @-r1
	cmp/eq	r2,r1
	bf/s	4b
	add	#-0x40, r5
	RESTORE_FPSCR

5:	mov	r1, r0
	cmp/eq	r4, r0		! 54 MT
	bf/s	1f		! 109 BR
	sub	r1, r5		! 75 EX
	rts
	nop
1:
#else
	! Copy the cache line aligned blocks
	!
	! In use: r0, r2, r4, r5
	! Scratch: r1, r3, r6, r7
	!
	! We could do this with the four scratch registers, but if src
	! and dest hit the same cache line, this will thrash, so make
	! use of additional registers.
	!
	! We also need r0 as a temporary (for movca), so 'undo' the invariant:
	!	r5: src (was r0+r5)
	!	r1: dest (was r0)
	! This can be reversed at the end, so we don't need to save any extra
	! state.
	!
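	! Each iteration of the loop at 2: below copies one 32-byte cache line
	! through eight long word registers (r0, r3, r6-r11); the movca.l on
	! the first store allocates the destination line without reading it
	! from memory.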
1:	mov.l	r8, @-r15	! 30 LS
	add	r0, r5		! 49 EX
	mov.l	r9, @-r15	! 30 LS
	mov	r0, r1		! 5 MT (latency=0)
	mov.l	r10, @-r15	! 30 LS
	add	#-0x1c, r5	! 50 EX
	mov.l	r11, @-r15	! 30 LS

	! 16 cycles, 32 bytes per iteration
2:	mov.l	@(0x00,r5),r0	! 18 LS (latency=2)
	add	#-0x20, r1	! 50 EX
	mov.l	@(0x04,r5),r3	! 18 LS (latency=2)
	mov.l	@(0x08,r5),r6	! 18 LS (latency=2)
	mov.l	@(0x0c,r5),r7	! 18 LS (latency=2)
	mov.l	@(0x10,r5),r8	! 18 LS (latency=2)
	mov.l	@(0x14,r5),r9	! 18 LS (latency=2)
	mov.l	@(0x18,r5),r10	! 18 LS (latency=2)
	mov.l	@(0x1c,r5),r11	! 18 LS (latency=2)
	movca.l	r0,@r1		! 40 LS (latency=3-7)
	mov.l	r3,@(0x04,r1)	! 33 LS
	mov.l	r6,@(0x08,r1)	! 33 LS
	mov.l	r7,@(0x0c,r1)	! 33 LS
	mov.l	r8,@(0x10,r1)	! 33 LS
	add	#-0x20, r5	! 50 EX
	mov.l	r9,@(0x14,r1)	! 33 LS
	cmp/eq	r2,r1		! 54 MT
	mov.l	r10,@(0x18,r1)	! 33 LS
	bf/s	2b		! 109 BR
	mov.l	r11,@(0x1c,r1)	! 33 LS

	mov	r1, r0		! 5 MT (latency=0)
	mov.l	@r15+, r11	! 15 LS
	sub	r1, r5		! 75 EX
	mov.l	@r15+, r10	! 15 LS
	cmp/eq	r4, r0		! 54 MT
	bf/s	1f		! 109 BR
	mov.l	@r15+, r9	! 15 LS
	rts
1:	mov.l	@r15+, r8	! 15 LS
#endif

	sub	r4, r1		! 75 EX (len remaining)

	! number of trailing bytes is non-zero
	!
	! invariants restored (r5 already decremented by 4)
	! also r1=num bytes remaining
	mov	#4, r2		! 6 EX
	mov	r4, r7		! 5 MT (latency=0)
	add	#0x1c, r5	! 50 EX (back to -4)
	cmp/hs	r2, r1		! 58 MT
	bf/s	5f		! 108 BR
	add	#11, r7		! 50 EX
	mov.l	@(r0, r5), r6	! 21 LS (latency=2)
	tst	r2, r1		! 86 MT
	mov	r5, r3		! 5 MT (latency=0)
	bt/s	4f		! 111 BR
	add	#-4, r3		! 50 EX
	cmp/hs	r2, r1		! 58 MT
	bt/s	5f		! 111 BR
	mov.l	r6,@-r0		! 30 LS

	! 4 cycles, 2 long words per iteration
3:	mov.l	@(r0, r5), r6	! 21 LS (latency=2)
4:	mov.l	@(r0, r3), r2	! 21 LS (latency=2)
	cmp/hi	r7, r0
	mov.l	r6, @-r0	! 30 LS
	bt/s	3b		! 109 BR
	mov.l	r2, @-r0	! 30 LS

	! Copy the final 0-3 bytes
5:	cmp/eq	r0, r4		! 54 MT
	add	#-10, r7	! 50 EX
	bt	9f		! 110 BR
	add	#3,r5		! 50 EX

	! 3 cycles, 1 byte per iteration
1:	mov.b	@(r0,r5),r1	! 19 LS
	cmp/hi	r7,r0		! 57 MT
	bt/s	1b		! 111 BR
	mov.b	r1,@-r0		! 28 LS
9:	rts
	nop

	!
	!	GHIJ KLMN OPQR -->  ..GH IJKL MNOP QR..
	!
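	! ((src - dst) & 3) == 2 here and dst is long word aligned, so 16-bit
	! word accesses are naturally aligned on both sides; the loop below
	! copies two words (4 bytes) per iteration.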
	.balign	32
.Lcase2:
	! Size is 16 or greater and less than 64, but may have trailing bytes
2:	mov	r5, r6		! 5 MT (latency=0)
	add	#-2,r5		! 50 EX
	mov	r4,r2		! 5 MT (latency=0)
	add	#-4,r6		! 50 EX
	add	#7,r2		! 50 EX
3:	mov.w	@(r0,r5),r1	! 20 LS (latency=2)
	mov.w	@(r0,r6),r3	! 20 LS (latency=2)
	cmp/hi	r2,r0		! 57 MT
	mov.w	r1,@-r0		! 29 LS
	bt/s	3b		! 111 BR
	mov.w	r3,@-r0		! 29 LS
	bra	10f
	nop

	.balign	32
.Lcase2b:
	! Size is at least 64 bytes, so will be going round the big loop at least once.
	!
	! r2 = rounded up r4
	! r3 = rounded down r0
	mov	r0, r3		! 5 MT (latency=0)
	mov	#(~0x1f), r1	! 6 EX
	and	r1, r3		! 78 EX
	mov	r4, r2		! 5 MT (latency=0)
	cmp/eq	r3, r0		! 54 MT
	add	#0x1f, r2	! 50 EX
	add	#-2, r5		! 50 EX
	bt/s	1f		! 110 BR
	and	r1, r2		! 78 EX

	! Copy one short word at a time until we are cache line aligned
	!	Normal values: r0, r2, r3, r4
	!	Unused: r1, r6, r7
	!	Mod: r5 (=r5-2)
	!
	add	#2, r3		! 50 EX
2:	mov.w	@(r0,r5),r1	! 20 LS (latency=2)
	cmp/eq	r3,r0		! 54 MT
	bf/s	2b		! 111 BR
	mov.w	r1,@-r0		! 29 LS

	! Copy the cache line aligned blocks
	!
	! In use: r0, r2, r4, r5 (=r5-2)
	! Scratch: r1, r3, r6, r7
	!
	! We could do this with the four scratch registers, but if src
	! and dest hit the same cache line, this will thrash, so make
	! use of additional registers.
	!
	! We also need r0 as a temporary (for movca), so 'undo' the invariant:
	!	r5: src (was r0+r5)
	!	r1: dest (was r0)
	! This can be reversed at the end, so we don't need to save any extra
	! state.
	!
1:	mov.l	r8, @-r15	! 30 LS
	add	r0, r5		! 49 EX
	mov.l	r9, @-r15	! 30 LS
	mov	r0, r1		! 5 MT (latency=0)
	mov.l	r10, @-r15	! 30 LS
	add	#-0x1e, r5	! 50 EX
	mov.l	r11, @-r15	! 30 LS
	mov.l	r12, @-r15	! 30 LS

	! 17 cycles, 32 bytes per iteration
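	! xtrct Rm,Rn computes Rn = (Rm << 16) | (Rn >> 16), i.e. it extracts
	! the middle 32 bits of the 64-bit value Rm:Rn.  Since src and dst
	! differ by 2 mod 4, every aligned destination long word spans two
	! source accesses, and xtrct stitches the halves back together.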
#ifdef __LITTLE_ENDIAN__
2:	mov.w	@r5+, r0	! 14 LS (latency=2)	..JI
	add	#-0x20, r1	! 50 EX
	mov.l	@r5+, r3	! 15 LS (latency=2)	NMLK
	mov.l	@r5+, r6	! 15 LS (latency=2)	RQPO
	shll16	r0		! 103 EX		JI..
	mov.l	@r5+, r7	! 15 LS (latency=2)
	xtrct	r3, r0		! 48 EX			LKJI
	mov.l	@r5+, r8	! 15 LS (latency=2)
	xtrct	r6, r3		! 48 EX			PONM
	mov.l	@r5+, r9	! 15 LS (latency=2)
	xtrct	r7, r6		! 48 EX
	mov.l	@r5+, r10	! 15 LS (latency=2)
	xtrct	r8, r7		! 48 EX
	mov.l	@r5+, r11	! 15 LS (latency=2)
	xtrct	r9, r8		! 48 EX
	mov.w	@r5+, r12	! 15 LS (latency=2)
	xtrct	r10, r9		! 48 EX
	movca.l	r0,@r1		! 40 LS (latency=3-7)
	xtrct	r11, r10	! 48 EX
	mov.l	r3, @(0x04,r1)	! 33 LS
	xtrct	r12, r11	! 48 EX
	mov.l	r6, @(0x08,r1)	! 33 LS
	mov.l	r7, @(0x0c,r1)	! 33 LS
	mov.l	r8, @(0x10,r1)	! 33 LS
	add	#-0x40, r5	! 50 EX
	mov.l	r9, @(0x14,r1)	! 33 LS
	cmp/eq	r2,r1		! 54 MT
	mov.l	r10, @(0x18,r1)	! 33 LS
	bf/s	2b		! 109 BR
	mov.l	r11, @(0x1c,r1)	! 33 LS
#else
2:	mov.w	@(0x1e,r5), r0	! 17 LS (latency=2)
	add	#-2, r5		! 50 EX
	mov.l	@(0x1c,r5), r3	! 18 LS (latency=2)
	add	#-4, r1		! 50 EX
	mov.l	@(0x18,r5), r6	! 18 LS (latency=2)
	shll16	r0		! 103 EX
	mov.l	@(0x14,r5), r7	! 18 LS (latency=2)
	xtrct	r3, r0		! 48 EX
	mov.l	@(0x10,r5), r8	! 18 LS (latency=2)
	xtrct	r6, r3		! 48 EX
	mov.l	@(0x0c,r5), r9	! 18 LS (latency=2)
	xtrct	r7, r6		! 48 EX
	mov.l	@(0x08,r5), r10	! 18 LS (latency=2)
	xtrct	r8, r7		! 48 EX
	mov.l	@(0x04,r5), r11	! 18 LS (latency=2)
	xtrct	r9, r8		! 48 EX
	mov.l	@(0x00,r5), r12	! 18 LS (latency=2)
	xtrct	r10, r9		! 48 EX
	movca.l	r0,@r1		! 40 LS (latency=3-7)
	add	#-0x1c, r1	! 50 EX
	mov.l	r3, @(0x18,r1)	! 33 LS
	xtrct	r11, r10	! 48 EX
	mov.l	r6, @(0x14,r1)	! 33 LS
	xtrct	r12, r11	! 48 EX
	mov.l	r7, @(0x10,r1)	! 33 LS
	mov.l	r8, @(0x0c,r1)	! 33 LS
	add	#-0x1e, r5	! 50 EX
	mov.l	r9, @(0x08,r1)	! 33 LS
	cmp/eq	r2,r1		! 54 MT
	mov.l	r10, @(0x04,r1)	! 33 LS
	bf/s	2b		! 109 BR
	mov.l	r11, @(0x00,r1)	! 33 LS
#endif

	mov.l	@r15+, r12
	mov	r1, r0		! 5 MT (latency=0)
	mov.l	@r15+, r11	! 15 LS
	sub	r1, r5		! 75 EX
	mov.l	@r15+, r10	! 15 LS
	cmp/eq	r4, r0		! 54 MT
	bf/s	1f		! 109 BR
	mov.l	@r15+, r9	! 15 LS
	rts
1:	mov.l	@r15+, r8	! 15 LS
	add	#0x1e, r5	! 50 EX

	! Finish off a short word at a time
	! r5 must be invariant - 2
10:	mov	r4,r2		! 5 MT (latency=0)
	add	#1,r2		! 50 EX
	cmp/hi	r2, r0		! 57 MT
	bf/s	1f		! 109 BR
	add	#2, r2		! 50 EX
3:	mov.w	@(r0,r5),r1	! 20 LS
	cmp/hi	r2,r0		! 57 MT
	bt/s	3b		! 109 BR
	mov.w	r1,@-r0		! 29 LS
1:
	!
	! Finally, copy the last byte if necessary
	cmp/eq	r4,r0		! 54 MT
	bt/s	9b
	add	#1,r5
	mov.b	@(r0,r5),r1
	rts
	mov.b	r1,@-r0

END(memcpy)
libc_hidden_def (memcpy)