memcpy.S

/* Copy SIZE bytes from SRC to DEST.
   For UltraSPARC-III.
   Copyright (C) 2001, 2003 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by David S. Miller (davem@redhat.com)

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <features.h>

#define ASI_BLK_P	0xf0
#define FPRS_FEF	0x04
#define VISEntryHalf	rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExitHalf	and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
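
/* VISEntryHalf saves the current %fprs in %o5 and enables the FPU
 * by setting FPRS_FEF; VISExitHalf writes back only the FEF bit
 * saved in %o5, which is why %o5 must stay live between the two
 * (see note 1 in the block comment before memcpy below).
 */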
#ifndef XCC
#define USE_BPR
#define XCC xcc
#endif

	.register	%g2, #scratch
	.register	%g3, #scratch
	.register	%g6, #scratch

	.text
	.align	32
#ifdef __UCLIBC_SUSV3_LEGACY__
ENTRY(bcopy)
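	/* bcopy takes (src, dst, len): swap the pointers into memcpy
	 * argument order, then test (dst - src) >= len (unsigned) to
	 * pick a direction.  If it holds, a forward copy is safe and
	 * we branch to the memcpy body at 100f; on overlap we fall
	 * through to the backward-copy entry at 220f in memmove.
	 */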
	sub	%o1, %o0, %o4
	mov	%o0, %g4
	cmp	%o4, %o2
	mov	%o1, %o0
	bgeu,pt	%XCC, 100f
	 mov	%g4, %o1
#ifndef USE_BPR
	srl	%o2, 0, %o2
#endif
	brnz,pn	%o2, 220f
	 add	%o0, %o2, %o0
	retl
	 nop
END(bcopy)
#endif
/* Special/non-trivial issues of this code:
 *
 * 1) %o5 is preserved from VISEntryHalf to VISExitHalf
 * 2) Only low 32 FPU registers are used so that only the
 *    lower half of the FPU register set is dirtied by this
 *    code.  This is especially important in the kernel.
 * 3) This code never prefetches cachelines past the end
 *    of the source buffer.
 *
 * The cheetah's flexible spine, oversized liver, enlarged heart,
 * slender muscular body, and claws make it the swiftest hunter
 * in Africa and the fastest animal on land.  Can reach speeds
 * of up to 2.4GB per second.
 */
	.align	32
ENTRY(memcpy)
100:	/* %o0=dst, %o1=src, %o2=len */
	mov	%o0, %g5
	cmp	%o2, 0
	be,pn	%XCC, out
218:	 or	%o0, %o1, %o3
	cmp	%o2, 16
	bleu,a,pn %XCC, small_copy
	 or	%o3, %o2, %o3
	cmp	%o2, 256
	blu,pt	%XCC, medium_copy
	 andcc	%o3, 0x7, %g0
	ba,pt	%xcc, enter
	 andcc	%o0, 0x3f, %g2
/* Here len >= 256 and condition codes reflect execution
 * of "andcc %o0, 0x3f, %g2", done by the branch just above.
 */
	.align	64
enter:
	/* Is 'dst' already aligned on a 64-byte boundary? */
	be,pt	%XCC, 2f

	/* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the
	 * number of bytes to copy to make 'dst' 64-byte aligned.  We
	 * pre-subtract this from 'len'.
	 */
	 sub	%g2, 0x40, %g2
	sub	%g0, %g2, %g2
	sub	%o2, %g2, %o2

	/* Copy %g2 bytes from src to dst, one byte at a time. */
1:	ldub	[%o1 + 0x00], %o3
	add	%o1, 0x1, %o1
	add	%o0, 0x1, %o0
	subcc	%g2, 0x1, %g2
	bg,pt	%XCC, 1b
	 stb	%o3, [%o0 + -1]

2:	VISEntryHalf
	and	%o1, 0x7, %g1
	ba,pt	%xcc, begin
	 alignaddr %o1, %g0, %o1
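
	/* From here on the bulk copy runs on the VIS side: alignaddr
	 * rounds %o1 down to an 8-byte boundary and records the low
	 * address bits in %gsr, and each faligndata below extracts
	 * one realigned 8-byte word from two adjacent source
	 * doublewords.  %g1 keeps (src & 0x7) so the tail code can
	 * undo the rounding.
	 */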
	.align	64
begin:
	prefetch [%o1 + 0x000], #one_read
	prefetch [%o1 + 0x040], #one_read
	andn	%o2, (0x40 - 1), %o4
	prefetch [%o1 + 0x080], #one_read
	prefetch [%o1 + 0x0c0], #one_read
	ldd	[%o1 + 0x000], %f0
	prefetch [%o1 + 0x100], #one_read
	ldd	[%o1 + 0x008], %f2
	prefetch [%o1 + 0x140], #one_read
	ldd	[%o1 + 0x010], %f4
	prefetch [%o1 + 0x180], #one_read
	faligndata %f0, %f2, %f16
	ldd	[%o1 + 0x018], %f6
	faligndata %f2, %f4, %f18
	ldd	[%o1 + 0x020], %f8
	faligndata %f4, %f6, %f20
	ldd	[%o1 + 0x028], %f10
	faligndata %f6, %f8, %f22
	ldd	[%o1 + 0x030], %f12
	faligndata %f8, %f10, %f24
	ldd	[%o1 + 0x038], %f14
	faligndata %f10, %f12, %f26
	ldd	[%o1 + 0x040], %f0
	sub	%o4, 0x80, %o4
	add	%o1, 0x40, %o1
	ba,pt	%xcc, loop
	 srl	%o4, 6, %o3
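
	/* Main loop: each pass loads the next 64 source bytes,
	 * realigns the previous 64 into %f16-%f30 with faligndata,
	 * and flushes them with one 64-byte block store (stda to
	 * ASI_BLK_P).  %o3 counts full blocks; two were peeled off
	 * %o4 above so that loopfini can drain the pipeline without
	 * reading past the end of the source.
	 */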
	.align	64
loop:
	ldd	[%o1 + 0x008], %f2
	faligndata %f12, %f14, %f28
	ldd	[%o1 + 0x010], %f4
	faligndata %f14, %f0, %f30
	stda	%f16, [%o0] ASI_BLK_P
	ldd	[%o1 + 0x018], %f6
	faligndata %f0, %f2, %f16
	ldd	[%o1 + 0x020], %f8
	faligndata %f2, %f4, %f18
	ldd	[%o1 + 0x028], %f10
	faligndata %f4, %f6, %f20
	ldd	[%o1 + 0x030], %f12
	faligndata %f6, %f8, %f22
	ldd	[%o1 + 0x038], %f14
	faligndata %f8, %f10, %f24
	ldd	[%o1 + 0x040], %f0
	prefetch [%o1 + 0x180], #one_read
	faligndata %f10, %f12, %f26
	subcc	%o3, 0x01, %o3
	add	%o1, 0x40, %o1
	bg,pt	%XCC, loop
	 add	%o0, 0x40, %o0
	/* Finally we copy the last full 64-byte block. */
loopfini:
	ldd	[%o1 + 0x008], %f2
	faligndata %f12, %f14, %f28
	ldd	[%o1 + 0x010], %f4
	faligndata %f14, %f0, %f30
	stda	%f16, [%o0] ASI_BLK_P
	ldd	[%o1 + 0x018], %f6
	faligndata %f0, %f2, %f16
	ldd	[%o1 + 0x020], %f8
	faligndata %f2, %f4, %f18
	ldd	[%o1 + 0x028], %f10
	faligndata %f4, %f6, %f20
	ldd	[%o1 + 0x030], %f12
	faligndata %f6, %f8, %f22
	ldd	[%o1 + 0x038], %f14
	faligndata %f8, %f10, %f24
	cmp	%g1, 0
	be,pt	%XCC, 1f
	 add	%o0, 0x40, %o0
	ldd	[%o1 + 0x040], %f0
1:	faligndata %f10, %f12, %f26
	faligndata %f12, %f14, %f28
	faligndata %f14, %f0, %f30
	stda	%f16, [%o0] ASI_BLK_P
	add	%o0, 0x40, %o0
	add	%o1, 0x40, %o1
	membar	#Sync
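
	/* Block stores via ASI_BLK_P are not ordered like normal
	 * stores; the membar #Sync above makes them globally visible
	 * before the FP registers are reused and VISExitHalf runs.
	 */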
	/* Now we copy the (len modulo 64) bytes at the end.
	 * Note how we borrow the %f0 loaded above.
	 *
	 * Also notice how this code is careful not to perform a
	 * load past the end of the src buffer.
	 */
loopend:
	and	%o2, 0x3f, %o2
	andcc	%o2, 0x38, %g2
	be,pn	%XCC, endcruft
	 subcc	%g2, 0x8, %g2
	be,pn	%XCC, endcruft
	 cmp	%g1, 0
	be,a,pt	%XCC, 1f
	 ldd	[%o1 + 0x00], %f0
1:	ldd	[%o1 + 0x08], %f2
	add	%o1, 0x8, %o1
	sub	%o2, 0x8, %o2
	subcc	%g2, 0x8, %g2
	faligndata %f0, %f2, %f8
	std	%f8, [%o0 + 0x00]
	be,pn	%XCC, endcruft
	 add	%o0, 0x8, %o0
	ldd	[%o1 + 0x08], %f0
	add	%o1, 0x8, %o1
	sub	%o2, 0x8, %o2
	subcc	%g2, 0x8, %g2
	faligndata %f2, %f0, %f8
	std	%f8, [%o0 + 0x00]
	bne,pn	%XCC, 1b
	 add	%o0, 0x8, %o0
	/* If anything is left, we copy it one byte at a time.
	 * Note that %g1 is (src & 0x7) saved above before the
	 * alignaddr was performed.
	 */
endcruft:
	cmp	%o2, 0
	add	%o1, %g1, %o1
	VISExitHalf
	be,pn	%XCC, out
	 sub	%o0, %o1, %o3

	andcc	%g1, 0x7, %g0
	bne,pn	%icc, small_copy_unaligned
	 andcc	%o2, 0x8, %g0
	be,pt	%icc, 1f
	 nop
	ldx	[%o1], %o5
	stx	%o5, [%o1 + %o3]
	add	%o1, 0x8, %o1

1:	andcc	%o2, 0x4, %g0
	be,pt	%icc, 1f
	 nop
	lduw	[%o1], %o5
	stw	%o5, [%o1 + %o3]
	add	%o1, 0x4, %o1

1:	andcc	%o2, 0x2, %g0
	be,pt	%icc, 1f
	 nop
	lduh	[%o1], %o5
	sth	%o5, [%o1 + %o3]
	add	%o1, 0x2, %o1

1:	andcc	%o2, 0x1, %g0
	be,pt	%icc, out
	 nop
	ldub	[%o1], %o5
	ba,pt	%xcc, out
	 stb	%o5, [%o1 + %o3]
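
	/* The tail code above and the medium/small paths below all
	 * keep (dst - src) in %o3, so a single advancing pointer %o1
	 * addresses both buffers: loads from [%o1], stores to
	 * [%o1 + %o3].
	 */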
medium_copy: /* 16 < len < 256 */
	bne,pn	%XCC, small_copy_unaligned
	 sub	%o0, %o1, %o3
medium_copy_aligned:
	andn	%o2, 0x7, %o4
	and	%o2, 0x7, %o2
1:	subcc	%o4, 0x8, %o4
	ldx	[%o1], %o5
	stx	%o5, [%o1 + %o3]
	bgu,pt	%XCC, 1b
	 add	%o1, 0x8, %o1
	andcc	%o2, 0x4, %g0
	be,pt	%XCC, 1f
	 nop
	sub	%o2, 0x4, %o2
	lduw	[%o1], %o5
	stw	%o5, [%o1 + %o3]
	add	%o1, 0x4, %o1
1:	cmp	%o2, 0
	be,pt	%XCC, out
	 nop
	ba,pt	%xcc, small_copy_unaligned
	 nop
small_copy: /* 0 < len <= 16 */
	andcc	%o3, 0x3, %g0
	bne,pn	%XCC, small_copy_unaligned
	 sub	%o0, %o1, %o3
small_copy_aligned:
	subcc	%o2, 4, %o2
	lduw	[%o1], %g1
	stw	%g1, [%o1 + %o3]
	bgu,pt	%XCC, small_copy_aligned
	 add	%o1, 4, %o1

out:	retl
	 mov	%g5, %o0

	.align	32
small_copy_unaligned:
	subcc	%o2, 1, %o2
	ldub	[%o1], %g1
	stb	%g1, [%o1 + %o3]
	bgu,pt	%XCC, small_copy_unaligned
	 add	%o1, 1, %o1
	retl
	 mov	%g5, %o0
END(memcpy)
libc_hidden_def(memcpy)
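
/* Everything below is the backward-copy engine shared by memmove
 * and the overlapping-bcopy path above.  Each RMOVE_* macro moves
 * one chunk while walking down from the end of both buffers: the
 * *ALIGNCHUNK variants need 8-byte alignment and use stx, while
 * the plain variants split each ldx into stw halves for
 * 4-byte-aligned destinations.
 */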
#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldx	[%src - offset - 0x20], %t0; \
	ldx	[%src - offset - 0x18], %t1; \
	ldx	[%src - offset - 0x10], %t2; \
	ldx	[%src - offset - 0x08], %t3; \
	stw	%t0, [%dst - offset - 0x1c]; \
	srlx	%t0, 32, %t0; \
	stw	%t0, [%dst - offset - 0x20]; \
	stw	%t1, [%dst - offset - 0x14]; \
	srlx	%t1, 32, %t1; \
	stw	%t1, [%dst - offset - 0x18]; \
	stw	%t2, [%dst - offset - 0x0c]; \
	srlx	%t2, 32, %t2; \
	stw	%t2, [%dst - offset - 0x10]; \
	stw	%t3, [%dst - offset - 0x04]; \
	srlx	%t3, 32, %t3; \
	stw	%t3, [%dst - offset - 0x08];

#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldx	[%src - offset - 0x20], %t0; \
	ldx	[%src - offset - 0x18], %t1; \
	ldx	[%src - offset - 0x10], %t2; \
	ldx	[%src - offset - 0x08], %t3; \
	stx	%t0, [%dst - offset - 0x20]; \
	stx	%t1, [%dst - offset - 0x18]; \
	stx	%t2, [%dst - offset - 0x10]; \
	stx	%t3, [%dst - offset - 0x08]; \
	ldx	[%src - offset - 0x40], %t0; \
	ldx	[%src - offset - 0x38], %t1; \
	ldx	[%src - offset - 0x30], %t2; \
	ldx	[%src - offset - 0x28], %t3; \
	stx	%t0, [%dst - offset - 0x40]; \
	stx	%t1, [%dst - offset - 0x38]; \
	stx	%t2, [%dst - offset - 0x30]; \
	stx	%t3, [%dst - offset - 0x28];

#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldx	[%src + offset + 0x00], %t0; \
	ldx	[%src + offset + 0x08], %t1; \
	stw	%t0, [%dst + offset + 0x04]; \
	srlx	%t0, 32, %t2; \
	stw	%t2, [%dst + offset + 0x00]; \
	stw	%t1, [%dst + offset + 0x0c]; \
	srlx	%t1, 32, %t3; \
	stw	%t3, [%dst + offset + 0x08];

#define RMOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1) \
	ldx	[%src + offset + 0x00], %t0; \
	ldx	[%src + offset + 0x08], %t1; \
	stx	%t0, [%dst + offset + 0x00]; \
	stx	%t1, [%dst + offset + 0x08];
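
/* Byte-wise backward copy for short moves (entered from memmove
 * below when len <= 15).  The branch to 2f+4 works because the
 * delay slot has already executed the first load of the pair loop
 * at 2:, so the even-length path may skip it.
 */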
	.align	32
228:	andcc	%o2, 1, %g0		/* IEU1 Group */
	be,pt	%icc, 2f+4		/* CTI */
1:	 ldub	[%o1 - 1], %o5		/* LOAD Group */
	sub	%o1, 1, %o1		/* IEU0 */
	sub	%o0, 1, %o0		/* IEU1 */
	subcc	%o2, 1, %o2		/* IEU1 Group */
	be,pn	%xcc, 229f		/* CTI */
	 stb	%o5, [%o0]		/* Store */
2:	ldub	[%o1 - 1], %o5		/* LOAD Group */
	sub	%o0, 2, %o0		/* IEU0 */
	ldub	[%o1 - 2], %g5		/* LOAD Group */
	sub	%o1, 2, %o1		/* IEU0 */
	subcc	%o2, 2, %o2		/* IEU1 Group */
	stb	%o5, [%o0 + 1]		/* Store */
	bne,pt	%xcc, 2b		/* CTI */
	 stb	%g5, [%o0]		/* Store */
229:	retl
	 mov	%g4, %o0
	.align	32
ENTRY(memmove)
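	/* If (dst - src) >= len (unsigned), the regions do not
	 * overlap hazardously and we reuse the forward memcpy body
	 * at 218b; otherwise both pointers are advanced past the end
	 * of their buffers and the copy runs backward.
	 */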
	mov	%o0, %g5
#ifndef USE_BPR
	srl	%o2, 0, %o2		/* IEU1 Group */
#endif
	brz,pn	%o2, out		/* CTI Group */
	 sub	%o0, %o1, %o4		/* IEU0 */
	cmp	%o4, %o2		/* IEU1 Group */
	bgeu,pt	%XCC, 218b		/* CTI */
	 mov	%o0, %g4		/* IEU0 */
	add	%o0, %o2, %o0		/* IEU0 Group */
220:	add	%o1, %o2, %o1		/* IEU1 */
	cmp	%o2, 15			/* IEU1 Group */
	bleu,pn	%xcc, 228b		/* CTI */
	 andcc	%o0, 7, %g2		/* IEU1 Group */
	sub	%o0, %o1, %g5		/* IEU0 */
	andcc	%g5, 3, %o5		/* IEU1 Group */
	bne,pn	%xcc, 232f		/* CTI */
	 andcc	%o1, 3, %g0		/* IEU1 Group */
	be,a,pt	%xcc, 236f		/* CTI */
	 andcc	%o1, 4, %g0		/* IEU1 Group */
	andcc	%o1, 1, %g0		/* IEU1 Group */
	be,pn	%xcc, 4f		/* CTI */
	 andcc	%o1, 2, %g0		/* IEU1 Group */
	ldub	[%o1 - 1], %g2		/* Load Group */
	sub	%o1, 1, %o1		/* IEU0 */
	sub	%o0, 1, %o0		/* IEU1 */
	sub	%o2, 1, %o2		/* IEU0 Group */
	be,pn	%xcc, 5f		/* CTI Group */
	 stb	%g2, [%o0]		/* Store */
4:	lduh	[%o1 - 2], %g2		/* Load Group */
	sub	%o1, 2, %o1		/* IEU0 */
	sub	%o0, 2, %o0		/* IEU1 */
	sub	%o2, 2, %o2		/* IEU0 */
	sth	%g2, [%o0]		/* Store Group + bubble */
5:	andcc	%o1, 4, %g0		/* IEU1 */
236:	be,a,pn	%xcc, 2f		/* CTI */
	 andcc	%o2, -128, %g6		/* IEU1 Group */
	lduw	[%o1 - 4], %g5		/* Load Group */
	sub	%o1, 4, %o1		/* IEU0 */
	sub	%o0, 4, %o0		/* IEU1 */
	sub	%o2, 4, %o2		/* IEU0 Group */
	stw	%g5, [%o0]		/* Store */
	andcc	%o2, -128, %g6		/* IEU1 Group */
2:	be,pn	%xcc, 235f		/* CTI */
	 andcc	%o0, 4, %g0		/* IEU1 Group */
	be,pn	%xcc, 282f + 4		/* CTI Group */
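	/* The delay slot of the branch above is the first ldx of the
	 * RMOVE_BIGCHUNK below, which is identical to the first ldx
	 * of RMOVE_BIGALIGNCHUNK at 282f, so branching to 282f + 4
	 * skips the already-executed load.
	 */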
5:	RMOVE_BIGCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
	RMOVE_BIGCHUNK(o1, o0, 0x20, g1, g3, g5, o5)
	RMOVE_BIGCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
	RMOVE_BIGCHUNK(o1, o0, 0x60, g1, g3, g5, o5)
	subcc	%g6, 128, %g6		/* IEU1 Group */
	sub	%o1, 128, %o1		/* IEU0 */
	bne,pt	%xcc, 5b		/* CTI */
	 sub	%o0, 128, %o0		/* IEU0 Group */
235:	andcc	%o2, 0x70, %g6		/* IEU1 Group */
41:	be,pn	%xcc, 280f		/* CTI */
	 andcc	%o2, 8, %g0		/* IEU1 Group */
	/* Clk1 8-( */
	/* Clk2 8-( */
	/* Clk3 8-( */
	/* Clk4 8-( */
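	/* Computed branch into the RMOVE_LASTCHUNK table below: %g6
	 * holds the 16-byte-multiple remainder (0..0x70) and each
	 * expanded RMOVE_LASTCHUNK is eight instructions (32 bytes),
	 * so jumping to 280f - 2*%g6 executes exactly %g6/16 chunk
	 * copies.
	 */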
279:	rd	%pc, %o5		/* PDU Group */
	sll	%g6, 1, %g5		/* IEU0 Group */
	sub	%o1, %g6, %o1		/* IEU1 */
	sub	%o5, %g5, %o5		/* IEU0 Group */
	jmpl	%o5 + %lo(280f - 279b), %g0 /* CTI Group brk forced */
	 sub	%o0, %g6, %o0		/* IEU0 Group */
	RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g5, o5)
280:	be,pt	%xcc, 281f		/* CTI */
	 andcc	%o2, 4, %g0		/* IEU1 */
	ldx	[%o1 - 8], %g2		/* Load Group */
	sub	%o0, 8, %o0		/* IEU0 */
	stw	%g2, [%o0 + 4]		/* Store Group */
	sub	%o1, 8, %o1		/* IEU1 */
	srlx	%g2, 32, %g2		/* IEU0 Group */
	stw	%g2, [%o0]		/* Store */
281:	be,pt	%xcc, 1f		/* CTI */
	 andcc	%o2, 2, %g0		/* IEU1 Group */
	lduw	[%o1 - 4], %g2		/* Load Group */
	sub	%o1, 4, %o1		/* IEU0 */
	stw	%g2, [%o0 - 4]		/* Store Group */
	sub	%o0, 4, %o0		/* IEU0 */
1:	be,pt	%xcc, 1f		/* CTI */
	 andcc	%o2, 1, %g0		/* IEU1 Group */
	lduh	[%o1 - 2], %g2		/* Load Group */
	sub	%o1, 2, %o1		/* IEU0 */
	sth	%g2, [%o0 - 2]		/* Store Group */
	sub	%o0, 2, %o0		/* IEU0 */
1:	be,pt	%xcc, 211f		/* CTI */
	 nop				/* IEU1 */
	ldub	[%o1 - 1], %g2		/* Load Group */
	stb	%g2, [%o0 - 1]		/* Store Group + bubble */
211:	retl
	 mov	%g4, %o0
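
	/* As above, but taken when both pointers end up 8-byte
	 * aligned, so whole chunks can move with ldx/stx pairs.
	 */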
282:	RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
	RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
	subcc	%g6, 128, %g6		/* IEU1 Group */
	sub	%o1, 128, %o1		/* IEU0 */
	bne,pt	%xcc, 282b		/* CTI */
	 sub	%o0, 128, %o0		/* IEU0 Group */
	andcc	%o2, 0x70, %g6		/* IEU1 */
	be,pn	%xcc, 284f		/* CTI */
	 andcc	%o2, 8, %g0		/* IEU1 Group */
	/* Clk1 8-( */
	/* Clk2 8-( */
	/* Clk3 8-( */
	/* Clk4 8-( */
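	/* Same dispatch trick as at 279b, but each expanded
	 * RMOVE_LASTALIGNCHUNK is four instructions (16 bytes), so
	 * the byte remainder %g6 is itself the backward offset from
	 * 284f.
	 */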
283:	rd	%pc, %o5		/* PDU Group */
	sub	%o1, %g6, %o1		/* IEU0 Group */
	sub	%o5, %g6, %o5		/* IEU1 */
	jmpl	%o5 + %lo(284f - 283b), %g0 /* CTI Group brk forced */
	 sub	%o0, %g6, %o0		/* IEU0 Group */
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3)
284:	be,pt	%xcc, 285f		/* CTI Group */
	 andcc	%o2, 4, %g0		/* IEU1 */
	ldx	[%o1 - 8], %g2		/* Load Group */
	sub	%o0, 8, %o0		/* IEU0 */
	sub	%o1, 8, %o1		/* IEU0 Group */
	stx	%g2, [%o0]		/* Store */
285:	be,pt	%xcc, 1f		/* CTI */
	 andcc	%o2, 2, %g0		/* IEU1 Group */
	lduw	[%o1 - 4], %g2		/* Load Group */
	sub	%o0, 4, %o0		/* IEU0 */
	sub	%o1, 4, %o1		/* IEU0 Group */
	stw	%g2, [%o0]		/* Store */
1:	be,pt	%xcc, 1f		/* CTI */
	 andcc	%o2, 1, %g0		/* IEU1 Group */
	lduh	[%o1 - 2], %g2		/* Load Group */
	sub	%o0, 2, %o0		/* IEU0 */
	sub	%o1, 2, %o1		/* IEU0 Group */
	sth	%g2, [%o0]		/* Store */
1:	be,pt	%xcc, 1f		/* CTI */
	 nop				/* IEU0 Group */
	ldub	[%o1 - 1], %g2		/* Load Group */
	stb	%g2, [%o0 - 1]		/* Store Group + bubble */
1:	retl
	 mov	%g4, %o0
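
	/* Backward copy when src and dst differ in 4-byte alignment:
	 * copy bytes until dst is 8-byte aligned, stream the middle
	 * backward with alignaddr/faligndata, then finish the last
	 * few bytes one at a time.
	 */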
232:	brz,pt	%g2, 2f			/* CTI Group */
	 sub	%o2, %g2, %o2		/* IEU0 Group */
1:	ldub	[%o1 - 1], %g5		/* Load Group */
	sub	%o1, 1, %o1		/* IEU0 */
	sub	%o0, 1, %o0		/* IEU1 */
	subcc	%g2, 1, %g2		/* IEU1 Group */
	bne,pt	%xcc, 1b		/* CTI */
	 stb	%g5, [%o0]		/* Store */
2:	andn	%o2, 7, %g5		/* IEU0 Group */
	and	%o2, 7, %o2		/* IEU1 */
	fmovd	%f0, %f2		/* FPU */
	alignaddr %o1, %g0, %g1		/* GRU Group */
	ldd	[%g1], %f4		/* Load Group */
1:	ldd	[%g1 - 8], %f6		/* Load Group */
	sub	%g1, 8, %g1		/* IEU0 Group */
	subcc	%g5, 8, %g5		/* IEU1 */
	faligndata %f6, %f4, %f0	/* GRU Group */
	std	%f0, [%o0 - 8]		/* Store */
	sub	%o1, 8, %o1		/* IEU0 Group */
	be,pn	%xcc, 233f		/* CTI */
	 sub	%o0, 8, %o0		/* IEU1 */
	ldd	[%g1 - 8], %f4		/* Load Group */
	sub	%g1, 8, %g1		/* IEU0 */
	subcc	%g5, 8, %g5		/* IEU1 */
	faligndata %f4, %f6, %f0	/* GRU Group */
	std	%f0, [%o0 - 8]		/* Store */
	sub	%o1, 8, %o1		/* IEU0 */
	bne,pn	%xcc, 1b		/* CTI Group */
	 sub	%o0, 8, %o0		/* IEU0 */
233:	brz,pn	%o2, 234f		/* CTI Group */
	 nop				/* IEU0 */
237:	ldub	[%o1 - 1], %g5		/* LOAD */
	sub	%o1, 1, %o1		/* IEU0 */
	sub	%o0, 1, %o0		/* IEU1 */
	subcc	%o2, 1, %o2		/* IEU1 */
	bne,pt	%xcc, 237b		/* CTI */
	 stb	%g5, [%o0]		/* Store Group */
234:	wr	%g0, FPRS_FEF, %fprs
	retl
	 mov	%g4, %o0
END(memmove)
libc_hidden_def(memmove)
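
/* The SPARC v9 ABI defines __align_cpy_N entry points that the
 * compiler may call for block moves with known alignment; this
 * memcpy handles any alignment, so they are simple aliases.
 */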
#ifdef USE_BPR
weak_alias(memcpy,__align_cpy_1)
weak_alias(memcpy,__align_cpy_2)
weak_alias(memcpy,__align_cpy_4)
weak_alias(memcpy,__align_cpy_8)
weak_alias(memcpy,__align_cpy_16)
#endif