/* Copy SIZE bytes from SRC to DEST.
   For UltraSPARC-III.
   Copyright (C) 2001, 2003 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by David S. Miller (davem@redhat.com)

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <features.h>

#define ASI_BLK_P 0xf0
#define FPRS_FEF  0x04
#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
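/* VISEntryHalf saves the current %fprs in %o5 and then sets FPRS_FEF,
 * enabling the FPU for the VIS block-copy code below.  VISExitHalf
 * writes back only the FEF bit that was live on entry, so a caller
 * that had the FPU disabled gets it disabled again.  %o5 must stay
 * untouched between the two macros (issue 1 in the notes below).
 */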
#ifndef XCC
#define USE_BPR
#define XCC xcc
#endif

	.register	%g2, #scratch
	.register	%g3, #scratch
	.register	%g6, #scratch

	.text
	.align	32
#ifdef __UCLIBC_SUSV3_LEGACY__
ENTRY(bcopy)
	sub	%o1, %o0, %o4
	mov	%o0, %g4
	cmp	%o4, %o2
	mov	%o1, %o0
	bgeu,pt	%XCC, 100f
	 mov	%g4, %o1
#ifndef USE_BPR
	srl	%o2, 0, %o2
#endif
	brnz,pn	%o2, 220f
	 add	%o0, %o2, %o0
	retl
	 nop
END(bcopy)
#endif
/* Special/non-trivial issues of this code:
 *
 * 1) %o5 is preserved from VISEntryHalf to VISExitHalf
 * 2) Only low 32 FPU registers are used so that only the
 *    lower half of the FPU register set is dirtied by this
 *    code.  This is especially important in the kernel.
 * 3) This code never prefetches cachelines past the end
 *    of the source buffer.
 *
 * The cheetah's flexible spine, oversized liver, enlarged heart,
 * slender muscular body, and claws make it the swiftest hunter
 * in Africa and the fastest animal on land.  Can reach speeds
 * of up to 2.4GB per second.
 */
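/* Dispatch overview (a summary of the comparisons at the top of
 * memcpy below): copies of at most 16 bytes go to small_copy,
 * copies below 256 bytes go to medium_copy, and everything else
 * falls through to the 64-byte VIS block-copy loop at `enter'.
 */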
	.align	32
ENTRY(memcpy)
100:	/* %o0=dst, %o1=src, %o2=len */
	mov	%o0, %g5
	cmp	%o2, 0
	be,pn	%XCC, out
218:	 or	%o0, %o1, %o3
	cmp	%o2, 16
	bleu,a,pn %XCC, small_copy
	 or	%o3, %o2, %o3
	cmp	%o2, 256
	blu,pt	%XCC, medium_copy
	 andcc	%o3, 0x7, %g0

	ba,pt	%xcc, enter
	 andcc	%o0, 0x3f, %g2
	/* Here len >= 256 and condition codes reflect execution
	 * of "andcc %o0, 0x3f, %g2", done by caller.
	 */
	.align	64
enter:
	/* Is 'dst' already aligned on a 64-byte boundary? */
	be,pt	%XCC, 2f
	/* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
	 * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
	 * subtract this from 'len'.
	 */
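	/* Worked example (illustrative only): if dst & 0x3f = 0x08,
	 * then %g2 = 0x08 - 0x40 = -0x38, and negating via
	 * "sub %g0, %g2, %g2" leaves %g2 = 0x38, the 56 bytes needed
	 * to reach the next 64-byte boundary.
	 */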
	 sub	%g2, 0x40, %g2
	sub	%g0, %g2, %g2
	sub	%o2, %g2, %o2

	/* Copy %g2 bytes from src to dst, one byte at a time. */
1:	ldub	[%o1 + 0x00], %o3
	add	%o1, 0x1, %o1
	add	%o0, 0x1, %o0
	subcc	%g2, 0x1, %g2
	bg,pt	%XCC, 1b
	 stb	%o3, [%o0 + -1]

2:	VISEntryHalf
	and	%o1, 0x7, %g1
	ba,pt	%xcc, begin
	 alignaddr %o1, %g0, %o1
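	/* alignaddr rounds %o1 down to an 8-byte boundary and records
	 * the discarded low bits in the GSR; each faligndata below then
	 * extracts the 8 misaligned source bytes from a pair of aligned
	 * 8-byte loads.  %g1 keeps the original (src & 0x7) for the
	 * fix-up after the block loop.
	 */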
	.align	64
begin:
	prefetch	[%o1 + 0x000], #one_read
	prefetch	[%o1 + 0x040], #one_read
	andn		%o2, (0x40 - 1), %o4
	prefetch	[%o1 + 0x080], #one_read
	prefetch	[%o1 + 0x0c0], #one_read
	ldd		[%o1 + 0x000], %f0
	prefetch	[%o1 + 0x100], #one_read
	ldd		[%o1 + 0x008], %f2
	prefetch	[%o1 + 0x140], #one_read
	ldd		[%o1 + 0x010], %f4
	prefetch	[%o1 + 0x180], #one_read
	faligndata	%f0, %f2, %f16
	ldd		[%o1 + 0x018], %f6
	faligndata	%f2, %f4, %f18
	ldd		[%o1 + 0x020], %f8
	faligndata	%f4, %f6, %f20
	ldd		[%o1 + 0x028], %f10
	faligndata	%f6, %f8, %f22
	ldd		[%o1 + 0x030], %f12
	faligndata	%f8, %f10, %f24
	ldd		[%o1 + 0x038], %f14
	faligndata	%f10, %f12, %f26
	ldd		[%o1 + 0x040], %f0

	sub	%o4, 0x80, %o4
	add	%o1, 0x40, %o1
	ba,pt	%xcc, loop
	 srl	%o4, 6, %o3
	.align	64
loop:
	ldd		[%o1 + 0x008], %f2
	faligndata	%f12, %f14, %f28
	ldd		[%o1 + 0x010], %f4
	faligndata	%f14, %f0, %f30
	stda		%f16, [%o0] ASI_BLK_P
	ldd		[%o1 + 0x018], %f6
	faligndata	%f0, %f2, %f16
	ldd		[%o1 + 0x020], %f8
	faligndata	%f2, %f4, %f18
	ldd		[%o1 + 0x028], %f10
	faligndata	%f4, %f6, %f20
	ldd		[%o1 + 0x030], %f12
	faligndata	%f6, %f8, %f22
	ldd		[%o1 + 0x038], %f14
	faligndata	%f8, %f10, %f24
	ldd		[%o1 + 0x040], %f0
	prefetch	[%o1 + 0x180], #one_read
	faligndata	%f10, %f12, %f26
	subcc		%o3, 0x01, %o3
	add		%o1, 0x40, %o1
	bg,pt		%XCC, loop
	 add		%o0, 0x40, %o0
	/* Finally we copy the last full 64-byte block. */
loopfini:
	ldd		[%o1 + 0x008], %f2
	faligndata	%f12, %f14, %f28
	ldd		[%o1 + 0x010], %f4
	faligndata	%f14, %f0, %f30
	stda		%f16, [%o0] ASI_BLK_P
	ldd		[%o1 + 0x018], %f6
	faligndata	%f0, %f2, %f16
	ldd		[%o1 + 0x020], %f8
	faligndata	%f2, %f4, %f18
	ldd		[%o1 + 0x028], %f10
	faligndata	%f4, %f6, %f20
	ldd		[%o1 + 0x030], %f12
	faligndata	%f6, %f8, %f22
	ldd		[%o1 + 0x038], %f14
	faligndata	%f8, %f10, %f24
	cmp		%g1, 0
	be,pt		%XCC, 1f
	 add		%o0, 0x40, %o0
	ldd		[%o1 + 0x040], %f0
1:	faligndata	%f10, %f12, %f26
	faligndata	%f12, %f14, %f28
	faligndata	%f14, %f0, %f30
	stda		%f16, [%o0] ASI_BLK_P
	add		%o0, 0x40, %o0
	add		%o1, 0x40, %o1
	membar		#Sync

	/* Now we copy the (len modulo 64) bytes at the end.
	 * Note how we borrow the %f0 loaded above.
	 *
	 * Also notice how this code is careful not to perform a
	 * load past the end of the src buffer.
	 */
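	/* The tail is split in two: the (len & 0x38) part is moved
	 * 8 bytes at a time with faligndata below, and the final
	 * (len & 0x7) bytes are handled integer-wise at endcruft,
	 * after VISExitHalf has restored the FPU state.
	 */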
loopend:
	and	%o2, 0x3f, %o2
	andcc	%o2, 0x38, %g2
	be,pn	%XCC, endcruft
	 subcc	%g2, 0x8, %g2
	be,pn	%XCC, endcruft
	 cmp	%g1, 0

	be,a,pt	%XCC, 1f
	 ldd	[%o1 + 0x00], %f0

1:	ldd	[%o1 + 0x08], %f2
	add	%o1, 0x8, %o1
	sub	%o2, 0x8, %o2
	subcc	%g2, 0x8, %g2
	faligndata %f0, %f2, %f8
	std	%f8, [%o0 + 0x00]
	be,pn	%XCC, endcruft
	 add	%o0, 0x8, %o0
	ldd	[%o1 + 0x08], %f0
	add	%o1, 0x8, %o1
	sub	%o2, 0x8, %o2
	subcc	%g2, 0x8, %g2
	faligndata %f2, %f0, %f8
	std	%f8, [%o0 + 0x00]
	bne,pn	%XCC, 1b
	 add	%o0, 0x8, %o0
	/* If anything is left, we copy it one byte at a time.
	 * Note that %g1 is (src & 0x7) saved above before the
	 * alignaddr was performed.
	 */
endcruft:
	cmp	%o2, 0
	add	%o1, %g1, %o1
	VISExitHalf
	be,pn	%XCC, out
	 sub	%o0, %o1, %o3

	andcc	%g1, 0x7, %g0
	bne,pn	%icc, small_copy_unaligned
	 andcc	%o2, 0x8, %g0
	be,pt	%icc, 1f
	 nop
	ldx	[%o1], %o5
	stx	%o5, [%o1 + %o3]
	add	%o1, 0x8, %o1

1:	andcc	%o2, 0x4, %g0
	be,pt	%icc, 1f
	 nop
	lduw	[%o1], %o5
	stw	%o5, [%o1 + %o3]
	add	%o1, 0x4, %o1

1:	andcc	%o2, 0x2, %g0
	be,pt	%icc, 1f
	 nop
	lduh	[%o1], %o5
	sth	%o5, [%o1 + %o3]
	add	%o1, 0x2, %o1

1:	andcc	%o2, 0x1, %g0
	be,pt	%icc, out
	 nop
	ldub	[%o1], %o5
	ba,pt	%xcc, out
	 stb	%o5, [%o1 + %o3]
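/* The small/medium paths keep only one moving pointer: %o3 holds
 * (dst - src), so every store goes to [%o1 + %o3] and only %o1 is
 * advanced.  This saves an add per iteration on the copy loops.
 */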
medium_copy: /* 16 < len < 256 */
	bne,pn	%XCC, small_copy_unaligned
	 sub	%o0, %o1, %o3
medium_copy_aligned:
	andn	%o2, 0x7, %o4
	and	%o2, 0x7, %o2
1:	subcc	%o4, 0x8, %o4
	ldx	[%o1], %o5
	stx	%o5, [%o1 + %o3]
	bgu,pt	%XCC, 1b
	 add	%o1, 0x8, %o1
	andcc	%o2, 0x4, %g0
	be,pt	%XCC, 1f
	 nop
	sub	%o2, 0x4, %o2
	lduw	[%o1], %o5
	stw	%o5, [%o1 + %o3]
	add	%o1, 0x4, %o1
1:	cmp	%o2, 0
	be,pt	%XCC, out
	 nop
	ba,pt	%xcc, small_copy_unaligned
	 nop

small_copy: /* 0 < len <= 16 */
	andcc	%o3, 0x3, %g0
	bne,pn	%XCC, small_copy_unaligned
	 sub	%o0, %o1, %o3
small_copy_aligned:
	subcc	%o2, 4, %o2
	lduw	[%o1], %g1
	stw	%g1, [%o1 + %o3]
	bgu,pt	%XCC, small_copy_aligned
	 add	%o1, 4, %o1

out:	retl
	 mov	%g5, %o0

	.align	32
small_copy_unaligned:
	subcc	%o2, 1, %o2
	ldub	[%o1], %g1
	stb	%g1, [%o1 + %o3]
	bgu,pt	%XCC, small_copy_unaligned
	 add	%o1, 1, %o1
	retl
	 mov	%g5, %o0

END(memcpy)
libc_hidden_def(memcpy)
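/* The RMOVE_* macros below implement the backwards (descending
 * address) copy used by memmove when the buffers overlap with
 * dst > src.  RMOVE_BIGCHUNK moves 0x20 bytes and
 * RMOVE_BIGALIGNCHUNK moves 0x40 bytes per expansion; the LASTCHUNK
 * variants move 16 bytes each and are entered through a computed
 * jump, explained at label 279 below.
 */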
#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldx	[%src - offset - 0x20], %t0; \
	ldx	[%src - offset - 0x18], %t1; \
	ldx	[%src - offset - 0x10], %t2; \
	ldx	[%src - offset - 0x08], %t3; \
	stw	%t0, [%dst - offset - 0x1c]; \
	srlx	%t0, 32, %t0; \
	stw	%t0, [%dst - offset - 0x20]; \
	stw	%t1, [%dst - offset - 0x14]; \
	srlx	%t1, 32, %t1; \
	stw	%t1, [%dst - offset - 0x18]; \
	stw	%t2, [%dst - offset - 0x0c]; \
	srlx	%t2, 32, %t2; \
	stw	%t2, [%dst - offset - 0x10]; \
	stw	%t3, [%dst - offset - 0x04]; \
	srlx	%t3, 32, %t3; \
	stw	%t3, [%dst - offset - 0x08];

#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldx	[%src - offset - 0x20], %t0; \
	ldx	[%src - offset - 0x18], %t1; \
	ldx	[%src - offset - 0x10], %t2; \
	ldx	[%src - offset - 0x08], %t3; \
	stx	%t0, [%dst - offset - 0x20]; \
	stx	%t1, [%dst - offset - 0x18]; \
	stx	%t2, [%dst - offset - 0x10]; \
	stx	%t3, [%dst - offset - 0x08]; \
	ldx	[%src - offset - 0x40], %t0; \
	ldx	[%src - offset - 0x38], %t1; \
	ldx	[%src - offset - 0x30], %t2; \
	ldx	[%src - offset - 0x28], %t3; \
	stx	%t0, [%dst - offset - 0x40]; \
	stx	%t1, [%dst - offset - 0x38]; \
	stx	%t2, [%dst - offset - 0x30]; \
	stx	%t3, [%dst - offset - 0x28];

#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldx	[%src + offset + 0x00], %t0; \
	ldx	[%src + offset + 0x08], %t1; \
	stw	%t0, [%dst + offset + 0x04]; \
	srlx	%t0, 32, %t2; \
	stw	%t2, [%dst + offset + 0x00]; \
	stw	%t1, [%dst + offset + 0x0c]; \
	srlx	%t1, 32, %t3; \
	stw	%t3, [%dst + offset + 0x08];

#define RMOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1) \
	ldx	[%src + offset + 0x00], %t0; \
	ldx	[%src + offset + 0x08], %t1; \
	stx	%t0, [%dst + offset + 0x00]; \
	stx	%t1, [%dst + offset + 0x08];
	.align	32
228:	andcc	%o2, 1, %g0			/* IEU1		Group */
	be,pt	%icc, 2f+4			/* CTI */
1:	 ldub	[%o1 - 1], %o5			/* LOAD		Group */
	sub	%o1, 1, %o1			/* IEU0 */
	sub	%o0, 1, %o0			/* IEU1 */
	subcc	%o2, 1, %o2			/* IEU1		Group */
	be,pn	%xcc, 229f			/* CTI */
	 stb	%o5, [%o0]			/* Store */
2:	ldub	[%o1 - 1], %o5			/* LOAD		Group */
	sub	%o0, 2, %o0			/* IEU0 */
	ldub	[%o1 - 2], %g5			/* LOAD		Group */
	sub	%o1, 2, %o1			/* IEU0 */
	subcc	%o2, 2, %o2			/* IEU1		Group */
	stb	%o5, [%o0 + 1]			/* Store */
	bne,pt	%xcc, 2b			/* CTI */
	 stb	%g5, [%o0]			/* Store */
229:	retl
	 mov	%g4, %o0
	.align	32
ENTRY(memmove)
	mov	%o0, %g5
#ifndef USE_BPR
	srl	%o2, 0, %o2			/* IEU1		Group */
#endif
	brz,pn	%o2, out			/* CTI		Group */
	 sub	%o0, %o1, %o4			/* IEU0 */
	cmp	%o4, %o2			/* IEU1		Group */
	bgeu,pt	%XCC, 218b			/* CTI */
	 mov	%o0, %g4			/* IEU0 */
	add	%o0, %o2, %o0			/* IEU0		Group */
220:	add	%o1, %o2, %o1			/* IEU1 */
	cmp	%o2, 15				/* IEU1		Group */
	bleu,pn	%xcc, 228b			/* CTI */
	 andcc	%o0, 7, %g2			/* IEU1		Group */
	sub	%o0, %o1, %g5			/* IEU0 */
	andcc	%g5, 3, %o5			/* IEU1		Group */
	bne,pn	%xcc, 232f			/* CTI */
	 andcc	%o1, 3, %g0			/* IEU1		Group */
	be,a,pt	%xcc, 236f			/* CTI */
	 andcc	%o1, 4, %g0			/* IEU1		Group */
	andcc	%o1, 1, %g0			/* IEU1		Group */
	be,pn	%xcc, 4f			/* CTI */
	 andcc	%o1, 2, %g0			/* IEU1		Group */
	ldub	[%o1 - 1], %g2			/* Load		Group */
	sub	%o1, 1, %o1			/* IEU0 */
	sub	%o0, 1, %o0			/* IEU1 */
	sub	%o2, 1, %o2			/* IEU0		Group */
	be,pn	%xcc, 5f			/* CTI		Group */
	 stb	%g2, [%o0]			/* Store */
4:	lduh	[%o1 - 2], %g2			/* Load		Group */
	sub	%o1, 2, %o1			/* IEU0 */
	sub	%o0, 2, %o0			/* IEU1 */
	sub	%o2, 2, %o2			/* IEU0 */
	sth	%g2, [%o0]			/* Store	Group + bubble */
5:	andcc	%o1, 4, %g0			/* IEU1 */
236:	be,a,pn	%xcc, 2f			/* CTI */
	 andcc	%o2, -128, %g6			/* IEU1		Group */
	lduw	[%o1 - 4], %g5			/* Load		Group */
	sub	%o1, 4, %o1			/* IEU0 */
	sub	%o0, 4, %o0			/* IEU1 */
	sub	%o2, 4, %o2			/* IEU0		Group */
	stw	%g5, [%o0]			/* Store */
	andcc	%o2, -128, %g6			/* IEU1		Group */
2:	be,pn	%xcc, 235f			/* CTI */
	 andcc	%o0, 4, %g0			/* IEU1		Group */
	be,pn	%xcc, 282f + 4			/* CTI		Group */
5:	RMOVE_BIGCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
	RMOVE_BIGCHUNK(o1, o0, 0x20, g1, g3, g5, o5)
	RMOVE_BIGCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
	RMOVE_BIGCHUNK(o1, o0, 0x60, g1, g3, g5, o5)
	subcc	%g6, 128, %g6			/* IEU1		Group */
	sub	%o1, 128, %o1			/* IEU0 */
	bne,pt	%xcc, 5b			/* CTI */
	 sub	%o0, 128, %o0			/* IEU0		Group */
235:	andcc	%o2, 0x70, %g6			/* IEU1		Group */
41:	be,pn	%xcc, 280f			/* CTI */
	 andcc	%o2, 8, %g0			/* IEU1		Group */
						/* Clk1 8-( */
						/* Clk2 8-( */
						/* Clk3 8-( */
						/* Clk4 8-( */
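	/* Computed jump into the RMOVE_LASTCHUNK copies below: %g6
	 * holds (len & 0x70) bytes still to move, each chunk copies
	 * 0x10 bytes, and each expansion is 8 instructions (32 bytes)
	 * of code.  So "sll %g6, 1" converts the byte count into a
	 * code offset, and jumping to (280f - 2*%g6) executes exactly
	 * %g6/16 chunks before falling through to label 280.
	 */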
279:	rd	%pc, %o5			/* PDU		Group */
	sll	%g6, 1, %g5			/* IEU0		Group */
	sub	%o1, %g6, %o1			/* IEU1 */
	sub	%o5, %g5, %o5			/* IEU0		Group */
	jmpl	%o5 + %lo(280f - 279b), %g0	/* CTI		Group brk forced */
	 sub	%o0, %g6, %o0			/* IEU0		Group */
	RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g5, o5)
280:	be,pt	%xcc, 281f			/* CTI */
	 andcc	%o2, 4, %g0			/* IEU1 */
	ldx	[%o1 - 8], %g2			/* Load		Group */
	sub	%o0, 8, %o0			/* IEU0 */
	stw	%g2, [%o0 + 4]			/* Store	Group */
	sub	%o1, 8, %o1			/* IEU1 */
	srlx	%g2, 32, %g2			/* IEU0		Group */
	stw	%g2, [%o0]			/* Store */
281:	be,pt	%xcc, 1f			/* CTI */
	 andcc	%o2, 2, %g0			/* IEU1		Group */
	lduw	[%o1 - 4], %g2			/* Load		Group */
	sub	%o1, 4, %o1			/* IEU0 */
	stw	%g2, [%o0 - 4]			/* Store	Group */
	sub	%o0, 4, %o0			/* IEU0 */
1:	be,pt	%xcc, 1f			/* CTI */
	 andcc	%o2, 1, %g0			/* IEU1		Group */
	lduh	[%o1 - 2], %g2			/* Load		Group */
	sub	%o1, 2, %o1			/* IEU0 */
	sth	%g2, [%o0 - 2]			/* Store	Group */
	sub	%o0, 2, %o0			/* IEU0 */
1:	be,pt	%xcc, 211f			/* CTI */
	 nop					/* IEU1 */
	ldub	[%o1 - 1], %g2			/* Load		Group */
	stb	%g2, [%o0 - 1]			/* Store	Group + bubble */
211:	retl
	 mov	%g4, %o0

282:	RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
	RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
	subcc	%g6, 128, %g6			/* IEU1		Group */
	sub	%o1, 128, %o1			/* IEU0 */
	bne,pt	%xcc, 282b			/* CTI */
	 sub	%o0, 128, %o0			/* IEU0		Group */
	andcc	%o2, 0x70, %g6			/* IEU1 */
	be,pn	%xcc, 284f			/* CTI */
	 andcc	%o2, 8, %g0			/* IEU1		Group */
						/* Clk1 8-( */
						/* Clk2 8-( */
						/* Clk3 8-( */
						/* Clk4 8-( */
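	/* Same computed-jump trick as at 279 above, but each
	 * RMOVE_LASTALIGNCHUNK is 4 instructions (16 bytes) copying
	 * 16 bytes, so the byte count in %g6 doubles as the code
	 * offset and no shift is needed.
	 */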
283:	rd	%pc, %o5			/* PDU		Group */
	sub	%o1, %g6, %o1			/* IEU0		Group */
	sub	%o5, %g6, %o5			/* IEU1 */
	jmpl	%o5 + %lo(284f - 283b), %g0	/* CTI		Group brk forced */
	 sub	%o0, %g6, %o0			/* IEU0		Group */
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3)
284:	be,pt	%xcc, 285f			/* CTI		Group */
	 andcc	%o2, 4, %g0			/* IEU1 */
	ldx	[%o1 - 8], %g2			/* Load		Group */
	sub	%o0, 8, %o0			/* IEU0 */
	sub	%o1, 8, %o1			/* IEU0		Group */
	stx	%g2, [%o0]			/* Store */
285:	be,pt	%xcc, 1f			/* CTI */
	 andcc	%o2, 2, %g0			/* IEU1		Group */
	lduw	[%o1 - 4], %g2			/* Load		Group */
	sub	%o0, 4, %o0			/* IEU0 */
	sub	%o1, 4, %o1			/* IEU0		Group */
	stw	%g2, [%o0]			/* Store */
1:	be,pt	%xcc, 1f			/* CTI */
	 andcc	%o2, 1, %g0			/* IEU1		Group */
	lduh	[%o1 - 2], %g2			/* Load		Group */
	sub	%o0, 2, %o0			/* IEU0 */
	sub	%o1, 2, %o1			/* IEU0		Group */
	sth	%g2, [%o0]			/* Store */
1:	be,pt	%xcc, 1f			/* CTI */
	 nop					/* IEU0	Group */
	ldub	[%o1 - 1], %g2			/* Load		Group */
	stb	%g2, [%o0 - 1]			/* Store	Group + bubble */
1:	retl
	 mov	%g4, %o0

232:	brz,pt	%g2, 2f				/* CTI		Group */
	 sub	%o2, %g2, %o2			/* IEU0		Group */
1:	ldub	[%o1 - 1], %g5			/* Load		Group */
	sub	%o1, 1, %o1			/* IEU0 */
	sub	%o0, 1, %o0			/* IEU1 */
	subcc	%g2, 1, %g2			/* IEU1		Group */
	bne,pt	%xcc, 1b			/* CTI */
	 stb	%g5, [%o0]			/* Store */
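	/* Backward copy of a source that is not 4-byte congruent with
	 * the destination: alignaddr/faligndata reassemble each
	 * misaligned 8 bytes from a pair of aligned 8-byte loads taken
	 * in descending address order.
	 */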
2:	andn	%o2, 7, %g5			/* IEU0		Group */
	and	%o2, 7, %o2			/* IEU1 */
	fmovd	%f0, %f2			/* FPU */
	alignaddr %o1, %g0, %g1			/* GRU		Group */
	ldd	[%g1], %f4			/* Load		Group */
1:	ldd	[%g1 - 8], %f6			/* Load		Group */
	sub	%g1, 8, %g1			/* IEU0		Group */
	subcc	%g5, 8, %g5			/* IEU1 */
	faligndata %f6, %f4, %f0		/* GRU		Group */
	std	%f0, [%o0 - 8]			/* Store */
	sub	%o1, 8, %o1			/* IEU0		Group */
	be,pn	%xcc, 233f			/* CTI */
	 sub	%o0, 8, %o0			/* IEU1 */
	ldd	[%g1 - 8], %f4			/* Load		Group */
	sub	%g1, 8, %g1			/* IEU0 */
	subcc	%g5, 8, %g5			/* IEU1 */
	faligndata %f4, %f6, %f0		/* GRU		Group */
	std	%f0, [%o0 - 8]			/* Store */
	sub	%o1, 8, %o1			/* IEU0 */
	bne,pn	%xcc, 1b			/* CTI		Group */
	 sub	%o0, 8, %o0			/* IEU0 */
233:	brz,pn	%o2, 234f			/* CTI		Group */
	 nop					/* IEU0 */
237:	ldub	[%o1 - 1], %g5			/* LOAD */
	sub	%o1, 1, %o1			/* IEU0 */
	sub	%o0, 1, %o0			/* IEU1 */
	subcc	%o2, 1, %o2			/* IEU1 */
	bne,pt	%xcc, 237b			/* CTI */
	 stb	%g5, [%o0]			/* Store	Group */
234:	wr	%g0, FPRS_FEF, %fprs
	retl
	 mov	%g4, %o0
END(memmove)
libc_hidden_def(memmove)

#ifdef USE_BPR
weak_alias(memcpy,__align_cpy_1)
weak_alias(memcpy,__align_cpy_2)
weak_alias(memcpy,__align_cpy_4)
weak_alias(memcpy,__align_cpy_8)
weak_alias(memcpy,__align_cpy_16)
#endif