/* Copy SIZE bytes from SRC to DEST.
   For UltraSPARC-III.
   Copyright (C) 2001, 2003 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by David S. Miller (davem@redhat.com)

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#define ASI_BLK_P 0xf0
#define FPRS_FEF  0x04
#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs

#ifndef XCC
#define USE_BPR
#define XCC xcc
#endif

	.register	%g2, #scratch
	.register	%g3, #scratch
	.register	%g6, #scratch

	.text
	.align	32
ENTRY(bcopy)
	sub	%o1, %o0, %o4
	mov	%o0, %g4
	cmp	%o4, %o2
	mov	%o1, %o0
	bgeu,pt	%XCC, 100f
	 mov	%g4, %o1
#ifndef USE_BPR
	srl	%o2, 0, %o2
#endif
	brnz,pn	%o2, 220f
	 add	%o0, %o2, %o0
	retl
	 nop
END(bcopy)
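
/* For reference, a hedged C sketch of the bcopy wrapper above
 * (illustrative only; backward_copy is a stand-in for the code at
 * label 220 inside memmove, not a function that exists here):
 *
 *	void bcopy (const void *src, void *dst, size_t n)
 *	{
 *	  // Unsigned compare: dst - src >= n means a forward copy
 *	  // cannot overwrite source bytes not yet read.
 *	  if ((size_t) ((char *) dst - (char *) src) >= n)
 *	    memcpy (dst, src, n);		// branches to 100f
 *	  else if (n != 0)
 *	    backward_copy (dst, src, n);	// falls into 220f
 *	}
 */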
/* Special/non-trivial issues of this code:
 *
 * 1) %o5 is preserved from VISEntryHalf to VISExitHalf
 * 2) Only low 32 FPU registers are used so that only the
 *    lower half of the FPU register set is dirtied by this
 *    code.  This is especially important in the kernel.
 * 3) This code never prefetches cachelines past the end
 *    of the source buffer.
 *
 * The cheetah's flexible spine, oversized liver, enlarged heart,
 * slender muscular body, and claws make it the swiftest hunter
 * in Africa and the fastest animal on land.  Can reach speeds
 * of up to 2.4GB per second.
 */
	.align	32
ENTRY(memcpy)
100:	/* %o0=dst, %o1=src, %o2=len */
	mov	%o0, %g5
	cmp	%o2, 0
	be,pn	%XCC, out
218:	 or	%o0, %o1, %o3
	cmp	%o2, 16
	bleu,a,pn %XCC, small_copy
	 or	%o3, %o2, %o3
	cmp	%o2, 256
	blu,pt	%XCC, medium_copy
	 andcc	%o3, 0x7, %g0

	ba,pt	%xcc, enter
	 andcc	%o0, 0x3f, %g2
	/* Here len >= 256 and condition codes reflect execution
	 * of "andcc %o0, 0x3f, %g2", done by caller.
	 */
	.align	64
enter:
	/* Is 'dst' already aligned on a 64-byte boundary? */
	be,pt	%XCC, 2f

	/* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
	 * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
	 * subtract this from 'len'.
	 */
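	/* Worked example (illustrative): if dst & 0x3f == 0x28, then
	 * %g2 = 0x28 - 0x40 = -0x18, negated to 0x18 = 24, so 24 bytes
	 * bring 'dst' to the next 64-byte boundary and 'len' is
	 * reduced by 24 up front.
	 */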
	 sub	%g2, 0x40, %g2
	sub	%g0, %g2, %g2
	sub	%o2, %g2, %o2

	/* Copy %g2 bytes from src to dst, one byte at a time. */
1:	ldub	[%o1 + 0x00], %o3
	add	%o1, 0x1, %o1
	add	%o0, 0x1, %o0
	subcc	%g2, 0x1, %g2
	bg,pt	%XCC, 1b
	 stb	%o3, [%o0 + -1]

2:	VISEntryHalf
	and	%o1, 0x7, %g1
	ba,pt	%xcc, begin
	 alignaddr %o1, %g0, %o1
	.align	64
begin:
	prefetch	[%o1 + 0x000], #one_read
	prefetch	[%o1 + 0x040], #one_read
	andn	%o2, (0x40 - 1), %o4
	prefetch	[%o1 + 0x080], #one_read
	prefetch	[%o1 + 0x0c0], #one_read
	ldd	[%o1 + 0x000], %f0
	prefetch	[%o1 + 0x100], #one_read
	ldd	[%o1 + 0x008], %f2
	prefetch	[%o1 + 0x140], #one_read
	ldd	[%o1 + 0x010], %f4
	prefetch	[%o1 + 0x180], #one_read
	faligndata	%f0, %f2, %f16
	ldd	[%o1 + 0x018], %f6
	faligndata	%f2, %f4, %f18
	ldd	[%o1 + 0x020], %f8
	faligndata	%f4, %f6, %f20
	ldd	[%o1 + 0x028], %f10
	faligndata	%f6, %f8, %f22
	ldd	[%o1 + 0x030], %f12
	faligndata	%f8, %f10, %f24
	ldd	[%o1 + 0x038], %f14
	faligndata	%f10, %f12, %f26
	ldd	[%o1 + 0x040], %f0
	sub	%o4, 0x80, %o4
	add	%o1, 0x40, %o1
	ba,pt	%xcc, loop
	 srl	%o4, 6, %o3
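
	/* Note: %o4 = (len & ~0x3f) - 0x80 at this point, so
	 * %o3 = %o4 >> 6 is two less than the number of full 64-byte
	 * blocks.  'loop' block-stores %o3 of them, streaming in the
	 * next cacheline on each pass, and 'loopfini' stores the
	 * remaining two without prefetching past the source buffer.
	 */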
	.align	64
loop:
	ldd	[%o1 + 0x008], %f2
	faligndata	%f12, %f14, %f28
	ldd	[%o1 + 0x010], %f4
	faligndata	%f14, %f0, %f30
	stda	%f16, [%o0] ASI_BLK_P
	ldd	[%o1 + 0x018], %f6
	faligndata	%f0, %f2, %f16
	ldd	[%o1 + 0x020], %f8
	faligndata	%f2, %f4, %f18
	ldd	[%o1 + 0x028], %f10
	faligndata	%f4, %f6, %f20
	ldd	[%o1 + 0x030], %f12
	faligndata	%f6, %f8, %f22
	ldd	[%o1 + 0x038], %f14
	faligndata	%f8, %f10, %f24
	ldd	[%o1 + 0x040], %f0
	prefetch	[%o1 + 0x180], #one_read
	faligndata	%f10, %f12, %f26
	subcc	%o3, 0x01, %o3
	add	%o1, 0x40, %o1
	bg,pt	%XCC, loop
	 add	%o0, 0x40, %o0
	/* Finally we copy the last full 64-byte block. */
loopfini:
	ldd	[%o1 + 0x008], %f2
	faligndata	%f12, %f14, %f28
	ldd	[%o1 + 0x010], %f4
	faligndata	%f14, %f0, %f30
	stda	%f16, [%o0] ASI_BLK_P
	ldd	[%o1 + 0x018], %f6
	faligndata	%f0, %f2, %f16
	ldd	[%o1 + 0x020], %f8
	faligndata	%f2, %f4, %f18
	ldd	[%o1 + 0x028], %f10
	faligndata	%f4, %f6, %f20
	ldd	[%o1 + 0x030], %f12
	faligndata	%f6, %f8, %f22
	ldd	[%o1 + 0x038], %f14
	faligndata	%f8, %f10, %f24
	cmp	%g1, 0
	be,pt	%XCC, 1f
	 add	%o0, 0x40, %o0
	ldd	[%o1 + 0x040], %f0
1:	faligndata	%f10, %f12, %f26
	faligndata	%f12, %f14, %f28
	faligndata	%f14, %f0, %f30
	stda	%f16, [%o0] ASI_BLK_P
	add	%o0, 0x40, %o0
	add	%o1, 0x40, %o1
	membar	#Sync
	/* Now we copy the (len modulo 64) bytes at the end.
	 * Note how we borrow the %f0 loaded above.
	 *
	 * Also notice how this code is careful not to perform a
	 * load past the end of the src buffer.
	 */
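	/* A hedged C sketch of the tail logic below (illustrative;
	 * fp_realigned_copy_8 stands for the ldd/faligndata/std
	 * triple, not a real helper):
	 *
	 *	size_t left = len & 0x3f;	// len modulo 64
	 *	// Copy all but the last 8-byte piece with FP
	 *	// re-alignment; stopping one dword early avoids an
	 *	// ldd past the end of src.
	 *	while (left >= 16)
	 *	  { fp_realigned_copy_8 (); left -= 8; }
	 *	// 'endcruft' finishes the last left (<= 15) bytes with
	 *	// integer 8/4/2/1-byte moves, or byte moves when src
	 *	// is not 8-byte aligned.
	 */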
loopend:
	and	%o2, 0x3f, %o2
	andcc	%o2, 0x38, %g2
	be,pn	%XCC, endcruft
	 subcc	%g2, 0x8, %g2
	be,pn	%XCC, endcruft
	 cmp	%g1, 0
	be,a,pt	%XCC, 1f
	 ldd	[%o1 + 0x00], %f0
1:	ldd	[%o1 + 0x08], %f2
	add	%o1, 0x8, %o1
	sub	%o2, 0x8, %o2
	subcc	%g2, 0x8, %g2
	faligndata	%f0, %f2, %f8
	std	%f8, [%o0 + 0x00]
	be,pn	%XCC, endcruft
	 add	%o0, 0x8, %o0
	ldd	[%o1 + 0x08], %f0
	add	%o1, 0x8, %o1
	sub	%o2, 0x8, %o2
	subcc	%g2, 0x8, %g2
	faligndata	%f2, %f0, %f8
	std	%f8, [%o0 + 0x00]
	bne,pn	%XCC, 1b
	 add	%o0, 0x8, %o0
	/* If anything is left, we copy it one byte at a time.
	 * Note that %g1 is (src & 0x7) saved above before the
	 * alignaddr was performed.
	 */
endcruft:
	cmp	%o2, 0
	add	%o1, %g1, %o1
	VISExitHalf
	be,pn	%XCC, out
	 sub	%o0, %o1, %o3

	andcc	%g1, 0x7, %g0
	bne,pn	%icc, small_copy_unaligned
	 andcc	%o2, 0x8, %g0
	be,pt	%icc, 1f
	 nop
	ldx	[%o1], %o5
	stx	%o5, [%o1 + %o3]
	add	%o1, 0x8, %o1

1:	andcc	%o2, 0x4, %g0
	be,pt	%icc, 1f
	 nop
	lduw	[%o1], %o5
	stw	%o5, [%o1 + %o3]
	add	%o1, 0x4, %o1

1:	andcc	%o2, 0x2, %g0
	be,pt	%icc, 1f
	 nop
	lduh	[%o1], %o5
	sth	%o5, [%o1 + %o3]
	add	%o1, 0x2, %o1

1:	andcc	%o2, 0x1, %g0
	be,pt	%icc, out
	 nop
	ldub	[%o1], %o5
	ba,pt	%xcc, out
	 stb	%o5, [%o1 + %o3]
medium_copy: /* 16 < len < 256 */
	bne,pn	%XCC, small_copy_unaligned
	 sub	%o0, %o1, %o3
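
	/* Note: %o3 = dst - src from here on, so the stores below use
	 * the single-pointer trick [%o1 + %o3] == dst while only %o1
	 * advances.  A hedged C equivalent of the aligned 8-byte loop
	 * (illustrative):
	 *
	 *	ptrdiff_t off = (char *) dst - (char *) src;	// %o3
	 *	for (size_t n8 = len & ~7UL; n8 != 0; n8 -= 8, src += 8)
	 *	  *(uint64_t *) ((char *) src + off) =
	 *	    *(const uint64_t *) src;
	 */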
medium_copy_aligned:
	andn	%o2, 0x7, %o4
	and	%o2, 0x7, %o2
1:	subcc	%o4, 0x8, %o4
	ldx	[%o1], %o5
	stx	%o5, [%o1 + %o3]
	bgu,pt	%XCC, 1b
	 add	%o1, 0x8, %o1
	andcc	%o2, 0x4, %g0
	be,pt	%XCC, 1f
	 nop
	sub	%o2, 0x4, %o2
	lduw	[%o1], %o5
	stw	%o5, [%o1 + %o3]
	add	%o1, 0x4, %o1
1:	cmp	%o2, 0
	be,pt	%XCC, out
	 nop
	ba,pt	%xcc, small_copy_unaligned
	 nop
small_copy: /* 0 < len <= 16 */
	andcc	%o3, 0x3, %g0
	bne,pn	%XCC, small_copy_unaligned
	 sub	%o0, %o1, %o3

small_copy_aligned:
	subcc	%o2, 4, %o2
	lduw	[%o1], %g1
	stw	%g1, [%o1 + %o3]
	bgu,pt	%XCC, small_copy_aligned
	 add	%o1, 4, %o1

out:	retl
	 mov	%g5, %o0
	.align	32
small_copy_unaligned:
	subcc	%o2, 1, %o2
	ldub	[%o1], %g1
	stb	%g1, [%o1 + %o3]
	bgu,pt	%XCC, small_copy_unaligned
	 add	%o1, 1, %o1
	retl
	 mov	%g5, %o0
END(memcpy)
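
/* A hedged C outline of the size dispatch memcpy implements above;
 * the helpers are stand-ins for the quoted labels, not functions
 * that exist in this file:
 *
 *	void *memcpy (void *dst, const void *src, size_t n)
 *	{
 *	  if (n == 0)
 *	    return dst;				// 'out'
 *	  if (n <= 16)
 *	    return small_copy (dst, src, n);	// word or byte loop
 *	  if (n < 256)
 *	    return medium_copy (dst, src, n);	// 8-byte loop + tail
 *	  return block_copy (dst, src, n);	// 'enter': align dst
 *	}					// to 64, then VIS
 *						// stda block stores
 */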
#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3)	\
	ldx	[%src - offset - 0x20], %t0;	\
	ldx	[%src - offset - 0x18], %t1;	\
	ldx	[%src - offset - 0x10], %t2;	\
	ldx	[%src - offset - 0x08], %t3;	\
	stw	%t0, [%dst - offset - 0x1c];	\
	srlx	%t0, 32, %t0;			\
	stw	%t0, [%dst - offset - 0x20];	\
	stw	%t1, [%dst - offset - 0x14];	\
	srlx	%t1, 32, %t1;			\
	stw	%t1, [%dst - offset - 0x18];	\
	stw	%t2, [%dst - offset - 0x0c];	\
	srlx	%t2, 32, %t2;			\
	stw	%t2, [%dst - offset - 0x10];	\
	stw	%t3, [%dst - offset - 0x04];	\
	srlx	%t3, 32, %t3;			\
	stw	%t3, [%dst - offset - 0x08];

#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3)	\
	ldx	[%src - offset - 0x20], %t0;	\
	ldx	[%src - offset - 0x18], %t1;	\
	ldx	[%src - offset - 0x10], %t2;	\
	ldx	[%src - offset - 0x08], %t3;	\
	stx	%t0, [%dst - offset - 0x20];	\
	stx	%t1, [%dst - offset - 0x18];	\
	stx	%t2, [%dst - offset - 0x10];	\
	stx	%t3, [%dst - offset - 0x08];	\
	ldx	[%src - offset - 0x40], %t0;	\
	ldx	[%src - offset - 0x38], %t1;	\
	ldx	[%src - offset - 0x30], %t2;	\
	ldx	[%src - offset - 0x28], %t3;	\
	stx	%t0, [%dst - offset - 0x40];	\
	stx	%t1, [%dst - offset - 0x38];	\
	stx	%t2, [%dst - offset - 0x30];	\
	stx	%t3, [%dst - offset - 0x28];

#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3)	\
	ldx	[%src + offset + 0x00], %t0;	\
	ldx	[%src + offset + 0x08], %t1;	\
	stw	%t0, [%dst + offset + 0x04];	\
	srlx	%t0, 32, %t2;			\
	stw	%t2, [%dst + offset + 0x00];	\
	stw	%t1, [%dst + offset + 0x0c];	\
	srlx	%t1, 32, %t3;			\
	stw	%t3, [%dst + offset + 0x08];

#define RMOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1)	\
	ldx	[%src + offset + 0x00], %t0;	\
	ldx	[%src + offset + 0x08], %t1;	\
	stx	%t0, [%dst + offset + 0x00];	\
	stx	%t1, [%dst + offset + 0x08];
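
/* Note on the RMOVE_* macros above: they are the descending-address
 * building blocks for memmove's backward copies.  RMOVE_BIGCHUNK
 * moves 32 bytes with 64-bit loads split into pairs of 32-bit
 * stores (the destination is only word aligned); RMOVE_BIGALIGNCHUNK
 * moves 64 bytes with 64-bit loads and stores.  One BIGCHUNK
 * load/store pair in C (big-endian, so the high word lands at the
 * lower address; illustrative sketch only):
 *
 *	uint64_t t0 = *(const uint64_t *) (src - off - 0x20);
 *	*(uint32_t *) (dst - off - 0x1c) = (uint32_t) t0;	// low
 *	*(uint32_t *) (dst - off - 0x20) = (uint32_t) (t0 >> 32);
 */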
	.align	32
228:	andcc	%o2, 1, %g0		/* IEU1 Group */
	be,pt	%icc, 2f+4		/* CTI */
1:	 ldub	[%o1 - 1], %o5		/* LOAD Group */
	sub	%o1, 1, %o1		/* IEU0 */
	sub	%o0, 1, %o0		/* IEU1 */
	subcc	%o2, 1, %o2		/* IEU1 Group */
	be,pn	%xcc, 229f		/* CTI */
	 stb	%o5, [%o0]		/* Store */
2:	ldub	[%o1 - 1], %o5		/* LOAD Group */
	sub	%o0, 2, %o0		/* IEU0 */
	ldub	[%o1 - 2], %g5		/* LOAD Group */
	sub	%o1, 2, %o1		/* IEU0 */
	subcc	%o2, 2, %o2		/* IEU1 Group */
	stb	%o5, [%o0 + 1]		/* Store */
	bne,pt	%xcc, 2b		/* CTI */
	 stb	%g5, [%o0]		/* Store */
229:	retl
	 mov	%g4, %o0
	.align	32
ENTRY(memmove)
	mov	%o0, %g5
#ifndef USE_BPR
	srl	%o2, 0, %o2		/* IEU1 Group */
#endif
	brz,pn	%o2, out		/* CTI Group */
	 sub	%o0, %o1, %o4		/* IEU0 */
	cmp	%o4, %o2		/* IEU1 Group */
	bgeu,pt	%XCC, 218b		/* CTI */
	 mov	%o0, %g4		/* IEU0 */
	add	%o0, %o2, %o0		/* IEU0 Group */
220:	add	%o1, %o2, %o1		/* IEU1 */
	cmp	%o2, 15			/* IEU1 Group */
	bleu,pn	%xcc, 228b		/* CTI */
	 andcc	%o0, 7, %g2		/* IEU1 Group */
	sub	%o0, %o1, %g5		/* IEU0 */
	andcc	%g5, 3, %o5		/* IEU1 Group */
	bne,pn	%xcc, 232f		/* CTI */
	 andcc	%o1, 3, %g0		/* IEU1 Group */
	be,a,pt	%xcc, 236f		/* CTI */
	 andcc	%o1, 4, %g0		/* IEU1 Group */
	andcc	%o1, 1, %g0		/* IEU1 Group */
	be,pn	%xcc, 4f		/* CTI */
	 andcc	%o1, 2, %g0		/* IEU1 Group */
	ldub	[%o1 - 1], %g2		/* Load Group */
	sub	%o1, 1, %o1		/* IEU0 */
	sub	%o0, 1, %o0		/* IEU1 */
	sub	%o2, 1, %o2		/* IEU0 Group */
	be,pn	%xcc, 5f		/* CTI Group */
	 stb	%g2, [%o0]		/* Store */
4:	lduh	[%o1 - 2], %g2		/* Load Group */
	sub	%o1, 2, %o1		/* IEU0 */
	sub	%o0, 2, %o0		/* IEU1 */
	sub	%o2, 2, %o2		/* IEU0 */
	sth	%g2, [%o0]		/* Store Group + bubble */
5:	andcc	%o1, 4, %g0		/* IEU1 */
236:	be,a,pn	%xcc, 2f		/* CTI */
	 andcc	%o2, -128, %g6		/* IEU1 Group */
	lduw	[%o1 - 4], %g5		/* Load Group */
	sub	%o1, 4, %o1		/* IEU0 */
	sub	%o0, 4, %o0		/* IEU1 */
	sub	%o2, 4, %o2		/* IEU0 Group */
	stw	%g5, [%o0]		/* Store */
	andcc	%o2, -128, %g6		/* IEU1 Group */
2:	be,pn	%xcc, 235f		/* CTI */
	 andcc	%o0, 4, %g0		/* IEU1 Group */
	be,pn	%xcc, 282f + 4		/* CTI Group */
5:	RMOVE_BIGCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
	RMOVE_BIGCHUNK(o1, o0, 0x20, g1, g3, g5, o5)
	RMOVE_BIGCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
	RMOVE_BIGCHUNK(o1, o0, 0x60, g1, g3, g5, o5)
	subcc	%g6, 128, %g6		/* IEU1 Group */
	sub	%o1, 128, %o1		/* IEU0 */
	bne,pt	%xcc, 5b		/* CTI */
	 sub	%o0, 128, %o0		/* IEU0 Group */
235:	andcc	%o2, 0x70, %g6		/* IEU1 Group */
41:	be,pn	%xcc, 280f		/* CTI */
	 andcc	%o2, 8, %g0		/* IEU1 Group */
	/* Clk1 8-( */
	/* Clk2 8-( */
	/* Clk3 8-( */
	/* Clk4 8-( */
279:	rd	%pc, %o5		/* PDU Group */
	sll	%g6, 1, %g5		/* IEU0 Group */
	sub	%o1, %g6, %o1		/* IEU1 */
	sub	%o5, %g5, %o5		/* IEU0 Group */
	jmpl	%o5 + %lo(280f - 279b), %g0	/* CTI Group brk forced */
	 sub	%o0, %g6, %o0		/* IEU0 Group */
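
	/* Note: this is a computed goto.  %g6 = len & 0x70 is the
	 * number of remaining 16-byte chunks, times 16.  Each
	 * RMOVE_LASTCHUNK below assembles to 8 instructions = 32
	 * bytes while copying 16 bytes, so the sll doubles %g6 to
	 * convert data bytes into code bytes and the jump lands
	 * 2 * %g6 bytes before label 280, executing exactly
	 * %g6 / 16 chunk copies on the way down.
	 */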
	RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g5, o5)
280:	be,pt	%xcc, 281f		/* CTI */
	 andcc	%o2, 4, %g0		/* IEU1 */
	ldx	[%o1 - 8], %g2		/* Load Group */
	sub	%o0, 8, %o0		/* IEU0 */
	stw	%g2, [%o0 + 4]		/* Store Group */
	sub	%o1, 8, %o1		/* IEU1 */
	srlx	%g2, 32, %g2		/* IEU0 Group */
	stw	%g2, [%o0]		/* Store */
281:	be,pt	%xcc, 1f		/* CTI */
	 andcc	%o2, 2, %g0		/* IEU1 Group */
	lduw	[%o1 - 4], %g2		/* Load Group */
	sub	%o1, 4, %o1		/* IEU0 */
	stw	%g2, [%o0 - 4]		/* Store Group */
	sub	%o0, 4, %o0		/* IEU0 */
1:	be,pt	%xcc, 1f		/* CTI */
	 andcc	%o2, 1, %g0		/* IEU1 Group */
	lduh	[%o1 - 2], %g2		/* Load Group */
	sub	%o1, 2, %o1		/* IEU0 */
	sth	%g2, [%o0 - 2]		/* Store Group */
	sub	%o0, 2, %o0		/* IEU0 */
1:	be,pt	%xcc, 211f		/* CTI */
	 nop				/* IEU1 */
	ldub	[%o1 - 1], %g2		/* Load Group */
	stb	%g2, [%o0 - 1]		/* Store Group + bubble */
211:	retl
	 mov	%g4, %o0
282:	RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
	RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
	subcc	%g6, 128, %g6		/* IEU1 Group */
	sub	%o1, 128, %o1		/* IEU0 */
	bne,pt	%xcc, 282b		/* CTI */
	 sub	%o0, 128, %o0		/* IEU0 Group */
	andcc	%o2, 0x70, %g6		/* IEU1 */
	be,pn	%xcc, 284f		/* CTI */
	 andcc	%o2, 8, %g0		/* IEU1 Group */
	/* Clk1 8-( */
	/* Clk2 8-( */
	/* Clk3 8-( */
	/* Clk4 8-( */
283:	rd	%pc, %o5		/* PDU Group */
	sub	%o1, %g6, %o1		/* IEU0 Group */
	sub	%o5, %g6, %o5		/* IEU1 */
	jmpl	%o5 + %lo(284f - 283b), %g0	/* CTI Group brk forced */
	 sub	%o0, %g6, %o0		/* IEU0 Group */
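
	/* Note: the same computed-goto trick as at 279b, but each
	 * RMOVE_LASTALIGNCHUNK is 4 instructions = 16 bytes of code
	 * for 16 bytes of data, so the code offset equals %g6 and no
	 * doubling is needed.
	 */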
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3)
284:	be,pt	%xcc, 285f		/* CTI Group */
	 andcc	%o2, 4, %g0		/* IEU1 */
	ldx	[%o1 - 8], %g2		/* Load Group */
	sub	%o0, 8, %o0		/* IEU0 */
	sub	%o1, 8, %o1		/* IEU0 Group */
	stx	%g2, [%o0]		/* Store */
285:	be,pt	%xcc, 1f		/* CTI */
	 andcc	%o2, 2, %g0		/* IEU1 Group */
	lduw	[%o1 - 4], %g2		/* Load Group */
	sub	%o0, 4, %o0		/* IEU0 */
	sub	%o1, 4, %o1		/* IEU0 Group */
	stw	%g2, [%o0]		/* Store */
1:	be,pt	%xcc, 1f		/* CTI */
	 andcc	%o2, 1, %g0		/* IEU1 Group */
	lduh	[%o1 - 2], %g2		/* Load Group */
	sub	%o0, 2, %o0		/* IEU0 */
	sub	%o1, 2, %o1		/* IEU0 Group */
	sth	%g2, [%o0]		/* Store */
1:	be,pt	%xcc, 1f		/* CTI */
	 nop				/* IEU0 Group */
	ldub	[%o1 - 1], %g2		/* Load Group */
	stb	%g2, [%o0 - 1]		/* Store Group + bubble */
1:	retl
	 mov	%g4, %o0
232:	brz,pt	%g2, 2f			/* CTI Group */
	 sub	%o2, %g2, %o2		/* IEU0 Group */
1:	ldub	[%o1 - 1], %g5		/* Load Group */
	sub	%o1, 1, %o1		/* IEU0 */
	sub	%o0, 1, %o0		/* IEU1 */
	subcc	%g2, 1, %g2		/* IEU1 Group */
	bne,pt	%xcc, 1b		/* CTI */
	 stb	%g5, [%o0]		/* Store */
2:	andn	%o2, 7, %g5		/* IEU0 Group */
	and	%o2, 7, %o2		/* IEU1 */
	fmovd	%f0, %f2		/* FPU */
	alignaddr	%o1, %g0, %g1	/* GRU Group */
	ldd	[%g1], %f4		/* Load Group */
1:	ldd	[%g1 - 8], %f6		/* Load Group */
	sub	%g1, 8, %g1		/* IEU0 Group */
	subcc	%g5, 8, %g5		/* IEU1 */
	faligndata	%f6, %f4, %f0	/* GRU Group */
	std	%f0, [%o0 - 8]		/* Store */
	sub	%o1, 8, %o1		/* IEU0 Group */
	be,pn	%xcc, 233f		/* CTI */
	 sub	%o0, 8, %o0		/* IEU1 */
	ldd	[%g1 - 8], %f4		/* Load Group */
	sub	%g1, 8, %g1		/* IEU0 */
	subcc	%g5, 8, %g5		/* IEU1 */
	faligndata	%f4, %f6, %f0	/* GRU Group */
	std	%f0, [%o0 - 8]		/* Store */
	sub	%o1, 8, %o1		/* IEU0 */
	bne,pn	%xcc, 1b		/* CTI Group */
	 sub	%o0, 8, %o0		/* IEU0 */
233:	brz,pn	%o2, 234f		/* CTI Group */
	 nop				/* IEU0 */
237:	ldub	[%o1 - 1], %g5		/* LOAD */
	sub	%o1, 1, %o1		/* IEU0 */
	sub	%o0, 1, %o0		/* IEU1 */
	subcc	%o2, 1, %o2		/* IEU1 */
	bne,pt	%xcc, 237b		/* CTI */
	 stb	%g5, [%o0]		/* Store Group */
234:	wr	%g0, FPRS_FEF, %fprs
	retl
	 mov	%g4, %o0
END(memmove)
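
/* The __align_cpy_N entry points below are sparc64's
 * alignment-hinted copy routines; compilers may emit calls to them
 * for block moves whose alignment is known at compile time.  This
 * file simply makes them all weak aliases of memcpy. */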
#ifdef USE_BPR
weak_alias (memcpy, __align_cpy_1)
weak_alias (memcpy, __align_cpy_2)
weak_alias (memcpy, __align_cpy_4)
weak_alias (memcpy, __align_cpy_8)
weak_alias (memcpy, __align_cpy_16)
#endif