/* Copy SIZE bytes from SRC to DEST.
   For UltraSPARC-III.
   Copyright (C) 2001, 2003 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by David S. Miller (davem@redhat.com)

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#define ASI_BLK_P 0xf0
#define FPRS_FEF  0x04
#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs

#ifndef XCC
#define USE_BPR
#define XCC xcc
#endif

        .register       %g2, #scratch
        .register       %g3, #scratch
        .register       %g6, #scratch

        .text
        .align  32

ENTRY(__bcopy)
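        /* bcopy(src, dst, n): %o0 = src, %o1 = dst on entry.  Swap the
         * arguments into memcpy order; if dst - src >= n (unsigned) a
         * forward copy is safe and we join __memcpy at 100f, otherwise
         * we take the backward-copy path at 220f in __memmove.
         */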
        sub     %o1, %o0, %o4
        mov     %o0, %g4
        cmp     %o4, %o2
        mov     %o1, %o0
        bgeu,pt %XCC, 100f
         mov    %g4, %o1
#ifndef USE_BPR
        srl     %o2, 0, %o2
#endif
        brnz,pn %o2, 220f
         add    %o0, %o2, %o0
        retl
         nop
END(__bcopy)
strong_alias(__bcopy,bcopy)
/* Special/non-trivial issues of this code:
 *
 * 1) %o5 is preserved from VISEntryHalf to VISExitHalf
 * 2) Only low 32 FPU registers are used so that only the
 *    lower half of the FPU register set is dirtied by this
 *    code.  This is especially important in the kernel.
 * 3) This code never prefetches cachelines past the end
 *    of the source buffer.
 *
 * The cheetah's flexible spine, oversized liver, enlarged heart,
 * slender muscular body, and claws make it the swiftest hunter
 * in Africa and the fastest animal on land.  Can reach speeds
 * of up to 2.4GB per second.
 */
        .align  32
ENTRY(__memcpy)
100:    /* %o0=dst, %o1=src, %o2=len */
        mov     %o0, %g5
        cmp     %o2, 0
        be,pn   %XCC, out
218:     or     %o0, %o1, %o3
        cmp     %o2, 16
        bleu,a,pn %XCC, small_copy
         or     %o3, %o2, %o3
        cmp     %o2, 256
        blu,pt  %XCC, medium_copy
         andcc  %o3, 0x7, %g0
        ba,pt   %xcc, enter
         andcc  %o0, 0x3f, %g2
        /* Here len >= 256 and condition codes reflect execution
         * of "andcc %o0, 0x3f, %g2", done just above.
         */
        .align  64
enter:
        /* Is 'dst' already aligned on a 64-byte boundary? */
        be,pt   %XCC, 2f

        /* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
         * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
         * subtract this from 'len'.
         */
         sub    %g2, 0x40, %g2
        sub     %g0, %g2, %g2
        sub     %o2, %g2, %o2

        /* Copy %g2 bytes from src to dst, one byte at a time. */
1:      ldub    [%o1 + 0x00], %o3
        add     %o1, 0x1, %o1
        add     %o0, 0x1, %o0
        subcc   %g2, 0x1, %g2
        bg,pt   %XCC, 1b
         stb    %o3, [%o0 + -1]
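        /* In C terms, the head alignment above is roughly:
         *      head = 0x40 - (dst & 0x3f);
         *      len -= head;
         *      while (head--) *dst++ = *src++;
         */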
2:      VISEntryHalf
        and     %o1, 0x7, %g1
        ba,pt   %xcc, begin
         alignaddr %o1, %g0, %o1

        .align  64
begin:
        prefetch [%o1 + 0x000], #one_read
        prefetch [%o1 + 0x040], #one_read
        andn    %o2, (0x40 - 1), %o4
        prefetch [%o1 + 0x080], #one_read
        prefetch [%o1 + 0x0c0], #one_read
        ldd     [%o1 + 0x000], %f0
        prefetch [%o1 + 0x100], #one_read
        ldd     [%o1 + 0x008], %f2
        prefetch [%o1 + 0x140], #one_read
        ldd     [%o1 + 0x010], %f4
        prefetch [%o1 + 0x180], #one_read
        faligndata %f0, %f2, %f16
        ldd     [%o1 + 0x018], %f6
        faligndata %f2, %f4, %f18
        ldd     [%o1 + 0x020], %f8
        faligndata %f4, %f6, %f20
        ldd     [%o1 + 0x028], %f10
        faligndata %f6, %f8, %f22
        ldd     [%o1 + 0x030], %f12
        faligndata %f8, %f10, %f24
        ldd     [%o1 + 0x038], %f14
        faligndata %f10, %f12, %f26
        ldd     [%o1 + 0x040], %f0
        sub     %o4, 0x80, %o4
        add     %o1, 0x40, %o1
        ba,pt   %xcc, loop
         srl    %o4, 6, %o3
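        /* Software-pipelined main loop: each iteration finishes the last
         * two aligned doublewords of the previous 64-byte block (%f28/%f30),
         * block-stores %f16-%f30 to dst with ASI_BLK_P, then computes
         * %f16-%f26 for the current block while loading the next 64 source
         * bytes and prefetching 0x180 ahead.
         */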
        .align  64
loop:
        ldd     [%o1 + 0x008], %f2
        faligndata %f12, %f14, %f28
        ldd     [%o1 + 0x010], %f4
        faligndata %f14, %f0, %f30
        stda    %f16, [%o0] ASI_BLK_P
        ldd     [%o1 + 0x018], %f6
        faligndata %f0, %f2, %f16
        ldd     [%o1 + 0x020], %f8
        faligndata %f2, %f4, %f18
        ldd     [%o1 + 0x028], %f10
        faligndata %f4, %f6, %f20
        ldd     [%o1 + 0x030], %f12
        faligndata %f6, %f8, %f22
        ldd     [%o1 + 0x038], %f14
        faligndata %f8, %f10, %f24
        ldd     [%o1 + 0x040], %f0
        prefetch [%o1 + 0x180], #one_read
        faligndata %f10, %f12, %f26
        subcc   %o3, 0x01, %o3
        add     %o1, 0x40, %o1
        bg,pt   %XCC, loop
         add    %o0, 0x40, %o0
        /* Finally we copy the last full 64-byte block. */
loopfini:
        ldd     [%o1 + 0x008], %f2
        faligndata %f12, %f14, %f28
        ldd     [%o1 + 0x010], %f4
        faligndata %f14, %f0, %f30
        stda    %f16, [%o0] ASI_BLK_P
        ldd     [%o1 + 0x018], %f6
        faligndata %f0, %f2, %f16
        ldd     [%o1 + 0x020], %f8
        faligndata %f2, %f4, %f18
        ldd     [%o1 + 0x028], %f10
        faligndata %f4, %f6, %f20
        ldd     [%o1 + 0x030], %f12
        faligndata %f6, %f8, %f22
        ldd     [%o1 + 0x038], %f14
        faligndata %f8, %f10, %f24
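        /* If the source was 8-byte aligned (%g1 == 0), faligndata passes
         * %f14 straight through, so skip the extra load below and avoid
         * reading past the end of the source buffer.
         */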
        cmp     %g1, 0
        be,pt   %XCC, 1f
         add    %o0, 0x40, %o0
        ldd     [%o1 + 0x040], %f0
1:      faligndata %f10, %f12, %f26
        faligndata %f12, %f14, %f28
        faligndata %f14, %f0, %f30
        stda    %f16, [%o0] ASI_BLK_P
        add     %o0, 0x40, %o0
        add     %o1, 0x40, %o1
        membar  #Sync

        /* Now we copy the (len modulo 64) bytes at the end.
         * Note how we borrow the %f0 loaded above.
         *
         * Also notice how this code is careful not to perform a
         * load past the end of the src buffer.
         */
loopend:
        and     %o2, 0x3f, %o2
        andcc   %o2, 0x38, %g2
        be,pn   %XCC, endcruft
         subcc  %g2, 0x8, %g2
        be,pn   %XCC, endcruft
         cmp    %g1, 0

        be,a,pt %XCC, 1f
         ldd    [%o1 + 0x00], %f0

1:      ldd     [%o1 + 0x08], %f2
        add     %o1, 0x8, %o1
        sub     %o2, 0x8, %o2
        subcc   %g2, 0x8, %g2
        faligndata %f0, %f2, %f8
        std     %f8, [%o0 + 0x00]
        be,pn   %XCC, endcruft
         add    %o0, 0x8, %o0
        ldd     [%o1 + 0x08], %f0
        add     %o1, 0x8, %o1
        sub     %o2, 0x8, %o2
        subcc   %g2, 0x8, %g2
        faligndata %f2, %f0, %f8
        std     %f8, [%o0 + 0x00]
        bne,pn  %XCC, 1b
         add    %o0, 0x8, %o0

        /* If anything is left, we copy it one byte at a time.
         * Note that %g1 is (src & 0x7) saved above before the
         * alignaddr was performed.
         */
endcruft:
        cmp     %o2, 0
        add     %o1, %g1, %o1
        VISExitHalf
        be,pn   %XCC, out
         sub    %o0, %o1, %o3
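        /* %o3 = dst - src, so [%o1 + %o3] addresses the destination while
         * %o1 walks the source in the tail copies below.
         */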
        andcc   %g1, 0x7, %g0
        bne,pn  %icc, small_copy_unaligned
         andcc  %o2, 0x8, %g0
        be,pt   %icc, 1f
         nop
        ldx     [%o1], %o5
        stx     %o5, [%o1 + %o3]
        add     %o1, 0x8, %o1

1:      andcc   %o2, 0x4, %g0
        be,pt   %icc, 1f
         nop
        lduw    [%o1], %o5
        stw     %o5, [%o1 + %o3]
        add     %o1, 0x4, %o1

1:      andcc   %o2, 0x2, %g0
        be,pt   %icc, 1f
         nop
        lduh    [%o1], %o5
        sth     %o5, [%o1 + %o3]
        add     %o1, 0x2, %o1

1:      andcc   %o2, 0x1, %g0
        be,pt   %icc, out
         nop
        ldub    [%o1], %o5
        ba,pt   %xcc, out
         stb    %o5, [%o1 + %o3]
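        /* Equivalent C for the aligned tail copy above, with rem being the
         * remaining byte count in %o2:
         *      if (rem & 8) copy 8 bytes;
         *      if (rem & 4) copy 4 bytes;
         *      if (rem & 2) copy 2 bytes;
         *      if (rem & 1) copy 1 byte;
         */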
medium_copy: /* 16 < len < 256 */
        bne,pn  %XCC, small_copy_unaligned
         sub    %o0, %o1, %o3
medium_copy_aligned:
        andn    %o2, 0x7, %o4
        and     %o2, 0x7, %o2
1:      subcc   %o4, 0x8, %o4
        ldx     [%o1], %o5
        stx     %o5, [%o1 + %o3]
        bgu,pt  %XCC, 1b
         add    %o1, 0x8, %o1
        andcc   %o2, 0x4, %g0
        be,pt   %XCC, 1f
         nop
        sub     %o2, 0x4, %o2
        lduw    [%o1], %o5
        stw     %o5, [%o1 + %o3]
        add     %o1, 0x4, %o1
1:      cmp     %o2, 0
        be,pt   %XCC, out
         nop
        ba,pt   %xcc, small_copy_unaligned
         nop

small_copy: /* 0 < len <= 16 */
        andcc   %o3, 0x3, %g0
        bne,pn  %XCC, small_copy_unaligned
         sub    %o0, %o1, %o3
small_copy_aligned:
        subcc   %o2, 4, %o2
        lduw    [%o1], %g1
        stw     %g1, [%o1 + %o3]
        bgu,pt  %XCC, small_copy_aligned
         add    %o1, 4, %o1

out:    retl
         mov    %g5, %o0

        .align  32
small_copy_unaligned:
        subcc   %o2, 1, %o2
        ldub    [%o1], %g1
        stb     %g1, [%o1 + %o3]
        bgu,pt  %XCC, small_copy_unaligned
         add    %o1, 1, %o1
        retl
         mov    %g5, %o0
END(__memcpy)
strong_alias(__memcpy,memcpy)
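/* Helpers for the backward (overlapping) copy in __memmove below:
 * RMOVE_BIGCHUNK copies 32 bytes downward with 8-byte loads and 4-byte
 * stores (destination only word aligned), RMOVE_BIGALIGNCHUNK copies
 * 64 bytes with 8-byte loads and stores, and the two LASTCHUNK variants
 * each handle 16 bytes for the computed-jump tails.
 */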
#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \
        ldx     [%src - offset - 0x20], %t0; \
        ldx     [%src - offset - 0x18], %t1; \
        ldx     [%src - offset - 0x10], %t2; \
        ldx     [%src - offset - 0x08], %t3; \
        stw     %t0, [%dst - offset - 0x1c]; \
        srlx    %t0, 32, %t0; \
        stw     %t0, [%dst - offset - 0x20]; \
        stw     %t1, [%dst - offset - 0x14]; \
        srlx    %t1, 32, %t1; \
        stw     %t1, [%dst - offset - 0x18]; \
        stw     %t2, [%dst - offset - 0x0c]; \
        srlx    %t2, 32, %t2; \
        stw     %t2, [%dst - offset - 0x10]; \
        stw     %t3, [%dst - offset - 0x04]; \
        srlx    %t3, 32, %t3; \
        stw     %t3, [%dst - offset - 0x08];

#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
        ldx     [%src - offset - 0x20], %t0; \
        ldx     [%src - offset - 0x18], %t1; \
        ldx     [%src - offset - 0x10], %t2; \
        ldx     [%src - offset - 0x08], %t3; \
        stx     %t0, [%dst - offset - 0x20]; \
        stx     %t1, [%dst - offset - 0x18]; \
        stx     %t2, [%dst - offset - 0x10]; \
        stx     %t3, [%dst - offset - 0x08]; \
        ldx     [%src - offset - 0x40], %t0; \
        ldx     [%src - offset - 0x38], %t1; \
        ldx     [%src - offset - 0x30], %t2; \
        ldx     [%src - offset - 0x28], %t3; \
        stx     %t0, [%dst - offset - 0x40]; \
        stx     %t1, [%dst - offset - 0x38]; \
        stx     %t2, [%dst - offset - 0x30]; \
        stx     %t3, [%dst - offset - 0x28];

#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
        ldx     [%src + offset + 0x00], %t0; \
        ldx     [%src + offset + 0x08], %t1; \
        stw     %t0, [%dst + offset + 0x04]; \
        srlx    %t0, 32, %t2; \
        stw     %t2, [%dst + offset + 0x00]; \
        stw     %t1, [%dst + offset + 0x0c]; \
        srlx    %t1, 32, %t3; \
        stw     %t3, [%dst + offset + 0x08];

#define RMOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1) \
        ldx     [%src + offset + 0x00], %t0; \
        ldx     [%src + offset + 0x08], %t1; \
        stx     %t0, [%dst + offset + 0x00]; \
        stx     %t1, [%dst + offset + 0x08];
        .align  32
228:    andcc   %o2, 1, %g0                     /* IEU1 Group */
        be,pt   %icc, 2f+4                      /* CTI */
1:       ldub   [%o1 - 1], %o5                  /* LOAD Group */
        sub     %o1, 1, %o1                     /* IEU0 */
        sub     %o0, 1, %o0                     /* IEU1 */
        subcc   %o2, 1, %o2                     /* IEU1 Group */
        be,pn   %xcc, 229f                      /* CTI */
         stb    %o5, [%o0]                      /* Store */
2:      ldub    [%o1 - 1], %o5                  /* LOAD Group */
        sub     %o0, 2, %o0                     /* IEU0 */
        ldub    [%o1 - 2], %g5                  /* LOAD Group */
        sub     %o1, 2, %o1                     /* IEU0 */
        subcc   %o2, 2, %o2                     /* IEU1 Group */
        stb     %o5, [%o0 + 1]                  /* Store */
        bne,pt  %xcc, 2b                        /* CTI */
         stb    %g5, [%o0]                      /* Store */
229:    retl
         mov    %g4, %o0

        .align  32
ENTRY(__memmove)
        mov     %o0, %g5
#ifndef USE_BPR
        srl     %o2, 0, %o2                     /* IEU1 Group */
#endif
        brz,pn  %o2, out                        /* CTI Group */
         sub    %o0, %o1, %o4                   /* IEU0 */
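        /* If dst - src >= len (unsigned), a forward copy cannot clobber
         * unread source bytes, so reuse the __memcpy path at 218b.
         * Otherwise copy backwards, starting from the end of both buffers.
         */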
        cmp     %o4, %o2                        /* IEU1 Group */
        bgeu,pt %XCC, 218b                      /* CTI */
         mov    %o0, %g4                        /* IEU0 */
        add     %o0, %o2, %o0                   /* IEU0 Group */
220:    add     %o1, %o2, %o1                   /* IEU1 */
        cmp     %o2, 15                         /* IEU1 Group */
        bleu,pn %xcc, 228b                      /* CTI */
         andcc  %o0, 7, %g2                     /* IEU1 Group */
        sub     %o0, %o1, %g5                   /* IEU0 */
        andcc   %g5, 3, %o5                     /* IEU1 Group */
        bne,pn  %xcc, 232f                      /* CTI */
         andcc  %o1, 3, %g0                     /* IEU1 Group */
        be,a,pt %xcc, 236f                      /* CTI */
         andcc  %o1, 4, %g0                     /* IEU1 Group */
        andcc   %o1, 1, %g0                     /* IEU1 Group */
        be,pn   %xcc, 4f                        /* CTI */
         andcc  %o1, 2, %g0                     /* IEU1 Group */
        ldub    [%o1 - 1], %g2                  /* Load Group */
        sub     %o1, 1, %o1                     /* IEU0 */
        sub     %o0, 1, %o0                     /* IEU1 */
        sub     %o2, 1, %o2                     /* IEU0 Group */
        be,pn   %xcc, 5f                        /* CTI Group */
         stb    %g2, [%o0]                      /* Store */
4:      lduh    [%o1 - 2], %g2                  /* Load Group */
        sub     %o1, 2, %o1                     /* IEU0 */
        sub     %o0, 2, %o0                     /* IEU1 */
        sub     %o2, 2, %o2                     /* IEU0 */
        sth     %g2, [%o0]                      /* Store Group + bubble */
5:      andcc   %o1, 4, %g0                     /* IEU1 */
236:    be,a,pn %xcc, 2f                        /* CTI */
         andcc  %o2, -128, %g6                  /* IEU1 Group */
        lduw    [%o1 - 4], %g5                  /* Load Group */
        sub     %o1, 4, %o1                     /* IEU0 */
        sub     %o0, 4, %o0                     /* IEU1 */
        sub     %o2, 4, %o2                     /* IEU0 Group */
        stw     %g5, [%o0]                      /* Store */
        andcc   %o2, -128, %g6                  /* IEU1 Group */
2:      be,pn   %xcc, 235f                      /* CTI */
         andcc  %o0, 4, %g0                     /* IEU1 Group */
        be,pn   %xcc, 282f + 4                  /* CTI Group */
5:      RMOVE_BIGCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
        RMOVE_BIGCHUNK(o1, o0, 0x20, g1, g3, g5, o5)
        RMOVE_BIGCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
        RMOVE_BIGCHUNK(o1, o0, 0x60, g1, g3, g5, o5)
        subcc   %g6, 128, %g6                   /* IEU1 Group */
        sub     %o1, 128, %o1                   /* IEU0 */
        bne,pt  %xcc, 5b                        /* CTI */
         sub    %o0, 128, %o0                   /* IEU0 Group */
235:    andcc   %o2, 0x70, %g6                  /* IEU1 Group */
41:     be,pn   %xcc, 280f                      /* CTI */
         andcc  %o2, 8, %g0                     /* IEU1 Group */
        /* Clk1 8-( */
        /* Clk2 8-( */
        /* Clk3 8-( */
        /* Clk4 8-( */
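        /* Computed jump into the RMOVE_LASTCHUNK table: %g6 holds the
         * remaining length's 0x70 bits.  Each RMOVE_LASTCHUNK below is
         * 8 instructions (32 bytes of code) and copies 16 bytes, so
         * branching 2 * %g6 bytes back from 280f runs exactly the chunks
         * still needed.
         */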
279:    rd      %pc, %o5                        /* PDU Group */
        sll     %g6, 1, %g5                     /* IEU0 Group */
        sub     %o1, %g6, %o1                   /* IEU1 */
        sub     %o5, %g5, %o5                   /* IEU0 Group */
        jmpl    %o5 + %lo(280f - 279b), %g0     /* CTI Group brk forced */
         sub    %o0, %g6, %o0                   /* IEU0 Group */
        RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g5, o5)
        RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g5, o5)
        RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g5, o5)
        RMOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g5, o5)
        RMOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g5, o5)
        RMOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g5, o5)
        RMOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g5, o5)
280:    be,pt   %xcc, 281f                      /* CTI */
         andcc  %o2, 4, %g0                     /* IEU1 */
        ldx     [%o1 - 8], %g2                  /* Load Group */
        sub     %o0, 8, %o0                     /* IEU0 */
        stw     %g2, [%o0 + 4]                  /* Store Group */
        sub     %o1, 8, %o1                     /* IEU1 */
        srlx    %g2, 32, %g2                    /* IEU0 Group */
        stw     %g2, [%o0]                      /* Store */
281:    be,pt   %xcc, 1f                        /* CTI */
         andcc  %o2, 2, %g0                     /* IEU1 Group */
        lduw    [%o1 - 4], %g2                  /* Load Group */
        sub     %o1, 4, %o1                     /* IEU0 */
        stw     %g2, [%o0 - 4]                  /* Store Group */
        sub     %o0, 4, %o0                     /* IEU0 */
1:      be,pt   %xcc, 1f                        /* CTI */
         andcc  %o2, 1, %g0                     /* IEU1 Group */
        lduh    [%o1 - 2], %g2                  /* Load Group */
        sub     %o1, 2, %o1                     /* IEU0 */
        sth     %g2, [%o0 - 2]                  /* Store Group */
        sub     %o0, 2, %o0                     /* IEU0 */
1:      be,pt   %xcc, 211f                      /* CTI */
         nop                                    /* IEU1 */
        ldub    [%o1 - 1], %g2                  /* Load Group */
        stb     %g2, [%o0 - 1]                  /* Store Group + bubble */
211:    retl
         mov    %g4, %o0
282:    RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
        RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
        subcc   %g6, 128, %g6                   /* IEU1 Group */
        sub     %o1, 128, %o1                   /* IEU0 */
        bne,pt  %xcc, 282b                      /* CTI */
         sub    %o0, 128, %o0                   /* IEU0 Group */
        andcc   %o2, 0x70, %g6                  /* IEU1 */
        be,pn   %xcc, 284f                      /* CTI */
         andcc  %o2, 8, %g0                     /* IEU1 Group */
        /* Clk1 8-( */
        /* Clk2 8-( */
        /* Clk3 8-( */
        /* Clk4 8-( */
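        /* Same trick for the aligned table: RMOVE_LASTALIGNCHUNK is
         * 16 bytes of code per 16 bytes copied, so the jump offset is
         * just %g6.
         */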
283:    rd      %pc, %o5                        /* PDU Group */
        sub     %o1, %g6, %o1                   /* IEU0 Group */
        sub     %o5, %g6, %o5                   /* IEU1 */
        jmpl    %o5 + %lo(284f - 283b), %g0     /* CTI Group brk forced */
         sub    %o0, %g6, %o0                   /* IEU0 Group */
        RMOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3)
        RMOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3)
        RMOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3)
        RMOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3)
        RMOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3)
        RMOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3)
        RMOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3)
284:    be,pt   %xcc, 285f                      /* CTI Group */
         andcc  %o2, 4, %g0                     /* IEU1 */
        ldx     [%o1 - 8], %g2                  /* Load Group */
        sub     %o0, 8, %o0                     /* IEU0 */
        sub     %o1, 8, %o1                     /* IEU0 Group */
        stx     %g2, [%o0]                      /* Store */
285:    be,pt   %xcc, 1f                        /* CTI */
         andcc  %o2, 2, %g0                     /* IEU1 Group */
        lduw    [%o1 - 4], %g2                  /* Load Group */
        sub     %o0, 4, %o0                     /* IEU0 */
        sub     %o1, 4, %o1                     /* IEU0 Group */
        stw     %g2, [%o0]                      /* Store */
1:      be,pt   %xcc, 1f                        /* CTI */
         andcc  %o2, 1, %g0                     /* IEU1 Group */
        lduh    [%o1 - 2], %g2                  /* Load Group */
        sub     %o0, 2, %o0                     /* IEU0 */
        sub     %o1, 2, %o1                     /* IEU0 Group */
        sth     %g2, [%o0]                      /* Store */
1:      be,pt   %xcc, 1f                        /* CTI */
         nop                                    /* IEU0 Group */
        ldub    [%o1 - 1], %g2                  /* Load Group */
        stb     %g2, [%o0 - 1]                  /* Store Group + bubble */
1:      retl
         mov    %g4, %o0
232:    brz,pt  %g2, 2f                         /* CTI Group */
         sub    %o2, %g2, %o2                   /* IEU0 Group */
1:      ldub    [%o1 - 1], %g5                  /* Load Group */
        sub     %o1, 1, %o1                     /* IEU0 */
        sub     %o0, 1, %o0                     /* IEU1 */
        subcc   %g2, 1, %g2                     /* IEU1 Group */
        bne,pt  %xcc, 1b                        /* CTI */
         stb    %g5, [%o0]                      /* Store */
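        /* dst is now 8-byte aligned; copy the middle backwards in
         * doublewords, using alignaddr/faligndata to reassemble the
         * misaligned source as both pointers walk down.
         */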
2:      andn    %o2, 7, %g5                     /* IEU0 Group */
        and     %o2, 7, %o2                     /* IEU1 */
        fmovd   %f0, %f2                        /* FPU */
        alignaddr %o1, %g0, %g1                 /* GRU Group */
        ldd     [%g1], %f4                      /* Load Group */
1:      ldd     [%g1 - 8], %f6                  /* Load Group */
        sub     %g1, 8, %g1                     /* IEU0 Group */
        subcc   %g5, 8, %g5                     /* IEU1 */
        faligndata %f6, %f4, %f0                /* GRU Group */
        std     %f0, [%o0 - 8]                  /* Store */
        sub     %o1, 8, %o1                     /* IEU0 Group */
        be,pn   %xcc, 233f                      /* CTI */
         sub    %o0, 8, %o0                     /* IEU1 */
        ldd     [%g1 - 8], %f4                  /* Load Group */
        sub     %g1, 8, %g1                     /* IEU0 */
        subcc   %g5, 8, %g5                     /* IEU1 */
        faligndata %f4, %f6, %f0                /* GRU Group */
        std     %f0, [%o0 - 8]                  /* Store */
        sub     %o1, 8, %o1                     /* IEU0 */
        bne,pn  %xcc, 1b                        /* CTI Group */
         sub    %o0, 8, %o0                     /* IEU0 */
233:    brz,pn  %o2, 234f                       /* CTI Group */
         nop                                    /* IEU0 */
237:    ldub    [%o1 - 1], %g5                  /* LOAD */
        sub     %o1, 1, %o1                     /* IEU0 */
        sub     %o0, 1, %o0                     /* IEU1 */
        subcc   %o2, 1, %o2                     /* IEU1 */
        bne,pt  %xcc, 237b                      /* CTI */
         stb    %g5, [%o0]                      /* Store Group */
234:    wr      %g0, FPRS_FEF, %fprs
        retl
         mov    %g4, %o0
END(__memmove)
strong_alias(__memmove,memmove)

#ifdef USE_BPR
weak_alias(memcpy,__align_cpy_1)
weak_alias(memcpy,__align_cpy_2)
weak_alias(memcpy,__align_cpy_4)
weak_alias(memcpy,__align_cpy_8)
weak_alias(memcpy,__align_cpy_16)
#endif