memcpy.S

/* Copy SIZE bytes from SRC to DEST.
   For UltraSPARC-III.
   Copyright (C) 2001, 2003 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by David S. Miller (davem@redhat.com)

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <sysdep.h>

#define ASI_BLK_P 0xf0
#define FPRS_FEF  0x04
#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
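/* VISEntryHalf saves the caller's %fprs in %o5 and enables the FPU by
 * setting FPRS_FEF; VISExitHalf writes back only the saved FEF bit.
 * %o5 therefore has to stay live between the two macros.
 */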
#ifndef XCC
#define USE_BPR
#define XCC xcc
#endif

	.register	%g2,#scratch
	.register	%g3,#scratch
	.register	%g6,#scratch

	.text
	.align	32

	.globl	bcopy
	.set	bcopy,__bcopy
	.hidden	__bcopy
ENTRY(__bcopy)
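	/* bcopy(src, dst, n): %o0=src, %o1=dst.  Swap the arguments into
	 * memcpy order; if dst - src >= n (unsigned) a forward copy cannot
	 * clobber unread source bytes, so tail into memcpy at 100.
	 * Otherwise fall through to the backward byte copy at 220.
	 */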
	sub		%o1, %o0, %o4
	mov		%o0, %g4
	cmp		%o4, %o2
	mov		%o1, %o0
	bgeu,pt		%XCC, 100f
	 mov		%g4, %o1
#ifndef USE_BPR
	srl		%o2, 0, %o2
#endif
	brnz,pn		%o2, 220f
	 add		%o0, %o2, %o0
	retl
	 nop
END(__bcopy)
/* Special/non-trivial issues of this code:
 *
 * 1) %o5 is preserved from VISEntryHalf to VISExitHalf
 * 2) Only low 32 FPU registers are used so that only the
 *    lower half of the FPU register set is dirtied by this
 *    code.  This is especially important in the kernel.
 * 3) This code never prefetches cachelines past the end
 *    of the source buffer.
 *
 * The cheetah's flexible spine, oversized liver, enlarged heart,
 * slender muscular body, and claws make it the swiftest hunter
 * in Africa and the fastest animal on land.  Can reach speeds
 * of up to 2.4GB per second.
 */

	.align	32
	.globl	memcpy
	.set	memcpy,__memcpy
	.hidden	__memcpy
ENTRY(__memcpy)
100:	/* %o0=dst, %o1=src, %o2=len */
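	/* Dispatch on length: len == 0 returns immediately, len <= 16 goes
	 * to small_copy, 16 < len < 256 goes to medium_copy, and larger
	 * copies use the 64-byte block loop entered at `enter'.  %o3
	 * accumulates dst | src (| len) so one test covers the alignment
	 * of all of them.
	 */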
	mov		%o0, %g5
	cmp		%o2, 0
	be,pn		%XCC, out
218:	 or		%o0, %o1, %o3
	cmp		%o2, 16
	bleu,a,pn	%XCC, small_copy
	 or		%o3, %o2, %o3
	cmp		%o2, 256
	blu,pt		%XCC, medium_copy
	 andcc		%o3, 0x7, %g0

	ba,pt		%xcc, enter
	 andcc		%o0, 0x3f, %g2

	/* Here len >= 256 and condition codes reflect execution
	 * of "andcc %o0, 0x3f, %g2", done by caller.
	 */
	.align		64
enter:
	/* Is 'dst' already aligned on a 64-byte boundary? */
	be,pt		%XCC, 2f

	/* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
	 * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
	 * subtract this from 'len'.
	 */
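	/* For example, if dst & 0x3f == 0x28, %g2 ends up as 0x18 and the
	 * byte loop below copies 24 bytes before the block loop starts.
	 */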
	 sub		%g2, 0x40, %g2
	sub		%g0, %g2, %g2
	sub		%o2, %g2, %o2

	/* Copy %g2 bytes from src to dst, one byte at a time. */
1:	ldub		[%o1 + 0x00], %o3
	add		%o1, 0x1, %o1
	add		%o0, 0x1, %o0
	subcc		%g2, 0x1, %g2
	bg,pt		%XCC, 1b
	 stb		%o3, [%o0 + -1]

2:	VISEntryHalf
	and		%o1, 0x7, %g1
	ba,pt		%xcc, begin
	 alignaddr	%o1, %g0, %o1

	.align		64
begin:
	prefetch	[%o1 + 0x000], #one_read
	prefetch	[%o1 + 0x040], #one_read
	andn		%o2, (0x40 - 1), %o4
	prefetch	[%o1 + 0x080], #one_read
	prefetch	[%o1 + 0x0c0], #one_read
	ldd		[%o1 + 0x000], %f0
	prefetch	[%o1 + 0x100], #one_read
	ldd		[%o1 + 0x008], %f2
	prefetch	[%o1 + 0x140], #one_read
	ldd		[%o1 + 0x010], %f4
	prefetch	[%o1 + 0x180], #one_read
	faligndata	%f0, %f2, %f16
	ldd		[%o1 + 0x018], %f6
	faligndata	%f2, %f4, %f18
	ldd		[%o1 + 0x020], %f8
	faligndata	%f4, %f6, %f20
	ldd		[%o1 + 0x028], %f10
	faligndata	%f6, %f8, %f22
	ldd		[%o1 + 0x030], %f12
	faligndata	%f8, %f10, %f24
	ldd		[%o1 + 0x038], %f14
	faligndata	%f10, %f12, %f26
	ldd		[%o1 + 0x040], %f0
	sub		%o4, 0x80, %o4
	add		%o1, 0x40, %o1
	ba,pt		%xcc, loop
	 srl		%o4, 6, %o3
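	/* At this point the first 64-byte block has been loaded and
	 * realigned into %f16-%f30.  %o4 = (len & ~0x3f) - 0x80, so
	 * %o3 = %o4 / 64 is the main-loop trip count; the last two full
	 * blocks are drained in loopfini.  Each iteration loads the next
	 * 64 source bytes with ldd, realigns them with faligndata, and
	 * writes the previous block with one 64-byte stda to ASI_BLK_P
	 * (a block store of %f16-%f30).
	 */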
	.align		64
loop:
	ldd		[%o1 + 0x008], %f2
	faligndata	%f12, %f14, %f28
	ldd		[%o1 + 0x010], %f4
	faligndata	%f14, %f0, %f30
	stda		%f16, [%o0] ASI_BLK_P
	ldd		[%o1 + 0x018], %f6
	faligndata	%f0, %f2, %f16
	ldd		[%o1 + 0x020], %f8
	faligndata	%f2, %f4, %f18
	ldd		[%o1 + 0x028], %f10
	faligndata	%f4, %f6, %f20
	ldd		[%o1 + 0x030], %f12
	faligndata	%f6, %f8, %f22
	ldd		[%o1 + 0x038], %f14
	faligndata	%f8, %f10, %f24
	ldd		[%o1 + 0x040], %f0
	prefetch	[%o1 + 0x180], #one_read
	faligndata	%f10, %f12, %f26
	subcc		%o3, 0x01, %o3
	add		%o1, 0x40, %o1
	bg,pt		%XCC, loop
	 add		%o0, 0x40, %o0

	/* Finally we copy the last full 64-byte block. */
loopfini:
	ldd		[%o1 + 0x008], %f2
	faligndata	%f12, %f14, %f28
	ldd		[%o1 + 0x010], %f4
	faligndata	%f14, %f0, %f30
	stda		%f16, [%o0] ASI_BLK_P
	ldd		[%o1 + 0x018], %f6
	faligndata	%f0, %f2, %f16
	ldd		[%o1 + 0x020], %f8
	faligndata	%f2, %f4, %f18
	ldd		[%o1 + 0x028], %f10
	faligndata	%f4, %f6, %f20
	ldd		[%o1 + 0x030], %f12
	faligndata	%f6, %f8, %f22
	ldd		[%o1 + 0x038], %f14
	faligndata	%f8, %f10, %f24
	cmp		%g1, 0
	be,pt		%XCC, 1f
	 add		%o0, 0x40, %o0
	ldd		[%o1 + 0x040], %f0
1:	faligndata	%f10, %f12, %f26
	faligndata	%f12, %f14, %f28
	faligndata	%f14, %f0, %f30
	stda		%f16, [%o0] ASI_BLK_P
	add		%o0, 0x40, %o0
	add		%o1, 0x40, %o1
	membar		#Sync
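	/* The ASI_BLK_P block stores above are only weakly ordered with
	 * respect to the ordinary stores that follow, so the membar #Sync
	 * drains them before the tail of the copy is written.
	 */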
	/* Now we copy the (len modulo 64) bytes at the end.
	 * Note how we borrow the %f0 loaded above.
	 *
	 * Also notice how this code is careful not to perform a
	 * load past the end of the src buffer.
	 */
loopend:
	and		%o2, 0x3f, %o2
	andcc		%o2, 0x38, %g2
	be,pn		%XCC, endcruft
	 subcc		%g2, 0x8, %g2
	be,pn		%XCC, endcruft
	 cmp		%g1, 0

	be,a,pt		%XCC, 1f
	 ldd		[%o1 + 0x00], %f0

1:	ldd		[%o1 + 0x08], %f2
	add		%o1, 0x8, %o1
	sub		%o2, 0x8, %o2
	subcc		%g2, 0x8, %g2
	faligndata	%f0, %f2, %f8
	std		%f8, [%o0 + 0x00]
	be,pn		%XCC, endcruft
	 add		%o0, 0x8, %o0
	ldd		[%o1 + 0x08], %f0
	add		%o1, 0x8, %o1
	sub		%o2, 0x8, %o2
	subcc		%g2, 0x8, %g2
	faligndata	%f2, %f0, %f8
	std		%f8, [%o0 + 0x00]
	bne,pn		%XCC, 1b
	 add		%o0, 0x8, %o0

	/* If anything is left, we copy it one byte at a time.
	 * Note that %g1 is (src & 0x7) saved above before the
	 * alignaddr was performed.
	 */
endcruft:
	cmp		%o2, 0
	add		%o1, %g1, %o1
	VISExitHalf
	be,pn		%XCC, out
	 sub		%o0, %o1, %o3

	andcc		%g1, 0x7, %g0
	bne,pn		%icc, small_copy_unaligned
	 andcc		%o2, 0x8, %g0
	be,pt		%icc, 1f
	 nop
	ldx		[%o1], %o5
	stx		%o5, [%o1 + %o3]
	add		%o1, 0x8, %o1

1:	andcc		%o2, 0x4, %g0
	be,pt		%icc, 1f
	 nop
	lduw		[%o1], %o5
	stw		%o5, [%o1 + %o3]
	add		%o1, 0x4, %o1

1:	andcc		%o2, 0x2, %g0
	be,pt		%icc, 1f
	 nop
	lduh		[%o1], %o5
	sth		%o5, [%o1 + %o3]
	add		%o1, 0x2, %o1

1:	andcc		%o2, 0x1, %g0
	be,pt		%icc, out
	 nop
	ldub		[%o1], %o5
	ba,pt		%xcc, out
	 stb		%o5, [%o1 + %o3]

medium_copy: /* 16 < len < 256 */
	bne,pn		%XCC, small_copy_unaligned
	 sub		%o0, %o1, %o3
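	/* %o3 = dst - src from here on, so a store to [%o1 + %o3] writes
	 * to the destination while only the source pointer %o1 advances.
	 */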
medium_copy_aligned:
	andn		%o2, 0x7, %o4
	and		%o2, 0x7, %o2
1:	subcc		%o4, 0x8, %o4
	ldx		[%o1], %o5
	stx		%o5, [%o1 + %o3]
	bgu,pt		%XCC, 1b
	 add		%o1, 0x8, %o1
	andcc		%o2, 0x4, %g0
	be,pt		%XCC, 1f
	 nop
	sub		%o2, 0x4, %o2
	lduw		[%o1], %o5
	stw		%o5, [%o1 + %o3]
	add		%o1, 0x4, %o1
1:	cmp		%o2, 0
	be,pt		%XCC, out
	 nop
	ba,pt		%xcc, small_copy_unaligned
	 nop

small_copy: /* 0 < len <= 16 */
	andcc		%o3, 0x3, %g0
	bne,pn		%XCC, small_copy_unaligned
	 sub		%o0, %o1, %o3
small_copy_aligned:
	subcc		%o2, 4, %o2
	lduw		[%o1], %g1
	stw		%g1, [%o1 + %o3]
	bgu,pt		%XCC, small_copy_aligned
	 add		%o1, 4, %o1

out:	retl
	 mov		%g5, %o0

	.align	32
small_copy_unaligned:
	subcc		%o2, 1, %o2
	ldub		[%o1], %g1
	stb		%g1, [%o1 + %o3]
	bgu,pt		%XCC, small_copy_unaligned
	 add		%o1, 1, %o1
	retl
	 mov		%g5, %o0
END(__memcpy)
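/* Backward-copy helpers for the overlapping memmove case below.
 * RMOVE_BIGCHUNK moves 32 bytes with 8-byte loads and 4-byte stores
 * (dst only word aligned); RMOVE_BIGALIGNCHUNK moves 64 bytes with
 * 8-byte loads and stores.  RMOVE_LASTCHUNK and RMOVE_LASTALIGNCHUNK
 * are the corresponding 16-byte tail steps.
 */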
#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldx		[%src - offset - 0x20], %t0; \
	ldx		[%src - offset - 0x18], %t1; \
	ldx		[%src - offset - 0x10], %t2; \
	ldx		[%src - offset - 0x08], %t3; \
	stw		%t0, [%dst - offset - 0x1c]; \
	srlx		%t0, 32, %t0; \
	stw		%t0, [%dst - offset - 0x20]; \
	stw		%t1, [%dst - offset - 0x14]; \
	srlx		%t1, 32, %t1; \
	stw		%t1, [%dst - offset - 0x18]; \
	stw		%t2, [%dst - offset - 0x0c]; \
	srlx		%t2, 32, %t2; \
	stw		%t2, [%dst - offset - 0x10]; \
	stw		%t3, [%dst - offset - 0x04]; \
	srlx		%t3, 32, %t3; \
	stw		%t3, [%dst - offset - 0x08];

#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldx		[%src - offset - 0x20], %t0; \
	ldx		[%src - offset - 0x18], %t1; \
	ldx		[%src - offset - 0x10], %t2; \
	ldx		[%src - offset - 0x08], %t3; \
	stx		%t0, [%dst - offset - 0x20]; \
	stx		%t1, [%dst - offset - 0x18]; \
	stx		%t2, [%dst - offset - 0x10]; \
	stx		%t3, [%dst - offset - 0x08]; \
	ldx		[%src - offset - 0x40], %t0; \
	ldx		[%src - offset - 0x38], %t1; \
	ldx		[%src - offset - 0x30], %t2; \
	ldx		[%src - offset - 0x28], %t3; \
	stx		%t0, [%dst - offset - 0x40]; \
	stx		%t1, [%dst - offset - 0x38]; \
	stx		%t2, [%dst - offset - 0x30]; \
	stx		%t3, [%dst - offset - 0x28];

#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldx		[%src + offset + 0x00], %t0; \
	ldx		[%src + offset + 0x08], %t1; \
	stw		%t0, [%dst + offset + 0x04]; \
	srlx		%t0, 32, %t2; \
	stw		%t2, [%dst + offset + 0x00]; \
	stw		%t1, [%dst + offset + 0x0c]; \
	srlx		%t1, 32, %t3; \
	stw		%t3, [%dst + offset + 0x08];

#define RMOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1) \
	ldx		[%src + offset + 0x00], %t0; \
	ldx		[%src + offset + 0x08], %t1; \
	stx		%t0, [%dst + offset + 0x00]; \
	stx		%t1, [%dst + offset + 0x08];
	.align	32
228:	andcc		%o2, 1, %g0			/* IEU1		Group */
	be,pt		%icc, 2f+4			/* CTI */
1:	ldub		[%o1 - 1], %o5			/* LOAD		Group */
	sub		%o1, 1, %o1			/* IEU0 */
	sub		%o0, 1, %o0			/* IEU1 */
	subcc		%o2, 1, %o2			/* IEU1		Group */
	be,pn		%xcc, 229f			/* CTI */
	 stb		%o5, [%o0]			/* Store */
2:	ldub		[%o1 - 1], %o5			/* LOAD		Group */
	sub		%o0, 2, %o0			/* IEU0 */
	ldub		[%o1 - 2], %g5			/* LOAD		Group */
	sub		%o1, 2, %o1			/* IEU0 */
	subcc		%o2, 2, %o2			/* IEU1		Group */
	stb		%o5, [%o0 + 1]			/* Store */
	bne,pt		%xcc, 2b			/* CTI */
	 stb		%g5, [%o0]			/* Store */
229:	retl
	 mov		%g4, %o0

	.align	32
	.globl	memmove
	.set	memmove,__memmove
	.hidden	__memmove
ENTRY(__memmove)
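	/* memmove: if dst - src >= len (unsigned), a forward copy cannot
	 * overwrite source bytes that are still to be read, so reuse the
	 * memcpy path at 218.  Otherwise point both %o0 and %o1 just past
	 * the end of their buffers and copy backward.
	 */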
	mov		%o0, %g5
#ifndef USE_BPR
	srl		%o2, 0, %o2			/* IEU1		Group */
#endif
	brz,pn		%o2, out			/* CTI		Group */
	 sub		%o0, %o1, %o4			/* IEU0 */
	cmp		%o4, %o2			/* IEU1		Group */
	bgeu,pt		%XCC, 218b			/* CTI */
	 mov		%o0, %g4			/* IEU0 */
	add		%o0, %o2, %o0			/* IEU0		Group */
220:	add		%o1, %o2, %o1			/* IEU1 */
	cmp		%o2, 15				/* IEU1		Group */
	bleu,pn		%xcc, 228b			/* CTI */
	 andcc		%o0, 7, %g2			/* IEU1		Group */
	sub		%o0, %o1, %g5			/* IEU0 */
	andcc		%g5, 3, %o5			/* IEU1		Group */
	bne,pn		%xcc, 232f			/* CTI */
	 andcc		%o1, 3, %g0			/* IEU1		Group */
	be,a,pt		%xcc, 236f			/* CTI */
	 andcc		%o1, 4, %g0			/* IEU1		Group */
	andcc		%o1, 1, %g0			/* IEU1		Group */
	be,pn		%xcc, 4f			/* CTI */
	 andcc		%o1, 2, %g0			/* IEU1		Group */
	ldub		[%o1 - 1], %g2			/* Load		Group */
	sub		%o1, 1, %o1			/* IEU0 */
	sub		%o0, 1, %o0			/* IEU1 */
	sub		%o2, 1, %o2			/* IEU0		Group */
	be,pn		%xcc, 5f			/* CTI		Group */
	 stb		%g2, [%o0]			/* Store */
4:	lduh		[%o1 - 2], %g2			/* Load		Group */
	sub		%o1, 2, %o1			/* IEU0 */
	sub		%o0, 2, %o0			/* IEU1 */
	sub		%o2, 2, %o2			/* IEU0 */
	sth		%g2, [%o0]			/* Store Group + bubble */
5:	andcc		%o1, 4, %g0			/* IEU1 */
236:	be,a,pn		%xcc, 2f			/* CTI */
	 andcc		%o2, -128, %g6			/* IEU1		Group */
	lduw		[%o1 - 4], %g5			/* Load		Group */
	sub		%o1, 4, %o1			/* IEU0 */
	sub		%o0, 4, %o0			/* IEU1 */
	sub		%o2, 4, %o2			/* IEU0		Group */
	stw		%g5, [%o0]			/* Store */
	andcc		%o2, -128, %g6			/* IEU1		Group */
2:	be,pn		%xcc, 235f			/* CTI */
	 andcc		%o0, 4, %g0			/* IEU1		Group */
	be,pn		%xcc, 282f + 4			/* CTI		Group */
5:	RMOVE_BIGCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
	RMOVE_BIGCHUNK(o1, o0, 0x20, g1, g3, g5, o5)
	RMOVE_BIGCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
	RMOVE_BIGCHUNK(o1, o0, 0x60, g1, g3, g5, o5)
	subcc		%g6, 128, %g6			/* IEU1		Group */
	sub		%o1, 128, %o1			/* IEU0 */
	bne,pt		%xcc, 5b			/* CTI */
	 sub		%o0, 128, %o0			/* IEU0		Group */
235:	andcc		%o2, 0x70, %g6			/* IEU1		Group */
41:	be,pn		%xcc, 280f			/* CTI */
	 andcc		%o2, 8, %g0			/* IEU1		Group */
	/* Clk1 8-( */
	/* Clk2 8-( */
	/* Clk3 8-( */
	/* Clk4 8-( */
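	/* Computed jump into the unrolled RMOVE_LASTCHUNK sequence below:
	 * %g6 holds the remaining tail rounded to 16 bytes, and each
	 * RMOVE_LASTCHUNK expands to 32 bytes of code, so the target is
	 * 280f minus 2 * %g6.  Source and destination are pre-biased down
	 * by %g6 so the chunks can use positive offsets.
	 */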
279:	rd		%pc, %o5			/* PDU		Group */
	sll		%g6, 1, %g5			/* IEU0		Group */
	sub		%o1, %g6, %o1			/* IEU1 */
	sub		%o5, %g5, %o5			/* IEU0		Group */
	jmpl		%o5 + %lo(280f - 279b), %g0	/* CTI		Group brk forced */
	 sub		%o0, %g6, %o0			/* IEU0		Group */
	RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g5, o5)
280:	be,pt		%xcc, 281f			/* CTI */
	 andcc		%o2, 4, %g0			/* IEU1 */
	ldx		[%o1 - 8], %g2			/* Load		Group */
	sub		%o0, 8, %o0			/* IEU0 */
	stw		%g2, [%o0 + 4]			/* Store	Group */
	sub		%o1, 8, %o1			/* IEU1 */
	srlx		%g2, 32, %g2			/* IEU0		Group */
	stw		%g2, [%o0]			/* Store */
281:	be,pt		%xcc, 1f			/* CTI */
	 andcc		%o2, 2, %g0			/* IEU1		Group */
	lduw		[%o1 - 4], %g2			/* Load		Group */
	sub		%o1, 4, %o1			/* IEU0 */
	stw		%g2, [%o0 - 4]			/* Store	Group */
	sub		%o0, 4, %o0			/* IEU0 */
1:	be,pt		%xcc, 1f			/* CTI */
	 andcc		%o2, 1, %g0			/* IEU1		Group */
	lduh		[%o1 - 2], %g2			/* Load		Group */
	sub		%o1, 2, %o1			/* IEU0 */
	sth		%g2, [%o0 - 2]			/* Store	Group */
	sub		%o0, 2, %o0			/* IEU0 */
1:	be,pt		%xcc, 211f			/* CTI */
	 nop						/* IEU1 */
	ldub		[%o1 - 1], %g2			/* Load		Group */
	stb		%g2, [%o0 - 1]			/* Store Group + bubble */
211:	retl
	 mov		%g4, %o0

282:	RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
	RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
	subcc		%g6, 128, %g6			/* IEU1		Group */
	sub		%o1, 128, %o1			/* IEU0 */
	bne,pt		%xcc, 282b			/* CTI */
	 sub		%o0, 128, %o0			/* IEU0		Group */
	andcc		%o2, 0x70, %g6			/* IEU1 */
	be,pn		%xcc, 284f			/* CTI */
	 andcc		%o2, 8, %g0			/* IEU1		Group */
	/* Clk1 8-( */
	/* Clk2 8-( */
	/* Clk3 8-( */
	/* Clk4 8-( */
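	/* Same computed-jump trick as at 279, but each
	 * RMOVE_LASTALIGNCHUNK is 16 bytes of code for 16 bytes of data,
	 * so %g6 is used as the code offset directly.
	 */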
283:	rd		%pc, %o5			/* PDU		Group */
	sub		%o1, %g6, %o1			/* IEU0		Group */
	sub		%o5, %g6, %o5			/* IEU1 */
	jmpl		%o5 + %lo(284f - 283b), %g0	/* CTI		Group brk forced */
	 sub		%o0, %g6, %o0			/* IEU0		Group */
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3)
284:	be,pt		%xcc, 285f			/* CTI		Group */
	 andcc		%o2, 4, %g0			/* IEU1 */
	ldx		[%o1 - 8], %g2			/* Load		Group */
	sub		%o0, 8, %o0			/* IEU0 */
	sub		%o1, 8, %o1			/* IEU0		Group */
	stx		%g2, [%o0]			/* Store */
285:	be,pt		%xcc, 1f			/* CTI */
	 andcc		%o2, 2, %g0			/* IEU1		Group */
	lduw		[%o1 - 4], %g2			/* Load		Group */
	sub		%o0, 4, %o0			/* IEU0 */
	sub		%o1, 4, %o1			/* IEU0		Group */
	stw		%g2, [%o0]			/* Store */
1:	be,pt		%xcc, 1f			/* CTI */
	 andcc		%o2, 1, %g0			/* IEU1		Group */
	lduh		[%o1 - 2], %g2			/* Load		Group */
	sub		%o0, 2, %o0			/* IEU0 */
	sub		%o1, 2, %o1			/* IEU0		Group */
	sth		%g2, [%o0]			/* Store */
1:	be,pt		%xcc, 1f			/* CTI */
	 nop						/* IEU0	Group */
	ldub		[%o1 - 1], %g2			/* Load		Group */
	stb		%g2, [%o0 - 1]			/* Store Group + bubble */
1:	retl
	 mov		%g4, %o0

232:	brz,pt		%g2, 2f				/* CTI		Group */
	 sub		%o2, %g2, %o2			/* IEU0		Group */
1:	ldub		[%o1 - 1], %g5			/* Load		Group */
	sub		%o1, 1, %o1			/* IEU0 */
	sub		%o0, 1, %o0			/* IEU1 */
	subcc		%g2, 1, %g2			/* IEU1		Group */
	bne,pt		%xcc, 1b			/* CTI */
	 stb		%g5, [%o0]			/* Store */
2:	andn		%o2, 7, %g5			/* IEU0		Group */
	and		%o2, 7, %o2			/* IEU1 */
	fmovd		%f0, %f2			/* FPU */
	alignaddr	%o1, %g0, %g1			/* GRU		Group */
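	/* Misaligned backward copy: %g5 counts the bytes to move as
	 * 8-byte doubles and %o2 the 0-7 byte tail.  alignaddr yields the
	 * aligned source address in %g1 and records the byte offset, so
	 * each faligndata below extracts one properly shifted doubleword
	 * while both pointers walk backward.
	 */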
	ldd		[%g1], %f4			/* Load		Group */
1:	ldd		[%g1 - 8], %f6			/* Load		Group */
	sub		%g1, 8, %g1			/* IEU0		Group */
	subcc		%g5, 8, %g5			/* IEU1 */
	faligndata	%f6, %f4, %f0			/* GRU		Group */
	std		%f0, [%o0 - 8]			/* Store */
	sub		%o1, 8, %o1			/* IEU0		Group */
	be,pn		%xcc, 233f			/* CTI */
	 sub		%o0, 8, %o0			/* IEU1 */
	ldd		[%g1 - 8], %f4			/* Load		Group */
	sub		%g1, 8, %g1			/* IEU0 */
	subcc		%g5, 8, %g5			/* IEU1 */
	faligndata	%f4, %f6, %f0			/* GRU		Group */
	std		%f0, [%o0 - 8]			/* Store */
	sub		%o1, 8, %o1			/* IEU0 */
	bne,pn		%xcc, 1b			/* CTI		Group */
	 sub		%o0, 8, %o0			/* IEU0 */
233:	brz,pn		%o2, 234f			/* CTI		Group */
	 nop						/* IEU0 */
237:	ldub		[%o1 - 1], %g5			/* LOAD */
	sub		%o1, 1, %o1			/* IEU0 */
	sub		%o0, 1, %o0			/* IEU1 */
	subcc		%o2, 1, %o2			/* IEU1 */
	bne,pt		%xcc, 237b			/* CTI */
	 stb		%g5, [%o0]			/* Store	Group */
234:	wr		%g0, FPRS_FEF, %fprs
	retl
	 mov		%g4, %o0
END(__memmove)

#ifdef USE_BPR
weak_alias (memcpy, __align_cpy_1)
weak_alias (memcpy, __align_cpy_2)
weak_alias (memcpy, __align_cpy_4)
weak_alias (memcpy, __align_cpy_8)
weak_alias (memcpy, __align_cpy_16)
#endif