/* Copy SIZE bytes from SRC to DEST.
   For UltraSPARC.
   Copyright (C) 1996, 97, 98, 99, 2003 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by David S. Miller (davem@caip.rutgers.edu) and
   Jakub Jelinek (jakub@redhat.com).

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <features.h>
#include <asm/asi.h>

#ifndef XCC
#define USE_BPR
	.register %g2, #scratch
	.register %g3, #scratch
	.register %g6, #scratch
#define XCC xcc
#endif

#define FPRS_FEF 4
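
/* VIS helper macros.  FREG_FROB takes nine consecutive double registers
   and runs eight faligndata operations on adjacent pairs, leaving one
   64-byte block of source data, shifted to the destination alignment,
   in %f48-%f62.  */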
#define FREG_FROB(f1, f2, f3, f4, f5, f6, f7, f8, f9) \
	faligndata %f1, %f2, %f48; \
	faligndata %f2, %f3, %f50; \
	faligndata %f3, %f4, %f52; \
	faligndata %f4, %f5, %f54; \
	faligndata %f5, %f6, %f56; \
	faligndata %f6, %f7, %f58; \
	faligndata %f7, %f8, %f60; \
	faligndata %f8, %f9, %f62;
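
/* MAIN_LOOP_CHUNK block-loads the next 64 bytes of source into fdest,
   advances both pointers, decrements the length, and block-stores the
   64 bytes prepared on the previous iteration from fsrc; when the
   length reaches zero it branches to jmptgt.  */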
#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt) \
	ldda [%src] %asi, %fdest; \
	add %src, 0x40, %src; \
	add %dest, 0x40, %dest; \
	subcc %len, 0x40, %len; \
	be,pn %xcc, jmptgt; \
	stda %fsrc, [%dest - 0x40] %asi;

#define LOOP_CHUNK1(src, dest, len, branch_dest) \
	MAIN_LOOP_CHUNK(src, dest, f0, f48, len, branch_dest)
#define LOOP_CHUNK2(src, dest, len, branch_dest) \
	MAIN_LOOP_CHUNK(src, dest, f16, f48, len, branch_dest)
#define LOOP_CHUNK3(src, dest, len, branch_dest) \
	MAIN_LOOP_CHUNK(src, dest, f32, f48, len, branch_dest)

#define STORE_SYNC(dest, fsrc) \
	stda %fsrc, [%dest] %asi; \
	add %dest, 0x40, %dest;

#define STORE_JUMP(dest, fsrc, target) \
	stda %fsrc, [%dest] %asi; \
	add %dest, 0x40, %dest; \
	ba,pt %xcc, target;

#define VISLOOP_PAD nop; nop; nop; nop; \
	nop; nop; nop; nop; \
	nop; nop; nop; nop; \
	nop; nop; nop;

#define FINISH_VISCHUNK(dest, f0, f1, left) \
	subcc %left, 8, %left; \
	bl,pn %xcc, 205f; \
	faligndata %f0, %f1, %f48; \
	std %f48, [%dest]; \
	add %dest, 8, %dest;

#define UNEVEN_VISCHUNK(dest, f0, f1, left) \
	subcc %left, 8, %left; \
	bl,pn %xcc, 205f; \
	fsrc1 %f0, %f1; \
	ba,a,pt %xcc, 204f;

/* Macros for non-VIS memcpy code. */
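/* MOVE_BIGCHUNK copies 32 bytes with 8-byte loads but 4-byte stores
   (the destination is only word aligned on this path); MOVE_BIGALIGNCHUNK
   copies 64 bytes with 8-byte loads and stores for the doubleword
   aligned case.  The *_LASTCHUNK macros handle the 16-byte tail pieces.  */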
#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldx [%src + offset + 0x00], %t0; \
	ldx [%src + offset + 0x08], %t1; \
	ldx [%src + offset + 0x10], %t2; \
	ldx [%src + offset + 0x18], %t3; \
	stw %t0, [%dst + offset + 0x04]; \
	srlx %t0, 32, %t0; \
	stw %t0, [%dst + offset + 0x00]; \
	stw %t1, [%dst + offset + 0x0c]; \
	srlx %t1, 32, %t1; \
	stw %t1, [%dst + offset + 0x08]; \
	stw %t2, [%dst + offset + 0x14]; \
	srlx %t2, 32, %t2; \
	stw %t2, [%dst + offset + 0x10]; \
	stw %t3, [%dst + offset + 0x1c]; \
	srlx %t3, 32, %t3; \
	stw %t3, [%dst + offset + 0x18];

#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldx [%src + offset + 0x00], %t0; \
	ldx [%src + offset + 0x08], %t1; \
	ldx [%src + offset + 0x10], %t2; \
	ldx [%src + offset + 0x18], %t3; \
	stx %t0, [%dst + offset + 0x00]; \
	stx %t1, [%dst + offset + 0x08]; \
	stx %t2, [%dst + offset + 0x10]; \
	stx %t3, [%dst + offset + 0x18]; \
	ldx [%src + offset + 0x20], %t0; \
	ldx [%src + offset + 0x28], %t1; \
	ldx [%src + offset + 0x30], %t2; \
	ldx [%src + offset + 0x38], %t3; \
	stx %t0, [%dst + offset + 0x20]; \
	stx %t1, [%dst + offset + 0x28]; \
	stx %t2, [%dst + offset + 0x30]; \
	stx %t3, [%dst + offset + 0x38];

#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldx [%src - offset - 0x10], %t0; \
	ldx [%src - offset - 0x08], %t1; \
	stw %t0, [%dst - offset - 0x0c]; \
	srlx %t0, 32, %t2; \
	stw %t2, [%dst - offset - 0x10]; \
	stw %t1, [%dst - offset - 0x04]; \
	srlx %t1, 32, %t3; \
	stw %t3, [%dst - offset - 0x08];

#define MOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1) \
	ldx [%src - offset - 0x10], %t0; \
	ldx [%src - offset - 0x08], %t1; \
	stx %t0, [%dst - offset - 0x10]; \
	stx %t1, [%dst - offset - 0x08];

/* Macros for non-VIS memmove code. */
#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldx [%src - offset - 0x20], %t0; \
	ldx [%src - offset - 0x18], %t1; \
	ldx [%src - offset - 0x10], %t2; \
	ldx [%src - offset - 0x08], %t3; \
	stw %t0, [%dst - offset - 0x1c]; \
	srlx %t0, 32, %t0; \
	stw %t0, [%dst - offset - 0x20]; \
	stw %t1, [%dst - offset - 0x14]; \
	srlx %t1, 32, %t1; \
	stw %t1, [%dst - offset - 0x18]; \
	stw %t2, [%dst - offset - 0x0c]; \
	srlx %t2, 32, %t2; \
	stw %t2, [%dst - offset - 0x10]; \
	stw %t3, [%dst - offset - 0x04]; \
	srlx %t3, 32, %t3; \
	stw %t3, [%dst - offset - 0x08];

#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldx [%src - offset - 0x20], %t0; \
	ldx [%src - offset - 0x18], %t1; \
	ldx [%src - offset - 0x10], %t2; \
	ldx [%src - offset - 0x08], %t3; \
	stx %t0, [%dst - offset - 0x20]; \
	stx %t1, [%dst - offset - 0x18]; \
	stx %t2, [%dst - offset - 0x10]; \
	stx %t3, [%dst - offset - 0x08]; \
	ldx [%src - offset - 0x40], %t0; \
	ldx [%src - offset - 0x38], %t1; \
	ldx [%src - offset - 0x30], %t2; \
	ldx [%src - offset - 0x28], %t3; \
	stx %t0, [%dst - offset - 0x40]; \
	stx %t1, [%dst - offset - 0x38]; \
	stx %t2, [%dst - offset - 0x30]; \
	stx %t3, [%dst - offset - 0x28];

#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldx [%src + offset + 0x00], %t0; \
	ldx [%src + offset + 0x08], %t1; \
	stw %t0, [%dst + offset + 0x04]; \
	srlx %t0, 32, %t2; \
	stw %t2, [%dst + offset + 0x00]; \
	stw %t1, [%dst + offset + 0x0c]; \
	srlx %t1, 32, %t3; \
	stw %t3, [%dst + offset + 0x08];

#define RMOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1) \
	ldx [%src + offset + 0x00], %t0; \
	ldx [%src + offset + 0x08], %t1; \
	stx %t0, [%dst + offset + 0x00]; \
	stx %t1, [%dst + offset + 0x08];

	.text
	.align 32

#ifdef __UCLIBC_SUSV3_LEGACY__
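/* bcopy(src, dst, n): swap the arguments into memcpy order, then either
   fall into the forward copy at 210 or, if the regions overlap so that a
   forward copy would clobber unread source bytes, into the backward
   memmove path at 220.  */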
ENTRY(bcopy)
	sub %o1, %o0, %o4 /* IEU0 Group */
	mov %o0, %g3 /* IEU1 */
	cmp %o4, %o2 /* IEU1 Group */
	mov %o1, %o0 /* IEU0 */
	bgeu,pt %XCC, 210f /* CTI */
	mov %g3, %o1 /* IEU0 Group */
#ifndef USE_BPR
	srl %o2, 0, %o2 /* IEU1 */
#endif
	brnz,pn %o2, 220f /* CTI Group */
	add %o0, %o2, %o0 /* IEU0 */
	retl
	nop
END(bcopy)
#endif

	.align 32
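/* Large-copy path (only reached with n >= 64*6 bytes).  First get the
   destination doubleword aligned byte by byte, then 64-byte aligned with
   8-byte faligndata stores, and finally drop into the VIS block-load /
   block-store loop at 202.  */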
200:	be,pt %xcc, 201f /* CTI */
	andcc %o0, 0x38, %g5 /* IEU1 Group */
	mov 8, %g1 /* IEU0 */
	sub %g1, %g2, %g2 /* IEU0 Group */
	andcc %o0, 1, %g0 /* IEU1 */
	be,pt %icc, 2f /* CTI */
	sub %o2, %g2, %o2 /* IEU0 Group */
1:	ldub [%o1], %o5 /* Load Group */
	add %o1, 1, %o1 /* IEU0 */
	add %o0, 1, %o0 /* IEU1 */
	subcc %g2, 1, %g2 /* IEU1 Group */
	be,pn %xcc, 3f /* CTI */
	stb %o5, [%o0 - 1] /* Store */
2:	ldub [%o1], %o5 /* Load Group */
	add %o0, 2, %o0 /* IEU0 */
	ldub [%o1 + 1], %g3 /* Load Group */
	subcc %g2, 2, %g2 /* IEU1 Group */
	stb %o5, [%o0 - 2] /* Store */
	add %o1, 2, %o1 /* IEU0 */
	bne,pt %xcc, 2b /* CTI Group */
	stb %g3, [%o0 - 1] /* Store */
3:	andcc %o0, 0x38, %g5 /* IEU1 Group */
201:	be,pt %icc, 202f /* CTI */
	mov 64, %g1 /* IEU0 */
	fmovd %f0, %f2 /* FPU */
	sub %g1, %g5, %g5 /* IEU0 Group */
	alignaddr %o1, %g0, %g1 /* GRU Group */
	ldd [%g1], %f4 /* Load Group */
	sub %o2, %g5, %o2 /* IEU0 */
1:	ldd [%g1 + 0x8], %f6 /* Load Group */
	add %g1, 0x8, %g1 /* IEU0 Group */
	subcc %g5, 8, %g5 /* IEU1 */
	faligndata %f4, %f6, %f0 /* GRU Group */
	std %f0, [%o0] /* Store */
	add %o1, 8, %o1 /* IEU0 Group */
	be,pn %xcc, 202f /* CTI */
	add %o0, 8, %o0 /* IEU1 */
	ldd [%g1 + 0x8], %f4 /* Load Group */
	add %g1, 8, %g1 /* IEU0 */
	subcc %g5, 8, %g5 /* IEU1 */
	faligndata %f6, %f4, %f0 /* GRU Group */
	std %f0, [%o0] /* Store */
	add %o1, 8, %o1 /* IEU0 */
	bne,pt %xcc, 1b /* CTI Group */
	add %o0, 8, %o0 /* IEU0 */
202:	membar #LoadStore | #StoreStore | #StoreLoad /* LSU Group */
	wr %g0, ASI_BLK_P, %asi /* LSU Group */
	subcc %o2, 0x40, %g6 /* IEU1 Group */
	mov %o1, %g1 /* IEU0 */
	andncc %g6, (0x40 - 1), %g6 /* IEU1 Group */
	srl %g1, 3, %g2 /* IEU0 */
	sub %o2, %g6, %g3 /* IEU0 Group */
	andn %o1, (0x40 - 1), %o1 /* IEU1 */
	and %g2, 7, %g2 /* IEU0 Group */
	andncc %g3, 0x7, %g3 /* IEU1 */
	fmovd %f0, %f2 /* FPU */
	sub %g3, 0x10, %g3 /* IEU0 Group */
	sub %o2, %g6, %o2 /* IEU1 */
	alignaddr %g1, %g0, %g0 /* GRU Group */
	add %g1, %g6, %g1 /* IEU0 Group */
	subcc %o2, %g3, %o2 /* IEU1 */
	ldda [%o1 + 0x00] %asi, %f0 /* LSU Group */
	add %g1, %g3, %g1 /* IEU0 */
	ldda [%o1 + 0x40] %asi, %f16 /* LSU Group */
	sub %g6, 0x80, %g6 /* IEU0 */
	ldda [%o1 + 0x80] %asi, %f32 /* LSU Group */
	/* Clk1 Group 8-( */
	/* Clk2 Group 8-( */
	/* Clk3 Group 8-( */
	/* Clk4 Group 8-( */
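/* Jump into one of the eight 512-byte-aligned copies of the unrolled
   VIS loop below; %g2 holds the source doubleword offset within a
   64-byte block ((src >> 3) & 7, computed above) and is scaled by 512
   to select the variant whose register rotation matches it.  */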
203:	rd %pc, %g5 /* PDU Group 8-( */
	addcc %g5, %lo(300f - 203b), %g5 /* IEU1 Group */
	sll %g2, 9, %g2 /* IEU0 */
	jmpl %g5 + %g2, %g0 /* CTI Group brk forced*/
	addcc %o1, 0xc0, %o1 /* IEU1 Group */
	.align 512 /* OK, here comes the fun part... */
300:	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16) LOOP_CHUNK1(o1, o0, g6, 301f)
	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) LOOP_CHUNK2(o1, o0, g6, 302f)
	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0) LOOP_CHUNK3(o1, o0, g6, 303f)
	b,pt %xcc, 300b+4; faligndata %f0, %f2, %f48
301:	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0) STORE_JUMP(o0, f48, 400f) membar #Sync
302:	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0) STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16) STORE_JUMP(o0, f48, 416f) membar #Sync
303:	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16) STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) STORE_JUMP(o0, f48, 432f) membar #Sync
	VISLOOP_PAD
310:	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18) LOOP_CHUNK1(o1, o0, g6, 311f)
	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) LOOP_CHUNK2(o1, o0, g6, 312f)
	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2) LOOP_CHUNK3(o1, o0, g6, 313f)
	b,pt %xcc, 310b+4; faligndata %f2, %f4, %f48
311:	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2) STORE_JUMP(o0, f48, 402f) membar #Sync
312:	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2) STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18) STORE_JUMP(o0, f48, 418f) membar #Sync
313:	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18) STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) STORE_JUMP(o0, f48, 434f) membar #Sync
	VISLOOP_PAD
320:	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20) LOOP_CHUNK1(o1, o0, g6, 321f)
	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) LOOP_CHUNK2(o1, o0, g6, 322f)
	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4) LOOP_CHUNK3(o1, o0, g6, 323f)
	b,pt %xcc, 320b+4; faligndata %f4, %f6, %f48
321:	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4) STORE_JUMP(o0, f48, 404f) membar #Sync
322:	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4) STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20) STORE_JUMP(o0, f48, 420f) membar #Sync
323:	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20) STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) STORE_JUMP(o0, f48, 436f) membar #Sync
	VISLOOP_PAD
330:	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22) LOOP_CHUNK1(o1, o0, g6, 331f)
	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) LOOP_CHUNK2(o1, o0, g6, 332f)
	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6) LOOP_CHUNK3(o1, o0, g6, 333f)
	b,pt %xcc, 330b+4; faligndata %f6, %f8, %f48
331:	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6) STORE_JUMP(o0, f48, 406f) membar #Sync
332:	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6) STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22) STORE_JUMP(o0, f48, 422f) membar #Sync
333:	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22) STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) STORE_JUMP(o0, f48, 438f) membar #Sync
	VISLOOP_PAD
340:	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24) LOOP_CHUNK1(o1, o0, g6, 341f)
	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) LOOP_CHUNK2(o1, o0, g6, 342f)
	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8) LOOP_CHUNK3(o1, o0, g6, 343f)
	b,pt %xcc, 340b+4; faligndata %f8, %f10, %f48
341:	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8) STORE_JUMP(o0, f48, 408f) membar #Sync
342:	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8) STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24) STORE_JUMP(o0, f48, 424f) membar #Sync
343:	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24) STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) STORE_JUMP(o0, f48, 440f) membar #Sync
	VISLOOP_PAD
350:	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26) LOOP_CHUNK1(o1, o0, g6, 351f)
	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) LOOP_CHUNK2(o1, o0, g6, 352f)
	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10) LOOP_CHUNK3(o1, o0, g6, 353f)
	b,pt %xcc, 350b+4; faligndata %f10, %f12, %f48
351:	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10) STORE_JUMP(o0, f48, 410f) membar #Sync
352:	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10) STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26) STORE_JUMP(o0, f48, 426f) membar #Sync
353:	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26) STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) STORE_JUMP(o0, f48, 442f) membar #Sync
	VISLOOP_PAD
360:	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28) LOOP_CHUNK1(o1, o0, g6, 361f)
	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) LOOP_CHUNK2(o1, o0, g6, 362f)
	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12) LOOP_CHUNK3(o1, o0, g6, 363f)
	b,pt %xcc, 360b+4; faligndata %f12, %f14, %f48
361:	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12) STORE_JUMP(o0, f48, 412f) membar #Sync
362:	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12) STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28) STORE_JUMP(o0, f48, 428f) membar #Sync
363:	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28) STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) STORE_JUMP(o0, f48, 444f) membar #Sync
	VISLOOP_PAD
370:	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30) LOOP_CHUNK1(o1, o0, g6, 371f)
	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) LOOP_CHUNK2(o1, o0, g6, 372f)
	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14) LOOP_CHUNK3(o1, o0, g6, 373f)
	b,pt %xcc, 370b+4; faligndata %f14, %f16, %f48
371:	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14) STORE_JUMP(o0, f48, 414f) membar #Sync
372:	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14) STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30) STORE_JUMP(o0, f48, 430f) membar #Sync
373:	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30) STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) STORE_JUMP(o0, f48, 446f) membar #Sync
	VISLOOP_PAD
400:	FINISH_VISCHUNK(o0, f0, f2, g3)
402:	FINISH_VISCHUNK(o0, f2, f4, g3)
404:	FINISH_VISCHUNK(o0, f4, f6, g3)
406:	FINISH_VISCHUNK(o0, f6, f8, g3)
408:	FINISH_VISCHUNK(o0, f8, f10, g3)
410:	FINISH_VISCHUNK(o0, f10, f12, g3)
412:	FINISH_VISCHUNK(o0, f12, f14, g3)
414:	UNEVEN_VISCHUNK(o0, f14, f0, g3)
416:	FINISH_VISCHUNK(o0, f16, f18, g3)
418:	FINISH_VISCHUNK(o0, f18, f20, g3)
420:	FINISH_VISCHUNK(o0, f20, f22, g3)
422:	FINISH_VISCHUNK(o0, f22, f24, g3)
424:	FINISH_VISCHUNK(o0, f24, f26, g3)
426:	FINISH_VISCHUNK(o0, f26, f28, g3)
428:	FINISH_VISCHUNK(o0, f28, f30, g3)
430:	UNEVEN_VISCHUNK(o0, f30, f0, g3)
432:	FINISH_VISCHUNK(o0, f32, f34, g3)
434:	FINISH_VISCHUNK(o0, f34, f36, g3)
436:	FINISH_VISCHUNK(o0, f36, f38, g3)
438:	FINISH_VISCHUNK(o0, f38, f40, g3)
440:	FINISH_VISCHUNK(o0, f40, f42, g3)
442:	FINISH_VISCHUNK(o0, f42, f44, g3)
444:	FINISH_VISCHUNK(o0, f44, f46, g3)
446:	UNEVEN_VISCHUNK(o0, f46, f0, g3)
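/* End of the block-copy loop: copy the remaining doublewords (counted
   by %g3) through %f0/%f2 with faligndata, then the last few bytes one
   at a time, write %fprs and return the original destination from %g4.  */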
204:	ldd [%o1], %f2 /* Load Group */
	add %o1, 8, %o1 /* IEU0 */
	subcc %g3, 8, %g3 /* IEU1 */
	faligndata %f0, %f2, %f8 /* GRU Group */
	std %f8, [%o0] /* Store */
	bl,pn %xcc, 205f /* CTI */
	add %o0, 8, %o0 /* IEU0 Group */
	ldd [%o1], %f0 /* Load Group */
	add %o1, 8, %o1 /* IEU0 */
	subcc %g3, 8, %g3 /* IEU1 */
	faligndata %f2, %f0, %f8 /* GRU Group */
	std %f8, [%o0] /* Store */
	bge,pt %xcc, 204b /* CTI */
	add %o0, 8, %o0 /* IEU0 Group */
205:	brz,pt %o2, 207f /* CTI Group */
	mov %g1, %o1 /* IEU0 */
206:	ldub [%o1], %g5 /* LOAD */
	add %o1, 1, %o1 /* IEU0 */
	add %o0, 1, %o0 /* IEU1 */
	subcc %o2, 1, %o2 /* IEU1 */
	bne,pt %xcc, 206b /* CTI */
	stb %g5, [%o0 - 1] /* Store Group */
207:	membar #StoreLoad | #StoreStore /* LSU Group */
	wr %g0, FPRS_FEF, %fprs
	retl
	mov %g4, %o0
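/* Small copies (n <= 15): copy a leading byte if the count is odd, then
   two bytes per iteration.  */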
208:	andcc %o2, 1, %g0 /* IEU1 Group */
	be,pt %icc, 2f+4 /* CTI */
1:	ldub [%o1], %g5 /* LOAD Group */
	add %o1, 1, %o1 /* IEU0 */
	add %o0, 1, %o0 /* IEU1 */
	subcc %o2, 1, %o2 /* IEU1 Group */
	be,pn %xcc, 209f /* CTI */
	stb %g5, [%o0 - 1] /* Store */
2:	ldub [%o1], %g5 /* LOAD Group */
	add %o0, 2, %o0 /* IEU0 */
	ldub [%o1 + 1], %o5 /* LOAD Group */
	add %o1, 2, %o1 /* IEU0 */
	subcc %o2, 2, %o2 /* IEU1 Group */
	stb %g5, [%o0 - 2] /* Store */
	bne,pt %xcc, 2b /* CTI */
	stb %o5, [%o0 - 1] /* Store */
209:	retl
	mov %g4, %o0

#ifdef USE_BPR

/* void *__align_cpy_4(void *dest, void *src, size_t n)
 * SPARC v9 SYSV ABI
 * Like memcpy, but results are undefined if (!n || ((dest | src | n) & 3))
 */
	.align 32
ENTRY(__align_cpy_4)
	mov %o0, %g4 /* IEU0 Group */
	cmp %o2, 15 /* IEU1 */
	bleu,pn %xcc, 208b /* CTI */
	cmp %o2, (64 * 6) /* IEU1 Group */
	bgeu,pn %xcc, 200b /* CTI */
	andcc %o0, 7, %g2 /* IEU1 Group */
	ba,pt %xcc, 216f /* CTI */
	andcc %o1, 4, %g0 /* IEU1 Group */
END(__align_cpy_4)

/* void *__align_cpy_8(void *dest, void *src, size_t n)
 * SPARC v9 SYSV ABI
 * Like memcpy, but results are undefined if (!n || ((dest | src | n) & 7))
 */
	.align 32
ENTRY(__align_cpy_8)
	mov %o0, %g4 /* IEU0 Group */
	cmp %o2, 15 /* IEU1 */
	bleu,pn %xcc, 208b /* CTI */
	cmp %o2, (64 * 6) /* IEU1 Group */
	bgeu,pn %xcc, 201b /* CTI */
	andcc %o0, 0x38, %g5 /* IEU1 Group */
	andcc %o2, -128, %g6 /* IEU1 Group */
	bne,a,pt %xcc, 82f + 4 /* CTI */
	ldx [%o1], %g1 /* Load */
	ba,pt %xcc, 41f /* CTI Group */
	andcc %o2, 0x70, %g6 /* IEU1 */
END(__align_cpy_8)

/* void *__align_cpy_16(void *dest, void *src, size_t n)
 * SPARC v9 SYSV ABI
 * Like memcpy, but results are undefined if (!n || ((dest | src | n) & 15))
 */
	.align 32
ENTRY(__align_cpy_16)
	mov %o0, %g4 /* IEU0 Group */
	cmp %o2, (64 * 6) /* IEU1 */
	bgeu,pn %xcc, 201b /* CTI */
	andcc %o0, 0x38, %g5 /* IEU1 Group */
	andcc %o2, -128, %g6 /* IEU1 Group */
	bne,a,pt %xcc, 82f + 4 /* CTI */
	ldx [%o1], %g1 /* Load */
	ba,pt %xcc, 41f /* CTI Group */
	andcc %o2, 0x70, %g6 /* IEU1 */
END(__align_cpy_16)

#endif

	.align 32
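/* memcpy dispatch: the original destination is kept in %g4 for the
   return value; copies of at most 15 bytes go to 208, copies of at
   least 64*6 bytes to the VIS path at 200, and everything else uses
   the integer word/doubleword loops below.  */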
ENTRY(memcpy)
210:
#ifndef USE_BPR
	srl %o2, 0, %o2 /* IEU1 Group */
#endif
	brz,pn %o2, 209b /* CTI Group */
	mov %o0, %g4 /* IEU0 */
218:	cmp %o2, 15 /* IEU1 Group */
	bleu,pn %xcc, 208b /* CTI */
	cmp %o2, (64 * 6) /* IEU1 Group */
	bgeu,pn %xcc, 200b /* CTI */
	andcc %o0, 7, %g2 /* IEU1 Group */
	sub %o0, %o1, %g5 /* IEU0 */
	andcc %g5, 3, %o5 /* IEU1 Group */
	bne,pn %xcc, 212f /* CTI */
	andcc %o1, 3, %g0 /* IEU1 Group */
	be,a,pt %xcc, 216f /* CTI */
	andcc %o1, 4, %g0 /* IEU1 Group */
	andcc %o1, 1, %g0 /* IEU1 Group */
	be,pn %xcc, 4f /* CTI */
	andcc %o1, 2, %g0 /* IEU1 Group */
	ldub [%o1], %g2 /* Load Group */
	add %o1, 1, %o1 /* IEU0 */
	add %o0, 1, %o0 /* IEU1 */
	sub %o2, 1, %o2 /* IEU0 Group */
	bne,pn %xcc, 5f /* CTI Group */
	stb %g2, [%o0 - 1] /* Store */
4:	lduh [%o1], %g2 /* Load Group */
	add %o1, 2, %o1 /* IEU0 */
	add %o0, 2, %o0 /* IEU1 */
	sub %o2, 2, %o2 /* IEU0 */
	sth %g2, [%o0 - 2] /* Store Group + bubble */
5:	andcc %o1, 4, %g0 /* IEU1 */
216:	be,a,pn %xcc, 2f /* CTI */
	andcc %o2, -128, %g6 /* IEU1 Group */
	lduw [%o1], %g5 /* Load Group */
	add %o1, 4, %o1 /* IEU0 */
	add %o0, 4, %o0 /* IEU1 */
	sub %o2, 4, %o2 /* IEU0 Group */
	stw %g5, [%o0 - 4] /* Store */
	andcc %o2, -128, %g6 /* IEU1 Group */
2:	be,pn %xcc, 215f /* CTI */
	andcc %o0, 4, %g0 /* IEU1 Group */
	be,pn %xcc, 82f + 4 /* CTI Group */
5:	MOVE_BIGCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
	MOVE_BIGCHUNK(o1, o0, 0x20, g1, g3, g5, o5)
	MOVE_BIGCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
	MOVE_BIGCHUNK(o1, o0, 0x60, g1, g3, g5, o5)
35:	subcc %g6, 128, %g6 /* IEU1 Group */
	add %o1, 128, %o1 /* IEU0 */
	bne,pt %xcc, 5b /* CTI */
	add %o0, 128, %o0 /* IEU0 Group */
215:	andcc %o2, 0x70, %g6 /* IEU1 Group */
41:	be,pn %xcc, 80f /* CTI */
	andcc %o2, 8, %g0 /* IEU1 Group */
	/* Clk1 8-( */
	/* Clk2 8-( */
	/* Clk3 8-( */
	/* Clk4 8-( */
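/* Computed jump into the MOVE_LASTCHUNK block so that exactly the
   remaining 0x70-masked byte count is copied: each MOVE_LASTCHUNK moves
   16 data bytes in 32 bytes of code, hence the count is doubled to form
   the offset back from label 80.  */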
79:	rd %pc, %o5 /* PDU Group */
	sll %g6, 1, %g5 /* IEU0 Group */
	add %o1, %g6, %o1 /* IEU1 */
	sub %o5, %g5, %o5 /* IEU0 Group */
	jmpl %o5 + %lo(80f - 79b), %g0 /* CTI Group brk forced*/
	add %o0, %g6, %o0 /* IEU0 Group */
36:	MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g5, o5)
	MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g5, o5)
	MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g5, o5)
	MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g5, o5)
	MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g5, o5)
	MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g5, o5)
	MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g5, o5)
80:	be,pt %xcc, 81f /* CTI */
	andcc %o2, 4, %g0 /* IEU1 */
	ldx [%o1], %g2 /* Load Group */
	add %o0, 8, %o0 /* IEU0 */
	stw %g2, [%o0 - 0x4] /* Store Group */
	add %o1, 8, %o1 /* IEU1 */
	srlx %g2, 32, %g2 /* IEU0 Group */
	stw %g2, [%o0 - 0x8] /* Store */
81:	be,pt %xcc, 1f /* CTI */
	andcc %o2, 2, %g0 /* IEU1 Group */
	lduw [%o1], %g2 /* Load Group */
	add %o1, 4, %o1 /* IEU0 */
	stw %g2, [%o0] /* Store Group */
	add %o0, 4, %o0 /* IEU0 */
1:	be,pt %xcc, 1f /* CTI */
	andcc %o2, 1, %g0 /* IEU1 Group */
	lduh [%o1], %g2 /* Load Group */
	add %o1, 2, %o1 /* IEU0 */
	sth %g2, [%o0] /* Store Group */
	add %o0, 2, %o0 /* IEU0 */
1:	be,pt %xcc, 211f /* CTI */
	nop /* IEU1 */
	ldub [%o1], %g2 /* Load Group */
	stb %g2, [%o0] /* Store Group + bubble */
211:	retl
	mov %g4, %o0
82:	MOVE_BIGALIGNCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
	MOVE_BIGALIGNCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
37:	subcc %g6, 128, %g6 /* IEU1 Group */
	add %o1, 128, %o1 /* IEU0 */
	bne,pt %xcc, 82b /* CTI */
	add %o0, 128, %o0 /* IEU0 Group */
	andcc %o2, 0x70, %g6 /* IEU1 */
	be,pn %xcc, 84f /* CTI */
	andcc %o2, 8, %g0 /* IEU1 Group */
	/* Clk1 8-( */
	/* Clk2 8-( */
	/* Clk3 8-( */
	/* Clk4 8-( */
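/* Same trick for the doubleword-aligned tail: MOVE_LASTALIGNCHUNK moves
   16 data bytes in 16 bytes of code, so the remaining count itself is
   the offset back from label 84.  */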
83:	rd %pc, %o5 /* PDU Group */
	add %o1, %g6, %o1 /* IEU0 Group */
	sub %o5, %g6, %o5 /* IEU1 */
	jmpl %o5 + %lo(84f - 83b), %g0 /* CTI Group brk forced*/
	add %o0, %g6, %o0 /* IEU0 Group */
38:	MOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3)
84:	be,pt %xcc, 85f /* CTI Group */
	andcc %o2, 4, %g0 /* IEU1 */
	ldx [%o1], %g2 /* Load Group */
	add %o0, 8, %o0 /* IEU0 */
	add %o1, 8, %o1 /* IEU0 Group */
	stx %g2, [%o0 - 0x8] /* Store */
85:	be,pt %xcc, 1f /* CTI */
	andcc %o2, 2, %g0 /* IEU1 Group */
	lduw [%o1], %g2 /* Load Group */
	add %o0, 4, %o0 /* IEU0 */
	add %o1, 4, %o1 /* IEU0 Group */
	stw %g2, [%o0 - 0x4] /* Store */
1:	be,pt %xcc, 1f /* CTI */
	andcc %o2, 1, %g0 /* IEU1 Group */
	lduh [%o1], %g2 /* Load Group */
	add %o0, 2, %o0 /* IEU0 */
	add %o1, 2, %o1 /* IEU0 Group */
	sth %g2, [%o0 - 0x2] /* Store */
1:	be,pt %xcc, 1f /* CTI */
	nop /* IEU0 Group */
	ldub [%o1], %g2 /* Load Group */
	stb %g2, [%o0] /* Store Group + bubble */
1:	retl
	mov %g4, %o0
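/* Source and destination are misaligned with respect to each other
   (they differ modulo 4): copy bytes until the destination is doubleword
   aligned, then move 8 bytes per iteration through the FPU with
   faligndata, and mop up the tail at 213.  */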
212:	brz,pt %g2, 2f /* CTI Group */
	mov 8, %g1 /* IEU0 */
	sub %g1, %g2, %g2 /* IEU0 Group */
	sub %o2, %g2, %o2 /* IEU0 Group */
1:	ldub [%o1], %g5 /* Load Group */
	add %o1, 1, %o1 /* IEU0 */
	add %o0, 1, %o0 /* IEU1 */
	subcc %g2, 1, %g2 /* IEU1 Group */
	bne,pt %xcc, 1b /* CTI */
	stb %g5, [%o0 - 1] /* Store */
2:	andn %o2, 7, %g5 /* IEU0 Group */
	and %o2, 7, %o2 /* IEU1 */
	fmovd %f0, %f2 /* FPU */
	alignaddr %o1, %g0, %g1 /* GRU Group */
	ldd [%g1], %f4 /* Load Group */
1:	ldd [%g1 + 0x8], %f6 /* Load Group */
	add %g1, 0x8, %g1 /* IEU0 Group */
	subcc %g5, 8, %g5 /* IEU1 */
	faligndata %f4, %f6, %f0 /* GRU Group */
	std %f0, [%o0] /* Store */
	add %o1, 8, %o1 /* IEU0 Group */
	be,pn %xcc, 213f /* CTI */
	add %o0, 8, %o0 /* IEU1 */
	ldd [%g1 + 0x8], %f4 /* Load Group */
	add %g1, 8, %g1 /* IEU0 */
	subcc %g5, 8, %g5 /* IEU1 */
	faligndata %f6, %f4, %f0 /* GRU Group */
	std %f0, [%o0] /* Store */
	add %o1, 8, %o1 /* IEU0 */
	bne,pn %xcc, 1b /* CTI Group */
	add %o0, 8, %o0 /* IEU0 */
213:	brz,pn %o2, 214f /* CTI Group */
	nop /* IEU0 */
	ldub [%o1], %g5 /* LOAD */
	add %o1, 1, %o1 /* IEU0 */
	add %o0, 1, %o0 /* IEU1 */
	subcc %o2, 1, %o2 /* IEU1 */
	bne,pt %xcc, 206b /* CTI */
	stb %g5, [%o0 - 1] /* Store Group */
214:	wr %g0, FPRS_FEF, %fprs
	retl
	mov %g4, %o0
END(memcpy)
libc_hidden_def(memcpy)

	.align 32
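/* Backward small-copy loop (n <= 15) for overlapping memmove; both
   pointers already point one past the end of their buffers here.  */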
228:	andcc %o2, 1, %g0 /* IEU1 Group */
	be,pt %icc, 2f+4 /* CTI */
1:	ldub [%o1 - 1], %o5 /* LOAD Group */
	sub %o1, 1, %o1 /* IEU0 */
	sub %o0, 1, %o0 /* IEU1 */
	subcc %o2, 1, %o2 /* IEU1 Group */
	be,pn %xcc, 229f /* CTI */
	stb %o5, [%o0] /* Store */
2:	ldub [%o1 - 1], %o5 /* LOAD Group */
	sub %o0, 2, %o0 /* IEU0 */
	ldub [%o1 - 2], %g5 /* LOAD Group */
	sub %o1, 2, %o1 /* IEU0 */
	subcc %o2, 2, %o2 /* IEU1 Group */
	stb %o5, [%o0 + 1] /* Store */
	bne,pt %xcc, 2b /* CTI */
	stb %g5, [%o0] /* Store */
229:	retl
	mov %g4, %o0
219:	retl
	nop

	.align 32
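/* memmove: when (dest - src) >= n (unsigned) a forward copy cannot
   overwrite source bytes that have not been read yet, so the memcpy
   path at 218 is reused; otherwise copy backwards, starting from the
   end of both buffers (220).  */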
ENTRY(memmove)
#ifndef USE_BPR
	srl %o2, 0, %o2 /* IEU1 Group */
#endif
	brz,pn %o2, 219b /* CTI Group */
	sub %o0, %o1, %o4 /* IEU0 */
	cmp %o4, %o2 /* IEU1 Group */
	bgeu,pt %XCC, 218b /* CTI */
	mov %o0, %g4 /* IEU0 */
	add %o0, %o2, %o0 /* IEU0 Group */
220:	add %o1, %o2, %o1 /* IEU1 */
	cmp %o2, 15 /* IEU1 Group */
	bleu,pn %xcc, 228b /* CTI */
	andcc %o0, 7, %g2 /* IEU1 Group */
	sub %o0, %o1, %g5 /* IEU0 */
	andcc %g5, 3, %o5 /* IEU1 Group */
	bne,pn %xcc, 232f /* CTI */
	andcc %o1, 3, %g0 /* IEU1 Group */
	be,a,pt %xcc, 236f /* CTI */
	andcc %o1, 4, %g0 /* IEU1 Group */
	andcc %o1, 1, %g0 /* IEU1 Group */
	be,pn %xcc, 4f /* CTI */
	andcc %o1, 2, %g0 /* IEU1 Group */
	ldub [%o1 - 1], %g2 /* Load Group */
	sub %o1, 1, %o1 /* IEU0 */
	sub %o0, 1, %o0 /* IEU1 */
	sub %o2, 1, %o2 /* IEU0 Group */
	be,pn %xcc, 5f /* CTI Group */
	stb %g2, [%o0] /* Store */
4:	lduh [%o1 - 2], %g2 /* Load Group */
	sub %o1, 2, %o1 /* IEU0 */
	sub %o0, 2, %o0 /* IEU1 */
	sub %o2, 2, %o2 /* IEU0 */
	sth %g2, [%o0] /* Store Group + bubble */
5:	andcc %o1, 4, %g0 /* IEU1 */
236:	be,a,pn %xcc, 2f /* CTI */
	andcc %o2, -128, %g6 /* IEU1 Group */
	lduw [%o1 - 4], %g5 /* Load Group */
	sub %o1, 4, %o1 /* IEU0 */
	sub %o0, 4, %o0 /* IEU1 */
	sub %o2, 4, %o2 /* IEU0 Group */
	stw %g5, [%o0] /* Store */
	andcc %o2, -128, %g6 /* IEU1 Group */
2:	be,pn %xcc, 235f /* CTI */
	andcc %o0, 4, %g0 /* IEU1 Group */
	be,pn %xcc, 282f + 4 /* CTI Group */
5:	RMOVE_BIGCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
	RMOVE_BIGCHUNK(o1, o0, 0x20, g1, g3, g5, o5)
	RMOVE_BIGCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
	RMOVE_BIGCHUNK(o1, o0, 0x60, g1, g3, g5, o5)
	subcc %g6, 128, %g6 /* IEU1 Group */
	sub %o1, 128, %o1 /* IEU0 */
	bne,pt %xcc, 5b /* CTI */
	sub %o0, 128, %o0 /* IEU0 Group */
235:	andcc %o2, 0x70, %g6 /* IEU1 Group */
41:	be,pn %xcc, 280f /* CTI */
	andcc %o2, 8, %g0 /* IEU1 Group */
	/* Clk1 8-( */
	/* Clk2 8-( */
	/* Clk3 8-( */
	/* Clk4 8-( */
279:	rd %pc, %o5 /* PDU Group */
	sll %g6, 1, %g5 /* IEU0 Group */
	sub %o1, %g6, %o1 /* IEU1 */
	sub %o5, %g5, %o5 /* IEU0 Group */
	jmpl %o5 + %lo(280f - 279b), %g0 /* CTI Group brk forced*/
	sub %o0, %g6, %o0 /* IEU0 Group */
	RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g5, o5)
280:	be,pt %xcc, 281f /* CTI */
	andcc %o2, 4, %g0 /* IEU1 */
	ldx [%o1 - 8], %g2 /* Load Group */
	sub %o0, 8, %o0 /* IEU0 */
	stw %g2, [%o0 + 4] /* Store Group */
	sub %o1, 8, %o1 /* IEU1 */
	srlx %g2, 32, %g2 /* IEU0 Group */
	stw %g2, [%o0] /* Store */
281:	be,pt %xcc, 1f /* CTI */
	andcc %o2, 2, %g0 /* IEU1 Group */
	lduw [%o1 - 4], %g2 /* Load Group */
	sub %o1, 4, %o1 /* IEU0 */
	stw %g2, [%o0 - 4] /* Store Group */
	sub %o0, 4, %o0 /* IEU0 */
1:	be,pt %xcc, 1f /* CTI */
	andcc %o2, 1, %g0 /* IEU1 Group */
	lduh [%o1 - 2], %g2 /* Load Group */
	sub %o1, 2, %o1 /* IEU0 */
	sth %g2, [%o0 - 2] /* Store Group */
	sub %o0, 2, %o0 /* IEU0 */
1:	be,pt %xcc, 211f /* CTI */
	nop /* IEU1 */
	ldub [%o1 - 1], %g2 /* Load Group */
	stb %g2, [%o0 - 1] /* Store Group + bubble */
211:	retl
	mov %g4, %o0
282:	RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
	RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
	subcc %g6, 128, %g6 /* IEU1 Group */
	sub %o1, 128, %o1 /* IEU0 */
	bne,pt %xcc, 282b /* CTI */
	sub %o0, 128, %o0 /* IEU0 Group */
	andcc %o2, 0x70, %g6 /* IEU1 */
	be,pn %xcc, 284f /* CTI */
	andcc %o2, 8, %g0 /* IEU1 Group */
	/* Clk1 8-( */
	/* Clk2 8-( */
	/* Clk3 8-( */
	/* Clk4 8-( */
283:	rd %pc, %o5 /* PDU Group */
	sub %o1, %g6, %o1 /* IEU0 Group */
	sub %o5, %g6, %o5 /* IEU1 */
	jmpl %o5 + %lo(284f - 283b), %g0 /* CTI Group brk forced*/
	sub %o0, %g6, %o0 /* IEU0 Group */
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3)
284:	be,pt %xcc, 285f /* CTI Group */
	andcc %o2, 4, %g0 /* IEU1 */
	ldx [%o1 - 8], %g2 /* Load Group */
	sub %o0, 8, %o0 /* IEU0 */
	sub %o1, 8, %o1 /* IEU0 Group */
	stx %g2, [%o0] /* Store */
285:	be,pt %xcc, 1f /* CTI */
	andcc %o2, 2, %g0 /* IEU1 Group */
	lduw [%o1 - 4], %g2 /* Load Group */
	sub %o0, 4, %o0 /* IEU0 */
	sub %o1, 4, %o1 /* IEU0 Group */
	stw %g2, [%o0] /* Store */
1:	be,pt %xcc, 1f /* CTI */
	andcc %o2, 1, %g0 /* IEU1 Group */
	lduh [%o1 - 2], %g2 /* Load Group */
	sub %o0, 2, %o0 /* IEU0 */
	sub %o1, 2, %o1 /* IEU0 Group */
	sth %g2, [%o0] /* Store */
1:	be,pt %xcc, 1f /* CTI */
	nop /* IEU0 Group */
	ldub [%o1 - 1], %g2 /* Load Group */
	stb %g2, [%o0 - 1] /* Store Group + bubble */
1:	retl
	mov %g4, %o0
232:	brz,pt %g2, 2f /* CTI Group */
	sub %o2, %g2, %o2 /* IEU0 Group */
1:	ldub [%o1 - 1], %g5 /* Load Group */
	sub %o1, 1, %o1 /* IEU0 */
	sub %o0, 1, %o0 /* IEU1 */
	subcc %g2, 1, %g2 /* IEU1 Group */
	bne,pt %xcc, 1b /* CTI */
	stb %g5, [%o0] /* Store */
2:	andn %o2, 7, %g5 /* IEU0 Group */
	and %o2, 7, %o2 /* IEU1 */
	fmovd %f0, %f2 /* FPU */
	alignaddr %o1, %g0, %g1 /* GRU Group */
	ldd [%g1], %f4 /* Load Group */
1:	ldd [%g1 - 8], %f6 /* Load Group */
	sub %g1, 8, %g1 /* IEU0 Group */
	subcc %g5, 8, %g5 /* IEU1 */
	faligndata %f6, %f4, %f0 /* GRU Group */
	std %f0, [%o0 - 8] /* Store */
	sub %o1, 8, %o1 /* IEU0 Group */
	be,pn %xcc, 233f /* CTI */
	sub %o0, 8, %o0 /* IEU1 */
	ldd [%g1 - 8], %f4 /* Load Group */
	sub %g1, 8, %g1 /* IEU0 */
	subcc %g5, 8, %g5 /* IEU1 */
	faligndata %f4, %f6, %f0 /* GRU Group */
	std %f0, [%o0 - 8] /* Store */
	sub %o1, 8, %o1 /* IEU0 */
	bne,pn %xcc, 1b /* CTI Group */
	sub %o0, 8, %o0 /* IEU0 */
233:	brz,pn %o2, 234f /* CTI Group */
	nop /* IEU0 */
237:	ldub [%o1 - 1], %g5 /* LOAD */
	sub %o1, 1, %o1 /* IEU0 */
	sub %o0, 1, %o0 /* IEU1 */
	subcc %o2, 1, %o2 /* IEU1 */
	bne,pt %xcc, 237b /* CTI */
	stb %g5, [%o0] /* Store Group */
234:	wr %g0, FPRS_FEF, %fprs
	retl
	mov %g4, %o0
END(memmove)
libc_hidden_def(memmove)

#ifdef USE_BPR
weak_alias(memcpy,__align_cpy_1)
weak_alias(memcpy,__align_cpy_2)
#endif