/* memmove.S */
/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2008 Jim Law - Iris LP  All rights reserved.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Jim Law <jlaw@irispower.com>
 *
 * intended to replace:
 *	memcpy in memcpy.c and
 *	memmove in memmove.c
 * ... in arch/microblaze/lib
 *
 *
 * assly_fastcopy.S
 *
 * Attempt at quicker memcpy and memmove for MicroBlaze
 *	Input : Operand1 in Reg r5 - destination address
 *		Operand2 in Reg r6 - source address
 *		Operand3 in Reg r7 - number of bytes to transfer
 *	Output: Result in Reg r3 - starting destination address
 *
 *
 * Explanation:
 *	Perform (possibly unaligned) copy of a block of memory
 *	between mem locations with size of xfer spec'd in bytes
 */
  31. .globl memmove
  32. .type memmove, @function
  33. .ent memmove
  34. #ifdef __MICROBLAZEEL__
  35. # define BSLLI bsrli
  36. # define BSRLI bslli
  37. #else
  38. # define BSLLI bslli
  39. # define BSRLI bsrli
  40. #endif
  41. memmove:
  42. cmpu r4, r5, r6 /* n = s - d */
  43. bgei r4, HIDDEN_JUMPTARGET(memcpy)
  44. fast_memcpy_descending:
  45. /* move d to return register as value of function */
  46. addi r3, r5, 0
  47. add r5, r5, r7 /* d = d + c */
  48. add r6, r6, r7 /* s = s + c */
  49. addi r4, r0, 4 /* n = 4 */
  50. cmpu r4, r4, r7 /* n = c - n (unsigned) */
  51. blti r4,d_xfer_end /* if n < 0, less than one word to transfer */
  52. /* transfer first 0~3 bytes to get aligned dest address */
  53. andi r4, r5, 3 /* n = d & 3 */
  54. /* if zero, destination already aligned */
  55. beqi r4,d_dalign_done
  56. rsub r7, r4, r7 /* c = c - n adjust c */
  57. d_xfer_first_loop:
  58. /* if no bytes left to transfer, transfer the bulk */
  59. beqi r4,d_dalign_done
  60. addi r6, r6, -1 /* s-- */
  61. addi r5, r5, -1 /* d-- */
  62. lbui r11, r6, 0 /* h = *s */
  63. sbi r11, r5, 0 /* *d = h */
  64. brid d_xfer_first_loop /* loop */
  65. addi r4, r4, -1 /* n-- (IN DELAY SLOT) */
  66. d_dalign_done:
  67. addi r4, r0, 32 /* n = 32 */
  68. cmpu r4, r4, r7 /* n = c - n (unsigned) */
  69. /* if n < 0, less than one block to transfer */
  70. blti r4, d_block_done
  71. d_block_xfer:
  72. andi r4, r7, 0xffffffe0 /* n = c & ~31 */
  73. rsub r7, r4, r7 /* c = c - n */
  74. andi r9, r6, 3 /* t1 = s & 3 */
  75. /* if temp != 0, unaligned transfers needed */
  76. bnei r9, d_block_unaligned
  77. d_block_aligned:
  78. addi r6, r6, -32 /* s = s - 32 */
  79. addi r5, r5, -32 /* d = d - 32 */
  80. lwi r9, r6, 28 /* t1 = *(s + 28) */
  81. lwi r10, r6, 24 /* t2 = *(s + 24) */
  82. lwi r11, r6, 20 /* t3 = *(s + 20) */
  83. lwi r12, r6, 16 /* t4 = *(s + 16) */
  84. swi r9, r5, 28 /* *(d + 28) = t1 */
  85. swi r10, r5, 24 /* *(d + 24) = t2 */
  86. swi r11, r5, 20 /* *(d + 20) = t3 */
  87. swi r12, r5, 16 /* *(d + 16) = t4 */
  88. lwi r9, r6, 12 /* t1 = *(s + 12) */
  89. lwi r10, r6, 8 /* t2 = *(s + 8) */
  90. lwi r11, r6, 4 /* t3 = *(s + 4) */
  91. lwi r12, r6, 0 /* t4 = *(s + 0) */
  92. swi r9, r5, 12 /* *(d + 12) = t1 */
  93. swi r10, r5, 8 /* *(d + 8) = t2 */
  94. swi r11, r5, 4 /* *(d + 4) = t3 */
  95. addi r4, r4, -32 /* n = n - 32 */
  96. bneid r4, d_block_aligned /* while (n) loop */
  97. swi r12, r5, 0 /* *(d + 0) = t4 (IN DELAY SLOT) */
  98. bri d_block_done
  99. d_block_unaligned:
  100. andi r8, r6, 0xfffffffc /* as = s & ~3 */
  101. rsub r6, r4, r6 /* s = s - n */
  102. lwi r11, r8, 0 /* h = *(as + 0) */
  103. addi r9, r9, -1
  104. beqi r9,d_block_u1 /* t1 was 1 => 1 byte offset */
  105. addi r9, r9, -1
  106. beqi r9,d_block_u2 /* t1 was 2 => 2 byte offset */
  107. d_block_u3:
  108. BSRLI r11, r11, 8 /* h = h >> 8 */
  109. d_bu3_loop:
  110. addi r8, r8, -32 /* as = as - 32 */
  111. addi r5, r5, -32 /* d = d - 32 */
  112. lwi r12, r8, 28 /* v = *(as + 28) */
  113. BSLLI r9, r12, 24 /* t1 = v << 24 */
  114. or r9, r11, r9 /* t1 = h | t1 */
  115. swi r9, r5, 28 /* *(d + 28) = t1 */
  116. BSRLI r11, r12, 8 /* h = v >> 8 */
  117. lwi r12, r8, 24 /* v = *(as + 24) */
  118. BSLLI r9, r12, 24 /* t1 = v << 24 */
  119. or r9, r11, r9 /* t1 = h | t1 */
  120. swi r9, r5, 24 /* *(d + 24) = t1 */
  121. BSRLI r11, r12, 8 /* h = v >> 8 */
  122. lwi r12, r8, 20 /* v = *(as + 20) */
  123. BSLLI r9, r12, 24 /* t1 = v << 24 */
  124. or r9, r11, r9 /* t1 = h | t1 */
  125. swi r9, r5, 20 /* *(d + 20) = t1 */
  126. BSRLI r11, r12, 8 /* h = v >> 8 */
  127. lwi r12, r8, 16 /* v = *(as + 16) */
  128. BSLLI r9, r12, 24 /* t1 = v << 24 */
  129. or r9, r11, r9 /* t1 = h | t1 */
  130. swi r9, r5, 16 /* *(d + 16) = t1 */
  131. BSRLI r11, r12, 8 /* h = v >> 8 */
  132. lwi r12, r8, 12 /* v = *(as + 12) */
  133. BSLLI r9, r12, 24 /* t1 = v << 24 */
  134. or r9, r11, r9 /* t1 = h | t1 */
  135. swi r9, r5, 12 /* *(d + 112) = t1 */
  136. BSRLI r11, r12, 8 /* h = v >> 8 */
  137. lwi r12, r8, 8 /* v = *(as + 8) */
  138. BSLLI r9, r12, 24 /* t1 = v << 24 */
  139. or r9, r11, r9 /* t1 = h | t1 */
  140. swi r9, r5, 8 /* *(d + 8) = t1 */
  141. BSRLI r11, r12, 8 /* h = v >> 8 */
  142. lwi r12, r8, 4 /* v = *(as + 4) */
  143. BSLLI r9, r12, 24 /* t1 = v << 24 */
  144. or r9, r11, r9 /* t1 = h | t1 */
  145. swi r9, r5, 4 /* *(d + 4) = t1 */
  146. BSRLI r11, r12, 8 /* h = v >> 8 */
  147. lwi r12, r8, 0 /* v = *(as + 0) */
  148. BSLLI r9, r12, 24 /* t1 = v << 24 */
  149. or r9, r11, r9 /* t1 = h | t1 */
  150. swi r9, r5, 0 /* *(d + 0) = t1 */
  151. addi r4, r4, -32 /* n = n - 32 */
  152. bneid r4, d_bu3_loop /* while (n) loop */
  153. BSRLI r11, r12, 8 /* h = v >> 8 (IN DELAY SLOT) */
  154. bri d_block_done
  155. d_block_u1:
  156. BSRLI r11, r11, 24 /* h = h >> 24 */
  157. d_bu1_loop:
  158. addi r8, r8, -32 /* as = as - 32 */
  159. addi r5, r5, -32 /* d = d - 32 */
  160. lwi r12, r8, 28 /* v = *(as + 28) */
  161. BSLLI r9, r12, 8 /* t1 = v << 8 */
  162. or r9, r11, r9 /* t1 = h | t1 */
  163. swi r9, r5, 28 /* *(d + 28) = t1 */
  164. BSRLI r11, r12, 24 /* h = v >> 24 */
  165. lwi r12, r8, 24 /* v = *(as + 24) */
  166. BSLLI r9, r12, 8 /* t1 = v << 8 */
  167. or r9, r11, r9 /* t1 = h | t1 */
  168. swi r9, r5, 24 /* *(d + 24) = t1 */
  169. BSRLI r11, r12, 24 /* h = v >> 24 */
  170. lwi r12, r8, 20 /* v = *(as + 20) */
  171. BSLLI r9, r12, 8 /* t1 = v << 8 */
  172. or r9, r11, r9 /* t1 = h | t1 */
  173. swi r9, r5, 20 /* *(d + 20) = t1 */
  174. BSRLI r11, r12, 24 /* h = v >> 24 */
  175. lwi r12, r8, 16 /* v = *(as + 16) */
  176. BSLLI r9, r12, 8 /* t1 = v << 8 */
  177. or r9, r11, r9 /* t1 = h | t1 */
  178. swi r9, r5, 16 /* *(d + 16) = t1 */
  179. BSRLI r11, r12, 24 /* h = v >> 24 */
  180. lwi r12, r8, 12 /* v = *(as + 12) */
  181. BSLLI r9, r12, 8 /* t1 = v << 8 */
  182. or r9, r11, r9 /* t1 = h | t1 */
  183. swi r9, r5, 12 /* *(d + 112) = t1 */
  184. BSRLI r11, r12, 24 /* h = v >> 24 */
  185. lwi r12, r8, 8 /* v = *(as + 8) */
  186. BSLLI r9, r12, 8 /* t1 = v << 8 */
  187. or r9, r11, r9 /* t1 = h | t1 */
  188. swi r9, r5, 8 /* *(d + 8) = t1 */
  189. BSRLI r11, r12, 24 /* h = v >> 24 */
  190. lwi r12, r8, 4 /* v = *(as + 4) */
  191. BSLLI r9, r12, 8 /* t1 = v << 8 */
  192. or r9, r11, r9 /* t1 = h | t1 */
  193. swi r9, r5, 4 /* *(d + 4) = t1 */
  194. BSRLI r11, r12, 24 /* h = v >> 24 */
  195. lwi r12, r8, 0 /* v = *(as + 0) */
  196. BSLLI r9, r12, 8 /* t1 = v << 8 */
  197. or r9, r11, r9 /* t1 = h | t1 */
  198. swi r9, r5, 0 /* *(d + 0) = t1 */
  199. addi r4, r4, -32 /* n = n - 32 */
  200. bneid r4, d_bu1_loop /* while (n) loop */
  201. BSRLI r11, r12, 24 /* h = v >> 24 (IN DELAY SLOT) */
  202. bri d_block_done
  203. d_block_u2:
  204. BSRLI r11, r11, 16 /* h = h >> 16 */
  205. d_bu2_loop:
  206. addi r8, r8, -32 /* as = as - 32 */
  207. addi r5, r5, -32 /* d = d - 32 */
  208. lwi r12, r8, 28 /* v = *(as + 28) */
  209. BSLLI r9, r12, 16 /* t1 = v << 16 */
  210. or r9, r11, r9 /* t1 = h | t1 */
  211. swi r9, r5, 28 /* *(d + 28) = t1 */
  212. BSRLI r11, r12, 16 /* h = v >> 16 */
  213. lwi r12, r8, 24 /* v = *(as + 24) */
  214. BSLLI r9, r12, 16 /* t1 = v << 16 */
  215. or r9, r11, r9 /* t1 = h | t1 */
  216. swi r9, r5, 24 /* *(d + 24) = t1 */
  217. BSRLI r11, r12, 16 /* h = v >> 16 */
  218. lwi r12, r8, 20 /* v = *(as + 20) */
  219. BSLLI r9, r12, 16 /* t1 = v << 16 */
  220. or r9, r11, r9 /* t1 = h | t1 */
  221. swi r9, r5, 20 /* *(d + 20) = t1 */
  222. BSRLI r11, r12, 16 /* h = v >> 16 */
  223. lwi r12, r8, 16 /* v = *(as + 16) */
  224. BSLLI r9, r12, 16 /* t1 = v << 16 */
  225. or r9, r11, r9 /* t1 = h | t1 */
  226. swi r9, r5, 16 /* *(d + 16) = t1 */
  227. BSRLI r11, r12, 16 /* h = v >> 16 */
  228. lwi r12, r8, 12 /* v = *(as + 12) */
  229. BSLLI r9, r12, 16 /* t1 = v << 16 */
  230. or r9, r11, r9 /* t1 = h | t1 */
  231. swi r9, r5, 12 /* *(d + 112) = t1 */
  232. BSRLI r11, r12, 16 /* h = v >> 16 */
  233. lwi r12, r8, 8 /* v = *(as + 8) */
  234. BSLLI r9, r12, 16 /* t1 = v << 16 */
  235. or r9, r11, r9 /* t1 = h | t1 */
  236. swi r9, r5, 8 /* *(d + 8) = t1 */
  237. BSRLI r11, r12, 16 /* h = v >> 16 */
  238. lwi r12, r8, 4 /* v = *(as + 4) */
  239. BSLLI r9, r12, 16 /* t1 = v << 16 */
  240. or r9, r11, r9 /* t1 = h | t1 */
  241. swi r9, r5, 4 /* *(d + 4) = t1 */
  242. BSRLI r11, r12, 16 /* h = v >> 16 */
  243. lwi r12, r8, 0 /* v = *(as + 0) */
  244. BSLLI r9, r12, 16 /* t1 = v << 16 */
  245. or r9, r11, r9 /* t1 = h | t1 */
  246. swi r9, r5, 0 /* *(d + 0) = t1 */
  247. addi r4, r4, -32 /* n = n - 32 */
  248. bneid r4, d_bu2_loop /* while (n) loop */
  249. BSRLI r11, r12, 16 /* h = v >> 16 (IN DELAY SLOT) */
  250. d_block_done:
  251. addi r4, r0, 4 /* n = 4 */
  252. cmpu r4, r4, r7 /* n = c - n (unsigned) */
  253. blti r4,d_xfer_end /* if n < 0, less than one word to transfer */
  254. d_word_xfer:
  255. andi r4, r7, 0xfffffffc /* n = c & ~3 */
  256. rsub r5, r4, r5 /* d = d - n */
  257. rsub r6, r4, r6 /* s = s - n */
  258. rsub r7, r4, r7 /* c = c - n */
  259. andi r9, r6, 3 /* t1 = s & 3 */
  260. /* if temp != 0, unaligned transfers needed */
  261. bnei r9, d_word_unaligned
  262. d_word_aligned:
  263. addi r4, r4,-4 /* n-- */
  264. lw r9, r6, r4 /* t1 = *(s+n) */
  265. bneid r4, d_word_aligned /* loop */
  266. sw r9, r5, r4 /* *(d+n) = t1 (IN DELAY SLOT) */
  267. bri d_word_done
  268. d_word_unaligned:
  269. andi r8, r6, 0xfffffffc /* as = s & ~3 */
  270. lw r11, r8, r4 /* h = *(as + n) */
  271. addi r9, r9, -1
  272. beqi r9,d_word_u1 /* t1 was 1 => 1 byte offset */
  273. addi r9, r9, -1
  274. beqi r9,d_word_u2 /* t1 was 2 => 2 byte offset */
  275. d_word_u3:
  276. BSRLI r11, r11, 8 /* h = h >> 8 */
  277. d_wu3_loop:
  278. addi r4, r4,-4 /* n = n - 4 */
  279. lw r12, r8, r4 /* v = *(as + n) */
  280. BSLLI r9, r12, 24 /* t1 = v << 24 */
  281. or r9, r11, r9 /* t1 = h | t1 */
  282. sw r9, r5, r4 /* *(d + n) = t1 */
  283. bneid r4, d_wu3_loop /* while (n) loop */
  284. BSRLI r11, r12, 8 /* h = v >> 8 (IN DELAY SLOT) */
  285. bri d_word_done
  286. d_word_u1:
  287. BSRLI r11, r11, 24 /* h = h >> 24 */
  288. d_wu1_loop:
  289. addi r4, r4,-4 /* n = n - 4 */
  290. lw r12, r8, r4 /* v = *(as + n) */
  291. BSLLI r9, r12, 8 /* t1 = v << 8 */
  292. or r9, r11, r9 /* t1 = h | t1 */
  293. sw r9, r5, r4 /* *(d + n) = t1 */
  294. bneid r4, d_wu1_loop /* while (n) loop */
  295. BSRLI r11, r12, 24 /* h = v >> 24 (IN DELAY SLOT) */
  296. bri d_word_done
  297. d_word_u2:
  298. BSRLI r11, r11, 16 /* h = h >> 16 */
  299. d_wu2_loop:
  300. addi r4, r4,-4 /* n = n - 4 */
  301. lw r12, r8, r4 /* v = *(as + n) */
  302. BSLLI r9, r12, 16 /* t1 = v << 16 */
  303. or r9, r11, r9 /* t1 = h | t1 */
  304. sw r9, r5, r4 /* *(d + n) = t1 */
  305. bneid r4, d_wu2_loop /* while (n) loop */
  306. BSRLI r11, r12, 16 /* h = v >> 16 (IN DELAY SLOT) */
  307. d_word_done:
  308. d_xfer_end:
  309. d_xfer_end_loop:
  310. beqi r7, a_done /* while (c) */
  311. addi r6, r6, -1 /* s-- */
  312. lbui r9, r6, 0 /* t1 = *s */
  313. addi r5, r5, -1 /* d-- */
  314. sbi r9, r5, 0 /* *d = t1 */
  315. brid d_xfer_end_loop /* loop */
  316. addi r7, r7, -1 /* c-- (IN DELAY SLOT) */
  317. a_done:
  318. d_done:
  319. rtsd r15, 8
  320. nop
  321. .size memmove, . - memmove
  322. .end memmove
  323. libc_hidden_def(memmove)