/* memcpy.S — IA-64 (Itanium) optimized memcpy (glibc, McKinley variant), ~11 KB. */
  1. /* Optimized version of the standard memcpy() function.
  2. This file is part of the GNU C Library.
  3. Copyright (C) 2000, 2001, 2003 Free Software Foundation, Inc.
  4. Contributed by Dan Pop for Itanium <Dan.Pop@cern.ch>.
  5. Rewritten for McKinley by Sverre Jarp, HP Labs/CERN <Sverre.Jarp@cern.ch>
  6. The GNU C Library is free software; you can redistribute it and/or
  7. modify it under the terms of the GNU Lesser General Public
  8. License as published by the Free Software Foundation; either
  9. version 2.1 of the License, or (at your option) any later version.
  10. The GNU C Library is distributed in the hope that it will be useful,
  11. but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. Lesser General Public License for more details.
  14. You should have received a copy of the GNU Lesser General Public
  15. License along with the GNU C Library; if not, see
  16. <http://www.gnu.org/licenses/>. */
  17. /* Return: dest
  18. Inputs:
  19. in0: dest
  20. in1: src
  21. in2: byte count
  22. An assembly implementation of the algorithm used by the generic C
  23. version from glibc. The case when source and dest are aligned is
  24. treated separately, for extra performance.
  25. In this form, memcpy assumes little endian mode. For big endian mode,
  26. sh1 must be computed using an extra instruction: sub sh1 = 64, sh1
  27. and the order of r[MEMLAT] and r[MEMLAT+1] must be reverted in the
  28. shrp instruction. */
  29. #define USE_LFETCH
  30. #define USE_FLP
  31. #include <sysdep.h>
  32. #undef ret
  33. #define LFETCH_DIST 500
  34. #define ALIGN_UNROLL_no 4 /* no. of elements */
  35. #define ALIGN_UNROLL_sh 2 /* (shift amount) */
  36. #define MEMLAT 8
  37. #define Nrot ((4*(MEMLAT+2) + 7) & ~7)
  38. #define OP_T_THRES 16
  39. #define OPSIZ 8
  40. #define loopcnt r14
  41. #define elemcnt r15
  42. #define saved_pr r16
  43. #define saved_lc r17
  44. #define adest r18
  45. #define dest r19
  46. #define asrc r20
  47. #define src r21
  48. #define len r22
  49. #define tmp2 r23
  50. #define tmp3 r24
  51. #define tmp4 r25
  52. #define ptable r26
  53. #define ploop56 r27
  54. #define loopaddr r28
  55. #define sh1 r29
  56. #define ptr1 r30
  57. #define ptr2 r31
  58. #define movi0 mov
  59. #define p_scr p6
  60. #define p_xtr p7
  61. #define p_nxtr p8
  62. #define p_few p9
  63. #if defined(USE_FLP)
  64. #define load ldf8
  65. #define store stf8
  66. #define tempreg f6
  67. #define the_r fr
  68. #define the_s fs
  69. #define the_t ft
  70. #define the_q fq
  71. #define the_w fw
  72. #define the_x fx
  73. #define the_y fy
  74. #define the_z fz
  75. #elif defined(USE_INT)
  76. #define load ld8
  77. #define store st8
  78. #define tempreg tmp2
  79. #define the_r r
  80. #define the_s s
  81. #define the_t t
  82. #define the_q q
  83. #define the_w w
  84. #define the_x x
  85. #define the_y y
  86. #define the_z z
  87. #endif
  88. #ifdef GAS_ALIGN_BREAKS_UNWIND_INFO
  89. /* Manually force proper loop-alignment. Note: be sure to
  90. double-check the code-layout after making any changes to
  91. this routine! */
  92. # define ALIGN(n) { nop 0 }
  93. #else
  94. # define ALIGN(n) .align n
  95. #endif
  96. #if defined(USE_LFETCH)
  97. #define LOOP(shift) \
  98. ALIGN(32); \
  99. .loop##shift : \
  100. { .mmb \
  101. (p[0]) ld8.nt1 r[0] = [asrc], 8 ; \
  102. (p[0]) lfetch.nt1 [ptr1], 16 ; \
  103. nop.b 0 ; \
  104. } { .mib \
  105. (p[MEMLAT+1]) st8 [dest] = tmp3, 8 ; \
  106. (p[MEMLAT]) shrp tmp3 = r[MEMLAT], s[MEMLAT+1], shift ; \
  107. nop.b 0 ;; \
  108. } { .mmb \
  109. (p[0]) ld8.nt1 s[0] = [asrc], 8 ; \
  110. (p[0]) lfetch.nt1 [ptr2], 16 ; \
  111. nop.b 0 ; \
  112. } { .mib \
  113. (p[MEMLAT+1]) st8 [dest] = tmp4, 8 ; \
  114. (p[MEMLAT]) shrp tmp4 = s[MEMLAT], r[MEMLAT], shift ; \
  115. br.ctop.sptk.many .loop##shift \
  116. ;; } \
  117. { .mib \
  118. br.cond.sptk.many .copy_bytes ; /* deal with the remaining bytes */ \
  119. }
  120. #else
  121. #define LOOP(shift) \
  122. ALIGN(32); \
  123. .loop##shift : \
  124. { .mmb \
  125. (p[0]) ld8.nt1 r[0] = [asrc], 8 ; \
  126. nop.b 0 ; \
  127. } { .mib \
  128. (p[MEMLAT+1]) st8 [dest] = tmp3, 8 ; \
  129. (p[MEMLAT]) shrp tmp3 = r[MEMLAT], s[MEMLAT+1], shift ; \
  130. nop.b 0 ;; \
  131. } { .mmb \
  132. (p[0]) ld8.nt1 s[0] = [asrc], 8 ; \
  133. nop.b 0 ; \
  134. } { .mib \
  135. (p[MEMLAT+1]) st8 [dest] = tmp4, 8 ; \
  136. (p[MEMLAT]) shrp tmp4 = s[MEMLAT], r[MEMLAT], shift ; \
  137. br.ctop.sptk.many .loop##shift \
  138. ;; } \
  139. { .mib \
  140. br.cond.sptk.many .copy_bytes ; /* deal with the remaining bytes */ \
  141. }
  142. #endif
  143. ENTRY(memcpy)
  144. { .mmi
  145. .prologue
  146. alloc r2 = ar.pfs, 3, Nrot - 3, 0, Nrot
  147. .rotr r[MEMLAT+1], s[MEMLAT+2], q[MEMLAT+1], t[MEMLAT+1]
  148. .rotp p[MEMLAT+2]
  149. .rotf fr[MEMLAT+1], fq[MEMLAT+1], fs[MEMLAT+1], ft[MEMLAT+1]
  150. mov ret0 = in0 /* return tmp2 = dest */
  151. .save pr, saved_pr
  152. movi0 saved_pr = pr /* save the predicate registers */
  153. } { .mmi
  154. and tmp4 = 7, in0 /* check if destination is aligned */
  155. mov dest = in0 /* dest */
  156. mov src = in1 /* src */
  157. ;; }
  158. { .mii
  159. cmp.eq p_scr, p0 = in2, r0 /* if (len == 0) */
  160. .save ar.lc, saved_lc
  161. movi0 saved_lc = ar.lc /* save the loop counter */
  162. .body
  163. cmp.ge p_few, p0 = OP_T_THRES, in2 /* is len <= OP_T_THRESH */
  164. } { .mbb
  165. mov len = in2 /* len */
  166. (p_scr) br.cond.dpnt.few .restore_and_exit /* Branch no. 1: return dest */
  167. (p_few) br.cond.dpnt.many .copy_bytes /* Branch no. 2: copy byte by byte */
  168. ;; }
  169. { .mmi
  170. #if defined(USE_LFETCH)
  171. lfetch.nt1 [dest] /* */
  172. lfetch.nt1 [src] /* */
  173. #endif
  174. shr.u elemcnt = len, 3 /* elemcnt = len / 8 */
  175. } { .mib
  176. cmp.eq p_scr, p0 = tmp4, r0 /* is destination aligned? */
  177. sub loopcnt = 7, tmp4 /* */
  178. (p_scr) br.cond.dptk.many .dest_aligned
  179. ;; }
  180. { .mmi
  181. ld1 tmp2 = [src], 1 /* */
  182. sub len = len, loopcnt, 1 /* reduce len */
  183. movi0 ar.lc = loopcnt /* */
  184. } { .mib
  185. cmp.ne p_scr, p0 = 0, loopcnt /* avoid loading beyond end-point */
  186. ;; }
  187. .l0: /* ---------------------------- L0: Align src on 8-byte boundary */
  188. { .mmi
  189. st1 [dest] = tmp2, 1 /* */
  190. (p_scr) ld1 tmp2 = [src], 1 /* */
  191. } { .mib
  192. cmp.lt p_scr, p0 = 1, loopcnt /* avoid load beyond end-point */
  193. add loopcnt = -1, loopcnt
  194. br.cloop.dptk.few .l0 /* */
  195. ;; }
  196. .dest_aligned:
  197. { .mmi
  198. and tmp4 = 7, src /* ready for alignment check */
  199. shr.u elemcnt = len, 3 /* elemcnt = len / 8 */
  200. ;; }
  201. { .mib
  202. cmp.ne p_scr, p0 = tmp4, r0 /* is source also aligned */
  203. tbit.nz p_xtr, p_nxtr = src, 3 /* prepare a separate move if src */
  204. } { .mib /* is not 16B aligned */
  205. add ptr2 = LFETCH_DIST, dest /* prefetch address */
  206. add ptr1 = LFETCH_DIST, src
  207. (p_scr) br.cond.dptk.many .src_not_aligned
  208. ;; }
  209. /* The optimal case, when dest, and src are aligned */
  210. .both_aligned:
  211. { .mmi
  212. .pred.rel "mutex",p_xtr,p_nxtr
  213. (p_xtr) cmp.gt p_scr, p0 = ALIGN_UNROLL_no+1, elemcnt /* Need N + 1 to qualify */
  214. (p_nxtr) cmp.gt p_scr, p0 = ALIGN_UNROLL_no, elemcnt /* Need only N to qualify */
  215. movi0 pr.rot = 1 << 16 /* set rotating predicates */
  216. } { .mib
  217. (p_scr) br.cond.dpnt.many .copy_full_words
  218. ;; }
  219. { .mmi
  220. (p_xtr) load tempreg = [src], 8
  221. (p_xtr) add elemcnt = -1, elemcnt
  222. movi0 ar.ec = MEMLAT + 1 /* set the epilog counter */
  223. ;; }
  224. { .mmi
  225. (p_xtr) add len = -8, len /* */
  226. add asrc = 16, src /* one bank apart (for USE_INT) */
  227. shr.u loopcnt = elemcnt, ALIGN_UNROLL_sh /* cater for unrolling */
  228. ;;}
  229. { .mmi
  230. add loopcnt = -1, loopcnt
  231. (p_xtr) store [dest] = tempreg, 8 /* copy the "extra" word */
  232. nop.i 0
  233. ;; }
  234. { .mib
  235. add adest = 16, dest
  236. movi0 ar.lc = loopcnt /* set the loop counter */
  237. ;; }
  238. #ifdef GAS_ALIGN_BREAKS_UNWIND_INFO
  239. { nop 0 }
  240. #else
  241. .align 32
  242. #endif
  243. #if defined(USE_FLP)
  244. .l1: /* ------------------------------- L1: Everything a multiple of 8 */
  245. { .mmi
  246. #if defined(USE_LFETCH)
  247. (p[0]) lfetch.nt1 [ptr2],32
  248. #endif
  249. (p[0]) ldfp8 the_r[0],the_q[0] = [src], 16
  250. (p[0]) add len = -32, len
  251. } {.mmb
  252. (p[MEMLAT]) store [dest] = the_r[MEMLAT], 8
  253. (p[MEMLAT]) store [adest] = the_s[MEMLAT], 8
  254. ;; }
  255. { .mmi
  256. #if defined(USE_LFETCH)
  257. (p[0]) lfetch.nt1 [ptr1],32
  258. #endif
  259. (p[0]) ldfp8 the_s[0], the_t[0] = [src], 16
  260. } {.mmb
  261. (p[MEMLAT]) store [dest] = the_q[MEMLAT], 24
  262. (p[MEMLAT]) store [adest] = the_t[MEMLAT], 24
  263. br.ctop.dptk.many .l1
  264. ;; }
  265. #elif defined(USE_INT)
  266. .l1: /* ------------------------------- L1: Everything a multiple of 8 */
  267. { .mmi
  268. (p[0]) load the_r[0] = [src], 8
  269. (p[0]) load the_q[0] = [asrc], 8
  270. (p[0]) add len = -32, len
  271. } {.mmb
  272. (p[MEMLAT]) store [dest] = the_r[MEMLAT], 8
  273. (p[MEMLAT]) store [adest] = the_q[MEMLAT], 8
  274. ;; }
  275. { .mmi
  276. (p[0]) load the_s[0] = [src], 24
  277. (p[0]) load the_t[0] = [asrc], 24
  278. } {.mmb
  279. (p[MEMLAT]) store [dest] = the_s[MEMLAT], 24
  280. (p[MEMLAT]) store [adest] = the_t[MEMLAT], 24
  281. #if defined(USE_LFETCH)
  282. ;; }
  283. { .mmb
  284. (p[0]) lfetch.nt1 [ptr2],32
  285. (p[0]) lfetch.nt1 [ptr1],32
  286. #endif
  287. br.ctop.dptk.many .l1
  288. ;; }
  289. #endif
  290. .copy_full_words:
  291. { .mib
  292. cmp.gt p_scr, p0 = 8, len /* */
  293. shr.u elemcnt = len, 3 /* */
  294. (p_scr) br.cond.dpnt.many .copy_bytes
  295. ;; }
  296. { .mii
  297. load tempreg = [src], 8
  298. add loopcnt = -1, elemcnt /* */
  299. ;; }
  300. { .mii
  301. cmp.ne p_scr, p0 = 0, loopcnt /* */
  302. mov ar.lc = loopcnt /* */
  303. ;; }
  304. .l2: /* ------------------------------- L2: Max 4 words copied separately */
  305. { .mmi
  306. store [dest] = tempreg, 8
  307. (p_scr) load tempreg = [src], 8 /* */
  308. add len = -8, len
  309. } { .mib
  310. cmp.lt p_scr, p0 = 1, loopcnt /* avoid load beyond end-point */
  311. add loopcnt = -1, loopcnt
  312. br.cloop.dptk.few .l2
  313. ;; }
  314. .copy_bytes:
  315. { .mib
  316. cmp.eq p_scr, p0 = len, r0 /* is len == 0 ? */
  317. add loopcnt = -1, len /* len--; */
  318. (p_scr) br.cond.spnt .restore_and_exit
  319. ;; }
  320. { .mii
  321. ld1 tmp2 = [src], 1
  322. movi0 ar.lc = loopcnt
  323. cmp.ne p_scr, p0 = 0, loopcnt /* avoid load beyond end-point */
  324. ;; }
  325. .l3: /* ------------------------------- L3: Final byte move */
  326. { .mmi
  327. st1 [dest] = tmp2, 1
  328. (p_scr) ld1 tmp2 = [src], 1
  329. } { .mib
  330. cmp.lt p_scr, p0 = 1, loopcnt /* avoid load beyond end-point */
  331. add loopcnt = -1, loopcnt
  332. br.cloop.dptk.few .l3
  333. ;; }
  334. .restore_and_exit:
  335. { .mmi
  336. movi0 pr = saved_pr, -1 /* restore the predicate registers */
  337. ;; }
  338. { .mib
  339. movi0 ar.lc = saved_lc /* restore the loop counter */
  340. br.ret.sptk.many b0
  341. ;; }
  342. .src_not_aligned:
  343. { .mmi
  344. cmp.gt p_scr, p0 = 16, len
  345. and sh1 = 7, src /* sh1 = src % 8 */
  346. shr.u loopcnt = len, 4 /* element-cnt = len / 16 */
  347. } { .mib
  348. add tmp4 = @ltoff(.table), gp
  349. add tmp3 = @ltoff(.loop56), gp
  350. (p_scr) br.cond.dpnt.many .copy_bytes /* do byte by byte if too few */
  351. ;; }
  352. { .mmi
  353. and asrc = -8, src /* asrc = (-8) -- align src for loop */
  354. add loopcnt = -1, loopcnt /* loopcnt-- */
  355. shl sh1 = sh1, 3 /* sh1 = 8 * (src % 8) */
  356. } { .mmi
  357. ld8 ptable = [tmp4] /* ptable = &table */
  358. ld8 ploop56 = [tmp3] /* ploop56 = &loop56 */
  359. and tmp2 = -16, len /* tmp2 = len & -OPSIZ */
  360. ;; }
  361. { .mmi
  362. add tmp3 = ptable, sh1 /* tmp3 = &table + sh1 */
  363. add src = src, tmp2 /* src += len & (-16) */
  364. movi0 ar.lc = loopcnt /* set LC */
  365. ;; }
  366. { .mmi
  367. ld8 tmp4 = [tmp3] /* tmp4 = loop offset */
  368. sub len = len, tmp2 /* len -= len & (-16) */
  369. movi0 ar.ec = MEMLAT + 2 /* one more pass needed */
  370. ;; }
  371. { .mmi
  372. ld8 s[1] = [asrc], 8 /* preload */
  373. sub loopaddr = ploop56,tmp4 /* loopadd = &loop56 - loop offset */
  374. movi0 pr.rot = 1 << 16 /* set rotating predicates */
  375. ;; }
  376. { .mib
  377. nop.m 0
  378. movi0 b6 = loopaddr
  379. br b6 /* jump to the appropriate loop */
  380. ;; }
  381. LOOP(8)
  382. LOOP(16)
  383. LOOP(24)
  384. LOOP(32)
  385. LOOP(40)
  386. LOOP(48)
  387. LOOP(56)
  388. END(memcpy)
  389. libc_hidden_def (memcpy)
  390. .rodata
  391. .align 8
  392. .table:
  393. data8 0 /* dummy entry */
  394. data8 .loop56 - .loop8
  395. data8 .loop56 - .loop16
  396. data8 .loop56 - .loop24
  397. data8 .loop56 - .loop32
  398. data8 .loop56 - .loop40
  399. data8 .loop56 - .loop48
  400. data8 .loop56 - .loop56