/* Copyright (C) 2012-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <http://www.gnu.org/licenses/>.  */
#include <sysdep.h>

/* Assumptions:
 *
 * ARMv8-a, AArch64, unaligned accesses.
 *
 */
#define dstin	x0
#define src	x1
#define count	x2
#define dst	x3
#define srcend	x4
#define dstend	x5
#define A_l	x6
#define A_lw	w6
#define A_h	x7
#define A_hw	w7
#define B_l	x8
#define B_lw	w8
#define B_h	x9
#define C_l	x10
#define C_h	x11
#define D_l	x12
#define D_h	x13

/* E..G alias registers whose original values (src, count, srcend, dst)
   are no longer needed by the time these temporaries are written.  */
#define E_l	src
#define E_h	count
#define F_l	srcend
#define F_h	dst
#define G_l	count
#define G_h	dst

#define tmp1	x14
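
/* Overview: copies are split into three size classes.  Small copies of
   up to 16 bytes and medium copies of 17..96 bytes are fully unrolled
   and read all of their data before writing any of it, so they are safe
   even for overlapping buffers.  Large copies of more than 96 bytes
   align the destination and loop over 64 bytes per iteration;
   L(move_long) below is the backward-copying counterpart for large
   moves that must run high-to-low because the buffers overlap.  */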
ENTRY (memcpy)

	prfm	PLDL1KEEP, [src]
	add	srcend, src, count
	add	dstend, dstin, count
	cmp	count, 16
	b.ls	L(copy16)
	cmp	count, 96
	b.hi	L(copy_long)

	/* Medium copies: 17..96 bytes.  */
	sub	tmp1, count, 1
	ldp	A_l, A_h, [src]
	tbnz	tmp1, 6, L(copy96)	/* count - 1 >= 64, i.e. count >= 65.  */
	ldp	D_l, D_h, [srcend, -16]
	tbz	tmp1, 5, 1f		/* count - 1 < 32: A and D suffice.  */
	ldp	B_l, B_h, [src, 16]
	ldp	C_l, C_h, [srcend, -32]
	stp	B_l, B_h, [dstin, 16]
	stp	C_l, C_h, [dstend, -32]
1:
	stp	A_l, A_h, [dstin]
	stp	D_l, D_h, [dstend, -16]
	ret
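
/* The medium-size path avoids any byte-granular tail handling: it loads
   16-byte pairs from both ends of the buffer and lets the stores overlap
   in the middle.  A rough C sketch of the simplest subcase, 17..32 bytes
   (illustration only, not part of this file; the function name is made
   up, and the fixed-size memcpy calls stand in for the ldp/stp pairs a
   compiler would emit for them):

	#include <string.h>
	#include <stddef.h>

	// Copy n bytes, 17 <= n <= 32, as two possibly overlapping
	// 16-byte block moves: one from each end of the buffer.
	void copy17_32 (char *dst, const char *src, size_t n)
	{
	  unsigned char a[16], d[16];
	  memcpy (a, src, 16);            // ldp A_l, A_h, [src]
	  memcpy (d, src + n - 16, 16);   // ldp D_l, D_h, [srcend, -16]
	  memcpy (dst, a, 16);            // stp A_l, A_h, [dstin]
	  memcpy (dst + n - 16, d, 16);   // stp D_l, D_h, [dstend, -16]
	}

   The 33..64 byte case adds one more pair from each end (B and C), and
   L(copy96) below extends the same idea to 64 bytes from the start plus
   32 bytes from the end.  */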
	.p2align 4

	/* Small copies: 0..16 bytes.  */
L(copy16):
	cmp	count, 8
	b.lo	1f
	ldr	A_l, [src]
	ldr	A_h, [srcend, -8]
	str	A_l, [dstin]
	str	A_h, [dstend, -8]
	ret
	.p2align 4
1:
	tbz	count, 2, 1f
	ldr	A_lw, [src]
	ldr	A_hw, [srcend, -4]
	str	A_lw, [dstin]
	str	A_hw, [dstend, -4]
	ret
	/* Copy 0..3 bytes.  Use a branchless sequence that copies the same
	   byte 3 times if count==1, or the 2nd byte twice if count==2.  */
1:
	cbz	count, 2f
	lsr	tmp1, count, 1
	ldrb	A_lw, [src]
	ldrb	A_hw, [srcend, -1]
	ldrb	B_lw, [src, tmp1]
	strb	A_lw, [dstin]
	strb	B_lw, [dstin, tmp1]
	strb	A_hw, [dstend, -1]
2:	ret
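
/* The branchless 0..3 byte sequence stores the first byte, the middle
   byte (index count/2) and the last byte; when count is 1 or 2 some of
   those stores simply hit the same address.  A C sketch of the nonzero
   case (illustration only; the name is made up):

	#include <stddef.h>

	// Copy n bytes, 1 <= n <= 3, without branching on n.  For n == 1
	// all three stores write byte 0; for n == 2 the second byte is
	// written twice.
	void copy1_3 (unsigned char *dst, const unsigned char *src, size_t n)
	{
	  size_t mid = n >> 1;           // lsr tmp1, count, 1
	  unsigned char a = src[0];      // ldrb A_lw, [src]
	  unsigned char c = src[n - 1];  // ldrb A_hw, [srcend, -1]
	  unsigned char b = src[mid];    // ldrb B_lw, [src, tmp1]
	  dst[0] = a;                    // strb A_lw, [dstin]
	  dst[mid] = b;                  // strb B_lw, [dstin, tmp1]
	  dst[n - 1] = c;                // strb A_hw, [dstend, -1]
	}
   */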
	.p2align 4

	/* Copy 64..96 bytes.  Copy 64 bytes from the start and
	   32 bytes from the end.  */
L(copy96):
	ldp	B_l, B_h, [src, 16]
	ldp	C_l, C_h, [src, 32]
	ldp	D_l, D_h, [src, 48]
	ldp	E_l, E_h, [srcend, -32]
	ldp	F_l, F_h, [srcend, -16]
	stp	A_l, A_h, [dstin]
	stp	B_l, B_h, [dstin, 16]
	stp	C_l, C_h, [dstin, 32]
	stp	D_l, D_h, [dstin, 48]
	stp	E_l, E_h, [dstend, -32]
	stp	F_l, F_h, [dstend, -16]
	ret
	/* Align DST to 16 byte alignment so that we don't cross cache line
	   boundaries on both loads and stores.  There are at least 96 bytes
	   to copy, so copy 16 bytes unaligned and then align.  The loop
	   copies 64 bytes per iteration and prefetches one iteration ahead.  */

	.p2align 4
L(copy_long):
	and	tmp1, dstin, 15
	bic	dst, dstin, 15
	ldp	D_l, D_h, [src]
	sub	src, src, tmp1
	add	count, count, tmp1	/* Count is now 16 too large.  */
	ldp	A_l, A_h, [src, 16]
	stp	D_l, D_h, [dstin]
	ldp	B_l, B_h, [src, 32]
	ldp	C_l, C_h, [src, 48]
	ldp	D_l, D_h, [src, 64]!
	subs	count, count, 128 + 16	/* Test and readjust count.  */
	b.ls	2f
1:
	stp	A_l, A_h, [dst, 16]
	ldp	A_l, A_h, [src, 16]
	stp	B_l, B_h, [dst, 32]
	ldp	B_l, B_h, [src, 32]
	stp	C_l, C_h, [dst, 48]
	ldp	C_l, C_h, [src, 48]
	stp	D_l, D_h, [dst, 64]!
	ldp	D_l, D_h, [src, 64]!
	subs	count, count, 64
	b.hi	1b

	/* Write the last full set of 64 bytes.  The remainder is at most 64
	   bytes, so it is safe to always copy 64 bytes from the end even if
	   there is just 1 byte left.  */
2:
	ldp	E_l, E_h, [srcend, -64]
	stp	A_l, A_h, [dst, 16]
	ldp	A_l, A_h, [srcend, -48]
	stp	B_l, B_h, [dst, 32]
	ldp	B_l, B_h, [srcend, -32]
	stp	C_l, C_h, [dst, 48]
	ldp	C_l, C_h, [srcend, -16]
	stp	D_l, D_h, [dst, 64]
	stp	E_l, E_h, [dstend, -64]
	stp	A_l, A_h, [dstend, -48]
	stp	B_l, B_h, [dstend, -32]
	stp	C_l, C_h, [dstend, -16]
	ret
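
/* In C terms the large forward copy does roughly the following
   (illustration only, not part of this file; the name is made up, and
   the sketch omits the software pipelining that keeps the loop's loads
   one iteration ahead of its stores):

	#include <stdint.h>
	#include <string.h>
	#include <stddef.h>

	// Large forward copy, n > 96: store the first 16 bytes unaligned,
	// round the destination down to 16 bytes, run 64-byte iterations
	// on the aligned destination, then finish with one overlapping
	// 64-byte block taken from the end of the buffer.
	void copy_long_c (char *dstin, const char *src, size_t n)
	{
	  char *dstend = dstin + n;
	  const char *srcend = src + n;
	  size_t skew = (uintptr_t) dstin & 15;   // and tmp1, dstin, 15
	  memcpy (dstin, src, 16);                // first 16 bytes, unaligned
	  char *dst = dstin + 16 - skew;          // dst is now 16-byte aligned
	  src += 16 - skew;
	  for (size_t left = n - (16 - skew); left > 64; left -= 64)
	    {
	      memcpy (dst, src, 64);              // four ldp/stp pairs
	      dst += 64;
	      src += 64;
	    }
	  memcpy (dstend - 64, srcend - 64, 64);  // overlapping 64-byte tail
	}

   The final block may rewrite up to 63 bytes the loop already produced,
   but with identical data, so correctness is unaffected and no scalar
   tail loop is needed.  */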
	.p2align 4
L(move_long):
	cbz	tmp1, 3f	/* Nothing to do if src == dst (on entry,
				   tmp1 holds dstin - src, set up by the
				   memmove overlap check).  */

	add	srcend, src, count
	add	dstend, dstin, count

	/* Align dstend to 16 byte alignment so that we don't cross cache line
	   boundaries on both loads and stores.  There are at least 96 bytes
	   to copy, so copy 16 bytes unaligned and then align.  The loop
	   copies 64 bytes per iteration and prefetches one iteration ahead.  */

	and	tmp1, dstend, 15
	ldp	D_l, D_h, [srcend, -16]
	sub	srcend, srcend, tmp1
	sub	count, count, tmp1
	ldp	A_l, A_h, [srcend, -16]
	stp	D_l, D_h, [dstend, -16]
	ldp	B_l, B_h, [srcend, -32]
	ldp	C_l, C_h, [srcend, -48]
	ldp	D_l, D_h, [srcend, -64]!
	sub	dstend, dstend, tmp1
	subs	count, count, 128
	b.ls	2f

	nop
1:
	stp	A_l, A_h, [dstend, -16]
	ldp	A_l, A_h, [srcend, -16]
	stp	B_l, B_h, [dstend, -32]
	ldp	B_l, B_h, [srcend, -32]
	stp	C_l, C_h, [dstend, -48]
	ldp	C_l, C_h, [srcend, -48]
	stp	D_l, D_h, [dstend, -64]!
	ldp	D_l, D_h, [srcend, -64]!
	subs	count, count, 64
	b.hi	1b

	/* Write the last full set of 64 bytes.  The remainder is at most 64
	   bytes, so it is safe to always copy 64 bytes from the start even if
	   there is just 1 byte left.  */
2:
	ldp	G_l, G_h, [src, 48]
	stp	A_l, A_h, [dstend, -16]
	ldp	A_l, A_h, [src, 32]
	stp	B_l, B_h, [dstend, -32]
	ldp	B_l, B_h, [src, 16]
	stp	C_l, C_h, [dstend, -48]
	ldp	C_l, C_h, [src]
	stp	D_l, D_h, [dstend, -64]
	stp	G_l, G_h, [dstin, 48]
	stp	A_l, A_h, [dstin, 32]
	stp	B_l, B_h, [dstin, 16]
	stp	C_l, C_h, [dstin]
3:	ret
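
/* L(move_long) is the mirror image of L(copy_long): it aligns dstend
   instead of dstin and walks the buffer from high addresses to low, so
   a store never lands on a source byte that has not been read yet.  A
   minimal byte-wise C sketch of why the backward direction matters when
   dst overlaps src from above (illustration only; the name is made up):

	#include <stddef.h>

	// Safe copy when dst > src and the regions overlap: a forward
	// copy of dst[i] = src[i] would overwrite the source byte at
	// index i + (dst - src) before reading it.  Copying the highest
	// addresses first avoids that.
	void move_bytes_backward (char *dst, const char *src, size_t n)
	{
	  while (n--)
	    dst[n] = src[n];
	}

   The assembly applies the same ordering 64 bytes at a time, with each
   block's loads issued before any store that could overwrite them, and
   finishes by writing the first 64 bytes of the destination last.  */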

END (memcpy)
libc_hidden_builtin_def (memcpy)