  1. /*
  2. * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
  3. * Copyright (C) 2007 ARC International (UK) LTD
  4. *
  5. * Licensed under the LGPL v2.1 or later, see the file COPYING.LIB in this tarball.
  6. */
  7. #include <sysdep.h>
  8. /* If dst and src are 4 byte aligned, copy 8 bytes at a time.
  9. If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
  10. it 8 byte aligned. Thus, we can do a little read-ahead, without
  11. dereferencing a cache line that we should not touch.
  12. Note that short and long instructions have been scheduled to avoid
  13. branch stalls.
  14. The beq_s to r3z could be made unaligned & long to avoid a stall
  15. there, but it is not likely to be taken often, and it
  16. would also be likely to cost an unaligned mispredict at the next call. */
/* char *strcpy(char *dest, const char *src)
   ARCompact assembly.  Per the ARC calling convention: r0 = dest,
   r1 = src; returns r0 = dest (r0 is never written after entry —
   r10 is used as the write cursor instead).
   Clobbers: r1 (in the tail), r2, r3, r4, r8, r10, r12, flags.  */
ENTRY(strcpy)
	or	r2,r0,r1		; merge low bits of dest and src
	bmsk_s	r2,r2,1			; keep bits 1:0 — nonzero if either is not 4-byte aligned
	brne.d	r2,0,charloop		; misaligned: byte-at-a-time fallback
	mov_s	r10,r0			; (delay slot) r10 = write cursor; r0 kept as return value
	ld_s	r3,[r1,0]		; preload first source word
	mov	r8,0x01010101		; zero-byte-detect constant
	bbit0.d	r1,2,loop_start		; src already 8-byte aligned? skip the alignment word
	ror	r12,r8			; (delay slot) r12 = 0x80808080
	;; One 4-byte step to bring src up to 8-byte alignment.
	;; A word w contains a NUL iff (w - 0x01010101) & ~w & 0x80808080 != 0.
	sub	r2,r3,r8
	bic_s	r2,r2,r3
	tst_s	r2,r12
	bne	r3z			; NUL in first word: finish byte-wise
	mov_s	r4,r3			; r4 = word pending store on loop entry
	.balign 4
loop:
	ld.a	r3,[r1,4]		; read ahead next word (address write-back)
	st.ab	r4,[r10,4]		; store previously checked word (post-increment)
loop_start:
	ld.a	r4,[r1,4]		; read ahead second word of the pair
	sub	r2,r3,r8		; zero-byte test on r3 ...
	bic_s	r2,r2,r3
	tst_s	r2,r12
	bne_s	r3z			; NUL in r3: emit its bytes up to the NUL
	st.ab	r3,[r10,4]		; r3 is NUL-free: store it
	sub	r2,r4,r8		; zero-byte test on r4 ...
	bic	r2,r2,r4
	tst	r2,r12
	beq	loop			; neither word held a NUL: continue
	mov_s	r3,r4			; NUL is in r4: move it where the tail expects it
/* Tail: r3 holds the word containing the terminating NUL.  Store its
   bytes one at a time in address order; the delay-slot stb runs even on
   the final (not-taken) branch, so the NUL byte itself is stored too.  */
#ifdef __LITTLE_ENDIAN__
r3z:	bmsk.f	r1,r3,7			; r1 = lowest-addressed byte (bits 7:0), set flags
	lsr_s	r3,r3,8			; shift next byte into position
#else
r3z:	lsr.f	r1,r3,24		; r1 = lowest-addressed byte (bits 31:24), set flags
	asl_s	r3,r3,8			; shift next byte into position
#endif
	bne.d	r3z			; loop while the extracted byte was nonzero
	stb.ab	r1,[r10,1]		; (delay slot) store byte, post-increment dest
	j_s	[blink]
	.balign 4
/* Fallback for pointers that are not 4-byte aligned: plain byte copy,
   including the terminating NUL (delay-slot stb runs on the exit path). */
charloop:
	ldb.ab	r3,[r1,1]		; load byte, post-increment src
	brne.d	r3,0,charloop		; loop until NUL has been copied
	stb.ab	r3,[r10,1]		; (delay slot) store byte, post-increment dest
	j	[blink]
END(strcpy)
libc_hidden_def(strcpy)