@@ -46,15 +46,15 @@
#define ptr1 r28
#define ptr2 r27
#define ptr3 r26
-#define ptr9 r24
+#define ptr9 r24
#define loopcnt r23
#define linecnt r22
#define bytecnt r21

#define fvalue f6

-// This routine uses only scratch predicate registers (p6 - p15)
-#define p_scr p6 // default register for same-cycle branches
+/* This routine uses only scratch predicate registers (p6 - p15) */
+#define p_scr p6 /* default register for same-cycle branches */
#define p_nz p7
#define p_zr p8
#define p_unalgn p9
@@ -68,7 +68,7 @@
#define MIN1 15
#define MIN1P1HALF 8
#define LINE_SIZE 128
-#define LSIZE_SH 7 // shift amount
+#define LSIZE_SH 7 /* shift amount */
#define PREF_AHEAD 8

#define USE_FLP
@@ -90,97 +90,97 @@ ENTRY(memset)
	movi0 save_lc = ar.lc
} { .mmi
	.body
-	mov ret0 = dest // return value
-	cmp.ne p_nz, p_zr = value, r0 // use stf.spill if value is zero
+	mov ret0 = dest /* return value */
+	cmp.ne p_nz, p_zr = value, r0 /* use stf.spill if value is zero */
	cmp.eq p_scr, p0 = cnt, r0
;; }
{ .mmi
-	and ptr2 = -(MIN1+1), dest // aligned address
-	and tmp = MIN1, dest // prepare to check for alignment
-	tbit.nz p_y, p_n = dest, 0 // Do we have an odd address? (M_B_U)
+	and ptr2 = -(MIN1+1), dest /* aligned address */
+	and tmp = MIN1, dest /* prepare to check for alignment */
+	tbit.nz p_y, p_n = dest, 0 /* Do we have an odd address? (M_B_U) */
} { .mib
	mov ptr1 = dest
-	mux1 value = value, @brcst // create 8 identical bytes in word
-(p_scr) br.ret.dpnt.many rp // return immediately if count = 0
+	mux1 value = value, @brcst /* create 8 identical bytes in word */
+(p_scr) br.ret.dpnt.many rp /* return immediately if count = 0 */
;; }
{ .mib
	cmp.ne p_unalgn, p0 = tmp, r0
-} { .mib // NB: # of bytes to move is 1 higher
-	sub bytecnt = (MIN1+1), tmp // than loopcnt
-	cmp.gt p_scr, p0 = 16, cnt // is it a minimalistic task?
-(p_scr) br.cond.dptk.many .move_bytes_unaligned // go move just a few (M_B_U)
+} { .mib /* NB: # of bytes to move is 1 higher */
+	sub bytecnt = (MIN1+1), tmp /* than loopcnt */
+	cmp.gt p_scr, p0 = 16, cnt /* is it a minimalistic task? */
+(p_scr) br.cond.dptk.many .move_bytes_unaligned /* go move just a few (M_B_U) */
;; }
{ .mmi
-(p_unalgn) add ptr1 = (MIN1+1), ptr2 // after alignment
-(p_unalgn) add ptr2 = MIN1P1HALF, ptr2 // after alignment
-(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 3 // should we do a st8 ?
+(p_unalgn) add ptr1 = (MIN1+1), ptr2 /* after alignment */
+(p_unalgn) add ptr2 = MIN1P1HALF, ptr2 /* after alignment */
+(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 3 /* should we do a st8 ? */
;; }
{ .mib
(p_y) add cnt = -8, cnt
-(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 2 // should we do a st4 ?
+(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 2 /* should we do a st4 ? */
} { .mib
(p_y) st8 [ptr2] = value, -4
(p_n) add ptr2 = 4, ptr2
;; }
{ .mib
(p_yy) add cnt = -4, cnt
-(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 1 // should we do a st2 ?
+(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 1 /* should we do a st2 ? */
} { .mib
(p_yy) st4 [ptr2] = value, -2
(p_nn) add ptr2 = 2, ptr2
;; }
{ .mmi
-	mov tmp = LINE_SIZE+1 // for compare
+	mov tmp = LINE_SIZE+1 /* for compare */
(p_y) add cnt = -2, cnt
-(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 0 // should we do a st1 ?
+(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 0 /* should we do a st1 ? */
} { .mmi
-	setf.sig fvalue=value // transfer value to FLP side
+	setf.sig fvalue=value /* transfer value to FLP side */
(p_y) st2 [ptr2] = value, -1
(p_n) add ptr2 = 1, ptr2
;; }

{ .mmi
(p_yy) st1 [ptr2] = value
-	cmp.gt p_scr, p0 = tmp, cnt // is it a minimalistic task?
+	cmp.gt p_scr, p0 = tmp, cnt /* is it a minimalistic task? */
} { .mbb
(p_yy) add cnt = -1, cnt
-(p_scr) br.cond.dpnt.many .fraction_of_line // go move just a few
+(p_scr) br.cond.dpnt.many .fraction_of_line /* go move just a few */
;; }

{ .mib
	nop.m 0
	shr.u linecnt = cnt, LSIZE_SH
-(p_zr) br.cond.dptk.many .l1b // Jump to use stf.spill
+(p_zr) br.cond.dptk.many .l1b /* Jump to use stf.spill */
;; }

#ifndef GAS_ALIGN_BREAKS_UNWIND_INFO
-	.align 32 // -------- // L1A: store ahead into cache lines; fill later
+	.align 32 /* -------- L1A: store ahead into cache lines; fill later */
#endif
{ .mmi
-	and tmp = -(LINE_SIZE), cnt // compute end of range
-	mov ptr9 = ptr1 // used for prefetching
-	and cnt = (LINE_SIZE-1), cnt // remainder
+	and tmp = -(LINE_SIZE), cnt /* compute end of range */
+	mov ptr9 = ptr1 /* used for prefetching */
+	and cnt = (LINE_SIZE-1), cnt /* remainder */
} { .mmi
-	mov loopcnt = PREF_AHEAD-1 // default prefetch loop
-	cmp.gt p_scr, p0 = PREF_AHEAD, linecnt // check against actual value
+	mov loopcnt = PREF_AHEAD-1 /* default prefetch loop */
+	cmp.gt p_scr, p0 = PREF_AHEAD, linecnt /* check against actual value */
;; }
{ .mmi
-(p_scr) add loopcnt = -1, linecnt // start of stores
-	add ptr2 = 8, ptr1 // (beyond prefetch stores)
-	add ptr1 = tmp, ptr1 // first address beyond total
-;; } // range
+(p_scr) add loopcnt = -1, linecnt /* start of stores */
+	add ptr2 = 8, ptr1 /* (beyond prefetch stores) */
+	add ptr1 = tmp, ptr1 /* first address beyond total */
+;; } /* range */
{ .mmi
-	add tmp = -1, linecnt // next loop count
+	add tmp = -1, linecnt /* next loop count */
	movi0 ar.lc = loopcnt
;; }
.pref_l1a:
{ .mib
-	store [ptr9] = myval, 128 // Do stores one cache line apart
+	store [ptr9] = myval, 128 /* Do stores one cache line apart */
	nop.i 0
	br.cloop.dptk.few .pref_l1a
;; }
{ .mmi
-	add ptr0 = 16, ptr2 // Two stores in parallel
+	add ptr0 = 16, ptr2 /* Two stores in parallel */
	movi0 ar.lc = tmp
;; }
.l1ax:
@@ -211,7 +211,7 @@ ENTRY(memset)
{ .mmi
	store [ptr2] = myval, 8
	store [ptr0] = myval, 32
-	cmp.lt p_scr, p0 = ptr9, ptr1 // do we need more prefetching?
+	cmp.lt p_scr, p0 = ptr9, ptr1 /* do we need more prefetching? */
;; }
{ .mmb
	store [ptr2] = myval, 24
@@ -219,9 +219,9 @@ ENTRY(memset)
	br.cloop.dptk.few .l1ax
;; }
{ .mbb
-	cmp.le p_scr, p0 = 8, cnt // just a few bytes left ?
-(p_scr) br.cond.dpnt.many .fraction_of_line // Branch no. 2
-	br.cond.dpnt.many .move_bytes_from_alignment // Branch no. 3
+	cmp.le p_scr, p0 = 8, cnt /* just a few bytes left ? */
+(p_scr) br.cond.dpnt.many .fraction_of_line /* Branch no. 2 */
+	br.cond.dpnt.many .move_bytes_from_alignment /* Branch no. 3 */
;; }

#ifdef GAS_ALIGN_BREAKS_UNWIND_INFO
@@ -229,32 +229,32 @@ ENTRY(memset)
#else
	.align 32
#endif
-.l1b: // ------------------ // L1B: store ahead into cache lines; fill later
+.l1b: /* ------------------ L1B: store ahead into cache lines; fill later */
{ .mmi
-	and tmp = -(LINE_SIZE), cnt // compute end of range
-	mov ptr9 = ptr1 // used for prefetching
-	and cnt = (LINE_SIZE-1), cnt // remainder
+	and tmp = -(LINE_SIZE), cnt /* compute end of range */
+	mov ptr9 = ptr1 /* used for prefetching */
+	and cnt = (LINE_SIZE-1), cnt /* remainder */
} { .mmi
-	mov loopcnt = PREF_AHEAD-1 // default prefetch loop
-	cmp.gt p_scr, p0 = PREF_AHEAD, linecnt // check against actual value
+	mov loopcnt = PREF_AHEAD-1 /* default prefetch loop */
+	cmp.gt p_scr, p0 = PREF_AHEAD, linecnt /* check against actual value */
;; }
{ .mmi
(p_scr) add loopcnt = -1, linecnt
-	add ptr2 = 16, ptr1 // start of stores (beyond prefetch stores)
-	add ptr1 = tmp, ptr1 // first address beyond total range
+	add ptr2 = 16, ptr1 /* start of stores (beyond prefetch stores) */
+	add ptr1 = tmp, ptr1 /* first address beyond total range */
;; }
{ .mmi
-	add tmp = -1, linecnt // next loop count
+	add tmp = -1, linecnt /* next loop count */
	movi0 ar.lc = loopcnt
;; }
.pref_l1b:
{ .mib
-	stf.spill [ptr9] = f0, 128 // Do stores one cache line apart
+	stf.spill [ptr9] = f0, 128 /* Do stores one cache line apart */
	nop.i 0
	br.cloop.dptk.few .pref_l1b
;; }
{ .mmi
-	add ptr0 = 16, ptr2 // Two stores in parallel
+	add ptr0 = 16, ptr2 /* Two stores in parallel */
	movi0 ar.lc = tmp
;; }
.l1bx:
@@ -269,7 +269,7 @@ ENTRY(memset)
{ .mmi
	stf.spill [ptr2] = f0, 32
	stf.spill [ptr0] = f0, 64
-	cmp.lt p_scr, p0 = ptr9, ptr1 // do we need more prefetching?
+	cmp.lt p_scr, p0 = ptr9, ptr1 /* do we need more prefetching? */
;; }
{ .mmb
	stf.spill [ptr2] = f0, 32
@@ -277,14 +277,14 @@ ENTRY(memset)
	br.cloop.dptk.few .l1bx
;; }
{ .mib
-	cmp.gt p_scr, p0 = 8, cnt // just a few bytes left ?
+	cmp.gt p_scr, p0 = 8, cnt /* just a few bytes left ? */
(p_scr) br.cond.dpnt.many .move_bytes_from_alignment
;; }

.fraction_of_line:
{ .mib
	add ptr2 = 16, ptr1
-	shr.u loopcnt = cnt, 5 // loopcnt = cnt / 32
+	shr.u loopcnt = cnt, 5 /* loopcnt = cnt / 32 */
;; }
{ .mib
	cmp.eq p_scr, p0 = loopcnt, r0
@@ -292,13 +292,13 @@ ENTRY(memset)
(p_scr) br.cond.dpnt.many store_words
;; }
{ .mib
-	and cnt = 0x1f, cnt // compute the remaining cnt
+	and cnt = 0x1f, cnt /* compute the remaining cnt */
	movi0 ar.lc = loopcnt
;; }
#ifndef GAS_ALIGN_BREAKS_UNWIND_INFO
	.align 32
#endif
-.l2: // ---------------------------- // L2A: store 32B in 2 cycles
+.l2: /* ---------------------------- L2A: store 32B in 2 cycles */
{ .mmb
	store [ptr1] = myval, 8
	store [ptr2] = myval, 8
@@ -309,34 +309,34 @@ ENTRY(memset)
;; }
store_words:
{ .mib
-	cmp.gt p_scr, p0 = 8, cnt // just a few bytes left ?
-(p_scr) br.cond.dpnt.many .move_bytes_from_alignment // Branch
+	cmp.gt p_scr, p0 = 8, cnt /* just a few bytes left ? */
+(p_scr) br.cond.dpnt.many .move_bytes_from_alignment /* Branch */
;; }

{ .mmi
-	store [ptr1] = myval, 8 // store
-	cmp.le p_y, p_n = 16, cnt //
-	add cnt = -8, cnt // subtract
+	store [ptr1] = myval, 8 /* store */
+	cmp.le p_y, p_n = 16, cnt /* */
+	add cnt = -8, cnt /* subtract */
;; }
{ .mmi
-(p_y) store [ptr1] = myval, 8 // store
-(p_y) cmp.le.unc p_yy, p_nn = 16, cnt //
-(p_y) add cnt = -8, cnt // subtract
+(p_y) store [ptr1] = myval, 8 /* store */
+(p_y) cmp.le.unc p_yy, p_nn = 16, cnt /* */
+(p_y) add cnt = -8, cnt /* subtract */
;; }
-{ .mmi // store
-(p_yy) store [ptr1] = myval, 8 //
-(p_yy) add cnt = -8, cnt // subtract
+{ .mmi /* store */
+(p_yy) store [ptr1] = myval, 8 /* */
+(p_yy) add cnt = -8, cnt /* subtract */
;; }

.move_bytes_from_alignment:
{ .mib
	cmp.eq p_scr, p0 = cnt, r0
-	tbit.nz.unc p_y, p0 = cnt, 2 // should we terminate with a st4 ?
+	tbit.nz.unc p_y, p0 = cnt, 2 /* should we terminate with a st4 ? */
(p_scr) br.cond.dpnt.few .restore_and_exit
;; }
{ .mib
(p_y) st4 [ptr1] = value, 4
-	tbit.nz.unc p_yy, p0 = cnt, 1 // should we terminate with a st2 ?
+	tbit.nz.unc p_yy, p0 = cnt, 1 /* should we terminate with a st2 ? */
;; }
{ .mib
(p_yy) st2 [ptr1] = value, 2
@@ -362,38 +362,38 @@ store_words:
(p_n) add ptr2 = 2, ptr1
} { .mmi
(p_y) add ptr2 = 3, ptr1
-(p_y) st1 [ptr1] = value, 1 // fill 1 (odd-aligned) byte
-(p_y) add cnt = -1, cnt // [15, 14 (or less) left]
+(p_y) st1 [ptr1] = value, 1 /* fill 1 (odd-aligned) byte */
+(p_y) add cnt = -1, cnt /* [15, 14 (or less) left] */
;; }
{ .mmi
(p_yy) cmp.le.unc p_y, p0 = 8, cnt
-	add ptr3 = ptr1, cnt // prepare last store
+	add ptr3 = ptr1, cnt /* prepare last store */
	movi0 ar.lc = save_lc
} { .mmi
-(p_yy) st2 [ptr1] = value, 4 // fill 2 (aligned) bytes
-(p_yy) st2 [ptr2] = value, 4 // fill 2 (aligned) bytes
-(p_yy) add cnt = -4, cnt // [11, 10 (o less) left]
+(p_yy) st2 [ptr1] = value, 4 /* fill 2 (aligned) bytes */
+(p_yy) st2 [ptr2] = value, 4 /* fill 2 (aligned) bytes */
+(p_yy) add cnt = -4, cnt /* [11, 10 (o less) left] */
;; }
{ .mmi
(p_y) cmp.le.unc p_yy, p0 = 8, cnt
-	add ptr3 = -1, ptr3 // last store
-	tbit.nz p_scr, p0 = cnt, 1 // will there be a st2 at the end ?
+	add ptr3 = -1, ptr3 /* last store */
+	tbit.nz p_scr, p0 = cnt, 1 /* will there be a st2 at the end ? */
} { .mmi
-(p_y) st2 [ptr1] = value, 4 // fill 2 (aligned) bytes
-(p_y) st2 [ptr2] = value, 4 // fill 2 (aligned) bytes
-(p_y) add cnt = -4, cnt // [7, 6 (or less) left]
+(p_y) st2 [ptr1] = value, 4 /* fill 2 (aligned) bytes */
+(p_y) st2 [ptr2] = value, 4 /* fill 2 (aligned) bytes */
+(p_y) add cnt = -4, cnt /* [7, 6 (or less) left] */
;; }
{ .mmi
-(p_yy) st2 [ptr1] = value, 4 // fill 2 (aligned) bytes
-(p_yy) st2 [ptr2] = value, 4 // fill 2 (aligned) bytes
-	// [3, 2 (or less) left]
-	tbit.nz p_y, p0 = cnt, 0 // will there be a st1 at the end ?
+(p_yy) st2 [ptr1] = value, 4 /* fill 2 (aligned) bytes */
+(p_yy) st2 [ptr2] = value, 4 /* fill 2 (aligned) bytes */
+	/* [3, 2 (or less) left] */
+	tbit.nz p_y, p0 = cnt, 0 /* will there be a st1 at the end ? */
} { .mmi
(p_yy) add cnt = -4, cnt
;; }
{ .mmb
-(p_scr) st2 [ptr1] = value // fill 2 (aligned) bytes
-(p_y) st1 [ptr3] = value // fill last byte (using ptr3)
+(p_scr) st2 [ptr1] = value /* fill 2 (aligned) bytes */
+(p_y) st1 [ptr3] = value /* fill last byte (using ptr3) */
	br.ret.sptk.many rp
;; }
END(memset)