--- xf86-video-siliconmotion-1.7.7.orig/src/smi_video.c	2012-07-17 06:53:21.000000000 +0200
+++ xf86-video-siliconmotion-1.7.7/src/smi_video.c	2014-03-14 18:35:37.546382342 +0100
@@ -275,6 +275,7 @@ static XF86ImageRec SMI_VideoImages[] =
     XVIMAGE_YUY2,
     XVIMAGE_YV12,
     XVIMAGE_I420,
+    XVIMAGE_UYVY,
     {
	FOURCC_RV15,			/* id */
	XvRGB,				/* type */
@@ -1461,6 +1462,130 @@ SMI_QueryBestSize(
     LEAVE();
 }
 
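+/*
+ * Drop-in replacement for xf86XVCopyYUV12ToPacked(): converts a planar
+ * 4:2:0 source (separate Y, V and U planes) to packed YUY2 using the
+ * Loongson 2E/2F multimedia (MMI) instructions.
+ */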
+static void myXVCopyYUV12ToPacked(const unsigned char *srcy, const unsigned char *srcv, const unsigned char *srcu,
+                                  unsigned char *dst, int srcPitchy, int srcPitchuv, int dstPitch, int h, int w)
+{
+    int i, j;
+    unsigned char const *y, *u, *v;
+    int dstinc, yinc, uinc, vinc;
+
+    y = srcy;
+    u = srcu;
+    v = srcv;
+
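+    /* Increments from the end of one row to the start of the next;
+       the chroma planes have half the horizontal resolution. */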
+    dstinc = dstPitch - 2*w;
+    yinc = srcPitchy - w;
+    uinc = srcPitchuv - w/2;
+    vinc = srcPitchuv - w/2;
+
+    for (i = 0; i < h; i++) {
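+        /* Main MMI loop: interleave and store 16 pixels (32 output
+           bytes) per iteration, using the FP registers as vectors. */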
+        asm (
+//          ".set arch=loongson2f\n\t"
+            ".set noreorder\n\t"
+            "move $8, %8               \n\t"
+            "1:                        \n\t"
+            "beqz $8, 2f               \n\t"
+            "xor $f0, $f0, $f0         \n\t"
+            "ldc1 $f4, (%0)            \n\t"
+            "punpcklbh $f2, $f4, $f0   \n\t"
+            "punpckhbh $f4, $f4, $f0   \n\t"
+            "ldc1 $f16, 8(%0)          \n\t"
+            "punpcklbh $f14, $f16, $f0 \n\t"
+            "punpckhbh $f16, $f16, $f0 \n\t"
+
+            "lwc1 $f8, (%1)            \n\t"
+            "lwc1 $f12, (%2)           \n\t"
+            "punpcklbh $f8, $f8, $f12  \n\t"
+            "punpcklbh $f6, $f0, $f8   \n\t"
+            "punpckhbh $f8, $f0, $f8   \n\t"
+            "lwc1 $f18, 4(%1)          \n\t"
+            "lwc1 $f12, 4(%2)          \n\t"
+            "punpcklbh $f18, $f18, $f12\n\t"
+            "punpcklbh $f10, $f0, $f18 \n\t"
+            "punpckhbh $f12, $f0, $f18 \n\t"
+
+            "or $f2, $f2, $f6          \n\t"
+            "or $f4, $f4, $f8          \n\t"
+            "or $f14, $f14, $f10       \n\t"
+            "or $f16, $f16, $f12       \n\t"
+
+            "sdc1 $f2, (%3)            \n\t"
+            "sdc1 $f4, 8(%3)           \n\t"
+            "add %0, 16                \n\t"
+            "add %1, 8                 \n\t"
+            "add %2, 8                 \n\t"
+            "sdc1 $f14, 0x10(%3)       \n\t"
+            "sdc1 $f16, 0x18(%3)       \n\t"
+            "add $8, -1                \n\t"
+            "b 1b                      \n\t"
+            "add %3, 32                \n\t"
+            "2:                        \n\t"
+            ".set reorder\n\t"
+            : "=r" (y), "=r" (u), "=r" (v), "=r" (dst)
+            : "0" (y), "1" (u), "2" (v), "3" (dst), "r" (w>>4)
- + : "memory","$8"
+        );
+
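+        /* Same interleaving for one remaining 8-pixel block, if any. */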
+        asm (
+//          ".set arch=loongson2f\n\t"
+            ".set noreorder\n\t"
+            "move $8, %8               \n\t"
+            "1:                        \n\t"
+            "beqz $8, 2f               \n\t"
+            "xor $f0, $f0, $f0         \n\t"
+            "ldc1 $f4, (%0)            \n\t"
+            "punpcklbh $f2, $f4, $f0   \n\t"
+            "punpckhbh $f4, $f4, $f0   \n\t"
+
+            "lwc1 $f8, (%1)            \n\t"
+            "lwc1 $f12, (%2)           \n\t"
+            "punpcklbh $f8, $f8, $f12  \n\t"
+            "punpcklbh $f6, $f0, $f8   \n\t"
+            "punpckhbh $f8, $f0, $f8   \n\t"
+
+            "or $f2, $f2, $f6          \n\t"
+            "or $f4, $f4, $f8          \n\t"
+
+            "sdc1 $f2, (%3)            \n\t"
+            "sdc1 $f4, 8(%3)           \n\t"
+            "add %0, 8                 \n\t"
+            "add %1, 4                 \n\t"
+            "add %2, 4                 \n\t"
+            "add $8, -1                \n\t"
+            "b 1b                      \n\t"
+            "add %3, 16                \n\t"
+            "2:                        \n\t"
+            ".set reorder\n\t"
+            : "=r" (y), "=r" (u), "=r" (v), "=r" (dst)
+            : "0" (y), "1" (u), "2" (v), "3" (dst), "r" ((w&0xf)/8)
- + : "memory","$8"
+        );
+
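+        /* Scalar tail: pack the last (w & 7) pixels as Y U Y V pairs. */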
+        for (j = (w&7)/2; j; j--) {
+            *dst++ = *y++;
+            *dst++ = *u++;
+            *dst++ = *y++;
+            *dst++ = *v++;
+        }
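+        /* In 4:2:0 each chroma row is shared by two luma rows, so u/v
+           advance to the next row only after odd-numbered rows. */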
+        y += yinc;
+        u = (i%2) ? (u + uinc) : (u - w/2);
+        v = (i%2) ? (v + vinc) : (v - w/2);
+        dst += dstinc;
+    }
+}
 
 static int
 SMI_PutImage(
@@ -1592,7 +1717,7 @@ SMI_PutImage(
	offset3 = tmp;
     }
     nLines = ((((y2 + 0xffff) >> 16) + 1) & ~1) - top;
-    xf86XVCopyYUV12ToPacked(buf + (top * srcPitch) + (left >> 1),
+    myXVCopyYUV12ToPacked(buf + (top * srcPitch) + (left >> 1),
			    buf + offset2, buf + offset3, dstStart,
			    srcPitch, srcPitch2, dstPitch, nLines,
			    nPixels);