loongson.patch 3.5 KB

diff -Nur xf86-video-siliconmotion-1.7.4/src/smi_video.c xf86-video-siliconmotion-1.7.4-loongson/src/smi_video.c
--- xf86-video-siliconmotion-1.7.4/src/smi_video.c	2010-02-25 08:33:07.000000000 +0100
+++ xf86-video-siliconmotion-1.7.4-loongson/src/smi_video.c	2012-03-13 04:18:18.634989344 +0100
@@ -276,6 +276,7 @@
     XVIMAGE_YUY2,
     XVIMAGE_YV12,
     XVIMAGE_I420,
+    XVIMAGE_UYVY,
     {
         FOURCC_RV15,    /* id */
         XvRGB,          /* type */
@@ -1464,6 +1465,117 @@
     LEAVE();
 }
 
+static void myXVCopyYUV12ToPacked(const unsigned char *srcy, const unsigned char *srcv, const unsigned char *srcu,
+                                  unsigned char *dst, int srcPitchy, int srcPitchuv, int dstPitch, int h, int w)
+{
+    int i, j;
+    unsigned char const *y, *u, *v;
+    int dstinc, yinc, uinc, vinc;
+
+    y = srcy;
+    u = srcu;
+    v = srcv;
+
+    dstinc = dstPitch - 2*w;
+    yinc = srcPitchy - w;
+    uinc = srcPitchuv - w/2;
+    vinc = srcPitchuv - w/2;
+
+    for (i = 0; i < h; i++) {
+        asm (
+//          ".set arch=loongson2f\n\t"
+            ".set noreorder\n\t"
+            "move $8, %8                \n\t"
+            "1:                         \n\t"
+            "beqz $8, 2f                \n\t"
+            "xor $f0, $f0, $f0          \n\t"
+            "ldc1 $f4, (%0)             \n\t"
+            "punpcklbh $f2, $f4, $f0    \n\t"
+            "punpckhbh $f4, $f4, $f0    \n\t"
+            "ldc1 $f16, 8(%0)           \n\t"
+            "punpcklbh $f14, $f16, $f0  \n\t"
+            "punpckhbh $f16, $f16, $f0  \n\t"
+
+            "lwc1 $f8, (%1)             \n\t"
+            "lwc1 $f12, (%2)            \n\t"
+            "punpcklbh $f8, $f8, $f12   \n\t"
+            "punpcklbh $f6, $f0, $f8    \n\t"
+            "punpckhbh $f8, $f0, $f8    \n\t"
+            "lwc1 $f18, 4(%1)           \n\t"
+            "lwc1 $f12, 4(%2)           \n\t"
+            "punpcklbh $f18, $f18, $f12 \n\t"
+            "punpcklbh $f10, $f0, $f18  \n\t"
+            "punpckhbh $f12, $f0, $f18  \n\t"
+
+            "or $f2, $f2, $f6           \n\t"
+            "or $f4, $f4, $f8           \n\t"
+            "or $f14, $f14, $f10        \n\t"
+            "or $f16, $f16, $f12        \n\t"
+
+            "sdc1 $f2, (%3)             \n\t"
+            "sdc1 $f4, 8(%3)            \n\t"
+            "add %0, 16                 \n\t"
+            "add %1, 8                  \n\t"
+            "add %2, 8                  \n\t"
+            "sdc1 $f14, 0x10(%3)        \n\t"
+            "sdc1 $f16, 0x18(%3)        \n\t"
+            "add $8, -1                 \n\t"
+            "b 1b                       \n\t"
+            "add %3, 32                 \n\t"
+            "2:                         \n\t"
+            ".set reorder\n\t"
+            : "=r" (y), "=r" (u), "=r" (v), "=r" (dst)
+            : "0" (y), "1" (u), "2" (v), "3" (dst), "r" (w>>4)
+            : "memory", "$8"
+        );
+
+        asm (
+//          ".set arch=loongson2f\n\t"
+            ".set noreorder\n\t"
+            "move $8, %8                \n\t"
+            "1:                         \n\t"
+            "beqz $8, 2f                \n\t"
+            "xor $f0, $f0, $f0          \n\t"
+            "ldc1 $f4, (%0)             \n\t"
+            "punpcklbh $f2, $f4, $f0    \n\t"
+            "punpckhbh $f4, $f4, $f0    \n\t"
+
+            "lwc1 $f8, (%1)             \n\t"
+            "lwc1 $f12, (%2)            \n\t"
+            "punpcklbh $f8, $f8, $f12   \n\t"
+            "punpcklbh $f6, $f0, $f8    \n\t"
+            "punpckhbh $f8, $f0, $f8    \n\t"
+
+            "or $f2, $f2, $f6           \n\t"
+            "or $f4, $f4, $f8           \n\t"
+
+            "sdc1 $f2, (%3)             \n\t"
+            "sdc1 $f4, 8(%3)            \n\t"
+            "add %0, 8                  \n\t"
+            "add %1, 4                  \n\t"
+            "add %2, 4                  \n\t"
+            "add $8, -1                 \n\t"
+            "b 1b                       \n\t"
+            "add %3, 16                 \n\t"
+            "2:                         \n\t"
+            ".set reorder\n\t"
+            : "=r" (y), "=r" (u), "=r" (v), "=r" (dst)
+            : "0" (y), "1" (u), "2" (v), "3" (dst), "r" ((w&0xf)/8)
+            : "memory", "$8"
+        );
+
+        for (j = (w&7)/2; j; j--) {
+            *dst++ = *y++;
+            *dst++ = *u++;
+            *dst++ = *y++;
+            *dst++ = *v++;
+        }
+        y += yinc;
+        u = (i%2) ? (u + uinc) : (u - w/2);
+        v = (i%2) ? (v + vinc) : (v - w/2);
+        dst += dstinc;
+    }
+}
+
 static int
 SMI_PutImage(
@@ -1592,7 +1704,7 @@
 	    offset3 = tmp;
 	}
 	nLines = ((((y2 + 0xffff) >> 16) + 1) & ~1) - top;
-	xf86XVCopyYUV12ToPacked(buf + (top * srcPitch) + (left >> 1),
+	myXVCopyYUV12ToPacked(buf + (top * srcPitch) + (left >> 1),
 				buf + offset2, buf + offset3, dstStart,
 				srcPitch, srcPitch2, dstPitch, nLines,
 				nPixels);
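
What the added routine does: it replaces the generic xf86XVCopyYUV12ToPacked with a Loongson MMI version of the planar-4:2:0 to packed-YUV conversion. The 64-bit ldc1/lwc1 loads pull in 16 luma bytes and 8 bytes each of U and V per iteration, and the punpcklbh/punpckhbh byte-interleave instructions combined with or merge them into Y-U-Y-V quadruples, with each chroma row reused for two consecutive luma rows; the plain C loop at the end handles any leftover pixels. The following is a minimal portable C sketch of the same conversion, for reference only. It is not part of the patch: the name yv12_to_packed_scalar is invented here, and it assumes an even width with no tail handling. The argument order mirrors myXVCopyYUV12ToPacked, so the V plane pointer comes before the U plane pointer.

#include <stddef.h>

/* Illustrative scalar equivalent of the MMI routine above (not part of the
 * patch). Pitches are in bytes; w is assumed even. */
static void yv12_to_packed_scalar(const unsigned char *srcy,
                                  const unsigned char *srcv,
                                  const unsigned char *srcu,
                                  unsigned char *dst,
                                  int srcPitchy, int srcPitchuv,
                                  int dstPitch, int h, int w)
{
    int i, j;

    for (i = 0; i < h; i++) {
        const unsigned char *y = srcy + (size_t)i * srcPitchy;
        /* 4:2:0 chroma: one U/V row is shared by two consecutive luma rows */
        const unsigned char *u = srcu + (size_t)(i / 2) * srcPitchuv;
        const unsigned char *v = srcv + (size_t)(i / 2) * srcPitchuv;
        unsigned char *d = dst + (size_t)i * dstPitch;

        /* emit one Y-U-Y-V quadruple per pair of pixels */
        for (j = 0; j < w / 2; j++) {
            *d++ = *y++;
            *d++ = *u++;
            *d++ = *y++;
            *d++ = *v++;
        }
    }
}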