i420_rgb_mmx.h
/*****************************************************************************
 * transforms_yuvmmx.h: MMX YUV transformation assembly
 *****************************************************************************
 * Copyright (C) 1999-2004 VideoLAN
 * $Id: i420_rgb_mmx.h 7703 2004-05-17 19:38:13Z gbazin $
 *
 * Authors: Olie Lho <ollie@sis.com.tw>
 *          Gaël Hendryckx <jimmy@via.ecp.fr>
 *          Samuel Hocevar <sam@zoy.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111, USA.
 *****************************************************************************/
/* hope these constant values are cache line aligned */
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)
#define USED_U64(foo) \
    static const uint64_t foo __asm__ (#foo) __attribute__((used))
#else
#define USED_U64(foo) \
    static const uint64_t foo __asm__ (#foo) __attribute__((unused))
#endif
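/* The constants below are referenced only from the asm strings further
 * down, so GCC >= 3.3 must be told with __attribute__((used)) not to
 * optimise them away; older GCC does not know that attribute and gets
 * "unused" instead, which merely silences the unreferenced-symbol
 * warning. */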
USED_U64(mmx_80w)     = 0x0080008000800080ULL;
USED_U64(mmx_10w)     = 0x1010101010101010ULL;
USED_U64(mmx_00ffw)   = 0x00ff00ff00ff00ffULL;
USED_U64(mmx_Y_coeff) = 0x253f253f253f253fULL;
USED_U64(mmx_U_green) = 0xf37df37df37df37dULL;
USED_U64(mmx_U_blue)  = 0x4093409340934093ULL;
USED_U64(mmx_V_red)   = 0x3312331233123312ULL;
USED_U64(mmx_V_green) = 0xe5fce5fce5fce5fcULL;
USED_U64(mmx_mask_f8) = 0xf8f8f8f8f8f8f8f8ULL;
USED_U64(mmx_mask_fc) = 0xfcfcfcfcfcfcfcfcULL;
#undef USED_U64
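/* For reference, the pmulhw coefficients are the ITU-R BT.601 YCbCr->RGB
 * factors in signed Q2.13 fixed point (value * 2^13), four copies packed
 * per quadword:
 *
 *   0x253f =  9535 ~=  1.164 * 8192   (Y scale: 16..235 -> 0..255)
 *   0x4093 = 16531 ~=  2.018 * 8192   (Cb contribution to blue)
 *   0xf37d = -3203 ~= -0.391 * 8192   (Cb contribution to green)
 *   0x3312 = 13074 ~=  1.596 * 8192   (Cr contribution to red)
 *   0xe5fc = -6660 ~= -0.813 * 8192   (Cr contribution to green)
 *
 * mmx_10w and mmx_80w hold the 16 luma offset and 128 chroma bias,
 * mmx_00ffw selects the even bytes of a quadword, and mmx_mask_f8 /
 * mmx_mask_fc keep the top 5 / 6 bits of each byte for RGB15/RGB16
 * packing. */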
#define MMX_INIT_16 "                                                      \n\
movd      (%1), %%mm0       # Load 4 Cb       00 00 00 00 u3 u2 u1 u0       \n\
movd      (%2), %%mm1       # Load 4 Cr       00 00 00 00 v3 v2 v1 v0       \n\
pxor      %%mm4, %%mm4      # zero mm4                                      \n\
movq      (%0), %%mm6       # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
#movl      $0, (%3)         # cache preload for image                       \n\
"
#define MMX_INIT_16_GRAY "                                                 \n\
movq      (%0), %%mm6       # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
#movl      $0, (%3)         # cache preload for image                       \n\
"
#define MMX_INIT_32 "                                                      \n\
movd      (%1), %%mm0       # Load 4 Cb       00 00 00 00 u3 u2 u1 u0       \n\
movl      $0, (%3)          # cache preload for image                       \n\
movd      (%2), %%mm1       # Load 4 Cr       00 00 00 00 v3 v2 v1 v0       \n\
pxor      %%mm4, %%mm4      # zero mm4                                      \n\
movq      (%0), %%mm6       # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
"
/*
 * Do the multiply part of the conversion for even and odd pixels,
 * register usage:
 * mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
 * mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd  pixels,
 * mm6 -> Y even, mm7 -> Y odd
 */
#define MMX_YUV_MUL "                                                      \n\
# convert the chroma part                                                   \n\
punpcklbw %%mm4, %%mm0          # scatter 4 Cb    00 u3 00 u2 00 u1 00 u0   \n\
punpcklbw %%mm4, %%mm1          # scatter 4 Cr    00 v3 00 v2 00 v1 00 v0   \n\
psubsw    mmx_80w, %%mm0        # Cb -= 128                                 \n\
psubsw    mmx_80w, %%mm1        # Cr -= 128                                 \n\
psllw     $3, %%mm0             # Promote precision                         \n\
psllw     $3, %%mm1             # Promote precision                         \n\
movq      %%mm0, %%mm2          # Copy 4 Cb       00 u3 00 u2 00 u1 00 u0   \n\
movq      %%mm1, %%mm3          # Copy 4 Cr       00 v3 00 v2 00 v1 00 v0   \n\
pmulhw    mmx_U_green, %%mm2    # Mul Cb with green coeff -> Cb green       \n\
pmulhw    mmx_V_green, %%mm3    # Mul Cr with green coeff -> Cr green       \n\
pmulhw    mmx_U_blue, %%mm0     # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0   \n\
pmulhw    mmx_V_red, %%mm1      # Mul Cr -> Cred  00 r3 00 r2 00 r1 00 r0   \n\
paddsw    %%mm3, %%mm2          # Cb green + Cr green -> Cgreen             \n\
                                                                            \n\
# convert the luma part                                                     \n\
psubusb   mmx_10w, %%mm6        # Y -= 16                                   \n\
movq      %%mm6, %%mm7          # Copy 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
pand      mmx_00ffw, %%mm6      # get Y even      00 Y6 00 Y4 00 Y2 00 Y0   \n\
psrlw     $8, %%mm7             # get Y odd       00 Y7 00 Y5 00 Y3 00 Y1   \n\
psllw     $3, %%mm6             # Promote precision                         \n\
psllw     $3, %%mm7             # Promote precision                         \n\
pmulhw    mmx_Y_coeff, %%mm6    # Mul 4 Y even    00 y6 00 y4 00 y2 00 y0   \n\
pmulhw    mmx_Y_coeff, %%mm7    # Mul 4 Y odd     00 y7 00 y5 00 y3 00 y1   \n\
"
/*
 * Do the addition part of the conversion for even and odd pixels,
 * register usage:
 * mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
 * mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd  pixels,
 * mm6 -> Y even, mm7 -> Y odd
 */
#define MMX_YUV_ADD "                                                      \n\
# Add the luma to each chroma component                                     \n\
movq      %%mm0, %%mm3          # Copy Cblue                                \n\
movq      %%mm1, %%mm4          # Copy Cred                                 \n\
movq      %%mm2, %%mm5          # Copy Cgreen                               \n\
paddsw    %%mm6, %%mm0          # Y even + Cblue  00 B6 00 B4 00 B2 00 B0   \n\
paddsw    %%mm7, %%mm3          # Y odd  + Cblue  00 B7 00 B5 00 B3 00 B1   \n\
paddsw    %%mm6, %%mm1          # Y even + Cred   00 R6 00 R4 00 R2 00 R0   \n\
paddsw    %%mm7, %%mm4          # Y odd  + Cred   00 R7 00 R5 00 R3 00 R1   \n\
paddsw    %%mm6, %%mm2          # Y even + Cgreen 00 G6 00 G4 00 G2 00 G0   \n\
paddsw    %%mm7, %%mm5          # Y odd  + Cgreen 00 G7 00 G5 00 G3 00 G1   \n\
                                                                            \n\
# Limit RGB even to 0..255                                                  \n\
packuswb  %%mm0, %%mm0          # B6 B4 B2 B0 / B6 B4 B2 B0                 \n\
packuswb  %%mm1, %%mm1          # R6 R4 R2 R0 / R6 R4 R2 R0                 \n\
packuswb  %%mm2, %%mm2          # G6 G4 G2 G0 / G6 G4 G2 G0                 \n\
                                                                            \n\
# Limit RGB odd to 0..255                                                   \n\
packuswb  %%mm3, %%mm3          # B7 B5 B3 B1 / B7 B5 B3 B1                 \n\
packuswb  %%mm4, %%mm4          # R7 R5 R3 R1 / R7 R5 R3 R1                 \n\
packuswb  %%mm5, %%mm5          # G7 G5 G3 G1 / G7 G5 G3 G1                 \n\
                                                                            \n\
# Interleave RGB even and odd                                               \n\
punpcklbw %%mm3, %%mm0          #                 B7 B6 B5 B4 B3 B2 B1 B0   \n\
punpcklbw %%mm4, %%mm1          #                 R7 R6 R5 R4 R3 R2 R1 R0   \n\
punpcklbw %%mm5, %%mm2          #                 G7 G6 G5 G4 G3 G2 G1 G0   \n\
"
/*
 * Grayscale case, only use Y
 */
#define MMX_YUV_GRAY "                                                     \n\
# convert the luma part                                                     \n\
psubusb   mmx_10w, %%mm6        # Y -= 16                                   \n\
movq      %%mm6, %%mm7          # Copy 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
pand      mmx_00ffw, %%mm6      # get Y even      00 Y6 00 Y4 00 Y2 00 Y0   \n\
psrlw     $8, %%mm7             # get Y odd       00 Y7 00 Y5 00 Y3 00 Y1   \n\
psllw     $3, %%mm6             # Promote precision                         \n\
psllw     $3, %%mm7             # Promote precision                         \n\
pmulhw    mmx_Y_coeff, %%mm6    # Mul 4 Y even    00 g6 00 g4 00 g2 00 g0   \n\
pmulhw    mmx_Y_coeff, %%mm7    # Mul 4 Y odd     00 g7 00 g5 00 g3 00 g1   \n\
packuswb  %%mm6, %%mm6          # g6 g4 g2 g0 / g6 g4 g2 g0                 \n\
packuswb  %%mm7, %%mm7          # g7 g5 g3 g1 / g7 g5 g3 g1                 \n\
punpcklbw %%mm7, %%mm6          # g7 g6 g5 g4 g3 g2 g1 g0                   \n\
"
#define MMX_UNPACK_16_GRAY "                                                \n\
movq      %%mm6, %%mm5          # Copy 8 gray     g7 g6 g5 g4 g3 g2 g1 g0   \n\
pand      mmx_mask_f8, %%mm6    # g7g6g5g4 g3______ -> red/blue field       \n\
pand      mmx_mask_fc, %%mm5    # g7g6g5g4 g3g2____ -> green field          \n\
movq      %%mm6, %%mm7          # Copy the 5-bit field                      \n\
psrlw     $3, %%mm7             # ______g7 g6g5g4g3 -> blue in low bits     \n\
pxor      %%mm3, %%mm3          # zero mm3                                  \n\
movq      %%mm7, %%mm2          # Copy blue bytes (for pixel 4-7)           \n\
movq      %%mm5, %%mm0          # Copy green bytes (for pixel 4-7)          \n\
punpcklbw %%mm3, %%mm5          # expand green    pixel 0-3                 \n\
punpcklbw %%mm6, %%mm7          # red | blue      pixel 0-3                 \n\
psllw     $3, %%mm5             # move green to bits 10-5                   \n\
por       %%mm5, %%mm7          # RGB565 gray     pixel 0-3                 \n\
movq      %%mm7, (%3)           # store pixel 0-3                           \n\
punpckhbw %%mm3, %%mm0          # expand green    pixel 4-7                 \n\
punpckhbw %%mm6, %%mm2          # red | blue      pixel 4-7                 \n\
psllw     $3, %%mm0             # move green to bits 10-5                   \n\
movq      8(%0), %%mm6          # Load next 8 Y                             \n\
por       %%mm0, %%mm2          # RGB565 gray     pixel 4-7                 \n\
movq      %%mm2, 8(%3)          # store pixel 4-7                           \n\
"
/*
 * convert RGB plane to RGB 15 bits,
 * mm0 -> B, mm1 -> R, mm2 -> G,
 * mm0 -> packed pixel 0-3, mm5 -> packed pixel 4-7,
 * mm4 -> zero, mm7 -> G copy
 */
#define MMX_UNPACK_15 "                                                    \n\
# mask unneeded bits off                                                    \n\
pand      mmx_mask_f8, %%mm0    # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
psrlw     $3,%%mm0              # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
pand      mmx_mask_f8, %%mm2    # g7g6g5g4 g3______ g7g6g5g4 g3______       \n\
pand      mmx_mask_f8, %%mm1    # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
psrlw     $1,%%mm1              # __r7r6r5 r4r3____ __r7r6r5 r4r3____       \n\
pxor      %%mm4, %%mm4          # zero mm4                                  \n\
movq      %%mm0, %%mm5          # Copy B7-B0                                \n\
movq      %%mm2, %%mm7          # Copy G7-G0                                \n\
                                                                            \n\
# convert rgb24 plane to rgb15 pack for pixel 0-3                           \n\
punpcklbw %%mm4, %%mm2          # ________ ________ g7g6g5g4 g3______       \n\
punpcklbw %%mm1, %%mm0          # __r7r6r5 r4r3____ ______b7 b6b5b4b3       \n\
psllw     $2,%%mm2              # ________ ____g7g6 g5g4g3__ ________       \n\
por       %%mm2, %%mm0          # __r7r6r5 r4r3g7g6 g5g4g3b7 b6b5b4b3       \n\
movq      8(%0), %%mm6          # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
movq      %%mm0, (%3)           # store pixel 0-3                           \n\
                                                                            \n\
# convert rgb24 plane to rgb15 pack for pixel 4-7                           \n\
punpckhbw %%mm4, %%mm7          # ________ ________ g7g6g5g4 g3______       \n\
punpckhbw %%mm1, %%mm5          # __r7r6r5 r4r3____ ______b7 b6b5b4b3       \n\
psllw     $2,%%mm7              # ________ ____g7g6 g5g4g3__ ________       \n\
movd      4(%1), %%mm0          # Load 4 Cb       __ __ __ __ u3 u2 u1 u0   \n\
por       %%mm7, %%mm5          # __r7r6r5 r4r3g7g6 g5g4g3b7 b6b5b4b3       \n\
movd      4(%2), %%mm1          # Load 4 Cr       __ __ __ __ v3 v2 v1 v0   \n\
movq      %%mm5, 8(%3)          # store pixel 4-7                           \n\
"
/*
 * convert RGB plane to RGB 16 bits,
 * mm0 -> B, mm1 -> R, mm2 -> G,
 * mm0 -> packed pixel 0-3, mm5 -> packed pixel 4-7,
 * mm4 -> zero, mm7 -> G copy
 */
#define MMX_UNPACK_16 "                                                    \n\
# mask unneeded bits off                                                    \n\
pand      mmx_mask_f8, %%mm0    # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
pand      mmx_mask_fc, %%mm2    # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____       \n\
pand      mmx_mask_f8, %%mm1    # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
psrlw     $3,%%mm0              # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
pxor      %%mm4, %%mm4          # zero mm4                                  \n\
movq      %%mm0, %%mm5          # Copy B7-B0                                \n\
movq      %%mm2, %%mm7          # Copy G7-G0                                \n\
                                                                            \n\
# convert rgb24 plane to rgb16 pack for pixel 0-3                           \n\
punpcklbw %%mm4, %%mm2          # ________ ________ g7g6g5g4 g3g2____       \n\
punpcklbw %%mm1, %%mm0          # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $3,%%mm2              # ________ __g7g6g5 g4g3g2__ ________       \n\
por       %%mm2, %%mm0          # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
movq      8(%0), %%mm6          # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
movq      %%mm0, (%3)           # store pixel 0-3                           \n\
                                                                            \n\
# convert rgb24 plane to rgb16 pack for pixel 4-7                           \n\
punpckhbw %%mm4, %%mm7          # ________ ________ g7g6g5g4 g3g2____       \n\
punpckhbw %%mm1, %%mm5          # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
psllw     $3,%%mm7              # ________ __g7g6g5 g4g3g2__ ________       \n\
movd      4(%1), %%mm0          # Load 4 Cb       __ __ __ __ u3 u2 u1 u0   \n\
por       %%mm7, %%mm5          # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
movd      4(%2), %%mm1          # Load 4 Cr       __ __ __ __ v3 v2 v1 v0   \n\
movq      %%mm5, 8(%3)          # store pixel 4-7                           \n\
"
/*
 * convert RGB plane to RGB packed format,
 * mm0 -> B, mm1 -> R, mm2 -> G, mm3 -> 0,
 * mm4 -> GB, mm5 -> AR pixel 4-7,
 * mm6 -> GB, mm7 -> AR pixel 0-3
 */
#define MMX_UNPACK_32 "                                                    \n\
pxor      %%mm3, %%mm3  # zero mm3                                          \n\
movq      %%mm0, %%mm6  #                 B7 B6 B5 B4 B3 B2 B1 B0           \n\
movq      %%mm1, %%mm7  #                 R7 R6 R5 R4 R3 R2 R1 R0           \n\
movq      %%mm0, %%mm4  #                 B7 B6 B5 B4 B3 B2 B1 B0           \n\
movq      %%mm1, %%mm5  #                 R7 R6 R5 R4 R3 R2 R1 R0           \n\
punpcklbw %%mm2, %%mm6  #                 G3 B3 G2 B2 G1 B1 G0 B0           \n\
punpcklbw %%mm3, %%mm7  #                 00 R3 00 R2 00 R1 00 R0           \n\
punpcklwd %%mm7, %%mm6  #                 00 R1 G1 B1 00 R0 G0 B0           \n\
movq      %%mm6, (%3)   # Store ARGB1 ARGB0                                 \n\
movq      %%mm0, %%mm6  #                 B7 B6 B5 B4 B3 B2 B1 B0           \n\
punpcklbw %%mm2, %%mm6  #                 G3 B3 G2 B2 G1 B1 G0 B0           \n\
punpckhwd %%mm7, %%mm6  #                 00 R3 G3 B3 00 R2 G2 B2           \n\
movq      %%mm6, 8(%3)  # Store ARGB3 ARGB2                                 \n\
punpckhbw %%mm2, %%mm4  #                 G7 B7 G6 B6 G5 B5 G4 B4           \n\
punpckhbw %%mm3, %%mm5  #                 00 R7 00 R6 00 R5 00 R4           \n\
punpcklwd %%mm5, %%mm4  #                 00 R5 G5 B5 00 R4 G4 B4           \n\
movq      %%mm4, 16(%3) # Store ARGB5 ARGB4                                 \n\
movq      %%mm0, %%mm4  #                 B7 B6 B5 B4 B3 B2 B1 B0           \n\
punpckhbw %%mm2, %%mm4  #                 G7 B7 G6 B6 G5 B5 G4 B4           \n\
punpckhwd %%mm5, %%mm4  #                 00 R7 G7 B7 00 R6 G6 B6           \n\
movq      %%mm4, 24(%3) # Store ARGB7 ARGB6                                 \n\
                                                                            \n\
#movd      4(%1), %%mm0  # Load 4 Cb       00 00 00 00 u3 u2 u1 u0           \n\
#movd      4(%2), %%mm1  # Load 4 Cr       00 00 00 00 v3 v2 v1 v0           \n\
#pxor      %%mm4, %%mm4  # zero mm4                                          \n\
#movq      8(%0), %%mm6  # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0           \n\
"