mc.c
Uploaded by: lctgjx
Upload date: 2022-06-04
Archive size: 8887k
File size: 29k
Source category:

Streaming media / Mpeg4 / MP4

Development platform:

Visual C++
/*****************************************************************************
 * mc.c: h264 encoder library (Motion Compensation)
 *****************************************************************************
 * Copyright (C) 2003-2008 x264 project
 *
 * Authors: Eric Petit <eric.petit@lapsus.org>
 *          Guillaume Poirier <gpoirier@mplayerhq.hu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *****************************************************************************/
  23. #include <stdlib.h>
  24. #include <stdio.h>
  25. #include <string.h>
  26. #include <stdint.h>
  27. #include <stdarg.h>
  28. #include "x264.h"
  29. #include "common/common.h"
  30. #include "common/mc.h"
  31. #include "mc.h"
  32. #include "ppccommon.h"
  33. typedef void (*pf_mc_t)( uint8_t *src, int i_src,
  34.                          uint8_t *dst, int i_dst, int i_height );
  35. static const int hpel_ref0[16] = {0,1,1,1,0,1,1,1,2,3,3,3,0,1,1,1};
  36. static const int hpel_ref1[16] = {0,0,0,0,2,2,3,2,2,2,3,2,2,2,3,2};
  37. static inline int x264_tapfilter( uint8_t *pix, int i_pix_next )
  38. {
  39.     return pix[-2*i_pix_next] - 5*pix[-1*i_pix_next] + 20*(pix[0] +
  40.            pix[1*i_pix_next]) - 5*pix[ 2*i_pix_next] +
  41.            pix[ 3*i_pix_next];
  42. }
  43. static inline int x264_tapfilter1( uint8_t *pix )
  44. {
  45.     return pix[-2] - 5*pix[-1] + 20*(pix[0] + pix[1]) - 5*pix[ 2] +
  46.            pix[ 3];
  47. }
  48. static inline void x264_pixel_avg2_w4_altivec( uint8_t *dst,  int i_dst,
  49.                                                uint8_t *src1, int i_src1,
  50.                                                uint8_t *src2, int i_height )
  51. {
  52.     int x, y;
  53.     for( y = 0; y < i_height; y++ )
  54.     {
  55.         for( x = 0; x < 4; x++ )
  56.         {
  57.             dst[x] = ( src1[x] + src2[x] + 1 ) >> 1;
  58.         }
  59.         dst  += i_dst;
  60.         src1 += i_src1;
  61.         src2 += i_src1;
  62.     }
  63. }
  64. static inline void x264_pixel_avg2_w8_altivec( uint8_t *dst,  int i_dst,
  65.                                                uint8_t *src1, int i_src1,
  66.                                                uint8_t *src2, int i_height )
  67. {
  68.     int y;
  69.     vec_u8_t src1v, src2v;
  70.     PREP_LOAD;
  71.     PREP_STORE8;
  72.     PREP_LOAD_SRC( src1 );
  73.     PREP_LOAD_SRC( src2 );
  74.     for( y = 0; y < i_height; y++ )
  75.     {
  76.         VEC_LOAD( src1, src1v, 8, vec_u8_t, src1 );
  77.         VEC_LOAD( src2, src2v, 8, vec_u8_t, src2 );
  78.         src1v = vec_avg( src1v, src2v );
  79.         VEC_STORE8( src1v, dst );
  80.         dst  += i_dst;
  81.         src1 += i_src1;
  82.         src2 += i_src1;
  83.     }
  84. }
  85. static inline void x264_pixel_avg2_w16_altivec( uint8_t *dst,  int i_dst,
  86.                                                 uint8_t *src1, int i_src1,
  87.                                                 uint8_t *src2, int i_height )
  88. {
  89.     int y;
  90.     vec_u8_t src1v, src2v;
  91.     PREP_LOAD;
  92.     PREP_LOAD_SRC( src1 );
  93.     PREP_LOAD_SRC( src2 );
  94.     for( y = 0; y < i_height; y++ )
  95.     {
  96.         VEC_LOAD( src1, src1v, 16, vec_u8_t, src1 );
  97.         VEC_LOAD( src2, src2v, 16, vec_u8_t, src2 );
  98.         src1v = vec_avg( src1v, src2v );
  99.         vec_st(src1v, 0, dst);
  100.         dst  += i_dst;
  101.         src1 += i_src1;
  102.         src2 += i_src1;
  103.     }
  104. }
  105. static inline void x264_pixel_avg2_w20_altivec( uint8_t *dst,  int i_dst,
  106.                                                 uint8_t *src1, int i_src1,
  107.                                                 uint8_t *src2, int i_height )
  108. {
  109.     x264_pixel_avg2_w16_altivec(dst, i_dst, src1, i_src1, src2, i_height);
  110.     x264_pixel_avg2_w4_altivec(dst+16, i_dst, src1+16, i_src1, src2+16, i_height);
  111. }
  112. /* mc_copy: plain c */
  113. #define MC_COPY( name, a )                                
  114. static void name( uint8_t *dst, int i_dst,                
  115.                   uint8_t *src, int i_src, int i_height ) 
  116. {                                                         
  117.     int y;                                                
  118.     for( y = 0; y < i_height; y++ )                       
  119.     {                                                     
  120.         memcpy( dst, src, a );                            
  121.         src += i_src;                                     
  122.         dst += i_dst;                                     
  123.     }                                                     
  124. }
  125. MC_COPY( x264_mc_copy_w4_altivec,  4  )
  126. MC_COPY( x264_mc_copy_w8_altivec,  8  )
  127. static void x264_mc_copy_w16_altivec( uint8_t *dst, int i_dst,
  128.                                       uint8_t *src, int i_src, int i_height )
  129. {
  130.     int y;
  131.     vec_u8_t cpyV;
  132.     PREP_LOAD;
  133.     PREP_LOAD_SRC( src );
  134.     for( y = 0; y < i_height; y++)
  135.     {
  136.         VEC_LOAD( src, cpyV, 16, vec_u8_t, src );
  137.         vec_st(cpyV, 0, dst);
  138.         src += i_src;
  139.         dst += i_dst;
  140.     }
  141. }
  142. static void x264_mc_copy_w16_aligned_altivec( uint8_t *dst, int i_dst,
  143.                                               uint8_t *src, int i_src, int i_height )
  144. {
  145.     int y;
  146.     for( y = 0; y < i_height; ++y)
  147.     {
  148.         vec_u8_t cpyV = vec_ld( 0, src);
  149.         vec_st(cpyV, 0, dst);
  150.         src += i_src;
  151.         dst += i_dst;
  152.     }
  153. }
  154. static void mc_luma_altivec( uint8_t *dst,    int i_dst_stride,
  155.                              uint8_t *src[4], int i_src_stride,
  156.                              int mvx, int mvy,
  157.                              int i_width, int i_height )
  158. {
  159.     int qpel_idx = ((mvy&3)<<2) + (mvx&3);
  160.     int offset = (mvy>>2)*i_src_stride + (mvx>>2);
  161.     uint8_t *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
  162.     if( qpel_idx & 5 ) /* qpel interpolation needed */
  163.     {
  164.         uint8_t *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
  165.         switch(i_width) {
  166.         case 4:
  167.             x264_pixel_avg2_w4_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
  168.             break;
  169.         case 8:
  170.             x264_pixel_avg2_w8_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
  171.             break;
  172.         case 16:
  173.         default:
  174.             x264_pixel_avg2_w16_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
  175.         }
  176.     }
  177.     else
  178.     {
  179.         switch(i_width) {
  180.         case 4:
  181.             x264_mc_copy_w4_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
  182.             break;
  183.         case 8:
  184.             x264_mc_copy_w8_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
  185.             break;
  186.         case 16:
  187.             x264_mc_copy_w16_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
  188.             break;
  189.         }
  190.     }
  191. }
  192. static uint8_t *get_ref_altivec( uint8_t *dst,   int *i_dst_stride,
  193.                                  uint8_t *src[4], int i_src_stride,
  194.                                  int mvx, int mvy,
  195.                                  int i_width, int i_height )
  196. {
  197.     int qpel_idx = ((mvy&3)<<2) + (mvx&3);
  198.     int offset = (mvy>>2)*i_src_stride + (mvx>>2);
  199.     uint8_t *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
  200.     if( qpel_idx & 5 ) /* qpel interpolation needed */
  201.     {
  202.         uint8_t *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
  203.         switch(i_width) {
  204.         case 4:
  205.             x264_pixel_avg2_w4_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
  206.             break;
  207.         case 8:
  208.             x264_pixel_avg2_w8_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
  209.             break;
  210.         case 12:
  211.         case 16:
  212.         default:
  213.             x264_pixel_avg2_w16_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
  214.             break;
  215.         case 20:
  216.             x264_pixel_avg2_w20_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
  217.             break;
  218.         }
  219.         return dst;
  220.     }
  221.     else
  222.     {
  223.         *i_dst_stride = i_src_stride;
  224.         return src1;
  225.     }
  226. }
  227. static void mc_chroma_2xh( uint8_t *dst, int i_dst_stride,
  228.                            uint8_t *src, int i_src_stride,
  229.                            int mvx, int mvy,
  230.                            int i_height )
  231. {
  232.     uint8_t *srcp;
  233.     int y;
  234.     int d8x = mvx&0x07;
  235.     int d8y = mvy&0x07;
  236.     const int cA = (8-d8x)*(8-d8y);
  237.     const int cB = d8x    *(8-d8y);
  238.     const int cC = (8-d8x)*d8y;
  239.     const int cD = d8x    *d8y;
  240.     src  += (mvy >> 3) * i_src_stride + (mvx >> 3);
  241.     srcp  = &src[i_src_stride];
  242.     for( y = 0; y < i_height; y++ )
  243.     {
  244.         dst[0] = ( cA*src[0] +  cB*src[0+1] +
  245.                   cC*srcp[0] + cD*srcp[0+1] + 32 ) >> 6;
  246.         dst[1] = ( cA*src[1] +  cB*src[1+1] +
  247.                   cC*srcp[1] + cD*srcp[1+1] + 32 ) >> 6;
  248.         src  += i_src_stride;
  249.         srcp += i_src_stride;
  250.         dst  += i_dst_stride;
  251.     }
  252.  }
  253. #define DO_PROCESS_W4( a )  
  254.     dstv_16A = vec_mladd( src##a##v_16A, coeff##a##v, dstv_16A );   
  255.     dstv_16B = vec_mladd( src##a##v_16B, coeff##a##v, dstv_16B )
  256. static void mc_chroma_altivec_4xh( uint8_t *dst, int i_dst_stride,
  257.                                    uint8_t *src, int i_src_stride,
  258.                                    int mvx, int mvy,
  259.                                    int i_height )
  260. {
  261.     uint8_t *srcp;
  262.     int y;
  263.     int d8x = mvx & 0x07;
  264.     int d8y = mvy & 0x07;
  265.     ALIGNED_16( uint16_t coeff[4] );
  266.     coeff[0] = (8-d8x)*(8-d8y);
  267.     coeff[1] = d8x    *(8-d8y);
  268.     coeff[2] = (8-d8x)*d8y;
  269.     coeff[3] = d8x    *d8y;
  270.     src  += (mvy >> 3) * i_src_stride + (mvx >> 3);
  271.     srcp  = &src[i_src_stride];
  272.     LOAD_ZERO;
  273.     PREP_LOAD;
  274.     PREP_LOAD_SRC( src );
  275.     vec_u16_t   coeff0v, coeff1v, coeff2v, coeff3v;
  276.     vec_u8_t    src2v_8A, dstv_8A;
  277.     vec_u8_t    src2v_8B, dstv_8B;
  278.     vec_u16_t   src0v_16A, src1v_16A, src2v_16A, src3v_16A, dstv_16A;
  279.     vec_u16_t   src0v_16B, src1v_16B, src2v_16B, src3v_16B, dstv_16B;
  280.     vec_u16_t   shiftv, k32v;
  281.     coeff0v = vec_ld( 0, coeff );
  282.     coeff3v = vec_splat( coeff0v, 3 );
  283.     coeff2v = vec_splat( coeff0v, 2 );
  284.     coeff1v = vec_splat( coeff0v, 1 );
  285.     coeff0v = vec_splat( coeff0v, 0 );
  286.     k32v    = vec_sl( vec_splat_u16( 1 ), vec_splat_u16( 5 ) );
  287.     shiftv  = vec_splat_u16( 6 );
  288.     VEC_LOAD( src, src2v_8B, 5, vec_u8_t, src );
  289.     src2v_16B = vec_u8_to_u16( src2v_8B );
  290.     src3v_16B = vec_sld( src2v_16B, src2v_16B, 2 );
  291.     for( y = 0; y < i_height; y+=2 )
  292.     {
  293.         src0v_16A = src2v_16B;
  294.         src1v_16A = src3v_16B;
  295.         VEC_LOAD_G( srcp, src2v_8A, 5, vec_u8_t );
  296.         srcp += i_src_stride;
  297.         VEC_LOAD_G( srcp, src2v_8B, 5, vec_u8_t );
  298.         srcp += i_src_stride;
  299.         src2v_16A = vec_u8_to_u16( src2v_8A );
  300.         src2v_16B = vec_u8_to_u16( src2v_8B );
  301.         src3v_16A = vec_sld( src2v_16A, src2v_16A, 2 );
  302.         src3v_16B = vec_sld( src2v_16B, src2v_16B, 2 );
  303.         src0v_16B = src2v_16A;
  304.         src1v_16B = src3v_16A;
  305.         dstv_16A = dstv_16B = k32v;
  306.         DO_PROCESS_W4( 0 );
  307.         DO_PROCESS_W4( 1 );
  308.         DO_PROCESS_W4( 2 );
  309.         DO_PROCESS_W4( 3 );
  310.         dstv_16A = vec_sr( dstv_16A, shiftv );
  311.         dstv_16B = vec_sr( dstv_16B, shiftv );
  312.         dstv_8A  = vec_u16_to_u8( dstv_16A );
  313.         dstv_8B  = vec_u16_to_u8( dstv_16B );
  314.         vec_ste( vec_splat( (vec_u32_t) dstv_8A, 0 ), 0, (uint32_t*) dst );
  315.         dst += i_dst_stride;
  316.         vec_ste( vec_splat( (vec_u32_t) dstv_8B, 0 ), 0, (uint32_t*) dst );
  317.         dst += i_dst_stride;
  318.     }
  319. }
  320. #define DO_PROCESS_W8( a )  
  321.     src##a##v_16A = vec_u8_to_u16( src##a##v_8A );  
  322.     src##a##v_16B = vec_u8_to_u16( src##a##v_8B );  
  323.     dstv_16A = vec_mladd( src##a##v_16A, coeff##a##v, dstv_16A );   
  324.     dstv_16B = vec_mladd( src##a##v_16B, coeff##a##v, dstv_16B )
  325. static void mc_chroma_altivec_8xh( uint8_t *dst, int i_dst_stride,
  326.                                    uint8_t *src, int i_src_stride,
  327.                                    int mvx, int mvy,
  328.                                    int i_height )
  329. {
  330.     uint8_t *srcp;
  331.     int y;
  332.     int d8x = mvx & 0x07;
  333.     int d8y = mvy & 0x07;
  334.     ALIGNED_16( uint16_t coeff[4] );
  335.     coeff[0] = (8-d8x)*(8-d8y);
  336.     coeff[1] = d8x    *(8-d8y);
  337.     coeff[2] = (8-d8x)*d8y;
  338.     coeff[3] = d8x    *d8y;
  339.     src  += (mvy >> 3) * i_src_stride + (mvx >> 3);
  340.     srcp  = &src[i_src_stride];
  341.     LOAD_ZERO;
  342.     PREP_LOAD;
  343.     PREP_LOAD_SRC( src );
  344.     PREP_STORE8;
  345.     vec_u16_t   coeff0v, coeff1v, coeff2v, coeff3v;
  346.     vec_u8_t    src0v_8A, src1v_8A, src2v_8A, src3v_8A, dstv_8A;
  347.     vec_u8_t    src0v_8B, src1v_8B, src2v_8B, src3v_8B, dstv_8B;
  348.     vec_u16_t   src0v_16A, src1v_16A, src2v_16A, src3v_16A, dstv_16A;
  349.     vec_u16_t   src0v_16B, src1v_16B, src2v_16B, src3v_16B, dstv_16B;
  350.     vec_u16_t   shiftv, k32v;
  351.     coeff0v = vec_ld( 0, coeff );
  352.     coeff3v = vec_splat( coeff0v, 3 );
  353.     coeff2v = vec_splat( coeff0v, 2 );
  354.     coeff1v = vec_splat( coeff0v, 1 );
  355.     coeff0v = vec_splat( coeff0v, 0 );
  356.     k32v    = vec_sl( vec_splat_u16( 1 ), vec_splat_u16( 5 ) );
  357.     shiftv  = vec_splat_u16( 6 );
  358.     VEC_LOAD( src, src2v_8B, 9, vec_u8_t, src );
  359.     src3v_8B = vec_sld( src2v_8B, src2v_8B, 1 );
  360.     for( y = 0; y < i_height; y+=2 )
  361.     {
  362.         src0v_8A = src2v_8B;
  363.         src1v_8A = src3v_8B;
  364.         VEC_LOAD_G( srcp, src2v_8A, 9, vec_u8_t );
  365.         srcp += i_src_stride;
  366.         VEC_LOAD_G( srcp, src2v_8B, 9, vec_u8_t );
  367.         srcp += i_src_stride;
  368.         src3v_8A = vec_sld( src2v_8A, src2v_8A, 1 );
  369.         src3v_8B = vec_sld( src2v_8B, src2v_8B, 1 );
  370.         src0v_8B = src2v_8A;
  371.         src1v_8B = src3v_8A;
  372.         dstv_16A = dstv_16B = k32v;
  373.         DO_PROCESS_W8( 0 );
  374.         DO_PROCESS_W8( 1 );
  375.         DO_PROCESS_W8( 2 );
  376.         DO_PROCESS_W8( 3 );
  377.         dstv_16A = vec_sr( dstv_16A, shiftv );
  378.         dstv_16B = vec_sr( dstv_16B, shiftv );
  379.         dstv_8A  = vec_u16_to_u8( dstv_16A );
  380.         dstv_8B  = vec_u16_to_u8( dstv_16B );
  381.         VEC_STORE8( dstv_8A, dst );
  382.         dst += i_dst_stride;
  383.         VEC_STORE8( dstv_8B, dst );
  384.         dst += i_dst_stride;
  385.     }
  386. }
  387. static void mc_chroma_altivec( uint8_t *dst, int i_dst_stride,
  388.                                uint8_t *src, int i_src_stride,
  389.                                int mvx, int mvy,
  390.                                int i_width, int i_height )
  391. {
  392.     if( i_width == 8 )
  393.     {
  394.         mc_chroma_altivec_8xh( dst, i_dst_stride, src, i_src_stride,
  395.                                mvx, mvy, i_height );
  396.     }
  397.     else if( i_width == 4 )
  398.     {
  399.         mc_chroma_altivec_4xh( dst, i_dst_stride, src, i_src_stride,
  400.                                mvx, mvy, i_height );
  401.     }
  402.     else
  403.     {
  404.         mc_chroma_2xh( dst, i_dst_stride, src, i_src_stride,
  405.                        mvx, mvy, i_height );
  406.     }
  407. }
  408. #define HPEL_FILTER_1( t1v, t2v, t3v, t4v, t5v, t6v ) 
  409. {                                                     
  410.     t1v = vec_add( t1v, t6v );                        
  411.     t2v = vec_add( t2v, t5v );                        
  412.     t3v = vec_add( t3v, t4v );                        
  413.                                                       
  414.     t1v = vec_sub( t1v, t2v );   /* (a-b) */          
  415.     t2v = vec_sub( t2v, t3v );   /* (b-c) */          
  416.     t2v = vec_sl(  t2v, twov );  /* (b-c)*4 */        
  417.     t1v = vec_sub( t1v, t2v );   /* a-5*b+4*c */      
  418.     t3v = vec_sl(  t3v, fourv ); /* 16*c */           
  419.     t1v = vec_add( t1v, t3v );   /* a-5*b+20*c */     
  420. }
  421. #define HPEL_FILTER_2( t1v, t2v, t3v, t4v, t5v, t6v ) 
  422. {                                                     
  423.     t1v = vec_add( t1v, t6v );                        
  424.     t2v = vec_add( t2v, t5v );                        
  425.     t3v = vec_add( t3v, t4v );                        
  426.                                                       
  427.     t1v = vec_sub( t1v, t2v );  /* (a-b) */           
  428.     t1v = vec_sra( t1v, twov ); /* (a-b)/4 */         
  429.     t1v = vec_sub( t1v, t2v );  /* (a-b)/4-b */       
  430.     t1v = vec_add( t1v, t3v );  /* (a-b)/4-b+c */     
  431.     t1v = vec_sra( t1v, twov ); /* ((a-b)/4-b+c)/4 */ 
  432.     t1v = vec_add( t1v, t3v );  /* ((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16 */ 
  433. }
  434. #define HPEL_FILTER_HORIZONTAL()                             
  435. {                                                            
  436.     VEC_LOAD_G( &src[x- 2+i_stride*y], src1v, 16, vec_u8_t); 
  437.     VEC_LOAD_G( &src[x+14+i_stride*y], src6v, 16, vec_u8_t); 
  438.                                                              
  439.     src2v = vec_sld( src1v, src6v,  1 );                     
  440.     src3v = vec_sld( src1v, src6v,  2 );                     
  441.     src4v = vec_sld( src1v, src6v,  3 );                     
  442.     src5v = vec_sld( src1v, src6v,  4 );                     
  443.     src6v = vec_sld( src1v, src6v,  5 );                     
  444.                                                              
  445.     temp1v = vec_u8_to_s16_h( src1v );                       
  446.     temp2v = vec_u8_to_s16_h( src2v );                       
  447.     temp3v = vec_u8_to_s16_h( src3v );                       
  448.     temp4v = vec_u8_to_s16_h( src4v );                       
  449.     temp5v = vec_u8_to_s16_h( src5v );                       
  450.     temp6v = vec_u8_to_s16_h( src6v );                       
  451.                                                              
  452.     HPEL_FILTER_1( temp1v, temp2v, temp3v,                   
  453.                    temp4v, temp5v, temp6v );                 
  454.                                                              
  455.     dest1v = vec_add( temp1v, sixteenv );                    
  456.     dest1v = vec_sra( dest1v, fivev );                       
  457.                                                              
  458.     temp1v = vec_u8_to_s16_l( src1v );                       
  459.     temp2v = vec_u8_to_s16_l( src2v );                       
  460.     temp3v = vec_u8_to_s16_l( src3v );                       
  461.     temp4v = vec_u8_to_s16_l( src4v );                       
  462.     temp5v = vec_u8_to_s16_l( src5v );                       
  463.     temp6v = vec_u8_to_s16_l( src6v );                       
  464.                                                              
  465.     HPEL_FILTER_1( temp1v, temp2v, temp3v,                   
  466.                    temp4v, temp5v, temp6v );                 
  467.                                                              
  468.     dest2v = vec_add( temp1v, sixteenv );                    
  469.     dest2v = vec_sra( dest2v, fivev );                       
  470.                                                              
  471.     destv = vec_packsu( dest1v, dest2v );                    
  472.                                                              
  473.     VEC_STORE16( destv, &dsth[x+i_stride*y], dsth );         
  474. }
  475. #define HPEL_FILTER_VERTICAL()                                    
  476. {                                                                 
  477.     VEC_LOAD( &src[x+i_stride*(y-2)], src1v, 16, vec_u8_t, src ); 
  478.     VEC_LOAD( &src[x+i_stride*(y-1)], src2v, 16, vec_u8_t, src ); 
  479.     VEC_LOAD( &src[x+i_stride*(y-0)], src3v, 16, vec_u8_t, src ); 
  480.     VEC_LOAD( &src[x+i_stride*(y+1)], src4v, 16, vec_u8_t, src ); 
  481.     VEC_LOAD( &src[x+i_stride*(y+2)], src5v, 16, vec_u8_t, src ); 
  482.     VEC_LOAD( &src[x+i_stride*(y+3)], src6v, 16, vec_u8_t, src ); 
  483.                                                                   
  484.     temp1v = vec_u8_to_s16_h( src1v );                            
  485.     temp2v = vec_u8_to_s16_h( src2v );                            
  486.     temp3v = vec_u8_to_s16_h( src3v );                            
  487.     temp4v = vec_u8_to_s16_h( src4v );                            
  488.     temp5v = vec_u8_to_s16_h( src5v );                            
  489.     temp6v = vec_u8_to_s16_h( src6v );                            
  490.                                                                   
  491.     HPEL_FILTER_1( temp1v, temp2v, temp3v,                        
  492.                    temp4v, temp5v, temp6v );                      
  493.                                                                   
  494.     dest1v = vec_add( temp1v, sixteenv );                         
  495.     dest1v = vec_sra( dest1v, fivev );                            
  496.                                                                   
  497.     temp4v = vec_u8_to_s16_l( src1v );                            
  498.     temp5v = vec_u8_to_s16_l( src2v );                            
  499.     temp6v = vec_u8_to_s16_l( src3v );                            
  500.     temp7v = vec_u8_to_s16_l( src4v );                            
  501.     temp8v = vec_u8_to_s16_l( src5v );                            
  502.     temp9v = vec_u8_to_s16_l( src6v );                            
  503.                                                                   
  504.     HPEL_FILTER_1( temp4v, temp5v, temp6v,                        
  505.                    temp7v, temp8v, temp9v );                      
  506.                                                                   
  507.     dest2v = vec_add( temp4v, sixteenv );                         
  508.     dest2v = vec_sra( dest2v, fivev );                            
  509.                                                                   
  510.     destv = vec_packsu( dest1v, dest2v );                         
  511.                                                                   
  512.     VEC_STORE16( destv, &dstv[x+i_stride*y], dsth );              
  513. }
  514. #define HPEL_FILTER_CENTRAL()                           
  515. {                                                       
  516.     temp1v = vec_sld( tempav, tempbv, 12 );             
  517.     temp2v = vec_sld( tempav, tempbv, 14 );             
  518.     temp3v = tempbv;                                    
  519.     temp4v = vec_sld( tempbv, tempcv,  2 );             
  520.     temp5v = vec_sld( tempbv, tempcv,  4 );             
  521.     temp6v = vec_sld( tempbv, tempcv,  6 );             
  522.                                                         
  523.     HPEL_FILTER_2( temp1v, temp2v, temp3v,              
  524.                    temp4v, temp5v, temp6v );            
  525.                                                         
  526.     dest1v = vec_add( temp1v, thirtytwov );             
  527.     dest1v = vec_sra( dest1v, sixv );                   
  528.                                                         
  529.     temp1v = vec_sld( tempbv, tempcv, 12 );             
  530.     temp2v = vec_sld( tempbv, tempcv, 14 );             
  531.     temp3v = tempcv;                                    
  532.     temp4v = vec_sld( tempcv, tempdv,  2 );             
  533.     temp5v = vec_sld( tempcv, tempdv,  4 );             
  534.     temp6v = vec_sld( tempcv, tempdv,  6 );             
  535.                                                         
  536.     HPEL_FILTER_2( temp1v, temp2v, temp3v,              
  537.                    temp4v, temp5v, temp6v );            
  538.                                                         
  539.     dest2v = vec_add( temp1v, thirtytwov );             
  540.     dest2v = vec_sra( dest2v, sixv );                   
  541.                                                         
  542.     destv = vec_packsu( dest1v, dest2v );               
  543.                                                         
  544.     VEC_STORE16( destv, &dstc[x-16+i_stride*y], dsth ); 
  545. }
  546. void x264_hpel_filter_altivec( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src,
  547.                                int i_stride, int i_width, int i_height, int16_t *buf )
  548. {
  549.     int x, y;
  550.     vec_u8_t destv;
  551.     vec_u8_t src1v, src2v, src3v, src4v, src5v, src6v;
  552.     vec_s16_t dest1v, dest2v;
  553.     vec_s16_t temp1v, temp2v, temp3v, temp4v, temp5v, temp6v, temp7v, temp8v, temp9v;
  554.     vec_s16_t tempav, tempbv, tempcv, tempdv, tempev;
  555.     PREP_LOAD;
  556.     PREP_LOAD_SRC( src);
  557.     PREP_STORE16;
  558.     PREP_STORE16_DST( dsth );
  559.     LOAD_ZERO;
  560.     vec_u16_t twov, fourv, fivev, sixv;
  561.     vec_s16_t sixteenv, thirtytwov;
  562.     vec_u16_u temp_u;
  563.     temp_u.s[0]=2;
  564.     twov = vec_splat( temp_u.v, 0 );
  565.     temp_u.s[0]=4;
  566.     fourv = vec_splat( temp_u.v, 0 );
  567.     temp_u.s[0]=5;
  568.     fivev = vec_splat( temp_u.v, 0 );
  569.     temp_u.s[0]=6;
  570.     sixv = vec_splat( temp_u.v, 0 );
  571.     temp_u.s[0]=16;
  572.     sixteenv = (vec_s16_t)vec_splat( temp_u.v, 0 );
  573.     temp_u.s[0]=32;
  574.     thirtytwov = (vec_s16_t)vec_splat( temp_u.v, 0 );
  575.     for( y = 0; y < i_height; y++ )
  576.     {
  577.         x = 0;
  578.         /* horizontal_filter */
  579.         HPEL_FILTER_HORIZONTAL();
  580.         /* vertical_filter */
  581.         HPEL_FILTER_VERTICAL();
  582.         /* central_filter */
  583.         tempav = tempcv;
  584.         tempbv = tempdv;
  585.         tempcv = vec_splat( temp1v, 0 ); /* first only */
  586.         tempdv = temp1v;
  587.         tempev = temp4v;
  588.         for( x = 16; x < i_width; x+=16 )
  589.         {
  590.             /* horizontal_filter */
  591.             HPEL_FILTER_HORIZONTAL();
  592.             /* vertical_filter */
  593.             HPEL_FILTER_VERTICAL();
  594.             /* central_filter */
  595.             tempav = tempcv;
  596.             tempbv = tempdv;
  597.             tempcv = tempev;
  598.             tempdv = temp1v;
  599.             tempev = temp4v;
  600.             HPEL_FILTER_CENTRAL();
  601.         }
  602.         /* Partial vertical filter */
  603.         VEC_LOAD_PARTIAL( &src[x+i_stride*(y-2)], src1v, 16, vec_u8_t, src );
  604.         VEC_LOAD_PARTIAL( &src[x+i_stride*(y-1)], src2v, 16, vec_u8_t, src );
  605.         VEC_LOAD_PARTIAL( &src[x+i_stride*(y-0)], src3v, 16, vec_u8_t, src );
  606.         VEC_LOAD_PARTIAL( &src[x+i_stride*(y+1)], src4v, 16, vec_u8_t, src );
  607.         VEC_LOAD_PARTIAL( &src[x+i_stride*(y+2)], src5v, 16, vec_u8_t, src );
  608.         VEC_LOAD_PARTIAL( &src[x+i_stride*(y+3)], src6v, 16, vec_u8_t, src );
  609.         temp1v = vec_u8_to_s16_h( src1v );
  610.         temp2v = vec_u8_to_s16_h( src2v );
  611.         temp3v = vec_u8_to_s16_h( src3v );
  612.         temp4v = vec_u8_to_s16_h( src4v );
  613.         temp5v = vec_u8_to_s16_h( src5v );
  614.         temp6v = vec_u8_to_s16_h( src6v );
  615.         HPEL_FILTER_1( temp1v, temp2v, temp3v,
  616.                        temp4v, temp5v, temp6v );
  617.         /* central_filter */
  618.         tempav = tempcv;
  619.         tempbv = tempdv;
  620.         tempcv = tempev;
  621.         tempdv = temp1v;
  622.         /* tempev is not used */
  623.         HPEL_FILTER_CENTRAL();
  624.     }
  625. }
  626. static void frame_init_lowres_core_altivec( uint8_t *src0, uint8_t *dst0, uint8_t *dsth, uint8_t *dstv, uint8_t *dstc,
  627.                                            int src_stride, int dst_stride, int width, int height )
  628. {
  629.     int w = width/16;
  630.     int end = (width & 15);
  631.     int x, y;
  632.     vec_u8_t src0v, src1v, src2v;
  633.     vec_u8_t lv, hv, src1p1v;
  634.     vec_u8_t avg0v, avg1v, avghv, avghp1v, avgleftv, avgrightv;
  635.     static const vec_u8_t inverse_bridge_shuffle = CV(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E );
  636.     for( y=0; y<height; y++ )
  637.     {
  638.         uint8_t *src1 = src0+src_stride;
  639.         uint8_t *src2 = src1+src_stride;
  640.         src0v = vec_ld(0, src0);
  641.         src1v = vec_ld(0, src1);
  642.         src2v = vec_ld(0, src2);
  643.         avg0v = vec_avg(src0v, src1v);
  644.         avg1v = vec_avg(src1v, src2v);
  645.         for( x=0; x<w; x++ )
  646.         {
  647.             lv = vec_ld(16*(x*2+1), src0);
  648.             src1v = vec_ld(16*(x*2+1), src1);
  649.             avghv = vec_avg(lv, src1v);
  650.             lv = vec_ld(16*(x*2+2), src0);
  651.             src1p1v = vec_ld(16*(x*2+2), src1);
  652.             avghp1v = vec_avg(lv, src1p1v);
  653.             avgleftv = vec_avg(vec_sld(avg0v, avghv, 1), avg0v);
  654.             avgrightv = vec_avg(vec_sld(avghv, avghp1v, 1), avghv);
  655.             vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle), 16*x, dst0);
  656.             vec_st((vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv), 16*x, dsth);
  657.             avg0v = avghp1v;
  658.             hv = vec_ld(16*(x*2+1), src2);
  659.             avghv = vec_avg(src1v, hv);
  660.             hv = vec_ld(16*(x*2+2), src2);
  661.             avghp1v = vec_avg(src1p1v, hv);
  662.             avgleftv = vec_avg(vec_sld(avg1v, avghv, 1), avg1v);
  663.             avgrightv = vec_avg(vec_sld(avghv, avghp1v, 1), avghv);
  664.             vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle), 16*x, dstv);
  665.             vec_st((vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv), 16*x, dstc);
  666.             avg1v = avghp1v;
  667.         }
  668.         if (end)
  669.         {
  670.             lv = vec_ld(16*(x*2+1), src0);
  671.             src1v = vec_ld(16*(x*2+1), src1);
  672.             avghv = vec_avg(lv, src1v);
  673.             lv = vec_ld(16*(x*2+1), src2);
  674.             avghp1v = vec_avg(src1v, lv);
  675.             avgleftv = vec_avg(vec_sld(avg0v, avghv, 1), avg0v);
  676.             avgrightv = vec_avg(vec_sld(avg1v, avghp1v, 1), avg1v);
  677.             lv = vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle);
  678.             hv = (vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv);
  679.             vec_ste((vec_u32_t)lv,16*x,(uint32_t*)dst0);
  680.             vec_ste((vec_u32_t)lv,16*x+4,(uint32_t*)dst0);
  681.             vec_ste((vec_u32_t)hv,16*x,(uint32_t*)dsth);
  682.             vec_ste((vec_u32_t)hv,16*x+4,(uint32_t*)dsth);
  683.             lv = vec_sld(lv, lv, 8);
  684.             hv = vec_sld(hv, hv, 8);
  685.             vec_ste((vec_u32_t)lv,16*x,(uint32_t*)dstv);
  686.             vec_ste((vec_u32_t)lv,16*x+4,(uint32_t*)dstv);
  687.             vec_ste((vec_u32_t)hv,16*x,(uint32_t*)dstc);
  688.             vec_ste((vec_u32_t)hv,16*x+4,(uint32_t*)dstc);
  689.         }
  690.         src0 += src_stride*2;
  691.         dst0 += dst_stride;
  692.         dsth += dst_stride;
  693.         dstv += dst_stride;
  694.         dstc += dst_stride;
  695.     }
  696. }
  697. void x264_mc_altivec_init( x264_mc_functions_t *pf )
  698. {
  699.     pf->mc_luma   = mc_luma_altivec;
  700.     pf->get_ref   = get_ref_altivec;
  701.     pf->mc_chroma = mc_chroma_altivec;
  702.     pf->copy_16x16_unaligned = x264_mc_copy_w16_altivec;
  703.     pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_altivec;
  704.     pf->hpel_filter = x264_hpel_filter_altivec;
  705.     pf->frame_init_lowres_core = frame_init_lowres_core_altivec;
  706. }