macroblock.c
Uploaded by: lctgjx
Upload date: 2022-06-04
Package size: 8887k
File size: 45k
Category: Streaming / Mpeg4 / MP4
Platform: Visual C++


/*****************************************************************************
 * macroblock.c: h264 encoder library
 *****************************************************************************
 * Copyright (C) 2003-2008 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *          Jason Garrett-Glaser <darkshikari@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *****************************************************************************/

#include "common/common.h"
#include "macroblock.h"

/* These chroma DC functions don't have assembly versions and are only used here. */

#define ZIG(i,y,x) level[i] = dct[x][y];
static inline void zigzag_scan_2x2_dc( int16_t level[4], int16_t dct[2][2] )
{
    ZIG(0,0,0)
    ZIG(1,0,1)
    ZIG(2,1,0)
    ZIG(3,1,1)
}
#undef ZIG
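
/* Shared prologue for the 2x2 chroma DC dequant functions below:
 * qbits = i_qp/6 - 5 is negative for QP < 30, so the callers divide by a
 * right shift of -qbits; for QP >= 36 the positive shift is folded into the
 * dequant factor dmf instead and qbits is clamped to 0. */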
#define IDCT_DEQUANT_START \
    int d0 = dct[0][0] + dct[0][1]; \
    int d1 = dct[1][0] + dct[1][1]; \
    int d2 = dct[0][0] - dct[0][1]; \
    int d3 = dct[1][0] - dct[1][1]; \
    int dmf = dequant_mf[i_qp%6][0][0]; \
    int qbits = i_qp/6 - 5; \
    if( qbits > 0 ) \
    { \
        dmf <<= qbits; \
        qbits = 0; \
    }
static inline void idct_dequant_2x2_dc( int16_t dct[2][2], int16_t dct4x4[4][4][4], int dequant_mf[6][4][4], int i_qp )
{
    IDCT_DEQUANT_START
    dct4x4[0][0][0] = (d0 + d1) * dmf >> -qbits;
    dct4x4[1][0][0] = (d0 - d1) * dmf >> -qbits;
    dct4x4[2][0][0] = (d2 + d3) * dmf >> -qbits;
    dct4x4[3][0][0] = (d2 - d3) * dmf >> -qbits;
}

static inline void idct_dequant_2x2_dconly( int16_t out[2][2], int16_t dct[2][2], int dequant_mf[6][4][4], int i_qp )
{
    IDCT_DEQUANT_START
    out[0][0] = (d0 + d1) * dmf >> -qbits;
    out[0][1] = (d0 - d1) * dmf >> -qbits;
    out[1][0] = (d2 + d3) * dmf >> -qbits;
    out[1][1] = (d2 - d3) * dmf >> -qbits;
}
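
/* Gather the DC coefficient of each of the four 4x4 sub-blocks, apply the
 * 2x2 Hadamard transform, and zero the DCs in place so the sub-blocks can
 * then be coded as AC-only. */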
static inline void dct2x2dc( int16_t d[2][2], int16_t dct4x4[4][4][4] )
{
    int d0 = dct4x4[0][0][0] + dct4x4[1][0][0];
    int d1 = dct4x4[2][0][0] + dct4x4[3][0][0];
    int d2 = dct4x4[0][0][0] - dct4x4[1][0][0];
    int d3 = dct4x4[2][0][0] - dct4x4[3][0][0];
    d[0][0] = d0 + d1;
    d[1][0] = d2 + d3;
    d[0][1] = d0 - d1;
    d[1][1] = d2 - d3;
    dct4x4[0][0][0] = 0;
    dct4x4[1][0][0] = 0;
    dct4x4[2][0][0] = 0;
    dct4x4[3][0][0] = 0;
}
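
/* Same 2x2 Hadamard, but in place on a block that already holds only DC
 * values (used by the chroma early-termination path, which skips the full
 * AC transform). */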
static inline void dct2x2dc_dconly( int16_t d[2][2] )
{
    int d0 = d[0][0] + d[0][1];
    int d1 = d[1][0] + d[1][1];
    int d2 = d[0][0] - d[0][1];
    int d3 = d[1][0] - d[1][1];
    d[0][0] = d0 + d1;
    d[1][0] = d2 + d3;
    d[0][1] = d0 - d1;
    d[1][1] = d2 - d3;
}
static ALWAYS_INLINE int x264_quant_4x4( x264_t *h, int16_t dct[4][4], int i_qp, int i_ctxBlockCat, int b_intra, int idx )
{
    int i_quant_cat = b_intra ? CQM_4IY : CQM_4PY;
    if( h->mb.b_trellis )
        return x264_quant_4x4_trellis( h, dct, i_quant_cat, i_qp, i_ctxBlockCat, b_intra, 0, idx );
    else
        return h->quantf.quant_4x4( dct, h->quant4_mf[i_quant_cat][i_qp], h->quant4_bias[i_quant_cat][i_qp] );
}

static ALWAYS_INLINE int x264_quant_8x8( x264_t *h, int16_t dct[8][8], int i_qp, int b_intra, int idx )
{
    int i_quant_cat = b_intra ? CQM_8IY : CQM_8PY;
    if( h->mb.b_trellis )
        return x264_quant_8x8_trellis( h, dct, i_quant_cat, i_qp, b_intra, idx );
    else
        return h->quantf.quant_8x8( dct, h->quant8_mf[i_quant_cat][i_qp], h->quant8_bias[i_quant_cat][i_qp] );
}
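
/* Both helpers above return the block's nonzero flag: they select the intra
 * or inter quant matrix (CQM_*) and dispatch to either RD-optimal trellis
 * quantization or the plain deadzone quantizer, depending on h->mb.b_trellis. */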
/* All encoding functions must output the correct CBP and NNZ values.
 * The entropy coding functions will check CBP first, then NNZ, before
 * actually reading the DCT coefficients.  NNZ still must be correct even
 * if CBP is zero because of the use of NNZ values for context selection.
 * "NNZ" need only be 0 or 1 rather than the exact coefficient count because
 * that is only needed in CAVLC, and will be calculated by CAVLC's residual
 * coding and stored as necessary. */

/* This means that decimation can be done merely by adjusting the CBP and NNZ
 * rather than memsetting the coefficients. */

void x264_mb_encode_i4x4( x264_t *h, int idx, int i_qp )
{
    int nz;
    uint8_t *p_src = &h->mb.pic.p_fenc[0][block_idx_xy_fenc[idx]];
    uint8_t *p_dst = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[idx]];
    ALIGNED_ARRAY_16( int16_t, dct4x4,[4],[4] );

    if( h->mb.b_lossless )
    {
        nz = h->zigzagf.sub_4x4( h->dct.luma4x4[idx], p_src, p_dst );
        h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;
        h->mb.i_cbp_luma |= nz<<(idx>>2);
        return;
    }

    h->dctf.sub4x4_dct( dct4x4, p_src, p_dst );
    nz = x264_quant_4x4( h, dct4x4, i_qp, DCT_LUMA_4x4, 1, idx );
    h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;
    if( nz )
    {
        h->mb.i_cbp_luma |= 1<<(idx>>2);
        h->zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4 );
        h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4IY], i_qp );
        h->dctf.add4x4_idct( p_dst, dct4x4 );
    }
}
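
/* Write all four NNZ cache entries of an 8x8 block at once: each uint16_t
 * store covers two adjacent scan8 positions, and nz (0 or 1) is broadcast
 * to both bytes by the multiply with 0x0101. */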
#define STORE_8x8_NNZ(idx,nz)\
{\
    *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[idx*4+0]] = nz * 0x0101;\
    *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[idx*4+2]] = nz * 0x0101;\
}
void x264_mb_encode_i8x8( x264_t *h, int idx, int i_qp )
{
    int x = 8 * (idx&1);
    int y = 8 * (idx>>1);
    int nz;
    uint8_t *p_src = &h->mb.pic.p_fenc[0][x+y*FENC_STRIDE];
    uint8_t *p_dst = &h->mb.pic.p_fdec[0][x+y*FDEC_STRIDE];
    ALIGNED_ARRAY_16( int16_t, dct8x8,[8],[8] );

    if( h->mb.b_lossless )
    {
        nz = h->zigzagf.sub_8x8( h->dct.luma8x8[idx], p_src, p_dst );
        STORE_8x8_NNZ(idx,nz);
        h->mb.i_cbp_luma |= nz<<idx;
        return;
    }

    h->dctf.sub8x8_dct8( dct8x8, p_src, p_dst );
    nz = x264_quant_8x8( h, dct8x8, i_qp, 1, idx );
    if( nz )
    {
        h->mb.i_cbp_luma |= 1<<idx;
        h->zigzagf.scan_8x8( h->dct.luma8x8[idx], dct8x8 );
        h->quantf.dequant_8x8( dct8x8, h->dequant8_mf[CQM_8IY], i_qp );
        h->dctf.add8x8_idct8( p_dst, dct8x8 );
        STORE_8x8_NNZ(idx,1);
    }
    else
        STORE_8x8_NNZ(idx,0);
}
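
/* i16x16 luma: the 16 DC coefficients are pulled out, given their own 4x4
 * Hadamard transform, and coded as a separate DC block, so each 4x4 residual
 * below is quantized as a 15-coefficient AC block (DCT_LUMA_AC). */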
static void x264_mb_encode_i16x16( x264_t *h, int i_qp )
{
    uint8_t  *p_src = h->mb.pic.p_fenc[0];
    uint8_t  *p_dst = h->mb.pic.p_fdec[0];
    ALIGNED_ARRAY_16( int16_t, dct4x4,[16],[4][4] );
    ALIGNED_ARRAY_16( int16_t, dct_dc4x4,[4],[4] );
    int i, nz;
    int b_decimate = h->sh.i_type == SLICE_TYPE_B || (h->param.analyse.b_dct_decimate && h->sh.i_type == SLICE_TYPE_P);
    int decimate_score = b_decimate ? 0 : 9;

    if( h->mb.b_lossless )
    {
        for( i = 0; i < 16; i++ )
        {
            int oe = block_idx_xy_fenc[i];
            int od = block_idx_xy_fdec[i];
            nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[i], p_src+oe, p_dst+od, &dct_dc4x4[0][block_idx_yx_1d[i]] );
            h->mb.cache.non_zero_count[x264_scan8[i]] = nz;
            h->mb.i_cbp_luma |= nz;
        }
        h->mb.i_cbp_luma *= 0xf;
        h->mb.cache.non_zero_count[x264_scan8[24]] = array_non_zero( dct_dc4x4 );
        h->zigzagf.scan_4x4( h->dct.luma16x16_dc, dct_dc4x4 );
        return;
    }

    h->dctf.sub16x16_dct( dct4x4, p_src, p_dst );
    for( i = 0; i < 16; i++ )
    {
        /* copy dc coeff */
        dct_dc4x4[0][block_idx_xy_1d[i]] = dct4x4[i][0][0];
        dct4x4[i][0][0] = 0;

        /* quant/scan/dequant */
        nz = x264_quant_4x4( h, dct4x4[i], i_qp, DCT_LUMA_AC, 1, i );
        h->mb.cache.non_zero_count[x264_scan8[i]] = nz;
        if( nz )
        {
            h->zigzagf.scan_4x4( h->dct.luma4x4[i], dct4x4[i] );
            h->quantf.dequant_4x4( dct4x4[i], h->dequant4_mf[CQM_4IY], i_qp );
            if( decimate_score < 6 ) decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[i] );
            h->mb.i_cbp_luma = 0xf;
        }
    }

    /* Writing the 16 CBFs in an i16x16 block is quite costly, so decimation can save many bits. */
    /* More useful with CAVLC, but still useful with CABAC. */
    if( decimate_score < 6 )
    {
        h->mb.i_cbp_luma = 0;
        *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 0]] = 0;
        *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 2]] = 0;
        *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 8]] = 0;
        *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[10]] = 0;
    }

    h->dctf.dct4x4dc( dct_dc4x4 );
    if( h->mb.b_trellis )
        nz = x264_quant_dc_trellis( h, (int16_t*)dct_dc4x4, CQM_4IY, i_qp, DCT_LUMA_DC, 1, 0 );
    else
        nz = h->quantf.quant_4x4_dc( dct_dc4x4, h->quant4_mf[CQM_4IY][i_qp][0]>>1, h->quant4_bias[CQM_4IY][i_qp][0]<<1 );
    h->mb.cache.non_zero_count[x264_scan8[24]] = nz;
    if( nz )
    {
        h->zigzagf.scan_4x4( h->dct.luma16x16_dc, dct_dc4x4 );

        /* output samples to fdec */
        h->dctf.idct4x4dc( dct_dc4x4 );
        h->quantf.dequant_4x4_dc( dct_dc4x4, h->dequant4_mf[CQM_4IY], i_qp );  /* XXX not inversed */
        if( h->mb.i_cbp_luma )
            for( i = 0; i < 16; i++ )
                dct4x4[i][0][0] = dct_dc4x4[0][block_idx_xy_1d[i]];
    }

    /* put pixels to fdec */
    if( h->mb.i_cbp_luma )
        h->dctf.add16x16_idct( p_dst, dct4x4 );
    else if( nz )
        h->dctf.add16x16_idct_dc( p_dst, dct_dc4x4 );
}
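
/* Nonzero iff any of the four dequantized candidate DCs falls into a
 * different 64-wide rounding bin than the corresponding reference value,
 * i.e. iff ref[y][x]>>6 != (out[y][x]+32)>>6 for some coefficient. */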
static inline int idct_dequant_round_2x2_dc( int16_t ref[2][2], int16_t dct[2][2], int dequant_mf[6][4][4], int i_qp )
{
    int16_t out[2][2];
    idct_dequant_2x2_dconly( out, dct, dequant_mf, i_qp );
    return ((ref[0][0] ^ (out[0][0]+32))
          | (ref[0][1] ^ (out[0][1]+32))
          | (ref[1][0] ^ (out[1][0]+32))
          | (ref[1][1] ^ (out[1][1]+32))) >> 6;
}

/* Round down coefficients losslessly in DC-only chroma blocks.
 * Unlike luma blocks, this can't be done with a lookup table or
 * other shortcut technique because of the interdependencies
 * between the coefficients due to the chroma DC transform. */
static inline int x264_mb_optimize_chroma_dc( x264_t *h, int b_inter, int i_qp, int16_t dct2x2[2][2] )
{
    int16_t dct2x2_orig[2][2];
    int coeff;
    int nz = 0;

    /* If the QP is too high, there's no benefit to rounding optimization. */
    if( h->dequant4_mf[CQM_4IC + b_inter][i_qp%6][0][0] << (i_qp/6) > 32*64 )
        return 1;

    idct_dequant_2x2_dconly( dct2x2_orig, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
    dct2x2_orig[0][0] += 32;
    dct2x2_orig[0][1] += 32;
    dct2x2_orig[1][0] += 32;
    dct2x2_orig[1][1] += 32;

    /* If the DC coefficients already round to zero, terminate early. */
    if( !((dct2x2_orig[0][0]|dct2x2_orig[0][1]|dct2x2_orig[1][0]|dct2x2_orig[1][1])>>6) )
        return 0;

    /* Start with the highest frequency coefficient... is this the best option? */
    for( coeff = 3; coeff >= 0; coeff-- )
    {
        int sign = dct2x2[0][coeff] < 0 ? -1 : 1;
        int level = dct2x2[0][coeff];
        if( !level )
            continue;
        while( level )
        {
            dct2x2[0][coeff] = level - sign;
            if( idct_dequant_round_2x2_dc( dct2x2_orig, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp ) )
                break;
            level -= sign;
        }
        nz |= level;
        dct2x2[0][coeff] = level;
    }
    return !!nz;
}
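
/* Encode both 8x8 chroma planes: the four DC coefficients per plane go
 * through the 2x2 Hadamard path above, the ACs through four 4x4 blocks,
 * with optional variance-based early termination and decimation. */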
void x264_mb_encode_8x8_chroma( x264_t *h, int b_inter, int i_qp )
{
    int i, ch, nz, nz_dc;
    int b_decimate = b_inter && (h->sh.i_type == SLICE_TYPE_B || h->param.analyse.b_dct_decimate);
    ALIGNED_ARRAY_16( int16_t, dct2x2,[2],[2] );
    h->mb.i_cbp_chroma = 0;

    /* Early termination: check variance of chroma residual before encoding.
     * Don't bother trying early termination at low QPs.
     * Values are experimentally derived. */
    if( b_decimate && i_qp >= (h->mb.b_trellis ? 12 : 18) )
    {
        int thresh = (x264_lambda2_tab[i_qp] + 32) >> 6;
        int ssd[2];
        int score  = h->pixf.var2_8x8( h->mb.pic.p_fenc[1], FENC_STRIDE, h->mb.pic.p_fdec[1], FDEC_STRIDE, &ssd[0] );
            score += h->pixf.var2_8x8( h->mb.pic.p_fenc[2], FENC_STRIDE, h->mb.pic.p_fdec[2], FDEC_STRIDE, &ssd[1] );
        if( score < thresh*4 )
        {
            h->mb.cache.non_zero_count[x264_scan8[16]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[17]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[18]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[19]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[20]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[21]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[22]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[23]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[25]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[26]] = 0;
            for( ch = 0; ch < 2; ch++ )
            {
                if( ssd[ch] > thresh )
                {
                    h->dctf.sub8x8_dct_dc( dct2x2, h->mb.pic.p_fenc[1+ch], h->mb.pic.p_fdec[1+ch] );
                    dct2x2dc_dconly( dct2x2 );
                    if( h->mb.b_trellis )
                        nz_dc = x264_quant_dc_trellis( h, (int16_t*)dct2x2, CQM_4IC+b_inter, i_qp, DCT_CHROMA_DC, !b_inter, 1 );
                    else
                        nz_dc = h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4IC+b_inter][i_qp][0]>>1, h->quant4_bias[CQM_4IC+b_inter][i_qp][0]<<1 );
                    if( nz_dc )
                    {
                        if( !x264_mb_optimize_chroma_dc( h, b_inter, i_qp, dct2x2 ) )
                            continue;
                        h->mb.cache.non_zero_count[x264_scan8[25]+ch] = 1;
                        zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
                        idct_dequant_2x2_dconly( dct2x2, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
                        h->dctf.add8x8_idct_dc( h->mb.pic.p_fdec[1+ch], dct2x2 );
                        h->mb.i_cbp_chroma = 1;
                    }
                }
            }
            return;
        }
    }
    for( ch = 0; ch < 2; ch++ )
    {
        uint8_t  *p_src = h->mb.pic.p_fenc[1+ch];
        uint8_t  *p_dst = h->mb.pic.p_fdec[1+ch];
        int i_decimate_score = 0;
        int nz_ac = 0;
        ALIGNED_ARRAY_16( int16_t, dct4x4,[4],[4][4] );

        if( h->mb.b_lossless )
        {
            for( i = 0; i < 4; i++ )
            {
                int oe = block_idx_x[i]*4 + block_idx_y[i]*4*FENC_STRIDE;
                int od = block_idx_x[i]*4 + block_idx_y[i]*4*FDEC_STRIDE;
                nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[16+i+ch*4], p_src+oe, p_dst+od, &h->dct.chroma_dc[ch][i] );
                h->mb.cache.non_zero_count[x264_scan8[16+i+ch*4]] = nz;
                h->mb.i_cbp_chroma |= nz;
            }
            h->mb.cache.non_zero_count[x264_scan8[25]+ch] = array_non_zero( h->dct.chroma_dc[ch] );
            continue;
        }

        h->dctf.sub8x8_dct( dct4x4, p_src, p_dst );
        dct2x2dc( dct2x2, dct4x4 );

        /* calculate dct coeffs */
        for( i = 0; i < 4; i++ )
        {
            if( h->mb.b_trellis )
                nz = x264_quant_4x4_trellis( h, dct4x4[i], CQM_4IC+b_inter, i_qp, DCT_CHROMA_AC, !b_inter, 1, 0 );
            else
                nz = h->quantf.quant_4x4( dct4x4[i], h->quant4_mf[CQM_4IC+b_inter][i_qp], h->quant4_bias[CQM_4IC+b_inter][i_qp] );
            h->mb.cache.non_zero_count[x264_scan8[16+i+ch*4]] = nz;
            if( nz )
            {
                nz_ac = 1;
                h->zigzagf.scan_4x4( h->dct.luma4x4[16+i+ch*4], dct4x4[i] );
                h->quantf.dequant_4x4( dct4x4[i], h->dequant4_mf[CQM_4IC + b_inter], i_qp );
                if( b_decimate )
                    i_decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[16+i+ch*4] );
            }
        }

        if( h->mb.b_trellis )
            nz_dc = x264_quant_dc_trellis( h, (int16_t*)dct2x2, CQM_4IC+b_inter, i_qp, DCT_CHROMA_DC, !b_inter, 1 );
        else
            nz_dc = h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4IC+b_inter][i_qp][0]>>1, h->quant4_bias[CQM_4IC+b_inter][i_qp][0]<<1 );
        h->mb.cache.non_zero_count[x264_scan8[25]+ch] = nz_dc;

        if( (b_decimate && i_decimate_score < 7) || !nz_ac )
        {
            /* Decimate the block */
            h->mb.cache.non_zero_count[x264_scan8[16+0]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+1]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+2]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+3]+24*ch] = 0;
            if( !nz_dc ) /* Whole block is empty */
                continue;
            if( !x264_mb_optimize_chroma_dc( h, b_inter, i_qp, dct2x2 ) )
            {
                h->mb.cache.non_zero_count[x264_scan8[25]+ch] = 0;
                continue;
            }
            /* DC-only */
            zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
            idct_dequant_2x2_dconly( dct2x2, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
            h->dctf.add8x8_idct_dc( p_dst, dct2x2 );
        }
        else
        {
            h->mb.i_cbp_chroma = 1;
            if( nz_dc )
            {
                zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
                idct_dequant_2x2_dc( dct2x2, dct4x4, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
            }
            h->dctf.add8x8_idct( p_dst, dct4x4 );
        }
    }

    if( h->mb.i_cbp_chroma )
        h->mb.i_cbp_chroma = 2;    /* dc+ac (we can't do only ac) */
    else if( h->mb.cache.non_zero_count[x264_scan8[25]] |
             h->mb.cache.non_zero_count[x264_scan8[26]] )
        h->mb.i_cbp_chroma = 1;    /* dc only */
}
static void x264_macroblock_encode_skip( x264_t *h )
{
    h->mb.i_cbp_luma = 0x00;
    h->mb.i_cbp_chroma = 0x00;
    memset( h->mb.cache.non_zero_count, 0, X264_SCAN8_SIZE );
    /* store cbp */
    h->mb.cbp[h->mb.i_mb_xy] = 0;
}

/*****************************************************************************
 * x264_macroblock_encode_pskip:
 *  Encode an already marked skip block
 *****************************************************************************/
static void x264_macroblock_encode_pskip( x264_t *h )
{
    const int mvx = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][0],
                                h->mb.mv_min[0], h->mb.mv_max[0] );
    const int mvy = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][1],
                                h->mb.mv_min[1], h->mb.mv_max[1] );

    /* don't do pskip motion compensation if it was already done in macroblock_analyse */
    if( !h->mb.b_skip_mc )
    {
        h->mc.mc_luma( h->mb.pic.p_fdec[0],    FDEC_STRIDE,
                       h->mb.pic.p_fref[0][0], h->mb.pic.i_stride[0],
                       mvx, mvy, 16, 16 );
        h->mc.mc_chroma( h->mb.pic.p_fdec[1],       FDEC_STRIDE,
                         h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1],
                         mvx, mvy, 8, 8 );
        h->mc.mc_chroma( h->mb.pic.p_fdec[2],       FDEC_STRIDE,
                         h->mb.pic.p_fref[0][0][5], h->mb.pic.i_stride[2],
                         mvx, mvy, 8, 8 );
    }
    x264_macroblock_encode_skip( h );
}
/*****************************************************************************
 * Intra prediction for predictive lossless mode.
 *****************************************************************************/

/* Note that these functions take a shortcut (mc.copy instead of actual pixel prediction) which assumes
 * that the edge pixels of the reconstructed frame are the same as that of the source frame.  This means
 * they will only work correctly if the neighboring blocks are losslessly coded.  In practice, this means
 * lossless mode cannot be mixed with lossy mode within a frame. */

/* This can be resolved by explicitly copying the edge pixels after doing the mc.copy, but this doesn't
 * need to be done unless we decide to allow mixing lossless and lossy compression. */

void x264_predict_lossless_8x8_chroma( x264_t *h, int i_mode )
{
    int stride = h->fenc->i_stride[1] << h->mb.b_interlaced;
    if( i_mode == I_PRED_CHROMA_V )
    {
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc_plane[1]-stride, stride, 8 );
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc_plane[2]-stride, stride, 8 );
    }
    else if( i_mode == I_PRED_CHROMA_H )
    {
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc_plane[1]-1, stride, 8 );
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc_plane[2]-1, stride, 8 );
    }
    else
    {
        h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] );
        h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] );
    }
}

void x264_predict_lossless_4x4( x264_t *h, uint8_t *p_dst, int idx, int i_mode )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    uint8_t *p_src = h->mb.pic.p_fenc_plane[0] + block_idx_x[idx]*4 + block_idx_y[idx]*4 * stride;

    if( i_mode == I_PRED_4x4_V )
        h->mc.copy[PIXEL_4x4]( p_dst, FDEC_STRIDE, p_src-stride, stride, 4 );
    else if( i_mode == I_PRED_4x4_H )
        h->mc.copy[PIXEL_4x4]( p_dst, FDEC_STRIDE, p_src-1, stride, 4 );
    else
        h->predict_4x4[i_mode]( p_dst );
}

void x264_predict_lossless_8x8( x264_t *h, uint8_t *p_dst, int idx, int i_mode, uint8_t edge[33] )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    uint8_t *p_src = h->mb.pic.p_fenc_plane[0] + (idx&1)*8 + (idx>>1)*8*stride;

    if( i_mode == I_PRED_8x8_V )
        h->mc.copy[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src-stride, stride, 8 );
    else if( i_mode == I_PRED_8x8_H )
        h->mc.copy[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src-1, stride, 8 );
    else
        h->predict_8x8[i_mode]( p_dst, edge );
}

void x264_predict_lossless_16x16( x264_t *h, int i_mode )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    if( i_mode == I_PRED_16x16_V )
        h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc_plane[0]-stride, stride, 16 );
    else if( i_mode == I_PRED_16x16_H )
        h->mc.copy_16x16_unaligned( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc_plane[0]-1, stride, 16 );
    else
        h->predict_16x16[i_mode]( h->mb.pic.p_fdec[0] );
}
/*****************************************************************************
 * x264_macroblock_encode:
 *****************************************************************************/
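/* Top-level residual coding for one macroblock: run prediction or motion
 * compensation for the chosen mode, transform and quantize luma and chroma,
 * update CBP/NNZ, and demote the MB to P_SKIP/B_SKIP if no residual remains. */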
void x264_macroblock_encode( x264_t *h )
{
    int i_cbp_dc = 0;
    int i_qp = h->mb.i_qp;
    int b_decimate = h->sh.i_type == SLICE_TYPE_B || h->param.analyse.b_dct_decimate;
    int b_force_no_skip = 0;
    int i,idx,nz;
    h->mb.i_cbp_luma = 0;
    h->mb.cache.non_zero_count[x264_scan8[24]] = 0;

    if( h->sh.b_mbaff
        && h->mb.i_mb_xy == h->sh.i_first_mb + h->mb.i_mb_stride
        && IS_SKIP(h->mb.type[h->sh.i_first_mb]) )
    {
        /* The first skip is predicted to be a frame mb pair.
         * We don't yet support the aff part of mbaff, so force it to non-skip
         * so that we can pick the aff flag. */
        b_force_no_skip = 1;
        if( IS_SKIP(h->mb.i_type) )
        {
            if( h->mb.i_type == P_SKIP )
                h->mb.i_type = P_L0;
            else if( h->mb.i_type == B_SKIP )
                h->mb.i_type = B_DIRECT;
        }
    }

    if( h->mb.i_type == P_SKIP )
    {
        /* A bit special */
        x264_macroblock_encode_pskip( h );
        return;
    }
    if( h->mb.i_type == B_SKIP )
    {
        /* don't do bskip motion compensation if it was already done in macroblock_analyse */
        if( !h->mb.b_skip_mc )
            x264_mb_mc( h );
        x264_macroblock_encode_skip( h );
        return;
    }

    if( h->mb.i_type == I_16x16 )
    {
        const int i_mode = h->mb.i_intra16x16_pred_mode;
        h->mb.b_transform_8x8 = 0;

        if( h->mb.b_lossless )
            x264_predict_lossless_16x16( h, i_mode );
        else
            h->predict_16x16[i_mode]( h->mb.pic.p_fdec[0] );

        /* encode the 16x16 macroblock */
        x264_mb_encode_i16x16( h, i_qp );
    }
    else if( h->mb.i_type == I_8x8 )
    {
        ALIGNED_ARRAY_16( uint8_t, edge,[33] );
        h->mb.b_transform_8x8 = 1;

        /* If we already encoded 3 of the 4 i8x8 blocks, we don't have to do them again. */
        if( h->mb.i_skip_intra )
        {
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i8x8_fdec_buf, 16, 16 );
            *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 0]] = h->mb.pic.i8x8_nnz_buf[0];
            *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 2]] = h->mb.pic.i8x8_nnz_buf[1];
            *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 8]] = h->mb.pic.i8x8_nnz_buf[2];
            *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[10]] = h->mb.pic.i8x8_nnz_buf[3];
            h->mb.i_cbp_luma = h->mb.pic.i8x8_cbp;
            /* In RD mode, restore the now-overwritten DCT data. */
            if( h->mb.i_skip_intra == 2 )
                h->mc.memcpy_aligned( h->dct.luma8x8, h->mb.pic.i8x8_dct_buf, sizeof(h->mb.pic.i8x8_dct_buf) );
        }
        for( i = h->mb.i_skip_intra ? 3 : 0 ; i < 4; i++ )
        {
            uint8_t  *p_dst = &h->mb.pic.p_fdec[0][8 * (i&1) + 8 * (i>>1) * FDEC_STRIDE];
            int      i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[4*i]];

            h->predict_8x8_filter( p_dst, edge, h->mb.i_neighbour8[i], x264_pred_i4x4_neighbors[i_mode] );

            if( h->mb.b_lossless )
                x264_predict_lossless_8x8( h, p_dst, i, i_mode, edge );
            else
                h->predict_8x8[i_mode]( p_dst, edge );
            x264_mb_encode_i8x8( h, i, i_qp );
        }
    }
    else if( h->mb.i_type == I_4x4 )
    {
        h->mb.b_transform_8x8 = 0;

        /* If we already encoded 15 of the 16 i4x4 blocks, we don't have to do them again. */
        if( h->mb.i_skip_intra )
        {
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i4x4_fdec_buf, 16, 16 );
            *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 0]] = h->mb.pic.i4x4_nnz_buf[0];
            *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 2]] = h->mb.pic.i4x4_nnz_buf[1];
            *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 8]] = h->mb.pic.i4x4_nnz_buf[2];
            *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[10]] = h->mb.pic.i4x4_nnz_buf[3];
            h->mb.i_cbp_luma = h->mb.pic.i4x4_cbp;
            /* In RD mode, restore the now-overwritten DCT data. */
            if( h->mb.i_skip_intra == 2 )
                h->mc.memcpy_aligned( h->dct.luma4x4, h->mb.pic.i4x4_dct_buf, sizeof(h->mb.pic.i4x4_dct_buf) );
        }
        for( i = h->mb.i_skip_intra ? 15 : 0 ; i < 16; i++ )
        {
            uint8_t  *p_dst = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i]];
            int      i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[i]];

            if( (h->mb.i_neighbour4[i] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP )
                /* emulate missing topright samples */
                *(uint32_t*) &p_dst[4-FDEC_STRIDE] = p_dst[3-FDEC_STRIDE] * 0x01010101U;

            if( h->mb.b_lossless )
                x264_predict_lossless_4x4( h, p_dst, i, i_mode );
            else
                h->predict_4x4[i_mode]( p_dst );
            x264_mb_encode_i4x4( h, i, i_qp );
        }
    }
    else    /* Inter MB */
    {
        int i8x8, i4x4;
        int i_decimate_mb = 0;

        /* Don't repeat motion compensation if it was already done in non-RD transform analysis */
        if( !h->mb.b_skip_mc )
            x264_mb_mc( h );

        if( h->mb.b_lossless )
        {
            if( h->mb.b_transform_8x8 )
                for( i8x8 = 0; i8x8 < 4; i8x8++ )
                {
                    int x = 8*(i8x8&1);
                    int y = 8*(i8x8>>1);
                    nz = h->zigzagf.sub_8x8( h->dct.luma8x8[i8x8],
                                        h->mb.pic.p_fenc[0]+x+y*FENC_STRIDE,
                                        h->mb.pic.p_fdec[0]+x+y*FDEC_STRIDE );
                    STORE_8x8_NNZ(i8x8,nz);
                    h->mb.i_cbp_luma |= nz << i8x8;
                }
            else
                for( i4x4 = 0; i4x4 < 16; i4x4++ )
                {
                    nz = h->zigzagf.sub_4x4( h->dct.luma4x4[i4x4],
                                        h->mb.pic.p_fenc[0]+block_idx_xy_fenc[i4x4],
                                        h->mb.pic.p_fdec[0]+block_idx_xy_fdec[i4x4] );
                    h->mb.cache.non_zero_count[x264_scan8[i4x4]] = nz;
                    h->mb.i_cbp_luma |= nz << (i4x4>>2);
                }
        }
        else if( h->mb.b_transform_8x8 )
        {
            ALIGNED_ARRAY_16( int16_t, dct8x8,[4],[8][8] );
            b_decimate &= !h->mb.b_trellis; // 8x8 trellis is inherently optimal decimation
            h->dctf.sub16x16_dct8( dct8x8, h->mb.pic.p_fenc[0], h->mb.pic.p_fdec[0] );
            h->nr_count[1] += h->mb.b_noise_reduction * 4;

            for( idx = 0; idx < 4; idx++ )
            {
                if( h->mb.b_noise_reduction )
                    h->quantf.denoise_dct( *dct8x8[idx], h->nr_residual_sum[1], h->nr_offset[1], 64 );
                nz = x264_quant_8x8( h, dct8x8[idx], i_qp, 0, idx );

                if( nz )
                {
                    h->zigzagf.scan_8x8( h->dct.luma8x8[idx], dct8x8[idx] );
                    if( b_decimate )
                    {
                        int i_decimate_8x8 = h->quantf.decimate_score64( h->dct.luma8x8[idx] );
                        i_decimate_mb += i_decimate_8x8;
                        if( i_decimate_8x8 >= 4 )
                            h->mb.i_cbp_luma |= 1<<idx;
                    }
                    else
                        h->mb.i_cbp_luma |= 1<<idx;
                }
            }
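
            /* Decimation thresholds: an 8x8 block scoring < 4 was dropped from
             * the CBP above; a whole-MB score < 6 zeroes all luma residual. */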
            if( i_decimate_mb < 6 && b_decimate )
            {
                h->mb.i_cbp_luma = 0;
                *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 0]] = 0;
                *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 2]] = 0;
                *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 8]] = 0;
                *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[10]] = 0;
            }
            else
            {
                for( idx = 0; idx < 4; idx++ )
                {
                    if( h->mb.i_cbp_luma&(1<<idx) )
                    {
                        h->quantf.dequant_8x8( dct8x8[idx], h->dequant8_mf[CQM_8PY], i_qp );
                        h->dctf.add8x8_idct8( &h->mb.pic.p_fdec[0][(idx&1)*8 + (idx>>1)*8*FDEC_STRIDE], dct8x8[idx] );
                        STORE_8x8_NNZ(idx,1);
                    }
                    else
                        STORE_8x8_NNZ(idx,0);
                }
            }
        }
        else
        {
            ALIGNED_ARRAY_16( int16_t, dct4x4,[16],[4][4] );
            h->dctf.sub16x16_dct( dct4x4, h->mb.pic.p_fenc[0], h->mb.pic.p_fdec[0] );
            h->nr_count[0] += h->mb.b_noise_reduction * 16;

            for( i8x8 = 0; i8x8 < 4; i8x8++ )
            {
                int i_decimate_8x8 = 0;
                int cbp = 0;

                /* encode one 4x4 block */
                for( i4x4 = 0; i4x4 < 4; i4x4++ )
                {
                    idx = i8x8 * 4 + i4x4;

                    if( h->mb.b_noise_reduction )
                        h->quantf.denoise_dct( *dct4x4[idx], h->nr_residual_sum[0], h->nr_offset[0], 16 );
                    nz = x264_quant_4x4( h, dct4x4[idx], i_qp, DCT_LUMA_4x4, 0, idx );
                    h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;

                    if( nz )
                    {
                        h->zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4[idx] );
                        h->quantf.dequant_4x4( dct4x4[idx], h->dequant4_mf[CQM_4PY], i_qp );
                        if( b_decimate && i_decimate_8x8 < 6 )
                            i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[idx] );
                        cbp = 1;
                    }
                }

                /* decimate this 8x8 block */
                i_decimate_mb += i_decimate_8x8;
                if( b_decimate )
                {
                    if( i_decimate_8x8 < 4 )
                        STORE_8x8_NNZ(i8x8,0)
                    else
                        h->mb.i_cbp_luma |= 1<<i8x8;
                }
                else if( cbp )
                {
                    h->dctf.add8x8_idct( &h->mb.pic.p_fdec[0][(i8x8&1)*8 + (i8x8>>1)*8*FDEC_STRIDE], &dct4x4[i8x8*4] );
                    h->mb.i_cbp_luma |= 1<<i8x8;
                }
            }

            if( b_decimate )
            {
                if( i_decimate_mb < 6 )
                {
                    h->mb.i_cbp_luma = 0;
                    *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 0]] = 0;
                    *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 2]] = 0;
                    *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 8]] = 0;
                    *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[10]] = 0;
                }
                else
                {
                    for( i8x8 = 0; i8x8 < 4; i8x8++ )
                        if( h->mb.i_cbp_luma&(1<<i8x8) )
                            h->dctf.add8x8_idct( &h->mb.pic.p_fdec[0][(i8x8&1)*8 + (i8x8>>1)*8*FDEC_STRIDE], &dct4x4[i8x8*4] );
                }
            }
        }
    }

    /* encode chroma */
    if( IS_INTRA( h->mb.i_type ) )
    {
        const int i_mode = h->mb.i_chroma_pred_mode;
        if( h->mb.b_lossless )
            x264_predict_lossless_8x8_chroma( h, i_mode );
        else
        {
            h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] );
            h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] );
        }
    }

    /* encode the 8x8 blocks */
    x264_mb_encode_8x8_chroma( h, !IS_INTRA( h->mb.i_type ), h->mb.i_chroma_qp );

    if( h->param.b_cabac )
    {
        i_cbp_dc = h->mb.cache.non_zero_count[x264_scan8[24]]
                 | h->mb.cache.non_zero_count[x264_scan8[25]] << 1
                 | h->mb.cache.non_zero_count[x264_scan8[26]] << 2;
    }

    /* store cbp */
    h->mb.cbp[h->mb.i_mb_xy] = (i_cbp_dc << 8) | (h->mb.i_cbp_chroma << 4) | h->mb.i_cbp_luma;

    /* Check for P_SKIP
     * XXX: in the me perhaps we should take x264_mb_predict_mv_pskip into account
     *      (if multiple mv give same result)*/
    if( !b_force_no_skip )
    {
        if( h->mb.i_type == P_L0 && h->mb.i_partition == D_16x16 &&
            !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) &&
            *(uint32_t*)h->mb.cache.mv[0][x264_scan8[0]] == *(uint32_t*)h->mb.cache.pskip_mv
            && h->mb.cache.ref[0][x264_scan8[0]] == 0 )
        {
            h->mb.i_type = P_SKIP;
        }

        /* Check for B_SKIP */
        if( h->mb.i_type == B_DIRECT && !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) )
        {
            h->mb.i_type = B_SKIP;
        }
    }
}
/*****************************************************************************
 * x264_macroblock_probe_skip:
 *  Check if the current MB could be encoded as a [PB]_SKIP
 *  (it assumes the previous QP is used)
 *****************************************************************************/
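/* The thresholds below mirror the encoder's decimation limits: a luma
 * decimate score of 6, any nonzero quantized chroma DC, or a chroma AC
 * score of 7 means there is too much residual for the MB to be skipped. */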
int x264_macroblock_probe_skip( x264_t *h, int b_bidir )
{
    ALIGNED_ARRAY_16( int16_t, dct4x4,[4],[4][4] );
    ALIGNED_ARRAY_16( int16_t, dct2x2,[2],[2] );
    ALIGNED_ARRAY_16( int16_t, dctscan,[16] );

    int i_qp = h->mb.i_qp;
    int mvp[2];
    int ch, thresh, ssd;
    int i8x8, i4x4;
    int i_decimate_mb;

    if( !b_bidir )
    {
        /* Get the MV */
        mvp[0] = x264_clip3( h->mb.cache.pskip_mv[0], h->mb.mv_min[0], h->mb.mv_max[0] );
        mvp[1] = x264_clip3( h->mb.cache.pskip_mv[1], h->mb.mv_min[1], h->mb.mv_max[1] );

        /* Motion compensation */
        h->mc.mc_luma( h->mb.pic.p_fdec[0],    FDEC_STRIDE,
                       h->mb.pic.p_fref[0][0], h->mb.pic.i_stride[0],
                       mvp[0], mvp[1], 16, 16 );
    }

    for( i8x8 = 0, i_decimate_mb = 0; i8x8 < 4; i8x8++ )
    {
        int fenc_offset = (i8x8&1) * 8 + (i8x8>>1) * FENC_STRIDE * 8;
        int fdec_offset = (i8x8&1) * 8 + (i8x8>>1) * FDEC_STRIDE * 8;

        /* get luma diff */
        h->dctf.sub8x8_dct( dct4x4, h->mb.pic.p_fenc[0] + fenc_offset,
                                    h->mb.pic.p_fdec[0] + fdec_offset );

        /* encode one 4x4 block */
        for( i4x4 = 0; i4x4 < 4; i4x4++ )
        {
            if( !h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PY][i_qp], h->quant4_bias[CQM_4PY][i_qp] ) )
                continue;
            h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] );
            i_decimate_mb += h->quantf.decimate_score16( dctscan );
            if( i_decimate_mb >= 6 )
                return 0;
        }
    }

    /* encode chroma */
    i_qp = h->mb.i_chroma_qp;
    thresh = (x264_lambda2_tab[i_qp] + 32) >> 6;

    for( ch = 0; ch < 2; ch++ )
    {
        uint8_t  *p_src = h->mb.pic.p_fenc[1+ch];
        uint8_t  *p_dst = h->mb.pic.p_fdec[1+ch];

        if( !b_bidir )
        {
            h->mc.mc_chroma( h->mb.pic.p_fdec[1+ch],       FDEC_STRIDE,
                             h->mb.pic.p_fref[0][0][4+ch], h->mb.pic.i_stride[1+ch],
                             mvp[0], mvp[1], 8, 8 );
        }

        /* There is almost never a termination during chroma, but we can't avoid the check entirely,
         * so instead we check SSD and skip the actual check if the score is low enough. */
        ssd = h->pixf.ssd[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src, FENC_STRIDE );
        if( ssd < thresh )
            continue;

        h->dctf.sub8x8_dct( dct4x4, p_src, p_dst );

        /* calculate dct DC */
        dct2x2dc( dct2x2, dct4x4 );
        if( h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4PC][i_qp][0]>>1, h->quant4_bias[CQM_4PC][i_qp][0]<<1 ) )
            return 0;

        /* If there wasn't a termination in DC, we can check against a much higher threshold. */
        if( ssd < thresh*4 )
            continue;

        /* calculate dct coeffs */
        for( i4x4 = 0, i_decimate_mb = 0; i4x4 < 4; i4x4++ )
        {
            if( !h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] ) )
                continue;
            h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] );
            i_decimate_mb += h->quantf.decimate_score15( dctscan );
            if( i_decimate_mb >= 7 )
                return 0;
        }
    }

    h->mb.b_skip_mc = 1;
    return 1;
}
/****************************************************************************
 * DCT-domain noise reduction / adaptive deadzone
 * from libavcodec
 ****************************************************************************/
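/* The offset subtracted from each coefficient before quantization works out
 * to roughly i_noise_reduction * count divided by the weighted residual sum,
 * so coefficients whose average magnitude is small relative to the noise
 * level get a larger adaptive deadzone. */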
void x264_noise_reduction_update( x264_t *h )
{
    int cat, i;
    for( cat = 0; cat < 2; cat++ )
    {
        int size = cat ? 64 : 16;
        const uint16_t *weight = cat ? x264_dct8_weight2_tab : x264_dct4_weight2_tab;

        if( h->nr_count[cat] > (cat ? (1<<16) : (1<<18)) )
        {
            for( i = 0; i < size; i++ )
                h->nr_residual_sum[cat][i] >>= 1;
            h->nr_count[cat] >>= 1;
        }

        for( i = 0; i < size; i++ )
            h->nr_offset[cat][i] =
                ((uint64_t)h->param.analyse.i_noise_reduction * h->nr_count[cat]
                 + h->nr_residual_sum[cat][i]/2)
              / ((uint64_t)h->nr_residual_sum[cat][i] * weight[i]/256 + 1);
    }
}
/*****************************************************************************
 * RD only; 4 calls to this do not make up for one macroblock_encode.
 * doesn't transform chroma dc.
 *****************************************************************************/
void x264_macroblock_encode_p8x8( x264_t *h, int i8 )
{
    int i_qp = h->mb.i_qp;
    uint8_t *p_fenc = h->mb.pic.p_fenc[0] + (i8&1)*8 + (i8>>1)*8*FENC_STRIDE;
    uint8_t *p_fdec = h->mb.pic.p_fdec[0] + (i8&1)*8 + (i8>>1)*8*FDEC_STRIDE;
    int b_decimate = h->sh.i_type == SLICE_TYPE_B || h->param.analyse.b_dct_decimate;
    int nnz8x8 = 0;
    int ch, nz;

    if( !h->mb.b_skip_mc )
        x264_mb_mc_8x8( h, i8 );

    if( h->mb.b_lossless )
    {
        int i4;
        if( h->mb.b_transform_8x8 )
        {
            nnz8x8 = h->zigzagf.sub_8x8( h->dct.luma8x8[i8], p_fenc, p_fdec );
            STORE_8x8_NNZ(i8,nnz8x8);
        }
        else
        {
            for( i4 = i8*4; i4 < i8*4+4; i4++ )
            {
                int nz;
                nz = h->zigzagf.sub_4x4( h->dct.luma4x4[i4],
                                    h->mb.pic.p_fenc[0]+block_idx_xy_fenc[i4],
                                    h->mb.pic.p_fdec[0]+block_idx_xy_fdec[i4] );
                h->mb.cache.non_zero_count[x264_scan8[i4]] = nz;
                nnz8x8 |= nz;
            }
        }
        for( ch = 0; ch < 2; ch++ )
        {
            int16_t dc;
            p_fenc = h->mb.pic.p_fenc[1+ch] + (i8&1)*4 + (i8>>1)*4*FENC_STRIDE;
            p_fdec = h->mb.pic.p_fdec[1+ch] + (i8&1)*4 + (i8>>1)*4*FDEC_STRIDE;
            nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[16+i8+ch*4], p_fenc, p_fdec, &dc );
            h->mb.cache.non_zero_count[x264_scan8[16+i8+ch*4]] = nz;
        }
    }
    else
    {
        if( h->mb.b_transform_8x8 )
        {
            ALIGNED_ARRAY_16( int16_t, dct8x8,[8],[8] );
            h->dctf.sub8x8_dct8( dct8x8, p_fenc, p_fdec );
            nnz8x8 = x264_quant_8x8( h, dct8x8, i_qp, 0, i8 );
            if( nnz8x8 )
            {
                h->zigzagf.scan_8x8( h->dct.luma8x8[i8], dct8x8 );

                if( b_decimate && !h->mb.b_trellis )
                    nnz8x8 = 4 <= h->quantf.decimate_score64( h->dct.luma8x8[i8] );

                if( nnz8x8 )
                {
                    h->quantf.dequant_8x8( dct8x8, h->dequant8_mf[CQM_8PY], i_qp );
                    h->dctf.add8x8_idct8( p_fdec, dct8x8 );
                    STORE_8x8_NNZ(i8,1);
                }
                else
                    STORE_8x8_NNZ(i8,0);
            }
            else
                STORE_8x8_NNZ(i8,0);
        }
        else
        {
            int i4;
            int i_decimate_8x8 = 0;
            ALIGNED_ARRAY_16( int16_t, dct4x4,[4],[4][4] );
            h->dctf.sub8x8_dct( dct4x4, p_fenc, p_fdec );
            for( i4 = 0; i4 < 4; i4++ )
            {
                nz = x264_quant_4x4( h, dct4x4[i4], i_qp, DCT_LUMA_4x4, 0, i8*4+i4 );
                h->mb.cache.non_zero_count[x264_scan8[i8*4+i4]] = nz;
                if( nz )
                {
                    h->zigzagf.scan_4x4( h->dct.luma4x4[i8*4+i4], dct4x4[i4] );
                    h->quantf.dequant_4x4( dct4x4[i4], h->dequant4_mf[CQM_4PY], i_qp );
                    if( b_decimate )
                        i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[i8*4+i4] );
                    nnz8x8 = 1;
                }
            }

            if( b_decimate && i_decimate_8x8 < 4 )
                nnz8x8 = 0;

            if( nnz8x8 )
                h->dctf.add8x8_idct( p_fdec, dct4x4 );
            else
                STORE_8x8_NNZ(i8,0);
        }

        i_qp = h->mb.i_chroma_qp;

        for( ch = 0; ch < 2; ch++ )
        {
            ALIGNED_ARRAY_16( int16_t, dct4x4,[4],[4] );
            p_fenc = h->mb.pic.p_fenc[1+ch] + (i8&1)*4 + (i8>>1)*4*FENC_STRIDE;
            p_fdec = h->mb.pic.p_fdec[1+ch] + (i8&1)*4 + (i8>>1)*4*FDEC_STRIDE;

            h->dctf.sub4x4_dct( dct4x4, p_fenc, p_fdec );
            dct4x4[0][0] = 0;

            if( h->mb.b_trellis )
                nz = x264_quant_4x4_trellis( h, dct4x4, CQM_4PC, i_qp, DCT_CHROMA_AC, 0, 1, 0 );
            else
                nz = h->quantf.quant_4x4( dct4x4, h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] );
            h->mb.cache.non_zero_count[x264_scan8[16+i8+ch*4]] = nz;
            if( nz )
            {
                h->zigzagf.scan_4x4( h->dct.luma4x4[16+i8+ch*4], dct4x4 );
                h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4PC], i_qp );
                h->dctf.add4x4_idct( p_fdec, dct4x4 );
            }
        }
    }
    h->mb.i_cbp_luma &= ~(1 << i8);
    h->mb.i_cbp_luma |= nnz8x8 << i8;
    h->mb.i_cbp_chroma = 0x02;
}
/*****************************************************************************
 * RD only, luma only
 *****************************************************************************/
void x264_macroblock_encode_p4x4( x264_t *h, int i4 )
{
    int i_qp = h->mb.i_qp;
    uint8_t *p_fenc = &h->mb.pic.p_fenc[0][block_idx_xy_fenc[i4]];
    uint8_t *p_fdec = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i4]];
    const int i_ref = h->mb.cache.ref[0][x264_scan8[i4]];
    const int mvx   = x264_clip3( h->mb.cache.mv[0][x264_scan8[i4]][0], h->mb.mv_min[0], h->mb.mv_max[0] );
    const int mvy   = x264_clip3( h->mb.cache.mv[0][x264_scan8[i4]][1], h->mb.mv_min[1], h->mb.mv_max[1] );
    int nz;

    h->mc.mc_luma( p_fdec, FDEC_STRIDE, h->mb.pic.p_fref[0][i_ref], h->mb.pic.i_stride[0], mvx + 4*4*block_idx_x[i4], mvy + 4*4*block_idx_y[i4], 4, 4 );

    if( h->mb.b_lossless )
    {
        nz = h->zigzagf.sub_4x4( h->dct.luma4x4[i4], p_fenc, p_fdec );
        h->mb.cache.non_zero_count[x264_scan8[i4]] = nz;
    }
    else
    {
        ALIGNED_ARRAY_16( int16_t, dct4x4,[4],[4] );
        h->dctf.sub4x4_dct( dct4x4, p_fenc, p_fdec );
        nz = x264_quant_4x4( h, dct4x4, i_qp, DCT_LUMA_4x4, 0, i4 );
        h->mb.cache.non_zero_count[x264_scan8[i4]] = nz;
        if( nz )
        {
            h->zigzagf.scan_4x4( h->dct.luma4x4[i4], dct4x4 );
            h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4PY], i_qp );
            h->dctf.add4x4_idct( p_fdec, dct4x4 );
        }
    }
}