mpegvideo.c
Uploaded by: hjq518
Upload date: 2021-12-09
Package size: 5084k
File size: 230k
Source category:

Audio

Development platform:

Visual C++
  1. /*  * The simplest mpeg encoder (well, it was the simplest!)  * Copyright (c) 2000,2001 Fabrice Bellard.  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>  *  * This library is free software; you can redistribute it and/or  * modify it under the terms of the GNU Lesser General Public  * License as published by the Free Software Foundation; either  * version 2 of the License, or (at your option) any later version.  *  * This library is distributed in the hope that it will be useful,  * but WITHOUT ANY WARRANTY; without even the implied warranty of  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU  * Lesser General Public License for more details.  *  * You should have received a copy of the GNU Lesser General Public  * License along with this library; if not, write to the Free Software  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA  *  * 4MV & hq & b-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>  */   /**  * @file mpegvideo.c  * The simplest mpeg encoder (well, it was the simplest!).  */    #include "avcodec.h" #include "dsputil.h" #include "mpegvideo.h" //#include "faandct.h" //Del by ty
  2. #include <limits.h> #ifdef USE_FASTMEMCPY #include "fastmemcpy.h" #endif //#undef NDEBUG //#include <assert.h> #ifdef CONFIG_ENCODERS static void encode_picture(MpegEncContext *s, int picture_number); #endif //CONFIG_ENCODERS static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,                                     DCTELEM *block, int n, int qscale); static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,                                     DCTELEM *block, int n, int qscale); static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,                                    DCTELEM *block, int n, int qscale); static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,                                    DCTELEM *block, int n, int qscale); static void dct_unquantize_h263_intra_c(MpegEncContext *s,                                    DCTELEM *block, int n, int qscale); static void dct_unquantize_h263_inter_c(MpegEncContext *s,                                    DCTELEM *block, int n, int qscale); static void dct_unquantize_h261_intra_c(MpegEncContext *s,                                    DCTELEM *block, int n, int qscale); static void dct_unquantize_h261_inter_c(MpegEncContext *s,                                    DCTELEM *block, int n, int qscale); static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w); #ifdef CONFIG_ENCODERS static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow); static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow); static int dct_quantize_refine(MpegEncContext *s, DCTELEM *block, int16_t *weight, DCTELEM *orig, int n, int qscale); static int sse_mb(MpegEncContext *s); static void  denoise_dct_c(MpegEncContext *s, DCTELEM *block); #endif //CONFIG_ENCODERS #ifdef HAVE_XVMC extern int  XVMC_field_start(MpegEncContext*s, AVCodecContext *avctx); extern void XVMC_field_end(MpegEncContext *s); extern void 
XVMC_decode_mb(MpegEncContext *s); #endif void (*draw_edges)(uint8_t *buf, int wrap, int width, int height, int w)= draw_edges_c; //Del by ty
  3. /* enable all paranoid tests for rounding, overflows, etc... */ //#define PARANOID //#define DEBUG /* for jpeg fast DCT */ #define CONST_BITS 14 static const uint16_t aanscales[64] = {     /* precomputed values scaled up by 14 bits */     16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,     22725, 31521, 29692, 26722, 22725, 17855, 12299,  6270,     21407, 29692, 27969, 25172, 21407, 16819, 11585,  5906,     19266, 26722, 25172, 22654, 19266, 15137, 10426,  5315,     16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,     12873, 17855, 16819, 15137, 12873, 10114,  6967,  3552,     8867 , 12299, 11585, 10426,  8867,  6967,  4799,  2446,     4520 ,  6270,  5906,  5315,  4520,  3552,  2446,  1247 }; static const uint8_t h263_chroma_roundtab[16] = { //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15     0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, }; static const uint8_t ff_default_chroma_qscale_table[32]={ //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31     0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 };
  4. //AS by ty
  5. //typedef short DCTELEM;
  6. uint8_t ff_mpeg1_dc_scale_table[128]={
  7. //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
  8.     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  9.     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  10.     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  11.     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  12. };
  13. //AE by ty #ifdef CONFIG_ENCODERS static uint8_t (*default_mv_penalty)[MAX_MV*2+1]=NULL; static uint8_t default_fcode_tab[MAX_MV*2+1]; enum PixelFormat ff_yuv420p_list[2]= {PIX_FMT_YUV420P, -1}; static void convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][64],                            const uint16_t *quant_matrix, int bias, int qmin, int qmax) {     int qscale;     for(qscale=qmin; qscale<=qmax; qscale++){         int i;         if (dsp->fdct == ff_jpeg_fdct_islow  #ifdef FAAN_POSTSCALE             || dsp->fdct == ff_faandct #endif             ) {             for(i=0;i<64;i++) {                 const int j= dsp->idct_permutation[i];                 /* 16 <= qscale * quant_matrix[i] <= 7905 */                 /* 19952         <= aanscales[i] * qscale * quant_matrix[i]           <= 249205026 */                 /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */                 /* 3444240       >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */                                  qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) /                                  (qscale * quant_matrix[j]));             }         } else if (dsp->fdct == fdct_ifast #ifndef FAAN_POSTSCALE                    || dsp->fdct == ff_faandct #endif                    ) {             for(i=0;i<64;i++) {                 const int j= dsp->idct_permutation[i];                 /* 16 <= qscale * quant_matrix[i] <= 7905 */                 /* 19952         <= aanscales[i] * qscale * quant_matrix[i]           <= 249205026 */                 /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */                 /* 3444240       >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */                                  qmat[qscale][i] = (int)((uint64_t_C(1) << (QMAT_SHIFT + 14)) /                                  (aanscales[i] * qscale * quant_matrix[j]));             }         } 
else {             for(i=0;i<64;i++) {                 const int j= dsp->idct_permutation[i];                 /* We can safely suppose that 16 <= quant_matrix[i] <= 255                    So 16           <= qscale * quant_matrix[i]             <= 7905                    so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905                    so 32768        >= (1<<19) / (qscale * quant_matrix[i]) >= 67                 */                 qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j])); //                qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);                 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]);                 if(qmat16[qscale][0][i]==0 || qmat16[qscale][0][i]==128*256) qmat16[qscale][0][i]=128*256-1;                 qmat16[qscale][1][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][0][i]);             }         }     } } static inline void update_qscale(MpegEncContext *s){     s->qscale= (s->lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);     s->qscale= clip(s->qscale, s->avctx->qmin, s->avctx->qmax);          s->lambda2= (s->lambda*s->lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT; } #endif //CONFIG_ENCODERS void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable){
  14.     int i;
  15.     int end;
  16.     
  17.     st->scantable= src_scantable;
  18.     for(i=0; i<64; i++){
  19.         int j;
  20.         j = src_scantable[i];
  21.         st->permutated[i] = permutation[j];
  22. #ifdef ARCH_POWERPC
  23.         st->inverse[j] = i;
  24. #endif
  25.     }
  26.     
  27.     end=-1;
  28.     for(i=0; i<64; i++){
  29.         int j;
  30.         j = st->permutated[i];
  31.         if(j>end) end=j;
  32.         st->raster_end[i]= end;
  33.     }
  34. }
  35. #ifdef CONFIG_ENCODERS
  36. void ff_write_quant_matrix(PutBitContext *pb, int16_t *matrix){
  37.     int i;
  38.     if(matrix){
  39.         put_bits(pb, 1, 1);
  40.         for(i=0;i<64;i++) {
  41.             put_bits(pb, 8, matrix[ ff_zigzag_direct[i] ]);
  42.         }
  43.     }else
  44.         put_bits(pb, 1, 0);
  45. }
  46. #endif //CONFIG_ENCODERS
  47. /* init common dct for both encoder and decoder */
  48. int DCT_common_init(MpegEncContext *s)
  49. {
  50.     s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
  51.     s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
  52.     s->dct_unquantize_h261_intra = dct_unquantize_h261_intra_c;
  53.     s->dct_unquantize_h261_inter = dct_unquantize_h261_inter_c;
  54.     s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
  55.     s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
  56.     s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
  57.     s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
  58. #ifdef CONFIG_ENCODERS
  59.     s->dct_quantize= dct_quantize_c;
  60.     s->denoise_dct= denoise_dct_c;
  61. #endif
  62.         
  63. #ifdef HAVE_MMX
  64.     MPV_common_init_mmx(s);
  65. #endif
  66. #ifdef ARCH_ALPHA
  67.     MPV_common_init_axp(s);
  68. #endif
  69. #ifdef HAVE_MLIB
  70.     MPV_common_init_mlib(s);
  71. #endif
  72. #ifdef HAVE_MMI
  73.     MPV_common_init_mmi(s);
  74. #endif
  75. #ifdef ARCH_ARMV4L
  76.     MPV_common_init_armv4l(s);
  77. #endif
  78. #ifdef ARCH_POWERPC
  79.     MPV_common_init_ppc(s);
  80. #endif
  81. #ifdef CONFIG_ENCODERS
  82.     s->fast_dct_quantize= s->dct_quantize;
  83.     if(s->flags&CODEC_FLAG_TRELLIS_QUANT){
  84.         s->dct_quantize= dct_quantize_trellis_c; //move before MPV_common_init_*
  85.     }
  86. #endif //CONFIG_ENCODERS
  87.     /* load & permutate scantables
  88.        note: only wmv uses differnt ones 
  89.     */
  90.     if(s->alternate_scan){
  91.         ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
  92.         ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
  93.     }else{
  94.         ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
  95.         ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
  96.     }
  97.     ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
  98.     ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
  99.     return 0;
  100. }
  101. static void copy_picture(Picture *dst, Picture *src){
  102.     *dst = *src;
  103.     dst->type= FF_BUFFER_TYPE_COPY;
  104. }
  105. static void copy_picture_attributes(MpegEncContext *s, AVFrame *dst, AVFrame *src){
  106.     int i;
  107.     dst->pict_type              = src->pict_type;
  108.     dst->quality                = src->quality;
  109.     dst->coded_picture_number   = src->coded_picture_number;
  110.     dst->display_picture_number = src->display_picture_number;
  111. //    dst->reference              = src->reference;
  112.     dst->pts                    = src->pts;
  113.     dst->interlaced_frame       = src->interlaced_frame;
  114.     dst->top_field_first        = src->top_field_first;
  115.     if(s->avctx->me_threshold){
  116.         if(!src->motion_val[0])
  117.             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_val not set!n");
  118.         if(!src->mb_type)
  119.             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.mb_type not set!n");
  120.         if(!src->ref_index[0])
  121.             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!n");
  122.         if(src->motion_subsample_log2 != dst->motion_subsample_log2)
  123.             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_subsample_log2 doesnt match! (%d!=%d)n",
  124.             src->motion_subsample_log2, dst->motion_subsample_log2);
  125.         memcpy(dst->mb_type, src->mb_type, s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));
  126.         
  127.         for(i=0; i<2; i++){
  128.             int stride= ((16*s->mb_width )>>src->motion_subsample_log2) + 1;
  129.             int height= ((16*s->mb_height)>>src->motion_subsample_log2);
  130.             if(src->motion_val[i] && src->motion_val[i] != dst->motion_val[i]){
  131.                 memcpy(dst->motion_val[i], src->motion_val[i], 2*stride*height*sizeof(int16_t));
  132.             }
  133.             if(src->ref_index[i] && src->ref_index[i] != dst->ref_index[i]){
  134.                 memcpy(dst->ref_index[i], src->ref_index[i], s->b8_stride*2*s->mb_height*sizeof(int8_t));
  135.             }
  136.         }
  137.     }
  138. }
  139. /**
  140.  * allocates a Picture
  141.  * The pixels are allocated/set by calling get_buffer() if shared=0
  142.  */
  143. static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
  144.     const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) doesnt sig11
  145.     const int mb_array_size= s->mb_stride*s->mb_height;
  146.     const int b8_array_size= s->b8_stride*s->mb_height*2;
  147.     const int b4_array_size= s->b4_stride*s->mb_height*4;
  148.     int i;
  149.     
  150.     if(shared){
  151.         assert(pic->data[0]);
  152.         assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
  153.         pic->type= FF_BUFFER_TYPE_SHARED;
  154.     }else{
  155.         int r;
  156.         
  157.         assert(!pic->data[0]);
  158.         
  159.         r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic);
  160.         
  161.         if(r<0 || !pic->age || !pic->type || !pic->data[0]){
  162.     av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)n", r, pic->age, pic->type, pic->data[0]);
  163.             return -1;
  164.         }
  165.         if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){
  166.             av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)n");
  167.             return -1;
  168.         }
  169.         if(pic->linesize[1] != pic->linesize[2]){
  170.             av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride missmatch)n");
  171.             return -1;
  172.         }
  173.         s->linesize  = pic->linesize[0];
  174.         s->uvlinesize= pic->linesize[1];
  175.     }
  176.     
  177.     if(pic->qscale_table==NULL){
  178.         if (s->encoding) {        
  179.             CHECKED_ALLOCZ(pic->mb_var   , mb_array_size * sizeof(int16_t))
  180.             CHECKED_ALLOCZ(pic->mc_mb_var, mb_array_size * sizeof(int16_t))
  181.             CHECKED_ALLOCZ(pic->mb_mean  , mb_array_size * sizeof(int8_t))
  182.         }
  183.         CHECKED_ALLOCZ(pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2) //the +2 is for the slice end check
  184.         CHECKED_ALLOCZ(pic->qscale_table , mb_array_size * sizeof(uint8_t))
  185.         CHECKED_ALLOCZ(pic->mb_type_base , big_mb_num    * sizeof(uint32_t))
  186.         pic->mb_type= pic->mb_type_base + s->mb_stride+1;
  187.         if(s->out_format == FMT_H264){
  188.             for(i=0; i<2; i++){
  189.                 CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b4_array_size+2)  * sizeof(int16_t))
  190.                 pic->motion_val[i]= pic->motion_val_base[i]+2;
  191.                 CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t))
  192.             }
  193.             pic->motion_subsample_log2= 2;
  194.         }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
  195.             for(i=0; i<2; i++){
  196.                 CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b8_array_size+2) * sizeof(int16_t))
  197.                 pic->motion_val[i]= pic->motion_val_base[i]+2;
  198.                 CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t))
  199.             }
  200.             pic->motion_subsample_log2= 3;
  201.         }
  202.         if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
  203.             CHECKED_ALLOCZ(pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6)
  204.         }
  205.         pic->qstride= s->mb_stride;
  206.         CHECKED_ALLOCZ(pic->pan_scan , 1 * sizeof(AVPanScan))
  207.     }
  208.     //it might be nicer if the application would keep track of these but it would require a API change
  209.     memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
  210.     s->prev_pict_types[0]= s->pict_type;
  211.     if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == B_TYPE)
  212.         pic->age= INT_MAX; // skiped MBs in b frames are quite rare in mpeg1/2 and its a bit tricky to skip them anyway
  213.     
  214.     return 0;
  215. fail: //for the CHECKED_ALLOCZ macro
  216.     return -1;
  217. }
  218. /**
  219.  * deallocates a picture
  220.  */
  221. static void free_picture(MpegEncContext *s, Picture *pic){
  222.     int i;
  223.     if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
  224.         s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
  225.     }
  226.     av_freep(&pic->mb_var);
  227.     av_freep(&pic->mc_mb_var);
  228.     av_freep(&pic->mb_mean);
  229.     av_freep(&pic->mbskip_table);
  230.     av_freep(&pic->qscale_table);
  231.     av_freep(&pic->mb_type_base);
  232.     av_freep(&pic->dct_coeff);
  233.     av_freep(&pic->pan_scan);
  234.     pic->mb_type= NULL;
  235.     for(i=0; i<2; i++){
  236.         av_freep(&pic->motion_val_base[i]);
  237.         av_freep(&pic->ref_index[i]);
  238.     }
  239.     
  240.     if(pic->type == FF_BUFFER_TYPE_SHARED){
  241.         for(i=0; i<4; i++){
  242.             pic->base[i]=
  243.             pic->data[i]= NULL;
  244.         }
  245.         pic->type= 0;        
  246.     }
  247. }
  248. static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
  249.     int i;
  250.     // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264) 
  251.     CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance
  252.     s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*17;
  253.      //FIXME should be linesize instead of s->width*2 but that isnt known before get_buffer()
  254.     CHECKED_ALLOCZ(s->me.scratchpad,  (s->width+64)*4*16*2*sizeof(uint8_t)) 
  255.     s->rd_scratchpad=   s->me.scratchpad;
  256.     s->b_scratchpad=    s->me.scratchpad;
  257.     s->obmc_scratchpad= s->me.scratchpad + 16;
  258.     if (s->encoding) {
  259.         CHECKED_ALLOCZ(s->me.map      , ME_MAP_SIZE*sizeof(uint32_t))
  260.         CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t))
  261.         if(s->avctx->noise_reduction){
  262.             CHECKED_ALLOCZ(s->dct_error_sum, 2 * 64 * sizeof(int))
  263.         }
  264.     }   
  265.     CHECKED_ALLOCZ(s->blocks, 64*12*2 * sizeof(DCTELEM))
  266.     s->block= s->blocks[0];
  267.     for(i=0;i<12;i++){
  268.         s->pblocks[i] = (short *)(&s->block[i]);
  269.     }
  270.     return 0;
  271. fail:
  272.     return -1; //free() through MPV_common_end()
  273. }
  274. static void free_duplicate_context(MpegEncContext *s){
  275.     if(s==NULL) return;
  276.     av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
  277.     av_freep(&s->me.scratchpad);
  278.     s->rd_scratchpad=   
  279.     s->b_scratchpad=    
  280.     s->obmc_scratchpad= NULL;
  281.     
  282.     av_freep(&s->dct_error_sum);
  283.     av_freep(&s->me.map);
  284.     av_freep(&s->me.score_map);
  285.     av_freep(&s->blocks);
  286.     s->block= NULL;
  287. }
  288. static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
  289. #define COPY(a) bak->a= src->a
  290.     COPY(allocated_edge_emu_buffer);
  291.     COPY(edge_emu_buffer);
  292.     COPY(me.scratchpad);
  293.     COPY(rd_scratchpad);
  294.     COPY(b_scratchpad);
  295.     COPY(obmc_scratchpad);
  296.     COPY(me.map);
  297.     COPY(me.score_map);
  298.     COPY(blocks);
  299.     COPY(block);
  300.     COPY(start_mb_y);
  301.     COPY(end_mb_y);
  302.     COPY(me.map_generation);
  303.     COPY(pb);
  304.     COPY(dct_error_sum);
  305.     COPY(dct_count[0]);
  306.     COPY(dct_count[1]);
  307. #undef COPY
  308. }
  309. void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
  310.     MpegEncContext bak;
  311.     int i;
  312.     //FIXME copy only needed parts
  313. //START_TIMER
  314.     backup_duplicate_context(&bak, dst);
  315.     memcpy(dst, src, sizeof(MpegEncContext));
  316.     backup_duplicate_context(dst, &bak);
  317.     for(i=0;i<12;i++){
  318.         dst->pblocks[i] = (short *)(&dst->block[i]);
  319.     }
  320. //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
  321. }
  322. static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src){
  323. #define COPY(a) dst->a= src->a
  324.     COPY(pict_type);
  325.     COPY(current_picture);
  326.     COPY(f_code);
  327.     COPY(b_code);
  328.     COPY(qscale);
  329.     COPY(lambda);
  330.     COPY(lambda2);
  331.     COPY(picture_in_gop_number);
  332.     COPY(gop_picture_number);
  333.     COPY(frame_pred_frame_dct); //FIXME dont set in encode_header
  334.     COPY(progressive_frame); //FIXME dont set in encode_header
  335.     COPY(partitioned_frame); //FIXME dont set in encode_header
  336. #undef COPY
  337. }
  338. /**
  339.  * sets the given MpegEncContext to common defaults (same for encoding and decoding).
  340.  * the changed fields will not depend upon the prior state of the MpegEncContext.
  341.  */
  342. static void MPV_common_defaults(MpegEncContext *s){
  343.     s->y_dc_scale_table=
  344.     s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
  345.     s->chroma_qscale_table= ff_default_chroma_qscale_table;
  346.     s->progressive_frame= 1;
  347.     s->progressive_sequence= 1;
  348.     s->picture_structure= PICT_FRAME;
  349.     s->coded_picture_number = 0;
  350.     s->picture_number = 0;
  351.     s->input_picture_number = 0;
  352.     s->picture_in_gop_number = 0;
  353.     s->f_code = 1;
  354.     s->b_code = 1;
  355. }
  356. /**
  357.  * sets the given MpegEncContext to defaults for decoding.
  358.  * the changed fields will not depend upon the prior state of the MpegEncContext.
  359.  */
  360. void MPV_decode_defaults(MpegEncContext *s){
  361.     MPV_common_defaults(s);
  362. }
  363. /**
  364.  * sets the given MpegEncContext to defaults for encoding.
  365.  * the changed fields will not depend upon the prior state of the MpegEncContext.
  366.  */
  367. #ifdef CONFIG_ENCODERS
  368. static void MPV_encode_defaults(MpegEncContext *s){
  369.     static int done=0;
  370.     
  371.     MPV_common_defaults(s);
  372.     
  373.     if(!done){
  374.         int i;
  375.         done=1;
  376.         default_mv_penalty= av_mallocz( sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1) );
  377.         memset(default_mv_penalty, 0, sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1));
  378.         memset(default_fcode_tab , 0, sizeof(uint8_t)*(2*MAX_MV+1));
  379.         for(i=-16; i<16; i++){
  380.             default_fcode_tab[i + MAX_MV]= 1;
  381.         }
  382.     }
  383.     s->me.mv_penalty= default_mv_penalty;
  384.     s->fcode_tab= default_fcode_tab;
  385. }
  386. #endif //CONFIG_ENCODERS
  387. /** 
  388.  * init common structure for both encoder and decoder.
  389.  * this assumes that some variables like width/height are already set
  390.  */
  391. int MPV_common_init(MpegEncContext *s)
  392. {
  393.     int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
  394.     if(s->avctx->thread_count > MAX_THREADS || (16*s->avctx->thread_count > s->height && s->height)){
  395.         av_log(s->avctx, AV_LOG_ERROR, "too many threadsn");
  396.         return -1;
  397.     }
  398.     dsputil_init(&s->dsp, s->avctx);
  399.     DCT_common_init(s);
  400.     s->flags= s->avctx->flags;
  401.     s->flags2= s->avctx->flags2;
  402.     s->mb_width  = (s->width  + 15) / 16;
  403.     s->mb_height = (s->height + 15) / 16;
  404.     s->mb_stride = s->mb_width + 1;
  405.     s->b8_stride = s->mb_width*2 + 1;
  406.     s->b4_stride = s->mb_width*4 + 1;
  407.     mb_array_size= s->mb_height * s->mb_stride;
  408.     mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
  409.     /* set chroma shifts */
  410.     avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
  411.                                                     &(s->chroma_y_shift) );
  412.     /* set default edge pos, will be overriden in decode_header if needed */
  413.     s->h_edge_pos= s->mb_width*16;
  414.     s->v_edge_pos= s->mb_height*16;
  415.     s->mb_num = s->mb_width * s->mb_height;
  416.     
  417.     s->block_wrap[0]=
  418.     s->block_wrap[1]=
  419.     s->block_wrap[2]=
  420.     s->block_wrap[3]= s->b8_stride;
  421.     s->block_wrap[4]=
  422.     s->block_wrap[5]= s->mb_stride;
  423.  
  424.     y_size = s->b8_stride * (2 * s->mb_height + 1);
  425.     c_size = s->mb_stride * (s->mb_height + 1);
  426.     yc_size = y_size + 2 * c_size;
  427.     
  428.     /* convert fourcc to upper case */
  429.     s->avctx->codec_tag=   toupper( s->avctx->codec_tag     &0xFF)          
  430.                         + (toupper((s->avctx->codec_tag>>8 )&0xFF)<<8 )
  431.                         + (toupper((s->avctx->codec_tag>>16)&0xFF)<<16) 
  432.                         + (toupper((s->avctx->codec_tag>>24)&0xFF)<<24);
  433.     s->avctx->stream_codec_tag=   toupper( s->avctx->stream_codec_tag     &0xFF)          
  434.                                + (toupper((s->avctx->stream_codec_tag>>8 )&0xFF)<<8 )
  435.                                + (toupper((s->avctx->stream_codec_tag>>16)&0xFF)<<16) 
  436.                                + (toupper((s->avctx->stream_codec_tag>>24)&0xFF)<<24);
  437.     s->avctx->coded_frame= (AVFrame*)&s->current_picture;
  438.     CHECKED_ALLOCZ(s->mb_index2xy, (s->mb_num+1)*sizeof(int)) //error ressilience code looks cleaner with this
  439.     for(y=0; y<s->mb_height; y++){
  440.         for(x=0; x<s->mb_width; x++){
  441.             s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
  442.         }
  443.     }
  444.     s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
  445.     
  446.     if (s->encoding) {
  447.         /* Allocate MV tables */
  448.         CHECKED_ALLOCZ(s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t))
  449.         CHECKED_ALLOCZ(s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t))
  450.         CHECKED_ALLOCZ(s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t))
  451.         CHECKED_ALLOCZ(s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
  452.         CHECKED_ALLOCZ(s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
  453.         CHECKED_ALLOCZ(s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t))
  454.         s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
  455.         s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
  456.         s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
  457.         s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
  458.         s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
  459.         s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;
  460.         if(s->msmpeg4_version){
  461.             CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int));
  462.         }
  463.         CHECKED_ALLOCZ(s->avctx->stats_out, 256);
  464.         /* Allocate MB type table */
  465.         CHECKED_ALLOCZ(s->mb_type  , mb_array_size * sizeof(uint16_t)) //needed for encoding
  466.         
  467.         CHECKED_ALLOCZ(s->lambda_table, mb_array_size * sizeof(int))
  468.         
  469.         CHECKED_ALLOCZ(s->q_intra_matrix, 64*32 * sizeof(int))
  470.         CHECKED_ALLOCZ(s->q_inter_matrix, 64*32 * sizeof(int))
  471.         CHECKED_ALLOCZ(s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t))
  472.         CHECKED_ALLOCZ(s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t))
  473.         CHECKED_ALLOCZ(s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*))
  474.         CHECKED_ALLOCZ(s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*))
  475.         
  476.         if(s->avctx->noise_reduction){
  477.             CHECKED_ALLOCZ(s->dct_offset, 2 * 64 * sizeof(uint16_t))
  478.         }
  479.     }
  480.     CHECKED_ALLOCZ(s->picture, MAX_PICTURE_COUNT * sizeof(Picture))
  481.     CHECKED_ALLOCZ(s->error_status_table, mb_array_size*sizeof(uint8_t))
  482.     
  483.     if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
  484.         /* interlaced direct mode decoding tables */
  485.             for(i=0; i<2; i++){
  486.                 int j, k;
  487.                 for(j=0; j<2; j++){
  488.                     for(k=0; k<2; k++){
  489.                         CHECKED_ALLOCZ(s->b_field_mv_table_base[i][j][k]     , mv_table_size * 2 * sizeof(int16_t))
  490.                         s->b_field_mv_table[i][j][k]    = s->b_field_mv_table_base[i][j][k]     + s->mb_stride + 1;
  491.                     }
  492.                     CHECKED_ALLOCZ(s->b_field_select_table[i][j]     , mb_array_size * 2 * sizeof(uint8_t))
  493.                     CHECKED_ALLOCZ(s->p_field_mv_table_base[i][j]     , mv_table_size * 2 * sizeof(int16_t))
  494.                     s->p_field_mv_table[i][j]    = s->p_field_mv_table_base[i][j]     + s->mb_stride + 1;
  495.                 }
  496.                 CHECKED_ALLOCZ(s->p_field_select_table[i]      , mb_array_size * 2 * sizeof(uint8_t))
  497.             }
  498.     }
  499.     if (s->out_format == FMT_H263) {
  500.         /* ac values */
  501.         CHECKED_ALLOCZ(s->ac_val_base, yc_size * sizeof(int16_t) * 16);
  502.         s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
  503.         s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
  504.         s->ac_val[2] = s->ac_val[1] + c_size;
  505.         
  506.         /* cbp values */
  507.         CHECKED_ALLOCZ(s->coded_block_base, y_size);
  508.         s->coded_block= s->coded_block_base + s->b8_stride + 1;
  509.         
  510.         /* divx501 bitstream reorder buffer */
  511.         CHECKED_ALLOCZ(s->bitstream_buffer, BITSTREAM_BUFFER_SIZE);
  512.         /* cbp, ac_pred, pred_dir */
  513.         CHECKED_ALLOCZ(s->cbp_table  , mb_array_size * sizeof(uint8_t))
  514.         CHECKED_ALLOCZ(s->pred_dir_table, mb_array_size * sizeof(uint8_t))
  515.     }
  516.     
  517.     if (s->h263_pred || s->h263_plus || !s->encoding) {
  518.         /* dc values */
  519.         //MN: we need these for error resilience of intra-frames
  520.         CHECKED_ALLOCZ(s->dc_val_base, yc_size * sizeof(int16_t));
  521.         s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
  522.         s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
  523.         s->dc_val[2] = s->dc_val[1] + c_size;
  524.         for(i=0;i<yc_size;i++)
  525.             s->dc_val_base[i] = 1024;
  526.     }
  527.     /* which mb is a intra block */
  528.     CHECKED_ALLOCZ(s->mbintra_table, mb_array_size);
  529.     memset(s->mbintra_table, 1, mb_array_size);
  530.     
  531.     /* init macroblock skip table */
  532.     CHECKED_ALLOCZ(s->mbskip_table, mb_array_size+2);
  533.     //Note the +1 is for a quicker mpeg4 slice_end detection
  534.     CHECKED_ALLOCZ(s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
  535.     
  536.     s->parse_context.state= -1;
  537.     if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
  538.        s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
  539.        s->visualization_buffer[1] = av_malloc((s->mb_width*8 + EDGE_WIDTH) * s->mb_height*8 + EDGE_WIDTH);
  540.        s->visualization_buffer[2] = av_malloc((s->mb_width*8 + EDGE_WIDTH) * s->mb_height*8 + EDGE_WIDTH);
  541.     }
  542.     s->context_initialized = 1;
  543.     s->thread_context[0]= s;
  544.     for(i=1; i<s->avctx->thread_count; i++){
  545.         s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
  546.         memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
  547.     }
  548.     for(i=0; i<s->avctx->thread_count; i++){
  549.         if(init_duplicate_context(s->thread_context[i], s) < 0)
  550.            goto fail;
  551.         s->thread_context[i]->start_mb_y= (s->mb_height*(i  ) + s->avctx->thread_count/2) / s->avctx->thread_count;
  552.         s->thread_context[i]->end_mb_y  = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
  553.     }
  554.     return 0;
  555.  fail:
  556.     MPV_common_end(s);
  557.     return -1;
  558. }
/* free everything allocated by MPV_common_init(), shared by encoder and decoder */
  560. void MPV_common_end(MpegEncContext *s)
  561. {
  562.     int i, j, k;
  563.     for(i=0; i<s->avctx->thread_count; i++){
  564.         free_duplicate_context(s->thread_context[i]);
  565.     }
  566.     for(i=1; i<s->avctx->thread_count; i++){
  567.         av_freep(&s->thread_context[i]);
  568.     }
  569.     av_freep(&s->parse_context.buffer);
  570.     s->parse_context.buffer_size=0;
  571.     av_freep(&s->mb_type);
  572.     av_freep(&s->p_mv_table_base);
  573.     av_freep(&s->b_forw_mv_table_base);
  574.     av_freep(&s->b_back_mv_table_base);
  575.     av_freep(&s->b_bidir_forw_mv_table_base);
  576.     av_freep(&s->b_bidir_back_mv_table_base);
  577.     av_freep(&s->b_direct_mv_table_base);
  578.     s->p_mv_table= NULL;
  579.     s->b_forw_mv_table= NULL;
  580.     s->b_back_mv_table= NULL;
  581.     s->b_bidir_forw_mv_table= NULL;
  582.     s->b_bidir_back_mv_table= NULL;
  583.     s->b_direct_mv_table= NULL;
  584.     for(i=0; i<2; i++){
  585.         for(j=0; j<2; j++){
  586.             for(k=0; k<2; k++){
  587.                 av_freep(&s->b_field_mv_table_base[i][j][k]);
  588.                 s->b_field_mv_table[i][j][k]=NULL;
  589.             }
  590.             av_freep(&s->b_field_select_table[i][j]);
  591.             av_freep(&s->p_field_mv_table_base[i][j]);
  592.             s->p_field_mv_table[i][j]=NULL;
  593.         }
  594.         av_freep(&s->p_field_select_table[i]);
  595.     }
  596.     
  597.     av_freep(&s->dc_val_base);
  598.     av_freep(&s->ac_val_base);
  599.     av_freep(&s->coded_block_base);
  600.     av_freep(&s->mbintra_table);
  601.     av_freep(&s->cbp_table);
  602.     av_freep(&s->pred_dir_table);
  603.     
  604.     av_freep(&s->mbskip_table);
  605.     av_freep(&s->prev_pict_types);
  606.     av_freep(&s->bitstream_buffer);
  607.     av_freep(&s->avctx->stats_out);
  608.     av_freep(&s->ac_stats);
  609.     av_freep(&s->error_status_table);
  610.     av_freep(&s->mb_index2xy);
  611.     av_freep(&s->lambda_table);
  612.     av_freep(&s->q_intra_matrix);
  613.     av_freep(&s->q_inter_matrix);
  614.     av_freep(&s->q_intra_matrix16);
  615.     av_freep(&s->q_inter_matrix16);
  616.     av_freep(&s->input_picture);
  617.     av_freep(&s->reordered_input_picture);
  618.     av_freep(&s->dct_offset);
  619.     if(s->picture){
  620.         for(i=0; i<MAX_PICTURE_COUNT; i++){
  621.             free_picture(s, &s->picture[i]);
  622.         }
  623.     }
  624.     av_freep(&s->picture);
  625.     s->context_initialized = 0;
  626.     s->last_picture_ptr=
  627.     s->next_picture_ptr=
  628.     s->current_picture_ptr= NULL;
  629.     for(i=0; i<3; i++)
  630.         av_freep(&s->visualization_buffer[i]);
  631. }
  632. #ifdef CONFIG_ENCODERS
  633. /* init video encoder */
  634. int MPV_encode_init(AVCodecContext *avctx)
  635. {
  636.     MpegEncContext *s = avctx->priv_data;
  637.     int i, dummy;
  638.     int chroma_h_shift, chroma_v_shift;
  639.     
  640.     MPV_encode_defaults(s);
  641.     avctx->pix_fmt = PIX_FMT_YUV420P; // FIXME
  642.     s->bit_rate = avctx->bit_rate;
  643.     s->width = avctx->width;
  644.     s->height = avctx->height;
  645.     if(avctx->gop_size > 600){
  646. av_log(avctx, AV_LOG_ERROR, "Warning keyframe interval too large! reducing it ...n");
  647.         avctx->gop_size=600;
  648.     }
  649.     s->gop_size = avctx->gop_size;
  650.     s->avctx = avctx;
  651.     s->flags= avctx->flags;
  652.     s->flags2= avctx->flags2;
  653.     s->max_b_frames= avctx->max_b_frames;
  654.     s->codec_id= avctx->codec->id;
  655.     s->luma_elim_threshold  = avctx->luma_elim_threshold;
  656.     s->chroma_elim_threshold= avctx->chroma_elim_threshold;
  657.     s->strict_std_compliance= avctx->strict_std_compliance;
  658.     s->data_partitioning= avctx->flags & CODEC_FLAG_PART;
  659.     s->quarter_sample= (avctx->flags & CODEC_FLAG_QPEL)!=0;
  660.     s->mpeg_quant= avctx->mpeg_quant;
  661.     s->rtp_mode= !!avctx->rtp_payload_size;
  662.     s->intra_dc_precision= avctx->intra_dc_precision;
  663.     if (s->gop_size <= 1) {
  664.         s->intra_only = 1;
  665.         s->gop_size = 12;
  666.     } else {
  667.         s->intra_only = 0;
  668.     }
  669.     s->me_method = avctx->me_method;
  670.     /* Fixed QSCALE */
  671.     s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
  672.     
  673.     s->adaptive_quant= (   s->avctx->lumi_masking
  674.                         || s->avctx->dark_masking
  675.                         || s->avctx->temporal_cplx_masking 
  676.                         || s->avctx->spatial_cplx_masking
  677.                         || s->avctx->p_masking
  678.                         || (s->flags&CODEC_FLAG_QP_RD))
  679.                        && !s->fixed_qscale;
  680.     
  681.     s->obmc= !!(s->flags & CODEC_FLAG_OBMC);
  682.     s->loop_filter= !!(s->flags & CODEC_FLAG_LOOP_FILTER);
  683.     s->alternate_scan= !!(s->flags & CODEC_FLAG_ALT_SCAN);
  684.     if(avctx->rc_max_rate && !avctx->rc_buffer_size){
  685.         av_log(avctx, AV_LOG_ERROR, "a vbv buffer size is needed, for encoding with a maximum bitraten");
  686.         return -1;
  687.     }    
  688.     if(avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate){
  689.         av_log(avctx, AV_LOG_INFO, "Warning min_rate > 0 but min_rate != max_rate isnt recommanded!n");
  690.     }
  691.     
  692.     if(avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate){
  693.         av_log(avctx, AV_LOG_INFO, "bitrate below min bitraten");
  694.         return -1;
  695.     }
  696.     
  697.     if(avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate){
  698.         av_log(avctx, AV_LOG_INFO, "bitrate above max bitraten");
  699.         return -1;
  700.     }
  701.         
  702.     if(   s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate 
  703.        && (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO)
  704.        && 90000LL * (avctx->rc_buffer_size-1) > s->avctx->rc_max_rate*0xFFFFLL){
  705.         
  706.         av_log(avctx, AV_LOG_INFO, "Warning vbv_delay will be set to 0xFFFF (=VBR) as the specified vbv buffer is too large for the given bitrate!n");
  707.     }
  708.        
  709.     if((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4 
  710.        && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P && s->codec_id != CODEC_ID_FLV1){
  711.         av_log(avctx, AV_LOG_ERROR, "4MV not supported by codecn");
  712.         return -1;
  713.     }
  714.         
  715.     if(s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE){
  716.         av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with simple mb decissionn");
  717.         return -1;
  718.     }
  719.     
  720.     if(s->obmc && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P){
  721.         av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with H263(+)n");
  722.         return -1;
  723.     }
  724.     
  725.     if(s->quarter_sample && s->codec_id != CODEC_ID_MPEG4){
  726.         av_log(avctx, AV_LOG_ERROR, "qpel not supported by codecn");
  727.         return -1;
  728.     }
  729.     if(s->data_partitioning && s->codec_id != CODEC_ID_MPEG4){
  730.         av_log(avctx, AV_LOG_ERROR, "data partitioning not supported by codecn");
  731.         return -1;
  732.     }
  733.     
  734.     if(s->max_b_frames && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG1VIDEO && s->codec_id != CODEC_ID_MPEG2VIDEO){
  735.         av_log(avctx, AV_LOG_ERROR, "b frames not supported by codecn");
  736.         return -1;
  737.     }
  738.     if((s->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME|CODEC_FLAG_ALT_SCAN)) 
  739.        && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG2VIDEO){
  740.         av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codecn");
  741.         return -1;
  742.     }
  743.         
  744.     if(s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4){ //FIXME mpeg2 uses that too
  745.         av_log(avctx, AV_LOG_ERROR, "mpeg2 style quantization not supporetd by codecn");
  746.         return -1;
  747.     }
  748.         
  749.     if((s->flags & CODEC_FLAG_CBP_RD) && !(s->flags & CODEC_FLAG_TRELLIS_QUANT)){
  750.         av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quantn");
  751.         return -1;
  752.     }
  753.     if((s->flags & CODEC_FLAG_QP_RD) && s->avctx->mb_decision != FF_MB_DECISION_RD){
  754.         av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2n");
  755.         return -1;
  756.     }
  757.     
  758.     if(s->avctx->scenechange_threshold < 1000000000 && (s->flags & CODEC_FLAG_CLOSED_GOP)){
  759.         av_log(avctx, AV_LOG_ERROR, "closed gop with scene change detection arent supported yetn");
  760.         return -1;
  761.     }
  762.     
  763.     if(s->avctx->thread_count > 1 && s->codec_id != CODEC_ID_MPEG4 
  764.        && s->codec_id != CODEC_ID_MPEG1VIDEO && s->codec_id != CODEC_ID_MPEG2VIDEO 
  765.        && (s->codec_id != CODEC_ID_H263P || !(s->flags & CODEC_FLAG_H263P_SLICE_STRUCT))){
  766.         av_log(avctx, AV_LOG_ERROR, "multi threaded encoding not supported by codecn");
  767.         return -1;
  768.     }
  769.     
  770.     if(s->avctx->thread_count > 1)
  771.         s->rtp_mode= 1;
  772.     i= ff_gcd(avctx->frame_rate, avctx->frame_rate_base);
  773.     if(i > 1){
  774.         av_log(avctx, AV_LOG_INFO, "removing common factors from frameraten");
  775.         avctx->frame_rate /= i;
  776.         avctx->frame_rate_base /= i;
  777. //        return -1;
  778.     }
  779.     
  780.     if(s->codec_id==CODEC_ID_MJPEG){
  781.         s->intra_quant_bias= 1<<(QUANT_BIAS_SHIFT-1); //(a + x/2)/x
  782.         s->inter_quant_bias= 0;
  783.     }else if(s->mpeg_quant || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO){
  784.         s->intra_quant_bias= 3<<(QUANT_BIAS_SHIFT-3); //(a + x*3/8)/x
  785.         s->inter_quant_bias= 0;
  786.     }else{
  787.         s->intra_quant_bias=0;
  788.         s->inter_quant_bias=-(1<<(QUANT_BIAS_SHIFT-2)); //(a - x/4)/x
  789.     }
  790.     
  791.     if(avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
  792.         s->intra_quant_bias= avctx->intra_quant_bias;
  793.     if(avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
  794.         s->inter_quant_bias= avctx->inter_quant_bias;
  795.         
  796.     avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);
  797.     av_reduce(&s->time_increment_resolution, &dummy, s->avctx->frame_rate, s->avctx->frame_rate_base, (1<<16)-1);
  798.     s->time_increment_bits = av_log2(s->time_increment_resolution - 1) + 1;
  799.     switch(avctx->codec->id) {
  800.     case CODEC_ID_MPEG1VIDEO:
  801.         s->out_format = FMT_MPEG1;
  802.         s->low_delay= 0; //s->max_b_frames ? 0 : 1;
  803.         avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
  804.         break;
  805.     case CODEC_ID_MPEG2VIDEO:
  806.         s->out_format = FMT_MPEG1;
  807.         s->low_delay= 0; //s->max_b_frames ? 0 : 1;
  808.         avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
  809.         s->rtp_mode= 1;
  810.         break;
  811.     case CODEC_ID_LJPEG:
  812.     case CODEC_ID_MJPEG:
  813.         s->out_format = FMT_MJPEG;
  814.         s->intra_only = 1; /* force intra only for jpeg */
  815.         s->mjpeg_write_tables = 1; /* write all tables */
  816. s->mjpeg_data_only_frames = 0; /* write all the needed headers */
  817.         s->mjpeg_vsample[0] = 1<<chroma_v_shift;
  818.         s->mjpeg_vsample[1] = 1;
  819.         s->mjpeg_vsample[2] = 1; 
  820.         s->mjpeg_hsample[0] = 1<<chroma_h_shift;
  821.         s->mjpeg_hsample[1] = 1; 
  822.         s->mjpeg_hsample[2] = 1; 
  823.         if (mjpeg_init(s) < 0)
  824.             return -1;
  825.         avctx->delay=0;
  826.         s->low_delay=1;
  827.         break;
  828. #ifdef CONFIG_RISKY
  829.     case CODEC_ID_H263:
  830.         if (h263_get_picture_format(s->width, s->height) == 7) {
  831.             av_log(avctx, AV_LOG_INFO, "Input picture size isn't suitable for h263 codec! try h263+n");
  832.             return -1;
  833.         }
  834.         s->out_format = FMT_H263;
  835. s->obmc= (avctx->flags & CODEC_FLAG_OBMC) ? 1:0;
  836.         avctx->delay=0;
  837.         s->low_delay=1;
  838.         break;
  839.     case CODEC_ID_H263P:
  840.         s->out_format = FMT_H263;
  841.         s->h263_plus = 1;
  842. /* Fx */
  843.         s->umvplus = (avctx->flags & CODEC_FLAG_H263P_UMV) ? 1:0;
  844. s->h263_aic= (avctx->flags & CODEC_FLAG_H263P_AIC) ? 1:0;
  845. s->modified_quant= s->h263_aic;
  846. s->alt_inter_vlc= (avctx->flags & CODEC_FLAG_H263P_AIV) ? 1:0;
  847. s->obmc= (avctx->flags & CODEC_FLAG_OBMC) ? 1:0;
  848. s->loop_filter= (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1:0;
  849. s->unrestricted_mv= s->obmc || s->loop_filter || s->umvplus;
  850.         s->h263_slice_structured= (s->flags & CODEC_FLAG_H263P_SLICE_STRUCT) ? 1:0;
  851. /* /Fx */
  852.         /* These are just to be sure */
  853.         avctx->delay=0;
  854.         s->low_delay=1;
  855.         break;
  856.     case CODEC_ID_FLV1:
  857.         s->out_format = FMT_H263;
  858.         s->h263_flv = 2; /* format = 1; 11-bit codes */
  859.         s->unrestricted_mv = 1;
  860.         s->rtp_mode=0; /* don't allow GOB */
  861.         avctx->delay=0;
  862.         s->low_delay=1;
  863.         break;
  864.     case CODEC_ID_RV10:
  865.         s->out_format = FMT_H263;
  866.         avctx->delay=0;
  867.         s->low_delay=1;
  868.         break;
  869.     case CODEC_ID_MPEG4:
  870.         s->out_format = FMT_H263;
  871.         s->h263_pred = 1;
  872.         s->unrestricted_mv = 1;
  873.         s->low_delay= s->max_b_frames ? 0 : 1;
  874.         avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
  875.         break;
  876.     case CODEC_ID_MSMPEG4V1:
  877.         s->out_format = FMT_H263;
  878.         s->h263_msmpeg4 = 1;
  879.         s->h263_pred = 1;
  880.         s->unrestricted_mv = 1;
  881.         s->msmpeg4_version= 1;
  882.         avctx->delay=0;
  883.         s->low_delay=1;
  884.         break;
  885.     case CODEC_ID_MSMPEG4V2:
  886.         s->out_format = FMT_H263;
  887.         s->h263_msmpeg4 = 1;
  888.         s->h263_pred = 1;
  889.         s->unrestricted_mv = 1;
  890.         s->msmpeg4_version= 2;
  891.         avctx->delay=0;
  892.         s->low_delay=1;
  893.         break;
  894.     case CODEC_ID_MSMPEG4V3:
  895.         s->out_format = FMT_H263;
  896.         s->h263_msmpeg4 = 1;
  897.         s->h263_pred = 1;
  898.         s->unrestricted_mv = 1;
  899.         s->msmpeg4_version= 3;
  900.         s->flipflop_rounding=1;
  901.         avctx->delay=0;
  902.         s->low_delay=1;
  903.         break;
  904.     case CODEC_ID_WMV1:
  905.         s->out_format = FMT_H263;
  906.         s->h263_msmpeg4 = 1;
  907.         s->h263_pred = 1;
  908.         s->unrestricted_mv = 1;
  909.         s->msmpeg4_version= 4;
  910.         s->flipflop_rounding=1;
  911.         avctx->delay=0;
  912.         s->low_delay=1;
  913.         break;
  914.     case CODEC_ID_WMV2:
  915.         s->out_format = FMT_H263;
  916.         s->h263_msmpeg4 = 1;
  917.         s->h263_pred = 1;
  918.         s->unrestricted_mv = 1;
  919.         s->msmpeg4_version= 5;
  920.         s->flipflop_rounding=1;
  921.         avctx->delay=0;
  922.         s->low_delay=1;
  923.         break;
  924. #endif
  925.     default:
  926.         return -1;
  927.     }
  928.     
  929.     avctx->has_b_frames= !s->low_delay;
  930.     s->encoding = 1;
  931.     /* init */
  932.     if (MPV_common_init(s) < 0)
  933.         return -1;
  934.     if(s->modified_quant)
  935.         s->chroma_qscale_table= ff_h263_chroma_qscale_table;
  936.     s->progressive_frame= 
  937.     s->progressive_sequence= !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME));
  938.     s->quant_precision=5;
  939.     
  940.     ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
  941.     
  942. #ifdef CONFIG_ENCODERS
  943. #ifdef CONFIG_RISKY
  944.     if (s->out_format == FMT_H263)
  945.         h263_encode_init(s);
  946.     if(s->msmpeg4_version)
  947.         ff_msmpeg4_encode_init(s);
  948. #endif
  949.     if (s->out_format == FMT_MPEG1)
  950.         ff_mpeg1_encode_init(s);
  951. #endif
  952.     /* init q matrix */
  953.     for(i=0;i<64;i++) {
  954.         int j= s->dsp.idct_permutation[i];
  955. #ifdef CONFIG_RISKY
  956.         if(s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){
  957.             s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
  958.             s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
  959.         }else if(s->out_format == FMT_H263){
  960.             s->intra_matrix[j] =
  961.             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
  962.         }else
  963. #endif
  964.         { /* mpeg1/2 */
  965.             s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
  966.             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
  967.         }
  968.         if(s->avctx->intra_matrix)
  969.             s->intra_matrix[j] = s->avctx->intra_matrix[i];
  970.         if(s->avctx->inter_matrix)
  971.             s->inter_matrix[j] = s->avctx->inter_matrix[i];
  972.     }
  973.     /* precompute matrix */
  974.     /* for mjpeg, we do include qscale in the matrix */
  975.     if (s->out_format != FMT_MJPEG) {
  976.         convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16, 
  977.                        s->intra_matrix, s->intra_quant_bias, 1, 31);
  978.         convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16, 
  979.                        s->inter_matrix, s->inter_quant_bias, 1, 31);
  980.     }
  981.     if(ff_rate_control_init(s) < 0)
  982.         return -1;
  983.     
  984.     return 0;
  985. }
  986. int MPV_encode_end(AVCodecContext *avctx)
  987. {
  988.     MpegEncContext *s = avctx->priv_data;
  989. #ifdef STATS
  990.     print_stats();
  991. #endif
  992.     ff_rate_control_uninit(s);
  993.     MPV_common_end(s);
  994.     if (s->out_format == FMT_MJPEG)
  995.         mjpeg_close(s);
  996.     av_freep(&avctx->extradata);
  997.       
  998.     return 0;
  999. }
  1000. #endif //CONFIG_ENCODERS
  1001. void init_rl(RLTable *rl)
  1002. {
  1003.     int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
  1004.     uint8_t index_run[MAX_RUN+1];
  1005.     int last, run, level, start, end, i;
  1006.     /* compute max_level[], max_run[] and index_run[] */
  1007.     for(last=0;last<2;last++) {
  1008.         if (last == 0) {
  1009.             start = 0;
  1010.             end = rl->last;
  1011.         } else {
  1012.             start = rl->last;
  1013.             end = rl->n;
  1014.         }
  1015.         memset(max_level, 0, MAX_RUN + 1);
  1016.         memset(max_run, 0, MAX_LEVEL + 1);
  1017.         memset(index_run, rl->n, MAX_RUN + 1);
  1018.         for(i=start;i<end;i++) {
  1019.             run = rl->table_run[i];
  1020.             level = rl->table_level[i];
  1021.             if (index_run[run] == rl->n)
  1022.                 index_run[run] = i;
  1023.             if (level > max_level[run])
  1024.                 max_level[run] = level;
  1025.             if (run > max_run[level])
  1026.                 max_run[level] = run;
  1027.         }
  1028.         rl->max_level[last] = av_malloc(MAX_RUN + 1);
  1029.         memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
  1030.         rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
  1031.         memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
  1032.         rl->index_run[last] = av_malloc(MAX_RUN + 1);
  1033.         memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
  1034.     }
  1035. }
  1036. /* draw the edges of width 'w' of an image of size width, height */
  1037. //FIXME check that this is ok for mpeg4 interlaced
  1038. static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
  1039. {
  1040.     uint8_t *ptr, *last_line;
  1041.     int i;
  1042.     last_line = buf + (height - 1) * wrap;
  1043.     for(i=0;i<w;i++) {
  1044.         /* top and bottom */
  1045.         memcpy(buf - (i + 1) * wrap, buf, width);
  1046.         memcpy(last_line + (i + 1) * wrap, last_line, width);
  1047.     }
  1048.     /* left and right */
  1049.     ptr = buf;
  1050.     for(i=0;i<height;i++) {
  1051.         memset(ptr - w, ptr[0], w);
  1052.         memset(ptr + width, ptr[width-1], w);
  1053.         ptr += wrap;
  1054.     }
  1055.     /* corners */
  1056.     for(i=0;i<w;i++) {
  1057.         memset(buf - (i + 1) * wrap - w, buf[0], w); /* top left */
  1058.         memset(buf - (i + 1) * wrap + width, buf[width-1], w); /* top right */
  1059.         memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* top left */
  1060.         memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* top right */
  1061.     }
  1062. }
  1063. int ff_find_unused_picture(MpegEncContext *s, int shared){
  1064.     int i;
  1065.     
  1066.     if(shared){
  1067.         for(i=0; i<MAX_PICTURE_COUNT; i++){
  1068.             if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
  1069.         }
  1070.     }else{
  1071.         for(i=0; i<MAX_PICTURE_COUNT; i++){
  1072.             if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
  1073.         }
  1074.         for(i=0; i<MAX_PICTURE_COUNT; i++){
  1075.             if(s->picture[i].data[0]==NULL) return i;
  1076.         }
  1077.     }
  1078.     assert(0);
  1079.     return -1;
  1080. }
  1081. static void update_noise_reduction(MpegEncContext *s){
  1082.     int intra, i;
  1083.     for(intra=0; intra<2; intra++){
  1084.         if(s->dct_count[intra] > (1<<16)){
  1085.             for(i=0; i<64; i++){
  1086.                 s->dct_error_sum[intra][i] >>=1;
  1087.             }
  1088.             s->dct_count[intra] >>= 1;
  1089.         }
  1090.         
  1091.         for(i=0; i<64; i++){
  1092.             s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
  1093.         }
  1094.     }
  1095. }
  1096. /**
  1097.  * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
  1098.  */
  1099. int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
  1100. {
  1101.     int i;
  1102.     AVFrame *pic;
  1103.     s->mb_skiped = 0;
  1104.     assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
  1105.     /* mark&release old frames */
  1106.     if (s->pict_type != B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
  1107.         avctx->release_buffer(avctx, (AVFrame*)s->last_picture_ptr);
  1108.         /* release forgotten pictures */
  1109.         /* if(mpeg124/h263) */
  1110.         if(!s->encoding){
  1111.             for(i=0; i<MAX_PICTURE_COUNT; i++){
  1112.                 if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
  1113.                     av_log(avctx, AV_LOG_ERROR, "releasing zombie picturen");
  1114.                     avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);                
  1115.                 }
  1116.             }
  1117.         }
  1118.     }
  1119. alloc:
  1120.     if(!s->encoding){
  1121.         /* release non refernce frames */
  1122.         for(i=0; i<MAX_PICTURE_COUNT; i++){
  1123.             if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
  1124.                 s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
  1125.             }
  1126.         }
  1127.         if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
  1128.             pic= (AVFrame*)s->current_picture_ptr; //we allready have a unused image (maybe it was set before reading the header)
  1129.         else{
  1130.             i= ff_find_unused_picture(s, 0);
  1131.             pic= (AVFrame*)&s->picture[i];
  1132.         }
  1133.         pic->reference= s->pict_type != B_TYPE && !s->dropable ? 3 : 0;
  1134.         pic->coded_picture_number= s->coded_picture_number++;
  1135.         
  1136.         if( alloc_picture(s, (Picture*)pic, 0) < 0)
  1137.             return -1;
  1138.         s->current_picture_ptr= (Picture*)pic;
  1139.         s->current_picture_ptr->top_field_first= s->top_field_first; //FIXME use only the vars from current_pic
  1140.         s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
  1141.     }
  1142.     s->current_picture_ptr->pict_type= s->pict_type;
  1143. //    if(s->flags && CODEC_FLAG_QSCALE) 
  1144.   //      s->current_picture_ptr->quality= s->new_picture_ptr->quality;
  1145.     s->current_picture_ptr->key_frame= s->pict_type == I_TYPE;
  1146.     copy_picture(&s->current_picture, s->current_picture_ptr);
  1147.   
  1148.   if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
  1149.     if (s->pict_type != B_TYPE) {
  1150.         s->last_picture_ptr= s->next_picture_ptr;
  1151.         if(!s->dropable)
  1152.             s->next_picture_ptr= s->current_picture_ptr;
  1153.     }
  1154. /*    av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%dn", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
  1155.         s->last_picture_ptr    ? s->last_picture_ptr->data[0] : NULL, 
  1156.         s->next_picture_ptr    ? s->next_picture_ptr->data[0] : NULL, 
  1157.         s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
  1158.         s->pict_type, s->dropable);*/
  1159.     
  1160.     if(s->last_picture_ptr) copy_picture(&s->last_picture, s->last_picture_ptr);
  1161.     if(s->next_picture_ptr) copy_picture(&s->next_picture, s->next_picture_ptr);
  1162.     
  1163.     if(s->pict_type != I_TYPE && (s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL)){
  1164.         av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframen");
  1165.         assert(s->pict_type != B_TYPE); //these should have been dropped if we dont have a reference
  1166.         goto alloc;
  1167.     }
  1168.     assert(s->pict_type == I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
  1169.     if(s->picture_structure!=PICT_FRAME){
  1170.         int i;
  1171.         for(i=0; i<4; i++){
  1172.             if(s->picture_structure == PICT_BOTTOM_FIELD){
  1173.                  s->current_picture.data[i] += s->current_picture.linesize[i];
  1174.             } 
  1175.             s->current_picture.linesize[i] *= 2;
  1176.             s->last_picture.linesize[i] *=2;
  1177.             s->next_picture.linesize[i] *=2;
  1178.         }
  1179.     }
  1180.   }
  1181.    
  1182.     s->hurry_up= s->avctx->hurry_up;
  1183.     s->error_resilience= avctx->error_resilience;
  1184.     /* set dequantizer, we cant do it during init as it might change for mpeg4
  1185.        and we cant do it in the header decode as init isnt called for mpeg4 there yet */
  1186.     if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
  1187.         s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
  1188.         s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
  1189.     }else if(s->out_format == FMT_H263){
  1190.         s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
  1191.         s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
  1192.     }else if(s->out_format == FMT_H261){
  1193.         s->dct_unquantize_intra = s->dct_unquantize_h261_intra;
  1194.         s->dct_unquantize_inter = s->dct_unquantize_h261_inter;
  1195.     }else{
  1196.         s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
  1197.         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
  1198.     }
  1199.     if(s->dct_error_sum){
  1200.         assert(s->avctx->noise_reduction && s->encoding);
  1201.         update_noise_reduction(s);
  1202.     }
  1203.         
  1204. #ifdef HAVE_XVMC
  1205.     if(s->avctx->xvmc_acceleration)
  1206.         return XVMC_field_start(s, avctx);
  1207. #endif
  1208.     return 0;
  1209. }
/* generic function for encode/decode called after a frame has been coded/decoded */
/**
 * Post-frame bookkeeping shared by encoder and decoder.
 * Pads the reconstructed picture's edges (needed so unrestricted motion
 * vectors can reference pixels outside the coded area), records the
 * picture-type history, and, when encoding, returns non-reference frame
 * buffers to the application.
 *
 * @param s codec context; s->current_picture must hold the just-finished frame
 */
void MPV_frame_end(MpegEncContext *s)
{
    int i;
    /* draw edge for correct motion prediction if outside */
#ifdef HAVE_XVMC
//just to make sure that all data is rendered.
    if(s->avctx->xvmc_acceleration){
        XVMC_field_end(s);
    }else
#endif
    /* edge padding is skipped for B frames (never referenced), intra-only
       streams, and when the user asked for CODEC_FLAG_EMU_EDGE */
    if(s->unrestricted_mv && s->pict_type != B_TYPE && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
            draw_edges(s->current_picture.data[0], s->linesize  , s->h_edge_pos   , s->v_edge_pos   , EDGE_WIDTH  );
            draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
            draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
    }
    /* NOTE(review): presumably resets x86 MMX/FPU state after SIMD code —
       confirm against the dsputil emms_c definition for this build */
    emms_c();
    
    /* remember what we just coded/decoded for the next frame's decisions */
    s->last_pict_type    = s->pict_type;
    if(s->pict_type!=B_TYPE){
        s->last_non_b_pict_type= s->pict_type;
    }
#if 0   /* disabled: write current_picture back into the picture array */
        /* copy back current_picture variables */
    for(i=0; i<MAX_PICTURE_COUNT; i++){
        if(s->picture[i].data[0] == s->current_picture.data[0]){
            s->picture[i]= s->current_picture;
            break;
        }    
    }
    assert(i<MAX_PICTURE_COUNT);
#endif    
    if(s->encoding){
        /* release non reference frames */
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
                s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
            }
        }
    }
    // clear copies, to avoid confusion
#if 0   /* disabled: would wipe the local picture copies */
    memset(&s->last_picture, 0, sizeof(Picture));
    memset(&s->next_picture, 0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));
#endif
}
  1257. /**
  1258.  * draws an line from (ex, ey) -> (sx, sy).
  1259.  * @param w width of the image
  1260.  * @param h height of the image
  1261.  * @param stride stride/linesize of the image
  1262.  * @param color color of the arrow
  1263.  */
  1264. static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
  1265.     int t, x, y, fr, f;
  1266.     
  1267.     sx= clip(sx, 0, w-1);
  1268.     sy= clip(sy, 0, h-1);
  1269.     ex= clip(ex, 0, w-1);
  1270.     ey= clip(ey, 0, h-1);
  1271.     
  1272.     buf[sy*stride + sx]+= color;
  1273.     
  1274.     if(ABS(ex - sx) > ABS(ey - sy)){
  1275.         if(sx > ex){
  1276.             t=sx; sx=ex; ex=t;
  1277.             t=sy; sy=ey; ey=t;
  1278.         }
  1279.         buf+= sx + sy*stride;
  1280.         ex-= sx;
  1281.         f= ((ey-sy)<<16)/ex;
  1282.         for(x= 0; x <= ex; x++){
  1283.             y = (x*f)>>16;
  1284.             fr= (x*f)&0xFFFF;
  1285.             buf[ y   *stride + x]+= (color*(0x10000-fr))>>16;
  1286.             buf[(y+1)*stride + x]+= (color*         fr )>>16;
  1287.         }
  1288.     }else{
  1289.         if(sy > ey){
  1290.             t=sx; sx=ex; ex=t;
  1291.             t=sy; sy=ey; ey=t;
  1292.         }
  1293.         buf+= sx + sy*stride;
  1294.         ey-= sy;
  1295.         if(ey) f= ((ex-sx)<<16)/ey;
  1296.         else   f= 0;
  1297.         for(y= 0; y <= ey; y++){
  1298.             x = (y*f)>>16;
  1299.             fr= (y*f)&0xFFFF;
  1300.             buf[y*stride + x  ]+= (color*(0x10000-fr))>>16;;
  1301.             buf[y*stride + x+1]+= (color*         fr )>>16;;
  1302.         }
  1303.     }
  1304. }
  1305. /**
  1306.  * draws an arrow from (ex, ey) -> (sx, sy).
  1307.  * @param w width of the image
  1308.  * @param h height of the image
  1309.  * @param stride stride/linesize of the image
  1310.  * @param color color of the arrow
  1311.  */
  1312. static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){ 
  1313.     int dx,dy;
  1314.     sx= clip(sx, -100, w+100);
  1315.     sy= clip(sy, -100, h+100);
  1316.     ex= clip(ex, -100, w+100);
  1317.     ey= clip(ey, -100, h+100);
  1318.     
  1319.     dx= ex - sx;
  1320.     dy= ey - sy;
  1321.     
  1322.     if(dx*dx + dy*dy > 3*3){
  1323.         int rx=  dx + dy;
  1324.         int ry= -dx + dy;
  1325.         int length= ff_sqrt((rx*rx + ry*ry)<<8);
  1326.         
  1327.         //FIXME subpixel accuracy
  1328.         rx= ROUNDED_DIV(rx*3<<4, length);
  1329.         ry= ROUNDED_DIV(ry*3<<4, length);
  1330.         
  1331.         draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
  1332.         draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
  1333.     }
  1334.     draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
  1335. }
  1336. ///**
  1337. // * prints debuging info for the given picture.
  1338. // */
  1339. //void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
  1340. //    if(!pict || !pict->mb_type) return;
  1341. //    if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
  1342. //        int x,y;
  1343. //        
  1344. //        av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
  1345. //        switch (pict->pict_type) {
  1346. //            case FF_I_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"In"); break;
  1347. //            case FF_P_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"Pn"); break;
  1348. //            case FF_B_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"Bn"); break;
  1349. //            case FF_S_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"Sn"); break;
  1350. //            case FF_SI_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SIn"); break;
  1351. //            case FF_SP_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SPn"); break;            
  1352. //        }
  1353. //        for(y=0; y<s->mb_height; y++){
  1354. //            for(x=0; x<s->mb_width; x++){
  1355. //                if(s->avctx->debug&FF_DEBUG_SKIP){
  1356. //                    int count= s->mbskip_table[x + y*s->mb_stride];
  1357. //                    if(count>9) count=9;
  1358. //                    av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
  1359. //                }
  1360. //                if(s->avctx->debug&FF_DEBUG_QP){
  1361. //                    av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
  1362. //                }
  1363. //                if(s->avctx->debug&FF_DEBUG_MB_TYPE){
  1364. //                    int mb_type= pict->mb_type[x + y*s->mb_stride];
  1365. //                    //Type & MV direction
  1366. //                    if(IS_PCM(mb_type))
  1367. //                        av_log(s->avctx, AV_LOG_DEBUG, "P");
  1368. //                    else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
  1369. //                        av_log(s->avctx, AV_LOG_DEBUG, "A");
  1370. //                    else if(IS_INTRA4x4(mb_type))
  1371. //                        av_log(s->avctx, AV_LOG_DEBUG, "i");
  1372. //                    else if(IS_INTRA16x16(mb_type))
  1373. //                        av_log(s->avctx, AV_LOG_DEBUG, "I");
  1374. //                    else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
  1375. //                        av_log(s->avctx, AV_LOG_DEBUG, "d");
  1376. //                    else if(IS_DIRECT(mb_type))
  1377. //                        av_log(s->avctx, AV_LOG_DEBUG, "D");
  1378. //                    else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
  1379. //                        av_log(s->avctx, AV_LOG_DEBUG, "g");
  1380. //                    else if(IS_GMC(mb_type))
  1381. //                        av_log(s->avctx, AV_LOG_DEBUG, "G");
  1382. //                    else if(IS_SKIP(mb_type))
  1383. //                        av_log(s->avctx, AV_LOG_DEBUG, "S");
  1384. //                    else if(!USES_LIST(mb_type, 1))
  1385. //                        av_log(s->avctx, AV_LOG_DEBUG, ">");
  1386. //                    else if(!USES_LIST(mb_type, 0))
  1387. //                        av_log(s->avctx, AV_LOG_DEBUG, "<");
  1388. //                    else{
  1389. //                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
  1390. //                        av_log(s->avctx, AV_LOG_DEBUG, "X");
  1391. //                    }
  1392. //                    
  1393. //                    //segmentation
  1394. //                    if(IS_8X8(mb_type))
  1395. //                        av_log(s->avctx, AV_LOG_DEBUG, "+");
  1396. //                    else if(IS_16X8(mb_type))
  1397. //                        av_log(s->avctx, AV_LOG_DEBUG, "-");
  1398. //                    else if(IS_8X16(mb_type))
  1399. //                        av_log(s->avctx, AV_LOG_DEBUG, "?);
  1400. //                    else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
  1401. //                        av_log(s->avctx, AV_LOG_DEBUG, " ");
  1402. //                    else
  1403. //                        av_log(s->avctx, AV_LOG_DEBUG, "?");
  1404. //                    
  1405. //                        
  1406. //                    if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264)
  1407. //                        av_log(s->avctx, AV_LOG_DEBUG, "=");
  1408. //                    else
  1409. //                        av_log(s->avctx, AV_LOG_DEBUG, " ");
  1410. //                }
  1411. ////                av_log(s->avctx, AV_LOG_DEBUG, " ");
  1412. //            }
  1413. //            av_log(s->avctx, AV_LOG_DEBUG, "n");
  1414. //        }
  1415. //    }
  1416. //    if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
  1417. //        const int shift= 1 + s->quarter_sample;
  1418. //        int mb_y;
  1419. //        uint8_t *ptr;
  1420. //        int i;
  1421. //        int h_chroma_shift, v_chroma_shift;
  1422. //        s->low_delay=0; //needed to see the vectors without trashing the buffers
  1423. //        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
  1424. //        for(i=0; i<3; i++){
  1425. //            memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*s->height:pict->linesize[i]*s->height >> v_chroma_shift);
  1426. //            pict->data[i]= s->visualization_buffer[i];
  1427. //        }
  1428. //        pict->type= FF_BUFFER_TYPE_COPY;
  1429. //        ptr= pict->data[0];
  1430. //        for(mb_y=0; mb_y<s->mb_height; mb_y++){
  1431. //            int mb_x;
  1432. //            for(mb_x=0; mb_x<s->mb_width; mb_x++){
  1433. //                const int mb_index= mb_x + mb_y*s->mb_stride;
  1434. //                if((s->avctx->debug_mv) && pict->motion_val){
  1435. //                  int type;
  1436. //                  for(type=0; type<3; type++){
  1437. //                    int direction = 0;
  1438. //                    switch (type) {
  1439. //                      case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=FF_P_TYPE))
  1440. //                                continue;
  1441. //                              direction = 0;
  1442. //                              break;
  1443. //                      case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=FF_B_TYPE))
  1444. //                                continue;
  1445. //                              direction = 0;
  1446. //                              break;
  1447. //                      case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=FF_B_TYPE))
  1448. //                                continue;
  1449. //                              direction = 1;
  1450. //                              break;
  1451. //                    }
  1452. //                    if(!USES_LIST(pict->mb_type[mb_index], direction))
  1453. //                        continue;
  1454. //                    //FIXME for h264
  1455. //                    if(IS_8X8(pict->mb_type[mb_index])){
  1456. //                      int i;
  1457. //                      for(i=0; i<4; i++){
  1458. //                        int sx= mb_x*16 + 4 + 8*(i&1);
  1459. //                        int sy= mb_y*16 + 4 + 8*(i>>1);
  1460. //                        int xy= mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*s->b8_stride;
  1461. //                        int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
  1462. //                        int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
  1463. //                        draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
  1464. //                      }
  1465. //                    }else if(IS_16X8(pict->mb_type[mb_index])){
  1466. //                      int i;
  1467. //                      for(i=0; i<2; i++){
  1468. //                        int sx=mb_x*16 + 8;
  1469. //                        int sy=mb_y*16 + 4 + 8*i;
  1470. //                        int xy= mb_x*2 + (mb_y*2 + i)*s->b8_stride;
  1471. //                        int mx=(pict->motion_val[direction][xy][0]>>shift);
  1472. //                        int my=(pict->motion_val[direction][xy][1]>>shift);
  1473. //                        
  1474. //                        if(IS_INTERLACED(pict->mb_type[mb_index]))
  1475. //                            my*=2;
  1476. //                        
  1477. //                        draw_arrow(ptr, sx, sy, mx+sx, my+sy, s->width, s->height, s->linesize, 100);
  1478. //                      }
  1479. //                    }else{
  1480. //                      int sx= mb_x*16 + 8;
  1481. //                      int sy= mb_y*16 + 8;
  1482. //                      int xy= mb_x*2 + mb_y*2*s->b8_stride;
  1483. //                      int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
  1484. //                      int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
  1485. //                      draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
  1486. //                    }
  1487. //                  }                  
  1488. //                }
  1489. //                if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
  1490. //                    uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
  1491. //                    int y;
  1492. //                    for(y=0; y<8; y++){
  1493. //                        *(uint64_t*)(pict->data[1] + 8*mb_x + (8*mb_y + y)*pict->linesize[1])= c;
  1494. //                        *(uint64_t*)(pict->data[2] + 8*mb_x + (8*mb_y + y)*pict->linesize[2])= c;
  1495. //                    }
  1496. //                }
  1497. //                if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
  1498. //                    int mb_type= pict->mb_type[mb_index];
  1499. //                    uint64_t u,v;
  1500. //                    int y;
  1501. //#define COLOR(theta, r)
  1502. //u= (int)(128 + r*cos(theta*3.141592/180));
  1503. //v= (int)(128 + r*sin(theta*3.141592/180));
  1504. //                    
  1505. //                    u=v=128;
  1506. //                    if(IS_PCM(mb_type)){
  1507. //                        COLOR(120,48)
  1508. //                    }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
  1509. //                        COLOR(30,48)
  1510. //                    }else if(IS_INTRA4x4(mb_type)){
  1511. //                        COLOR(90,48)
  1512. //                    }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
  1513. ////                        COLOR(120,48)
  1514. //                    }else if(IS_DIRECT(mb_type)){
  1515. //                        COLOR(150,48)
  1516. //                    }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
  1517. //                        COLOR(170,48)
  1518. //                    }else if(IS_GMC(mb_type)){
  1519. //                        COLOR(190,48)
  1520. //                    }else if(IS_SKIP(mb_type)){
  1521. ////                        COLOR(180,48)
  1522. //                    }else if(!USES_LIST(mb_type, 1)){
  1523. //                        COLOR(240,48)
  1524. //                    }else if(!USES_LIST(mb_type, 0)){
  1525. //                        COLOR(0,48)
  1526. //                    }else{
  1527. //                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
  1528. //                        COLOR(300,48)
  1529. //                    }
  1530. //                    u*= 0x0101010101010101ULL;
  1531. //                    v*= 0x0101010101010101ULL;
  1532. //                    for(y=0; y<8; y++){
  1533. //                        *(uint64_t*)(pict->data[1] + 8*mb_x + (8*mb_y + y)*pict->linesize[1])= u;
  1534. //                        *(uint64_t*)(pict->data[2] + 8*mb_x + (8*mb_y + y)*pict->linesize[2])= v;
  1535. //                    }
  1536. //                    //segmentation
  1537. //                    if(IS_8X8(mb_type) || IS_16X8(mb_type)){
  1538. //                        *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
  1539. //                        *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
  1540. //                    }
  1541. //                    if(IS_8X8(mb_type) || IS_8X16(mb_type)){
  1542. //                        for(y=0; y<16; y++)
  1543. //                            pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
  1544. //                    }
  1545. //                        
  1546. //                    if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
  1547. //                        // hmm
  1548. //                    }
  1549. //                }
  1550. //                s->mbskip_table[mb_index]=0;
  1551. //            }
  1552. //        }
  1553. //    }
  1554. //}
  1555. //#ifdef CONFIG_ENCODERS
  1556. //static int get_sae(uint8_t *src, int ref, int stride){
  1557. //    int x,y;
  1558. //    int acc=0;
  1559. //    
  1560. //    for(y=0; y<16; y++){
  1561. //        for(x=0; x<16; x++){
  1562. //            acc+= ABS(src[x+y*stride] - ref);
  1563. //        }
  1564. //    }
  1565. //    
  1566. //    return acc;
  1567. //}
  1568. //static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride){
  1569. //    int x, y, w, h;
  1570. //    int acc=0;
  1571. //    
  1572. //    w= s->width &~15;
  1573. //    h= s->height&~15;
  1574. //    
  1575. //    for(y=0; y<h; y+=16){
  1576. //        for(x=0; x<w; x+=16){
  1577. //            int offset= x + y*stride;
  1578. //            int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride, 16);
  1579. //            int mean= (s->dsp.pix_sum(src + offset, stride) + 128)>>8;
  1580. //            int sae = get_sae(src + offset, mean, stride);
  1581. //            
  1582. //            acc+= sae + 500 < sad;
  1583. //        }
  1584. //    }
  1585. //    return acc;
  1586. //}
  1587. //static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
  1588. //    AVFrame *pic=NULL;
  1589. //    int i;
  1590. //    const int encoding_delay= s->max_b_frames;
  1591. //    int direct=1;
  1592. //    
  1593. //  if(pic_arg){
  1594. //    if(encoding_delay && !(s->flags&CODEC_FLAG_INPUT_PRESERVED)) direct=0;
  1595. //    if(pic_arg->linesize[0] != s->linesize) direct=0;
  1596. //    if(pic_arg->linesize[1] != s->uvlinesize) direct=0;
  1597. //    if(pic_arg->linesize[2] != s->uvlinesize) direct=0;
  1598. //  
  1599. ////    av_log(AV_LOG_DEBUG, "%d %d %d %dn",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize);
  1600. //    
  1601. //    if(direct){
  1602. //        i= ff_find_unused_picture(s, 1);
  1603. //        pic= (AVFrame*)&s->picture[i];
  1604. //        pic->reference= 3;
  1605. //    
  1606. //        for(i=0; i<4; i++){
  1607. //            pic->data[i]= pic_arg->data[i];
  1608. //            pic->linesize[i]= pic_arg->linesize[i];
  1609. //        }
  1610. //        alloc_picture(s, (Picture*)pic, 1);
  1611. //    }else{
  1612. //        int offset= 16;
  1613. //        i= ff_find_unused_picture(s, 0);
  1614. //        pic= (AVFrame*)&s->picture[i];
  1615. //        pic->reference= 3;
  1616. //        alloc_picture(s, (Picture*)pic, 0);
  1617. //        if(   pic->data[0] + offset == pic_arg->data[0] 
  1618. //           && pic->data[1] + offset == pic_arg->data[1]
  1619. //           && pic->data[2] + offset == pic_arg->data[2]){
  1620. //       // empty
  1621. //        }else{
  1622. //            int h_chroma_shift, v_chroma_shift;
  1623. //            avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
  1624. //        
  1625. //            for(i=0; i<3; i++){
  1626. //                int src_stride= pic_arg->linesize[i];
  1627. //                int dst_stride= i ? s->uvlinesize : s->linesize;
  1628. //                int h_shift= i ? h_chroma_shift : 0;
  1629. //                int v_shift= i ? v_chroma_shift : 0;
  1630. //                int w= s->width >>h_shift;
  1631. //                int h= s->height>>v_shift;
  1632. //                uint8_t *src= pic_arg->data[i];
  1633. //                uint8_t *dst= pic->data[i] + offset;
  1634. //            
  1635. //                if(src_stride==dst_stride)
  1636. //                    memcpy(dst, src, src_stride*h);
  1637. //                else{
  1638. //                    while(h--){
  1639. //                        memcpy(dst, src, w);
  1640. //                        dst += dst_stride;
  1641. //                        src += src_stride;
  1642. //                    }
  1643. //                }
  1644. //            }
  1645. //        }
  1646. //    }
  1647. //    copy_picture_attributes(s, pic, pic_arg);
  1648. //    
  1649. //    pic->display_picture_number= s->input_picture_number++;
  1650. //    if(pic->pts != AV_NOPTS_VALUE){ 
  1651. //        s->user_specified_pts= pic->pts;
  1652. //    }else{
  1653. //        if(s->user_specified_pts){
  1654. //            pic->pts= s->user_specified_pts + AV_TIME_BASE*(int64_t)s->avctx->frame_rate_base / s->avctx->frame_rate;
  1655. //            av_log(s->avctx, AV_LOG_INFO, "Warning: AVFrame.pts=? trying to guess (%Ld)n", pic->pts);
  1656. //        }else{
  1657. //            pic->pts= av_rescale(pic->display_picture_number*(int64_t)s->avctx->frame_rate_base, AV_TIME_BASE, s->avctx->frame_rate);
  1658. //        }
  1659. //    }
  1660. //  }
  1661. //  
  1662. //    /* shift buffer entries */
  1663. //    for(i=1; i<MAX_PICTURE_COUNT /*s->encoding_delay+1*/; i++)
  1664. //        s->input_picture[i-1]= s->input_picture[i];
  1665. //        
  1666. //    s->input_picture[encoding_delay]= (Picture*)pic;
  1667. //    return 0;
  1668. //}
  1669. //static void select_input_picture(MpegEncContext *s){
  1670. //    int i;
  1671. //    for(i=1; i<MAX_PICTURE_COUNT; i++)
  1672. //        s->reordered_input_picture[i-1]= s->reordered_input_picture[i];
  1673. //    s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL;
  1674. //    /* set next picture types & ordering */
  1675. //    if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
  1676. //        if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){
  1677. //            s->reordered_input_picture[0]= s->input_picture[0];
  1678. //            s->reordered_input_picture[0]->pict_type= I_TYPE;
  1679. //            s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
  1680. //        }else{
  1681. //            int b_frames;
  1682. //            
  1683. //            if(s->flags&CODEC_FLAG_PASS2){
  1684. //                for(i=0; i<s->max_b_frames+1; i++){
  1685. //                    int pict_num= s->input_picture[0]->display_picture_number + i;
  1686. //                    int pict_type= s->rc_context.entry[pict_num].new_pict_type;
  1687. //                    s->input_picture[i]->pict_type= pict_type;
  1688. //                    
  1689. //                    if(i + 1 >= s->rc_context.num_entries) break;
  1690. //                }
  1691. //            }
  1692. //            if(s->input_picture[0]->pict_type){
  1693. //                /* user selected pict_type */
  1694. //                for(b_frames=0; b_frames<s->max_b_frames+1; b_frames++){
  1695. //                    if(s->input_picture[b_frames]->pict_type!=B_TYPE) break;
  1696. //                }
  1697. //            
  1698. //                if(b_frames > s->max_b_frames){
  1699. //                    av_log(s->avctx, AV_LOG_ERROR, "warning, too many bframes in a rown");
  1700. //                    b_frames = s->max_b_frames;
  1701. //                }
  1702. //            }else if(s->avctx->b_frame_strategy==0){
  1703. //                b_frames= s->max_b_frames;
  1704. //                while(b_frames && !s->input_picture[b_frames]) b_frames--;
  1705. //            }else if(s->avctx->b_frame_strategy==1){
  1706. //                for(i=1; i<s->max_b_frames+1; i++){
  1707. //                    if(s->input_picture[i] && s->input_picture[i]->b_frame_score==0){
  1708. //                        s->input_picture[i]->b_frame_score= 
  1709. //                            get_intra_count(s, s->input_picture[i  ]->data[0], 
  1710. //                                               s->input_picture[i-1]->data[0], s->linesize) + 1;
  1711. //                    }
  1712. //                }
  1713. //                for(i=0; i<s->max_b_frames; i++){
  1714. //                    if(s->input_picture[i]==NULL || s->input_picture[i]->b_frame_score - 1 > s->mb_num/40) break;
  1715. //                }
  1716. //                                
  1717. //                b_frames= FFMAX(0, i-1);
  1718. //                
  1719. //                /* reset scores */
  1720. //                for(i=0; i<b_frames+1; i++){
  1721. //                    s->input_picture[i]->b_frame_score=0;
  1722. //                }
  1723. //            }else{
  1724. //                av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategyn");
  1725. //                b_frames=0;
  1726. //            }
  1727. //            emms_c();
  1728. ////static int b_count=0;
  1729. ////b_count+= b_frames;
  1730. ////av_log(s->avctx, AV_LOG_DEBUG, "b_frames: %dn", b_count);
  1731. //            if(s->picture_in_gop_number + b_frames >= s->gop_size){
  1732. //                if(s->flags & CODEC_FLAG_CLOSED_GOP)
  1733. //                    b_frames=0;
  1734. //                s->input_picture[b_frames]->pict_type= I_TYPE;
  1735. //            }
  1736. //            
  1737. //            if(   (s->flags & CODEC_FLAG_CLOSED_GOP)
  1738. //               && b_frames
  1739. //               && s->input_picture[b_frames]->pict_type== I_TYPE)
  1740. //                b_frames--;
  1741. //            s->reordered_input_picture[0]= s->input_picture[b_frames];
  1742. //            if(s->reordered_input_picture[0]->pict_type != I_TYPE)
  1743. //                s->reordered_input_picture[0]->pict_type= P_TYPE;
  1744. //            s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
  1745. //            for(i=0; i<b_frames; i++){
  1746. //                s->reordered_input_picture[i+1]= s->input_picture[i];
  1747. //                s->reordered_input_picture[i+1]->pict_type= B_TYPE;
  1748. //                s->reordered_input_picture[i+1]->coded_picture_number= s->coded_picture_number++;
  1749. //            }
  1750. //        }
  1751. //    }
  1752. //    
  1753. //    if(s->reordered_input_picture[0]){
  1754. //        s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE ? 3 : 0;
  1755. //        copy_picture(&s->new_picture, s->reordered_input_picture[0]);
  1756. //        if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
  1757. //            // input is a shared pix, so we cant modifiy it -> alloc a new one & ensure that the shared one is reuseable
  1758. //        
  1759. //            int i= ff_find_unused_picture(s, 0);
  1760. //            Picture *pic= &s->picture[i];
  1761. //            /* mark us unused / free shared pic */
  1762. //            for(i=0; i<4; i++)
  1763. //                s->reordered_input_picture[0]->data[i]= NULL;
  1764. //            s->reordered_input_picture[0]->type= 0;
  1765. //            
  1766. //            pic->reference              = s->reordered_input_picture[0]->reference;
  1767. //            
  1768. //            alloc_picture(s, pic, 0);
  1769. //            copy_picture_attributes(s, (AVFrame*)pic, (AVFrame*)s->reordered_input_picture[0]);
  1770. //            s->current_picture_ptr= pic;
  1771. //        }else{
  1772. //            // input is not a shared pix -> reuse buffer for current_pix
  1773. //            assert(   s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER 
  1774. //                   || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);
  1775. //            
  1776. //            s->current_picture_ptr= s->reordered_input_picture[0];
  1777. //            for(i=0; i<4; i++){
  1778. //                s->new_picture.data[i]+=16;
  1779. //            }
  1780. //        }
  1781. //        copy_picture(&s->current_picture, s->current_picture_ptr);
  1782. //    
  1783. //        s->picture_number= s->new_picture.display_picture_number;
  1784. ////printf("dpn:%dn", s->picture_number);
  1785. //    }else{
  1786. //       memset(&s->new_picture, 0, sizeof(Picture));
  1787. //    }
  1788. //}
  1789. //int MPV_encode_picture(AVCodecContext *avctx,
  1790. //                       unsigned char *buf, int buf_size, void *data)
  1791. //{
  1792. //    MpegEncContext *s = avctx->priv_data;
  1793. //    AVFrame *pic_arg = data;
  1794. //    int i, stuffing_count;
  1795. //    if(avctx->pix_fmt != PIX_FMT_YUV420P){
  1796. //        av_log(avctx, AV_LOG_ERROR, "this codec supports only YUV420Pn");
  1797. //        return -1;
  1798. //    }
  1799. //    
  1800. //    for(i=0; i<avctx->thread_count; i++){
  1801. //        int start_y= s->thread_context[i]->start_mb_y;
  1802. //        int   end_y= s->thread_context[i]->  end_mb_y;
  1803. //        int h= s->mb_height;
  1804. //        uint8_t *start= buf + buf_size*start_y/h;
  1805. //        uint8_t *end  = buf + buf_size*  end_y/h;
  1806. //        init_put_bits(&s->thread_context[i]->pb, start, end - start);
  1807. //    }
  1808. //    s->picture_in_gop_number++;
  1809. //    load_input_picture(s, pic_arg);
  1810. //    
  1811. //    select_input_picture(s);
  1812. //    
  1813. //    /* output? */
  1814. //    if(s->new_picture.data[0]){
  1815. //        s->pict_type= s->new_picture.pict_type;
  1816. ////emms_c();
  1817. ////printf("qs:%f %f %dn", s->new_picture.quality, s->current_picture.quality, s->qscale);
  1818. //        MPV_frame_start(s, avctx);
  1819. //        encode_picture(s, s->picture_number);
  1820. //        
  1821. //        avctx->real_pict_num  = s->picture_number;
  1822. //        avctx->header_bits = s->header_bits;
  1823. //        avctx->mv_bits     = s->mv_bits;
  1824. //        avctx->misc_bits   = s->misc_bits;
  1825. //        avctx->i_tex_bits  = s->i_tex_bits;
  1826. //        avctx->p_tex_bits  = s->p_tex_bits;
  1827. //        avctx->i_count     = s->i_count;
  1828. //        avctx->p_count     = s->mb_num - s->i_count - s->skip_count; //FIXME f/b_count in avctx
  1829. //        avctx->skip_count  = s->skip_count;
  1830. //        MPV_frame_end(s);
  1831. //        if (s->out_format == FMT_MJPEG)
  1832. //            mjpeg_picture_trailer(s);
  1833. //        
  1834. //        if(s->flags&CODEC_FLAG_PASS1)
  1835. //            ff_write_pass1_stats(s);
  1836. //        for(i=0; i<4; i++){
  1837. //            avctx->error[i] += s->current_picture_ptr->error[i];
  1838. //        }
  1839. //        flush_put_bits(&s->pb);
  1840. //        s->frame_bits  = put_bits_count(&s->pb);
  1841. //        stuffing_count= ff_vbv_update(s, s->frame_bits);
  1842. //        if(stuffing_count){
  1843. //            switch(s->codec_id){
  1844. //            case CODEC_ID_MPEG1VIDEO:
  1845. //            case CODEC_ID_MPEG2VIDEO:
  1846. //                while(stuffing_count--){
  1847. //                    put_bits(&s->pb, 8, 0);
  1848. //                }
  1849. //            break;
  1850. //            case CODEC_ID_MPEG4:
  1851. //                put_bits(&s->pb, 16, 0);
  1852. //                put_bits(&s->pb, 16, 0x1C3);
  1853. //                stuffing_count -= 4;
  1854. //                while(stuffing_count--){
  1855. //                    put_bits(&s->pb, 8, 0xFF);
  1856. //                }
  1857. //            break;
  1858. //            default:
  1859. //                av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflown");
  1860. //            }
  1861. //            flush_put_bits(&s->pb);
  1862. //            s->frame_bits  = put_bits_count(&s->pb);
  1863. //        }
  1864. //        /* update mpeg1/2 vbv_delay for CBR */    
  1865. //        if(s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate && s->out_format == FMT_MPEG1
  1866. //           && 90000LL * (avctx->rc_buffer_size-1) <= s->avctx->rc_max_rate*0xFFFFLL){
  1867. //            int vbv_delay;
  1868. //            assert(s->repeat_first_field==0);
  1869. //            
  1870. //            vbv_delay= lrintf(90000 * s->rc_context.buffer_index / s->avctx->rc_max_rate);
  1871. //            assert(vbv_delay < 0xFFFF);
  1872. //            s->vbv_delay_ptr[0] &= 0xF8;
  1873. //            s->vbv_delay_ptr[0] |= vbv_delay>>13;
  1874. //            s->vbv_delay_ptr[1]  = vbv_delay>>5;
  1875. //            s->vbv_delay_ptr[2] &= 0x07;
  1876. //            s->vbv_delay_ptr[2] |= vbv_delay<<3;
  1877. //        }
  1878. //        s->total_bits += s->frame_bits;
  1879. //        avctx->frame_bits  = s->frame_bits;
  1880. //    }else{
  1881. //        assert((pbBufPtr(&s->pb) == s->pb.buf));
  1882. //        s->frame_bits=0;
  1883. //    }
  1884. //    assert((s->frame_bits&7)==0);
  1885. //    
  1886. //    return s->frame_bits/8;
  1887. //}
  1888. //#endif //CONFIG_ENCODERS
  1889. //static inline void gmc1_motion(MpegEncContext *s,
  1890. //                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
  1891. //                               uint8_t **ref_picture)
  1892. //{
  1893. //    uint8_t *ptr;
  1894. //    int offset, src_x, src_y, linesize, uvlinesize;
  1895. //    int motion_x, motion_y;
  1896. //    int emu=0;
  1897. //    motion_x= s->sprite_offset[0][0];
  1898. //    motion_y= s->sprite_offset[0][1];
  1899. //    src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1));
  1900. //    src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1));
  1901. //    motion_x<<=(3-s->sprite_warping_accuracy);
  1902. //    motion_y<<=(3-s->sprite_warping_accuracy);
  1903. //    src_x = clip(src_x, -16, s->width);
  1904. //    if (src_x == s->width)
  1905. //        motion_x =0;
  1906. //    src_y = clip(src_y, -16, s->height);
  1907. //    if (src_y == s->height)
  1908. //        motion_y =0;
  1909. //    linesize = s->linesize;
  1910. //    uvlinesize = s->uvlinesize;
  1911. //    
  1912. //    ptr = ref_picture[0] + (src_y * linesize) + src_x;
  1913. //    if(s->flags&CODEC_FLAG_EMU_EDGE){
  1914. //        if(   (unsigned)src_x >= s->h_edge_pos - 17
  1915. //           || (unsigned)src_y >= s->v_edge_pos - 17){
  1916. //            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
  1917. //            ptr= s->edge_emu_buffer;
  1918. //        }
  1919. //    }
  1920. //    
  1921. //    if((motion_x|motion_y)&7){
  1922. //        s->dsp.gmc1(dest_y  , ptr  , linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
  1923. //        s->dsp.gmc1(dest_y+8, ptr+8, linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
  1924. //    }else{
  1925. //        int dxy;
  1926. //        
  1927. //        dxy= ((motion_x>>3)&1) | ((motion_y>>2)&2);
  1928. //        if (s->no_rounding){
  1929. //     s->dsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
  1930. //        }else{
  1931. //            s->dsp.put_pixels_tab       [0][dxy](dest_y, ptr, linesize, 16);
  1932. //        }
  1933. //    }
  1934. //    
  1935. //    if(s->flags&CODEC_FLAG_GRAY) return;
  1936. //    motion_x= s->sprite_offset[1][0];
  1937. //    motion_y= s->sprite_offset[1][1];
  1938. //    src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy+1));
  1939. //    src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1));
  1940. //    motion_x<<=(3-s->sprite_warping_accuracy);
  1941. //    motion_y<<=(3-s->sprite_warping_accuracy);
  1942. //    src_x = clip(src_x, -8, s->width>>1);
  1943. //    if (src_x == s->width>>1)
  1944. //        motion_x =0;
  1945. //    src_y = clip(src_y, -8, s->height>>1);
  1946. //    if (src_y == s->height>>1)
  1947. //        motion_y =0;
  1948. //    offset = (src_y * uvlinesize) + src_x;
  1949. //    ptr = ref_picture[1] + offset;
  1950. //    if(s->flags&CODEC_FLAG_EMU_EDGE){
  1951. //        if(   (unsigned)src_x >= (s->h_edge_pos>>1) - 9
  1952. //           || (unsigned)src_y >= (s->v_edge_pos>>1) - 9){
  1953. //            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
  1954. //            ptr= s->edge_emu_buffer;
  1955. //            emu=1;
  1956. //        }
  1957. //    }
  1958. //    s->dsp.gmc1(dest_cb, ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
  1959. //    
  1960. //    ptr = ref_picture[2] + offset;
  1961. //    if(emu){
  1962. //        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
  1963. //        ptr= s->edge_emu_buffer;
  1964. //    }
  1965. //    s->dsp.gmc1(dest_cr, ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
  1966. //    
  1967. //    return;
  1968. //}
  1969. //static inline void gmc_motion(MpegEncContext *s,
  1970. //                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
  1971. //                               uint8_t **ref_picture)
  1972. //{
  1973. //    uint8_t *ptr;
  1974. //    int linesize, uvlinesize;
  1975. //    const int a= s->sprite_warping_accuracy;
  1976. //    int ox, oy;
  1977. //    linesize = s->linesize;
  1978. //    uvlinesize = s->uvlinesize;
  1979. //    ptr = ref_picture[0];
  1980. //    ox= s->sprite_offset[0][0] + s->sprite_delta[0][0]*s->mb_x*16 + s->sprite_delta[0][1]*s->mb_y*16;
  1981. //    oy= s->sprite_offset[0][1] + s->sprite_delta[1][0]*s->mb_x*16 + s->sprite_delta[1][1]*s->mb_y*16;
  1982. //    s->dsp.gmc(dest_y, ptr, linesize, 16,
  1983. //           ox, 
  1984. //           oy, 
  1985. //           s->sprite_delta[0][0], s->sprite_delta[0][1],
  1986. //           s->sprite_delta[1][0], s->sprite_delta[1][1], 
  1987. //           a+1, (1<<(2*a+1)) - s->no_rounding,
  1988. //           s->h_edge_pos, s->v_edge_pos);
  1989. //    s->dsp.gmc(dest_y+8, ptr, linesize, 16,
  1990. //           ox + s->sprite_delta[0][0]*8, 
  1991. //           oy + s->sprite_delta[1][0]*8, 
  1992. //           s->sprite_delta[0][0], s->sprite_delta[0][1],
  1993. //           s->sprite_delta[1][0], s->sprite_delta[1][1], 
  1994. //           a+1, (1<<(2*a+1)) - s->no_rounding,
  1995. //           s->h_edge_pos, s->v_edge_pos);
  1996. //    if(s->flags&CODEC_FLAG_GRAY) return;
  1997. //    ox= s->sprite_offset[1][0] + s->sprite_delta[0][0]*s->mb_x*8 + s->sprite_delta[0][1]*s->mb_y*8;
  1998. //    oy= s->sprite_offset[1][1] + s->sprite_delta[1][0]*s->mb_x*8 + s->sprite_delta[1][1]*s->mb_y*8;
  1999. //    ptr = ref_picture[1];
  2000. //    s->dsp.gmc(dest_cb, ptr, uvlinesize, 8,
  2001. //           ox, 
  2002. //           oy, 
  2003. //           s->sprite_delta[0][0], s->sprite_delta[0][1],
  2004. //           s->sprite_delta[1][0], s->sprite_delta[1][1], 
  2005. //           a+1, (1<<(2*a+1)) - s->no_rounding,
  2006. //           s->h_edge_pos>>1, s->v_edge_pos>>1);
  2007. //    
  2008. //    ptr = ref_picture[2];
  2009. //    s->dsp.gmc(dest_cr, ptr, uvlinesize, 8,
  2010. //           ox, 
  2011. //           oy, 
  2012. //           s->sprite_delta[0][0], s->sprite_delta[0][1],
  2013. //           s->sprite_delta[1][0], s->sprite_delta[1][1], 
  2014. //           a+1, (1<<(2*a+1)) - s->no_rounding,
  2015. //           s->h_edge_pos>>1, s->v_edge_pos>>1);
  2016. //}
  2017. /**
 * Copies a rectangular area of samples to a temporary buffer and replicates the border samples.
  2019.  * @param buf destination buffer
  2020.  * @param src source buffer
  2021.  * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
  2022.  * @param block_w width of block
  2023.  * @param block_h height of block
  2024.  * @param src_x x coordinate of the top left sample of the block in the source buffer
  2025.  * @param src_y y coordinate of the top left sample of the block in the source buffer
  2026.  * @param w width of the source buffer
  2027.  * @param h height of the source buffer
  2028.  */
  2029. void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, int block_h, 
  2030.                                     int src_x, int src_y, int w, int h){
  2031.     int x, y;
  2032.     int start_y, start_x, end_y, end_x;
  2033.     if(src_y>= h){
  2034.         src+= (h-1-src_y)*linesize;
  2035.         src_y=h-1;
  2036.     }else if(src_y<=-block_h){
  2037.         src+= (1-block_h-src_y)*linesize;
  2038.         src_y=1-block_h;
  2039.     }
  2040.     if(src_x>= w){
  2041.         src+= (w-1-src_x);
  2042.         src_x=w-1;
  2043.     }else if(src_x<=-block_w){
  2044.         src+= (1-block_w-src_x);
  2045.         src_x=1-block_w;
  2046.     }
  2047.     start_y= FFMAX(0, -src_y);
  2048.     start_x= FFMAX(0, -src_x);
  2049.     end_y= FFMIN(block_h, h-src_y);
  2050.     end_x= FFMIN(block_w, w-src_x);
  2051.     // copy existing part
  2052.     for(y=start_y; y<end_y; y++){
  2053.         for(x=start_x; x<end_x; x++){
  2054.             buf[x + y*linesize]= src[x + y*linesize];
  2055.         }
  2056.     }
  2057.     //top
  2058.     for(y=0; y<start_y; y++){
  2059.         for(x=start_x; x<end_x; x++){
  2060.             buf[x + y*linesize]= buf[x + start_y*linesize];
  2061.         }
  2062.     }
  2063.     //bottom
  2064.     for(y=end_y; y<block_h; y++){
  2065.         for(x=start_x; x<end_x; x++){
  2066.             buf[x + y*linesize]= buf[x + (end_y-1)*linesize];
  2067.         }
  2068.     }
  2069.                                     
  2070.     for(y=0; y<block_h; y++){
  2071.        //left
  2072.         for(x=0; x<start_x; x++){
  2073.             buf[x + y*linesize]= buf[start_x + y*linesize];
  2074.         }
  2075.        
  2076.        //right
  2077.         for(x=end_x; x<block_w; x++){
  2078.             buf[x + y*linesize]= buf[end_x - 1 + y*linesize];
  2079.         }
  2080.     }
  2081. }
  2082. static inline int hpel_motion(MpegEncContext *s, 
  2083.                                   uint8_t *dest, uint8_t *src,
  2084.                                   int field_based, int field_select,
  2085.                                   int src_x, int src_y,
  2086.                                   int width, int height, int stride,
  2087.                                   int h_edge_pos, int v_edge_pos,
  2088.                                   int w, int h, op_pixels_func *pix_op,
  2089.                                   int motion_x, int motion_y)
  2090. {
  2091.     int dxy;
  2092.     int emu=0;
  2093.     dxy = ((motion_y & 1) << 1) | (motion_x & 1);
  2094.     src_x += motion_x >> 1;
  2095.     src_y += motion_y >> 1;
  2096.                 
  2097.     /* WARNING: do no forget half pels */
  2098.     src_x = clip(src_x, -16, width); //FIXME unneeded for emu?
  2099.     if (src_x == width)
  2100.         dxy &= ~1;
  2101.     src_y = clip(src_y, -16, height);
  2102.     if (src_y == height)
  2103.         dxy &= ~2;
  2104.     src += src_y * stride + src_x;
  2105.     if(s->unrestricted_mv && (s->flags&CODEC_FLAG_EMU_EDGE)){
  2106.         if(   (unsigned)src_x > h_edge_pos - (motion_x&1) - w
  2107.            || (unsigned)src_y > v_edge_pos - (motion_y&1) - h){
  2108.             ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
  2109.                              src_x, src_y<<field_based, h_edge_pos, s->v_edge_pos);
  2110.             src= s->edge_emu_buffer;
  2111.             emu=1;
  2112.         }
  2113.     }
  2114.     if(field_select)
  2115.         src += s->linesize;
  2116.     pix_op[dxy](dest, src, stride, h);
  2117.     return emu;
  2118. }
/**
 * Apply one MPEG half-pel motion vector to the three components (Y, Cb, Cr).
 * @param s codec context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma Cb destination pointer
 * @param dest_cr chroma Cr destination pointer
 * @param field_based 1 when compensating a single field (doubles strides, halves vertical range)
 * @param bottom_field 1 to write into the bottom field of the destination
 * @param field_select 1 to read from the bottom field of the reference
 * @param ref_picture array[3] of pointers to the reference planes
 * @param pix_op half-pel motion compensation functions (put or avg)
 * @param motion_x horizontal motion in half-pel units
 * @param motion_y vertical motion in half-pel units
 * @param h block height in luma lines
 */
static always_inline void mpeg_motion(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int field_based, int bottom_field, int field_select,
                               uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos, uvlinesize, linesize;
    
#if 0
if(s->quarter_sample)
{
    motion_x>>=1;
    motion_y>>=1;
}
#endif
    /* field pictures use doubled line strides and half the vertical range */
    v_edge_pos = s->v_edge_pos >> field_based;
    linesize   = s->current_picture.linesize[0] << field_based;
    uvlinesize = s->current_picture.linesize[1] << field_based;
    /* low bit of each MV component selects the luma half-pel function */
    dxy = ((motion_y & 1) << 1) | (motion_x & 1);
    src_x = s->mb_x* 16               + (motion_x >> 1);
    src_y =(s->mb_y<<(4-field_based)) + (motion_y >> 1);
    /* derive the chroma source position and subpel code; the rounding
       rules differ per codec family */
    if (s->out_format == FMT_H263) {
        if((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based){
            /* buggy-encoder workaround: round the chroma vector differently */
            mx = (motion_x>>1)|(motion_x&1);
            my = motion_y >>1;
            uvdxy = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x* 8               + (mx >> 1);
            uvsrc_y = (s->mb_y<<(3-field_based)) + (my >> 1);
        }else{
            uvdxy = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
            uvsrc_x = src_x>>1;
            uvsrc_y = src_y>>1;
        }
    }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
        mx = motion_x / 4;
        my = motion_y / 4;
        uvdxy = 0;
        uvsrc_x = s->mb_x*8 + mx;
        uvsrc_y = s->mb_y*8 + my;
    } else {
        if(s->chroma_y_shift){
            /* 4:2:0 - chroma subsampled in both directions */
            mx = motion_x / 2;
            my = motion_y / 2;
            uvdxy = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x* 8               + (mx >> 1);
            uvsrc_y = (s->mb_y<<(3-field_based)) + (my >> 1);
        } else {
            if(s->chroma_x_shift){
            //Chroma422
                mx = motion_x / 2;
                uvdxy = ((motion_y & 1) << 1) | (mx & 1);
                uvsrc_x = s->mb_x* 8           + (mx >> 1);
                uvsrc_y = src_y;
            } else {
            //Chroma444
                uvdxy = dxy;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }
    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
    /* source block reaches outside the padded picture
       (unsigned compare also catches negative positions) */
    if(   (unsigned)src_x > s->h_edge_pos - (motion_x&1) - 16
       || (unsigned)src_y >    v_edge_pos - (motion_y&1) - h){
            if(s->codec_id == CODEC_ID_MPEG2VIDEO ||
               s->codec_id == CODEC_ID_MPEG1VIDEO){
                /* MPEG-1/2 vectors must stay inside the picture; skip the MC
                   NOTE(review): the trailing "n" in this message looks like a
                   mangled "\n" escape -- verify against upstream */
                av_log(s->avctx,AV_LOG_DEBUG,"MPEG motion vector out of boundaryn");
                return ;
            }
            /* build edge-emulated luma (and chroma) source blocks */
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
                             src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
            ptr_y = s->edge_emu_buffer;
            if(!(s->flags&CODEC_FLAG_GRAY)){
                uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
                ff_emulated_edge_mc(uvbuf  , ptr_cb, s->uvlinesize, 9, 9+field_based, 
                                 uvsrc_x, uvsrc_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
                ff_emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based, 
                                 uvsrc_x, uvsrc_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
                ptr_cb= uvbuf;
                ptr_cr= uvbuf+16;
            }
    }
    if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
        dest_y += s->linesize;
        dest_cb+= s->uvlinesize;
        dest_cr+= s->uvlinesize;
    }
    if(field_select){
        /* read the reference from its bottom field */
        ptr_y += s->linesize;
        ptr_cb+= s->uvlinesize;
        ptr_cr+= s->uvlinesize;
    }
    pix_op[0][dxy](dest_y, ptr_y, linesize, h);
    
    if(!(s->flags&CODEC_FLAG_GRAY)){
        pix_op[s->chroma_x_shift][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift);
        pix_op[s->chroma_x_shift][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift);
    }
}
  2222. //FIXME move to dsputil, avg variant, 16x16 version
  2223. static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride){
  2224.     int x;
  2225.     uint8_t * const top   = src[1];
  2226.     uint8_t * const left  = src[2];
  2227.     uint8_t * const mid   = src[0];
  2228.     uint8_t * const right = src[3];
  2229.     uint8_t * const bottom= src[4];
  2230. #define OBMC_FILTER(x, t, l, m, r, b)
  2231.     dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3
  2232. #define OBMC_FILTER4(x, t, l, m, r, b)
  2233.     OBMC_FILTER(x         , t, l, m, r, b);
  2234.     OBMC_FILTER(x+1       , t, l, m, r, b);
  2235.     OBMC_FILTER(x  +stride, t, l, m, r, b);
  2236.     OBMC_FILTER(x+1+stride, t, l, m, r, b);
  2237.     
  2238.     x=0;
  2239.     OBMC_FILTER (x  , 2, 2, 4, 0, 0);
  2240.     OBMC_FILTER (x+1, 2, 1, 5, 0, 0);
  2241.     OBMC_FILTER4(x+2, 2, 1, 5, 0, 0);
  2242.     OBMC_FILTER4(x+4, 2, 0, 5, 1, 0);
  2243.     OBMC_FILTER (x+6, 2, 0, 5, 1, 0);
  2244.     OBMC_FILTER (x+7, 2, 0, 4, 2, 0);
  2245.     x+= stride;
  2246.     OBMC_FILTER (x  , 1, 2, 5, 0, 0);
  2247.     OBMC_FILTER (x+1, 1, 2, 5, 0, 0);
  2248.     OBMC_FILTER (x+6, 1, 0, 5, 2, 0);
  2249.     OBMC_FILTER (x+7, 1, 0, 5, 2, 0);
  2250.     x+= stride;
  2251.     OBMC_FILTER4(x  , 1, 2, 5, 0, 0);
  2252.     OBMC_FILTER4(x+2, 1, 1, 6, 0, 0);
  2253.     OBMC_FILTER4(x+4, 1, 0, 6, 1, 0);
  2254.     OBMC_FILTER4(x+6, 1, 0, 5, 2, 0);
  2255.     x+= 2*stride;
  2256.     OBMC_FILTER4(x  , 0, 2, 5, 0, 1);
  2257.     OBMC_FILTER4(x+2, 0, 1, 6, 0, 1);
  2258.     OBMC_FILTER4(x+4, 0, 0, 6, 1, 1);
  2259.     OBMC_FILTER4(x+6, 0, 0, 5, 2, 1);
  2260.     x+= 2*stride;
  2261.     OBMC_FILTER (x  , 0, 2, 5, 0, 1);
  2262.     OBMC_FILTER (x+1, 0, 2, 5, 0, 1);
  2263.     OBMC_FILTER4(x+2, 0, 1, 5, 0, 2);
  2264.     OBMC_FILTER4(x+4, 0, 0, 5, 1, 2);
  2265.     OBMC_FILTER (x+6, 0, 0, 5, 2, 1);
  2266.     OBMC_FILTER (x+7, 0, 0, 5, 2, 1);
  2267.     x+= stride;
  2268.     OBMC_FILTER (x  , 0, 2, 4, 0, 2);
  2269.     OBMC_FILTER (x+1, 0, 1, 5, 0, 2);
  2270.     OBMC_FILTER (x+6, 0, 0, 5, 1, 2);
  2271.     OBMC_FILTER (x+7, 0, 0, 4, 2, 2);
  2272. }
  2273. /* obmc for 1 8x8 luma block */
  2274. static inline void obmc_motion(MpegEncContext *s,
  2275.                                uint8_t *dest, uint8_t *src,
  2276.                                int src_x, int src_y,
  2277.                                op_pixels_func *pix_op,
  2278.                                int16_t mv[5][2]/* mid top left right bottom*/)
  2279. #define MID    0
  2280. {
  2281.     int i;
  2282.     uint8_t *ptr[5];
  2283.     
  2284.     assert(s->quarter_sample==0);
  2285.     
  2286.     for(i=0; i<5; i++){
  2287.         if(i && mv[i][0]==mv[MID][0] && mv[i][1]==mv[MID][1]){
  2288.             ptr[i]= ptr[MID];
  2289.         }else{
  2290.             ptr[i]= s->obmc_scratchpad + 8*(i&1) + s->linesize*8*(i>>1);
  2291.             hpel_motion(s, ptr[i], src, 0, 0,
  2292.                         src_x, src_y,
  2293.                         s->width, s->height, s->linesize,
  2294.                         s->h_edge_pos, s->v_edge_pos,
  2295.                         8, 8, pix_op,
  2296.                         mv[i][0], mv[i][1]);
  2297.         }
  2298.     }
  2299.     put_obmc(dest, ptr, s->linesize);                
  2300. }
/**
 * Quarter-pel motion compensation for one macroblock (luma at qpel
 * precision, chroma at half-pel precision derived from the luma vector).
 * @param field_based 1 when compensating a single field
 * @param bottom_field 1 to write into the bottom field of the destination
 * @param field_select 1 to read from the bottom field of the reference
 * @param pix_op half-pel functions used for chroma
 * @param qpix_op quarter-pel functions used for luma
 * @param motion_x/motion_y motion vector in quarter-pel units
 * @param h block height in luma lines
 */
static inline void qpel_motion(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int field_based, int bottom_field, int field_select,
                               uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
                               qpel_mc_func (*qpix_op)[16],
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos, linesize, uvlinesize;
    /* low 2 bits of each MV component select one of the 16 qpel filters */
    dxy = ((motion_y & 3) << 2) | (motion_x & 3);
    src_x = s->mb_x *  16                 + (motion_x >> 2);
    src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);
    v_edge_pos = s->v_edge_pos >> field_based;
    linesize = s->linesize << field_based;
    uvlinesize = s->uvlinesize << field_based;
    
    /* derive the chroma vector; buggy encoders need special rounding */
    if(field_based){
        mx= motion_x/2;
        my= motion_y>>1;
    }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA2){
        static const int rtab[8]= {0,0,1,1,0,0,0,1};
        mx= (motion_x>>1) + rtab[motion_x&7];
        my= (motion_y>>1) + rtab[motion_y&7];
    }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA){
        mx= (motion_x>>1)|(motion_x&1);
        my= (motion_y>>1)|(motion_y&1);
    }else{
        mx= motion_x/2;
        my= motion_y/2;
    }
    /* reduce the chroma vector to half-pel precision (round toward odd) */
    mx= (mx>>1)|(mx&1);
    my= (my>>1)|(my&1);
    uvdxy= (mx&1) | ((my&1)<<1);
    mx>>=1;
    my>>=1;
    uvsrc_x = s->mb_x *  8                 + mx;
    uvsrc_y = s->mb_y * (8 >> field_based) + my;
    ptr_y  = ref_picture[0] +   src_y *   linesize +   src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
    /* source block reaches outside the padded picture: emulate the edge
       (unsigned compare also catches negative positions) */
    if(   (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 16 
       || (unsigned)src_y >    v_edge_pos - (motion_y&3) - h  ){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based, 
                         src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
        ptr_y= s->edge_emu_buffer;
        if(!(s->flags&CODEC_FLAG_GRAY)){
            uint8_t *uvbuf= s->edge_emu_buffer + 18*s->linesize;
            ff_emulated_edge_mc(uvbuf, ptr_cb, s->uvlinesize, 9, 9 + field_based, 
                             uvsrc_x, uvsrc_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
            ff_emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9, 9 + field_based, 
                             uvsrc_x, uvsrc_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
            ptr_cb= uvbuf;
            ptr_cr= uvbuf + 16;
        }
    }
    if(!field_based)
        qpix_op[0][dxy](dest_y, ptr_y, linesize);
    else{
        if(bottom_field){
            dest_y += s->linesize;
            dest_cb+= s->uvlinesize;
            dest_cr+= s->uvlinesize;
        }
        if(field_select){
            ptr_y  += s->linesize;
            ptr_cb += s->uvlinesize;
            ptr_cr += s->uvlinesize;
        }
        //damn interlaced mode
        //FIXME boundary mirroring is not exactly correct here
        qpix_op[1][dxy](dest_y  , ptr_y  , linesize);
        qpix_op[1][dxy](dest_y+8, ptr_y+8, linesize);
    }
    if(!(s->flags&CODEC_FLAG_GRAY)){
        pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1);
        pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1);
    }
}
  2379. inline int ff_h263_round_chroma(int x){
  2380.     if (x >= 0)
  2381.         return  (h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
  2382.     else {
  2383.         x = -x;
  2384.         return -(h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
  2385.     }
  2386. }
  2387. /**
  2388.  * h263 chorma 4mv motion compensation.
  2389.  */
  2390. static inline void chroma_4mv_motion(MpegEncContext *s,
  2391.                                      uint8_t *dest_cb, uint8_t *dest_cr,
  2392.                                      uint8_t **ref_picture,
  2393.                                      op_pixels_func *pix_op,
  2394.                                      int mx, int my){
  2395.     int dxy, emu=0, src_x, src_y, offset;
  2396.     uint8_t *ptr;
  2397.     
  2398.     /* In case of 8X8, we construct a single chroma motion vector
  2399.        with a special rounding */
  2400.     mx= ff_h263_round_chroma(mx);
  2401.     my= ff_h263_round_chroma(my);
  2402.     
  2403.     dxy = ((my & 1) << 1) | (mx & 1);
  2404.     mx >>= 1;
  2405.     my >>= 1;
  2406.     src_x = s->mb_x * 8 + mx;
  2407.     src_y = s->mb_y * 8 + my;
  2408.     src_x = clip(src_x, -8, s->width/2);
  2409.     if (src_x == s->width/2)
  2410.         dxy &= ~1;
  2411.     src_y = clip(src_y, -8, s->height/2);
  2412.     if (src_y == s->height/2)
  2413.         dxy &= ~2;
  2414.     
  2415.     offset = (src_y * (s->uvlinesize)) + src_x;
  2416.     ptr = ref_picture[1] + offset;
  2417.     if(s->flags&CODEC_FLAG_EMU_EDGE){
  2418.         if(   (unsigned)src_x > (s->h_edge_pos>>1) - (dxy &1) - 8
  2419.            || (unsigned)src_y > (s->v_edge_pos>>1) - (dxy>>1) - 8){
  2420.             ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
  2421.             ptr= s->edge_emu_buffer;
  2422.             emu=1;
  2423.         }
  2424.     }
  2425.     pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);
  2426.     ptr = ref_picture[2] + offset;
  2427.     if(emu){
  2428.         ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
  2429.         ptr= s->edge_emu_buffer;
  2430.     }
  2431.     pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
  2432. }
  2433. /**
  2434.  * motion compesation of a single macroblock
  2435.  * @param s context
  2436.  * @param dest_y luma destination pointer
  2437.  * @param dest_cb chroma cb/u destination pointer
  2438.  * @param dest_cr chroma cr/v destination pointer
  2439.  * @param dir direction (0->forward, 1->backward)
  2440.  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
  2441.  * @param pic_op halfpel motion compensation function (average or put normally)
  2442.  * @param pic_op qpel motion compensation function (average or put normally)
  2443.  * the motion vectors are taken from s->mv and the MV type from s->mv_type
  2444.  */
  2445. static inline void MPV_motion(MpegEncContext *s, 
  2446.                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
  2447.                               int dir, uint8_t **ref_picture, 
  2448.                               op_pixels_func (*pix_op)[4], qpel_mc_func (*qpix_op)[16])
  2449. {
  2450.     int dxy, mx, my, src_x, src_y, motion_x, motion_y;
  2451.     int mb_x, mb_y, i;
  2452.     uint8_t *ptr, *dest;
  2453.     mb_x = s->mb_x;
  2454.     mb_y = s->mb_y;
  2455.     if(s->obmc && s->pict_type != B_TYPE){
  2456.         int16_t mv_cache[4][4][2];
  2457.         const int xy= s->mb_x + s->mb_y*s->mb_stride;
  2458.         const int mot_stride= s->b8_stride;
  2459.         const int mot_xy= mb_x*2 + mb_y*2*mot_stride;
  2460.         assert(!s->mb_skiped);
  2461.                 
  2462.         memcpy(mv_cache[1][1], s->current_picture.motion_val[0][mot_xy           ], sizeof(int16_t)*4);
  2463.         memcpy(mv_cache[2][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);
  2464.         memcpy(mv_cache[3][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);
  2465.         if(mb_y==0 || IS_INTRA(s->current_picture.mb_type[xy-s->mb_stride])){
  2466.             memcpy(mv_cache[0][1], mv_cache[1][1], sizeof(int16_t)*4);
  2467.         }else{
  2468.             memcpy(mv_cache[0][1], s->current_picture.motion_val[0][mot_xy-mot_stride], sizeof(int16_t)*4);
  2469.         }
  2470.         if(mb_x==0 || IS_INTRA(s->current_picture.mb_type[xy-1])){
  2471.             *(int32_t*)mv_cache[1][0]= *(int32_t*)mv_cache[1][1];
  2472.             *(int32_t*)mv_cache[2][0]= *(int32_t*)mv_cache[2][1];
  2473.         }else{
  2474.             *(int32_t*)mv_cache[1][0]= *(int32_t*)s->current_picture.motion_val[0][mot_xy-1];
  2475.             *(int32_t*)mv_cache[2][0]= *(int32_t*)s->current_picture.motion_val[0][mot_xy-1+mot_stride];
  2476.         }
  2477.         if(mb_x+1>=s->mb_width || IS_INTRA(s->current_picture.mb_type[xy+1])){
  2478.             *(int32_t*)mv_cache[1][3]= *(int32_t*)mv_cache[1][2];
  2479.             *(int32_t*)mv_cache[2][3]= *(int32_t*)mv_cache[2][2];
  2480.         }else{
  2481.             *(int32_t*)mv_cache[1][3]= *(int32_t*)s->current_picture.motion_val[0][mot_xy+2];
  2482.             *(int32_t*)mv_cache[2][3]= *(int32_t*)s->current_picture.motion_val[0][mot_xy+2+mot_stride];
  2483.         }
  2484.         
  2485.         mx = 0;
  2486.         my = 0;
  2487.         for(i=0;i<4;i++) {
  2488.             const int x= (i&1)+1;
  2489.             const int y= (i>>1)+1;
  2490.             int16_t mv[5][2]= {
  2491.                 {mv_cache[y][x  ][0], mv_cache[y][x  ][1]},
  2492.                 {mv_cache[y-1][x][0], mv_cache[y-1][x][1]},
  2493.                 {mv_cache[y][x-1][0], mv_cache[y][x-1][1]},
  2494.                 {mv_cache[y][x+1][0], mv_cache[y][x+1][1]},
  2495.                 {mv_cache[y+1][x][0], mv_cache[y+1][x][1]}};
  2496.             //FIXME cleanup
  2497.             obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
  2498.                         ref_picture[0],
  2499.                         mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >>1) * 8,
  2500.                         pix_op[1],
  2501.                         mv);
  2502.             mx += mv[0][0];
  2503.             my += mv[0][1];
  2504.         }
  2505.         if(!(s->flags&CODEC_FLAG_GRAY))
  2506.             chroma_4mv_motion(s, dest_cb, dest_cr, ref_picture, pix_op[1], mx, my);
  2507.         return;
  2508.     }
  2509.    
  2510.     switch(s->mv_type) {
  2511.     case MV_TYPE_16X16:
  2512. #ifdef CONFIG_RISKY
  2513.         if(s->mcsel){
  2514.             if(s->real_sprite_warping_points==1){
  2515.                 gmc1_motion(s, dest_y, dest_cb, dest_cr,
  2516.                             ref_picture);
  2517.             }else{
  2518.                 gmc_motion(s, dest_y, dest_cb, dest_cr,
  2519.                             ref_picture);
  2520.             }
  2521.         }else if(s->quarter_sample){
  2522.             qpel_motion(s, dest_y, dest_cb, dest_cr, 
  2523.                         0, 0, 0,
  2524.                         ref_picture, pix_op, qpix_op,
  2525.                         s->mv[dir][0][0], s->mv[dir][0][1], 16);
  2526.         }else if(s->mspel){
  2527.             ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
  2528.                         ref_picture, pix_op,
  2529.                         s->mv[dir][0][0], s->mv[dir][0][1], 16);
  2530.         }else
  2531. #endif
  2532.         {
  2533.             mpeg_motion(s, dest_y, dest_cb, dest_cr, 
  2534.                         0, 0, 0,
  2535.                         ref_picture, pix_op,
  2536.                         s->mv[dir][0][0], s->mv[dir][0][1], 16);
  2537.         }           
  2538.         break;
  2539.     case MV_TYPE_8X8:
  2540.         mx = 0;
  2541.         my = 0;
  2542.         if(s->quarter_sample){
  2543.             for(i=0;i<4;i++) {
  2544.                 motion_x = s->mv[dir][i][0];
  2545.                 motion_y = s->mv[dir][i][1];
  2546.                 dxy = ((motion_y & 3) << 2) | (motion_x & 3);
  2547.                 src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
  2548.                 src_y = mb_y * 16 + (motion_y >> 2) + (i >>1) * 8;
  2549.                     
                /* WARNING: do not forget half pels */
  2551.                 src_x = clip(src_x, -16, s->width);
  2552.                 if (src_x == s->width)
  2553.                     dxy &= ~3;
  2554.                 src_y = clip(src_y, -16, s->height);
  2555.                 if (src_y == s->height)
  2556.                     dxy &= ~12;
  2557.                     
  2558.                 ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
  2559.                 if(s->flags&CODEC_FLAG_EMU_EDGE){
  2560.                     if(   (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 8 
  2561.                        || (unsigned)src_y > s->v_edge_pos - (motion_y&3) - 8 ){
  2562.                         ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
  2563.                         ptr= s->edge_emu_buffer;
  2564.                     }
  2565.                 }
  2566.                 dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
  2567.                 qpix_op[1][dxy](dest, ptr, s->linesize);
  2568.                 mx += s->mv[dir][i][0]/2;
  2569.                 my += s->mv[dir][i][1]/2;
  2570.             }
  2571.         }else{
  2572.             for(i=0;i<4;i++) {
  2573.                 hpel_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
  2574.                             ref_picture[0], 0, 0,
  2575.                             mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >>1) * 8,
  2576.                             s->width, s->height, s->linesize,
  2577.                             s->h_edge_pos, s->v_edge_pos,
  2578.                             8, 8, pix_op[1],
  2579.                             s->mv[dir][i][0], s->mv[dir][i][1]);
  2580.                 mx += s->mv[dir][i][0];
  2581.                 my += s->mv[dir][i][1];
  2582.             }
  2583.         }
  2584.         if(!(s->flags&CODEC_FLAG_GRAY))
  2585.             chroma_4mv_motion(s, dest_cb, dest_cr, ref_picture, pix_op[1], mx, my);
  2586.         break;
  2587.     case MV_TYPE_FIELD:
  2588.         if (s->picture_structure == PICT_FRAME) {
  2589.             if(s->quarter_sample){
  2590.                 for(i=0; i<2; i++){
  2591.                     qpel_motion(s, dest_y, dest_cb, dest_cr,
  2592.                                 1, i, s->field_select[dir][i],
  2593.                                 ref_picture, pix_op, qpix_op,
  2594.                                 s->mv[dir][i][0], s->mv[dir][i][1], 8);
  2595.                 }
  2596.             }else{
  2597.                 /* top field */       
  2598.                 mpeg_motion(s, dest_y, dest_cb, dest_cr,
  2599.                             1, 0, s->field_select[dir][0],
  2600.                             ref_picture, pix_op,
  2601.                             s->mv[dir][0][0], s->mv[dir][0][1], 8);
  2602.                 /* bottom field */
  2603.                 mpeg_motion(s, dest_y, dest_cb, dest_cr,
  2604.                             1, 1, s->field_select[dir][1],
  2605.                             ref_picture, pix_op,
  2606.                             s->mv[dir][1][0], s->mv[dir][1][1], 8);
  2607.             }
  2608.         } else {
  2609.             if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != B_TYPE && !s->first_field){
  2610.                 ref_picture= s->current_picture_ptr->data;
  2611.             } 
  2612.             mpeg_motion(s, dest_y, dest_cb, dest_cr,
  2613.                         0, 0, s->field_select[dir][0],
  2614.                         ref_picture, pix_op,
  2615.                         s->mv[dir][0][0], s->mv[dir][0][1], 16);
  2616.         }
  2617.         break;
  2618.     case MV_TYPE_16X8:
  2619.         for(i=0; i<2; i++){
  2620.             uint8_t ** ref2picture;
  2621.             if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == B_TYPE || s->first_field){
  2622.                 ref2picture= ref_picture;
  2623.             }else{
  2624.                 ref2picture= s->current_picture_ptr->data;
  2625.             } 
  2626.             mpeg_motion(s, dest_y, dest_cb, dest_cr, 
  2627.                         0, 0, s->field_select[dir][i],
  2628.                         ref2picture, pix_op,
  2629.                         s->mv[dir][i][0], s->mv[dir][i][1] + 16*i, 8);
  2630.                 
  2631.             dest_y += 16*s->linesize;
  2632.             dest_cb+= (16>>s->chroma_y_shift)*s->uvlinesize;
  2633.             dest_cr+= (16>>s->chroma_y_shift)*s->uvlinesize;
  2634.         }        
  2635.         break;
  2636.     case MV_TYPE_DMV:
  2637.         if(s->picture_structure == PICT_FRAME){
  2638.             for(i=0; i<2; i++){
  2639.                 int j;
  2640.                 for(j=0; j<2; j++){
  2641.                     mpeg_motion(s, dest_y, dest_cb, dest_cr,
  2642.                                 1, j, j^i,
  2643.                                 ref_picture, pix_op,
  2644.                                 s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], 8);
  2645.                 }
  2646.                 pix_op = s->dsp.avg_pixels_tab; 
  2647.             }
  2648.         }else{
  2649.             for(i=0; i<2; i++){
  2650.                 mpeg_motion(s, dest_y, dest_cb, dest_cr, 
  2651.                             0, 0, s->picture_structure != i+1,
  2652.                             ref_picture, pix_op,
  2653.                             s->mv[dir][2*i][0],s->mv[dir][2*i][1],16);
  2654.                 // after put we make avg of the same block
  2655.                 pix_op=s->dsp.avg_pixels_tab; 
  2656.                 //opposite parity is always in the same frame if this is second field
  2657.                 if(!s->first_field){
  2658.                     ref_picture = s->current_picture_ptr->data;    
  2659.                 }
  2660.             }
  2661.         }
  2662.     break;
  2663.     default: assert(0);
  2664.     }
  2665. }
/**
 * Dequantize an intra block in place, then IDCT it and *store* (overwrite,
 * not add) the result into dest[].
 *
 * @param block     quantized DCT coefficients; modified in place
 * @param i         block index within the macroblock (selects per-block
 *                  quantizer state inside s)
 * @param dest      destination pixel plane (top-left of this 8x8 block)
 * @param line_size stride of dest in bytes
 * @param qscale    quantizer scale used for dequantization
 */
static inline void put_dct(MpegEncContext *s, 
                           DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->dct_unquantize_intra(s, block, i, qscale);
    s->dsp.idct_put (dest, line_size, block);
}
  2673. /* add block[] to dest[] */
  2674. static inline void add_dct(MpegEncContext *s, 
  2675.                            DCTELEM *block, int i, uint8_t *dest, int line_size)
  2676. {
  2677.     if (s->block_last_index[i] >= 0) {
  2678.         s->dsp.idct_add (dest, line_size, block);
  2679.     }
  2680. }
  2681. static inline void add_dequant_dct(MpegEncContext *s, 
  2682.                            DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
  2683. {
  2684.     if (s->block_last_index[i] >= 0) {
  2685.         s->dct_unquantize_inter(s, block, i, qscale);
  2686.         s->dsp.idct_add (dest, line_size, block);
  2687.     }
  2688. }
  2689. /**
  2690.  * cleans dc, ac, coded_block for the current non intra MB
  2691.  */
  2692. void ff_clean_intra_table_entries(MpegEncContext *s)
  2693. {
  2694.     int wrap = s->b8_stride;
  2695.     int xy = s->block_index[0];
  2696.     
  2697.     s->dc_val[0][xy           ] = 
  2698.     s->dc_val[0][xy + 1       ] = 
  2699.     s->dc_val[0][xy     + wrap] =
  2700.     s->dc_val[0][xy + 1 + wrap] = 1024;
  2701.     /* ac pred */
  2702.     memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
  2703.     memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
  2704.     if (s->msmpeg4_version>=3) {
  2705.         s->coded_block[xy           ] =
  2706.         s->coded_block[xy + 1       ] =
  2707.         s->coded_block[xy     + wrap] =
  2708.         s->coded_block[xy + 1 + wrap] = 0;
  2709.     }
  2710.     /* chroma */
  2711.     wrap = s->mb_stride;
  2712.     xy = s->mb_x + s->mb_y * wrap;
  2713.     s->dc_val[1][xy] =
  2714.     s->dc_val[2][xy] = 1024;
  2715.     /* ac pred */
  2716.     memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
  2717.     memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
  2718.     
  2719.     s->mbintra_table[xy]= 0;
  2720. }
  2721. /* generic function called after a macroblock has been parsed by the
  2722.    decoder or after it has been encoded by the encoder.
  2723.    Important variables used:
  2724.    s->mb_intra : true if intra macroblock
  2725.    s->mv_dir   : motion vector direction
  2726.    s->mv_type  : motion vector type
  2727.    s->mv       : motion vector
  2728.    s->interlaced_dct : true if interlaced dct used (mpeg2)
  2729.  */