h264.c
Uploaded by: jylinhe
Upload date: 2022-07-11
Archive size: 334k
File size: 325k
Source category: Multimedia programming
Development platform: Visual C++
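The listing below appears to be an excerpt from FFmpeg's H.264/AVC decoder (libavcodec/h264.c). It covers default reference list construction and reordering, explicit and implicit weighted prediction, reference picture marking (MMCO), picture order count (POC) derivation, scan table initialization, slice header parsing, and the beginning of CAVLC residual decoding.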

  1.         if(i[1] < len){
  2.             in[ i[1] ]->pic_id= is_long ? i[1] : in[ i[1] ]->frame_num;
  3.             split_field_copy(&def[index++], in[ i[1]++ ], sel^3, 0);
  4.         }
  5.     }
  6.     return index;
  7. }
  8. static int add_sorted(Picture **sorted, Picture **src, int len, int limit, int dir){
  9.     int i, best_poc;
  10.     int out_i= 0;
  11.     for(;;){
  12.         best_poc= dir ? INT_MIN : INT_MAX;
  13.         for(i=0; i<len; i++){
  14.             const int poc= src[i]->poc;
  15.             if(((poc > limit) ^ dir) && ((poc < best_poc) ^ dir)){
  16.                 best_poc= poc;
  17.                 sorted[out_i]= src[i];
  18.             }
  19.         }
  20.         if(best_poc == (dir ? INT_MIN : INT_MAX))
  21.             break;
  22.         limit= sorted[out_i++]->poc - dir;
  23.     }
  24.     return out_i;
  25. }
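/* Note: add_sorted() is a selection sort by POC relative to `limit`.  With
 * dir==0 it emits the pictures whose POC is greater than `limit`, closest
 * first (ascending); with dir==1 it emits those whose POC is at most
 * `limit`, closest first (descending).  fill_default_ref_list() below uses
 * this to build the B-slice default lists, taking the temporally nearer
 * pictures on one side of the current POC before those on the other side. */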
  26. /**
  27.  * fills the default_ref_list.
  28.  */
  29. static int fill_default_ref_list(H264Context *h){
  30.     MpegEncContext * const s = &h->s;
  31.     int i, len;
  32.     if(h->slice_type_nos==FF_B_TYPE){
  33.         Picture *sorted[32];
  34.         int cur_poc, list;
  35.         int lens[2];
  36.         if(FIELD_PICTURE)
  37.             cur_poc= s->current_picture_ptr->field_poc[ s->picture_structure == PICT_BOTTOM_FIELD ];
  38.         else
  39.             cur_poc= s->current_picture_ptr->poc;
  40.         for(list= 0; list<2; list++){
  41.             len= add_sorted(sorted    , h->short_ref, h->short_ref_count, cur_poc, 1^list);
  42.             len+=add_sorted(sorted+len, h->short_ref, h->short_ref_count, cur_poc, 0^list);
  43.             assert(len<=32);
  44.             len= build_def_list(h->default_ref_list[list]    , sorted     , len, 0, s->picture_structure);
  45.             len+=build_def_list(h->default_ref_list[list]+len, h->long_ref, 16 , 1, s->picture_structure);
  46.             assert(len<=32);
  47.             if(len < h->ref_count[list])
  48.                 memset(&h->default_ref_list[list][len], 0, sizeof(Picture)*(h->ref_count[list] - len));
  49.             lens[list]= len;
  50.         }
  51.         if(lens[0] == lens[1] && lens[1] > 1){
  52.             for(i=0; h->default_ref_list[0][i].data[0] == h->default_ref_list[1][i].data[0] && i<lens[0]; i++);
  53.             if(i == lens[0])
  54.                 FFSWAP(Picture, h->default_ref_list[1][0], h->default_ref_list[1][1]);
  55.         }
  56.     }else{
  57.         len = build_def_list(h->default_ref_list[0]    , h->short_ref, h->short_ref_count, 0, s->picture_structure);
  58.         len+= build_def_list(h->default_ref_list[0]+len, h-> long_ref, 16                , 1, s->picture_structure);
  59.         assert(len <= 32);
  60.         if(len < h->ref_count[0])
  61.             memset(&h->default_ref_list[0][len], 0, sizeof(Picture)*(h->ref_count[0] - len));
  62.     }
  63. #ifdef TRACE
  64.     for (i=0; i<h->ref_count[0]; i++) {
  65.         tprintf(h->s.avctx, "List0: %s fn:%d 0x%p\n", (h->default_ref_list[0][i].long_ref ? "LT" : "ST"), h->default_ref_list[0][i].pic_id, h->default_ref_list[0][i].data[0]);
  66.     }
  67.     if(h->slice_type_nos==FF_B_TYPE){
  68.         for (i=0; i<h->ref_count[1]; i++) {
  69.             tprintf(h->s.avctx, "List1: %s fn:%d 0x%p\n", (h->default_ref_list[1][i].long_ref ? "LT" : "ST"), h->default_ref_list[1][i].pic_id, h->default_ref_list[1][i].data[0]);
  70.         }
  71.     }
  72. #endif
  73.     return 0;
  74. }
  75. static void print_short_term(H264Context *h);
  76. static void print_long_term(H264Context *h);
  77. /**
  78.  * Extract structure information about the picture described by pic_num in
  79.  * the current decoding context (frame or field). Note that pic_num is
  80.  * picture number without wrapping (so, 0<=pic_num<max_pic_num).
  81.  * @param pic_num picture number for which to extract structure information
  82.  * @param structure one of PICT_XXX describing structure of picture
  83.  *                      with pic_num
  84.  * @return frame number (short term) or long term index of picture
  85.  *         described by pic_num
  86.  */
  87. static int pic_num_extract(H264Context *h, int pic_num, int *structure){
  88.     MpegEncContext * const s = &h->s;
  89.     *structure = s->picture_structure;
  90.     if(FIELD_PICTURE){
  91.         if (!(pic_num & 1))
  92.             /* opposite field */
  93.             *structure ^= PICT_FRAME;
  94.         pic_num >>= 1;
  95.     }
  96.     return pic_num;
  97. }
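/* Illustration of pic_num_extract() for field decoding: the parity of
 * pic_num selects the field.  With a top-field current picture, pic_num 5
 * (odd) keeps the top-field structure and maps to frame-level number
 * 5>>1 == 2, while pic_num 4 (even) flips the structure to the opposite
 * (bottom) field of the same frame-level number.  For frame pictures the
 * value is returned unchanged. */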
  98. static int decode_ref_pic_list_reordering(H264Context *h){
  99.     MpegEncContext * const s = &h->s;
  100.     int list, index, pic_structure;
  101.     print_short_term(h);
  102.     print_long_term(h);
  103.     for(list=0; list<h->list_count; list++){
  104.         memcpy(h->ref_list[list], h->default_ref_list[list], sizeof(Picture)*h->ref_count[list]);
  105.         if(get_bits1(&s->gb)){
  106.             int pred= h->curr_pic_num;
  107.             for(index=0; ; index++){
  108.                 unsigned int reordering_of_pic_nums_idc= get_ue_golomb_31(&s->gb);
  109.                 unsigned int pic_id;
  110.                 int i;
  111.                 Picture *ref = NULL;
  112.                 if(reordering_of_pic_nums_idc==3)
  113.                     break;
  114.                 if(index >= h->ref_count[list]){
  115.                     av_log(h->s.avctx, AV_LOG_ERROR, "reference count overflow\n");
  116.                     return -1;
  117.                 }
  118.                 if(reordering_of_pic_nums_idc<3){
  119.                     if(reordering_of_pic_nums_idc<2){
  120.                         const unsigned int abs_diff_pic_num= get_ue_golomb(&s->gb) + 1;
  121.                         int frame_num;
  122.                         if((int)abs_diff_pic_num > h->max_pic_num){
  123.                             av_log(h->s.avctx, AV_LOG_ERROR, "abs_diff_pic_num overflow\n");
  124.                             return -1;
  125.                         }
  126.                         if(reordering_of_pic_nums_idc == 0) pred-= abs_diff_pic_num;
  127.                         else                                pred+= abs_diff_pic_num;
  128.                         pred &= h->max_pic_num - 1;
  129.                         frame_num = pic_num_extract(h, pred, &pic_structure);
  130.                         for(i= h->short_ref_count-1; i>=0; i--){
  131.                             ref = h->short_ref[i];
  132.                             assert(ref->reference);
  133.                             assert(!ref->long_ref);
  134.                             if(
  135.                                    ref->frame_num == frame_num &&
  136.                                    (ref->reference & pic_structure)
  137.                               )
  138.                                 break;
  139.                         }
  140.                         if(i>=0)
  141.                             ref->pic_id= pred;
  142.                     }else{
  143.                         int long_idx;
  144.                         pic_id= get_ue_golomb(&s->gb); //long_term_pic_idx
  145.                         long_idx= pic_num_extract(h, pic_id, &pic_structure);
  146.                         if(long_idx>31){
  147.                             av_log(h->s.avctx, AV_LOG_ERROR, "long_term_pic_idx overflow\n");
  148.                             return -1;
  149.                         }
  150.                         ref = h->long_ref[long_idx];
  151.                         assert(!(ref && !ref->reference));
  152.                         if(ref && (ref->reference & pic_structure)){
  153.                             ref->pic_id= pic_id;
  154.                             assert(ref->long_ref);
  155.                             i=0;
  156.                         }else{
  157.                             i=-1;
  158.                         }
  159.                     }
  160.                     if (i < 0) {
  161.                         av_log(h->s.avctx, AV_LOG_ERROR, "reference picture missing during reorder\n");
  162.                         memset(&h->ref_list[list][index], 0, sizeof(Picture)); //FIXME
  163.                     } else {
  164.                         for(i=index; i+1<h->ref_count[list]; i++){
  165.                             if(ref->long_ref == h->ref_list[list][i].long_ref && ref->pic_id == h->ref_list[list][i].pic_id)
  166.                                 break;
  167.                         }
  168.                         for(; i > index; i--){
  169.                             h->ref_list[list][i]= h->ref_list[list][i-1];
  170.                         }
  171.                         h->ref_list[list][index]= *ref;
  172.                         if (FIELD_PICTURE){
  173.                             pic_as_field(&h->ref_list[list][index], pic_structure);
  174.                         }
  175.                     }
  176.                 }else{
  177.                     av_log(h->s.avctx, AV_LOG_ERROR, "illegal reordering_of_pic_nums_idc\n");
  178.                     return -1;
  179.                 }
  180.             }
  181.         }
  182.     }
  183.     for(list=0; list<h->list_count; list++){
  184.         for(index= 0; index < h->ref_count[list]; index++){
  185.             if(!h->ref_list[list][index].data[0]){
  186.                 av_log(h->s.avctx, AV_LOG_ERROR, "Missing reference picture\n");
  187.                 h->ref_list[list][index]= s->current_picture; //FIXME this is not a sensible solution
  188.             }
  189.         }
  190.     }
  191.     return 0;
  192. }
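/* The reordering loop above follows the standard's procedure: for each
 * reordering_of_pic_nums_idc the addressed short-term reference (located
 * by pic-num difference from `pred`) or long-term reference (located by
 * index) is looked up, the entries from `index` up to its first occurrence
 * in the list are shifted up one slot, and the reference is written at
 * `index`.  Entries that are still empty afterwards are patched with the
 * current picture as a crude error-concealment fallback (see the FIXME
 * above). */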
  193. static void fill_mbaff_ref_list(H264Context *h){
  194.     int list, i, j;
  195.     for(list=0; list<2; list++){ //FIXME try list_count
  196.         for(i=0; i<h->ref_count[list]; i++){
  197.             Picture *frame = &h->ref_list[list][i];
  198.             Picture *field = &h->ref_list[list][16+2*i];
  199.             field[0] = *frame;
  200.             for(j=0; j<3; j++)
  201.                 field[0].linesize[j] <<= 1;
  202.             field[0].reference = PICT_TOP_FIELD;
  203.             field[0].poc= field[0].field_poc[0];
  204.             field[1] = field[0];
  205.             for(j=0; j<3; j++)
  206.                 field[1].data[j] += frame->linesize[j];
  207.             field[1].reference = PICT_BOTTOM_FIELD;
  208.             field[1].poc= field[1].field_poc[1];
  209.             h->luma_weight[list][16+2*i] = h->luma_weight[list][16+2*i+1] = h->luma_weight[list][i];
  210.             h->luma_offset[list][16+2*i] = h->luma_offset[list][16+2*i+1] = h->luma_offset[list][i];
  211.             for(j=0; j<2; j++){
  212.                 h->chroma_weight[list][16+2*i][j] = h->chroma_weight[list][16+2*i+1][j] = h->chroma_weight[list][i][j];
  213.                 h->chroma_offset[list][16+2*i][j] = h->chroma_offset[list][16+2*i+1][j] = h->chroma_offset[list][i][j];
  214.             }
  215.         }
  216.     }
  217.     for(j=0; j<h->ref_count[1]; j++){
  218.         for(i=0; i<h->ref_count[0]; i++)
  219.             h->implicit_weight[j][16+2*i] = h->implicit_weight[j][16+2*i+1] = h->implicit_weight[j][i];
  220.         memcpy(h->implicit_weight[16+2*j],   h->implicit_weight[j], sizeof(*h->implicit_weight));
  221.         memcpy(h->implicit_weight[16+2*j+1], h->implicit_weight[j], sizeof(*h->implicit_weight));
  222.     }
  223. }
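/* fill_mbaff_ref_list() above derives per-field reference entries for
 * MBAFF frames: each frame reference i is mirrored into two field entries
 * at indices 16+2*i (top) and 16+2*i+1 (bottom).  The field entries reuse
 * the frame's planes with doubled linesize, the bottom field additionally
 * offsetting its data pointers by one line, and the prediction weights and
 * offsets are duplicated so field macroblocks can index the same tables. */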
  224. static int pred_weight_table(H264Context *h){
  225.     MpegEncContext * const s = &h->s;
  226.     int list, i;
  227.     int luma_def, chroma_def;
  228.     h->use_weight= 0;
  229.     h->use_weight_chroma= 0;
  230.     h->luma_log2_weight_denom= get_ue_golomb(&s->gb);
  231.     h->chroma_log2_weight_denom= get_ue_golomb(&s->gb);
  232.     luma_def = 1<<h->luma_log2_weight_denom;
  233.     chroma_def = 1<<h->chroma_log2_weight_denom;
  234.     for(list=0; list<2; list++){
  235.         for(i=0; i<h->ref_count[list]; i++){
  236.             int luma_weight_flag, chroma_weight_flag;
  237.             luma_weight_flag= get_bits1(&s->gb);
  238.             if(luma_weight_flag){
  239.                 h->luma_weight[list][i]= get_se_golomb(&s->gb);
  240.                 h->luma_offset[list][i]= get_se_golomb(&s->gb);
  241.                 if(   h->luma_weight[list][i] != luma_def
  242.                    || h->luma_offset[list][i] != 0)
  243.                     h->use_weight= 1;
  244.             }else{
  245.                 h->luma_weight[list][i]= luma_def;
  246.                 h->luma_offset[list][i]= 0;
  247.             }
  248.             if(CHROMA){
  249.                 chroma_weight_flag= get_bits1(&s->gb);
  250.                 if(chroma_weight_flag){
  251.                     int j;
  252.                     for(j=0; j<2; j++){
  253.                         h->chroma_weight[list][i][j]= get_se_golomb(&s->gb);
  254.                         h->chroma_offset[list][i][j]= get_se_golomb(&s->gb);
  255.                         if(   h->chroma_weight[list][i][j] != chroma_def
  256.                         || h->chroma_offset[list][i][j] != 0)
  257.                             h->use_weight_chroma= 1;
  258.                     }
  259.                 }else{
  260.                     int j;
  261.                     for(j=0; j<2; j++){
  262.                         h->chroma_weight[list][i][j]= chroma_def;
  263.                         h->chroma_offset[list][i][j]= 0;
  264.                     }
  265.                 }
  266.             }
  267.         }
  268.         if(h->slice_type_nos != FF_B_TYPE) break;
  269.     }
  270.     h->use_weight= h->use_weight || h->use_weight_chroma;
  271.     return 0;
  272. }
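/* pred_weight_table() above parses explicit weighted prediction: each
 * reference gets a luma (and, with chroma, two chroma) weight/offset pair,
 * defaulting to 1<<log2_weight_denom and 0 when the per-reference flag is
 * not set.  use_weight / use_weight_chroma are only raised when a value
 * deviates from those defaults, so motion compensation can skip weighting
 * for streams that transmit the table but keep the default values. */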
  273. static void implicit_weight_table(H264Context *h){
  274.     MpegEncContext * const s = &h->s;
  275.     int ref0, ref1;
  276.     int cur_poc = s->current_picture_ptr->poc;
  277.     if(   h->ref_count[0] == 1 && h->ref_count[1] == 1
  278.        && h->ref_list[0][0].poc + h->ref_list[1][0].poc == 2*cur_poc){
  279.         h->use_weight= 0;
  280.         h->use_weight_chroma= 0;
  281.         return;
  282.     }
  283.     h->use_weight= 2;
  284.     h->use_weight_chroma= 2;
  285.     h->luma_log2_weight_denom= 5;
  286.     h->chroma_log2_weight_denom= 5;
  287.     for(ref0=0; ref0 < h->ref_count[0]; ref0++){
  288.         int poc0 = h->ref_list[0][ref0].poc;
  289.         for(ref1=0; ref1 < h->ref_count[1]; ref1++){
  290.             int poc1 = h->ref_list[1][ref1].poc;
  291.             int td = av_clip(poc1 - poc0, -128, 127);
  292.             if(td){
  293.                 int tb = av_clip(cur_poc - poc0, -128, 127);
  294.                 int tx = (16384 + (FFABS(td) >> 1)) / td;
  295.                 int dist_scale_factor = av_clip((tb*tx + 32) >> 6, -1024, 1023) >> 2;
  296.                 if(dist_scale_factor < -64 || dist_scale_factor > 128)
  297.                     h->implicit_weight[ref0][ref1] = 32;
  298.                 else
  299.                     h->implicit_weight[ref0][ref1] = 64 - dist_scale_factor;
  300.             }else
  301.                 h->implicit_weight[ref0][ref1] = 32;
  302.         }
  303.     }
  304. }
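/* Worked example for the implicit weights computed above (illustrative
 * numbers): with cur_poc=2, poc0=0, poc1=4 (the current picture temporally
 * centred between its references) td=4, tb=2, tx=(16384+2)/4 = 4096 in
 * integer division, and dist_scale_factor=((2*4096+32)>>6)>>2 = 32, so the
 * table entry is 64-32 = 32, an equal 32/32 split.  With cur_poc=1 instead,
 * tb=1 gives dist_scale_factor=(4128>>6)>>2 = 16 and an entry of 48
 * (complement 16), the unequal split implicit weighting produces when the
 * current picture lies much closer to one reference than to the other. */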
  305. /**
  306.  * Mark a picture as no longer needed for reference. The refmask
  307.  * argument allows unreferencing of individual fields or the whole frame.
  308.  * If the picture becomes entirely unreferenced, but is being held for
  309.  * display purposes, it is marked as such.
  310.  * @param refmask mask of fields to unreference; the mask is bitwise
  311.  *                anded with the reference marking of pic
  312.  * @return non-zero if pic becomes entirely unreferenced (except possibly
  313.  *         for display purposes) zero if one of the fields remains in
  314.  *         reference
  315.  */
  316. static inline int unreference_pic(H264Context *h, Picture *pic, int refmask){
  317.     int i;
  318.     if (pic->reference &= refmask) {
  319.         return 0;
  320.     } else {
  321.         for(i = 0; h->delayed_pic[i]; i++)
  322.             if(pic == h->delayed_pic[i]){
  323.                 pic->reference=DELAYED_PIC_REF;
  324.                 break;
  325.             }
  326.         return 1;
  327.     }
  328. }
  329. /**
  330.  * instantaneous decoder refresh.
  331.  */
  332. static void idr(H264Context *h){
  333.     int i;
  334.     for(i=0; i<16; i++){
  335.         remove_long(h, i, 0);
  336.     }
  337.     assert(h->long_ref_count==0);
  338.     for(i=0; i<h->short_ref_count; i++){
  339.         unreference_pic(h, h->short_ref[i], 0);
  340.         h->short_ref[i]= NULL;
  341.     }
  342.     h->short_ref_count=0;
  343.     h->prev_frame_num= 0;
  344.     h->prev_frame_num_offset= 0;
  345.     h->prev_poc_msb=
  346.     h->prev_poc_lsb= 0;
  347. }
  348. /* forget old pics after a seek */
  349. static void flush_dpb(AVCodecContext *avctx){
  350.     H264Context *h= avctx->priv_data;
  351.     int i;
  352.     for(i=0; i<MAX_DELAYED_PIC_COUNT; i++) {
  353.         if(h->delayed_pic[i])
  354.             h->delayed_pic[i]->reference= 0;
  355.         h->delayed_pic[i]= NULL;
  356.     }
  357.     h->outputed_poc= INT_MIN;
  358.     idr(h);
  359.     if(h->s.current_picture_ptr)
  360.         h->s.current_picture_ptr->reference= 0;
  361.     h->s.first_field= 0;
  362.     ff_mpeg_flush(avctx);
  363. }
  364. /**
  365.  * Find a Picture in the short term reference list by frame number.
  366.  * @param frame_num frame number to search for
  367.  * @param idx the index into h->short_ref where returned picture is found
  368.  *            undefined if no picture found.
  369.  * @return pointer to the found picture, or NULL if no pic with the provided
  370.  *                 frame number is found
  371.  */
  372. static Picture * find_short(H264Context *h, int frame_num, int *idx){
  373.     MpegEncContext * const s = &h->s;
  374.     int i;
  375.     for(i=0; i<h->short_ref_count; i++){
  376.         Picture *pic= h->short_ref[i];
  377.         if(s->avctx->debug&FF_DEBUG_MMCO)
  378.             av_log(h->s.avctx, AV_LOG_DEBUG, "%d %d %p\n", i, pic->frame_num, pic);
  379.         if(pic->frame_num == frame_num) {
  380.             *idx = i;
  381.             return pic;
  382.         }
  383.     }
  384.     return NULL;
  385. }
  386. /**
  387.  * Remove a picture from the short term reference list by its index in
  388.  * that list.  This does no checking on the provided index; it is assumed
  389.  * to be valid. Other list entries are shifted down.
  390.  * @param i index into h->short_ref of picture to remove.
  391.  */
  392. static void remove_short_at_index(H264Context *h, int i){
  393.     assert(i >= 0 && i < h->short_ref_count);
  394.     h->short_ref[i]= NULL;
  395.     if (--h->short_ref_count)
  396.         memmove(&h->short_ref[i], &h->short_ref[i+1], (h->short_ref_count - i)*sizeof(Picture*));
  397. }
  398. /**
  399.  *
  400.  * @return the removed picture or NULL if an error occurs
  401.  */
  402. static Picture * remove_short(H264Context *h, int frame_num, int ref_mask){
  403.     MpegEncContext * const s = &h->s;
  404.     Picture *pic;
  405.     int i;
  406.     if(s->avctx->debug&FF_DEBUG_MMCO)
  407.         av_log(h->s.avctx, AV_LOG_DEBUG, "remove short %d count %d\n", frame_num, h->short_ref_count);
  408.     pic = find_short(h, frame_num, &i);
  409.     if (pic){
  410.         if(unreference_pic(h, pic, ref_mask))
  411.             remove_short_at_index(h, i);
  412.     }
  413.     return pic;
  414. }
  415. /**
  416.  * Remove a picture from the long term reference list by its index in
  417.  * that list.
  418.  * @return the removed picture or NULL if an error occurs
  419.  */
  420. static Picture * remove_long(H264Context *h, int i, int ref_mask){
  421.     Picture *pic;
  422.     pic= h->long_ref[i];
  423.     if (pic){
  424.         if(unreference_pic(h, pic, ref_mask)){
  425.             assert(h->long_ref[i]->long_ref == 1);
  426.             h->long_ref[i]->long_ref= 0;
  427.             h->long_ref[i]= NULL;
  428.             h->long_ref_count--;
  429.         }
  430.     }
  431.     return pic;
  432. }
  433. /**
  434.  * print short term list
  435.  */
  436. static void print_short_term(H264Context *h) {
  437.     int32_t i;
  438.     if(h->s.avctx->debug&FF_DEBUG_MMCO) {
  439.         av_log(h->s.avctx, AV_LOG_DEBUG, "short term list:\n");
  440.         for(i=0; i<h->short_ref_count; i++){
  441.             Picture *pic= h->short_ref[i];
  442.             av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n", i, pic->frame_num, pic->poc, pic->data[0]);
  443.         }
  444.     }
  445. }
  446. /**
  447.  * print long term list
  448.  */
  449. static void print_long_term(H264Context *h) {
  450.     uint32_t i;
  451.     if(h->s.avctx->debug&FF_DEBUG_MMCO) {
  452.         av_log(h->s.avctx, AV_LOG_DEBUG, "long term list:\n");
  453.         for(i = 0; i < 16; i++){
  454.             Picture *pic= h->long_ref[i];
  455.             if (pic) {
  456.                 av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n", i, pic->frame_num, pic->poc, pic->data[0]);
  457.             }
  458.         }
  459.     }
  460. }
  461. /**
  462.  * Executes the reference picture marking (memory management control operations).
  463.  */
  464. static int execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
  465.     MpegEncContext * const s = &h->s;
  466.     int i, j;
  467.     int current_ref_assigned=0;
  468.     Picture *pic;
  469.     if((s->avctx->debug&FF_DEBUG_MMCO) && mmco_count==0)
  470.         av_log(h->s.avctx, AV_LOG_DEBUG, "no mmco here\n");
  471.     for(i=0; i<mmco_count; i++){
  472.         int structure, frame_num;
  473.         if(s->avctx->debug&FF_DEBUG_MMCO)
  474.             av_log(h->s.avctx, AV_LOG_DEBUG, "mmco:%d %d %d\n", h->mmco[i].opcode, h->mmco[i].short_pic_num, h->mmco[i].long_arg);
  475.         if(   mmco[i].opcode == MMCO_SHORT2UNUSED
  476.            || mmco[i].opcode == MMCO_SHORT2LONG){
  477.             frame_num = pic_num_extract(h, mmco[i].short_pic_num, &structure);
  478.             pic = find_short(h, frame_num, &j);
  479.             if(!pic){
  480.                 if(mmco[i].opcode != MMCO_SHORT2LONG || !h->long_ref[mmco[i].long_arg]
  481.                    || h->long_ref[mmco[i].long_arg]->frame_num != frame_num)
  482.                 av_log(h->s.avctx, AV_LOG_ERROR, "mmco: unref short failure\n");
  483.                 continue;
  484.             }
  485.         }
  486.         switch(mmco[i].opcode){
  487.         case MMCO_SHORT2UNUSED:
  488.             if(s->avctx->debug&FF_DEBUG_MMCO)
  489.                 av_log(h->s.avctx, AV_LOG_DEBUG, "mmco: unref short %d count %d\n", h->mmco[i].short_pic_num, h->short_ref_count);
  490.             remove_short(h, frame_num, structure ^ PICT_FRAME);
  491.             break;
  492.         case MMCO_SHORT2LONG:
  493.                 if (h->long_ref[mmco[i].long_arg] != pic)
  494.                     remove_long(h, mmco[i].long_arg, 0);
  495.                 remove_short_at_index(h, j);
  496.                 h->long_ref[ mmco[i].long_arg ]= pic;
  497.                 if (h->long_ref[ mmco[i].long_arg ]){
  498.                     h->long_ref[ mmco[i].long_arg ]->long_ref=1;
  499.                     h->long_ref_count++;
  500.                 }
  501.             break;
  502.         case MMCO_LONG2UNUSED:
  503.             j = pic_num_extract(h, mmco[i].long_arg, &structure);
  504.             pic = h->long_ref[j];
  505.             if (pic) {
  506.                 remove_long(h, j, structure ^ PICT_FRAME);
  507.             } else if(s->avctx->debug&FF_DEBUG_MMCO)
  508.                 av_log(h->s.avctx, AV_LOG_DEBUG, "mmco: unref long failure\n");
  509.             break;
  510.         case MMCO_LONG:
  511.                     // Comment below left from previous code as it is an interesting note.
  512.                     /* First field in pair is in short term list or
  513.                      * at a different long term index.
  514.                      * This is not allowed; see 7.4.3.3, notes 2 and 3.
  515.                      * Report the problem and keep the pair where it is,
  516.                      * and mark this field valid.
  517.                      */
  518.             if (h->long_ref[mmco[i].long_arg] != s->current_picture_ptr) {
  519.                 remove_long(h, mmco[i].long_arg, 0);
  520.                 h->long_ref[ mmco[i].long_arg ]= s->current_picture_ptr;
  521.                 h->long_ref[ mmco[i].long_arg ]->long_ref=1;
  522.                 h->long_ref_count++;
  523.             }
  524.             s->current_picture_ptr->reference |= s->picture_structure;
  525.             current_ref_assigned=1;
  526.             break;
  527.         case MMCO_SET_MAX_LONG:
  528.             assert(mmco[i].long_arg <= 16);
  529.             // just remove the long term which index is greater than new max
  530.             for(j = mmco[i].long_arg; j<16; j++){
  531.                 remove_long(h, j, 0);
  532.             }
  533.             break;
  534.         case MMCO_RESET:
  535.             while(h->short_ref_count){
  536.                 remove_short(h, h->short_ref[0]->frame_num, 0);
  537.             }
  538.             for(j = 0; j < 16; j++) {
  539.                 remove_long(h, j, 0);
  540.             }
  541.             s->current_picture_ptr->poc=
  542.             s->current_picture_ptr->field_poc[0]=
  543.             s->current_picture_ptr->field_poc[1]=
  544.             h->poc_lsb=
  545.             h->poc_msb=
  546.             h->frame_num=
  547.             s->current_picture_ptr->frame_num= 0;
  548.             break;
  549.         default: assert(0);
  550.         }
  551.     }
  552.     if (!current_ref_assigned) {
  553.         /* Second field of complementary field pair; the first field of
  554.          * which is already referenced. If short referenced, it
  555.          * should be first entry in short_ref. If not, it must exist
  556.          * in long_ref; trying to put it on the short list here is an
  557.          * error in the encoded bit stream (ref: 7.4.3.3, NOTE 2 and 3).
  558.          */
  559.         if (h->short_ref_count && h->short_ref[0] == s->current_picture_ptr) {
  560.             /* Just mark the second field valid */
  561.             s->current_picture_ptr->reference = PICT_FRAME;
  562.         } else if (s->current_picture_ptr->long_ref) {
  563.             av_log(h->s.avctx, AV_LOG_ERROR, "illegal short term reference "
  564.                                              "assignment for second field "
  565.                                              "in complementary field pair "
  566.                                              "(first field is long term)\n");
  567.         } else {
  568.             pic= remove_short(h, s->current_picture_ptr->frame_num, 0);
  569.             if(pic){
  570.                 av_log(h->s.avctx, AV_LOG_ERROR, "illegal short term buffer state detected\n");
  571.             }
  572.             if(h->short_ref_count)
  573.                 memmove(&h->short_ref[1], &h->short_ref[0], h->short_ref_count*sizeof(Picture*));
  574.             h->short_ref[0]= s->current_picture_ptr;
  575.             h->short_ref_count++;
  576.             s->current_picture_ptr->reference |= s->picture_structure;
  577.         }
  578.     }
  579.     if (h->long_ref_count + h->short_ref_count > h->sps.ref_frame_count){
  580.         /* We have too many reference frames, probably due to corrupted
  581.          * stream. Need to discard one frame. Prevents overrun of the
  582.          * short_ref and long_ref buffers.
  583.          */
  584.         av_log(h->s.avctx, AV_LOG_ERROR,
  585.                "number of reference frames exceeds max (probably "
  586.                "corrupt input), discarding one\n");
  587.         if (h->long_ref_count && !h->short_ref_count) {
  588.             for (i = 0; i < 16; ++i)
  589.                 if (h->long_ref[i])
  590.                     break;
  591.             assert(i < 16);
  592.             remove_long(h, i, 0);
  593.         } else {
  594.             pic = h->short_ref[h->short_ref_count - 1];
  595.             remove_short(h, pic->frame_num, 0);
  596.         }
  597.     }
  598.     print_short_term(h);
  599.     print_long_term(h);
  600.     return 0;
  601. }
  602. static int decode_ref_pic_marking(H264Context *h, GetBitContext *gb){
  603.     MpegEncContext * const s = &h->s;
  604.     int i;
  605.     h->mmco_index= 0;
  606.     if(h->nal_unit_type == NAL_IDR_SLICE){ //FIXME fields
  607.         s->broken_link= get_bits1(gb) -1;
  608.         if(get_bits1(gb)){
  609.             h->mmco[0].opcode= MMCO_LONG;
  610.             h->mmco[0].long_arg= 0;
  611.             h->mmco_index= 1;
  612.         }
  613.     }else{
  614.         if(get_bits1(gb)){ // adaptive_ref_pic_marking_mode_flag
  615.             for(i= 0; i<MAX_MMCO_COUNT; i++) {
  616.                 MMCOOpcode opcode= (MMCOOpcode)get_ue_golomb_31(gb);
  617.                 h->mmco[i].opcode= opcode;
  618.                 if(opcode==MMCO_SHORT2UNUSED || opcode==MMCO_SHORT2LONG){
  619.                     h->mmco[i].short_pic_num= (h->curr_pic_num - get_ue_golomb(gb) - 1) & (h->max_pic_num - 1);
  620. /*                    if(h->mmco[i].short_pic_num >= h->short_ref_count || h->short_ref[ h->mmco[i].short_pic_num ] == NULL){
  621.                         av_log(s->avctx, AV_LOG_ERROR, "illegal short ref in memory management control operation %d\n", mmco);
  622.                         return -1;
  623.                     }*/
  624.                 }
  625.                 if(opcode==MMCO_SHORT2LONG || opcode==MMCO_LONG2UNUSED || opcode==MMCO_LONG || opcode==MMCO_SET_MAX_LONG){
  626.                     unsigned int long_arg= get_ue_golomb_31(gb);
  627.                     if(long_arg >= 32 || (long_arg >= 16 && !(opcode == MMCO_LONG2UNUSED && FIELD_PICTURE))){
  628.                         av_log(h->s.avctx, AV_LOG_ERROR, "illegal long ref in memory management control operation %d\n", opcode);
  629.                         return -1;
  630.                     }
  631.                     h->mmco[i].long_arg= long_arg;
  632.                 }
  633.                 if(opcode > (unsigned)MMCO_LONG){
  634.                     av_log(h->s.avctx, AV_LOG_ERROR, "illegal memory management control operation %d\n", opcode);
  635.                     return -1;
  636.                 }
  637.                 if(opcode == MMCO_END)
  638.                     break;
  639.             }
  640.             h->mmco_index= i;
  641.         }else{
  642.             assert(h->long_ref_count + h->short_ref_count <= h->sps.ref_frame_count);
  643.             if(h->short_ref_count && h->long_ref_count + h->short_ref_count == h->sps.ref_frame_count &&
  644.                     !(FIELD_PICTURE && !s->first_field && s->current_picture_ptr->reference)) {
  645.                 h->mmco[0].opcode= MMCO_SHORT2UNUSED;
  646.                 h->mmco[0].short_pic_num= h->short_ref[ h->short_ref_count - 1 ]->frame_num;
  647.                 h->mmco_index= 1;
  648.                 if (FIELD_PICTURE) {
  649.                     h->mmco[0].short_pic_num *= 2;
  650.                     h->mmco[1].opcode= MMCO_SHORT2UNUSED;
  651.                     h->mmco[1].short_pic_num= h->mmco[0].short_pic_num + 1;
  652.                     h->mmco_index= 2;
  653.                 }
  654.             }
  655.         }
  656.     }
  657.     return 0;
  658. }
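/* When adaptive marking is not signalled, the else branch above implements
 * the sliding window: if the reference buffers are already full
 * (long_ref_count + short_ref_count == sps.ref_frame_count), an
 * MMCO_SHORT2UNUSED operation is synthesised for the oldest short-term
 * reference (both of its fields when field coding), so that
 * execute_ref_pic_marking() evicts it before the current picture is
 * stored. */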
  659. static int init_poc(H264Context *h){
  660.     MpegEncContext * const s = &h->s;
  661.     const int max_frame_num= 1<<h->sps.log2_max_frame_num;
  662.     int field_poc[2];
  663.     Picture *cur = s->current_picture_ptr;
  664.     h->frame_num_offset= h->prev_frame_num_offset;
  665.     if(h->frame_num < h->prev_frame_num)
  666.         h->frame_num_offset += max_frame_num;
  667.     if(h->sps.poc_type==0){
  668.         const int max_poc_lsb= 1<<h->sps.log2_max_poc_lsb;
  669.         if     (h->poc_lsb < h->prev_poc_lsb && h->prev_poc_lsb - h->poc_lsb >= max_poc_lsb/2)
  670.             h->poc_msb = h->prev_poc_msb + max_poc_lsb;
  671.         else if(h->poc_lsb > h->prev_poc_lsb && h->prev_poc_lsb - h->poc_lsb < -max_poc_lsb/2)
  672.             h->poc_msb = h->prev_poc_msb - max_poc_lsb;
  673.         else
  674.             h->poc_msb = h->prev_poc_msb;
  675. //printf("poc: %d %d\n", h->poc_msb, h->poc_lsb);
  676.         field_poc[0] =
  677.         field_poc[1] = h->poc_msb + h->poc_lsb;
  678.         if(s->picture_structure == PICT_FRAME)
  679.             field_poc[1] += h->delta_poc_bottom;
  680.     }else if(h->sps.poc_type==1){
  681.         int abs_frame_num, expected_delta_per_poc_cycle, expectedpoc;
  682.         int i;
  683.         if(h->sps.poc_cycle_length != 0)
  684.             abs_frame_num = h->frame_num_offset + h->frame_num;
  685.         else
  686.             abs_frame_num = 0;
  687.         if(h->nal_ref_idc==0 && abs_frame_num > 0)
  688.             abs_frame_num--;
  689.         expected_delta_per_poc_cycle = 0;
  690.         for(i=0; i < h->sps.poc_cycle_length; i++)
  691.             expected_delta_per_poc_cycle += h->sps.offset_for_ref_frame[ i ]; //FIXME integrate during sps parse
  692.         if(abs_frame_num > 0){
  693.             int poc_cycle_cnt          = (abs_frame_num - 1) / h->sps.poc_cycle_length;
  694.             int frame_num_in_poc_cycle = (abs_frame_num - 1) % h->sps.poc_cycle_length;
  695.             expectedpoc = poc_cycle_cnt * expected_delta_per_poc_cycle;
  696.             for(i = 0; i <= frame_num_in_poc_cycle; i++)
  697.                 expectedpoc = expectedpoc + h->sps.offset_for_ref_frame[ i ];
  698.         } else
  699.             expectedpoc = 0;
  700.         if(h->nal_ref_idc == 0)
  701.             expectedpoc = expectedpoc + h->sps.offset_for_non_ref_pic;
  702.         field_poc[0] = expectedpoc + h->delta_poc[0];
  703.         field_poc[1] = field_poc[0] + h->sps.offset_for_top_to_bottom_field;
  704.         if(s->picture_structure == PICT_FRAME)
  705.             field_poc[1] += h->delta_poc[1];
  706.     }else{
  707.         int poc= 2*(h->frame_num_offset + h->frame_num);
  708.         if(!h->nal_ref_idc)
  709.             poc--;
  710.         field_poc[0]= poc;
  711.         field_poc[1]= poc;
  712.     }
  713.     if(s->picture_structure != PICT_BOTTOM_FIELD)
  714.         s->current_picture_ptr->field_poc[0]= field_poc[0];
  715.     if(s->picture_structure != PICT_TOP_FIELD)
  716.         s->current_picture_ptr->field_poc[1]= field_poc[1];
  717.     cur->poc= FFMIN(cur->field_poc[0], cur->field_poc[1]);
  718.     return 0;
  719. }
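/* Example of the poc_type==0 wrap handling in init_poc(): with
 * log2_max_poc_lsb==4 (max_poc_lsb==16), prev_poc_lsb==14 and a new
 * poc_lsb of 2, the lsb dropped by 12 (>= 16/2), so poc_msb advances by 16
 * and the POC becomes prev_poc_msb + 16 + 2.  Conversely, a jump from lsb
 * 2 back up to 14 is treated as a wrap in the other direction and poc_msb
 * decreases by 16. */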
  720. /**
  721.  * initialize scan tables
  722.  */
  723. static void init_scan_tables(H264Context *h){
  724.     MpegEncContext * const s = &h->s;
  725.     int i;
  726.     if(s->dsp.h264_idct_add == ff_h264_idct_add_c){ //FIXME little ugly
  727.         memcpy(h->zigzag_scan, zigzag_scan, 16*sizeof(uint8_t));
  728.         memcpy(h-> field_scan,  field_scan, 16*sizeof(uint8_t));
  729.     }else{
  730.         for(i=0; i<16; i++){
  731. #define T(x) (x>>2) | ((x<<2) & 0xF)
  732.             h->zigzag_scan[i] = T(zigzag_scan[i]);
  733.             h-> field_scan[i] = T( field_scan[i]);
  734. #undef T
  735.         }
  736.     }
  737.     if(s->dsp.h264_idct8_add == ff_h264_idct8_add_c){
  738.         memcpy(h->zigzag_scan8x8,       zigzag_scan8x8,       64*sizeof(uint8_t));
  739.         memcpy(h->zigzag_scan8x8_cavlc, zigzag_scan8x8_cavlc, 64*sizeof(uint8_t));
  740.         memcpy(h->field_scan8x8,        field_scan8x8,        64*sizeof(uint8_t));
  741.         memcpy(h->field_scan8x8_cavlc,  field_scan8x8_cavlc,  64*sizeof(uint8_t));
  742.     }else{
  743.         for(i=0; i<64; i++){
  744. #define T(x) (x>>3) | ((x&7)<<3)
  745.             h->zigzag_scan8x8[i]       = T(zigzag_scan8x8[i]);
  746.             h->zigzag_scan8x8_cavlc[i] = T(zigzag_scan8x8_cavlc[i]);
  747.             h->field_scan8x8[i]        = T(field_scan8x8[i]);
  748.             h->field_scan8x8_cavlc[i]  = T(field_scan8x8_cavlc[i]);
  749. #undef T
  750.         }
  751.     }
  752.     if(h->sps.transform_bypass){ //FIXME same ugly
  753.         h->zigzag_scan_q0          = zigzag_scan;
  754.         h->zigzag_scan8x8_q0       = zigzag_scan8x8;
  755.         h->zigzag_scan8x8_cavlc_q0 = zigzag_scan8x8_cavlc;
  756.         h->field_scan_q0           = field_scan;
  757.         h->field_scan8x8_q0        = field_scan8x8;
  758.         h->field_scan8x8_cavlc_q0  = field_scan8x8_cavlc;
  759.     }else{
  760.         h->zigzag_scan_q0          = h->zigzag_scan;
  761.         h->zigzag_scan8x8_q0       = h->zigzag_scan8x8;
  762.         h->zigzag_scan8x8_cavlc_q0 = h->zigzag_scan8x8_cavlc;
  763.         h->field_scan_q0           = h->field_scan;
  764.         h->field_scan8x8_q0        = h->field_scan8x8;
  765.         h->field_scan8x8_cavlc_q0  = h->field_scan8x8_cavlc;
  766.     }
  767. }
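/* The T() permutations above transpose the scan tables: for the 4x4 scans,
 * (x>>2)|((x<<2)&0xF) swaps the two 2-bit row/column fields of an index in
 * 0..15, and for the 8x8 scans (x>>3)|((x&7)<<3) swaps the 3-bit fields of
 * an index in 0..63.  The permutation is applied only when a non-C IDCT is
 * selected, presumably because those implementations work on a transposed
 * coefficient layout. */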
  768. /**
  769.  * Replicates H264 "master" context to thread contexts.
  770.  */
  771. static void clone_slice(H264Context *dst, H264Context *src)
  772. {
  773.     memcpy(dst->block_offset,     src->block_offset, sizeof(dst->block_offset));
  774.     dst->s.current_picture_ptr  = src->s.current_picture_ptr;
  775.     dst->s.current_picture      = src->s.current_picture;
  776.     dst->s.linesize             = src->s.linesize;
  777.     dst->s.uvlinesize           = src->s.uvlinesize;
  778.     dst->s.first_field          = src->s.first_field;
  779.     dst->prev_poc_msb           = src->prev_poc_msb;
  780.     dst->prev_poc_lsb           = src->prev_poc_lsb;
  781.     dst->prev_frame_num_offset  = src->prev_frame_num_offset;
  782.     dst->prev_frame_num         = src->prev_frame_num;
  783.     dst->short_ref_count        = src->short_ref_count;
  784.     memcpy(dst->short_ref,        src->short_ref,        sizeof(dst->short_ref));
  785.     memcpy(dst->long_ref,         src->long_ref,         sizeof(dst->long_ref));
  786.     memcpy(dst->default_ref_list, src->default_ref_list, sizeof(dst->default_ref_list));
  787.     memcpy(dst->ref_list,         src->ref_list,         sizeof(dst->ref_list));
  788.     memcpy(dst->dequant4_coeff,   src->dequant4_coeff,   sizeof(src->dequant4_coeff));
  789.     memcpy(dst->dequant8_coeff,   src->dequant8_coeff,   sizeof(src->dequant8_coeff));
  790. }
  791. /**
  792.  * decodes a slice header.
  793.  * This will also call MPV_common_init() and frame_start() as needed.
  794.  *
  795.  * @param h the H264Context
  796.  * @param h0 h264 master context (differs from 'h' when doing sliced based parallel decoding)
  797.  *
  798.  * @return 0 if okay, <0 if an error occurred, 1 if decoding must not be multithreaded
  799.  */
  800. static int decode_slice_header(H264Context *h, H264Context *h0){
  801.     MpegEncContext * const s = &h->s;
  802.     MpegEncContext * const s0 = &h0->s;
  803.     unsigned int first_mb_in_slice;
  804.     unsigned int pps_id;
  805.     int num_ref_idx_active_override_flag;
  806.     unsigned int slice_type, tmp;
  807.     int i, j;
  808.     int default_ref_list_done = 0;
  809.     int last_pic_structure;
  810.     s->dropable= h->nal_ref_idc == 0;
  811.     if((s->avctx->flags2 & CODEC_FLAG2_FAST) && !h->nal_ref_idc){
  812.         s->me.qpel_put= s->dsp.put_2tap_qpel_pixels_tab;
  813.         s->me.qpel_avg= s->dsp.avg_2tap_qpel_pixels_tab;
  814.     }else{
  815.         s->me.qpel_put= s->dsp.put_h264_qpel_pixels_tab;
  816.         s->me.qpel_avg= s->dsp.avg_h264_qpel_pixels_tab;
  817.     }
  818.     first_mb_in_slice= get_ue_golomb(&s->gb);
  819.     if((s->flags2 & CODEC_FLAG2_CHUNKS) && first_mb_in_slice == 0){
  820.         h0->current_slice = 0;
  821.         if (!s0->first_field)
  822.             s->current_picture_ptr= NULL;
  823.     }
  824.     slice_type= get_ue_golomb_31(&s->gb);
  825.     if(slice_type > 9){
  826.         av_log(h->s.avctx, AV_LOG_ERROR, "slice type too large (%d) at %d %d\n", h->slice_type, s->mb_x, s->mb_y);
  827.         return -1;
  828.     }
  829.     if(slice_type > 4){
  830.         slice_type -= 5;
  831.         h->slice_type_fixed=1;
  832.     }else
  833.         h->slice_type_fixed=0;
  834.     slice_type= golomb_to_pict_type[ slice_type ];
  835.     if (slice_type == FF_I_TYPE
  836.         || (h0->current_slice != 0 && slice_type == h0->last_slice_type) ) {
  837.         default_ref_list_done = 1;
  838.     }
  839.     h->slice_type= slice_type;
  840.     h->slice_type_nos= slice_type & 3;
  841.     s->pict_type= h->slice_type; // to make a few old functions happy, it's wrong though
  842.     if (s->pict_type == FF_B_TYPE && s0->last_picture_ptr == NULL) {
  843.         av_log(h->s.avctx, AV_LOG_ERROR,
  844.                "B picture before any references, skipping\n");
  845.         return -1;
  846.     }
  847.     pps_id= get_ue_golomb(&s->gb);
  848.     if(pps_id>=MAX_PPS_COUNT){
  849.         av_log(h->s.avctx, AV_LOG_ERROR, "pps_id out of range\n");
  850.         return -1;
  851.     }
  852.     if(!h0->pps_buffers[pps_id]) {
  853.         av_log(h->s.avctx, AV_LOG_ERROR, "non-existing PPS referenced\n");
  854.         return -1;
  855.     }
  856.     h->pps= *h0->pps_buffers[pps_id];
  857.     if(!h0->sps_buffers[h->pps.sps_id]) {
  858.         av_log(h->s.avctx, AV_LOG_ERROR, "non-existing SPS referenced\n");
  859.         return -1;
  860.     }
  861.     h->sps = *h0->sps_buffers[h->pps.sps_id];
  862.     if(h == h0 && h->dequant_coeff_pps != pps_id){
  863.         h->dequant_coeff_pps = pps_id;
  864.         init_dequant_tables(h);
  865.     }
  866.     s->mb_width= h->sps.mb_width;
  867.     s->mb_height= h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
  868.     h->b_stride=  s->mb_width*4;
  869.     h->b8_stride= s->mb_width*2;
  870.     s->width = 16*s->mb_width - 2*FFMIN(h->sps.crop_right, 7);
  871.     if(h->sps.frame_mbs_only_flag)
  872.         s->height= 16*s->mb_height - 2*FFMIN(h->sps.crop_bottom, 7);
  873.     else
  874.         s->height= 16*s->mb_height - 4*FFMIN(h->sps.crop_bottom, 3);
  875.     if (s->context_initialized
  876.         && (   s->width != s->avctx->width || s->height != s->avctx->height)) {
  877.         if(h != h0)
  878.             return -1;   // width / height changed during parallelized decoding
  879.         free_tables(h);
  880.         flush_dpb(s->avctx);
  881.         MPV_common_end(s);
  882.     }
  883.     if (!s->context_initialized) {
  884.         if(h != h0)
  885.             return -1;  // we cant (re-)initialize context during parallel decoding
  886.         if (MPV_common_init(s) < 0)
  887.             return -1;
  888.         s->first_field = 0;
  889.         init_scan_tables(h);
  890.         alloc_tables(h);
  891.         for(i = 1; i < s->avctx->thread_count; i++) {
  892.             H264Context *c;
  893.             c = h->thread_context[i] = av_malloc(sizeof(H264Context));
  894.             memcpy(c, h->s.thread_context[i], sizeof(MpegEncContext));
  895.             memset(&c->s + 1, 0, sizeof(H264Context) - sizeof(MpegEncContext));
  896.             c->sps = h->sps;
  897.             c->pps = h->pps;
  898.             init_scan_tables(c);
  899.             clone_tables(c, h);
  900.         }
  901.         for(i = 0; i < s->avctx->thread_count; i++)
  902.             if(context_init(h->thread_context[i]) < 0)
  903.                 return -1;
  904.         s->avctx->width = s->width;
  905.         s->avctx->height = s->height;
  906.         s->avctx->sample_aspect_ratio= h->sps.sar;
  907.         if(!s->avctx->sample_aspect_ratio.den)
  908.             s->avctx->sample_aspect_ratio.den = 1;
  909.         if(h->sps.timing_info_present_flag){
  910.             //s->avctx->time_base= (AVRational){h->sps.num_units_in_tick * 2, h->sps.time_scale};
  911.             s->avctx->time_base.num = h->sps.num_units_in_tick * 2;
  912.             s->avctx->time_base.den = h->sps.time_scale;
  913.             if(h->x264_build > 0 && h->x264_build < 44)
  914.                 s->avctx->time_base.den *= 2;
  915.             av_reduce(&s->avctx->time_base.num, &s->avctx->time_base.den,
  916.                       s->avctx->time_base.num, s->avctx->time_base.den, 1<<30);
  917.         }
  918.     }
  919.     h->frame_num= get_bits(&s->gb, h->sps.log2_max_frame_num);
  920.     h->mb_mbaff = 0;
  921.     h->mb_aff_frame = 0;
  922.     last_pic_structure = s0->picture_structure;
  923.     if(h->sps.frame_mbs_only_flag){
  924.         s->picture_structure= PICT_FRAME;
  925.     }else{
  926.         if(get_bits1(&s->gb)) { //field_pic_flag
  927.             s->picture_structure= PICT_TOP_FIELD + get_bits1(&s->gb); //bottom_field_flag
  928.         } else {
  929.             s->picture_structure= PICT_FRAME;
  930.             h->mb_aff_frame = h->sps.mb_aff;
  931.         }
  932.     }
  933.     h->mb_field_decoding_flag= s->picture_structure != PICT_FRAME;
  934.     if(h0->current_slice == 0){
  935.         while(h->frame_num !=  h->prev_frame_num &&
  936.               h->frame_num != (h->prev_frame_num+1)%(1<<h->sps.log2_max_frame_num)){
  937.             av_log(NULL, AV_LOG_DEBUG, "Frame num gap %d %d\n", h->frame_num, h->prev_frame_num);
  938.             frame_start(h);
  939.             h->prev_frame_num++;
  940.             h->prev_frame_num %= 1<<h->sps.log2_max_frame_num;
  941.             s->current_picture_ptr->frame_num= h->prev_frame_num;
  942.             execute_ref_pic_marking(h, NULL, 0);
  943.         }
  944.         /* See if we have a decoded first field looking for a pair... */
  945.         if (s0->first_field) {
  946.             assert(s0->current_picture_ptr);
  947.             assert(s0->current_picture_ptr->data[0]);
  948.             assert(s0->current_picture_ptr->reference != DELAYED_PIC_REF);
  949.             /* figure out if we have a complementary field pair */
  950.             if (!FIELD_PICTURE || s->picture_structure == last_pic_structure) {
  951.                 /*
  952.                  * Previous field is unmatched. Don't display it, but let it
  953.                  * remain for reference if marked as such.
  954.                  */
  955.                 s0->current_picture_ptr = NULL;
  956.                 s0->first_field = FIELD_PICTURE;
  957.             } else {
  958.                 if (h->nal_ref_idc &&
  959.                         s0->current_picture_ptr->reference &&
  960.                         s0->current_picture_ptr->frame_num != h->frame_num) {
  961.                     /*
  962.                      * This and previous field were reference, but had
  963.                      * different frame_nums. Consider this field first in
  964.                      * pair. Throw away previous field except for reference
  965.                      * purposes.
  966.                      */
  967.                     s0->first_field = 1;
  968.                     s0->current_picture_ptr = NULL;
  969.                 } else {
  970.                     /* Second field in complementary pair */
  971.                     s0->first_field = 0;
  972.                 }
  973.             }
  974.         } else {
  975.             /* Frame or first field in a potentially complementary pair */
  976.             assert(!s0->current_picture_ptr);
  977.             s0->first_field = FIELD_PICTURE;
  978.         }
  979.         if((!FIELD_PICTURE || s0->first_field) && frame_start(h) < 0) {
  980.             s0->first_field = 0;
  981.             return -1;
  982.         }
  983.     }
  984.     if(h != h0)
  985.         clone_slice(h, h0);
  986.     s->current_picture_ptr->frame_num= h->frame_num; //FIXME frame_num cleanup
  987.     assert(s->mb_num == s->mb_width * s->mb_height);
  988.     if((int)first_mb_in_slice << FIELD_OR_MBAFF_PICTURE >= s->mb_num || (int)first_mb_in_slice >= s->mb_num)
  989.     {
  990.         av_log(h->s.avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
  991.         return -1;
  992.     }
  993.     s->resync_mb_x = s->mb_x = first_mb_in_slice % s->mb_width;
  994.     s->resync_mb_y = s->mb_y = (first_mb_in_slice / s->mb_width) << FIELD_OR_MBAFF_PICTURE;
  995.     if (s->picture_structure == PICT_BOTTOM_FIELD)
  996.         s->resync_mb_y = s->mb_y = s->mb_y + 1;
  997.     assert(s->mb_y < s->mb_height);
  998.     if(s->picture_structure==PICT_FRAME){
  999.         h->curr_pic_num=   h->frame_num;
  1000.         h->max_pic_num= 1<< h->sps.log2_max_frame_num;
  1001.     }else{
  1002.         h->curr_pic_num= 2*h->frame_num + 1;
  1003.         h->max_pic_num= 1<<(h->sps.log2_max_frame_num + 1);
  1004.     }
  1005.     if(h->nal_unit_type == NAL_IDR_SLICE){
  1006.         get_ue_golomb(&s->gb); /* idr_pic_id */
  1007.     }
  1008.     if(h->sps.poc_type==0){
  1009.         h->poc_lsb= get_bits(&s->gb, h->sps.log2_max_poc_lsb);
  1010.         if(h->pps.pic_order_present==1 && s->picture_structure==PICT_FRAME){
  1011.             h->delta_poc_bottom= get_se_golomb(&s->gb);
  1012.         }
  1013.     }
  1014.     if(h->sps.poc_type==1 && !h->sps.delta_pic_order_always_zero_flag){
  1015.         h->delta_poc[0]= get_se_golomb(&s->gb);
  1016.         if(h->pps.pic_order_present==1 && s->picture_structure==PICT_FRAME)
  1017.             h->delta_poc[1]= get_se_golomb(&s->gb);
  1018.     }
  1019.     init_poc(h);
  1020.     if(h->pps.redundant_pic_cnt_present){
  1021.         h->redundant_pic_count= get_ue_golomb(&s->gb);
  1022.     }
  1023.     //set defaults, might be overridden a few lines later
  1024.     h->ref_count[0]= h->pps.ref_count[0];
  1025.     h->ref_count[1]= h->pps.ref_count[1];
  1026.     if(h->slice_type_nos != FF_I_TYPE){
  1027.         if(h->slice_type_nos == FF_B_TYPE){
  1028.             h->direct_spatial_mv_pred= get_bits1(&s->gb);
  1029.         }
  1030.         num_ref_idx_active_override_flag= get_bits1(&s->gb);
  1031.         if(num_ref_idx_active_override_flag){
  1032.             h->ref_count[0]= get_ue_golomb(&s->gb) + 1;
  1033.             if(h->slice_type_nos==FF_B_TYPE)
  1034.                 h->ref_count[1]= get_ue_golomb(&s->gb) + 1;
  1035.             if(h->ref_count[0]-1 > 32-1 || h->ref_count[1]-1 > 32-1){
  1036.                 av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow\n");
  1037.                 h->ref_count[0]= h->ref_count[1]= 1;
  1038.                 return -1;
  1039.             }
  1040.         }
  1041.         if(h->slice_type_nos == FF_B_TYPE)
  1042.             h->list_count= 2;
  1043.         else
  1044.             h->list_count= 1;
  1045.     }else
  1046.         h->list_count= 0;
  1047.     if(!default_ref_list_done){
  1048.         fill_default_ref_list(h);
  1049.     }
  1050.     if(h->slice_type_nos!=FF_I_TYPE && decode_ref_pic_list_reordering(h) < 0)
  1051.         return -1;
  1052.     /*if(h->slice_type_nos!=FF_I_TYPE){
  1053.         s->last_picture_ptr= &h->ref_list[0][0];
  1054.         ff_copy_picture(&s->last_picture, s->last_picture_ptr);
  1055.     }
  1056.     if(h->slice_type_nos==FF_B_TYPE){
  1057.         s->next_picture_ptr= &h->ref_list[1][0];
  1058.         ff_copy_picture(&s->next_picture, s->next_picture_ptr);
  1059.     }*/
  1060.     if(   (h->pps.weighted_pred          && h->slice_type_nos == FF_P_TYPE )
  1061.        ||  (h->pps.weighted_bipred_idc==1 && h->slice_type_nos== FF_B_TYPE ) )
  1062.         pred_weight_table(h);
  1063.     else if(h->pps.weighted_bipred_idc==2 && h->slice_type_nos== FF_B_TYPE)
  1064.         implicit_weight_table(h);
  1065.     else
  1066.         h->use_weight = 0;
  1067.     if(h->nal_ref_idc)
  1068.         decode_ref_pic_marking(h0, &s->gb);
  1069.     if(FRAME_MBAFF)
  1070.         fill_mbaff_ref_list(h);
  1071.     if(h->slice_type_nos==FF_B_TYPE && !h->direct_spatial_mv_pred)
  1072.         direct_dist_scale_factor(h);
  1073.     direct_ref_list_init(h);
  1074.     if( h->slice_type_nos != FF_I_TYPE && h->pps.cabac ){
  1075.         tmp = get_ue_golomb_31(&s->gb);
  1076.         if(tmp > 2){
  1077.             av_log(s->avctx, AV_LOG_ERROR, "cabac_init_idc overflow\n");
  1078.             return -1;
  1079.         }
  1080.         h->cabac_init_idc= tmp;
  1081.     }
  1082.     h->last_qscale_diff = 0;
  1083.     tmp = h->pps.init_qp + get_se_golomb(&s->gb);
  1084.     if(tmp>51){
  1085.         av_log(s->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
  1086.         return -1;
  1087.     }
  1088.     s->qscale= tmp;
  1089.     h->chroma_qp[0] = get_chroma_qp(h, 0, s->qscale);
  1090.     h->chroma_qp[1] = get_chroma_qp(h, 1, s->qscale);
  1091.     //FIXME qscale / qp ... stuff
  1092.     if(h->slice_type == FF_SP_TYPE){
  1093.         get_bits1(&s->gb); /* sp_for_switch_flag */
  1094.     }
  1095.     if(h->slice_type==FF_SP_TYPE || h->slice_type == FF_SI_TYPE){
  1096.         get_se_golomb(&s->gb); /* slice_qs_delta */
  1097.     }
  1098.     h->deblocking_filter = 1;
  1099.     h->slice_alpha_c0_offset = 0;
  1100.     h->slice_beta_offset = 0;
  1101.     if( h->pps.deblocking_filter_parameters_present ) {
  1102.         tmp= get_ue_golomb_31(&s->gb);
  1103.         if(tmp > 2){
  1104.             av_log(s->avctx, AV_LOG_ERROR, "deblocking_filter_idc %u out of range\n", tmp);
  1105.             return -1;
  1106.         }
  1107.         h->deblocking_filter= tmp;
  1108.         if(h->deblocking_filter < 2)
  1109.             h->deblocking_filter^= 1; // 1<->0
  1110.         if( h->deblocking_filter ) {
  1111.             h->slice_alpha_c0_offset = get_se_golomb(&s->gb) << 1;
  1112.             h->slice_beta_offset = get_se_golomb(&s->gb) << 1;
  1113.         }
  1114.     }
  1115.     if(   s->avctx->skip_loop_filter >= AVDISCARD_ALL
  1116.        ||(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY && h->slice_type_nos != FF_I_TYPE)
  1117.        ||(s->avctx->skip_loop_filter >= AVDISCARD_BIDIR  && h->slice_type_nos == FF_B_TYPE)
  1118.        ||(s->avctx->skip_loop_filter >= AVDISCARD_NONREF && h->nal_ref_idc == 0))
  1119.         h->deblocking_filter= 0;
  1120.     if(h->deblocking_filter == 1 && h0->max_contexts > 1) {
  1121.         if(s->avctx->flags2 & CODEC_FLAG2_FAST) {
  1122.             /* Cheat slightly for speed:
  1123.                Do not bother to deblock across slices. */
  1124.             h->deblocking_filter = 2;
  1125.         } else {
  1126.             h0->max_contexts = 1;
  1127.             if(!h0->single_decode_warning) {
  1128.                 av_log(s->avctx, AV_LOG_INFO, "Cannot parallelize deblocking type 1, decoding such frames in sequential order\n");
  1129.                 h0->single_decode_warning = 1;
  1130.             }
  1131.             if(h != h0)
  1132.                 return 1; // deblocking switched inside frame
  1133.         }
  1134.     }
  1135. #if 0 //FMO
  1136.     if( h->pps.num_slice_groups > 1  && h->pps.mb_slice_group_map_type >= 3 && h->pps.mb_slice_group_map_type <= 5)
  1137.         slice_group_change_cycle= get_bits(&s->gb, ?);
  1138. #endif
  1139.     h0->last_slice_type = slice_type;
  1140.     h->slice_num = ++h0->current_slice;
  1141.     if(h->slice_num >= MAX_SLICES){
  1142.         av_log(s->avctx, AV_LOG_ERROR, "Too many slices, increase MAX_SLICES and recompile\n");
  1143.     }
  1144.     for(j=0; j<2; j++){
  1145.         int *ref2frm= h->ref2frm[h->slice_num&(MAX_SLICES-1)][j];
  1146.         ref2frm[0]=
  1147.         ref2frm[1]= -1;
  1148.         for(i=0; i<16; i++)
  1149.             ref2frm[i+2]= 4*h->ref_list[j][i].frame_num
  1150.                           +(h->ref_list[j][i].reference&3);
  1151.         ref2frm[18+0]=
  1152.         ref2frm[18+1]= -1;
  1153.         for(i=16; i<48; i++)
  1154.             ref2frm[i+4]= 4*h->ref_list[j][i].frame_num
  1155.                           +(h->ref_list[j][i].reference&3);
  1156.     }
  1157.     h->emu_edge_width= (s->flags&CODEC_FLAG_EMU_EDGE) ? 0 : 16;
  1158.     h->emu_edge_height= (FRAME_MBAFF || FIELD_PICTURE) ? 0 : h->emu_edge_width;
  1159.     s->avctx->refs= h->sps.ref_frame_count;
  1160.     if(s->avctx->debug&FF_DEBUG_PICT_INFO){
  1161.         av_log(h->s.avctx, AV_LOG_DEBUG, "slice:%d %s mb:%d %c%s%s pps:%u frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
  1162.                h->slice_num,
  1163.                (s->picture_structure==PICT_FRAME ? "F" : s->picture_structure==PICT_TOP_FIELD ? "T" : "B"),
  1164.                first_mb_in_slice,
  1165.                av_get_pict_type_char(h->slice_type), h->slice_type_fixed ? " fix" : "", h->nal_unit_type == NAL_IDR_SLICE ? " IDR" : "",
  1166.                pps_id, h->frame_num,
  1167.                s->current_picture_ptr->field_poc[0], s->current_picture_ptr->field_poc[1],
  1168.                h->ref_count[0], h->ref_count[1],
  1169.                s->qscale,
  1170.                h->deblocking_filter, h->slice_alpha_c0_offset/2, h->slice_beta_offset/2,
  1171.                h->use_weight,
  1172.                h->use_weight==1 && h->use_weight_chroma ? "c" : "",
  1173.                h->slice_type == FF_B_TYPE ? (h->direct_spatial_mv_pred ? "SPAT" : "TEMP") : ""
  1174.                );
  1175.     }
  1176.     return 0;
  1177. }
  1178. /**
  1179.  * gets the level_prefix (number of leading zero bits before the next set bit) used by the CAVLC level decoder.
  1180.  */
  1181. static inline int get_level_prefix(GetBitContext *gb){
  1182.     unsigned int buf;
  1183.     int log;
  1184.     OPEN_READER(re, gb);
  1185.     UPDATE_CACHE(re, gb);
  1186.     buf=GET_CACHE(re, gb);
  1187.     log= 32 - av_log2(buf);
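    /* buf holds the upcoming bits MSB-aligned, so av_log2() gives the position
     * of the first set bit and log-1 is the number of leading zeros, i.e. the
     * level_prefix. Worked example: upcoming bits "001..." -> av_log2 = 29,
     * log = 3, three bits (the two zeros and the terminating one) are skipped
     * and 2 is returned. */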
  1188. #ifdef TRACE
  1189.     print_bin(buf>>(32-log), log);
  1190.     av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d lpr @%5d in %s get_level_prefix\n", buf>>(32-log), log, log-1, get_bits_count(gb), __FILE__);
  1191. #endif
  1192.     LAST_SKIP_BITS(re, gb, log);
  1193.     CLOSE_READER(re, gb);
  1194.     return log-1;
  1195. }
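/**
 * checks whether the 8x8 transform may be used for the current macroblock.
 * All four sub_mb_type entries are tested at once: multiplying the flag mask by
 * 0x0001000100010001ULL broadcasts it into each 16-bit lane of the packed
 * sub_mb_type array. At the sub-macroblock level the 16x8/8x16/8x8 flags are
 * reused to mark 8x4/4x8/4x4 splits, so the test rejects anything smaller than
 * 8x8 (and, without direct_8x8_inference, direct sub-blocks as well).
 */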
  1196. static inline int get_dct8x8_allowed(H264Context *h){
  1197.     if(h->sps.direct_8x8_inference_flag)
  1198.         return !(*(uint64_t*)h->sub_mb_type & ((MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_8x8                )*0x0001000100010001ULL));
  1199.     else
  1200.         return !(*(uint64_t*)h->sub_mb_type & ((MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_8x8|MB_TYPE_DIRECT2)*0x0001000100010001ULL));
  1201. }
  1202. /**
  1203.  * decodes a residual block.
  1204.  * @param n block index
  1205.  * @param scantable scantable
  1206.  * @param max_coeff number of coefficients in the block
  1207.  * @return <0 if an error occurred
  1208.  */
  1209. static int decode_residual(H264Context *h, GetBitContext *gb, DCTELEM *block, int n, const uint8_t *scantable, const uint32_t *qmul, int max_coeff){
  1210.     MpegEncContext * const s = &h->s;
  1211.     static const int coeff_token_table_index[17]= {0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3};
  1212.     int level[16];
  1213.     int zeros_left, coeff_num, coeff_token, total_coeff, i, j, trailing_ones, run_before;
  1214.     //FIXME put trailing_ones into the context
  1215.     if(n == CHROMA_DC_BLOCK_INDEX){
  1216.         coeff_token= get_vlc2(gb, chroma_dc_coeff_token_vlc.table, CHROMA_DC_COEFF_TOKEN_VLC_BITS, 1);
  1217.         total_coeff= coeff_token>>2;
  1218.     }else{
  1219.         if(n == LUMA_DC_BLOCK_INDEX){
  1220.             total_coeff= pred_non_zero_count(h, 0);
  1221.             coeff_token= get_vlc2(gb, coeff_token_vlc[ coeff_token_table_index[total_coeff] ].table, COEFF_TOKEN_VLC_BITS, 2);
  1222.             total_coeff= coeff_token>>2;
  1223.         }else{
  1224.             total_coeff= pred_non_zero_count(h, n);
  1225.             coeff_token= get_vlc2(gb, coeff_token_vlc[ coeff_token_table_index[total_coeff] ].table, COEFF_TOKEN_VLC_BITS, 2);
  1226.             total_coeff= coeff_token>>2;
  1227.             h->non_zero_count_cache[ scan8[n] ]= total_coeff;
  1228.         }
  1229.     }
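    /* coeff_token packs both values: total_coeff (number of non-zero levels) in
     * the high bits and trailing_ones (up to three trailing +/-1 levels) in the
     * low two bits, hence the >>2 above and the &3 below. */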
  1230.     //FIXME set last_non_zero?
  1231.     if(total_coeff==0)
  1232.         return 0;
  1233.     if(total_coeff > max_coeff){
  1235.         av_log(h->s.avctx, AV_LOG_ERROR, "corrupted macroblock %d %d (total_coeff=%d)\n", s->mb_x, s->mb_y, total_coeff);
  1236.         return -1;
  1237.     }
  1238.     trailing_ones= coeff_token&3;
  1239.     tprintf(h->s.avctx, "trailing:%d, total:%d\n", trailing_ones, total_coeff);
  1240.     assert(total_coeff<=16);
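    /* The trailing +/-1 levels are coded as one sign bit each: peek three bits,
     * consume only trailing_ones of them, and turn each bit into +1 (bit 0) or
     * -1 (bit 1). E.g. trailing_ones == 2 with next bits "10" gives
     * level[0] = -1, level[1] = +1; unused entries are overwritten below. */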
  1241.     i = show_bits(gb, 3);
  1242.     skip_bits(gb, trailing_ones);
  1243.     level[0] = 1-((i&4)>>1);
  1244.     level[1] = 1-((i&2)   );
  1245.     level[2] = 1-((i&1)<<1);
  1246.     if(trailing_ones<total_coeff) {
  1247.         int mask, prefix;
  1248.         int suffix_length = total_coeff > 10 && trailing_ones < 3;
  1249.         int bitsi= show_bits(gb, LEVEL_TAB_BITS);
  1250.         int level_code= cavlc_level_tab[suffix_length][bitsi][0];
  1251.         skip_bits(gb, cavlc_level_tab[suffix_length][bitsi][1]);
  1252.         if(level_code >= 100){
  1253.             prefix= level_code - 100;
  1254.             if(prefix == LEVEL_TAB_BITS)
  1255.                 prefix += get_level_prefix(gb);
  1256.             //first coefficient has suffix_length equal to 0 or 1
  1257.             if(prefix<14){ //FIXME try to build a large unified VLC table for all this
  1258.                 if(suffix_length)
  1259.                     level_code= (prefix<<suffix_length) + get_bits(gb, suffix_length); //part
  1260.                 else
  1261.                     level_code= (prefix<<suffix_length); //part
  1262.             }else if(prefix==14){
  1263.                 if(suffix_length)
  1264.                     level_code= (prefix<<suffix_length) + get_bits(gb, suffix_length); //part
  1265.                 else
  1266.                     level_code= prefix + get_bits(gb, 4); //part
  1267.             }else{
  1268.                 level_code= (15<<suffix_length) + get_bits(gb, prefix-3); //part
  1269.                 if(suffix_length==0) level_code+=15; //FIXME doesn't make (much) sense
  1270.                 if(prefix>=16)
  1271.                     level_code += (1<<(prefix-3))-4096;
  1272.             }
  1273.             if(trailing_ones < 3) level_code += 2;
  1274.             suffix_length = 2;
  1275.             mask= -(level_code&1);
  1276.             level[trailing_ones]= (((2+level_code)>>1) ^ mask) - mask;
  1277.         }else{
  1278.             if(trailing_ones < 3) level_code += (level_code>>31)|1;
  1279.             suffix_length = 1;
  1280.             if(level_code + 3U > 6U)
  1281.                 suffix_length++;
  1282.             level[trailing_ones]= level_code;
  1283.         }
  1284.         //remaining coefficients have suffix_length > 0
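        /* suffix_length adapts as the levels grow: it is bumped once |level|
         * exceeds suffix_limit[suffix_length] (= 3 << (suffix_length-1)), up to
         * a maximum of 6. The unsigned comparison at the bottom of the loop is
         * an |level_code| > suffix_limit test that works for both signs. */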
  1285.         for(i=trailing_ones+1;i<total_coeff;i++) {
  1286.             static const unsigned int suffix_limit[7] = {0,3,6,12,24,48,INT_MAX };
  1287.             int bitsi= show_bits(gb, LEVEL_TAB_BITS);
  1288.             level_code= cavlc_level_tab[suffix_length][bitsi][0];
  1289.             skip_bits(gb, cavlc_level_tab[suffix_length][bitsi][1]);
  1290.             if(level_code >= 100){
  1291.                 prefix= level_code - 100;
  1292.                 if(prefix == LEVEL_TAB_BITS){
  1293.                     prefix += get_level_prefix(gb);
  1294.                 }
  1295.                 if(prefix<15){
  1296.                     level_code = (prefix<<suffix_length) + get_bits(gb, suffix_length);
  1297.                 }else{
  1298.                     level_code = (15<<suffix_length) + get_bits(gb, prefix-3);
  1299.                     if(prefix>=16)
  1300.                         level_code += (1<<(prefix-3))-4096;
  1301.                 }
  1302.                 mask= -(level_code&1);
  1303.                 level_code= (((2+level_code)>>1) ^ mask) - mask;
  1304.             }
  1305.             level[i]= level_code;
  1306.             if(suffix_limit[suffix_length] + level_code > 2U*suffix_limit[suffix_length])
  1307.                 suffix_length++;
  1308.         }
  1309.     }
  1310.     if(total_coeff == max_coeff)
  1311.         zeros_left=0;
  1312.     else{
  1313.         if(n == CHROMA_DC_BLOCK_INDEX)
  1314.             zeros_left= get_vlc2(gb, chroma_dc_total_zeros_vlc[ total_coeff-1 ].table, CHROMA_DC_TOTAL_ZEROS_VLC_BITS, 1);
  1315.         else
  1316.             zeros_left= get_vlc2(gb, total_zeros_vlc[ total_coeff-1 ].table, TOTAL_ZEROS_VLC_BITS, 1);
  1317.     }
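    /* zeros_left (total_zeros) is the number of zero coefficients sitting below
     * the last non-zero one in scan order, so that coefficient lives at scan
     * position zeros_left + total_coeff - 1. Levels were decoded highest
     * position first, so they are now placed back to front, each run_before
     * telling how many zeros precede the next one. Example: total_coeff = 2,
     * zeros_left = 3 puts level[0] at scantable[4]; a run_before of 2 then puts
     * level[1] at scantable[1]. */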
  1318.     coeff_num = zeros_left + total_coeff - 1;
  1319.     j = scantable[coeff_num];
  1320.     if(n > 24){
  1321.         block[j] = level[0];
  1322.         for(i=1;i<total_coeff;i++) {
  1323.             if(zeros_left <= 0)
  1324.                 run_before = 0;
  1325.             else if(zeros_left < 7){
  1326.                 run_before= get_vlc2(gb, run_vlc[zeros_left-1].table, RUN_VLC_BITS, 1);
  1327.             }else{
  1328.                 run_before= get_vlc2(gb, run7_vlc.table, RUN7_VLC_BITS, 2);
  1329.             }
  1330.             zeros_left -= run_before;
  1331.             coeff_num -= 1 + run_before;
  1332.             j= scantable[ coeff_num ];
  1333.             block[j]= level[i];
  1334.         }
  1335.     }else{
  1336.         block[j] = (level[0] * qmul[j] + 32)>>6;
  1337.         for(i=1;i<total_coeff;i++) {
  1338.             if(zeros_left <= 0)
  1339.                 run_before = 0;
  1340.             else if(zeros_left < 7){
  1341.                 run_before= get_vlc2(gb, run_vlc[zeros_left-1].table, RUN_VLC_BITS, 1);
  1342.             }else{
  1343.                 run_before= get_vlc2(gb, run7_vlc.table, RUN7_VLC_BITS, 2);
  1344.             }
  1345.             zeros_left -= run_before;
  1346.             coeff_num -= 1 + run_before;
  1347.             j= scantable[ coeff_num ];
  1348.             block[j]= (level[i] * qmul[j] + 32)>>6;
  1349.         }
  1350.     }
  1351.     if(zeros_left<0){
  1352.         av_log(h->s.avctx, AV_LOG_ERROR, "negative number of zero coeffs at %d %d\n", s->mb_x, s->mb_y);
  1353.         return -1;
  1354.     }
  1355.     return 0;
  1356. }
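/**
 * guesses mb_field_decoding_flag for a macroblock pair whose flag is not coded
 * (e.g. both macroblocks skipped): it is inferred from the left neighbour if
 * that one belongs to the same slice, otherwise from the top neighbour,
 * otherwise frame decoding (0) is assumed.
 */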
  1357. static void predict_field_decoding_flag(H264Context *h){
  1358.     MpegEncContext * const s = &h->s;
  1359.     const int mb_xy= h->mb_xy;
  1360.     int mb_type = (h->slice_table[mb_xy-1] == h->slice_num)
  1361.                 ? s->current_picture.mb_type[mb_xy-1]
  1362.                 : (h->slice_table[mb_xy-s->mb_stride] == h->slice_num)
  1363.                 ? s->current_picture.mb_type[mb_xy-s->mb_stride]
  1364.                 : 0;
  1365.     h->mb_mbaff = h->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
  1366. }
  1367. /**
  1368.  * decodes a P_SKIP or B_SKIP macroblock
  1369.  */
  1370. static void decode_mb_skip(H264Context *h){
  1371.     MpegEncContext * const s = &h->s;
  1372.     const int mb_xy= h->mb_xy;
  1373.     int mb_type=0;
  1374.     memset(h->non_zero_count[mb_xy], 0, 16);
  1375.     memset(h->non_zero_count_cache + 8, 0, 8*5); //FIXME ugly, remove pfui
  1376.     if(MB_FIELD)
  1377.         mb_type|= MB_TYPE_INTERLACED;
  1378.     if( h->slice_type_nos == FF_B_TYPE )
  1379.     {
  1380.         // just for fill_caches. pred_direct_motion will set the real mb_type
  1381.         mb_type|= MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2|MB_TYPE_SKIP;
  1382.         fill_caches(h, mb_type, 0); //FIXME check what is needed and what not ...
  1383.         pred_direct_motion(h, &mb_type);
  1384.         mb_type|= MB_TYPE_SKIP;
  1385.     }
  1386.     else
  1387.     {
  1388.         int mx, my;
  1389.         mb_type|= MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P1L0|MB_TYPE_SKIP;
  1390.         fill_caches(h, mb_type, 0); //FIXME check what is needed and what not ...
  1391.         pred_pskip_motion(h, &mx, &my);
  1392.         fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, 0, 1);
  1393.         fill_rectangle(  h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mx,my), 4);
  1394.     }
  1395.     write_back_motion(h, mb_type);
  1396.     s->current_picture.mb_type[mb_xy]= mb_type;
  1397.     s->current_picture.qscale_table[mb_xy]= s->qscale;
  1398.     h->slice_table[ mb_xy ]= h->slice_num;
  1399.     h->prev_mb_skipped= 1;
  1400. }
  1401. /**
  1402.  * decodes a macroblock
  1403.  * @returns 0 if OK, AC_ERROR / DC_ERROR / MV_ERROR if an error is noticed
  1404.  */
  1405. static int decode_mb_cavlc(H264Context *h){
  1406.     MpegEncContext * const s = &h->s;
  1407.     int mb_xy;
  1408.     int partition_count;
  1409.     unsigned int mb_type, cbp;
  1410.     int dct8x8_allowed= h->pps.transform_8x8_mode;
  1411.     mb_xy = h->mb_xy = s->mb_x + s->mb_y*s->mb_stride;
  1412.     tprintf(s->avctx, "pic:%d mb:%d/%d\n", h->frame_num, s->mb_x, s->mb_y);
  1413.     cbp = 0; /* avoid warning. FIXME: find a solution without slowing
  1414.                 down the code */
  1415.     if(h->slice_type_nos != FF_I_TYPE){
  1416.         if(s->mb_skip_run==-1)
  1417.             s->mb_skip_run= get_ue_golomb(&s->gb);
  1418.         if (s->mb_skip_run--) {
  1419.             if(FRAME_MBAFF && (s->mb_y&1) == 0){
  1420.                 if(s->mb_skip_run==0)
  1421.                     h->mb_mbaff = h->mb_field_decoding_flag = get_bits1(&s->gb);
  1422.                 else
  1423.                     predict_field_decoding_flag(h);
  1424.             }
  1425.             decode_mb_skip(h);
  1426.             return 0;
  1427.         }
  1428.     }
  1429.     if(FRAME_MBAFF){
  1430.         if( (s->mb_y&1) == 0 )
  1431.             h->mb_mbaff = h->mb_field_decoding_flag = get_bits1(&s->gb);
  1432.     }
  1433.     h->prev_mb_skipped= 0;
  1434.     mb_type= get_ue_golomb(&s->gb);
  1435.     if(h->slice_type_nos == FF_B_TYPE){
  1436.         if(mb_type < 23){
  1437.             partition_count= b_mb_type_info[mb_type].partition_count;
  1438.             mb_type=         b_mb_type_info[mb_type].type;
  1439.         }else{
  1440.             mb_type -= 23;
  1441.             goto decode_intra_mb;
  1442.         }
  1443.     }else if(h->slice_type_nos == FF_P_TYPE){
  1444.         if(mb_type < 5){
  1445.             partition_count= p_mb_type_info[mb_type].partition_count;
  1446.             mb_type=         p_mb_type_info[mb_type].type;
  1447.         }else{
  1448.             mb_type -= 5;
  1449.             goto decode_intra_mb;
  1450.         }
  1451.     }else{
  1452.        assert(h->slice_type_nos == FF_I_TYPE);
  1453.         if(h->slice_type == FF_SI_TYPE && mb_type)
  1454.             mb_type--;
  1455. decode_intra_mb:
  1456.         if(mb_type > 25){
  1457.             av_log(h->s.avctx, AV_LOG_ERROR, "mb_type %d in %c slice too large at %d %d\n", mb_type, av_get_pict_type_char(h->slice_type), s->mb_x, s->mb_y);
  1458.             return -1;
  1459.         }
  1460.         partition_count=0;
  1461.         cbp= i_mb_type_info[mb_type].cbp;
  1462.         h->intra16x16_pred_mode= i_mb_type_info[mb_type].pred_mode;
  1463.         mb_type= i_mb_type_info[mb_type].type;
  1464.     }
  1465.     if(MB_FIELD)
  1466.         mb_type |= MB_TYPE_INTERLACED;
  1467.     h->slice_table[ mb_xy ]= h->slice_num;
  1468.     if(IS_INTRA_PCM(mb_type)){
  1469.         int x;
  1470.         // We assume these blocks are very rare so we do not optimize them.
  1471.         align_get_bits(&s->gb);
  1472.         // The pixels are stored in the same order as levels in h->mb array.
  1473.         for(x=0; x < (CHROMA ? 384 : 256); x++){
  1474.             ((uint8_t*)h->mb)[x]= get_bits(&s->gb, 8);
  1475.         }
  1476.         // In deblocking, the quantizer is 0
  1477.         s->current_picture.qscale_table[mb_xy]= 0;
  1478.         // All coeffs are present
  1479.         memset(h->non_zero_count[mb_xy], 16, 16);
  1480.         s->current_picture.mb_type[mb_xy]= mb_type;
  1481.         return 0;
  1482.     }
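    /* For a field-coded macroblock pair in an MBAFF frame each reference frame
     * is addressed as two separate fields, so the list sizes are doubled while
     * this macroblock is parsed and halved again at the end of the function. */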
  1483.     if(MB_MBAFF){
  1484.         h->ref_count[0] <<= 1;
  1485.         h->ref_count[1] <<= 1;
  1486.     }
  1487.     fill_caches(h, mb_type, 0);
  1488.     //mb_pred
  1489.     if(IS_INTRA(mb_type)){
  1490.         int pred_mode;
  1491. //            init_top_left_availability(h);
  1492.         if(IS_INTRA4x4(mb_type)){
  1493.             int i;
  1494.             int di = 1;
  1495.             if(dct8x8_allowed && get_bits1(&s->gb)){
  1496.                 mb_type |= MB_TYPE_8x8DCT;
  1497.                 di = 4;
  1498.             }
  1499. //                fill_intra4x4_pred_table(h);
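            /* Per 4x4 (or 8x8 when di == 4) luma block: one flag selects the
             * most probable mode predicted from the left/top neighbours;
             * otherwise a 3-bit rem_intra4x4_pred_mode follows, bumped by one
             * when it is >= the prediction so the predicted mode itself never
             * needs to be signalled. With the 8x8 transform each decoded mode
             * is replicated over a 2x2 area of the prediction-mode cache. */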
  1500.             for(i=0; i<16; i+=di){
  1501.                 int mode= pred_intra_mode(h, i);
  1502.                 if(!get_bits1(&s->gb)){
  1503.                     const int rem_mode= get_bits(&s->gb, 3);
  1504.                     mode = rem_mode + (rem_mode >= mode);
  1505.                 }
  1506.                 if(di==4)
  1507.                     fill_rectangle( &h->intra4x4_pred_mode_cache[ scan8[i] ], 2, 2, 8, mode, 1 );
  1508.                 else
  1509.                     h->intra4x4_pred_mode_cache[ scan8[i] ] = mode;
  1510.             }
  1511.             write_back_intra_pred_mode(h);
  1512.             if( check_intra4x4_pred_mode(h) < 0)
  1513.                 return -1;
  1514.         }else{
  1515.             h->intra16x16_pred_mode= check_intra_pred_mode(h, h->intra16x16_pred_mode);
  1516.             if(h->intra16x16_pred_mode < 0)
  1517.                 return -1;
  1518.         }
  1519.         if(CHROMA){
  1520.             pred_mode= check_intra_pred_mode(h, get_ue_golomb_31(&s->gb));
  1521.             if(pred_mode < 0)
  1522.                 return -1;
  1523.             h->chroma_pred_mode= pred_mode;
  1524.         }
  1525.     }else if(partition_count==4){
  1526.         int i, j, sub_partition_count[4], list, ref[2][4];
  1527.         if(h->slice_type_nos == FF_B_TYPE){
  1528.             for(i=0; i<4; i++){
  1529.                 h->sub_mb_type[i]= get_ue_golomb_31(&s->gb);
  1530.                 if(h->sub_mb_type[i] >=13){
  1531.                     av_log(h->s.avctx, AV_LOG_ERROR, "B sub_mb_type %u out of range at %d %d\n", h->sub_mb_type[i], s->mb_x, s->mb_y);
  1532.                     return -1;
  1533.                 }
  1534.                 sub_partition_count[i]= b_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count;
  1535.                 h->sub_mb_type[i]=      b_sub_mb_type_info[ h->sub_mb_type[i] ].type;
  1536.             }
  1537.             if(   IS_DIRECT(h->sub_mb_type[0]) || IS_DIRECT(h->sub_mb_type[1])
  1538.                || IS_DIRECT(h->sub_mb_type[2]) || IS_DIRECT(h->sub_mb_type[3])) {
  1539.                 pred_direct_motion(h, &mb_type);
  1540.                 h->ref_cache[0][scan8[4]] =
  1541.                 h->ref_cache[1][scan8[4]] =
  1542.                 h->ref_cache[0][scan8[12]] =
  1543.                 h->ref_cache[1][scan8[12]] = PART_NOT_AVAILABLE;
  1544.             }
  1545.         }else{
  1546.             assert(h->slice_type_nos == FF_P_TYPE); //FIXME SP correct ?
  1547.             for(i=0; i<4; i++){
  1548.                 h->sub_mb_type[i]= get_ue_golomb_31(&s->gb);
  1549.                 if(h->sub_mb_type[i] >=4){
  1550.                     av_log(h->s.avctx, AV_LOG_ERROR, "P sub_mb_type %u out of range at %d %d\n", h->sub_mb_type[i], s->mb_x, s->mb_y);
  1551.                     return -1;
  1552.                 }
  1553.                 sub_partition_count[i]= p_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count;
  1554.                 h->sub_mb_type[i]=      p_sub_mb_type_info[ h->sub_mb_type[i] ].type;
  1555.             }
  1556.         }
  1557.         for(list=0; list<h->list_count; list++){
  1558.             int ref_count= IS_REF0(mb_type) ? 1 : h->ref_count[list];
  1559.             for(i=0; i<4; i++){
  1560.                 if(IS_DIRECT(h->sub_mb_type[i])) continue;
  1561.                 if(IS_DIR(h->sub_mb_type[i], 0, list)){
  1562.                     unsigned int tmp;
  1563.                     if(ref_count == 1){
  1564.                         tmp= 0;
  1565.                     }else if(ref_count == 2){
  1566.                         tmp= get_bits1(&s->gb)^1;
  1567.                     }else{
  1568.                         tmp= get_ue_golomb_31(&s->gb);
  1569.                         if((int)tmp>=ref_count){
  1570.                             av_log(h->s.avctx, AV_LOG_ERROR, "ref %u overflow\n", tmp);
  1571.                             return -1;
  1572.                         }
  1573.                     }
  1574.                     ref[list][i]= tmp;
  1575.                 }else{
  1576.                  //FIXME
  1577.                     ref[list][i] = -1;
  1578.                 }
  1579.             }
  1580.         }
  1581.         if(dct8x8_allowed)
  1582.             dct8x8_allowed = get_dct8x8_allowed(h);
  1583.         for(list=0; list<h->list_count; list++){
  1584.             for(i=0; i<4; i++){
  1585.                 if(IS_DIRECT(h->sub_mb_type[i])) {
  1586.                     h->ref_cache[list][ scan8[4*i] ] = h->ref_cache[list][ scan8[4*i]+1 ];
  1587.                     continue;
  1588.                 }
  1589.                 h->ref_cache[list][ scan8[4*i]   ]=h->ref_cache[list][ scan8[4*i]+1 ]=
  1590.                 h->ref_cache[list][ scan8[4*i]+8 ]=h->ref_cache[list][ scan8[4*i]+9 ]= ref[list][i];
  1591.                 if(IS_DIR(h->sub_mb_type[i], 0, list)){
  1592.                     const int sub_mb_type= h->sub_mb_type[i];
  1593.                     const int block_width= (sub_mb_type & (MB_TYPE_16x16|MB_TYPE_16x8)) ? 2 : 1;
  1594.                     for(j=0; j<sub_partition_count[i]; j++){
  1595.                         int mx, my;
  1596.                         const int index= 4*i + block_width*j;
  1597.                         int16_t (* mv_cache)[2]= &h->mv_cache[list][ scan8[index] ];
  1598.                         pred_motion(h, index, block_width, list, h->ref_cache[list][ scan8[index] ], &mx, &my);
  1599.                         mx += get_se_golomb(&s->gb);
  1600.                         my += get_se_golomb(&s->gb);
  1601.                         tprintf(s->avctx, "final mv:%d %d\n", mx, my);
  1602.                         if(IS_SUB_8X8(sub_mb_type)){
  1603.                             mv_cache[ 1 ][0]=
  1604.                             mv_cache[ 8 ][0]= mv_cache[ 9 ][0]= mx;
  1605.                             mv_cache[ 1 ][1]=
  1606.                             mv_cache[ 8 ][1]= mv_cache[ 9 ][1]= my;
  1607.                         }else if(IS_SUB_8X4(sub_mb_type)){
  1608.                             mv_cache[ 1 ][0]= mx;
  1609.                             mv_cache[ 1 ][1]= my;
  1610.                         }else if(IS_SUB_4X8(sub_mb_type)){
  1611.                             mv_cache[ 8 ][0]= mx;
  1612.                             mv_cache[ 8 ][1]= my;
  1613.                         }
  1614.                         mv_cache[ 0 ][0]= mx;
  1615.                         mv_cache[ 0 ][1]= my;
  1616.                     }
  1617.                 }else{
  1618.                     uint32_t *p= (uint32_t *)&h->mv_cache[list][ scan8[4*i] ][0];
  1619.                     p[0] = p[1]=
  1620.                     p[8] = p[9]= 0;
  1621.                 }
  1622.             }
  1623.         }
  1624.     }else if(IS_DIRECT(mb_type)){
  1625.         pred_direct_motion(h, &mb_type);
  1626.         dct8x8_allowed &= h->sps.direct_8x8_inference_flag;
  1627.     }else{
  1628.         int list, mx, my, i;
  1629.          //FIXME we should set ref_idx_l? to 0 if we use that later ...
  1630.         if(IS_16X16(mb_type)){
  1631.             for(list=0; list<h->list_count; list++){
  1632.                     unsigned int val;
  1633.                     if(IS_DIR(mb_type, 0, list)){
  1634.                         if(h->ref_count[list]==1){
  1635.                             val= 0;
  1636.                         }else if(h->ref_count[list]==2){
  1637.                             val= get_bits1(&s->gb)^1;
  1638.                         }else{
  1639.                             val= get_ue_golomb_31(&s->gb);
  1640.                             if((int)val >= h->ref_count[list]){
  1641.                                 av_log(h->s.avctx, AV_LOG_ERROR, "ref %u overflow\n", val);
  1642.                                 return -1;
  1643.                             }
  1644.                         }
  1645.                     }else
  1646.                         val= LIST_NOT_USED&0xFF;
  1647.                     fill_rectangle(&h->ref_cache[list][ scan8[0] ], 4, 4, 8, val, 1);
  1648.             }
  1649.             for(list=0; list<h->list_count; list++){
  1650.                 unsigned int val;
  1651.                 if(IS_DIR(mb_type, 0, list)){
  1652.                     pred_motion(h, 0, 4, list, h->ref_cache[list][ scan8[0] ], &mx, &my);
  1653.                     mx += get_se_golomb(&s->gb);
  1654.                     my += get_se_golomb(&s->gb);
  1655.                     tprintf(s->avctx, "final mv:%d %d\n", mx, my);
  1656.                     val= pack16to32(mx,my);
  1657.                 }else
  1658.                     val=0;
  1659.                 fill_rectangle(h->mv_cache[list][ scan8[0] ], 4, 4, 8, val, 4);
  1660.             }
  1661.         }
  1662.         else if(IS_16X8(mb_type)){
  1663.             for(list=0; list<h->list_count; list++){
  1664.                     for(i=0; i<2; i++){
  1665.                         unsigned int val;
  1666.                         if(IS_DIR(mb_type, i, list)){
  1667.                             if(h->ref_count[list] == 1){
  1668.                                 val= 0;
  1669.                             }else if(h->ref_count[list] == 2){
  1670.                                 val= get_bits1(&s->gb)^1;
  1671.                             }else{
  1672.                                 val= get_ue_golomb_31(&s->gb);
  1673.                                 if((int)val >= h->ref_count[list]){
  1674.                                     av_log(h->s.avctx, AV_LOG_ERROR, "ref %u overflow\n", val);
  1675.                                     return -1;
  1676.                                 }
  1677.                             }
  1678.                         }else
  1679.                             val= LIST_NOT_USED&0xFF;
  1680.                         fill_rectangle(&h->ref_cache[list][ scan8[0] + 16*i ], 4, 2, 8, val, 1);
  1681.                     }
  1682.             }
  1683.             for(list=0; list<h->list_count; list++){
  1684.                 for(i=0; i<2; i++){
  1685.                     unsigned int val;
  1686.                     if(IS_DIR(mb_type, i, list)){
  1687.                         pred_16x8_motion(h, 8*i, list, h->ref_cache[list][scan8[0] + 16*i], &mx, &my);
  1688.                         mx += get_se_golomb(&s->gb);
  1689.                         my += get_se_golomb(&s->gb);
  1690.                         tprintf(s->avctx, "final mv:%d %d\n", mx, my);
  1691.                         val= pack16to32(mx,my);
  1692.                     }else
  1693.                         val=0;
  1694.                     fill_rectangle(h->mv_cache[list][ scan8[0] + 16*i ], 4, 2, 8, val, 4);
  1695.                 }
  1696.             }
  1697.         }else{
  1698.             assert(IS_8X16(mb_type));
  1699.             for(list=0; list<h->list_count; list++){
  1700.                     for(i=0; i<2; i++){
  1701.                         unsigned int val;
  1702.                         if(IS_DIR(mb_type, i, list)){ //FIXME optimize
  1703.                             if(h->ref_count[list]==1){
  1704.                                 val= 0;
  1705.                             }else if(h->ref_count[list]==2){
  1706.                                 val= get_bits1(&s->gb)^1;
  1707.                             }else{
  1708.                                 val= get_ue_golomb_31(&s->gb);
  1709.                                 if((int)val >= h->ref_count[list]){
  1711.                                     av_log(h->s.avctx, AV_LOG_ERROR, "ref %u overflow\n", val);
  1712.                                     return -1;
  1713.                                 }
  1714.                             }
  1715.                         }else
  1716.                             val= LIST_NOT_USED&0xFF;
  1717.                         fill_rectangle(&h->ref_cache[list][ scan8[0] + 2*i ], 2, 4, 8, val, 1);
  1718.                     }
  1719.             }
  1720.             for(list=0; list<h->list_count; list++){
  1721.                 for(i=0; i<2; i++){
  1722.                     unsigned int val;
  1723.                     if(IS_DIR(mb_type, i, list)){
  1724.                         pred_8x16_motion(h, i*4, list, h->ref_cache[list][ scan8[0] + 2*i ], &mx, &my);
  1725.                         mx += get_se_golomb(&s->gb);
  1726.                         my += get_se_golomb(&s->gb);
  1727.                         tprintf(s->avctx, "final mv:%d %d\n", mx, my);
  1728.                         val= pack16to32(mx,my);
  1729.                     }else
  1730.                         val=0;
  1731.                     fill_rectangle(h->mv_cache[list][ scan8[0] + 2*i ], 2, 4, 8, val, 4);
  1732.                 }
  1733.             }
  1734.         }
  1735.     }
  1736.     if(IS_INTER(mb_type))
  1737.         write_back_motion(h, mb_type);
  1738.     if(!IS_INTRA16x16(mb_type)){
  1739.         cbp= get_ue_golomb(&s->gb);
  1740.         if(cbp > 47){
  1741.             av_log(h->s.avctx, AV_LOG_ERROR, "cbp too large (%u) at %d %d\n", cbp, s->mb_x, s->mb_y);
  1742.             return -1;
  1743.         }
  1744.         if(CHROMA){
  1745.             if(IS_INTRA4x4(mb_type)) cbp= golomb_to_intra4x4_cbp[cbp];
  1746.             else                     cbp= golomb_to_inter_cbp   [cbp];
  1747.         }else{
  1748.             if(IS_INTRA4x4(mb_type)) cbp= golomb_to_intra4x4_cbp_gray[cbp];
  1749.             else                     cbp= golomb_to_inter_cbp_gray[cbp];
  1750.         }
  1751.     }
  1752.     h->cbp = cbp;
  1753.     if(dct8x8_allowed && (cbp&15) && !IS_INTRA(mb_type)){
  1754.         if(get_bits1(&s->gb)){
  1755.             mb_type |= MB_TYPE_8x8DCT;
  1756.             h->cbp_table[mb_xy]= cbp;
  1757.         }
  1758.     }
  1759.     s->current_picture.mb_type[mb_xy]= mb_type;
  1760.     if(cbp || IS_INTRA16x16(mb_type)){
  1761.         int i8x8, i4x4, chroma_idx;
  1762.         int dquant;
  1763.         GetBitContext *gb= IS_INTRA(mb_type) ? h->intra_gb_ptr : h->inter_gb_ptr;
  1764.         const uint8_t *scan, *scan8x8, *dc_scan;
  1765. //        fill_non_zero_count_cache(h);
  1766.         if(IS_INTERLACED(mb_type)){
  1767.             scan8x8= s->qscale ? h->field_scan8x8_cavlc : h->field_scan8x8_cavlc_q0;
  1768.             scan= s->qscale ? h->field_scan : h->field_scan_q0;
  1769.             dc_scan= luma_dc_field_scan;
  1770.         }else{
  1771.             scan8x8= s->qscale ? h->zigzag_scan8x8_cavlc : h->zigzag_scan8x8_cavlc_q0;
  1772.             scan= s->qscale ? h->zigzag_scan : h->zigzag_scan_q0;
  1773.             dc_scan= luma_dc_zigzag_scan;
  1774.         }
  1775.         dquant= get_se_golomb(&s->gb);
  1776.         if( dquant > 25 || dquant < -26 ){
  1777.             av_log(h->s.avctx, AV_LOG_ERROR, "dquant out of range (%d) at %d %d\n", dquant, s->mb_x, s->mb_y);
  1778.             return -1;
  1779.         }
  1780.         s->qscale += dquant;
  1781.         if(((unsigned)s->qscale) > 51){
  1782.             if(s->qscale<0) s->qscale+= 52;
  1783.             else            s->qscale-= 52;
  1784.         }
  1785.         h->chroma_qp[0]= get_chroma_qp(h, 0, s->qscale);
  1786.         h->chroma_qp[1]= get_chroma_qp(h, 1, s->qscale);
  1787.         if(IS_INTRA16x16(mb_type)){
  1788.             if( decode_residual(h, h->intra_gb_ptr, h->mb, LUMA_DC_BLOCK_INDEX, dc_scan, h->dequant4_coeff[0][s->qscale], 16) < 0){
  1789.                 return -1; //FIXME continue if partitioned and other return -1 too
  1790.             }
  1791.             assert((cbp&15) == 0 || (cbp&15) == 15);
  1792.             if(cbp&15){
  1793.                 for(i8x8=0; i8x8<4; i8x8++){
  1794.                     for(i4x4=0; i4x4<4; i4x4++){
  1795.                         const int index= i4x4 + 4*i8x8;
  1796.                         if( decode_residual(h, h->intra_gb_ptr, h->mb + 16*index, index, scan + 1, h->dequant4_coeff[0][s->qscale], 15) < 0 ){
  1797.                             return -1;
  1798.                         }
  1799.                     }
  1800.                 }
  1801.             }else{
  1802.                 fill_rectangle(&h->non_zero_count_cache[scan8[0]], 4, 4, 8, 0, 1);
  1803.             }
  1804.         }else{
  1805.             for(i8x8=0; i8x8<4; i8x8++){
  1806.                 if(cbp & (1<<i8x8)){
  1807.                     if(IS_8x8DCT(mb_type)){
  1808.                         DCTELEM *buf = &h->mb[64*i8x8];
  1809.                         uint8_t *nnz;
  1810.                         for(i4x4=0; i4x4<4; i4x4++){
  1811.                             if( decode_residual(h, gb, buf, i4x4+4*i8x8, scan8x8+16*i4x4,
  1812.                                                 h->dequant8_coeff[IS_INTRA( mb_type ) ? 0:1][s->qscale], 16) <0 )
  1813.                                 return -1;
  1814.                         }
  1815.                         nnz= &h->non_zero_count_cache[ scan8[4*i8x8] ];
  1816.                         nnz[0] += nnz[1] + nnz[8] + nnz[9];
  1817.                     }else{
  1818.                         for(i4x4=0; i4x4<4; i4x4++){
  1819.                             const int index= i4x4 + 4*i8x8;
  1820.                             if( decode_residual(h, gb, h->mb + 16*index, index, scan, h->dequant4_coeff[IS_INTRA( mb_type ) ? 0:3][s->qscale], 16) <0 ){
  1821.                                 return -1;
  1822.                             }
  1823.                         }
  1824.                     }
  1825.                 }else{
  1826.                     uint8_t * const nnz= &h->non_zero_count_cache[ scan8[4*i8x8] ];
  1827.                     nnz[0] = nnz[1] = nnz[8] = nnz[9] = 0;
  1828.                 }
  1829.             }
  1830.         }
  1831.         if(cbp&0x30){
  1832.             for(chroma_idx=0; chroma_idx<2; chroma_idx++)
  1833.                 if( decode_residual(h, gb, h->mb + 256 + 16*4*chroma_idx, CHROMA_DC_BLOCK_INDEX, chroma_dc_scan, NULL, 4) < 0){
  1834.                     return -1;
  1835.                 }
  1836.         }
  1837.         if(cbp&0x20){
  1838.             for(chroma_idx=0; chroma_idx<2; chroma_idx++){
  1839.                 const uint32_t *qmul = h->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 0:3)][h->chroma_qp[chroma_idx]];
  1840.                 for(i4x4=0; i4x4<4; i4x4++){
  1841.                     const int index= 16 + 4*chroma_idx + i4x4;
  1842.                     if( decode_residual(h, gb, h->mb + 16*index, index, scan + 1, qmul, 15) < 0){
  1843.                         return -1;
  1844.                     }
  1845.                 }
  1846.             }
  1847.         }else{
  1848.             uint8_t * const nnz= &h->non_zero_count_cache[0];
  1849.             nnz[ scan8[16]+0 ] = nnz[ scan8[16]+1 ] =nnz[ scan8[16]+8 ] =nnz[ scan8[16]+9 ] =
  1850.             nnz[ scan8[20]+0 ] = nnz[ scan8[20]+1 ] =nnz[ scan8[20]+8 ] =nnz[ scan8[20]+9 ] = 0;
  1851.         }
  1852.     }else{
  1853.         uint8_t * const nnz= &h->non_zero_count_cache[0];
  1854.         fill_rectangle(&nnz[scan8[0]], 4, 4, 8, 0, 1);
  1855.         nnz[ scan8[16]+0 ] = nnz[ scan8[16]+1 ] =nnz[ scan8[16]+8 ] =nnz[ scan8[16]+9 ] =
  1856.         nnz[ scan8[20]+0 ] = nnz[ scan8[20]+1 ] =nnz[ scan8[20]+8 ] =nnz[ scan8[20]+9 ] = 0;
  1857.     }
  1858.     s->current_picture.qscale_table[mb_xy]= s->qscale;
  1859.     write_back_non_zero_count(h);
  1860.     if(MB_MBAFF){
  1861.         h->ref_count[0] >>= 1;
  1862.         h->ref_count[1] >>= 1;
  1863.     }
  1864.     return 0;
  1865. }
  1866. static int decode_cabac_field_decoding_flag(H264Context *h) {
  1867.     MpegEncContext * const s = &h->s;
  1868.     const int mb_x = s->mb_x;
  1869.     const int mb_y = s->mb_y & ~1;
  1870.     const int mba_xy = mb_x - 1 +  mb_y   *s->mb_stride;
  1871.     const int mbb_xy = mb_x     + (mb_y-2)*s->mb_stride;
  1872.     unsigned int ctx = 0;
  1873.     if( h->slice_table[mba_xy] == h->slice_num && IS_INTERLACED( s->current_picture.mb_type[mba_xy] ) ) {
  1874.         ctx += 1;
  1875.     }
  1876.     if( h->slice_table[mbb_xy] == h->slice_num && IS_INTERLACED( s->current_picture.mb_type[mbb_xy] ) ) {
  1877.         ctx += 1;
  1878.     }
  1879.     return get_cabac_noinline( &h->cabac, &h->cabac_state[70 + ctx] );
  1880. }
  1881. static int decode_cabac_intra_mb_type(H264Context *h, int ctx_base, int intra_slice) {
  1882.     uint8_t *state= &h->cabac_state[ctx_base];
  1883.     int mb_type;
  1884.     if(intra_slice){
  1885.         MpegEncContext * const s = &h->s;
  1886.         const int mba_xy = h->left_mb_xy[0];
  1887.         const int mbb_xy = h->top_mb_xy;
  1888.         int ctx=0;
  1889.         if( h->slice_table[mba_xy] == h->slice_num && !IS_INTRA4x4( s->current_picture.mb_type[mba_xy] ) )
  1890.             ctx++;
  1891.         if( h->slice_table[mbb_xy] == h->slice_num && !IS_INTRA4x4( s->current_picture.mb_type[mbb_xy] ) )
  1892.             ctx++;
  1893.         if( get_cabac_noinline( &h->cabac, &state[ctx] ) == 0 )
  1894.             return 0;   /* I4x4 */
  1895.         state += 2;
  1896.     }else{
  1897.         if( get_cabac_noinline( &h->cabac, &state[0] ) == 0 )
  1898.             return 0;   /* I4x4 */
  1899.     }
  1900.     if( get_cabac_terminate( &h->cabac ) )
  1901.         return 25;  /* PCM */
  1902.     mb_type = 1; /* I16x16 */
  1903.     mb_type += 12 * get_cabac_noinline( &h->cabac, &state[1] ); /* cbp_luma != 0 */
  1904.     if( get_cabac_noinline( &h->cabac, &state[2] ) ) /* cbp_chroma */
  1905.         mb_type += 4 + 4 * get_cabac_noinline( &h->cabac, &state[2+intra_slice] );
  1906.     mb_type += 2 * get_cabac_noinline( &h->cabac, &state[3+intra_slice] );
  1907.     mb_type += 1 * get_cabac_noinline( &h->cabac, &state[3+2*intra_slice] );
  1908.     return mb_type;
  1909. }
  1910. static int decode_cabac_mb_type_b( H264Context *h ) {
  1911.     MpegEncContext * const s = &h->s;
  1912.         const int mba_xy = h->left_mb_xy[0];
  1913.         const int mbb_xy = h->top_mb_xy;
  1914.         int ctx = 0;
  1915.         int bits;
  1916.         assert(h->slice_type_nos == FF_B_TYPE);
  1917.         if( h->slice_table[mba_xy] == h->slice_num && !IS_DIRECT( s->current_picture.mb_type[mba_xy] ) )
  1918.             ctx++;
  1919.         if( h->slice_table[mbb_xy] == h->slice_num && !IS_DIRECT( s->current_picture.mb_type[mbb_xy] ) )
  1920.             ctx++;
  1921.         if( !get_cabac_noinline( &h->cabac, &h->cabac_state[27+ctx] ) )
  1922.             return 0; /* B_Direct_16x16 */
  1923.         if( !get_cabac_noinline( &h->cabac, &h->cabac_state[27+3] ) ) {
  1924.             return 1 + get_cabac_noinline( &h->cabac, &h->cabac_state[27+5] ); /* B_L[01]_16x16 */
  1925.         }
  1926.         bits = get_cabac_noinline( &h->cabac, &h->cabac_state[27+4] ) << 3;
  1927.         bits|= get_cabac_noinline( &h->cabac, &h->cabac_state[27+5] ) << 2;
  1928.         bits|= get_cabac_noinline( &h->cabac, &h->cabac_state[27+5] ) << 1;
  1929.         bits|= get_cabac_noinline( &h->cabac, &h->cabac_state[27+5] );
  1930.         if( bits < 8 )
  1931.             return bits + 3; /* B_Bi_16x16 through B_L1_L0_16x8 */
  1932.         else if( bits == 13 ) {
  1933.             return decode_cabac_intra_mb_type(h, 32, 0) + 23;
  1934.         } else if( bits == 14 )
  1935.             return 11; /* B_L1_L0_8x16 */
  1936.         else if( bits == 15 )
  1937.             return 22; /* B_8x8 */
  1938.         bits= ( bits<<1 ) | get_cabac_noinline( &h->cabac, &h->cabac_state[27+5] );
  1939.         return bits - 4; /* B_L0_Bi_* through B_Bi_Bi_* */
  1940. }
  1941. static int decode_cabac_mb_skip( H264Context *h, int mb_x, int mb_y ) {
  1942.     MpegEncContext * const s = &h->s;
  1943.     int mba_xy, mbb_xy;
  1944.     int ctx = 0;
  1945.     if(FRAME_MBAFF){ //FIXME merge with the stuff in fill_caches?
  1946.         int mb_xy = mb_x + (mb_y&~1)*s->mb_stride;
  1947.         mba_xy = mb_xy - 1;
  1948.         if( (mb_y&1)
  1949.             && h->slice_table[mba_xy] == h->slice_num
  1950.             && MB_FIELD == !!IS_INTERLACED( s->current_picture.mb_type[mba_xy] ) )
  1951.             mba_xy += s->mb_stride;
  1952.         if( MB_FIELD ){
  1953.             mbb_xy = mb_xy - s->mb_stride;
  1954.             if( !(mb_y&1)
  1955.                 && h->slice_table[mbb_xy] == h->slice_num
  1956.                 && IS_INTERLACED( s->current_picture.mb_type[mbb_xy] ) )
  1957.                 mbb_xy -= s->mb_stride;
  1958.         }else
  1959.             mbb_xy = mb_x + (mb_y-1)*s->mb_stride;
  1960.     }else{
  1961.         int mb_xy = h->mb_xy;
  1962.         mba_xy = mb_xy - 1;
  1963.         mbb_xy = mb_xy - (s->mb_stride << FIELD_PICTURE);
  1964.     }
  1965.     if( h->slice_table[mba_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mba_xy] ))
  1966.         ctx++;
  1967.     if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mbb_xy] ))
  1968.         ctx++;
  1969.     if( h->slice_type_nos == FF_B_TYPE )
  1970.         ctx += 13;
  1971.     return get_cabac_noinline( &h->cabac, &h->cabac_state[11+ctx] );
  1972. }
  1973. static int decode_cabac_mb_intra4x4_pred_mode( H264Context *h, int pred_mode ) {
  1974.     int mode = 0;
  1975.     if( get_cabac( &h->cabac, &h->cabac_state[68] ) )
  1976.         return pred_mode;
  1977.     mode += 1 * get_cabac( &h->cabac, &h->cabac_state[69] );
  1978.     mode += 2 * get_cabac( &h->cabac, &h->cabac_state[69] );
  1979.     mode += 4 * get_cabac( &h->cabac, &h->cabac_state[69] );
  1980.     if( mode >= pred_mode )
  1981.         return mode + 1;
  1982.     else
  1983.         return mode;
  1984. }
  1985. static int decode_cabac_mb_chroma_pre_mode( H264Context *h) {
  1986.     const int mba_xy = h->left_mb_xy[0];
  1987.     const int mbb_xy = h->top_mb_xy;
  1988.     int ctx = 0;
  1989.     /* No need to test for IS_INTRA4x4 and IS_INTRA16x16, as we set chroma_pred_mode_table to 0 */
  1990.     if( h->slice_table[mba_xy] == h->slice_num && h->chroma_pred_mode_table[mba_xy] != 0 )
  1991.         ctx++;
  1992.     if( h->slice_table[mbb_xy] == h->slice_num && h->chroma_pred_mode_table[mbb_xy] != 0 )
  1993.         ctx++;
  1994.     if( get_cabac_noinline( &h->cabac, &h->cabac_state[64+ctx] ) == 0 )
  1995.         return 0;
  1996.     if( get_cabac_noinline( &h->cabac, &h->cabac_state[64+3] ) == 0 )
  1997.         return 1;
  1998.     if( get_cabac_noinline( &h->cabac, &h->cabac_state[64+3] ) == 0 )
  1999.         return 2;
  2000.     else
  2001.         return 3;
  2002. }
  2003. static int decode_cabac_mb_cbp_luma( H264Context *h) {
  2004.     int cbp_b, cbp_a, ctx, cbp = 0;
  2005.     cbp_a = h->slice_table[h->left_mb_xy[0]] == h->slice_num ? h->left_cbp : -1;
  2006.     cbp_b = h->slice_table[h->top_mb_xy]     == h->slice_num ? h->top_cbp  : -1;
  2007.     ctx = !(cbp_a & 0x02) + 2 * !(cbp_b & 0x04);
  2008.     cbp |= get_cabac_noinline(&h->cabac, &h->cabac_state[73 + ctx]);
  2009.     ctx = !(cbp   & 0x01) + 2 * !(cbp_b & 0x08);
  2010.     cbp |= get_cabac_noinline(&h->cabac, &h->cabac_state[73 + ctx]) << 1;
  2011.     ctx = !(cbp_a & 0x08) + 2 * !(cbp   & 0x01);
  2012.     cbp |= get_cabac_noinline(&h->cabac, &h->cabac_state[73 + ctx]) << 2;
  2013.     ctx = !(cbp   & 0x04) + 2 * !(cbp   & 0x02);
  2014.     cbp |= get_cabac_noinline(&h->cabac, &h->cabac_state[73 + ctx]) << 3;
  2015.     return cbp;
  2016. }
  2017. static int decode_cabac_mb_cbp_chroma( H264Context *h) {
  2018.     int ctx;
  2019.     int cbp_a, cbp_b;
  2020.     cbp_a = (h->left_cbp>>4)&0x03;
  2021.     cbp_b = (h-> top_cbp>>4)&0x03;
  2022.     ctx = 0;
  2023.     if( cbp_a > 0 ) ctx++;
  2024.     if( cbp_b > 0 ) ctx += 2;
  2025.     if( get_cabac_noinline( &h->cabac, &h->cabac_state[77 + ctx] ) == 0 )
  2026.         return 0;
  2027.     ctx = 4;
  2028.     if( cbp_a == 2 ) ctx++;
  2029.     if( cbp_b == 2 ) ctx += 2;
  2030.     return 1 + get_cabac_noinline( &h->cabac, &h->cabac_state[77 + ctx] );
  2031. }
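/**
 * decodes mb_qp_delta. It is coded as a unary bin string (the first bin's
 * context depends on whether the previous macroblock had a non-zero delta);
 * the bin count maps to the signed delta as 0, +1, -1, +2, -2, ... i.e. odd
 * counts are positive and even counts negative. The 102 cap only guards
 * against runaway bitstreams.
 */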
  2032. static int decode_cabac_mb_dqp( H264Context *h) {
  2033.     int   ctx= h->last_qscale_diff != 0;
  2034.     int   val = 0;
  2035.     while( get_cabac_noinline( &h->cabac, &h->cabac_state[60 + ctx] ) ) {
  2036.         ctx= 2+(ctx>>1);
  2037.         val++;
  2038.         if(val > 102) //prevent infinite loop
  2039.             return INT_MIN;
  2040.     }
  2041.     if( val&0x01 )
  2042.         return   (val + 1)>>1 ;
  2043.     else
  2044.         return -((val + 1)>>1);
  2045. }
  2046. static int decode_cabac_p_mb_sub_type( H264Context *h ) {
  2047.     if( get_cabac( &h->cabac, &h->cabac_state[21] ) )
  2048.         return 0;   /* 8x8 */
  2049.     if( !get_cabac( &h->cabac, &h->cabac_state[22] ) )
  2050.         return 1;   /* 8x4 */
  2051.     if( get_cabac( &h->cabac, &h->cabac_state[23] ) )
  2052.         return 2;   /* 4x8 */
  2053.     return 3;       /* 4x4 */
  2054. }
  2055. static int decode_cabac_b_mb_sub_type( H264Context *h ) {
  2056.     int type;
  2057.     if( !get_cabac( &h->cabac, &h->cabac_state[36] ) )
  2058.         return 0;   /* B_Direct_8x8 */
  2059.     if( !get_cabac( &h->cabac, &h->cabac_state[37] ) )
  2060.         return 1 + get_cabac( &h->cabac, &h->cabac_state[39] ); /* B_L0_8x8, B_L1_8x8 */
  2061.     type = 3;
  2062.     if( get_cabac( &h->cabac, &h->cabac_state[38] ) ) {
  2063.         if( get_cabac( &h->cabac, &h->cabac_state[39] ) )
  2064.             return 11 + get_cabac( &h->cabac, &h->cabac_state[39] ); /* B_L1_4x4, B_Bi_4x4 */
  2065.         type += 4;
  2066.     }
  2067.     type += 2*get_cabac( &h->cabac, &h->cabac_state[39] );
  2068.     type +=   get_cabac( &h->cabac, &h->cabac_state[39] );
  2069.     return type;
  2070. }
  2071. static inline int decode_cabac_mb_transform_size( H264Context *h ) {
  2072.     return get_cabac_noinline( &h->cabac, &h->cabac_state[399 + h->neighbor_transform_size] );
  2073. }
  2074. static int decode_cabac_mb_ref( H264Context *h, int list, int n ) {
  2075.     int refa = h->ref_cache[list][scan8[n] - 1];
  2076.     int refb = h->ref_cache[list][scan8[n] - 8];
  2077.     int ref  = 0;
  2078.     int ctx  = 0;
  2079.     if( h->slice_type_nos == FF_B_TYPE) {
  2080.         if( refa > 0 && !h->direct_cache[scan8[n] - 1] )
  2081.             ctx++;
  2082.         if( refb > 0 && !h->direct_cache[scan8[n] - 8] )
  2083.             ctx += 2;
  2084.     } else {
  2085.         if( refa > 0 )
  2086.             ctx++;
  2087.         if( refb > 0 )
  2088.             ctx += 2;
  2089.     }
  2090.     while( get_cabac( &h->cabac, &h->cabac_state[54+ctx] ) ) {
  2091.         ref++;
  2092.         ctx = (ctx>>2)+4;
  2093.         if(ref >= 32 /*h->ref_list[list]*/){
  2094.             return -1;
  2095.         }
  2096.     }
  2097.     return ref;
  2098. }
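/**
 * decodes one motion vector difference component (l: 0 = x, 1 = y) with UEG3
 * binarization: the first bin's context depends on the neighbouring |mvd| sum
 * (thresholds 2 and 32), then a truncated unary prefix of at most 9 context
 * coded bins follows; values of 9 or more continue with a bypass-coded
 * Exp-Golomb style suffix starting at k = 3, and the sign is a final bypass
 * bin applied by get_cabac_bypass_sign().
 */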
  2099. static int decode_cabac_mb_mvd( H264Context *h, int list, int n, int l ) {
  2100.     int amvd = abs( h->mvd_cache[list][scan8[n] - 1][l] ) +
  2101.                abs( h->mvd_cache[list][scan8[n] - 8][l] );
  2102.     int ctxbase = (l == 0) ? 40 : 47;
  2103.     int mvd;
  2104.     int ctx = (amvd>2) + (amvd>32);
  2105.     if(!get_cabac(&h->cabac, &h->cabac_state[ctxbase+ctx]))
  2106.         return 0;
  2107.     mvd= 1;
  2108.     ctx= 3;
  2109.     while( mvd < 9 && get_cabac( &h->cabac, &h->cabac_state[ctxbase+ctx] ) ) {
  2110.         mvd++;
  2111.         if( ctx < 6 )
  2112.             ctx++;
  2113.     }
  2114.     if( mvd >= 9 ) {
  2115.         int k = 3;
  2116.         while( get_cabac_bypass( &h->cabac ) ) {
  2117.             mvd += 1 << k;
  2118.             k++;
  2119.             if(k>24){
  2120.                 av_log(h->s.avctx, AV_LOG_ERROR, "overflow in decode_cabac_mb_mvd\n");
  2121.                 return INT_MIN;
  2122.             }
  2123.         }
  2124.         while( k-- ) {
  2125.             if( get_cabac_bypass( &h->cabac ) )
  2126.                 mvd += 1 << k;
  2127.         }
  2128.     }
  2129.     return get_cabac_bypass_sign( &h->cabac, -mvd );
  2130. }
  2131. static av_always_inline int get_cabac_cbf_ctx( H264Context *h, int cat, int idx, int is_dc ) {
  2132.     int nza, nzb;
  2133.     int ctx = 0;
  2134.     if( is_dc ) {
  2135.         if( cat == 0 ) {
  2136.             nza = h->left_cbp&0x100;
  2137.             nzb = h-> top_cbp&0x100;
  2138.         } else {
  2139.             nza = (h->left_cbp>>(6+idx))&0x01;
  2140.             nzb = (h-> top_cbp>>(6+idx))&0x01;
  2141.         }
  2142.     } else {
  2143.         assert(cat == 1 || cat == 2 || cat == 4);
  2144.         nza = h->non_zero_count_cache[scan8[idx] - 1];
  2145.         nzb = h->non_zero_count_cache[scan8[idx] - 8];
  2146.     }
  2147.     if( nza > 0 )
  2148.         ctx++;
  2149.     if( nzb > 0 )
  2150.         ctx += 2;
  2151.     return ctx + 4 * cat;
  2152. }
  2153. DECLARE_ASM_CONST(1, uint8_t, last_coeff_flag_offset_8x8[63]) = {
  2154.     0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  2155.     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  2156.     3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
  2157.     5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8
  2158. };
  2159. static av_always_inline void decode_cabac_residual_internal( H264Context *h, DCTELEM *block, int cat, int n, const uint8_t *scantable, const uint32_t *qmul, int max_coeff, int is_dc ) {
  2160.     static const int significant_coeff_flag_offset[2][6] = {
  2161.       { 105+0, 105+15, 105+29, 105+44, 105+47, 402 },
  2162.       { 277+0, 277+15, 277+29, 277+44, 277+47, 436 }
  2163.     };
  2164.     static const int last_coeff_flag_offset[2][6] = {
  2165.       { 166+0, 166+15, 166+29, 166+44, 166+47, 417 },
  2166.       { 338+0, 338+15, 338+29, 338+44, 338+47, 451 }
  2167.     };
  2168.     static const int coeff_abs_level_m1_offset[6] = {
  2169.         227+0, 227+10, 227+20, 227+30, 227+39, 426
  2170.     };
  2171.     static const uint8_t significant_coeff_flag_offset_8x8[2][63] = {
  2172.       { 0, 1, 2, 3, 4, 5, 5, 4, 4, 3, 3, 4, 4, 4, 5, 5,
  2173.         4, 4, 4, 4, 3, 3, 6, 7, 7, 7, 8, 9,10, 9, 8, 7,
  2174.         7, 6,11,12,13,11, 6, 7, 8, 9,14,10, 9, 8, 6,11,
  2175.        12,13,11, 6, 9,14,10, 9,11,12,13,11,14,10,12 },
  2176.       { 0, 1, 1, 2, 2, 3, 3, 4, 5, 6, 7, 7, 7, 8, 4, 5,
  2177.         6, 9,10,10, 8,11,12,11, 9, 9,10,10, 8,11,12,11,
  2178.         9, 9,10,10, 8,11,12,11, 9, 9,10,10, 8,13,13, 9,
  2179.         9,10,10, 8,13,13, 9, 9,10,10,14,14,14,14,14 }
  2180.     };
  2181.     /* node ctx: 0..3: abslevel1 (with abslevelgt1 == 0).
  2182.      * 4..7: abslevelgt1 + 3 (and abslevel1 doesn't matter).
  2183.      * map node ctx => cabac ctx for level=1 */
  2184.     static const uint8_t coeff_abs_level1_ctx[8] = { 1, 2, 3, 4, 0, 0, 0, 0 };
  2185.     /* map node ctx => cabac ctx for level>1 */
  2186.     static const uint8_t coeff_abs_levelgt1_ctx[8] = { 5, 5, 5, 5, 6, 7, 8, 9 };
  2187.     static const uint8_t coeff_abs_level_transition[2][8] = {
  2188.     /* update node ctx after decoding a level=1 */
  2189.         { 1, 2, 3, 3, 4, 5, 6, 7 },
  2190.     /* update node ctx after decoding a level>1 */
  2191.         { 4, 4, 4, 4, 5, 6, 7, 7 }
  2192.     };
  2193.     int index[64];
  2194.     int av_unused last;
  2195.     int coeff_count = 0;
  2196.     int node_ctx = 0;
  2197.     uint8_t *significant_coeff_ctx_base;
  2198.     uint8_t *last_coeff_ctx_base;
  2199.     uint8_t *abs_level_m1_ctx_base;
  2200. #ifndef ARCH_X86
  2201. #define CABAC_ON_STACK
  2202. #endif
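/* On targets without the x86 assembly path the CABAC state (range, low and the
 * bytestream pointer) is copied into a local struct so the compiler can keep it
 * in registers across the many get_cabac() calls; all accesses go through the
 * CC macro and the state is written back to h->cabac before every return. */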
  2203. #ifdef CABAC_ON_STACK
  2204. #define CC &cc
  2205.     CABACContext cc;
  2206.     cc.range     = h->cabac.range;
  2207.     cc.low       = h->cabac.low;
  2208.     cc.bytestream= h->cabac.bytestream;
  2209. #else
  2210. #define CC &h->cabac
  2211. #endif
  2212.     /* cat: 0-> DC 16x16  n = 0
  2213.      *      1-> AC 16x16  n = luma4x4idx
  2214.      *      2-> Luma4x4   n = luma4x4idx
  2215.      *      3-> DC Chroma n = iCbCr
  2216.      *      4-> AC Chroma n = 16 + 4 * iCbCr + chroma4x4idx
  2217.      *      5-> Luma8x8   n = 4 * luma8x8idx
  2218.      */
  2219.     /* read coded block flag */
  2220.     if( is_dc || cat != 5 ) {
  2221.         if( get_cabac( CC, &h->cabac_state[85 + get_cabac_cbf_ctx( h, cat, n, is_dc ) ] ) == 0 ) {
  2222.             if( !is_dc )
  2223.                 h->non_zero_count_cache[scan8[n]] = 0;
  2224. #ifdef CABAC_ON_STACK
  2225.             h->cabac.range     = cc.range     ;
  2226.             h->cabac.low       = cc.low       ;
  2227.             h->cabac.bytestream= cc.bytestream;
  2228. #endif
  2229.             return;
  2230.         }
  2231.     }
  2232.     significant_coeff_ctx_base = h->cabac_state
  2233.         + significant_coeff_flag_offset[MB_FIELD][cat];
  2234.     last_coeff_ctx_base = h->cabac_state
  2235.         + last_coeff_flag_offset[MB_FIELD][cat];
  2236.     abs_level_m1_ctx_base = h->cabac_state
  2237.         + coeff_abs_level_m1_offset[cat];
  2238.     if( !is_dc && cat == 5 ) {
  2239. #define DECODE_SIGNIFICANCE( coefs, sig_off, last_off ) \
  2240.         for(last= 0; last < coefs; last++) { \
  2241.             uint8_t *sig_ctx = significant_coeff_ctx_base + sig_off; \
  2242.             if( get_cabac( CC, sig_ctx )) { \
  2243.                 uint8_t *last_ctx = last_coeff_ctx_base + last_off; \
  2244.                 index[coeff_count++] = last; \
  2245.                 if( get_cabac( CC, last_ctx ) ) { \
  2246.                     last= max_coeff; \
  2247.                     break; \
  2248.                 } \
  2249.             } \
  2250.         } \
  2251.         if( last == max_coeff -1 ) { \
  2252.             index[coeff_count++] = last; \
  2253.         }
  2254.         const uint8_t *sig_off = significant_coeff_flag_offset_8x8[MB_FIELD];
  2255. #if defined(ARCH_X86) && defined(HAVE_7REGS) && defined(HAVE_EBX_AVAILABLE) && !defined(BROKEN_RELOCATIONS)
  2256.         coeff_count= decode_significance_8x8_x86(CC, significant_coeff_ctx_base, index, sig_off);
  2257.     } else {
  2258.         coeff_count= decode_significance_x86(CC, max_coeff, significant_coeff_ctx_base, index);
  2259. #else
  2260.         DECODE_SIGNIFICANCE( 63, sig_off[last], last_coeff_flag_offset_8x8[last] );
  2261.     } else {
  2262.         DECODE_SIGNIFICANCE( max_coeff - 1, last, last );
  2263. #endif
  2264.     }
  2265.     assert(coeff_count > 0);
  2266.     if( is_dc ) {
  2267.         if( cat == 0 )
  2268.             h->cbp_table[h->mb_xy] |= 0x100;
  2269.         else
  2270.             h->cbp_table[h->mb_xy] |= 0x40 << n;
  2271.     } else {
  2272.         if( cat == 5 )
  2273.             fill_rectangle(&h->non_zero_count_cache[scan8[n]], 2, 2, 8, coeff_count, 1);
  2274.         else {
  2275.             assert( cat == 1 || cat == 2 || cat == 4 );
  2276.             h->non_zero_count_cache[scan8[n]] = coeff_count;
  2277.         }
  2278.     }
  2279.     do {
  2280.         uint8_t *ctx = coeff_abs_level1_ctx[node_ctx] + abs_level_m1_ctx_base;
  2281.         int j= scantable[index[--coeff_count]];
  2282.         if( get_cabac( CC, ctx ) == 0 ) {
  2284.             int32_t ret;
  2285.             node_ctx = coeff_abs_level_transition[0][node_ctx];
  2286.             if( is_dc ) {
  2288.                 block[j] = get_cabac_bypass_sign( CC, -1);
  2289.             } else {
  2292.                 //block[j] = (get_cabac_bypass_sign( CC, -qmul[j]) + 32) >> 6;
  2293.                 ret = qmul[j];
  2294.                 block[j] = (get_cabac_bypass_sign( CC, -ret) + 32) >> 6;
  2295.             }
  2296.         } else {
  2299.             int coeff_abs = 2;
  2300.             ctx = coeff_abs_levelgt1_ctx[node_ctx] + abs_level_m1_ctx_base;
  2301.             node_ctx = coeff_abs_level_transition[1][node_ctx];
  2302.             while( coeff_abs < 15 && get_cabac( CC, ctx ) ) {
  2303.                 coeff_abs++;
  2304.             }
  2305.             if( coeff_abs >= 15 ) {
  2306.                 int j = 0;
  2307.                 while( get_cabac_bypass( CC ) ) {
  2308.                     j++;
  2309.                 }
  2310.                 coeff_abs=1;
  2311.                 while( j-- ) {
  2312.                     coeff_abs += coeff_abs + get_cabac_bypass( CC );
  2313.                 }
  2314.                 coeff_abs+= 14;
  2315.             }
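            /* Escape case: once the truncated-unary prefix reaches 14 (i.e.
             * coeff_abs_level_minus1 >= 14), the remainder is an order-0
             * Exp-Golomb suffix read in bypass mode: j leading 1-bits followed
             * by j suffix bits give 2^j + suffix, so coeff_abs ends up as
             * 14 + 2^j + suffix (15 when j == 0). */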
  2316.             if( is_dc ) {
  2317.                 block[j] = get_cabac_bypass_sign( CC, -coeff_abs );
  2318.             }else{
  2319.                 block[j] = (get_cabac_bypass_sign( CC, -coeff_abs ) * qmul[j] + 32) >> 6;
  2320.             }
  2321.         }
  2322.     } while( coeff_count );
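    /* The levels above are read back to front (index[--coeff_count]), since the
     * bitstream codes them starting from the last significant coefficient.
     * node_ctx is a small saturating state machine (coeff_abs_level_transition)
     * that remembers how many levels equal to 1 and greater than 1 have been
     * seen and selects the CABAC context for the next level accordingly. */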
  2323. #ifdef CABAC_ON_STACK
  2324.             h->cabac.range     = cc.range     ;
  2325.             h->cabac.low       = cc.low       ;
  2326.             h->cabac.bytestream= cc.bytestream;
  2327. #endif
  2328. }
  2329. #ifndef CONFIG_SMALL
  2330. static void decode_cabac_residual_dc( H264Context *h, DCTELEM *block, int cat, int n, const uint8_t *scantable, const uint32_t *qmul, int max_coeff ) {
  2331.     decode_cabac_residual_internal(h, block, cat, n, scantable, qmul, max_coeff, 1);
  2332. }
  2333. static void decode_cabac_residual_nondc( H264Context *h, DCTELEM *block, int cat, int n, const uint8_t *scantable, const uint32_t *qmul, int max_coeff ) {
  2334.     decode_cabac_residual_internal(h, block, cat, n, scantable, qmul, max_coeff, 0);
  2335. }
  2336. #endif
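/* Without CONFIG_SMALL, the two thin wrappers above pass a compile-time
 * constant is_dc to decode_cabac_residual_internal(), so the compiler can fold
 * away the unused DC/AC branches in each instantiation; with CONFIG_SMALL a
 * single generic copy is kept to save code size. */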
  2337. static void decode_cabac_residual( H264Context *h, DCTELEM *block, int cat, int n, const uint8_t *scantable, const uint32_t *qmul, int max_coeff ) {
  2338. #ifdef CONFIG_SMALL
  2339.     decode_cabac_residual_internal(h, block, cat, n, scantable, qmul, max_coeff, cat == 0 || cat == 3);
  2340. #else
  2341.     if( cat == 0 || cat == 3 ) decode_cabac_residual_dc(h, block, cat, n, scantable, qmul, max_coeff);
  2342.     else decode_cabac_residual_nondc(h, block, cat, n, scantable, qmul, max_coeff);
  2343. #endif
  2344. }
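/*
 * Illustrative sketch, not part of the original file: the dequantization
 * applied to each non-DC level above is a plain fixed-point multiply.  It
 * assumes the qmul[] tables carry 6 fractional bits, which is what the
 * "(... + 32) >> 6" round-to-nearest in decode_cabac_residual_internal()
 * suggests.  The hypothetical helper below restates that expression; like the
 * original code it relies on arithmetic right shift of negative values.
 */
static inline int dequant_level_sketch(int signed_level, int scale)
{
    /* multiply by the fixed-point scale, add the rounding constant, normalize */
    return (signed_level * scale + 32) >> 6;
}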
  2345. static inline void compute_mb_neighbors(H264Context *h)
  2346. {
  2347.     MpegEncContext * const s = &h->s;
  2348.     const int mb_xy  = h->mb_xy;
  2349.     h->top_mb_xy     = mb_xy - s->mb_stride;
  2350.     h->left_mb_xy[0] = mb_xy - 1;
  2351.     if(FRAME_MBAFF){
  2352.         const int pair_xy          = s->mb_x     + (s->mb_y & ~1)*s->mb_stride;
  2353.         const int top_pair_xy      = pair_xy     - s->mb_stride;
  2354.         const int top_mb_field_flag  = IS_INTERLACED(s->current_picture.mb_type[top_pair_xy]);
  2355.         const int left_mb_field_flag = IS_INTERLACED(s->current_picture.mb_type[pair_xy-1]);
  2356.         const int curr_mb_field_flag = MB_FIELD;
  2357.         const int bottom = (s->mb_y & 1);
  2358.         if (curr_mb_field_flag && (bottom || top_mb_field_flag)){
  2359.             h->top_mb_xy -= s->mb_stride;
  2360.         }
  2361.         if (!left_mb_field_flag == curr_mb_field_flag) {
  2362.             h->left_mb_xy[0] = pair_xy - 1;
  2363.         }
  2364.     } else if (FIELD_PICTURE) {
  2365.         h->top_mb_xy -= s->mb_stride;
  2366.     }
  2367.     return;
  2368. }
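/* Explanatory note (not in the original file): macroblocks are stored in
 * raster order with stride s->mb_stride, so mb_xy = mb_x + mb_y*s->mb_stride,
 * the MB above is mb_xy - s->mb_stride and the MB to the left is mb_xy - 1.
 * In MBAFF streams a vertical MB pair is reached through its top MB,
 * pair_xy = mb_x + (mb_y & ~1)*s->mb_stride; the FRAME_MBAFF branch above
 * compares the field/frame coding of the current pair with that of the top
 * and left pairs and, where necessary, adjusts the top/left indices to point
 * at the appropriate MB of the neighbouring pair. */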
  2369. /**
  2370.  * decodes a macroblock
  2371.  * @returns 0 if OK, AC_ERROR / DC_ERROR / MV_ERROR if an error is noticed
  2372.  */
  2373. static int decode_mb_cabac(H264Context *h) {
  2374.     MpegEncContext * const s = &h->s;
  2375.     int mb_xy;
  2376.     int mb_type, partition_count, cbp = 0;
  2377.     int dct8x8_allowed= h->pps.transform_8x8_mode;
  2378.     mb_xy = h->mb_xy = s->mb_x + s->mb_y*s->mb_stride;
  2379.     tprintf(s->avctx, "pic:%d mb:%d/%d\n", h->frame_num, s->mb_x, s->mb_y);
  2380.     if( h->slice_type_nos != FF_I_TYPE ) {
  2381.         int skip;
  2382.         /* a skipped mb needs the aff flag from the following mb */
  2383.         if( FRAME_MBAFF && s->mb_x==0 && (s->mb_y&1)==0 )
  2384.             predict_field_decoding_flag(h);
  2385.         if( FRAME_MBAFF && (s->mb_y&1)==1 && h->prev_mb_skipped )
  2386.             skip = h->next_mb_skipped;
  2387.         else
  2388.             skip = decode_cabac_mb_skip( h, s->mb_x, s->mb_y );
  2389.         /* read skip flags */
  2390.         if( skip ) {
  2391.             if( FRAME_MBAFF && (s->mb_y&1)==0 ){
  2392.                 s->current_picture.mb_type[mb_xy] = MB_TYPE_SKIP;
  2393.                 h->next_mb_skipped = decode_cabac_mb_skip( h, s->mb_x, s->mb_y+1 );
  2394.                 if(!h->next_mb_skipped)
  2395.                     h->mb_mbaff = h->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h);
  2396.             }
  2397.             decode_mb_skip(h);
  2398.             h->cbp_table[mb_xy] = 0;
  2399.             h->chroma_pred_mode_table[mb_xy] = 0;
  2400.             h->last_qscale_diff = 0;
  2401.             return 0;
  2402.         }
  2403.     }
  2404.     if(FRAME_MBAFF){
  2405.         if( (s->mb_y&1) == 0 )
  2406.             h->mb_mbaff =
  2407.             h->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h);
  2408.     }
  2409.     h->prev_mb_skipped = 0;
  2410.     compute_mb_neighbors(h);
  2411.     if( h->slice_type_nos == FF_B_TYPE ) {
  2412.         mb_type = decode_cabac_mb_type_b( h );
  2413.         if( mb_type < 23 ){
  2414.             partition_count= b_mb_type_info[mb_type].partition_count;
  2415.             mb_type=         b_mb_type_info[mb_type].type;
  2416.         }else{
  2417.             mb_type -= 23;
  2418.             goto decode_intra_mb;
  2419.         }
  2420.     } else if( h->slice_type_nos == FF_P_TYPE ) {
  2421.         if( get_cabac_noinline( &h->cabac, &h->cabac_state[14] ) == 0 ) {
  2422.             /* P-type */
  2423.             if( get_cabac_noinline( &h->cabac, &h->cabac_state[15] ) == 0 ) {
  2424.                 /* P_L0_D16x16, P_8x8 */
  2425.                 mb_type= 3 * get_cabac_noinline( &h->cabac, &h->cabac_state[16] );
  2426.             } else {
  2427.                 /* P_L0_D8x16, P_L0_D16x8 */
  2428.                 mb_type= 2 - get_cabac_noinline( &h->cabac, &h->cabac_state[17] );
  2429.             }
  2430.             partition_count= p_mb_type_info[mb_type].partition_count;
  2431.             mb_type=         p_mb_type_info[mb_type].type;
  2432.         } else {
  2433.             mb_type= decode_cabac_intra_mb_type(h, 17, 0);
  2434.             goto decode_intra_mb;
  2435.         }
  2436.     } else {
  2437.         mb_type= decode_cabac_intra_mb_type(h, 3, 1);
  2438.         if(h->slice_type == FF_SI_TYPE && mb_type)
  2439.             mb_type--;
  2440.         assert(h->slice_type_nos == FF_I_TYPE);