deblock.c
Uploaded by: hjq518
Upload date: 2021-12-09
Package size: 5084k
File size: 17k
Category: Audio
Platform: Visual C++

/*****************************************************************************
 * deblock.c: Altivec-accelerated deblocking for h264 encoder
 *****************************************************************************
 * Copyright (C) 2007-2008 Guillaume Poirier <gpoirier@mplayerhq.hu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *****************************************************************************/

#if defined SYS_LINUX
#include <altivec.h>
#endif

#include "common/common.h"
#include "ppccommon.h"
#define transpose4x16(r0, r1, r2, r3) {      \
    register vec_u8_t r4;                    \
    register vec_u8_t r5;                    \
    register vec_u8_t r6;                    \
    register vec_u8_t r7;                    \
                                             \
    r4 = vec_mergeh(r0, r2);  /*0, 2 set 0*/ \
    r5 = vec_mergel(r0, r2);  /*0, 2 set 1*/ \
    r6 = vec_mergeh(r1, r3);  /*1, 3 set 0*/ \
    r7 = vec_mergel(r1, r3);  /*1, 3 set 1*/ \
                                             \
    r0 = vec_mergeh(r4, r6);  /*all set 0*/  \
    r1 = vec_mergel(r4, r6);  /*all set 1*/  \
    r2 = vec_mergeh(r5, r7);  /*all set 2*/  \
    r3 = vec_mergel(r5, r7);  /*all set 3*/  \
}
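
/* For reference, a scalar sketch of what transpose4x16 produces (the helper
 * below is hypothetical, not part of x264): the 4x16 byte matrix held in
 * r0..r3 becomes its 16x4 transpose, packed as four consecutive 4-byte rows
 * per output vector, ready for the 4-byte-wide stores in write16x4. */
static inline void scalar_transpose4x16( const uint8_t in[4][16],
                                         uint8_t out[16][4] )
{
    for( int i = 0; i < 4; i++ )
        for( int j = 0; j < 16; j++ )
            out[j][i] = in[i][j]; /* column j of the input becomes row j */
}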
static inline void write16x4(uint8_t *dst, int dst_stride,
                             register vec_u8_t r0, register vec_u8_t r1,
                             register vec_u8_t r2, register vec_u8_t r3) {
    DECLARE_ALIGNED_16(unsigned char result[64]);
    uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
    int int_dst_stride = dst_stride/4;

    vec_st(r0, 0, result);
    vec_st(r1, 16, result);
    vec_st(r2, 32, result);
    vec_st(r3, 48, result);
    /* FIXME: there has to be a better way!!!! */
    *dst_int = *src_int;
    *(dst_int+   int_dst_stride) = *(src_int + 1);
    *(dst_int+ 2*int_dst_stride) = *(src_int + 2);
    *(dst_int+ 3*int_dst_stride) = *(src_int + 3);
    *(dst_int+ 4*int_dst_stride) = *(src_int + 4);
    *(dst_int+ 5*int_dst_stride) = *(src_int + 5);
    *(dst_int+ 6*int_dst_stride) = *(src_int + 6);
    *(dst_int+ 7*int_dst_stride) = *(src_int + 7);
    *(dst_int+ 8*int_dst_stride) = *(src_int + 8);
    *(dst_int+ 9*int_dst_stride) = *(src_int + 9);
    *(dst_int+10*int_dst_stride) = *(src_int + 10);
    *(dst_int+11*int_dst_stride) = *(src_int + 11);
    *(dst_int+12*int_dst_stride) = *(src_int + 12);
    *(dst_int+13*int_dst_stride) = *(src_int + 13);
    *(dst_int+14*int_dst_stride) = *(src_int + 14);
    *(dst_int+15*int_dst_stride) = *(src_int + 15);
}
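
/* The word copies above stage the vectors in an aligned scratch buffer and
 * then write one 4-byte row per destination line; they rely on dst_stride
 * being a multiple of 4 and on PowerPC tolerating unaligned 32-bit scalar
 * accesses. A sketch with no such assumptions (hypothetical helper, not
 * part of x264) would copy byte by byte: */
static inline void write16x4_bytewise( uint8_t *dst, int dst_stride,
                                       const uint8_t *result )
{
    for( int i = 0; i < 16; i++ )
        for( int j = 0; j < 4; j++ )
            dst[i*dst_stride + j] = result[4*i + j]; /* one 4-byte row */
}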
/** \brief performs a 6x16 transpose of data in src, and stores it to dst
    \todo FIXME: see if we can't spare some vec_lvsl() by factorizing them
    out of unaligned_load() */
#define readAndTranspose16x6(src, src_stride, r8, r9, r10, r11, r12, r13) { \
    register vec_u8_t r0, r1, r2, r3, r4, r5, r6, r7, r14, r15;            \
    VEC_LOAD(src,                  r0, 16, vec_u8_t);                      \
    VEC_LOAD(src +    src_stride,  r1, 16, vec_u8_t);                      \
    VEC_LOAD(src +  2*src_stride,  r2, 16, vec_u8_t);                      \
    VEC_LOAD(src +  3*src_stride,  r3, 16, vec_u8_t);                      \
    VEC_LOAD(src +  4*src_stride,  r4, 16, vec_u8_t);                      \
    VEC_LOAD(src +  5*src_stride,  r5, 16, vec_u8_t);                      \
    VEC_LOAD(src +  6*src_stride,  r6, 16, vec_u8_t);                      \
    VEC_LOAD(src +  7*src_stride,  r7, 16, vec_u8_t);                      \
    VEC_LOAD(src + 14*src_stride, r14, 16, vec_u8_t);                      \
    VEC_LOAD(src + 15*src_stride, r15, 16, vec_u8_t);                      \
                                                                           \
    VEC_LOAD(src + 8*src_stride,   r8, 16, vec_u8_t);                      \
    VEC_LOAD(src + 9*src_stride,   r9, 16, vec_u8_t);                      \
    VEC_LOAD(src + 10*src_stride, r10, 16, vec_u8_t);                      \
    VEC_LOAD(src + 11*src_stride, r11, 16, vec_u8_t);                      \
    VEC_LOAD(src + 12*src_stride, r12, 16, vec_u8_t);                      \
    VEC_LOAD(src + 13*src_stride, r13, 16, vec_u8_t);                      \
                                                                           \
    /*Merge first pairs*/                                                  \
    r0 = vec_mergeh(r0, r8);    /*0, 8*/                                   \
    r1 = vec_mergeh(r1, r9);    /*1, 9*/                                   \
    r2 = vec_mergeh(r2, r10);   /*2,10*/                                   \
    r3 = vec_mergeh(r3, r11);   /*3,11*/                                   \
    r4 = vec_mergeh(r4, r12);   /*4,12*/                                   \
    r5 = vec_mergeh(r5, r13);   /*5,13*/                                   \
    r6 = vec_mergeh(r6, r14);   /*6,14*/                                   \
    r7 = vec_mergeh(r7, r15);   /*7,15*/                                   \
                                                                           \
    /*Merge second pairs*/                                                 \
    r8  = vec_mergeh(r0, r4);   /*0,4, 8,12 set 0*/                        \
    r9  = vec_mergel(r0, r4);   /*0,4, 8,12 set 1*/                        \
    r10 = vec_mergeh(r1, r5);   /*1,5, 9,13 set 0*/                        \
    r11 = vec_mergel(r1, r5);   /*1,5, 9,13 set 1*/                        \
    r12 = vec_mergeh(r2, r6);   /*2,6,10,14 set 0*/                        \
    r13 = vec_mergel(r2, r6);   /*2,6,10,14 set 1*/                        \
    r14 = vec_mergeh(r3, r7);   /*3,7,11,15 set 0*/                        \
    r15 = vec_mergel(r3, r7);   /*3,7,11,15 set 1*/                        \
                                                                           \
    /*Third merge*/                                                        \
    r0 = vec_mergeh(r8, r12);   /*0,2,4,6,8,10,12,14 set 0*/               \
    r1 = vec_mergel(r8, r12);   /*0,2,4,6,8,10,12,14 set 1*/               \
    r2 = vec_mergeh(r9, r13);   /*0,2,4,6,8,10,12,14 set 2*/               \
    r4 = vec_mergeh(r10, r14);  /*1,3,5,7,9,11,13,15 set 0*/               \
    r5 = vec_mergel(r10, r14);  /*1,3,5,7,9,11,13,15 set 1*/               \
    r6 = vec_mergeh(r11, r15);  /*1,3,5,7,9,11,13,15 set 2*/               \
    /* Don't need to compute 3 and 7*/                                     \
                                                                           \
    /*Final merge*/                                                        \
    r8  = vec_mergeh(r0, r4);   /*all set 0*/                              \
    r9  = vec_mergel(r0, r4);   /*all set 1*/                              \
    r10 = vec_mergeh(r1, r5);   /*all set 2*/                              \
    r11 = vec_mergel(r1, r5);   /*all set 3*/                              \
    r12 = vec_mergeh(r2, r6);   /*all set 4*/                              \
    r13 = vec_mergel(r2, r6);   /*all set 5*/                              \
    /* Don't need to compute 14 and 15*/                                   \
}
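
/* A scalar sketch of what the macro leaves in r8..r13 (hypothetical helper,
 * not part of x264): output row k gathers byte k of each of the 16 source
 * rows, i.e. one pixel column per output vector. */
static inline void scalar_read_transpose16x6( const uint8_t *src, int src_stride,
                                              uint8_t out[6][16] )
{
    for( int k = 0; k < 6; k++ )
        for( int i = 0; i < 16; i++ )
            out[k][i] = src[i*src_stride + k];
}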
// out: o = |x-y| < a
static inline vec_u8_t diff_lt_altivec ( register vec_u8_t x,
                                         register vec_u8_t y,
                                         register vec_u8_t a) {
    register vec_u8_t diff = vec_subs(x, y);
    register vec_u8_t diffneg = vec_subs(y, x);
    register vec_u8_t o = vec_or(diff, diffneg); /* |x-y| */
    o = (vec_u8_t)vec_cmplt(o, a);
    return o;
}
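
/* Per byte lane, diff_lt_altivec computes the mask below. Since vec_subs
 * saturates at zero, one of the two subtractions is 0, so their OR is
 * exactly |x-y|. A scalar sketch (hypothetical helper, not part of x264): */
static inline uint8_t scalar_diff_lt( uint8_t x, uint8_t y, uint8_t a )
{
    uint8_t diff    = x > y ? x - y : 0; /* vec_subs(x, y) */
    uint8_t diffneg = y > x ? y - x : 0; /* vec_subs(y, x) */
    return (uint8_t)(diff | diffneg) < a ? 0xFF : 0x00; /* all-ones if |x-y| < a */
}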
static inline vec_u8_t h264_deblock_mask ( register vec_u8_t p0,
                                           register vec_u8_t p1,
                                           register vec_u8_t q0,
                                           register vec_u8_t q1,
                                           register vec_u8_t alpha,
                                           register vec_u8_t beta) {
    register vec_u8_t mask;
    register vec_u8_t tempmask;

    mask = diff_lt_altivec(p0, q0, alpha);
    tempmask = diff_lt_altivec(p1, p0, beta);
    mask = vec_and(mask, tempmask);
    tempmask = diff_lt_altivec(q1, q0, beta);
    mask = vec_and(mask, tempmask);
    return mask;
}
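
/* The mask encodes the standard H.264 filter-on condition per byte lane;
 * a scalar sketch (hypothetical helper and macro, not part of x264): */
#define SCALAR_ABS_DIFF( a, b ) ( (a) > (b) ? (a) - (b) : (b) - (a) )
static inline int scalar_deblock_mask( uint8_t p0, uint8_t p1, uint8_t q0,
                                       uint8_t q1, uint8_t alpha, uint8_t beta )
{
    return SCALAR_ABS_DIFF( p0, q0 ) < alpha  /* edge shows a visible step... */
        && SCALAR_ABS_DIFF( p1, p0 ) < beta   /* ...while each side is smooth */
        && SCALAR_ABS_DIFF( q1, q0 ) < beta;  /* enough for it to be blocking */
}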
// out: newp1 = clip((p2 + ((p0 + q0 + 1) >> 1)) >> 1, p1-tc0, p1+tc0)
static inline vec_u8_t h264_deblock_q1(register vec_u8_t p0,
                                       register vec_u8_t p1,
                                       register vec_u8_t p2,
                                       register vec_u8_t q0,
                                       register vec_u8_t tc0) {
    register vec_u8_t average = vec_avg(p0, q0);
    register vec_u8_t temp;
    register vec_u8_t unclipped;
    register vec_u8_t ones;
    register vec_u8_t max;
    register vec_u8_t min;
    register vec_u8_t newp1;

    temp = vec_xor(average, p2);
    average = vec_avg(average, p2);      /*avg(p2, avg(p0, q0)) */
    ones = vec_splat_u8(1);
    temp = vec_and(temp, ones);          /*(p2^avg(p0, q0)) & 1 */
    unclipped = vec_subs(average, temp); /*(p2+((p0+q0+1)>>1))>>1 */
    max = vec_adds(p1, tc0);
    min = vec_subs(p1, tc0);
    newp1 = vec_max(min, unclipped);
    newp1 = vec_min(max, newp1);
    return newp1;
}
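
/* vec_avg rounds up ((a+b+1)>>1), so the truncating second average is
 * recovered by subtracting the carry bit (p2 ^ avg) & 1 as done above.
 * A scalar sketch of the whole computation (hypothetical helper, not
 * part of x264): */
static inline uint8_t scalar_deblock_q1( uint8_t p0, uint8_t p1, uint8_t p2,
                                         uint8_t q0, uint8_t tc0 )
{
    int avg = ( p0 + q0 + 1 ) >> 1;            /* vec_avg(p0, q0) */
    int unclipped = ( p2 + avg ) >> 1;         /* (p2+((p0+q0+1)>>1))>>1 */
    int max = p1 + tc0 > 255 ? 255 : p1 + tc0; /* vec_adds(p1, tc0) */
    int min = p1 - tc0 < 0   ? 0   : p1 - tc0; /* vec_subs(p1, tc0) */
    return unclipped < min ? min : unclipped > max ? max : unclipped;
}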
#define h264_deblock_p0_q0(p0, p1, q0, q1, tc0masked) {                                         \
                                                                                                \
    const vec_u8_t A0v = vec_sl(vec_splat_u8(10), vec_splat_u8(4));                             \
                                                                                                \
    register vec_u8_t pq0bit = vec_xor(p0,q0);                                                  \
    register vec_u8_t q1minus;                                                                  \
    register vec_u8_t p0minus;                                                                  \
    register vec_u8_t stage1;                                                                   \
    register vec_u8_t stage2;                                                                   \
    register vec_u8_t vec160;                                                                   \
    register vec_u8_t delta;                                                                    \
    register vec_u8_t deltaneg;                                                                 \
                                                                                                \
    q1minus = vec_nor(q1, q1);                /* 255 - q1 */                                    \
    stage1 = vec_avg(p1, q1minus);            /* (p1 - q1 + 256)>>1 */                          \
    stage2 = vec_sr(stage1, vec_splat_u8(1)); /* (p1 - q1 + 256)>>2 = 64 + (p1 - q1) >> 2 */    \
    p0minus = vec_nor(p0, p0);                /* 255 - p0 */                                    \
    stage1 = vec_avg(q0, p0minus);            /* (q0 - p0 + 256)>>1 */                          \
    pq0bit = vec_and(pq0bit, vec_splat_u8(1));                                                  \
    stage2 = vec_avg(stage2, pq0bit);         /* 32 + ((q0 - p0)&1 + (p1 - q1) >> 2 + 1) >> 1 */\
    stage2 = vec_adds(stage2, stage1);        /* 160 + ((p0 - q0) + (p1 - q1) >> 2 + 1) >> 1 */ \
    vec160 = vec_ld(0, &A0v);                                                                   \
    deltaneg = vec_subs(vec160, stage2);      /* -d */                                          \
    delta = vec_subs(stage2, vec160);         /*  d */                                          \
    deltaneg = vec_min(tc0masked, deltaneg);                                                    \
    delta = vec_min(tc0masked, delta);                                                          \
    p0 = vec_subs(p0, deltaneg);                                                                \
    q0 = vec_subs(q0, delta);                                                                   \
    p0 = vec_adds(p0, delta);                                                                   \
    q0 = vec_adds(q0, deltaneg);                                                                \
}
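
/* Per the step-by-step comments above, the saturating averages around the
 * bias value 160 evaluate the H.264 spec's
 *   delta = clip3( -tc, tc, (((q0 - p0) << 2) + (p1 - q1) + 4) >> 3 )
 * entirely in unsigned bytes. A scalar sketch of the equivalent p0/q0
 * update (hypothetical helper, not part of x264): */
static inline void scalar_deblock_p0_q0( uint8_t *p0, uint8_t *q0,
                                         uint8_t p1, uint8_t q1, int tc )
{
    int delta = ( ( ( *q0 - *p0 ) << 2 ) + ( p1 - q1 ) + 4 ) >> 3;
    if( delta < -tc ) delta = -tc;
    if( delta >  tc ) delta =  tc;
    int p = *p0 + delta, q = *q0 - delta;
    *p0 = p < 0 ? 0 : p > 255 ? 255 : p; /* clip back to pixel range */
    *q0 = q < 0 ? 0 : q > 255 ? 255 : q;
}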
#define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) {            \
    DECLARE_ALIGNED_16(unsigned char temp[16]);                                              \
    register vec_u8_t alphavec;                                                              \
    register vec_u8_t betavec;                                                               \
    register vec_u8_t mask;                                                                  \
    register vec_u8_t p1mask;                                                                \
    register vec_u8_t q1mask;                                                                \
    register vec_s8_t tc0vec;                                                                \
    register vec_u8_t finaltc0;                                                              \
    register vec_u8_t tc0masked;                                                             \
    register vec_u8_t newp1;                                                                 \
    register vec_u8_t newq1;                                                                 \
                                                                                             \
    temp[0] = alpha;                                                                         \
    temp[1] = beta;                                                                          \
    alphavec = vec_ld(0, temp);                                                              \
    betavec = vec_splat(alphavec, 0x1);                                                      \
    alphavec = vec_splat(alphavec, 0x0);                                                     \
    mask = h264_deblock_mask(p0, p1, q0, q1, alphavec, betavec); /* if in block */           \
                                                                                             \
    *((int *)temp) = *((int *)tc0);                                                          \
    tc0vec = vec_ld(0, (signed char*)temp);                                                  \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                                     \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                                     \
    mask = vec_and(mask, vec_cmpgt(tc0vec, vec_splat_s8(-1)));  /* if tc0[i] >= 0 */         \
    finaltc0 = vec_and((vec_u8_t)tc0vec, mask);                 /* tc = tc0 */               \
                                                                                             \
    p1mask = diff_lt_altivec(p2, p0, betavec);                                               \
    p1mask = vec_and(p1mask, mask);                             /* if( |p2 - p0| < beta) */  \
    tc0masked = vec_and(p1mask, (vec_u8_t)tc0vec);                                           \
    finaltc0 = vec_sub(finaltc0, p1mask);                       /* tc++ */                   \
    newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked);                                      \
    /* end if */                                                                             \
                                                                                             \
    q1mask = diff_lt_altivec(q2, q0, betavec);                                               \
    q1mask = vec_and(q1mask, mask);                             /* if ( |q2 - q0| < beta ) */\
    tc0masked = vec_and(q1mask, (vec_u8_t)tc0vec);                                           \
    finaltc0 = vec_sub(finaltc0, q1mask);                       /* tc++ */                   \
    newq1 = h264_deblock_q1(p0, q1, q2, q0, tc0masked);                                      \
    /* end if */                                                                             \
                                                                                             \
    h264_deblock_p0_q0(p0, p1, q0, q1, finaltc0);                                            \
    p1 = newp1;                                                                              \
    q1 = newq1;                                                                              \
}
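
/* Entry points. The bitwise AND in the guard below is a sign-bit test:
 * the AND of four int8_t values is negative only if all four are, so the
 * filter is skipped outright only when every tc0 entry is negative;
 * individually negative entries are already zeroed per lane by the
 * vec_cmpgt mask in h264_loop_filter_luma_altivec. */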
void x264_deblock_v_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {
    if((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) {
        register vec_u8_t p2 = vec_ld(-3*stride, pix);
        register vec_u8_t p1 = vec_ld(-2*stride, pix);
        register vec_u8_t p0 = vec_ld(-1*stride, pix);
        register vec_u8_t q0 = vec_ld(0, pix);
        register vec_u8_t q1 = vec_ld(stride, pix);
        register vec_u8_t q2 = vec_ld(2*stride, pix);
        h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0);
        vec_st(p1, -2*stride, pix);
        vec_st(p0, -1*stride, pix);
        vec_st(q0, 0, pix);
        vec_st(q1, stride, pix);
    }
}
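
/* For a vertical edge the six samples p2..q2 lie along each row, so the
 * horizontal variant loads 16 rows starting at pix-3, transposes them into
 * one vector per pixel column, filters in place, then transposes the four
 * modified vectors back and stores them as 4-byte rows. */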
void x264_deblock_h_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {
    register vec_u8_t line0, line1, line2, line3, line4, line5;
    if((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0)
        return;
    PREP_LOAD;
    readAndTranspose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5);
    h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0);
    transpose4x16(line1, line2, line3, line4);
    write16x4(pix-2, stride, line1, line2, line3, line4);
}