yuv2rgb.c
Uploaded by: zhongxx05
Upload date: 2007-06-06
Package size: 33641k
File size: 179k
Source category:

Symbian

Development platform:

C/C++
  1. /* ***** BEGIN LICENSE BLOCK ***** 
  2.  * Version: RCSL 1.0/RPSL 1.0 
  3.  *  
  4.  * Portions Copyright (c) 1995-2002 RealNetworks, Inc. All Rights Reserved. 
  5.  *      
  6.  * The contents of this file, and the files included with this file, are 
  7.  * subject to the current version of the RealNetworks Public Source License 
  8.  * Version 1.0 (the "RPSL") available at 
  9.  * http://www.helixcommunity.org/content/rpsl unless you have licensed 
  10.  * the file under the RealNetworks Community Source License Version 1.0 
  11.  * (the "RCSL") available at http://www.helixcommunity.org/content/rcsl, 
  12.  * in which case the RCSL will apply. You may also obtain the license terms 
  13.  * directly from RealNetworks.  You may not use this file except in 
  14.  * compliance with the RPSL or, if you have a valid RCSL with RealNetworks 
  15.  * applicable to this file, the RCSL.  Please see the applicable RPSL or 
  16.  * RCSL for the rights, obligations and limitations governing use of the 
  17.  * contents of the file.  
  18.  *  
  19.  * This file is part of the Helix DNA Technology. RealNetworks is the 
  20.  * developer of the Original Code and owns the copyrights in the portions 
  21.  * it created. 
  22.  *  
  23.  * This file, and the files included with this file, is distributed and made 
  24.  * available on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 
  25.  * EXPRESS OR IMPLIED, AND REALNETWORKS HEREBY DISCLAIMS ALL SUCH WARRANTIES, 
  26.  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, FITNESS 
  27.  * FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
  28.  * 
  29.  * Technology Compatibility Kit Test Suite(s) Location: 
  30.  *    http://www.helixcommunity.org/content/tck 
  31.  * 
  32.  * Contributor(s): 
  33.  *  
  34.  * ***** END LICENSE BLOCK ***** */ 
  35. /*** #includes: ********************************************/
  36. #include "env.h"
  37. #include "rgb.h"    /* basic RGB-data definitions & macros */
  38. #include "yuv.h"    /* YUV-to-RGB conversion tables & macros */
  39. #include "clip.h"   /* macros for clipping & dithering */
  40. #include "scale.h"  /* scale algorithms */
  41. #include "colorlib.h" /* ensure that prototypes get extern C'ed */
  42. #ifdef _MACINTOSH
  43. #pragma require_prototypes off
  44. #endif
  45. static int YUVtoRGB2 (
  46.     int dest_format,
  47.     unsigned char *dest_ptr, int dest_width, int dest_height,
  48.     int dest_pitch, int dest_x, int dest_y, int dest_dx, int dest_dy,
  49.     unsigned char *pY, unsigned char *pU, unsigned char *pV,
  50.     int src_width, int src_height, int yPitch, int uPitch, int vPitch,
  51.     int src_x, int src_y, int src_dx, int src_dy);
  52. /*** Additional pixel-level macros: ************************/
  53. /*
  54.  * Add dither, clip and assign values to RGB pixels:
  55.  */
  56. #define RGBX_CLIP_X(f,rnd,x,v)  (CLIP(rnd,BITS(f,x),v) << START(f,x))
  57. #define RGBX_CLIP_SET(f,rnd,a,r,g,b) 
  58.     a##_rgb = RGBX_CLIP_X(f,rnd,R,r) | RGBX_CLIP_X(f,rnd,G,g) | RGBX_CLIP_X(f,rnd,B,b)
  59. #define RGB32_CLIP_SET(rnd,a,r,g,b)  RGBX_CLIP_SET(RGB32,rnd,a,r,g,b)
  60. #define BGR32_CLIP_SET(rnd,a,r,g,b)  RGBX_CLIP_SET(BGR32,rnd,a,r,g,b)
  61. #define RGB24_CLIP_SET(rnd,a,r,g,b)  
  62.     a##_b = CLIP(rnd,8,b), a##_g = CLIP(rnd,8,g), a##_r = CLIP(rnd,8,r)
  63. #define RGB565_CLIP_SET(rnd,a,r,g,b) RGBX_CLIP_SET(RGB565,rnd,a,r,g,b)
  64. #define RGB555_CLIP_SET(rnd,a,r,g,b) RGBX_CLIP_SET(RGB555,rnd,a,r,g,b)
  65. #define RGB444_CLIP_SET(rnd,a,r,g,b) RGBX_CLIP_SET(RGB444,rnd,a,r,g,b)
  66. #define RGB8_CLIP_SET(rnd,a,r,g,b)   
  67.     a##_idx = pmap[(CLIP(rnd,4,r)<<8) | (CLIP(rnd,4,g)<<4) | CLIP(rnd,4,b)]
  68. /*
  69.  * Generic RGB clipping & assignment macro:
  70.  */
  71. #define CLIP_SET(f,rnd,a,r,g,b)      f##_CLIP_SET(rnd,a,r,g,b)
  72. /*
  73.  * YUV 2x1-block load and convert macros:
  74.  */
  75. #define YUV_LOAD_CONVERT_2x1_FAST(df,a1,a2,sy1,sy2,su,sv)   
  76.     {                                                       
  77.         register int y1, y2, rv, guv, bu;                   
  78.         bu = butab[su[0]];                                  
  79.         guv = gutab[su[0]] + gvtab[sv[0]];                  
  80.         rv = rvtab[sv[0]];                                  
  81.         y1 = ytab[sy1[0]];                                  
  82.         y2 = ytab[sy2[0]];                                  
  83.         CLIP_SET(df,ROUND,a1,y1+rv,y1+guv,y1+bu);           
  84.         CLIP_SET(df,ROUND,a2,y2+rv,y2+guv,y2+bu);           
  85.     }
  86. /* with Hue rotation: */
  87. #define YUV_LOAD_CONVERT_2x1_FULL(df,a1,a2,sy1,sy2,su,sv)   
  88.     {                                                       
  89.         register int y1, y2, ruv, guv, buv;                 
  90.         buv = butab[su[0]] + bvtab[sv[0]];                  
  91.         guv = gutab[su[0]] + gvtab[sv[0]];                  
  92.         ruv = rutab[su[0]] + rvtab[sv[0]];                  
  93.         y1 = ytab[sy1[0]];                                  
  94.         y2 = ytab[sy2[0]];                                  
  95.         CLIP_SET(df,ROUND,a1,y1+ruv,y1+guv,y1+buv);         
  96.         CLIP_SET(df,ROUND,a2,y2+ruv,y2+guv,y2+buv);         
  97.     }
  98. /*
  99.  * Generic YUV 2x1-block load & convert macro:
  100.  */
  101. #define YUV_LOAD_CONVERT_2x1(cc,df,a1,a2,sy1,sy2,su,sv)  
  102.     YUV_LOAD_CONVERT_2x1_##cc(df,a1,a2,sy1,sy2,su,sv)
  103. /*
  104.  * YUV 2x2-block load and convert macros:
  105.  * (without dithering)
  106.  */
  107. #define YUV_LOAD_CONVERT_2x2_FAST(df,a11,a12,a21,a22,sy1,sy2,su,sv) 
  108.     {                                                       
  109.         register int y11, y12, y21, y22, rv, guv, bu;       
  110.         bu = butab[su[0]];                                  
  111.         guv = gutab[su[0]] + gvtab[sv[0]];                  
  112.         rv = rvtab[sv[0]];                                  
  113.         y11 = ytab[sy1[0]];                                 
  114.         y21 = ytab[sy2[0]];                                 
  115.         y12 = ytab[sy1[1]];                                 
  116.         y22 = ytab[sy2[1]];                                 
  117.         CLIP_SET(df,ROUND,a11,y11+rv,y11+guv,y11+bu);       
  118.         CLIP_SET(df,ROUND,a21,y21+rv,y21+guv,y21+bu);       
  119.         CLIP_SET(df,ROUND,a12,y12+rv,y12+guv,y12+bu);       
  120.         CLIP_SET(df,ROUND,a22,y22+rv,y22+guv,y22+bu);       
  121.     }
  122. /* with Hue rotation: */
  123. #define YUV_LOAD_CONVERT_2x2_FULL(df,a11,a12,a21,a22,sy1,sy2,su,sv) 
  124.     {                                                       
  125.         register int y11, y12, y21, y22, ruv, guv, buv;     
  126.         buv = butab[su[0]] + bvtab[sv[0]];                  
  127.         guv = gutab[su[0]] + gvtab[sv[0]];                  
  128.         ruv = rutab[su[0]] + rvtab[sv[0]];                  
  129.         y11 = ytab[sy1[0]];                                 
  130.         y21 = ytab[sy2[0]];                                 
  131.         y12 = ytab[sy1[1]];                                 
  132.         y22 = ytab[sy2[1]];                                 
  133.         CLIP_SET(df,ROUND,a11,y11+ruv,y11+guv,y11+buv);     
  134.         CLIP_SET(df,ROUND,a21,y21+ruv,y21+guv,y21+buv);     
  135.         CLIP_SET(df,ROUND,a12,y12+ruv,y12+guv,y12+buv);     
  136.         CLIP_SET(df,ROUND,a22,y22+ruv,y22+guv,y22+buv);     
  137.     }
  138. /*
  139.  * Generic YUV 2x1-block load & convert macro:
  140.  */
  141. #define YUV_LOAD_CONVERT_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv) 
  142.     YUV_LOAD_CONVERT_2x2_##cc(df,a11,a12,a21,a22,sy1,sy2,su,sv)
  143. /*
  144.  * YUV 2x2-block load and convert macros:
  145.  *  (adds symmetric 2x2 dither noise)
  146.  */
  147. #define YUV_LOAD_CONVERT_DITHER_2x2_FAST(df,a11,a12,a21,a22,sy1,sy2,su,sv) 
  148.     {                                                       
  149.         register int y11, y12, y21, y22, rv, guv, bu;       
  150.         bu = butab[su[0]];                                  
  151.         guv = gutab[su[0]] + gvtab[sv[0]];                  
  152.         rv = rvtab[sv[0]];                                  
  153.         y11 = ytab[sy1[0]];                                 
  154.         y21 = ytab[sy2[0]];                                 
  155.         y12 = ytab[sy1[1]];                                 
  156.         y22 = ytab[sy2[1]];                                 
  157.         CLIP_SET(df,HIGH,a11,y11+rv,y11+guv,y11+bu);        
  158.         CLIP_SET(df,LOW ,a21,y21+rv,y21+guv,y21+bu);        
  159.         CLIP_SET(df,LOW ,a12,y12+rv,y12+guv,y12+bu);        
  160.         CLIP_SET(df,HIGH,a22,y22+rv,y22+guv,y22+bu);        
  161.     }
  162. /* with Hue rotation: */
  163. #define YUV_LOAD_CONVERT_DITHER_2x2_FULL(df,a11,a12,a21,a22,sy1,sy2,su,sv) 
  164.     {                                                       
  165.         register int y11, y12, y21, y22, ruv, guv, buv;     
  166.         buv = butab[su[0]] + bvtab[sv[0]];                  
  167.         guv = gutab[su[0]] + gvtab[sv[0]];                  
  168.         ruv = rutab[su[0]] + rvtab[sv[0]];                  
  169.         y11 = ytab[sy1[0]];                                 
  170.         y21 = ytab[sy2[0]];                                 
  171.         y12 = ytab[sy1[1]];                                 
  172.         y22 = ytab[sy2[1]];                                 
  173.         CLIP_SET(df,HIGH,a11,y11+ruv,y11+guv,y11+buv);      
  174.         CLIP_SET(df,LOW ,a21,y21+ruv,y21+guv,y21+buv);      
  175.         CLIP_SET(df,LOW ,a12,y12+ruv,y12+guv,y12+buv);      
  176.         CLIP_SET(df,HIGH,a22,y22+ruv,y22+guv,y22+buv);      
  177.     }
  178. /*
  179.  * Generic YUV 2x1-block load & convert macro:
  180.  */
  181. #define YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv) 
  182.     YUV_LOAD_CONVERT_DITHER_2x2_##cc(df,a11,a12,a21,a22,sy1,sy2,su,sv)
  183. /*
  184.  * Generic YUV load-convert-store macros:
  185.  */
  186. #define YUV_LOAD_CONVERT_STORE_2x1(cc,df,d1,d2,sy1,sy2,su,sv) 
  187.     {                                                       
  188.         PIXEL(df,a1); PIXEL(df,a2);                         
  189.         YUV_LOAD_CONVERT_2x1(cc,df,a1,a2,sy1,sy2,su,sv);    
  190.         sy1++; sy2++; su++; sv++;                           
  191.         STORE(df,d1,a1);                                    
  192.         d1+=BPP(df);                                        
  193.         STORE(df,d2,a2);                                    
  194.         d2+=BPP(df);                                        
  195.     }
  196. #define YUV_LOAD_CONVERT_STORE_2x2(cc,df,d1,d2,sy1,sy2,su,sv) 
  197.     {                                                       
  198.         PIXEL(df,a11); PIXEL(df,a12);                       
  199.         PIXEL(df,a21); PIXEL(df,a22);                       
  200.         YUV_LOAD_CONVERT_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  201.         sy1+=2; sy2+=2; su++; sv++;                         
  202.         STORE(df,d1,a11);                                   
  203.         STORE(df,d1+BPP(df),a12);                           
  204.         d1+=2*BPP(df);                                      
  205.         STORE(df,d2,a21);                                   
  206.         STORE(df,d2+BPP(df),a22);                           
  207.         d2+=2*BPP(df);                                      
  208.     }
  209. #define YUV_LOAD_CONVERT_DITHER_STORE_2x2(cc,df,d1,d2,sy1,sy2,su,sv) 
  210.     {                                                       
  211.         PIXEL(df,a11); PIXEL(df,a12);                       
  212.         PIXEL(df,a21); PIXEL(df,a22);                       
  213.         YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  214.         sy1+=2; sy2+=2; su++; sv++;                         
  215.         STORE(df,d1,a11);                                   
  216.         STORE(df,d1+BPP(df),a12);                           
  217.         d1+=2*BPP(df);                                      
  218.         STORE(df,d2,a21);                                   
  219.         STORE(df,d2+BPP(df),a22);                           
  220.         d2+=2*BPP(df);                                      
  221.     }
  222. /*
  223.  * Generic YUV load-convert-average-store macros:
  224.  *  [d1],[d2] = convert([s1],[s2]);
  225.  *  [d01] = ([d0]+[d1])/2;
  226.  *  [d12] = ([d1]+[d2])/2;
  227.  */
  228. #define YUV_LOAD_CONVERT_AVERAGE_STORE_2x1(cc,df,d0,d01,d1,d12,d2,sy1,sy2,su,sv) 
  229.     {                                                       
  230.         PIXEL(df,a1); PIXEL(df,a2);                         
  231.         YUV_LOAD_CONVERT_2x1(cc,df,a1,a2,sy1,sy2,su,sv);    
  232.         sy1++; sy2++; su++; sv++;                           
  233.         STORE(df,d1,a1);                                    
  234.         d1+=BPP(df);                                        
  235.         STORE(df,d2,a2);                                    
  236.         d2+=BPP(df);                                        
  237.         AVERAGE(df,a2,a1,a2);                               
  238.         LOAD_AVERAGE(df,a1,a1,d0);                          
  239.         d0+=BPP(df);                                        
  240.         STORE(df,d01,a1);                                   
  241.         d01+=BPP(df);                                       
  242.         STORE(df,d12,a2);                                   
  243.         d12+=BPP(df);                                       
  244.     }
  245. #define YUV_LOAD_CONVERT_AVERAGE_STORE_2x2(cc,df,d0,d01,d1,d12,d2,sy1,sy2,su,sv) 
  246.     {                                                       
  247.         PIXEL(df,a11); PIXEL(df,a12);                       
  248.         PIXEL(df,a21); PIXEL(df,a22);                       
  249.         YUV_LOAD_CONVERT_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  250.         sy1+=2; sy2+=2; su++; sv++;                         
  251.         STORE(df,d1,a11);                                   
  252.         STORE(df,d1+BPP(df),a12);                           
  253.         d1+=2*BPP(df);                                      
  254.         STORE(df,d2,a21);                                   
  255.         STORE(df,d2+BPP(df),a22);                           
  256.         d2+=2*BPP(df);                                      
  257.         AVERAGE(df,a21,a11,a21);                            
  258.         AVERAGE(df,a22,a12,a22);                            
  259.         LOAD_AVERAGE(df,a11,a11,d0);                        
  260.         LOAD_AVERAGE(df,a12,a12,d0+BPP(df));                
  261.         d0+=2*BPP(df);                                      
  262.         STORE(df,d01,a11);                                  
  263.         STORE(df,d01+BPP(df),a12);                          
  264.         d01+=2*BPP(df);                                     
  265.         STORE(df,d12,a21);                                  
  266.         STORE(df,d12+BPP(df),a22);                          
  267.         d12+=2*BPP(df);                                     
  268.     }
  269. #define YUV_LOAD_CONVERT_AVERAGE_DITHER_STORE_2x2(cc,df,d0,d01,d1,d12,d2,sy1,sy2,su,sv) 
  270.     {                                                       
  271.         PIXEL(df,a11); PIXEL(df,a12);                       
  272.         PIXEL(df,a21); PIXEL(df,a22);                       
  273.         YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  274.         sy1+=2; sy2+=2; su++; sv++;                         
  275.         STORE(df,d1,a11);                                   
  276.         STORE(df,d1+BPP(df),a12);                           
  277.         d1+=2*BPP(df);                                      
  278.         STORE(df,d2,a21);                                   
  279.         STORE(df,d2+BPP(df),a22);                           
  280.         d2+=2*BPP(df);                                      
  281.         AVERAGE(df,a21,a11,a21);                            
  282.         AVERAGE(df,a22,a12,a22);                            
  283.         LOAD_AVERAGE(df,a11,a11,d0);                        
  284.         LOAD_AVERAGE(df,a12,a12,d0+BPP(df));                
  285.         d0+=2*BPP(df);                                      
  286.         STORE(df,d01,a11);                                  
  287.         STORE(df,d01+BPP(df),a12);                          
  288.         d01+=2*BPP(df);                                     
  289.         STORE(df,d12,a21);                                  
  290.         STORE(df,d12+BPP(df),a22);                          
  291.         d12+=2*BPP(df);                                     
  292.     }
  293. /*** Generic YUVtoRGB double-row converters: ***************/
  294. /*
  295.  * Generic YUVtoRGB double-row shrinking converter:
  296.  *  uses read-ahead optimization to process full 2x2 blocks
  297.  *  whenever possible.
  298.  */
  299. #define DBLROW_SHRINK(cc,df,d1,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) 
  300.     {                                                       
  301.         /* initialize local variables: */                   
  302.         register int count = dest_dx;                       
  303.         register int limit = src_dx >> 1; /* -1 */          
  304.         register int step = dest_dx;                        
  305.         /* check row length: */                             
  306.         if (count) {                                        
  307.             /* check if we have an odd first block: */      
  308.             if (src_x & 1)                                  
  309.                 goto start_odd;                             
  310.             /* process even pixels: */                      
  311.             do {                                            
  312.                 PIXEL(df,a11); PIXEL(df,a12);               
  313.                 PIXEL(df,a21); PIXEL(df,a22);               
  314.                 /* make one Bresenham step ahead: */        
  315.                 if ((limit -= step) < 0) {                  
  316.                     limit += src_dx;                        
  317.                     /* can we process 2x2 pixels? */        
  318.                     if (!--count)                           
  319.                         goto last_pixel;                    
  320.                     /* process full 2x2 block: */           
  321.                     YUV_LOAD_CONVERT_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  322.                     sy1+=2; sy2+=2; su++; sv++;             
  323.                     STORE(df,d1,a11);                       
  324.                     STORE(df,d1+BPP(df),a12);               
  325.                     d1+=2*BPP(df);                          
  326.                     STORE(df,d2,a21);                       
  327.                     STORE(df,d2+BPP(df),a22);               
  328.                     d2+=2*BPP(df);                          
  329.                 } else {                                    
  330.                     /* proc. first 2x1 block & skip next: */
  331.                     YUV_LOAD_CONVERT_2x1(cc,df,a11,a21,sy1,sy2,su,sv); 
  332.                     sy1+=2; sy2+=2; su++; sv++;             
  333.                     STORE(df,d1,a11);                       
  334.                     d1+=BPP(df);                            
  335.                     STORE(df,d2,a21);                       
  336.                     d2+=BPP(df);                            
  337.                 }                                           
  338.                 /* inverted Bresenham stepping: */          
  339.                 while ((limit -= step) >= 0) {              
  340.                     /* skip next even source pixel: */      
  341.                     sy1++; sy2++;                           
  342.                     if ((limit -= step) < 0)                
  343.                         goto cont_odd;                      
  344.                     /* skip odd source pixel: */            
  345.                     sy1++; sy2++;                           
  346.                     su++; sv++; /* next chroma: */          
  347.                 }                                           
  348. cont_even:      /* continue loop with next even pixel: */   
  349.                 limit += src_dx;                            
  350.             } while (--count);                              
  351.             goto done;                                      
  352. last_pixel: /* use this branch to process last pixel:*/     
  353.             count++;                                        
  354. start_odd:  /* process odd pixels: */                       
  355.             do {                                            
  356.                 PIXEL(df,a11); PIXEL(df,a21);               
  357.                 YUV_LOAD_CONVERT_2x1(cc,df,a11,a21,sy1,sy2,su,sv); 
  358.                 STORE(df,d1,a11);                           
  359.                 d1+=BPP(df);                                
  360.                 STORE(df,d2,a21);                           
  361.                 d2+=BPP(df);                                
  362.                 /* inverted Bresenham stepping: */          
  363.                 do {                                        
  364.                     /* skip odd source pixel: */            
  365.                     sy1++; sy2++;                           
  366.                     su++; sv++; /* next chroma: */          
  367.                     if ((limit -= step) < 0)                
  368.                         goto cont_even;                     
  369.                     /* skip even source pixel: */           
  370.                     sy1++; sy2++;                           
  371.                 } while ((limit -= step) >= 0);             
  372. cont_odd:       limit += src_dx;                            
  373.             } while (--count);                              
  374. done:       ;                                               
  375.         }                                                   
  376.     }
  377. /*
  378.  * Generic YUVtoRGB double-row copy converter:
  379.  */
  380. #define DBLROW_COPY(cc,df,d1,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) 
  381.     {                                                       
  382.         register int count = dest_dx;                       
  383.         /* convert first 2x1 block: */                      
  384.         if ((src_x & 1) && count) {                         
  385.             YUV_LOAD_CONVERT_STORE_2x1(cc,df,d1,d2,sy1,sy2,su,sv); 
  386.             count--;                                        
  387.         }                                                   
  388.         /* convert all integral 2x2 blocks: */              
  389.         while (count >= 2) {                                
  390.             YUV_LOAD_CONVERT_DITHER_STORE_2x2(cc,df,d1,d2,sy1,sy2,su,sv); 
  391.             count -= 2;                                     
  392.         }                                                   
  393.         /* convert last 2x1 block: */                       
  394.         if (count) {                                        
  395.             YUV_LOAD_CONVERT_STORE_2x1(cc,df,d1,d2,sy1,sy2,su,sv); 
  396.         }                                                   
  397.     }
  398. /*
  399.  * Generic YUVtoRGB double row stretching converter:
  400.  */
  401. #define DBLROW_STRETCH(cc,df,d1,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) 
  402.     {                                                       
  403.         /* initialize local variables: */                   
  404.         register int count = dest_dx;                       
  405.         register int limit = dest_dx >> 1; /* !!! */        
  406.         register int step = src_dx;                         
  407.         /* # of pixels to be processed separately: */       
  408.         int remainder = dest_dx - limit;                    
  409.         if ((src_x + src_dx) & 1) remainder += dest_dx;     
  410.         remainder /= step;                                  
  411.         /* check row length: */                             
  412.         if (count) {                                        
  413.             PIXEL(df,a11); PIXEL(df,a12);                   
  414.             PIXEL(df,a21); PIXEL(df,a22);                   
  415.             /* update count: */                             
  416.             if ((count -= remainder) <= 0)                  
  417.                 goto convert_last;                          
  418.             /* check if we have an odd first block: */      
  419.             if (src_x & 1) {                                
  420.                 /* convert first 2x1 block: */              
  421.                 YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); 
  422.                 sy1++; sy2++; su++; sv++;                   
  423.                 goto rep_odd;                               
  424.             }                                               
  425.             /* the main loop: */                            
  426.             while (1) {                                     
  427.                 /* load & convert next 2x2 pixels: */       
  428.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  429.                 sy1+=2; sy2+=2; su++; sv++;                 
  430.                 /* replicate even pixels: */                
  431.                 do {                                        
  432.                     STORE(df,d1,a11);                       
  433.                     d1+=BPP(df);                            
  434.                     STORE(df,d2,a21);                       
  435.                     d2+=BPP(df);                            
  436.                     if (!(--count))                         
  437.                         goto rep_last;                      
  438.                 } while ((limit -= step) >= 0);             
  439.                 limit += dest_dx;                           
  440. rep_odd:        /* replicate odd pixels: */                 
  441.                 do {                                        
  442.                     STORE(df,d1,a12);                       
  443.                     d1+=BPP(df);                            
  444.                     STORE(df,d2,a22);                       
  445.                     d2+=BPP(df);                            
  446.                     if (!(--count))                         
  447.                         goto check_last;                    
  448.                 } while ((limit -= step) >= 0);             
  449.                 limit += dest_dx;                           
  450.             }                                               
  451. check_last: /* check if we need to convert one more pixel:*/
  452.             if ((src_x + src_dx) & 1) {                     
  453. convert_last:   /* last 2x1 block: */                       
  454.                 YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); 
  455.             }                                               
  456.             /* restore the number of remaining pixels: */   
  457. rep_last:   count += remainder;                             
  458.             while (count --) {                              
  459.                 /* replicate them: */                       
  460.                 STORE(df,d1,a12);                           
  461.                 d1+=BPP(df);                                
  462.                 STORE(df,d2,a22);                           
  463.                 d2+=BPP(df);                                
  464.             }                                               
  465.         }                                                   
  466.     }
  467. /*
  468.  * Generic row 2x-stretching converter:
  469.  */
  470. #define DBLROW_STRETCH2X(cc,df,d1,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) 
  471.     {                                                       
  472.         /* initialize local variables: */                   
  473.         register int count = src_dx;                        
  474.         /* check row length: */                             
  475.         if (count) {                                        
  476.             PIXEL(df,a11); PIXEL(df,a12);                   
  477.             PIXEL(df,a21); PIXEL(df,a22);                   
  478.             /* check if we have an odd or single pixel: */  
  479.             if ((src_x & 1) || count < 2) {                 
  480.                 /* process first 2x1 block: */              
  481.                 YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); 
  482.                 sy1++; sy2++; su++; sv++;                   
  483.                 STORE(df,d1,a12);                           
  484.                 STORE(df,d2,a22);                           
  485.                 d1 += BPP(df);                              
  486.                 d2 += BPP(df);                              
  487.                 count -= 1;                                 
  488.             } else {                                        
  489.                 /* process first 2x2 block: */              
  490.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  491.                 sy1+=2; sy2+=2; su++; sv++;                 
  492.                 STORE(df,d1,a11);                           
  493.                 STORE(df,d2,a21);                           
  494.                 /* calculate & store half-pixels: */        
  495.                 AVERAGE(df,a11,a11,a12);                    
  496.                 AVERAGE(df,a21,a21,a22);                    
  497.                 STORE(df,d1+BPP(df),a11);                   
  498.                 STORE(df,d1+2*BPP(df),a12);                 
  499.                 STORE(df,d2+BPP(df),a21);                   
  500.                 STORE(df,d2+2*BPP(df),a22);                 
  501.                 d1 += 3*BPP(df);                            
  502.                 d2 += 3*BPP(df);                            
  503.                 count -= 2;                                 
  504.             }                                               
  505.             /* process all internal 4x2 blocks: */          
  506.             while (count >= 4) {                            
  507.                 /* process second 2x2 block: */             
  508.                 PIXEL(df,a13); PIXEL(df,a23);               
  509.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a13,a21,a23,sy1,sy2,su,sv); 
  510.                 sy1+=2; sy2+=2; su++; sv++;                 
  511.                 /* calculate & store first half-pixels: */  
  512.                 AVERAGE(df,a12,a12,a11);                    
  513.                 AVERAGE(df,a22,a22,a21);                    
  514.                 STORE(df,d1+0*BPP(df),a12);                 
  515.                 STORE(df,d1+1*BPP(df),a11);                 
  516.                 STORE(df,d2+0*BPP(df),a22);                 
  517.                 STORE(df,d2+1*BPP(df),a21);                 
  518.                 /* calculate & store second half-pixels: */ 
  519.                 AVERAGE(df,a11,a11,a13);                    
  520.                 AVERAGE(df,a21,a21,a23);                    
  521.                 STORE(df,d1+2*BPP(df),a11);                 
  522.                 STORE(df,d1+3*BPP(df),a13);                 
  523.                 STORE(df,d2+2*BPP(df),a21);                 
  524.                 STORE(df,d2+3*BPP(df),a23);                 
  525.                 /* process third 2x2 block: */              
  526.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  527.                 sy1+=2; sy2+=2; su++; sv++;                 
  528.                 /* calculate & store third half-pixels: */  
  529.                 AVERAGE(df,a13,a13,a11);                    
  530.                 AVERAGE(df,a23,a23,a21);                    
  531.                 STORE(df,d1+4*BPP(df),a13);                 
  532.                 STORE(df,d1+5*BPP(df),a11);                 
  533.                 STORE(df,d2+4*BPP(df),a23);                 
  534.                 STORE(df,d2+5*BPP(df),a21);                 
  535.                 /* calculate & store fourth half-pixels: */ 
  536.                 AVERAGE(df,a11,a11,a12);                    
  537.                 AVERAGE(df,a21,a21,a22);                    
  538.                 STORE(df,d1+6*BPP(df),a11);                 
  539.                 STORE(df,d1+7*BPP(df),a12);                 
  540.                 STORE(df,d2+6*BPP(df),a21);                 
  541.                 STORE(df,d2+7*BPP(df),a22);                 
  542.                 d1 += 8*BPP(df);                            
  543.                 d2 += 8*BPP(df);                            
  544.                 count -= 4;                                 
  545.             }                                               
  546.             /* check if we have one more 2x2 block: */      
  547.             if (count >= 2) {                               
  548.                 /* process last 2x2 block: */               
  549.                 PIXEL(df,a13); PIXEL(df,a23);               
  550.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a13,a21,a23,sy1,sy2,su,sv); 
  551.                 sy1+=2; sy2+=2; su++; sv++;                 
  552.                 /* calculate & store first half-pixels: */  
  553.                 AVERAGE(df,a12,a12,a11);                    
  554.                 AVERAGE(df,a22,a22,a21);                    
  555.                 STORE(df,d1+0*BPP(df),a12);                 
  556.                 STORE(df,d1+1*BPP(df),a11);                 
  557.                 STORE(df,d2+0*BPP(df),a22);                 
  558.                 STORE(df,d2+1*BPP(df),a21);                 
  559.                 /* calculate & store second half-pixels: */ 
  560.                 AVERAGE(df,a11,a11,a13);                    
  561.                 AVERAGE(df,a21,a21,a23);                    
  562.                 STORE(df,d1+2*BPP(df),a11);                 
  563.                 STORE(df,d1+3*BPP(df),a13);                 
  564.                 STORE(df,d2+2*BPP(df),a21);                 
  565.                 STORE(df,d2+3*BPP(df),a23);                 
  566.                 /* move last converted pixels to a12/22: */ 
  567.                 COPY(df,a12,a13);                           
  568.                 COPY(df,a22,a23);                           
  569.                 d1 += 4*BPP(df);                            
  570.                 d2 += 4*BPP(df);                            
  571.                 count -= 2;                                 
  572.             }                                               
  573.             /* check if we have one more 2x1 block: */      
  574.             if (count >= 1) {                               
  575.                 /* process last 2x1 block: */               
  576.                 YUV_LOAD_CONVERT_2x1(cc,df,a11,a21,sy1,sy2,su,sv); 
  577.                 /* calculate & store last half-pixels: */   
  578.                 AVERAGE(df,a12,a12,a11);                    
  579.                 AVERAGE(df,a22,a22,a21);                    
  580.                 STORE(df,d1+0*BPP(df),a12);                 
  581.                 STORE(df,d1+1*BPP(df),a11);                 
  582.                 STORE(df,d1+2*BPP(df),a11);                 
  583.                 STORE(df,d2+0*BPP(df),a22);                 
  584.                 STORE(df,d2+1*BPP(df),a21);                 
  585.                 STORE(df,d2+2*BPP(df),a21);                 
  586.             } else {                                        
  587.                 /* just replicate last pixels: */           
  588.                 STORE(df,d1,a12);                           
  589.                 STORE(df,d2,a22);                           
  590.             }                                               
  591.         }                                                   
  592.     }
/*
 * Generic row 2x+ stretching converter:
 *  "???" comments mean that under normal conditions these jumps
 *  should never be executed; nevertheless, I left these checks
 *  in place to guarantee the correct termination of the algorithm
 *  in all possible scenarios.
 */
  600. #define DBLROW_STRETCH2XPLUS(cc,df,d1,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) 
  601.     {                                                       
  602.         /* initialize local variables: */                   
  603.         register int count = dest_dx;                       
  604.         register int limit = dest_dx >> 1; /* !!! */        
  605.         register int step = src_dx << 1;  /* !!! */         
  606.         /* # of half-pixels to be processed separately: */  
  607.         int remainder = 3*dest_dx - limit;                  
  608.         if ((src_x + src_dx) & 1) remainder += 2*dest_dx;   
  609.         remainder /= step;                                  
  610.         /* check row length: */                             
  611.         if (count) {                                        
  612.             PIXEL(df,a11); PIXEL(df,a12);                   
  613.             PIXEL(df,a21); PIXEL(df,a22);                   
  614.             PIXEL(df,a13); PIXEL(df,a23);                   
  615.             /* check if an odd or single 2x1 block: */      
  616.             if ((src_x & 1) || src_dx < 2) {                
  617.                 /* convert first 2x1 block: */              
  618.                 YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); 
  619.                 sy1++; sy2++; su++; sv++;                   
  620.                 /* update count: */                         
  621.                 if ((count -= remainder) <= 0)              
  622.                     goto rep_last;                          
  623.                 goto rep_odd;                               
  624.             } else {                                        
  625.                 /* convert first 2x2 block: */              
  626.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  627.                 sy1+=2; sy2+=2; su++; sv++;                 
  628.                 /* update count: */                         
  629.                 if ((count -= remainder) <= 0)              
  630.                     goto rep_last_2;        /* ??? */       
  631.                 goto rep_even;                              
  632.             }                                               
  633.             /* the main loop: */                            
  634.             while (1) {                                     
  635.                 /* load & convert second 2x2 block: */      
  636.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a13,a21,a23,sy1,sy2,su,sv); 
  637.                 sy1+=2; sy2+=2; su++; sv++;                 
  638.                 /* calc. & replicate first half-pixels: */  
  639.                 AVERAGE(df,a12,a12,a11);                    
  640.                 AVERAGE(df,a22,a22,a21);                    
  641.                 do {                                        
  642.                     STORE(df,d1,a12);                       
  643.                     d1+=BPP(df);                            
  644.                     STORE(df,d2,a22);                       
  645.                     d2+=BPP(df);                            
  646.                     if (!(--count))                         
  647.                         goto rep_last;      /* ??? */       
  648.                 } while ((limit -= step) >= 0);             
  649.                 limit += dest_dx;                           
  650.                 /* replicate second even integral pixels: */
  651.                 do {                                        
  652.                     STORE(df,d1,a11);                       
  653.                     d1+=BPP(df);                            
  654.                     STORE(df,d2,a21);                       
  655.                     d2+=BPP(df);                            
  656.                     if (!(--count))                         
  657.                         goto rep_last_2;    /* ??? */       
  658.                 } while ((limit -= step) >= 0);             
  659.                 limit += dest_dx;                           
  660.                 /* calc. & replicate second half-pixels: */ 
  661.                 AVERAGE(df,a11,a11,a13);                    
  662.                 AVERAGE(df,a21,a21,a23);                    
  663.                 do {                                        
  664.                     STORE(df,d1,a11);                       
  665.                     d1+=BPP(df);                            
  666.                     STORE(df,d2,a21);                       
  667.                     d2+=BPP(df);                            
  668.                     if (!(--count))                         
  669.                         goto rep_last_3;    /* !!! */       
  670.                 } while ((limit -= step) >= 0);             
  671.                 limit += dest_dx;                           
  672.                 /* replicate second odd integral pixels: */ 
  673.                 do {                                        
  674.                     STORE(df,d1,a13);                       
  675.                     d1+=BPP(df);                            
  676.                     STORE(df,d2,a23);                       
  677.                     d2+=BPP(df);                            
  678.                     if (!(--count))                         
  679.                         goto last_pixel_2;  /* !!! */       
  680.                 } while ((limit -= step) >= 0);             
  681.                 limit += dest_dx;                           
  682.                 /* load & convert third 2x2 block: */       
  683.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  684.                 sy1+=2; sy2+=2; su++; sv++;                 
  685.                 /* calc. & replicate third half-pixels: */  
  686.                 AVERAGE(df,a13,a13,a11);                    
  687.                 AVERAGE(df,a23,a23,a21);                    
  688.                 do {                                        
  689.                     STORE(df,d1,a13);                       
  690.                     d1+=BPP(df);                            
  691.                     STORE(df,d2,a23);                       
  692.                     d2+=BPP(df);                            
  693.                     if (!(--count))                         
  694.                         goto rep_last_3;    /* ??? */       
  695.                 } while ((limit -= step) >= 0);             
  696.                 limit += dest_dx;                           
  697. rep_even:       /* replicate third even integral pixels: */ 
  698.                 do {                                        
  699.                     STORE(df,d1,a11);                       
  700.                     d1+=BPP(df);                            
  701.                     STORE(df,d2,a21);                       
  702.                     d2+=BPP(df);                            
  703.                     if (!(--count))                         
  704.                         goto rep_last_2;    /* ??? */       
  705.                 } while ((limit -= step) >= 0);             
  706.                 limit += dest_dx;                           
  707.                 /* calc. & replicate fourth half-pixels: */ 
  708.                 AVERAGE(df,a11,a11,a12);                    
  709.                 AVERAGE(df,a21,a21,a22);                    
  710.                 do {                                        
  711.                     STORE(df,d1,a11);                       
  712.                     d1+=BPP(df);                            
  713.                     STORE(df,d2,a21);                       
  714.                     d2+=BPP(df);                            
  715.                     if (!(--count))                         
  716.                         goto rep_last;      /* !!! */       
  717.                 } while ((limit -= step) >= 0);             
  718.                 limit += dest_dx;                           
  719. rep_odd:        /* replicate third odd integral pixels: */  
  720.                 do {                                        
  721.                     STORE(df,d1,a12);                       
  722.                     d1+=BPP(df);                            
  723.                     STORE(df,d2,a22);                       
  724.                     d2+=BPP(df);                            
  725.                     if (!(--count))                         
  726.                         goto last_pixel;    /* !!! */       
  727.                 } while ((limit -= step) >= 0);             
  728.                 limit += dest_dx;                           
  729.             }                                               
  730. last_pixel_2:/* store last integral pixels in a11/21: */    
  731.             COPY(df,a11,a13);                               
  732.             COPY(df,a21,a23);                               
  733. last_pixel: /* check if we need to convert one more pixel:*/
  734.             if ((src_x + src_dx) & 1) {                     
  735.                 /* update count & remainder: */             
  736.                 register int r2 = remainder >> 1;           
  737.                 count += r2; remainder -= r2;               
  738.                 if (count <= 0)                             
  739.                     goto rep_last;                          
  740.                 /* load & convert last 2x1 block: */        
  741.                 YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); 
  742.                 /* calc. & replicate last half-pixels: */   
  743.                 AVERAGE(df,a11,a11,a12);                    
  744.                 AVERAGE(df,a21,a21,a22);                    
  745.                 do {                                        
  746.                     STORE(df,d1,a11);                       
  747.                     d1+=BPP(df);                            
  748.                     STORE(df,d2,a21);                       
  749.                     d2+=BPP(df);                            
  750.                     if (!(--count))                         
  751.                         goto rep_last;      /* !!! */       
  752.                 } while ((limit -= step) >= 0);             
  753.             }                                               
  754.             goto rep_last;                                  
  755. rep_last_3: /* store last converted pixels in a12/22: */    
  756.             COPY(df,a12,a13);                               
  757.             COPY(df,a22,a23);                               
  758.             goto rep_last;                                  
  759. rep_last_2: /* store last converted pixels in a12/22: */    
  760.             COPY(df,a12,a11);                               
  761.             COPY(df,a22,a21);                               
  762.             /* restore the number of remaining pixels: */   
  763. rep_last:   count += remainder;                             
  764.             while (count --) {                              
  765.                 /* replicate them: */                       
  766.                 STORE(df,d1,a12);                           
  767.                 d1+=BPP(df);                                
  768.                 STORE(df,d2,a22);                           
  769.                 d2+=BPP(df);                                
  770.             }                                               
  771.         }                                                   
  772.     }
/*** Generic YUVtoRGB double-row 2x converters: ************/
/*
 * Generic YUVtoRGB double-row shrinking converter:
 *  uses read-ahead optimization to process full 2x2 blocks
 *  whenever possible.
 */
  779. #define DBLROW2X_SHRINK(cc,df,d0,d01,d1,d12,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) 
  780.     {                                                       
  781.         /* initialize local variables: */                   
  782.         register int count = dest_dx;                       
  783.         register int limit = src_dx >> 1; /* -1 */          
  784.         register int step = dest_dx;                        
  785.         /* check row length: */                             
  786.         if (count) {                                        
  787.             /* check if we have an odd first block: */      
  788.             if (src_x & 1)                                  
  789.                 goto start_odd;                             
  790.             /* process even pixels: */                      
  791.             do {                                            
  792.                 PIXEL(df,a11); PIXEL(df,a12);               
  793.                 PIXEL(df,a21); PIXEL(df,a22);               
  794.                 /* make one Bresenham step ahead: */        
  795.                 if ((limit -= step) < 0) {                  
  796.                     limit += src_dx;                        
  797.                     /* can we process 2x2 pixels? */        
  798.                     if (!--count)                           
  799.                         goto last_pixel;                    
  800.                     /* process full 2x2 block: */           
  801.                     YUV_LOAD_CONVERT_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  802.                     sy1+=2; sy2+=2; su++; sv++;             
  803.                     STORE(df,d1,a11);                       
  804.                     STORE(df,d1+BPP(df),a12);               
  805.                     d1+=2*BPP(df);                          
  806.                     STORE(df,d2,a21);                       
  807.                     STORE(df,d2+BPP(df),a22);               
  808.                     d2+=2*BPP(df);                          
  809.                     /* process average pixels: */           
  810.                     AVERAGE(df,a21,a11,a21);                
  811.                     AVERAGE(df,a22,a12,a22);                
  812.                     LOAD_AVERAGE(df,a11,a11,d0);            
  813.                     LOAD_AVERAGE(df,a12,a12,d0+BPP(df));    
  814.                     d0+=2*BPP(df);                          
  815.                     STORE(df,d01,a11);                      
  816.                     STORE(df,d01+BPP(df),a12);              
  817.                     d01+=2*BPP(df);                         
  818.                     STORE(df,d12,a21);                      
  819.                     STORE(df,d12+BPP(df),a22);              
  820.                     d12+=2*BPP(df);                         
  821.                 } else {                                    
  822.                     /* proc. first 2x1 block & skip next: */
  823.                     YUV_LOAD_CONVERT_2x1(cc,df,a11,a21,sy1,sy2,su,sv); 
  824.                     sy1+=2; sy2+=2; su++; sv++;             
  825.                     STORE(df,d1,a11);                       
  826.                     d1+=BPP(df);                            
  827.                     STORE(df,d2,a21);                       
  828.                     d2+=BPP(df);                            
  829.                     /* process average pixels: */           
  830.                     AVERAGE(df,a21,a11,a21);                
  831.                     LOAD_AVERAGE(df,a11,a11,d0);            
  832.                     d0+=BPP(df);                            
  833.                     STORE(df,d01,a11);                      
  834.                     d01+=BPP(df);                           
  835.                     STORE(df,d12,a21);                      
  836.                     d12+=BPP(df);                           
  837.                 }                                           
  838.                 /* inverted Bresenham stepping: */          
  839.                 while ((limit -= step) >= 0) {              
  840.                     /* skip next even source pixel: */      
  841.                     sy1++; sy2++;                           
  842.                     if ((limit -= step) < 0)                
  843.                         goto cont_odd;                      
  844.                     /* skip odd source pixel: */            
  845.                     sy1++; sy2++;                           
  846.                     su++; sv++; /* next chroma: */          
  847.                 }                                           
  848. cont_even:      /* continue loop with next even pixel: */   
  849.                 limit += src_dx;                            
  850.             } while (--count);                              
  851.             goto done;                                      
  852. last_pixel: /* use this branch to process last pixel:*/     
  853.             count++;                                        
  854. start_odd:  /* process odd pixels: */                       
  855.             do {                                            
  856.                 /* convert 2x1 block: */                    
  857.                 PIXEL(df,a11); PIXEL(df,a21);               
  858.                 YUV_LOAD_CONVERT_2x1(cc,df,a11,a21,sy1,sy2,su,sv); 
  859.                 STORE(df,d1,a11);                           
  860.                 d1+=BPP(df);                                
  861.                 STORE(df,d2,a21);                           
  862.                 d2+=BPP(df);                                
  863.                 /* process average pixels: */               
  864.                 AVERAGE(df,a21,a11,a21);                    
  865.                 LOAD_AVERAGE(df,a11,a11,d0);                
  866.                 d0+=BPP(df);                                
  867.                 STORE(df,d01,a11);                          
  868.                 d01+=BPP(df);                               
  869.                 STORE(df,d12,a21);                          
  870.                 d12+=BPP(df);                               
  871.                 /* inverted Bresenham stepping: */          
  872.                 do {                                        
  873.                     /* skip odd source pixel: */            
  874.                     sy1++; sy2++;                           
  875.                     su++; sv++; /* next chroma: */          
  876.                     if ((limit -= step) < 0)                
  877.                         goto cont_even;                     
  878.                     /* skip even source pixel: */           
  879.                     sy1++; sy2++;                           
  880.                 } while ((limit -= step) >= 0);             
  881. cont_odd:       limit += src_dx;                            
  882.             } while (--count);                              
  883. done:       ;                                               
  884.         }                                                   
  885.     }
/*
 * Generic YUVtoRGB double-row copy converter:
 */
  889. #define DBLROW2X_COPY(cc,df,d0,d01,d1,d12,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) 
  890.     {                                                       
  891.         register int count = dest_dx;                       
  892.         /* convert first 2x1 block: */                      
  893.         if ((src_x & 1) && count) {                         
  894.             YUV_LOAD_CONVERT_AVERAGE_STORE_2x1(cc,df,d0,d01,d1,d12,d2,sy1,sy2,su,sv); 
  895.             count--;                                        
  896.         }                                                   
  897.         /* convert all integral 2x2 blocks: */              
  898.         while (count >= 2) {                                
  899.             YUV_LOAD_CONVERT_AVERAGE_DITHER_STORE_2x2(cc,df,d0,d01,d1,d12,d2,sy1,sy2,su,sv); 
  900.             count -= 2;                                     
  901.         }                                                   
  902.         /* convert last 2x1 block: */                       
  903.         if (count) {                                        
  904.             YUV_LOAD_CONVERT_AVERAGE_STORE_2x1(cc,df,d0,d01,d1,d12,d2,sy1,sy2,su,sv); 
  905.         }                                                   
  906.     }
/*
 * Generic YUVtoRGB double-row stretching converter:
 */
  910. #define DBLROW2X_STRETCH(cc,df,d0,d01,d1,d12,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) 
  911.     {                                                       
  912.         /* initialize local variables: */                   
  913.         register int count = dest_dx;                       
  914.         register int limit = dest_dx >> 1; /* !!! */        
  915.         register int step = src_dx;                         
  916.         /* # of pixels to be processed separately: */       
  917.         int remainder = dest_dx - limit;                    
  918.         if ((src_x + src_dx) & 1) remainder += dest_dx;     
  919.         remainder /= step;                                  
  920.         /* check row length: */                             
  921.         if (count) {                                        
  922.             PIXEL(df,a11); PIXEL(df,a12);                   
  923.             PIXEL(df,a21); PIXEL(df,a22);                   
  924.             PIXEL(df,a01x);PIXEL(df,a12x);                  
  925.             /* update count: */                             
  926.             if ((count -= remainder) <= 0)                  
  927.                 goto convert_last;                          
  928.             /* check if we have an odd first block: */      
  929.             if (src_x & 1) {                                
  930.                 /* convert first 2x1 block: */              
  931.                 YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); 
  932.                 sy1++; sy2++; su++; sv++;                   
  933.                 goto rep_odd;                               
  934.             }                                               
  935.             /* the main loop: */                            
  936.             while (1) {                                     
  937.                 /* load & convert next 2x2 pixels: */       
  938.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  939.                 sy1+=2; sy2+=2; su++; sv++;                 
  940.                 /* average and replicate even pixels: */    
  941.                 LOAD_AVERAGE(df,a01x,a11,d0);               
  942.                 AVERAGE(df,a12x,a11,a21);                   
  943.                 do {                                        
  944.                     d0+=BPP(df);                            
  945.                     STORE(df,d01,a01x);                     
  946.                     d01+=BPP(df);                           
  947.                     STORE(df,d1,a11);                       
  948.                     d1+=BPP(df);                            
  949.                     STORE(df,d12,a12x);                     
  950.                     d12+=BPP(df);                           
  951.                     STORE(df,d2,a21);                       
  952.                     d2+=BPP(df);                            
  953.                     if (!(--count))                         
  954.                         goto rep_last;                      
  955.                 } while ((limit -= step) >= 0);             
  956.                 limit += dest_dx;                           
  957. rep_odd:        /* average & replicate odd pixels: */       
  958.                 LOAD_AVERAGE(df,a01x,a12,d0);               
  959.                 AVERAGE(df,a12x,a12,a22);                   
  960.                 do {                                        
  961.                     d0+=BPP(df);                            
  962.                     STORE(df,d01,a01x);                     
  963.                     d01+=BPP(df);                           
  964.                     STORE(df,d1,a12);                       
  965.                     d1+=BPP(df);                            
  966.                     STORE(df,d12,a12x);                     
  967.                     d12+=BPP(df);                           
  968.                     STORE(df,d2,a22);                       
  969.                     d2+=BPP(df);                            
  970.                     if (!(--count))                         
  971.                         goto check_last;                    
  972.                 } while ((limit -= step) >= 0);             
  973.                 limit += dest_dx;                           
  974.             }                                               
  975. check_last: /* check if we need to convert one more pixel:*/
  976.             if ((src_x + src_dx) & 1) {                     
  977. convert_last:   /* last 2x1 block: */                       
  978.                 YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); 
  979.                 /* calc. average pixels: */                 
  980.                 LOAD_AVERAGE(df,a01x,a12,d0);               
  981.                 AVERAGE(df,a12x,a12,a22);                   
  982.             }                                               
  983.             /* restore the number of remaining pixels: */   
  984. rep_last:   count += remainder;                             
  985.             while (count --) {                              
  986.                 /* replicate them: */                       
  987.                 STORE(df,d01,a01x);                         
  988.                 d01+=BPP(df);                               
  989.                 STORE(df,d1,a12);                           
  990.                 d1+=BPP(df);                                
  991.                 STORE(df,d12,a12x);                         
  992.                 d12+=BPP(df);                               
  993.                 STORE(df,d2,a22);                           
  994.                 d2+=BPP(df);                                
  995.             }                                               
  996.         }                                                   
  997.     }
  998. /*
  999.  * Generic row 2x-stretching converter:
  1000.  */
  1001. #define DBLROW2X_STRETCH2X(cc,df,d0,d01,d1,d12,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) 
  1002.     {                                                       
  1003.         /* initialize local variables: */                   
  1004.         register int count = src_dx;                        
  1005.         /* check row length: */                             
  1006.         if (count) {                                        
  1007.             PIXEL(df,a011);PIXEL(df,a012);                  
  1008.             PIXEL(df,a11); PIXEL(df,a12);                   
  1009.             PIXEL(df,a121);PIXEL(df,a122);                  
  1010.             PIXEL(df,a21); PIXEL(df,a22);                   
  1011.             /* check if we have an odd or single pixel: */  
  1012.             if ((src_x & 1) || count < 2) {                 
  1013.                 /* process first 2x1 block: */              
  1014.                 YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); 
  1015.                 sy1++; sy2++; su++; sv++;                   
  1016.                 STORE(df,d1+0*BPP(df),a12);                 
  1017.                 STORE(df,d2+0*BPP(df),a22);                 
  1018.                 /* process vertical half-pixels: */         
  1019.                 LOAD_AVERAGE(df,a012,a12,d0);               
  1020.                 STORE(df,d01+0*BPP(df),a012);               
  1021.                 AVERAGE(df,a122,a12,a22);                   
  1022.                 STORE(df,d12+0*BPP(df),a122);               
  1023.                 /* shift pointers: */                       
  1024.                 d0  += BPP(df);                             
  1025.                 d01 += BPP(df);                             
  1026.                 d1  += BPP(df);                             
  1027.                 d12 += BPP(df);                             
  1028.                 d2  += BPP(df);                             
  1029.                 count -= 1;                                 
  1030.             } else {                                         
  1031.                 /* process first 2x2 block: */              
  1032.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  1033.                 sy1+=2; sy2+=2; su++; sv++;                 
  1034.                 STORE(df,d1+0*BPP(df),a11);                 
  1035.                 STORE(df,d2+0*BPP(df),a21);                 
  1036.                 STORE(df,d1+2*BPP(df),a12);                 
  1037.                 STORE(df,d2+2*BPP(df),a22);                 
  1038.                 /* process vertical half-pixels: */         
  1039.                 LOAD_AVERAGE(df,a011,a11,d0);               
  1040.                 STORE(df,d01+0*BPP(df),a011);               
  1041.                 AVERAGE(df,a121,a11,a21);                   
  1042.                 STORE(df,d12+0*BPP(df),a121);               
  1043.                 LOAD_AVERAGE(df,a012,a12,d0+2*BPP(df));     
  1044.                 STORE(df,d01+2*BPP(df),a012);               
  1045.                 AVERAGE(df,a122,a12,a22);                   
  1046.                 STORE(df,d12+2*BPP(df),a122);               
  1047.                 /* process horisontal half-pixels: */       
  1048.                 AVERAGE(df,a011,a011,a012);                 
  1049.                 STORE(df,d01+1*BPP(df),a011);               
  1050.                 AVERAGE(df,a11,a11,a12);                    
  1051.                 STORE(df,d1+1*BPP(df),a11);                 
  1052.                 AVERAGE(df,a121,a121,a122);                 
  1053.                 STORE(df,d12+1*BPP(df),a121);               
  1054.                 AVERAGE(df,a21,a21,a22);                    
  1055.                 STORE(df,d2+1*BPP(df),a21);                 
  1056.                 /* shift pointers: */                       
  1057.                 d0  += 3*BPP(df);                           
  1058.                 d01 += 3*BPP(df);                           
  1059.                 d1  += 3*BPP(df);                           
  1060.                 d12 += 3*BPP(df);                           
  1061.                 d2  += 3*BPP(df);                           
  1062.                 count -= 2;                                 
  1063.             }                                               
  1064.             /* process all internal 4x2 blocks: */          
  1065.             while (count >= 4) {                            
  1066.                 /* process second 2x2 block: */             
  1067.                 PIXEL(df,a013); PIXEL(df,a13);              
  1068.                 PIXEL(df,a123); PIXEL(df,a23);              
  1069.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a13,a21,a23,sy1,sy2,su,sv); 
  1070.                 sy1+=2; sy2+=2; su++; sv++;                 
  1071.                 STORE(df,d1+1*BPP(df),a11);                 
  1072.                 STORE(df,d2+1*BPP(df),a21);                 
  1073.                 STORE(df,d1+3*BPP(df),a13);                 
  1074.                 STORE(df,d2+3*BPP(df),a23);                 
  1075.                 /* process vertical half-pixels: */         
  1076.                 LOAD_AVERAGE(df,a011,a11,d0+1*BPP(df));     
  1077.                 STORE(df,d01+1*BPP(df),a011);               
  1078.                 AVERAGE(df,a121,a11,a21);                   
  1079.                 STORE(df,d12+1*BPP(df),a121);               
  1080.                 LOAD_AVERAGE(df,a013,a13,d0+3*BPP(df));     
  1081.                 STORE(df,d01+3*BPP(df),a013);               
  1082.                 AVERAGE(df,a123,a13,a23);                   
  1083.                 STORE(df,d12+3*BPP(df),a123);               
  1084.                 /* process horisontal half-pixels: */       
  1085.                 AVERAGE(df,a012,a012,a011);                 
  1086.                 STORE(df,d01+0*BPP(df),a012);               
  1087.                 AVERAGE(df,a12,a12,a11);                    
  1088.                 STORE(df,d1+0*BPP(df),a12);                 
  1089.                 AVERAGE(df,a122,a122,a121);                 
  1090.                 STORE(df,d12+0*BPP(df),a122);               
  1091.                 AVERAGE(df,a22,a22,a21);                    
  1092.                 STORE(df,d2+0*BPP(df),a22);                 
  1093.                 AVERAGE(df,a011,a011,a013);                 
  1094.                 STORE(df,d01+2*BPP(df),a011); /*!!!*/       
  1095.                 AVERAGE(df,a11,a11,a13);                    
  1096.                 STORE(df,d1+2*BPP(df),a11);                 
  1097.                 AVERAGE(df,a121,a121,a123);                 
  1098.                 STORE(df,d12+2*BPP(df),a121); /*!!!*/       
  1099.                 AVERAGE(df,a21,a21,a23);                    
  1100.                 STORE(df,d2+2*BPP(df),a21);                 
  1101.                 /* process third 2x2 block: */              
  1102.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  1103.                 sy1+=2; sy2+=2; su++; sv++;                 
  1104.                 STORE(df,d1+5*BPP(df),a11);                 
  1105.                 STORE(df,d2+5*BPP(df),a21);                 
  1106.                 STORE(df,d1+7*BPP(df),a12);                 
  1107.                 STORE(df,d2+7*BPP(df),a22);                 
  1108.                 /* process vertical half-pixels: */         
  1109.                 LOAD_AVERAGE(df,a011,a11,d0+5*BPP(df));     
  1110.                 STORE(df,d01+5*BPP(df),a011);               
  1111.                 AVERAGE(df,a121,a11,a21);                   
  1112.                 STORE(df,d12+5*BPP(df),a121);               
  1113.                 LOAD_AVERAGE(df,a012,a12,d0+7*BPP(df));     
  1114.                 STORE(df,d01+7*BPP(df),a012);               
  1115.                 AVERAGE(df,a122,a12,a22);                   
  1116.                 STORE(df,d12+7*BPP(df),a122);               
  1117.                 /* process horisontal half-pixels: */       
  1118.                 AVERAGE(df,a013,a013,a011);                 
  1119.                 STORE(df,d01+4*BPP(df),a013);               
  1120.                 AVERAGE(df,a13,a13,a11);                    
  1121.                 STORE(df,d1+4*BPP(df),a13);                 
  1122.                 AVERAGE(df,a123,a123,a121);                 
  1123.                 STORE(df,d12+4*BPP(df),a123);               
  1124.                 AVERAGE(df,a23,a23,a21);                    
  1125.                 STORE(df,d2+4*BPP(df),a23);                 
  1126.                 AVERAGE(df,a011,a011,a012);                 
  1127.                 STORE(df,d01+6*BPP(df),a011);               
  1128.                 AVERAGE(df,a11,a11,a12);                    
  1129.                 STORE(df,d1+6*BPP(df),a11);                 
  1130.                 AVERAGE(df,a121,a121,a122);                 
  1131.                 STORE(df,d12+6*BPP(df),a121);               
  1132.                 AVERAGE(df,a21,a21,a22);                    
  1133.                 STORE(df,d2+6*BPP(df),a21);                 
  1134.                 /* shift pointers: */                       
  1135.                 d0  += 8*BPP(df);                           
  1136.                 d01 += 8*BPP(df);                           
  1137.                 d1  += 8*BPP(df);                           
  1138.                 d12 += 8*BPP(df);                           
  1139.                 d2  += 8*BPP(df);                           
  1140.                 count -= 4;                                 
  1141.             }                                               
  1142.             /* check if we have one more 2x2 block: */      
  1143.             if (count >= 2) {                               
  1144.                 /* process last 2x2 block: */               
  1145.                 PIXEL(df,a013); PIXEL(df,a13);              
  1146.                 PIXEL(df,a123); PIXEL(df,a23);              
  1147.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a13,a21,a23,sy1,sy2,su,sv); 
  1148.                 sy1+=2; sy2+=2; su++; sv++;                 
  1149.                 STORE(df,d1+1*BPP(df),a11);                 
  1150.                 STORE(df,d2+1*BPP(df),a21);                 
  1151.                 STORE(df,d1+3*BPP(df),a13);                 
  1152.                 STORE(df,d2+3*BPP(df),a23);                 
  1153.                 /* process vertical half-pixels: */         
  1154.                 LOAD_AVERAGE(df,a011,a11,d0+1*BPP(df));     
  1155.                 STORE(df,d01+1*BPP(df),a011);               
  1156.                 AVERAGE(df,a121,a11,a21);                   
  1157.                 STORE(df,d12+1*BPP(df),a121);               
  1158.                 LOAD_AVERAGE(df,a013,a13,d0+3*BPP(df));     
  1159.                 STORE(df,d01+3*BPP(df),a013);               
  1160.                 AVERAGE(df,a123,a13,a23);                   
  1161.                 STORE(df,d12+3*BPP(df),a123);               
  1162.                 /* process horisontal half-pixels: */       
  1163.                 AVERAGE(df,a012,a012,a011);                 
  1164.                 STORE(df,d01+0*BPP(df),a012);               
  1165.                 AVERAGE(df,a12,a12,a11);                    
  1166.                 STORE(df,d1+0*BPP(df),a12);                 
  1167.                 AVERAGE(df,a122,a122,a121);                 
  1168.                 STORE(df,d12+0*BPP(df),a122);               
  1169.                 AVERAGE(df,a22,a22,a21);                    
  1170.                 STORE(df,d2+0*BPP(df),a22);                 
  1171.                 AVERAGE(df,a011,a011,a013);                 
  1172.                 STORE(df,d01+2*BPP(df),a011); /*!!!*/       
  1173.                 AVERAGE(df,a11,a11,a13);                    
  1174.                 STORE(df,d1+2*BPP(df),a11);                 
  1175.                 AVERAGE(df,a121,a121,a123);                 
  1176.                 STORE(df,d12+2*BPP(df),a121); /*!!!*/       
  1177.                 AVERAGE(df,a21,a21,a23);                    
  1178.                 STORE(df,d2+2*BPP(df),a21);                 
  1179.                 /* move last converted pixels to a12/22: */ 
  1180.                 COPY(df,a012,a013);                         
  1181.                 COPY(df,a12,a13);                           
  1182.                 COPY(df,a122,a123);                         
  1183.                 COPY(df,a22,a23);                           
  1184.                 /* shift pointers: */                       
  1185.                 d0  += 4*BPP(df);                           
  1186.                 d01 += 4*BPP(df);                           
  1187.                 d1  += 4*BPP(df);                           
  1188.                 d12 += 4*BPP(df);                           
  1189.                 d2  += 4*BPP(df);                           
  1190.                 count -= 2;                                 
  1191.             }                                               
  1192.             /* check if we have one more 2x1 block: */      
  1193.             if (count >= 1) {                               
  1194.                 /* process last 2x1 block: */               
  1195.                 YUV_LOAD_CONVERT_2x1(cc,df,a11,a21,sy1,sy2,su,sv); 
  1196.                 STORE(df,d1+1*BPP(df),a11);                 
  1197.                 STORE(df,d1+2*BPP(df),a11);                 
  1198.                 STORE(df,d2+1*BPP(df),a21);                 
  1199.                 STORE(df,d2+2*BPP(df),a21);                 
  1200.                 /* process vertical half-pixels: */         
  1201.                 LOAD_AVERAGE(df,a011,a11,d0+1*BPP(df));     
  1202.                 STORE(df,d01+1*BPP(df),a011);               
  1203.                 STORE(df,d01+2*BPP(df),a011);               
  1204.                 AVERAGE(df,a121,a11,a21);                   
  1205.                 STORE(df,d12+1*BPP(df),a121);               
  1206.                 STORE(df,d12+2*BPP(df),a121);               
  1207.                 /* process horisontal half-pixels: */       
  1208.                 AVERAGE(df,a012,a012,a011);                 
  1209.                 STORE(df,d01+0*BPP(df),a012);               
  1210.                 AVERAGE(df,a12,a12,a11);                    
  1211.                 STORE(df,d1+0*BPP(df),a12);                 
  1212.                 AVERAGE(df,a122,a122,a121);                 
  1213.                 STORE(df,d12+0*BPP(df),a122);               
  1214.                 AVERAGE(df,a22,a22,a21);                    
  1215.                 STORE(df,d2+0*BPP(df),a22);                 
  1216.             } else {                                        
  1217.                 /* just replicate last column: */           
  1218.                 STORE(df,d01,a012);                         
  1219.                 STORE(df,d1,a12);                           
  1220.                 STORE(df,d12,a122);                         
  1221.                 STORE(df,d2,a22);                           
  1222.             }                                               
  1223.         }                                                   
  1224.     }
  1225. /*
  1226.  * Generic row 2x+ stretching converter:
  1227.  *  "???" comments mean that under normal conditions these jumps
  1228.  *  should never be executed; nevertheless, I left these checks
  1229.  *  in place to guarantee the correct termination of the algorithm
  1230.  *  in all possible scenarios.
  1231.  */
  1232. #define DBLROW2X_STRETCH2XPLUS(cc,df,d0,d01,d1,d12,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) 
  1233.     {                                                       
  1234.         /* initialize local variables: */                   
  1235.         register int count = dest_dx;                       
  1236.         register int limit = dest_dx >> 1; /* !!! */        
  1237.         register int step = src_dx << 1;  /* !!! */         
  1238.         /* # of half-pixels to be processed separately: */  
  1239.         int remainder = 3*dest_dx - limit;                  
  1240.         if ((src_x + src_dx) & 1) remainder += 2*dest_dx;   
  1241.         remainder /= step;                                  
  1242.         /* check row length: */                             
  1243.         if (count) {                                        
  1244.             PIXEL(df,a11); PIXEL(df,a12);                   
  1245.             PIXEL(df,a21); PIXEL(df,a22);                   
  1246.             PIXEL(df,a13); PIXEL(df,a23);                   
  1247.             PIXEL(df,a01x);PIXEL(df,a12x);                  
  1248.             /* check if an odd or single 2x1 block: */      
  1249.             if ((src_x & 1) || src_dx < 2) {                
  1250.                 /* convert first 2x1 block: */              
  1251.                 YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); 
  1252.                 sy1++; sy2++; su++; sv++;                   
  1253.                 /* update count: */                         
  1254.                 if ((count -= remainder) <= 0)              
  1255.                     goto rep_last;                          
  1256.                 goto rep_odd;                               
  1257.             } else {                                        
  1258.                 /* convert first 2x2 block: */              
  1259.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  1260.                 sy1+=2; sy2+=2; su++; sv++;                 
  1261.                 /* update count: */                         
  1262.                 if ((count -= remainder) <= 0)              
  1263.                     goto rep_last_2;        /* ??? */       
  1264.                 goto rep_even;                              
  1265.             }                                               
  1266.             /* the main loop (a11,a12-last conv.pixels): */ 
  1267.             while (1) {                                     
  1268.                 /* load & convert second 2x2 block: */      
  1269.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a13,a21,a23,sy1,sy2,su,sv); 
  1270.                 sy1+=2; sy2+=2; su++; sv++;                 
  1271.                 /* calc. & replicate first half-pixels: */  
  1272.                 AVERAGE(df,a12,a12,a11);                    
  1273.                 LOAD_AVERAGE(df,a01x,a12,d0);               
  1274.                 AVERAGE(df,a22,a22,a21);                    
  1275.                 AVERAGE(df,a12x,a12,a22);                   
  1276.                 do {                                        
  1277.                     d0+=BPP(df);                            
  1278.                     STORE(df,d01,a01x);                     
  1279.                     d01+=BPP(df);                           
  1280.                     STORE(df,d1,a12);                       
  1281.                     d1+=BPP(df);                            
  1282.                     STORE(df,d12,a12x);                     
  1283.                     d12+=BPP(df);                           
  1284.                     STORE(df,d2,a22);                       
  1285.                     d2+=BPP(df);                            
  1286.                     if (!(--count))                         
  1287.                         goto rep_last;      /* ??? */       
  1288.                 } while ((limit -= step) >= 0);             
  1289.                 limit += dest_dx;                           
  1290.                 /* get vertical half-pixels:*/              
  1291.                 LOAD_AVERAGE(df,a01x,a11,d0);               
  1292.                 AVERAGE(df,a12x,a11,a21);                   
  1293.                 /* replicate second even integral pixels: */
  1294.                 do {                                        
  1295.                     d0+=BPP(df);                            
  1296.                     STORE(df,d01,a01x);                     
  1297.                     d01+=BPP(df);                           
  1298.                     STORE(df,d1,a11);                       
  1299.                     d1+=BPP(df);                            
  1300.                     STORE(df,d12,a12x);                     
  1301.                     d12+=BPP(df);                           
  1302.                     STORE(df,d2,a21);                       
  1303.                     d2+=BPP(df);                            
  1304.                     if (!(--count))                         
  1305.                         goto rep_last_2;    /* ??? */       
  1306.                 } while ((limit -= step) >= 0);             
  1307.                 limit += dest_dx;                           
  1308.                 /* calc. & replicate second half-pixels: */ 
  1309.                 AVERAGE(df,a11,a11,a13);                    
  1310.                 LOAD_AVERAGE(df,a01x,a11,d0);               
  1311.                 AVERAGE(df,a21,a21,a23);                    
  1312.                 AVERAGE(df,a12x,a11,a21);                   
  1313.                 do {                                        
  1314.                     d0+=BPP(df);                            
  1315.                     STORE(df,d01,a01x);                     
  1316.                     d01+=BPP(df);                           
  1317.                     STORE(df,d1,a11);                       
  1318.                     d1+=BPP(df);                            
  1319.                     STORE(df,d12,a12x);                     
  1320.                     d12+=BPP(df);                           
  1321.                     STORE(df,d2,a21);                       
  1322.                     d2+=BPP(df);                            
  1323.                     if (!(--count))                         
  1324.                         goto rep_last_3;    /* !!! */       
  1325.                 } while ((limit -= step) >= 0);             
  1326.                 limit += dest_dx;                           
  1327.                 /* get vertical half-pixels:*/              
  1328.                 LOAD_AVERAGE(df,a01x,a13,d0);               
  1329.                 AVERAGE(df,a12x,a13,a23);                   
  1330.                 /* replicate second odd integral pixels: */ 
  1331.                 do {                                        
  1332.                     d0+=BPP(df);                            
  1333.                     STORE(df,d01,a01x);                     
  1334.                     d01+=BPP(df);                           
  1335.                     STORE(df,d1,a13);                       
  1336.                     d1+=BPP(df);                            
  1337.                     STORE(df,d12,a12x);                     
  1338.                     d12+=BPP(df);                           
  1339.                     STORE(df,d2,a23);                       
  1340.                     d2+=BPP(df);                            
  1341.                     if (!(--count))                         
  1342.                         goto last_pixel_2;  /* !!! */       
  1343.                 } while ((limit -= step) >= 0);             
  1344.                 limit += dest_dx;                           
  1345.                 /* load & convert third 2x2 block: */       
  1346.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  1347.                 sy1+=2; sy2+=2; su++; sv++;                 
  1348.                 /* calc. & replicate third half-pixels: */  
  1349.                 AVERAGE(df,a13,a13,a11);                    
  1350.                 LOAD_AVERAGE(df,a01x,a13,d0);               
  1351.                 AVERAGE(df,a23,a23,a21);                    
  1352.                 AVERAGE(df,a12x,a13,a23);                   
  1353.                 do {                                        
  1354.                     d0+=BPP(df);                            
  1355.                     STORE(df,d01,a01x);                     
  1356.                     d01+=BPP(df);                           
  1357.                     STORE(df,d1,a13);                       
  1358.                     d1+=BPP(df);                            
  1359.                     STORE(df,d12,a12x);                     
  1360.                     d12+=BPP(df);                           
  1361.                     STORE(df,d2,a23);                       
  1362.                     d2+=BPP(df);                            
  1363.                     if (!(--count))                         
  1364.                         goto rep_last_3;    /* ??? */       
  1365.                 } while ((limit -= step) >= 0);             
  1366.                 limit += dest_dx;                           
  1367. rep_even:       /* get vertical half-pixels:*/              
  1368.                 LOAD_AVERAGE(df,a01x,a11,d0);               
  1369.                 AVERAGE(df,a12x,a11,a21);                   
  1370.                 /* replicate third even integral pixels: */ 
  1371.                 do {                                        
  1372.                     d0+=BPP(df);                            
  1373.                     STORE(df,d01,a01x);                     
  1374.                     d01+=BPP(df);                           
  1375.                     STORE(df,d1,a11);                       
  1376.                     d1+=BPP(df);                            
  1377.                     STORE(df,d12,a12x);                     
  1378.                     d12+=BPP(df);                           
  1379.                     STORE(df,d2,a21);                       
  1380.                     d2+=BPP(df);                            
  1381.                     if (!(--count))                         
  1382.                         goto rep_last_2;    /* ??? */       
  1383.                 } while ((limit -= step) >= 0);             
  1384.                 limit += dest_dx;                           
  1385.                 /* calc. & replicate fourth half-pixels: */ 
  1386.                 AVERAGE(df,a11,a11,a12);                    
  1387.                 LOAD_AVERAGE(df,a01x,a11,d0);               
  1388.                 AVERAGE(df,a21,a21,a22);                    
  1389.                 AVERAGE(df,a12x,a11,a21);                   
  1390.                 do {                                        
  1391.                     d0+=BPP(df);                            
  1392.                     STORE(df,d01,a01x);                     
  1393.                     d01+=BPP(df);                           
  1394.                     STORE(df,d1,a11);                       
  1395.                     d1+=BPP(df);                            
  1396.                     STORE(df,d12,a12x);                     
  1397.                     d12+=BPP(df);                           
  1398.                     STORE(df,d2,a21);                       
  1399.                     d2+=BPP(df);                            
  1400.                     if (!(--count))                         
  1401.                         goto rep_last;      /* !!! */       
  1402.                 } while ((limit -= step) >= 0);             
  1403.                 limit += dest_dx;                           
  1404. rep_odd:        /* get vertical half-pixels:*/              
  1405.                 LOAD_AVERAGE(df,a01x,a12,d0);               
  1406.                 AVERAGE(df,a12x,a12,a22);                   
  1407.                 /* replicate third odd integral pixels: */  
  1408.                 do {                                        
  1409.                     d0+=BPP(df);                            
  1410.                     STORE(df,d01,a01x);                     
  1411.                     d01+=BPP(df);                           
  1412.                     STORE(df,d1,a12);                       
  1413.                     d1+=BPP(df);                            
  1414.                     STORE(df,d12,a12x);                     
  1415.                     d12+=BPP(df);                           
  1416.                     STORE(df,d2,a22);                       
  1417.                     d2+=BPP(df);                            
  1418.                     if (!(--count))                         
  1419.                         goto last_pixel;    /* !!! */       
  1420.                 } while ((limit -= step) >= 0);             
  1421.                 limit += dest_dx;                           
  1422.             }                                               
  1423. last_pixel_2:/* store last integral pixels in a11/21: */    
  1424.             COPY(df,a11,a13);                               
  1425.             COPY(df,a21,a23);                               
  1426. last_pixel: /* check if we need to convert one more pixel:*/
  1427.             if ((src_x + src_dx) & 1) {                     
  1428.                 /* update count & remainder: */             
  1429.                 register int r2 = remainder >> 1;           
  1430.                 count += r2; remainder -= r2;               
  1431.                 if (count <= 0)                             
  1432.                     goto rep_last;                          
  1433.                 /* load & convert last 2x1 block: */        
  1434.                 YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); 
  1435.                 /* calc. & replicate last half-pixels: */   
  1436.                 AVERAGE(df,a11,a11,a12);                    
  1437.                 LOAD_AVERAGE(df,a01x,a11,d0);               
  1438.                 AVERAGE(df,a21,a21,a22);                    
  1439.                 AVERAGE(df,a12x,a11,a21);                   
  1440.                 do {                                        
  1441.                     d0+=BPP(df);                            
  1442.                     STORE(df,d01,a01x);                     
  1443.                     d01+=BPP(df);                           
  1444.                     STORE(df,d1,a11);                       
  1445.                     d1+=BPP(df);                            
  1446.                     STORE(df,d12,a12x);                     
  1447.                     d12+=BPP(df);                           
  1448.                     STORE(df,d2,a21);                       
  1449.                     d2+=BPP(df);                            
  1450.                     if (!(--count))                         
  1451.                         goto rep_last;      /* ??? */       
  1452.                 } while ((limit -= step) >= 0);             
  1453.                 /* get last vertical half-pixels:*/         
  1454.                 LOAD_AVERAGE(df,a01x,a12,d0);               
  1455.                 AVERAGE(df,a12x,a12,a22);                   
  1456.             }                                               
  1457.             goto rep_last;                                  
  1458. rep_last_3: /* store last converted pixels in a12/22: */    
  1459.             COPY(df,a12,a13);                               
  1460.             COPY(df,a22,a23);                               
  1461.             goto rep_last;                                  
  1462. rep_last_2: /* store last converted pixels in a12/22: */    
  1463.             COPY(df,a12,a11);                               
  1464.             COPY(df,a22,a21);                               
  1465.             /* restore the number of remaining pixels: */   
  1466. rep_last:   count += remainder;                             
  1467.             /* get vertical half-pixels:*/                  
  1468.             LOAD_AVERAGE(df,a01x,a12,d0);                   
  1469.             AVERAGE(df,a12x,a12,a22);                       
  1470.             /* replicate them: */                           
  1471.             while (count --) {                              
  1472.                 STORE(df,d01,a01x);                         
  1473.                 d01+=BPP(df);                               
  1474.                 STORE(df,d1,a12);                           
  1475.                 d1+=BPP(df);                                
  1476.                 STORE(df,d12,a12x);                         
  1477.                 d12+=BPP(df);                               
  1478.                 STORE(df,d2,a22);                           
  1479.                 d2+=BPP(df);                                
  1480.             }                                               
  1481.         }                                                   
  1482.     }
  1483. /***********************************************************/
  1484. /*
  1485.  * Function names:
  1486.  */
/*
 * Converter-function name generators (token pasting):
 *   FN(df,sf)                -> <sf>to<df>
 *   FN2(df,sf)               -> <sf>to<df>x
 *   DBLROW_FN(df,sf,cc,t)    -> <sf>to<df>_DBLROW_<cc>_<t>
 *   DBLROW2X_FN(df,sf,cc,t)  -> <sf>to<df>_DBLROW2X_<cc>_<t>
 * where, per the instantiations below: df = destination RGB format
 * (RGB32..RGB8), sf = source format (I420), cc = conversion accuracy
 * (FAST, or FULL with hue correction), t = row scale type
 * (SHRINK/COPY/STRETCH/STRETCH2X/STRETCH2XPLUS).
 */
#define FN(df,sf)               sf##to##df
#define FN2(df,sf)              sf##to##df##x
#define DBLROW_FN(df,sf,cc,t)   sf##to##df##_DBLROW_##cc##_##t
#define DBLROW2X_FN(df,sf,cc,t) sf##to##df##_DBLROW2X_##cc##_##t
  1491. /*
  1492.  * Function replication macros:
  1493.  *  (dblrow- and dblrow2x- converters)
  1494.  */
/*
 * Generates a static double-row converter function (name from DBLROW_FN):
 * it takes two destination row pointers (d1,d2), a destination span
 * (dest_x,dest_dx), two luma row pointers (sy1,sy2), the shared chroma
 * rows (su,sv) and a source span (src_x,src_dx); the body is supplied by
 * the DBLROW_<t> macro for the chosen scale type t.
 * NOTE(review): this is a multi-line #define — the trailing '\'
 * line-continuations are not visible in this listing; do not reflow.
 */
#define DBLROW_FUNC(df,sf,cc,t)   
    static void DBLROW_FN(df,sf,cc,t) (unsigned char *d1, unsigned char *d2,
        int dest_x, int dest_dx, unsigned char *sy1, unsigned char *sy2,    
        unsigned char *su, unsigned char *sv, int src_x, int src_dx)        
        DBLROW_##t(cc,df,d1,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx)
/*
 * Generates a static double-row 2x converter function (name from
 * DBLROW2X_FN): same source arguments as DBLROW_FUNC, but it writes five
 * destination rows (d1,d12,d2,d23,d3) — presumably the d12/d23 rows are
 * vertically interpolated between their neighbors (see the AVERAGE-based
 * macro bodies earlier in this file); body supplied by DBLROW2X_<t>.
 * NOTE(review): multi-line #define — trailing '\' continuations are not
 * visible in this listing; do not reflow.
 */
#define DBLROW2X_FUNC(df,sf,cc,t)   
    static void DBLROW2X_FN(df,sf,cc,t) (unsigned char *d1, unsigned char *d12,
        unsigned char *d2, unsigned char *d23, unsigned char *d3,           
        int dest_x, int dest_dx, unsigned char *sy1, unsigned char *sy2,    
        unsigned char *su, unsigned char *sv, int src_x, int src_dx)        
        DBLROW2X_##t(cc,df,d1,d12,d2,d23,d3,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx)
  1506. /***********************************************************/
  1507. /*
  1508.  * Actual double-row functions:
  1509.  */
/*
 * FAST (no hue correction) double-row converters: one function per
 * (destination format x scale type). Each format gets all five scale
 * types, matching the [SCALE_FUNCS] axis of the dispatch tables below.
 */
DBLROW_FUNC(RGB32,  I420 ,FAST, SHRINK)
DBLROW_FUNC(RGB32,  I420 ,FAST, COPY)
DBLROW_FUNC(RGB32,  I420 ,FAST, STRETCH)
DBLROW_FUNC(RGB32,  I420 ,FAST, STRETCH2X)
DBLROW_FUNC(RGB32,  I420 ,FAST, STRETCH2XPLUS)
DBLROW_FUNC(BGR32,  I420 ,FAST, SHRINK)
DBLROW_FUNC(BGR32,  I420 ,FAST, COPY)
DBLROW_FUNC(BGR32,  I420 ,FAST, STRETCH)
DBLROW_FUNC(BGR32,  I420 ,FAST, STRETCH2X)
DBLROW_FUNC(BGR32,  I420 ,FAST, STRETCH2XPLUS)
DBLROW_FUNC(RGB24,  I420 ,FAST, SHRINK)
DBLROW_FUNC(RGB24,  I420 ,FAST, COPY)
DBLROW_FUNC(RGB24,  I420 ,FAST, STRETCH)
DBLROW_FUNC(RGB24,  I420 ,FAST, STRETCH2X)
DBLROW_FUNC(RGB24,  I420 ,FAST, STRETCH2XPLUS)
DBLROW_FUNC(RGB565, I420 ,FAST, SHRINK)
DBLROW_FUNC(RGB565, I420 ,FAST, COPY)
DBLROW_FUNC(RGB565, I420 ,FAST, STRETCH)
DBLROW_FUNC(RGB565, I420 ,FAST, STRETCH2X)
DBLROW_FUNC(RGB565, I420 ,FAST, STRETCH2XPLUS)
DBLROW_FUNC(RGB555, I420 ,FAST, SHRINK)
DBLROW_FUNC(RGB555, I420 ,FAST, COPY)
DBLROW_FUNC(RGB555, I420 ,FAST, STRETCH)
DBLROW_FUNC(RGB555, I420 ,FAST, STRETCH2X)
DBLROW_FUNC(RGB555, I420 ,FAST, STRETCH2XPLUS)
DBLROW_FUNC(RGB444, I420 ,FAST, SHRINK)
DBLROW_FUNC(RGB444, I420 ,FAST, COPY)
DBLROW_FUNC(RGB444, I420 ,FAST, STRETCH)
DBLROW_FUNC(RGB444, I420 ,FAST, STRETCH2X)
DBLROW_FUNC(RGB444, I420 ,FAST, STRETCH2XPLUS)
DBLROW_FUNC(RGB8,   I420 ,FAST, SHRINK)
DBLROW_FUNC(RGB8,   I420 ,FAST, COPY)
DBLROW_FUNC(RGB8,   I420 ,FAST, STRETCH)
DBLROW_FUNC(RGB8,   I420 ,FAST, STRETCH2X)
DBLROW_FUNC(RGB8,   I420 ,FAST, STRETCH2XPLUS)
/*
 * FULL (hue-corrected) double-row converters: same 7 formats x 5 scale
 * types as the FAST set above, selected via the FULL color-conversion
 * macros.
 */
DBLROW_FUNC(RGB32,  I420 ,FULL, SHRINK)
DBLROW_FUNC(RGB32,  I420 ,FULL, COPY)
DBLROW_FUNC(RGB32,  I420 ,FULL, STRETCH)
DBLROW_FUNC(RGB32,  I420 ,FULL, STRETCH2X)
DBLROW_FUNC(RGB32,  I420 ,FULL, STRETCH2XPLUS)
DBLROW_FUNC(BGR32,  I420 ,FULL, SHRINK)
DBLROW_FUNC(BGR32,  I420 ,FULL, COPY)
DBLROW_FUNC(BGR32,  I420 ,FULL, STRETCH)
DBLROW_FUNC(BGR32,  I420 ,FULL, STRETCH2X)
DBLROW_FUNC(BGR32,  I420 ,FULL, STRETCH2XPLUS)
DBLROW_FUNC(RGB24,  I420 ,FULL, SHRINK)
DBLROW_FUNC(RGB24,  I420 ,FULL, COPY)
DBLROW_FUNC(RGB24,  I420 ,FULL, STRETCH)
DBLROW_FUNC(RGB24,  I420 ,FULL, STRETCH2X)
DBLROW_FUNC(RGB24,  I420 ,FULL, STRETCH2XPLUS)
DBLROW_FUNC(RGB565, I420 ,FULL, SHRINK)
DBLROW_FUNC(RGB565, I420 ,FULL, COPY)
DBLROW_FUNC(RGB565, I420 ,FULL, STRETCH)
DBLROW_FUNC(RGB565, I420 ,FULL, STRETCH2X)
DBLROW_FUNC(RGB565, I420 ,FULL, STRETCH2XPLUS)
DBLROW_FUNC(RGB555, I420 ,FULL, SHRINK)
DBLROW_FUNC(RGB555, I420 ,FULL, COPY)
DBLROW_FUNC(RGB555, I420 ,FULL, STRETCH)
DBLROW_FUNC(RGB555, I420 ,FULL, STRETCH2X)
DBLROW_FUNC(RGB555, I420 ,FULL, STRETCH2XPLUS)
DBLROW_FUNC(RGB444, I420 ,FULL, SHRINK)
DBLROW_FUNC(RGB444, I420 ,FULL, COPY)
DBLROW_FUNC(RGB444, I420 ,FULL, STRETCH)
DBLROW_FUNC(RGB444, I420 ,FULL, STRETCH2X)
DBLROW_FUNC(RGB444, I420 ,FULL, STRETCH2XPLUS)
DBLROW_FUNC(RGB8,   I420 ,FULL, SHRINK)
DBLROW_FUNC(RGB8,   I420 ,FULL, COPY)
DBLROW_FUNC(RGB8,   I420 ,FULL, STRETCH)
DBLROW_FUNC(RGB8,   I420 ,FULL, STRETCH2X)
DBLROW_FUNC(RGB8,   I420 ,FULL, STRETCH2XPLUS)
  1581. /*
  1582.  * Actual double-row 2x functions:
  1583.  */
/*
 * FAST double-row 2x converters (five output rows per call): same
 * 7 formats x 5 scale types as the DBLROW_FUNC set.
 */
DBLROW2X_FUNC(RGB32,  I420 ,FAST, SHRINK)
DBLROW2X_FUNC(RGB32,  I420 ,FAST, COPY)
DBLROW2X_FUNC(RGB32,  I420 ,FAST, STRETCH)
DBLROW2X_FUNC(RGB32,  I420 ,FAST, STRETCH2X)
DBLROW2X_FUNC(RGB32,  I420 ,FAST, STRETCH2XPLUS)
DBLROW2X_FUNC(BGR32,  I420 ,FAST, SHRINK)
DBLROW2X_FUNC(BGR32,  I420 ,FAST, COPY)
DBLROW2X_FUNC(BGR32,  I420 ,FAST, STRETCH)
DBLROW2X_FUNC(BGR32,  I420 ,FAST, STRETCH2X)
DBLROW2X_FUNC(BGR32,  I420 ,FAST, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB24,  I420 ,FAST, SHRINK)
DBLROW2X_FUNC(RGB24,  I420 ,FAST, COPY)
DBLROW2X_FUNC(RGB24,  I420 ,FAST, STRETCH)
DBLROW2X_FUNC(RGB24,  I420 ,FAST, STRETCH2X)
DBLROW2X_FUNC(RGB24,  I420 ,FAST, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB565, I420 ,FAST, SHRINK)
DBLROW2X_FUNC(RGB565, I420 ,FAST, COPY)
DBLROW2X_FUNC(RGB565, I420 ,FAST, STRETCH)
DBLROW2X_FUNC(RGB565, I420 ,FAST, STRETCH2X)
DBLROW2X_FUNC(RGB565, I420 ,FAST, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB555, I420 ,FAST, SHRINK)
DBLROW2X_FUNC(RGB555, I420 ,FAST, COPY)
DBLROW2X_FUNC(RGB555, I420 ,FAST, STRETCH)
DBLROW2X_FUNC(RGB555, I420 ,FAST, STRETCH2X)
DBLROW2X_FUNC(RGB555, I420 ,FAST, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB444, I420 ,FAST, SHRINK)
DBLROW2X_FUNC(RGB444, I420 ,FAST, COPY)
DBLROW2X_FUNC(RGB444, I420 ,FAST, STRETCH)
DBLROW2X_FUNC(RGB444, I420 ,FAST, STRETCH2X)
DBLROW2X_FUNC(RGB444, I420 ,FAST, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB8,   I420 ,FAST, SHRINK)
DBLROW2X_FUNC(RGB8,   I420 ,FAST, COPY)
DBLROW2X_FUNC(RGB8,   I420 ,FAST, STRETCH)
DBLROW2X_FUNC(RGB8,   I420 ,FAST, STRETCH2X)
DBLROW2X_FUNC(RGB8,   I420 ,FAST, STRETCH2XPLUS)
/*
 * FULL (hue-corrected) double-row 2x converters: same 7 formats x 5
 * scale types as the FAST 2x set above.
 */
DBLROW2X_FUNC(RGB32,  I420 ,FULL, SHRINK)
DBLROW2X_FUNC(RGB32,  I420 ,FULL, COPY)
DBLROW2X_FUNC(RGB32,  I420 ,FULL, STRETCH)
DBLROW2X_FUNC(RGB32,  I420 ,FULL, STRETCH2X)
DBLROW2X_FUNC(RGB32,  I420 ,FULL, STRETCH2XPLUS)
DBLROW2X_FUNC(BGR32,  I420 ,FULL, SHRINK)
DBLROW2X_FUNC(BGR32,  I420 ,FULL, COPY)
DBLROW2X_FUNC(BGR32,  I420 ,FULL, STRETCH)
DBLROW2X_FUNC(BGR32,  I420 ,FULL, STRETCH2X)
DBLROW2X_FUNC(BGR32,  I420 ,FULL, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB24,  I420 ,FULL, SHRINK)
DBLROW2X_FUNC(RGB24,  I420 ,FULL, COPY)
DBLROW2X_FUNC(RGB24,  I420 ,FULL, STRETCH)
DBLROW2X_FUNC(RGB24,  I420 ,FULL, STRETCH2X)
DBLROW2X_FUNC(RGB24,  I420 ,FULL, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB565, I420 ,FULL, SHRINK)
DBLROW2X_FUNC(RGB565, I420 ,FULL, COPY)
DBLROW2X_FUNC(RGB565, I420 ,FULL, STRETCH)
DBLROW2X_FUNC(RGB565, I420 ,FULL, STRETCH2X)
DBLROW2X_FUNC(RGB565, I420 ,FULL, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB555, I420 ,FULL, SHRINK)
DBLROW2X_FUNC(RGB555, I420 ,FULL, COPY)
DBLROW2X_FUNC(RGB555, I420 ,FULL, STRETCH)
DBLROW2X_FUNC(RGB555, I420 ,FULL, STRETCH2X)
DBLROW2X_FUNC(RGB555, I420 ,FULL, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB444, I420 ,FULL, SHRINK)
DBLROW2X_FUNC(RGB444, I420 ,FULL, COPY)
DBLROW2X_FUNC(RGB444, I420 ,FULL, STRETCH)
DBLROW2X_FUNC(RGB444, I420 ,FULL, STRETCH2X)
DBLROW2X_FUNC(RGB444, I420 ,FULL, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB8,   I420 ,FULL, SHRINK)
DBLROW2X_FUNC(RGB8,   I420 ,FULL, COPY)
DBLROW2X_FUNC(RGB8,   I420 ,FULL, STRETCH)
DBLROW2X_FUNC(RGB8,   I420 ,FULL, STRETCH2X)
DBLROW2X_FUNC(RGB8,   I420 ,FULL, STRETCH2XPLUS)
  1655. /*
  1656.  * Double-row scale function selection tables:
  1657.  *  [conversion type][source format][row scale type]
  1658.  */
  1659. static void (* DblRowFuncs [2][RGB_FORMATS][SCALE_FUNCS]) (
  1660.     unsigned char *d1, unsigned char *d2, int dest_x, int dest_dx,
  1661.     unsigned char *sy1, unsigned char *sy2,
  1662.     unsigned char *su, unsigned char *sv, int src_x, int src_dx) =
  1663. {
  1664.     {   {        
  1665. #if defined (HELIX_FEATURE_CC_RGB32out)
  1666.     #if defined (HXCOLOR_SHRINK)
  1667.             DBLROW_FN(RGB32 ,I420 ,FAST, SHRINK),
  1668.     #else   
  1669.             0,
  1670.     #endif //HXCOLOR_SHRINK
  1671.             
  1672.             DBLROW_FN(RGB32 ,I420 ,FAST, COPY),
  1673.             
  1674.     #if defined (HXCOLOR_STRETCH)
  1675.             DBLROW_FN(RGB32 ,I420 ,FAST, STRETCH),
  1676.     #else
  1677.             0,
  1678.     #endif //HXCOLOR_STRETCH
  1679.     #if defined (HXCOLOR_STRETCH2X)
  1680.             DBLROW_FN(RGB32 ,I420 ,FAST, STRETCH2X),
  1681.     #else
  1682.             0,
  1683.     #endif //HXCOLOR_STRETCH2X
  1684.     #if defined (HXCOLOR_STRETCH2XPLUS)
  1685.             DBLROW_FN(RGB32 ,I420 ,FAST, STRETCH2XPLUS)
  1686.     #else
  1687.             0
  1688.     #endif //HXCOLOR_STRETCH2XPLUS
  1689. #else
  1690.     0,
  1691.     0,
  1692.     0,
  1693.     0,
  1694.     0
  1695. #endif //HELIX_FEATURE_CC_RGB32out
  1696.         },{
  1697. #if defined (HELIX_FEATURE_CC_BGR32out)
  1698.     #if defined (HXCOLOR_SHRINK)
  1699.             DBLROW_FN(BGR32 ,I420 ,FAST, SHRINK),
  1700.     #else
  1701.             0,
  1702.     #endif //HXCOLOR_SHRINK
  1703.             
  1704.             DBLROW_FN(BGR32 ,I420 ,FAST, COPY),
  1705.     
  1706.     #if defined (HXCOLOR_STRETCH)
  1707.             DBLROW_FN(BGR32 ,I420 ,FAST, STRETCH),
  1708.     #else
  1709.             0,
  1710.     #endif //HXCOLOR_STRETCH
  1711.     #if defined (HXCOLOR_STRETCH2X)
  1712.             DBLROW_FN(BGR32 ,I420 ,FAST, STRETCH2X),
  1713.     #else
  1714.             0,
  1715.     #endif //HXCOLOR_STRETCH2X
  1716.     #if defined (HXCOLOR_STRETCH2XPLUS)
  1717.             DBLROW_FN(BGR32 ,I420 ,FAST, STRETCH2XPLUS)
  1718.     #else
  1719.             0
  1720.     #endif //HXCOLOR_STRETCH2XPLUS
  1721. #else
  1722.     0,
  1723.     0,
  1724.     0,
  1725.     0,
  1726.     0
  1727. #endif //HELIX_FEATURE_CC_BGR32out
  1728.         },{
  1729. #if defined (HELIX_FEATURE_CC_RGB24out)
  1730.     #if defined (HXCOLOR_SHRINK)
  1731.             DBLROW_FN(RGB24 ,I420 ,FAST, SHRINK),
  1732.     #else   
  1733.             0,
  1734.     #endif //HXCOLOR_SHRINK
  1735.             DBLROW_FN(RGB24 ,I420 ,FAST, COPY),
  1736.     #if defined (HXCOLOR_STRETCH)
  1737.             DBLROW_FN(RGB24 ,I420 ,FAST, STRETCH),
  1738.     #else
  1739.             0,
  1740.     #endif //HXCOLOR_STRETCH
  1741.     #if defined (HXCOLOR_STRETCH2X)
  1742.             DBLROW_FN(RGB24 ,I420 ,FAST, STRETCH2X),
  1743.     #else
  1744.             0,
  1745.     #endif //HXCOLOR_STRETCH2X
  1746.     #if defined (HXCOLOR_STRETCH2XPLUS)
  1747.             DBLROW_FN(RGB24 ,I420 ,FAST, STRETCH2XPLUS)
  1748.     #else
  1749.             0
  1750.     #endif  //HXCOLOR_STRETCH2XPLUS
  1751. #else
  1752.     0,
  1753.     0,
  1754.     0,
  1755.     0,
  1756.     0
  1757. #endif //HELIX_FEATURE_CC_RGB24out
  1758.         },{
  1759. #if defined (HELIX_FEATURE_CC_RGB565out)
  1760.     #if defined (HXCOLOR_SHRINK)
  1761.             DBLROW_FN(RGB565,I420 ,FAST, SHRINK),
  1762.     #else
  1763.             0,
  1764.     #endif //HXCOLOR_SHRINK
  1765.             DBLROW_FN(RGB565,I420 ,FAST, COPY),
  1766.     #if defined (HXCOLOR_STRETCH)
  1767.             DBLROW_FN(RGB565,I420 ,FAST, STRETCH),
  1768.     #else
  1769.             0,
  1770.     #endif //HXCOLOR_STRETCH
  1771.     #if defined (HXCOLOR_STRETCH2X)
  1772.             DBLROW_FN(RGB565,I420 ,FAST, STRETCH2X),
  1773.     #else
  1774.             0,
  1775.     #endif //HXCOLOR_STRETCH2X
  1776.     #if defined (HXCOLOR_STRETCH2XPLUS)
  1777.             DBLROW_FN(RGB565,I420 ,FAST, STRETCH2XPLUS)
  1778.     #else
  1779.             0
  1780.     #endif //HXCOLOR_STRETCH2XPLUS
  1781. #else
  1782.     0,
  1783.     0,
  1784.     0,
  1785.     0,
  1786.     0
  1787. #endif //HELIX_FEATURE_CC_RGB565out
  1788.         },{
  1789. #if defined (HELIX_FEATURE_CC_RGB555out)
  1790.     #if defined (HXCOLOR_SHRINK)
  1791.             DBLROW_FN(RGB555,I420 ,FAST, SHRINK),
  1792.     #else
  1793.             0,
  1794.     #endif //HXCOLOR_SHRINK
  1795.             DBLROW_FN(RGB555,I420 ,FAST, COPY),
  1796.     #if defined (HXCOLOR_STRETCH)
  1797.             DBLROW_FN(RGB555,I420 ,FAST, STRETCH),
  1798.     #else
  1799.             0,
  1800.     #endif //HXCOLOR_STRETCH
  1801.     #if defined (HXCOLOR_STRETCH2X)
  1802.             DBLROW_FN(RGB555,I420 ,FAST, STRETCH2X),
  1803.     #else
  1804.             0,
  1805.     #endif //HXCOLOR_STRETCH2X
  1806.     #if defined (HXCOLOR_STRETCH2XPLUS)            
  1807.             DBLROW_FN(RGB555,I420 ,FAST, STRETCH2XPLUS)
  1808.     #else
  1809.             0
  1810.     #endif //HXCOLOR_STRETCH2XPLUS
  1811. #else
  1812.     0,
  1813.     0,
  1814.     0,
  1815.     0,
  1816.     0
  1817. #endif //HELIX_FEATURE_CC_RGB555out
  1818.         },{
  1819. #if defined (HELIX_FEATURE_CC_RGB444out)
  1820.     #if defined (HXCOLOR_SHRINK)
  1821.             DBLROW_FN(RGB444,I420 ,FAST, SHRINK),
  1822.     #else
  1823.             0,
  1824.     #endif //HXCOLOR_SHRINK
  1825.             DBLROW_FN(RGB444,I420 ,FAST, COPY),
  1826.     #if defined (HXCOLOR_STRETCH)
  1827.             DBLROW_FN(RGB444,I420 ,FAST, STRETCH),
  1828.     #else
  1829.             0,
  1830.     #endif //HXCOLOR_STRETCH
  1831.     #if defined (HXCOLOR_STRETCH2X)
  1832.             DBLROW_FN(RGB444,I420 ,FAST, STRETCH2X),
  1833.     #else
  1834.             0,
  1835.     #endif //HXCOLOR_STRETCH2X
  1836.     #if defined (HXCOLOR_STRETCH2XPLUS)            
  1837.             DBLROW_FN(RGB444,I420 ,FAST, STRETCH2XPLUS)
  1838.     #else
  1839.             0
  1840.     #endif //HXCOLOR_STRETCH2XPLUS
  1841. #else
  1842.     0,
  1843.     0,
  1844.     0,
  1845.     0,
  1846.     0
  1847. #endif //HELIX_FEATURE_CC_RGB444out
  1848.         },{
  1849. #if defined (HELIX_FEATURE_CC_RGB8out)
  1850.     #if defined (HXCOLOR_SHRINK)
  1851.             DBLROW_FN(RGB8  ,I420 ,FAST, SHRINK),
  1852.     #else
  1853.             0,
  1854.     #endif //HXCOLOR_SHRINK
  1855.             DBLROW_FN(RGB8  ,I420 ,FAST, COPY),
  1856.     #if defined (HXCOLOR_STRETCH)
  1857.             DBLROW_FN(RGB8  ,I420 ,FAST, STRETCH),
  1858.     #else
  1859.             0,
  1860.     #endif //HXCOLOR_STRETCH
  1861.     #if defined (HXCOLOR_STRETCH2X)
  1862.             DBLROW_FN(RGB8  ,I420 ,FAST, STRETCH2X),
  1863.     #else
  1864.             0,
  1865.     #endif //HXCOLOR_STRETCH2X
  1866.     #if defined (HXCOLOR_STRETCH2XPLUS) 
  1867.             DBLROW_FN(RGB8  ,I420 ,FAST, STRETCH2XPLUS)
  1868.     #else
  1869.             0
  1870.     #endif//HXCOLOR_STRETCH2XPLUS
  1871. #else
  1872.     0,
  1873.     0,
  1874.     0,
  1875.     0,
  1876.     0
  1877. #endif //HELIX_FEATURE_CC_RGB8out
  1878.         }
  1879.     },{ {
  1880. #if defined _PLUS_HXCOLOR && defined (HELIX_FEATURE_CC_RGB32out)
  1881.     #if defined (HXCOLOR_SHRINK)
  1882.             DBLROW_FN(RGB32 ,I420 ,FULL, SHRINK),
  1883.     #else
  1884.             0,
  1885.     #endif //HXCOLOR_SHRINK
  1886.             DBLROW_FN(RGB32 ,I420 ,FULL, COPY),
  1887.     #if defined (HXCOLOR_STRETCH)
  1888.             DBLROW_FN(RGB32 ,I420 ,FULL, STRETCH),
  1889.     #else
  1890.             0,
  1891.     #endif //HXCOLOR_STRETCH
  1892.     #if defined (HXCOLOR_STRETCH2X)
  1893.             DBLROW_FN(RGB32 ,I420 ,FULL, STRETCH2X),
  1894.     #else
  1895.             0,
  1896.     #endif //HXCOLOR_STRETCH2X
  1897.     #if defined (HXCOLOR_STRETCH2XPLUS)
  1898.             DBLROW_FN(RGB32 ,I420 ,FULL, STRETCH2XPLUS)
  1899.     #else
  1900.             0
  1901.     #endif //HXCOLOR_STRETCH2XPLUS
  1902. #else
  1903.     0,
  1904.     0,
  1905.     0,
  1906.     0,
  1907.     0
  1908. #endif
  1909.         },{
  1910. #if defined _PLUS_HXCOLOR && defined (HELIX_FEATURE_CC_BGR32out)
  1911.     #if defined (HXCOLOR_SHRINK)
  1912.             DBLROW_FN(BGR32 ,I420 ,FULL, SHRINK),
  1913.     #else
  1914.             0,
  1915.     #endif //HXCOLOR_SHRINK
  1916.             
  1917.             DBLROW_FN(BGR32 ,I420 ,FULL, COPY),
  1918.             
  1919.     #if defined (HXCOLOR_STRETCH)
  1920.             DBLROW_FN(BGR32 ,I420 ,FULL, STRETCH),
  1921.     #else
  1922.             0,
  1923.     #endif //HXCOLOR_STRETCH
  1924.     #if defined (HXCOLOR_STRETCH2X)
  1925.             DBLROW_FN(BGR32 ,I420 ,FULL, STRETCH2X),
  1926.     #else
  1927.             0,
  1928.     #endif // HXCOLOR_STRETCH2X
  1929.     #if defined (HXCOLOR_STRETCH2XPLUS)            
  1930.             DBLROW_FN(BGR32 ,I420 ,FULL, STRETCH2XPLUS)
  1931.     #else
  1932.             0
  1933.     #endif //HXCOLOR_STRETCH2XPLUS
  1934. #else
  1935.     0,
  1936.     0,
  1937.     0,
  1938.     0,
  1939.     0
  1940. #endif
  1941.         },{
  1942. #if defined _PLUS_HXCOLOR && defined (HELIX_FEATURE_CC_RGB24out)
  1943.     #if defined (HXCOLOR_SHRINK)
  1944.             DBLROW_FN(RGB24 ,I420 ,FULL, SHRINK),
  1945.     #else
  1946.             0,
  1947.     #endif //HXCOLOR_SHRINK
  1948.             DBLROW_FN(RGB24 ,I420 ,FULL, COPY),
  1949.     #if defined (HXCOLOR_STRETCH)
  1950.             DBLROW_FN(RGB24 ,I420 ,FULL, STRETCH),
  1951.     #else
  1952.             0,
  1953.     #endif //HXCOLOR_STRETCH
  1954.     #if defined (HXCOLOR_STRETCH2X)
  1955.             DBLROW_FN(RGB24 ,I420 ,FULL, STRETCH2X),
  1956.     #else
  1957.             0,
  1958.     #endif //HXCOLOR_STRETCH2X
  1959.                  
  1960.     #if defined (HXCOLOR_STRETCH2XPLUS)
  1961.             DBLROW_FN(RGB24 ,I420 ,FULL, STRETCH2XPLUS)
  1962.     #else
  1963.             0
  1964.     #endif //HXCOLOR_STRETCH2XPLUS
  1965.             
  1966. #else
  1967.     0,
  1968.     0,
  1969.     0,
  1970.     0,
  1971.     0
  1972. #endif
  1973.         },{
  1974. #if defined _PLUS_HXCOLOR && defined (HELIX_FEATURE_CC_RGB565out)
  1975.     #if defined (HXCOLOR_SHRINK)
  1976.             DBLROW_FN(RGB565,I420 ,FULL, SHRINK),
  1977.     #else
  1978.             0,
  1979.     #endif //HXCOLOR_SHRINK
  1980.             DBLROW_FN(RGB565,I420 ,FULL, COPY),
  1981.     
  1982.     #if defined (HXCOLOR_STRETCH)
  1983.             DBLROW_FN(RGB565,I420 ,FULL, STRETCH),
  1984.     #else
  1985.             0,
  1986.     #endif //HXCOLOR_STRETCH
  1987.     
  1988.     #if defined (HXCOLOR_STRETCH2X)
  1989.             DBLROW_FN(RGB565,I420 ,FULL, STRETCH2X),
  1990.     #else
  1991.             0,
  1992.     #endif //HXCOLOR_STRETCH2X
  1993.             
  1994.     #if defined (HXCOLOR_STRETCH2XPLUS)