dsputil_mmx.c
Package name: tcpmp.rar
Uploaded by: wstnjxml
Upload date: 2014-04-03
Package size: 7248k
File size: 102k
Source category: Windows CE
Development platform: C/C++
- /*
- * MMX optimized DSP utils
- * Copyright (c) 2000, 2001 Fabrice Bellard.
- * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
- */
- #include "../dsputil.h"
- #include "../simple_idct.h"
- #include "../mpegvideo.h"
- #include "mmx.h"
- //#undef NDEBUG
- //#include <assert.h>
- extern const uint8_t ff_h263_loop_filter_strength[32];
- extern void ff_idct_xvid_mmx(short *block);
- extern void ff_idct_xvid_mmx2(short *block);
- int mm_flags; /* multimedia extension flags */
- /* pixel operations */
- static const uint64_t mm_bone attribute_used __attribute__ ((aligned(8))) = 0x0101010101010101ULL;
- static const uint64_t mm_wone attribute_used __attribute__ ((aligned(8))) = 0x0001000100010001ULL;
- static const uint64_t mm_wtwo attribute_used __attribute__ ((aligned(8))) = 0x0002000200020002ULL;
- static const uint64_t ff_pw_20 attribute_used __attribute__ ((aligned(8))) = 0x0014001400140014ULL;
- static const uint64_t ff_pw_3 attribute_used __attribute__ ((aligned(8))) = 0x0003000300030003ULL;
- static const uint64_t ff_pw_4 attribute_used __attribute__ ((aligned(8))) = 0x0004000400040004ULL;
- static const uint64_t ff_pw_5 attribute_used __attribute__ ((aligned(8))) = 0x0005000500050005ULL;
- static const uint64_t ff_pw_16 attribute_used __attribute__ ((aligned(8))) = 0x0010001000100010ULL;
- static const uint64_t ff_pw_32 attribute_used __attribute__ ((aligned(8))) = 0x0020002000200020ULL;
- static const uint64_t ff_pw_64 attribute_used __attribute__ ((aligned(8))) = 0x0040004000400040ULL;
- static const uint64_t ff_pw_15 attribute_used __attribute__ ((aligned(8))) = 0x000F000F000F000FULL;
- static const uint64_t ff_pb_3F attribute_used __attribute__ ((aligned(8))) = 0x3F3F3F3F3F3F3F3FULL;
- static const uint64_t ff_pb_FC attribute_used __attribute__ ((aligned(8))) = 0xFCFCFCFCFCFCFCFCULL;
- #define JUMPALIGN() __asm __volatile (".balign 8"::)
- #define MOVQ_ZERO(regd) __asm __volatile ("pxor %%" #regd ", %%" #regd ::)
- #define MOVQ_WONE(regd) \
-     __asm __volatile ( \
-     "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
-     "psrlw $15, %%" #regd ::)
- #define MOVQ_BFE(regd) \
-     __asm __volatile ( \
-     "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
-     "paddb %%" #regd ", %%" #regd " \n\t" ::)
- #ifndef PIC
- #define MOVQ_BONE(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_bone))
- #define MOVQ_WTWO(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_wtwo))
- #else
- // for shared library it's better to use this way for accessing constants
- // pcmpeqd -> -1
- #define MOVQ_BONE(regd) \
-     __asm __volatile ( \
-     "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
-     "psrlw $15, %%" #regd " \n\t" \
-     "packuswb %%" #regd ", %%" #regd " \n\t" ::)
- #define MOVQ_WTWO(regd) \
-     __asm __volatile ( \
-     "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
-     "psrlw $15, %%" #regd " \n\t" \
-     "psllw $1, %%" #regd " \n\t"::)
- #endif
- // using regr as temporary and for the output result
- // first argument is unmodified and second is trashed
- // regfe is supposed to contain 0xfefefefefefefefe
- #define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
-     "movq " #rega ", " #regr " \n\t" \
-     "pand " #regb ", " #regr " \n\t" \
-     "pxor " #rega ", " #regb " \n\t" \
-     "pand " #regfe "," #regb " \n\t" \
-     "psrlq $1, " #regb " \n\t" \
-     "paddb " #regb ", " #regr " \n\t"
- #define PAVGB_MMX(rega, regb, regr, regfe) \
-     "movq " #rega ", " #regr " \n\t" \
-     "por " #regb ", " #regr " \n\t" \
-     "pxor " #rega ", " #regb " \n\t" \
-     "pand " #regfe "," #regb " \n\t" \
-     "psrlq $1, " #regb " \n\t" \
-     "psubb " #regb ", " #regr " \n\t"
- // mm6 is supposed to contain 0xfefefefefefefefe
- #define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
-     "movq " #rega ", " #regr " \n\t" \
-     "movq " #regc ", " #regp " \n\t" \
-     "pand " #regb ", " #regr " \n\t" \
-     "pand " #regd ", " #regp " \n\t" \
-     "pxor " #rega ", " #regb " \n\t" \
-     "pxor " #regc ", " #regd " \n\t" \
-     "pand %%mm6, " #regb " \n\t" \
-     "pand %%mm6, " #regd " \n\t" \
-     "psrlq $1, " #regb " \n\t" \
-     "psrlq $1, " #regd " \n\t" \
-     "paddb " #regb ", " #regr " \n\t" \
-     "paddb " #regd ", " #regp " \n\t"
- #define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
-     "movq " #rega ", " #regr " \n\t" \
-     "movq " #regc ", " #regp " \n\t" \
-     "por " #regb ", " #regr " \n\t" \
-     "por " #regd ", " #regp " \n\t" \
-     "pxor " #rega ", " #regb " \n\t" \
-     "pxor " #regc ", " #regd " \n\t" \
-     "pand %%mm6, " #regb " \n\t" \
-     "pand %%mm6, " #regd " \n\t" \
-     "psrlq $1, " #regd " \n\t" \
-     "psrlq $1, " #regb " \n\t" \
-     "psubb " #regb ", " #regr " \n\t" \
-     "psubb " #regd ", " #regp " \n\t"
- /***********************************/
- /* MMX no rounding */
- #define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
- #define SET_RND MOVQ_WONE
- #define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
- #define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)
- #include "dsputil_mmx_rnd.h"
- #undef DEF
- #undef SET_RND
- #undef PAVGBP
- #undef PAVGB
- /***********************************/
- /* MMX rounding */
- #define DEF(x, y) x ## _ ## y ##_mmx
- #define SET_RND MOVQ_WTWO
- #define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
- #define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)
- #include "dsputil_mmx_rnd.h"
- #undef DEF
- #undef SET_RND
- #undef PAVGBP
- #undef PAVGB
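- /*
-  * The SET_RND constant (word 1 for the no-rounding build, word 2 for the
-  * rounding build) is folded into the 2D half-pel average inside
-  * dsputil_mmx_rnd.h.  Scalar sketch of the assumed expansion for the
-  * xy-interpolated case:
-  *
-  *     rounding:    dst[x] = (a + b + c + d + 2) >> 2;
-  *     no rounding: dst[x] = (a + b + c + d + 1) >> 2;
-  */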
- /***********************************/
- /* 3Dnow specific */
- #define DEF(x) x ## _3dnow
- /* for Athlons PAVGUSB is preferred */
- #define PAVGB "pavgusb"
- #include "dsputil_mmx_avg.h"
- #undef DEF
- #undef PAVGB
- /***********************************/
- /* MMX2 specific */
- #define DEF(x) x ## _mmx2
- /* Introduced only in MMX2 set */
- #define PAVGB "pavgb"
- #include "dsputil_mmx_avg.h"
- #undef DEF
- #undef PAVGB
- /***********************************/
- /* standard MMX */
- #ifdef CONFIG_ENCODERS
- static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
- {
- asm volatile(
- "mov $-128, %%"REG_a" nt"
- "pxor %%mm7, %%mm7 nt"
- ".balign 16 nt"
- "1: nt"
- "movq (%0), %%mm0 nt"
- "movq (%0, %2), %%mm2 nt"
- "movq %%mm0, %%mm1 nt"
- "movq %%mm2, %%mm3 nt"
- "punpcklbw %%mm7, %%mm0 nt"
- "punpckhbw %%mm7, %%mm1 nt"
- "punpcklbw %%mm7, %%mm2 nt"
- "punpckhbw %%mm7, %%mm3 nt"
- "movq %%mm0, (%1, %%"REG_a")nt"
- "movq %%mm1, 8(%1, %%"REG_a")nt"
- "movq %%mm2, 16(%1, %%"REG_a")nt"
- "movq %%mm3, 24(%1, %%"REG_a")nt"
- "add %3, %0 nt"
- "add $32, %%"REG_a" nt"
- "js 1b nt"
- : "+r" (pixels)
- : "r" (block+64), "r" ((long)line_size), "r" ((long)line_size*2)
- : "%"REG_a
- );
- }
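- /*
-  * Illustrative scalar sketch of what get_pixels_mmx() computes (the helper
-  * name is hypothetical): widen an 8x8 block of unsigned pixels to signed
-  * 16-bit DCT coefficients.
-  *
-  * static void get_pixels_ref(DCTELEM *block, const uint8_t *pixels, int line_size)
-  * {
-  *     int i, j;
-  *     for (i = 0; i < 8; i++) {
-  *         for (j = 0; j < 8; j++)
-  *             block[i*8 + j] = pixels[j];
-  *         pixels += line_size;
-  *     }
-  * }
-  */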
- static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
- {
- asm volatile(
- "pxor %%mm7, %%mm7 nt"
- "mov $-128, %%"REG_a" nt"
- ".balign 16 nt"
- "1: nt"
- "movq (%0), %%mm0 nt"
- "movq (%1), %%mm2 nt"
- "movq %%mm0, %%mm1 nt"
- "movq %%mm2, %%mm3 nt"
- "punpcklbw %%mm7, %%mm0 nt"
- "punpckhbw %%mm7, %%mm1 nt"
- "punpcklbw %%mm7, %%mm2 nt"
- "punpckhbw %%mm7, %%mm3 nt"
- "psubw %%mm2, %%mm0 nt"
- "psubw %%mm3, %%mm1 nt"
- "movq %%mm0, (%2, %%"REG_a")nt"
- "movq %%mm1, 8(%2, %%"REG_a")nt"
- "add %3, %0 nt"
- "add %3, %1 nt"
- "add $16, %%"REG_a" nt"
- "jnz 1b nt"
- : "+r" (s1), "+r" (s2)
- : "r" (block+64), "r" ((long)stride)
- : "%"REG_a
- );
- }
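- /*
-  * Illustrative scalar sketch of diff_pixels_mmx() (hypothetical helper
-  * name): per-pixel difference of two 8x8 blocks, widened to 16 bits.
-  *
-  * static void diff_pixels_ref(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
-  * {
-  *     int i, j;
-  *     for (i = 0; i < 8; i++) {
-  *         for (j = 0; j < 8; j++)
-  *             block[i*8 + j] = s1[j] - s2[j];
-  *         s1 += stride;
-  *         s2 += stride;
-  *     }
-  * }
-  */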
- #endif //CONFIG_ENCODERS
- void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
- {
- const DCTELEM *p;
- uint8_t *pix;
- /* read the pixels */
- p = block;
- pix = pixels;
- /* unrolled loop */
- __asm __volatile(
- "movq %3, %%mm0nt"
- "movq 8%3, %%mm1nt"
- "movq 16%3, %%mm2nt"
- "movq 24%3, %%mm3nt"
- "movq 32%3, %%mm4nt"
- "movq 40%3, %%mm5nt"
- "movq 48%3, %%mm6nt"
- "movq 56%3, %%mm7nt"
- "packuswb %%mm1, %%mm0nt"
- "packuswb %%mm3, %%mm2nt"
- "packuswb %%mm5, %%mm4nt"
- "packuswb %%mm7, %%mm6nt"
- "movq %%mm0, (%0)nt"
- "movq %%mm2, (%0, %1)nt"
- "movq %%mm4, (%0, %1, 2)nt"
- "movq %%mm6, (%0, %2)nt"
- ::"r" (pix), "r" ((long)line_size), "r" ((long)line_size*3), "m"(*p)
- :"memory");
- pix += line_size*4;
- p += 32;
- // if this were an exact copy of the code above, the compiler
- // would generate some very strange code, hence the "r" constraint here
- __asm __volatile(
- "movq (%3), %%mm0nt"
- "movq 8(%3), %%mm1nt"
- "movq 16(%3), %%mm2nt"
- "movq 24(%3), %%mm3nt"
- "movq 32(%3), %%mm4nt"
- "movq 40(%3), %%mm5nt"
- "movq 48(%3), %%mm6nt"
- "movq 56(%3), %%mm7nt"
- "packuswb %%mm1, %%mm0nt"
- "packuswb %%mm3, %%mm2nt"
- "packuswb %%mm5, %%mm4nt"
- "packuswb %%mm7, %%mm6nt"
- "movq %%mm0, (%0)nt"
- "movq %%mm2, (%0, %1)nt"
- "movq %%mm4, (%0, %1, 2)nt"
- "movq %%mm6, (%0, %2)nt"
- ::"r" (pix), "r" ((long)line_size), "r" ((long)line_size*3), "r"(p)
- :"memory");
- }
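- /*
-  * Illustrative scalar sketch (hypothetical helper name): packuswb clamps
-  * each 16-bit coefficient to 0..255 before it is stored.
-  *
-  * static void put_pixels_clamped_ref(const DCTELEM *block, uint8_t *pixels, int line_size)
-  * {
-  *     int i, j;
-  *     for (i = 0; i < 8; i++) {
-  *         for (j = 0; j < 8; j++) {
-  *             int v = block[i*8 + j];
-  *             pixels[j] = v < 0 ? 0 : v > 255 ? 255 : v;
-  *         }
-  *         pixels += line_size;
-  *     }
-  * }
-  */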
- static const unsigned char __align8 vector128[8] =
- { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };
- void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
- {
- int i;
- movq_m2r(*vector128, mm1);
- for (i = 0; i < 8; i++) {
- movq_m2r(*(block), mm0);
- packsswb_m2r(*(block + 4), mm0);
- block += 8;
- paddb_r2r(mm1, mm0);
- movq_r2m(mm0, *pixels);
- pixels += line_size;
- }
- }
- void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
- {
- const DCTELEM *p;
- uint8_t *pix;
- int i;
- /* read the pixels */
- p = block;
- pix = pixels;
- MOVQ_ZERO(mm7);
- i = 4;
- do {
- __asm __volatile(
- "movq (%2), %%mm0nt"
- "movq 8(%2), %%mm1nt"
- "movq 16(%2), %%mm2nt"
- "movq 24(%2), %%mm3nt"
- "movq %0, %%mm4nt"
- "movq %1, %%mm6nt"
- "movq %%mm4, %%mm5nt"
- "punpcklbw %%mm7, %%mm4nt"
- "punpckhbw %%mm7, %%mm5nt"
- "paddsw %%mm4, %%mm0nt"
- "paddsw %%mm5, %%mm1nt"
- "movq %%mm6, %%mm5nt"
- "punpcklbw %%mm7, %%mm6nt"
- "punpckhbw %%mm7, %%mm5nt"
- "paddsw %%mm6, %%mm2nt"
- "paddsw %%mm5, %%mm3nt"
- "packuswb %%mm1, %%mm0nt"
- "packuswb %%mm3, %%mm2nt"
- "movq %%mm0, %0nt"
- "movq %%mm2, %1nt"
- :"+m"(*pix), "+m"(*(pix+line_size))
- :"r"(p)
- :"memory");
- pix += line_size*2;
- p += 16;
- } while (--i);
- }
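- /*
-  * Illustrative scalar sketch (hypothetical helper name): the IDCT residual
-  * is added to the current pixels, then clamped to 0..255 on store.
-  *
-  * static void add_pixels_clamped_ref(const DCTELEM *block, uint8_t *pixels, int line_size)
-  * {
-  *     int i, j;
-  *     for (i = 0; i < 8; i++) {
-  *         for (j = 0; j < 8; j++) {
-  *             int v = pixels[j] + block[i*8 + j];
-  *             pixels[j] = v < 0 ? 0 : v > 255 ? 255 : v;
-  *         }
-  *         pixels += line_size;
-  *     }
-  * }
-  */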
- static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
- {
- __asm __volatile(
- "lea (%3, %3), %%"REG_a" nt"
- ".balign 8 nt"
- "1: nt"
- "movd (%1), %%mm0 nt"
- "movd (%1, %3), %%mm1 nt"
- "movd %%mm0, (%2) nt"
- "movd %%mm1, (%2, %3) nt"
- "add %%"REG_a", %1 nt"
- "add %%"REG_a", %2 nt"
- "movd (%1), %%mm0 nt"
- "movd (%1, %3), %%mm1 nt"
- "movd %%mm0, (%2) nt"
- "movd %%mm1, (%2, %3) nt"
- "add %%"REG_a", %1 nt"
- "add %%"REG_a", %2 nt"
- "subl $4, %0 nt"
- "jnz 1b nt"
- : "+g"(h), "+r" (pixels), "+r" (block)
- : "r"((long)line_size)
- : "%"REG_a, "memory"
- );
- }
- static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
- {
- __asm __volatile(
- "lea (%3, %3), %%"REG_a" nt"
- ".balign 8 nt"
- "1: nt"
- "movq (%1), %%mm0 nt"
- "movq (%1, %3), %%mm1 nt"
- "movq %%mm0, (%2) nt"
- "movq %%mm1, (%2, %3) nt"
- "add %%"REG_a", %1 nt"
- "add %%"REG_a", %2 nt"
- "movq (%1), %%mm0 nt"
- "movq (%1, %3), %%mm1 nt"
- "movq %%mm0, (%2) nt"
- "movq %%mm1, (%2, %3) nt"
- "add %%"REG_a", %1 nt"
- "add %%"REG_a", %2 nt"
- "subl $4, %0 nt"
- "jnz 1b nt"
- : "+g"(h), "+r" (pixels), "+r" (block)
- : "r"((long)line_size)
- : "%"REG_a, "memory"
- );
- }
- static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
- {
- __asm __volatile(
- "lea (%3, %3), %%"REG_a" nt"
- ".balign 8 nt"
- "1: nt"
- "movq (%1), %%mm0 nt"
- "movq 8(%1), %%mm4 nt"
- "movq (%1, %3), %%mm1 nt"
- "movq 8(%1, %3), %%mm5 nt"
- "movq %%mm0, (%2) nt"
- "movq %%mm4, 8(%2) nt"
- "movq %%mm1, (%2, %3) nt"
- "movq %%mm5, 8(%2, %3) nt"
- "add %%"REG_a", %1 nt"
- "add %%"REG_a", %2 nt"
- "movq (%1), %%mm0 nt"
- "movq 8(%1), %%mm4 nt"
- "movq (%1, %3), %%mm1 nt"
- "movq 8(%1, %3), %%mm5 nt"
- "movq %%mm0, (%2) nt"
- "movq %%mm4, 8(%2) nt"
- "movq %%mm1, (%2, %3) nt"
- "movq %%mm5, 8(%2, %3) nt"
- "add %%"REG_a", %1 nt"
- "add %%"REG_a", %2 nt"
- "subl $4, %0 nt"
- "jnz 1b nt"
- : "+g"(h), "+r" (pixels), "+r" (block)
- : "r"((long)line_size)
- : "%"REG_a, "memory"
- );
- }
- static void clear_blocks_mmx(DCTELEM *blocks)
- {
- __asm __volatile(
- "pxor %%mm7, %%mm7 nt"
- "mov $-128*6, %%"REG_a" nt"
- "1: nt"
- "movq %%mm7, (%0, %%"REG_a") nt"
- "movq %%mm7, 8(%0, %%"REG_a") nt"
- "movq %%mm7, 16(%0, %%"REG_a") nt"
- "movq %%mm7, 24(%0, %%"REG_a") nt"
- "add $32, %%"REG_a" nt"
- " js 1b nt"
- : : "r" (((uint8_t *)blocks)+128*6)
- : "%"REG_a
- );
- }
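- /*
-  * Equivalent effect in plain C (a sketch): zero six consecutive 8x8 blocks
-  * of 16-bit coefficients.
-  *
-  *     memset(blocks, 0, 6 * 64 * sizeof(DCTELEM));
-  */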
- #ifdef CONFIG_ENCODERS
- static int pix_sum16_mmx(uint8_t * pix, int line_size){
- const int h=16;
- int sum;
- long index= -line_size*h;
- __asm __volatile(
- "pxor %%mm7, %%mm7 nt"
- "pxor %%mm6, %%mm6 nt"
- "1: nt"
- "movq (%2, %1), %%mm0 nt"
- "movq (%2, %1), %%mm1 nt"
- "movq 8(%2, %1), %%mm2 nt"
- "movq 8(%2, %1), %%mm3 nt"
- "punpcklbw %%mm7, %%mm0 nt"
- "punpckhbw %%mm7, %%mm1 nt"
- "punpcklbw %%mm7, %%mm2 nt"
- "punpckhbw %%mm7, %%mm3 nt"
- "paddw %%mm0, %%mm1 nt"
- "paddw %%mm2, %%mm3 nt"
- "paddw %%mm1, %%mm3 nt"
- "paddw %%mm3, %%mm6 nt"
- "add %3, %1 nt"
- " js 1b nt"
- "movq %%mm6, %%mm5 nt"
- "psrlq $32, %%mm6 nt"
- "paddw %%mm5, %%mm6 nt"
- "movq %%mm6, %%mm5 nt"
- "psrlq $16, %%mm6 nt"
- "paddw %%mm5, %%mm6 nt"
- "movd %%mm6, %0 nt"
- "andl $0xFFFF, %0 nt"
- : "=&r" (sum), "+r" (index)
- : "r" (pix - index), "r" ((long)line_size)
- );
- return sum;
- }
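- /*
-  * Illustrative scalar sketch of pix_sum16_mmx() (hypothetical helper name):
-  * sum of all 256 pixels of a 16x16 block; the result fits in 16 bits.
-  *
-  * static int pix_sum16_ref(uint8_t *pix, int line_size)
-  * {
-  *     int sum = 0, i, j;
-  *     for (i = 0; i < 16; i++, pix += line_size)
-  *         for (j = 0; j < 16; j++)
-  *             sum += pix[j];
-  *     return sum;
-  * }
-  */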
- #endif //CONFIG_ENCODERS
- static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
- long i=0;
- asm volatile(
- "1: nt"
- "movq (%1, %0), %%mm0 nt"
- "movq (%2, %0), %%mm1 nt"
- "paddb %%mm0, %%mm1 nt"
- "movq %%mm1, (%2, %0) nt"
- "movq 8(%1, %0), %%mm0 nt"
- "movq 8(%2, %0), %%mm1 nt"
- "paddb %%mm0, %%mm1 nt"
- "movq %%mm1, 8(%2, %0) nt"
- "add $16, %0 nt"
- "cmp %3, %0 nt"
- " jb 1b nt"
- : "+r" (i)
- : "r"(src), "r"(dst), "r"((long)w-15)
- );
- for(; i<w; i++)
- dst[i+0] += src[i+0];
- }
- #define H263_LOOP_FILTER
- "pxor %%mm7, %%mm7 nt"
- "movq %0, %%mm0 nt"
- "movq %0, %%mm1 nt"
- "movq %3, %%mm2 nt"
- "movq %3, %%mm3 nt"
- "punpcklbw %%mm7, %%mm0 nt"
- "punpckhbw %%mm7, %%mm1 nt"
- "punpcklbw %%mm7, %%mm2 nt"
- "punpckhbw %%mm7, %%mm3 nt"
- "psubw %%mm2, %%mm0 nt"
- "psubw %%mm3, %%mm1 nt"
- "movq %1, %%mm2 nt"
- "movq %1, %%mm3 nt"
- "movq %2, %%mm4 nt"
- "movq %2, %%mm5 nt"
- "punpcklbw %%mm7, %%mm2 nt"
- "punpckhbw %%mm7, %%mm3 nt"
- "punpcklbw %%mm7, %%mm4 nt"
- "punpckhbw %%mm7, %%mm5 nt"
- "psubw %%mm2, %%mm4 nt"
- "psubw %%mm3, %%mm5 nt"
- "psllw $2, %%mm4 nt"
- "psllw $2, %%mm5 nt"
- "paddw %%mm0, %%mm4 nt"
- "paddw %%mm1, %%mm5 nt"
- "pxor %%mm6, %%mm6 nt"
- "pcmpgtw %%mm4, %%mm6 nt"
- "pcmpgtw %%mm5, %%mm7 nt"
- "pxor %%mm6, %%mm4 nt"
- "pxor %%mm7, %%mm5 nt"
- "psubw %%mm6, %%mm4 nt"
- "psubw %%mm7, %%mm5 nt"
- "psrlw $3, %%mm4 nt"
- "psrlw $3, %%mm5 nt"
- "packuswb %%mm5, %%mm4 nt"
- "packsswb %%mm7, %%mm6 nt"
- "pxor %%mm7, %%mm7 nt"
- "movd %4, %%mm2 nt"
- "punpcklbw %%mm2, %%mm2 nt"
- "punpcklbw %%mm2, %%mm2 nt"
- "punpcklbw %%mm2, %%mm2 nt"
- "psubusb %%mm4, %%mm2 nt"
- "movq %%mm2, %%mm3 nt"
- "psubusb %%mm4, %%mm3 nt"
- "psubb %%mm3, %%mm2 nt"
- "movq %1, %%mm3 nt"
- "movq %2, %%mm4 nt"
- "pxor %%mm6, %%mm3 nt"
- "pxor %%mm6, %%mm4 nt"
- "paddusb %%mm2, %%mm3 nt"
- "psubusb %%mm2, %%mm4 nt"
- "pxor %%mm6, %%mm3 nt"
- "pxor %%mm6, %%mm4 nt"
- "paddusb %%mm2, %%mm2 nt"
- "packsswb %%mm1, %%mm0 nt"
- "pcmpgtb %%mm0, %%mm7 nt"
- "pxor %%mm7, %%mm0 nt"
- "psubb %%mm7, %%mm0 nt"
- "movq %%mm0, %%mm1 nt"
- "psubusb %%mm2, %%mm0 nt"
- "psubb %%mm0, %%mm1 nt"
- "pand %5, %%mm1 nt"
- "psrlw $2, %%mm1 nt"
- "pxor %%mm7, %%mm1 nt"
- "psubb %%mm7, %%mm1 nt"
- "movq %0, %%mm5 nt"
- "movq %3, %%mm6 nt"
- "psubb %%mm1, %%mm5 nt"
- "paddb %%mm1, %%mm6 nt"
- static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
- const int strength= ff_h263_loop_filter_strength[qscale];
- asm volatile(
- H263_LOOP_FILTER
- "movq %%mm3, %1 nt"
- "movq %%mm4, %2 nt"
- "movq %%mm5, %0 nt"
- "movq %%mm6, %3 nt"
- : "+m" (*(uint64_t*)(src - 2*stride)),
- "+m" (*(uint64_t*)(src - 1*stride)),
- "+m" (*(uint64_t*)(src + 0*stride)),
- "+m" (*(uint64_t*)(src + 1*stride))
- : "g" (2*strength), "m"(ff_pb_FC)
- );
- }
- static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
- asm volatile( //FIXME could save 1 instruction if done as 8x4 ...
- "movd %4, %%mm0 nt"
- "movd %5, %%mm1 nt"
- "movd %6, %%mm2 nt"
- "movd %7, %%mm3 nt"
- "punpcklbw %%mm1, %%mm0 nt"
- "punpcklbw %%mm3, %%mm2 nt"
- "movq %%mm0, %%mm1 nt"
- "punpcklwd %%mm2, %%mm0 nt"
- "punpckhwd %%mm2, %%mm1 nt"
- "movd %%mm0, %0 nt"
- "punpckhdq %%mm0, %%mm0 nt"
- "movd %%mm0, %1 nt"
- "movd %%mm1, %2 nt"
- "punpckhdq %%mm1, %%mm1 nt"
- "movd %%mm1, %3 nt"
- : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
- "=m" (*(uint32_t*)(dst + 1*dst_stride)),
- "=m" (*(uint32_t*)(dst + 2*dst_stride)),
- "=m" (*(uint32_t*)(dst + 3*dst_stride))
- : "m" (*(uint32_t*)(src + 0*src_stride)),
- "m" (*(uint32_t*)(src + 1*src_stride)),
- "m" (*(uint32_t*)(src + 2*src_stride)),
- "m" (*(uint32_t*)(src + 3*src_stride))
- );
- }
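- /*
-  * Illustrative scalar sketch of transpose4x4():
-  *
-  *     for (i = 0; i < 4; i++)
-  *         for (j = 0; j < 4; j++)
-  *             dst[i*dst_stride + j] = src[j*src_stride + i];
-  */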
- static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
- const int strength= ff_h263_loop_filter_strength[qscale];
- uint64_t temp[4] __attribute__ ((aligned(8)));
- uint8_t *btemp= (uint8_t*)temp;
- src -= 2;
- transpose4x4(btemp , src , 8, stride);
- transpose4x4(btemp+4, src + 4*stride, 8, stride);
- asm volatile(
- H263_LOOP_FILTER // 5 3 4 6
- : "+m" (temp[0]),
- "+m" (temp[1]),
- "+m" (temp[2]),
- "+m" (temp[3])
- : "g" (2*strength), "m"(ff_pb_FC)
- );
- asm volatile(
- "movq %%mm5, %%mm1 nt"
- "movq %%mm4, %%mm0 nt"
- "punpcklbw %%mm3, %%mm5 nt"
- "punpcklbw %%mm6, %%mm4 nt"
- "punpckhbw %%mm3, %%mm1 nt"
- "punpckhbw %%mm6, %%mm0 nt"
- "movq %%mm5, %%mm3 nt"
- "movq %%mm1, %%mm6 nt"
- "punpcklwd %%mm4, %%mm5 nt"
- "punpcklwd %%mm0, %%mm1 nt"
- "punpckhwd %%mm4, %%mm3 nt"
- "punpckhwd %%mm0, %%mm6 nt"
- "movd %%mm5, (%0) nt"
- "punpckhdq %%mm5, %%mm5 nt"
- "movd %%mm5, (%0,%2) nt"
- "movd %%mm3, (%0,%2,2) nt"
- "punpckhdq %%mm3, %%mm3 nt"
- "movd %%mm3, (%0,%3) nt"
- "movd %%mm1, (%1) nt"
- "punpckhdq %%mm1, %%mm1 nt"
- "movd %%mm1, (%1,%2) nt"
- "movd %%mm6, (%1,%2,2) nt"
- "punpckhdq %%mm6, %%mm6 nt"
- "movd %%mm6, (%1,%3) nt"
- :: "r" (src),
- "r" (src + 4*stride),
- "r" ((long) stride ),
- "r" ((long)(3*stride))
- );
- }
- #ifdef CONFIG_ENCODERS
- static int pix_norm1_mmx(uint8_t *pix, int line_size) {
- int tmp;
- asm volatile (
- "movl $16,%%ecxn"
- "pxor %%mm0,%%mm0n"
- "pxor %%mm7,%%mm7n"
- "1:n"
- "movq (%0),%%mm2n" /* mm2 = pix[0-7] */
- "movq 8(%0),%%mm3n" /* mm3 = pix[8-15] */
- "movq %%mm2,%%mm1n" /* mm1 = mm2 = pix[0-7] */
- "punpckhbw %%mm0,%%mm1n" /* mm1 = [pix4-7] */
- "punpcklbw %%mm0,%%mm2n" /* mm2 = [pix0-3] */
- "movq %%mm3,%%mm4n" /* mm4 = mm3 = pix[8-15] */
- "punpckhbw %%mm0,%%mm3n" /* mm3 = [pix12-15] */
- "punpcklbw %%mm0,%%mm4n" /* mm4 = [pix8-11] */
- "pmaddwd %%mm1,%%mm1n" /* mm1 = (pix0^2+pix1^2,pix2^2+pix3^2) */
- "pmaddwd %%mm2,%%mm2n" /* mm2 = (pix4^2+pix5^2,pix6^2+pix7^2) */
- "pmaddwd %%mm3,%%mm3n"
- "pmaddwd %%mm4,%%mm4n"
- "paddd %%mm1,%%mm2n" /* mm2 = (pix0^2+pix1^2+pix4^2+pix5^2,
- pix2^2+pix3^2+pix6^2+pix7^2) */
- "paddd %%mm3,%%mm4n"
- "paddd %%mm2,%%mm7n"
- "add %2, %0n"
- "paddd %%mm4,%%mm7n"
- "dec %%ecxn"
- "jnz 1bn"
- "movq %%mm7,%%mm1n"
- "psrlq $32, %%mm7n" /* shift hi dword to lo */
- "paddd %%mm7,%%mm1n"
- "movd %%mm1,%1n"
- : "+r" (pix), "=r"(tmp) : "r" ((long)line_size) : "%ecx" );
- return tmp;
- }
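- /*
-  * Illustrative scalar sketch of pix_norm1_mmx() (hypothetical helper name):
-  * sum of squared pixel values over a 16x16 block.
-  *
-  * static int pix_norm1_ref(uint8_t *pix, int line_size)
-  * {
-  *     int sum = 0, i, j;
-  *     for (i = 0; i < 16; i++, pix += line_size)
-  *         for (j = 0; j < 16; j++)
-  *             sum += pix[j] * pix[j];
-  *     return sum;
-  * }
-  */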
- static int sse8_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
- int tmp;
- asm volatile (
- "movl %4,%%ecxn"
- "pxor %%mm0,%%mm0n" /* mm0 = 0 */
- "pxor %%mm7,%%mm7n" /* mm7 holds the sum */
- "1:n"
- "movq (%0),%%mm1n" /* mm1 = pix1[0-7] */
- "movq (%1),%%mm2n" /* mm2 = pix2[0-7] */
- "movq %%mm1,%%mm5n"
- "psubusb %%mm2,%%mm1n"
- "psubusb %%mm5,%%mm2n"
- "por %%mm1,%%mm2n"
- "movq %%mm2,%%mm1n"
- "punpckhbw %%mm0,%%mm2n"
- "punpcklbw %%mm0,%%mm1n" /* mm1 now spread over (mm1,mm2) */
- "pmaddwd %%mm2,%%mm2n"
- "pmaddwd %%mm1,%%mm1n"
- "add %3,%0n"
- "add %3,%1n"
- "paddd %%mm2,%%mm1n"
- "paddd %%mm1,%%mm7n"
- "decl %%ecxn"
- "jnz 1bn"
- "movq %%mm7,%%mm1n"
- "psrlq $32, %%mm7n" /* shift hi dword to lo */
- "paddd %%mm7,%%mm1n"
- "movd %%mm1,%2n"
- : "+r" (pix1), "+r" (pix2), "=r"(tmp)
- : "r" ((long)line_size) , "m" (h)
- : "%ecx");
- return tmp;
- }
- static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
- int tmp;
- asm volatile (
- "movl %4,%%ecxn"
- "pxor %%mm0,%%mm0n" /* mm0 = 0 */
- "pxor %%mm7,%%mm7n" /* mm7 holds the sum */
- "1:n"
- "movq (%0),%%mm1n" /* mm1 = pix1[0-7] */
- "movq (%1),%%mm2n" /* mm2 = pix2[0-7] */
- "movq 8(%0),%%mm3n" /* mm3 = pix1[8-15] */
- "movq 8(%1),%%mm4n" /* mm4 = pix2[8-15] */
- /* todo: mm1-mm2, mm3-mm4 */
- /* algo: subtract mm1 from mm2 with saturation and vice versa */
- /* OR the results to get absolute difference */
- "movq %%mm1,%%mm5n"
- "movq %%mm3,%%mm6n"
- "psubusb %%mm2,%%mm1n"
- "psubusb %%mm4,%%mm3n"
- "psubusb %%mm5,%%mm2n"
- "psubusb %%mm6,%%mm4n"
- "por %%mm1,%%mm2n"
- "por %%mm3,%%mm4n"
- /* now convert to 16-bit vectors so we can square them */
- "movq %%mm2,%%mm1n"
- "movq %%mm4,%%mm3n"
- "punpckhbw %%mm0,%%mm2n"
- "punpckhbw %%mm0,%%mm4n"
- "punpcklbw %%mm0,%%mm1n" /* mm1 now spread over (mm1,mm2) */
- "punpcklbw %%mm0,%%mm3n" /* mm4 now spread over (mm3,mm4) */
- "pmaddwd %%mm2,%%mm2n"
- "pmaddwd %%mm4,%%mm4n"
- "pmaddwd %%mm1,%%mm1n"
- "pmaddwd %%mm3,%%mm3n"
- "add %3,%0n"
- "add %3,%1n"
- "paddd %%mm2,%%mm1n"
- "paddd %%mm4,%%mm3n"
- "paddd %%mm1,%%mm7n"
- "paddd %%mm3,%%mm7n"
- "decl %%ecxn"
- "jnz 1bn"
- "movq %%mm7,%%mm1n"
- "psrlq $32, %%mm7n" /* shift hi dword to lo */
- "paddd %%mm7,%%mm1n"
- "movd %%mm1,%2n"
- : "+r" (pix1), "+r" (pix2), "=r"(tmp)
- : "r" ((long)line_size) , "m" (h)
- : "%ecx");
- return tmp;
- }
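- /*
-  * Illustrative scalar sketch covering sse8_mmx()/sse16_mmx() (hypothetical
-  * helper name): sum of squared differences over a w x h block.
-  *
-  * static int sse_ref(uint8_t *pix1, uint8_t *pix2, int line_size, int w, int h)
-  * {
-  *     int sum = 0, i, j;
-  *     for (i = 0; i < h; i++, pix1 += line_size, pix2 += line_size)
-  *         for (j = 0; j < w; j++) {
-  *             int d = pix1[j] - pix2[j];
-  *             sum += d * d;
-  *         }
-  *     return sum;
-  * }
-  */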
- static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) {
- int tmp;
- asm volatile (
- "movl %3,%%ecxn"
- "pxor %%mm7,%%mm7n"
- "pxor %%mm6,%%mm6n"
- "movq (%0),%%mm0n"
- "movq %%mm0, %%mm1n"
- "psllq $8, %%mm0n"
- "psrlq $8, %%mm1n"
- "psrlq $8, %%mm0n"
- "movq %%mm0, %%mm2n"
- "movq %%mm1, %%mm3n"
- "punpcklbw %%mm7,%%mm0n"
- "punpcklbw %%mm7,%%mm1n"
- "punpckhbw %%mm7,%%mm2n"
- "punpckhbw %%mm7,%%mm3n"
- "psubw %%mm1, %%mm0n"
- "psubw %%mm3, %%mm2n"
- "add %2,%0n"
- "movq (%0),%%mm4n"
- "movq %%mm4, %%mm1n"
- "psllq $8, %%mm4n"
- "psrlq $8, %%mm1n"
- "psrlq $8, %%mm4n"
- "movq %%mm4, %%mm5n"
- "movq %%mm1, %%mm3n"
- "punpcklbw %%mm7,%%mm4n"
- "punpcklbw %%mm7,%%mm1n"
- "punpckhbw %%mm7,%%mm5n"
- "punpckhbw %%mm7,%%mm3n"
- "psubw %%mm1, %%mm4n"
- "psubw %%mm3, %%mm5n"
- "psubw %%mm4, %%mm0n"
- "psubw %%mm5, %%mm2n"
- "pxor %%mm3, %%mm3n"
- "pxor %%mm1, %%mm1n"
- "pcmpgtw %%mm0, %%mm3nt"
- "pcmpgtw %%mm2, %%mm1nt"
- "pxor %%mm3, %%mm0n"
- "pxor %%mm1, %%mm2n"
- "psubw %%mm3, %%mm0n"
- "psubw %%mm1, %%mm2n"
- "paddw %%mm0, %%mm2n"
- "paddw %%mm2, %%mm6n"
- "add %2,%0n"
- "1:n"
- "movq (%0),%%mm0n"
- "movq %%mm0, %%mm1n"
- "psllq $8, %%mm0n"
- "psrlq $8, %%mm1n"
- "psrlq $8, %%mm0n"
- "movq %%mm0, %%mm2n"
- "movq %%mm1, %%mm3n"
- "punpcklbw %%mm7,%%mm0n"
- "punpcklbw %%mm7,%%mm1n"
- "punpckhbw %%mm7,%%mm2n"
- "punpckhbw %%mm7,%%mm3n"
- "psubw %%mm1, %%mm0n"
- "psubw %%mm3, %%mm2n"
- "psubw %%mm0, %%mm4n"
- "psubw %%mm2, %%mm5n"
- "pxor %%mm3, %%mm3n"
- "pxor %%mm1, %%mm1n"
- "pcmpgtw %%mm4, %%mm3nt"
- "pcmpgtw %%mm5, %%mm1nt"
- "pxor %%mm3, %%mm4n"
- "pxor %%mm1, %%mm5n"
- "psubw %%mm3, %%mm4n"
- "psubw %%mm1, %%mm5n"
- "paddw %%mm4, %%mm5n"
- "paddw %%mm5, %%mm6n"
- "add %2,%0n"
- "movq (%0),%%mm4n"
- "movq %%mm4, %%mm1n"
- "psllq $8, %%mm4n"
- "psrlq $8, %%mm1n"
- "psrlq $8, %%mm4n"
- "movq %%mm4, %%mm5n"
- "movq %%mm1, %%mm3n"
- "punpcklbw %%mm7,%%mm4n"
- "punpcklbw %%mm7,%%mm1n"
- "punpckhbw %%mm7,%%mm5n"
- "punpckhbw %%mm7,%%mm3n"
- "psubw %%mm1, %%mm4n"
- "psubw %%mm3, %%mm5n"
- "psubw %%mm4, %%mm0n"
- "psubw %%mm5, %%mm2n"
- "pxor %%mm3, %%mm3n"
- "pxor %%mm1, %%mm1n"
- "pcmpgtw %%mm0, %%mm3nt"
- "pcmpgtw %%mm2, %%mm1nt"
- "pxor %%mm3, %%mm0n"
- "pxor %%mm1, %%mm2n"
- "psubw %%mm3, %%mm0n"
- "psubw %%mm1, %%mm2n"
- "paddw %%mm0, %%mm2n"
- "paddw %%mm2, %%mm6n"
- "add %2,%0n"
- "subl $2, %%ecxn"
- " jnz 1bn"
- "movq %%mm6, %%mm0n"
- "punpcklwd %%mm7,%%mm0n"
- "punpckhwd %%mm7,%%mm6n"
- "paddd %%mm0, %%mm6n"
- "movq %%mm6,%%mm0n"
- "psrlq $32, %%mm6n"
- "paddd %%mm6,%%mm0n"
- "movd %%mm0,%1n"
- : "+r" (pix1), "=r"(tmp)
- : "r" ((long)line_size) , "g" (h-2)
- : "%ecx");
- return tmp;
- }
- static int hf_noise16_mmx(uint8_t * pix1, int line_size, int h) {
- int tmp;
- uint8_t * pix= pix1;
- asm volatile (
- "movl %3,%%ecxn"
- "pxor %%mm7,%%mm7n"
- "pxor %%mm6,%%mm6n"
- "movq (%0),%%mm0n"
- "movq 1(%0),%%mm1n"
- "movq %%mm0, %%mm2n"
- "movq %%mm1, %%mm3n"
- "punpcklbw %%mm7,%%mm0n"
- "punpcklbw %%mm7,%%mm1n"
- "punpckhbw %%mm7,%%mm2n"
- "punpckhbw %%mm7,%%mm3n"
- "psubw %%mm1, %%mm0n"
- "psubw %%mm3, %%mm2n"
- "add %2,%0n"
- "movq (%0),%%mm4n"
- "movq 1(%0),%%mm1n"
- "movq %%mm4, %%mm5n"
- "movq %%mm1, %%mm3n"
- "punpcklbw %%mm7,%%mm4n"
- "punpcklbw %%mm7,%%mm1n"
- "punpckhbw %%mm7,%%mm5n"
- "punpckhbw %%mm7,%%mm3n"
- "psubw %%mm1, %%mm4n"
- "psubw %%mm3, %%mm5n"
- "psubw %%mm4, %%mm0n"
- "psubw %%mm5, %%mm2n"
- "pxor %%mm3, %%mm3n"
- "pxor %%mm1, %%mm1n"
- "pcmpgtw %%mm0, %%mm3nt"
- "pcmpgtw %%mm2, %%mm1nt"
- "pxor %%mm3, %%mm0n"
- "pxor %%mm1, %%mm2n"
- "psubw %%mm3, %%mm0n"
- "psubw %%mm1, %%mm2n"
- "paddw %%mm0, %%mm2n"
- "paddw %%mm2, %%mm6n"
- "add %2,%0n"
- "1:n"
- "movq (%0),%%mm0n"
- "movq 1(%0),%%mm1n"
- "movq %%mm0, %%mm2n"
- "movq %%mm1, %%mm3n"
- "punpcklbw %%mm7,%%mm0n"
- "punpcklbw %%mm7,%%mm1n"
- "punpckhbw %%mm7,%%mm2n"
- "punpckhbw %%mm7,%%mm3n"
- "psubw %%mm1, %%mm0n"
- "psubw %%mm3, %%mm2n"
- "psubw %%mm0, %%mm4n"
- "psubw %%mm2, %%mm5n"
- "pxor %%mm3, %%mm3n"
- "pxor %%mm1, %%mm1n"
- "pcmpgtw %%mm4, %%mm3nt"
- "pcmpgtw %%mm5, %%mm1nt"
- "pxor %%mm3, %%mm4n"
- "pxor %%mm1, %%mm5n"
- "psubw %%mm3, %%mm4n"
- "psubw %%mm1, %%mm5n"
- "paddw %%mm4, %%mm5n"
- "paddw %%mm5, %%mm6n"
- "add %2,%0n"
- "movq (%0),%%mm4n"
- "movq 1(%0),%%mm1n"
- "movq %%mm4, %%mm5n"
- "movq %%mm1, %%mm3n"
- "punpcklbw %%mm7,%%mm4n"
- "punpcklbw %%mm7,%%mm1n"
- "punpckhbw %%mm7,%%mm5n"
- "punpckhbw %%mm7,%%mm3n"
- "psubw %%mm1, %%mm4n"
- "psubw %%mm3, %%mm5n"
- "psubw %%mm4, %%mm0n"
- "psubw %%mm5, %%mm2n"
- "pxor %%mm3, %%mm3n"
- "pxor %%mm1, %%mm1n"
- "pcmpgtw %%mm0, %%mm3nt"
- "pcmpgtw %%mm2, %%mm1nt"
- "pxor %%mm3, %%mm0n"
- "pxor %%mm1, %%mm2n"
- "psubw %%mm3, %%mm0n"
- "psubw %%mm1, %%mm2n"
- "paddw %%mm0, %%mm2n"
- "paddw %%mm2, %%mm6n"
- "add %2,%0n"
- "subl $2, %%ecxn"
- " jnz 1bn"
- "movq %%mm6, %%mm0n"
- "punpcklwd %%mm7,%%mm0n"
- "punpckhwd %%mm7,%%mm6n"
- "paddd %%mm0, %%mm6n"
- "movq %%mm6,%%mm0n"
- "psrlq $32, %%mm6n"
- "paddd %%mm6,%%mm0n"
- "movd %%mm0,%1n"
- : "+r" (pix1), "=r"(tmp)
- : "r" ((long)line_size) , "g" (h-2)
- : "%ecx");
- return tmp + hf_noise8_mmx(pix+8, line_size, h);
- }
- static int nsse16_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
- MpegEncContext *c = p;
- int score1= sse16_mmx(c, pix1, pix2, line_size, h);
- int score2= hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h);
- if(c) return score1 + ABS(score2)*c->avctx->nsse_weight;
- else return score1 + ABS(score2)*8;
- }
- static int nsse8_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
- MpegEncContext *c = p;
- int score1= sse8_mmx(c, pix1, pix2, line_size, h);
- int score2= hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h);
- if(c) return score1 + ABS(score2)*c->avctx->nsse_weight;
- else return score1 + ABS(score2)*8;
- }
- static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
- int tmp;
- assert( (((int)pix) & 7) == 0);
- assert((line_size &7) ==0);
- #define SUM(in0, in1, out0, out1) \
-     "movq (%0), %%mm2\n" \
-     "movq 8(%0), %%mm3\n" \
-     "add %2,%0\n" \
-     "movq %%mm2, " #out0 "\n" \
-     "movq %%mm3, " #out1 "\n" \
-     "psubusb " #in0 ", %%mm2\n" \
-     "psubusb " #in1 ", %%mm3\n" \
-     "psubusb " #out0 ", " #in0 "\n" \
-     "psubusb " #out1 ", " #in1 "\n" \
-     "por %%mm2, " #in0 "\n" \
-     "por %%mm3, " #in1 "\n" \
-     "movq " #in0 ", %%mm2\n" \
-     "movq " #in1 ", %%mm3\n" \
-     "punpcklbw %%mm7, " #in0 "\n" \
-     "punpcklbw %%mm7, " #in1 "\n" \
-     "punpckhbw %%mm7, %%mm2\n" \
-     "punpckhbw %%mm7, %%mm3\n" \
-     "paddw " #in1 ", " #in0 "\n" \
-     "paddw %%mm3, %%mm2\n" \
-     "paddw %%mm2, " #in0 "\n" \
-     "paddw " #in0 ", %%mm6\n"
- asm volatile (
- "movl %3,%%ecxn"
- "pxor %%mm6,%%mm6n"
- "pxor %%mm7,%%mm7n"
- "movq (%0),%%mm0n"
- "movq 8(%0),%%mm1n"
- "add %2,%0n"
- "subl $2, %%ecxn"
- SUM(%%mm0, %%mm1, %%mm4, %%mm5)
- "1:n"
- SUM(%%mm4, %%mm5, %%mm0, %%mm1)
- SUM(%%mm0, %%mm1, %%mm4, %%mm5)
- "subl $2, %%ecxn"
- "jnz 1bn"
- "movq %%mm6,%%mm0n"
- "psrlq $32, %%mm6n"
- "paddw %%mm6,%%mm0n"
- "movq %%mm0,%%mm6n"
- "psrlq $16, %%mm0n"
- "paddw %%mm6,%%mm0n"
- "movd %%mm0,%1n"
- : "+r" (pix), "=r"(tmp)
- : "r" ((long)line_size) , "m" (h)
- : "%ecx");
- return tmp & 0xFFFF;
- }
- #undef SUM
- static int vsad_intra16_mmx2(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
- int tmp;
- assert( (((int)pix) & 7) == 0);
- assert((line_size &7) ==0);
- #define SUM(in0, in1, out0, out1) \
-     "movq (%0), " #out0 "\n" \
-     "movq 8(%0), " #out1 "\n" \
-     "add %2,%0\n" \
-     "psadbw " #out0 ", " #in0 "\n" \
-     "psadbw " #out1 ", " #in1 "\n" \
-     "paddw " #in1 ", " #in0 "\n" \
-     "paddw " #in0 ", %%mm6\n"
- asm volatile (
- "movl %3,%%ecxn"
- "pxor %%mm6,%%mm6n"
- "pxor %%mm7,%%mm7n"
- "movq (%0),%%mm0n"
- "movq 8(%0),%%mm1n"
- "add %2,%0n"
- "subl $2, %%ecxn"
- SUM(%%mm0, %%mm1, %%mm4, %%mm5)
- "1:n"
- SUM(%%mm4, %%mm5, %%mm0, %%mm1)
- SUM(%%mm0, %%mm1, %%mm4, %%mm5)
- "subl $2, %%ecxn"
- "jnz 1bn"
- "movd %%mm6,%1n"
- : "+r" (pix), "=r"(tmp)
- : "r" ((long)line_size) , "m" (h)
- : "%ecx");
- return tmp;
- }
- #undef SUM
- static int vsad16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
- int tmp;
- assert( (((int)pix1) & 7) == 0);
- assert( (((int)pix2) & 7) == 0);
- assert((line_size &7) ==0);
- #define SUM(in0, in1, out0, out1)
- "movq (%0),%%mm2n"
- "movq (%1)," #out0 "n"
- "movq 8(%0),%%mm3n"
- "movq 8(%1)," #out1 "n"
- "add %3,%0n"
- "add %3,%1n"
- "psubb " #out0 ", %%mm2n"
- "psubb " #out1 ", %%mm3n"
- "pxor %%mm7, %%mm2n"
- "pxor %%mm7, %%mm3n"
- "movq %%mm2, " #out0 "n"
- "movq %%mm3, " #out1 "n"
- "psubusb " #in0 ", %%mm2n"
- "psubusb " #in1 ", %%mm3n"
- "psubusb " #out0 ", " #in0 "n"
- "psubusb " #out1 ", " #in1 "n"
- "por %%mm2, " #in0 "n"
- "por %%mm3, " #in1 "n"
- "movq " #in0 ", %%mm2n"
- "movq " #in1 ", %%mm3n"
- "punpcklbw %%mm7, " #in0 "n"
- "punpcklbw %%mm7, " #in1 "n"
- "punpckhbw %%mm7, %%mm2n"
- "punpckhbw %%mm7, %%mm3n"
- "paddw " #in1 ", " #in0 "n"
- "paddw %%mm3, %%mm2n"
- "paddw %%mm2, " #in0 "n"
- "paddw " #in0 ", %%mm6n"
- asm volatile (
- "movl %4,%%ecxn"
- "pxor %%mm6,%%mm6n"
- "pcmpeqw %%mm7,%%mm7n"
- "psllw $15, %%mm7n"
- "packsswb %%mm7, %%mm7n"
- "movq (%0),%%mm0n"
- "movq (%1),%%mm2n"
- "movq 8(%0),%%mm1n"
- "movq 8(%1),%%mm3n"
- "add %3,%0n"
- "add %3,%1n"
- "subl $2, %%ecxn"
- "psubb %%mm2, %%mm0n"
- "psubb %%mm3, %%mm1n"
- "pxor %%mm7, %%mm0n"
- "pxor %%mm7, %%mm1n"
- SUM(%%mm0, %%mm1, %%mm4, %%mm5)
- "1:n"
- SUM(%%mm4, %%mm5, %%mm0, %%mm1)
- SUM(%%mm0, %%mm1, %%mm4, %%mm5)
- "subl $2, %%ecxn"
- "jnz 1bn"
- "movq %%mm6,%%mm0n"
- "psrlq $32, %%mm6n"
- "paddw %%mm6,%%mm0n"
- "movq %%mm0,%%mm6n"
- "psrlq $16, %%mm0n"
- "paddw %%mm6,%%mm0n"
- "movd %%mm0,%2n"
- : "+r" (pix1), "+r" (pix2), "=r"(tmp)
- : "r" ((long)line_size) , "m" (h)
- : "%ecx");
- return tmp & 0x7FFF;
- }
- #undef SUM
- static int vsad16_mmx2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
- int tmp;
- assert( (((int)pix1) & 7) == 0);
- assert( (((int)pix2) & 7) == 0);
- assert((line_size &7) ==0);
- #define SUM(in0, in1, out0, out1)
- "movq (%0)," #out0 "n"
- "movq (%1),%%mm2n"
- "movq 8(%0)," #out1 "n"
- "movq 8(%1),%%mm3n"
- "add %3,%0n"
- "add %3,%1n"
- "psubb %%mm2, " #out0 "n"
- "psubb %%mm3, " #out1 "n"
- "pxor %%mm7, " #out0 "n"
- "pxor %%mm7, " #out1 "n"
- "psadbw " #out0 ", " #in0 "n"
- "psadbw " #out1 ", " #in1 "n"
- "paddw " #in1 ", " #in0 "n"
- "paddw " #in0 ", %%mm6n"
- asm volatile (
- "movl %4,%%ecxn"
- "pxor %%mm6,%%mm6n"
- "pcmpeqw %%mm7,%%mm7n"
- "psllw $15, %%mm7n"
- "packsswb %%mm7, %%mm7n"
- "movq (%0),%%mm0n"
- "movq (%1),%%mm2n"
- "movq 8(%0),%%mm1n"
- "movq 8(%1),%%mm3n"
- "add %3,%0n"
- "add %3,%1n"
- "subl $2, %%ecxn"
- "psubb %%mm2, %%mm0n"
- "psubb %%mm3, %%mm1n"
- "pxor %%mm7, %%mm0n"
- "pxor %%mm7, %%mm1n"
- SUM(%%mm0, %%mm1, %%mm4, %%mm5)
- "1:n"
- SUM(%%mm4, %%mm5, %%mm0, %%mm1)
- SUM(%%mm0, %%mm1, %%mm4, %%mm5)
- "subl $2, %%ecxn"
- "jnz 1bn"
- "movd %%mm6,%2n"
- : "+r" (pix1), "+r" (pix2), "=r"(tmp)
- : "r" ((long)line_size) , "m" (h)
- : "%ecx");
- return tmp;
- }
- #undef SUM
- static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
- long i=0;
- asm volatile(
- "1: nt"
- "movq (%2, %0), %%mm0 nt"
- "movq (%1, %0), %%mm1 nt"
- "psubb %%mm0, %%mm1 nt"
- "movq %%mm1, (%3, %0) nt"
- "movq 8(%2, %0), %%mm0 nt"
- "movq 8(%1, %0), %%mm1 nt"
- "psubb %%mm0, %%mm1 nt"
- "movq %%mm1, 8(%3, %0) nt"
- "add $16, %0 nt"
- "cmp %4, %0 nt"
- " jb 1b nt"
- : "+r" (i)
- : "r"(src1), "r"(src2), "r"(dst), "r"((long)w-15)
- );
- for(; i<w; i++)
- dst[i+0] = src1[i+0]-src2[i+0];
- }
- static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top){
- long i=0;
- uint8_t l, lt;
- asm volatile(
- "1: nt"
- "movq -1(%1, %0), %%mm0 nt" // LT
- "movq (%1, %0), %%mm1 nt" // T
- "movq -1(%2, %0), %%mm2 nt" // L
- "movq (%2, %0), %%mm3 nt" // X
- "movq %%mm2, %%mm4 nt" // L
- "psubb %%mm0, %%mm2 nt"
- "paddb %%mm1, %%mm2 nt" // L + T - LT
- "movq %%mm4, %%mm5 nt" // L
- "pmaxub %%mm1, %%mm4 nt" // max(T, L)
- "pminub %%mm5, %%mm1 nt" // min(T, L)
- "pminub %%mm2, %%mm4 nt"
- "pmaxub %%mm1, %%mm4 nt"
- "psubb %%mm4, %%mm3 nt" // dst - pred
- "movq %%mm3, (%3, %0) nt"
- "add $8, %0 nt"
- "cmp %4, %0 nt"
- " jb 1b nt"
- : "+r" (i)
- : "r"(src1), "r"(src2), "r"(dst), "r"((long)w)
- );
- l= *left;
- lt= *left_top;
- dst[0]= src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt)&0xFF);
- *left_top= src1[w-1];
- *left = src2[w-1];
- }
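- /*
-  * Illustrative scalar sketch of the HuffYUV median prediction subtracted
-  * above (mid_pred() returns the median of its three arguments, as in the
-  * dst[0] special case):
-  *
-  *     for (i = 1; i < w; i++) {
-  *         int L = src2[i-1], T = src1[i], LT = src1[i-1];
-  *         dst[i] = src2[i] - mid_pred(L, T, (L + T - LT) & 0xFF);
-  *     }
-  */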
- #define LBUTTERFLY2(a1,b1,a2,b2) \
-     "paddw " #b1 ", " #a1 " \n\t" \
-     "paddw " #b2 ", " #a2 " \n\t" \
-     "paddw " #b1 ", " #b1 " \n\t" \
-     "paddw " #b2 ", " #b2 " \n\t" \
-     "psubw " #a1 ", " #b1 " \n\t" \
-     "psubw " #a2 ", " #b2 " \n\t"
- #define HADAMARD48 \
-     LBUTTERFLY2(%%mm0, %%mm1, %%mm2, %%mm3) \
-     LBUTTERFLY2(%%mm4, %%mm5, %%mm6, %%mm7) \
-     LBUTTERFLY2(%%mm0, %%mm2, %%mm1, %%mm3) \
-     LBUTTERFLY2(%%mm4, %%mm6, %%mm5, %%mm7) \
-     LBUTTERFLY2(%%mm0, %%mm4, %%mm1, %%mm5) \
-     LBUTTERFLY2(%%mm2, %%mm6, %%mm3, %%mm7)
- #define MMABS(a,z) \
-     "pxor " #z ", " #z " \n\t" \
-     "pcmpgtw " #a ", " #z " \n\t" \
-     "pxor " #z ", " #a " \n\t" \
-     "psubw " #z ", " #a " \n\t"
- #define MMABS_SUM(a,z, sum) \
-     "pxor " #z ", " #z " \n\t" \
-     "pcmpgtw " #a ", " #z " \n\t" \
-     "pxor " #z ", " #a " \n\t" \
-     "psubw " #z ", " #a " \n\t" \
-     "paddusw " #a ", " #sum " \n\t"
- #define MMABS_MMX2(a,z) \
-     "pxor " #z ", " #z " \n\t" \
-     "psubw " #a ", " #z " \n\t" \
-     "pmaxsw " #z ", " #a " \n\t"
- #define MMABS_SUM_MMX2(a,z, sum) \
-     "pxor " #z ", " #z " \n\t" \
-     "psubw " #a ", " #z " \n\t" \
-     "pmaxsw " #z ", " #a " \n\t" \
-     "paddusw " #a ", " #sum " \n\t"
- #define SBUTTERFLY(a,b,t,n) \
-     "movq " #a ", " #t " \n\t" /* abcd */ \
-     "punpckl" #n " " #b ", " #a " \n\t" /* aebf */ \
-     "punpckh" #n " " #b ", " #t " \n\t" /* cgdh */
- #define TRANSPOSE4(a,b,c,d,t) \
-     SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */ \
-     SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */ \
-     SBUTTERFLY(a,c,d,dq) /* a=aeim d=bfjn */ \
-     SBUTTERFLY(t,b,c,dq) /* t=cgko c=dhlp */
- #define LOAD4(o, a, b, c, d) \
-     "movq "#o"(%1), " #a " \n\t" \
-     "movq "#o"+16(%1), " #b " \n\t" \
-     "movq "#o"+32(%1), " #c " \n\t" \
-     "movq "#o"+48(%1), " #d " \n\t"
- #define STORE4(o, a, b, c, d) \
-     "movq "#a", "#o"(%1) \n\t" \
-     "movq "#b", "#o"+16(%1) \n\t" \
-     "movq "#c", "#o"+32(%1) \n\t" \
-     "movq "#d", "#o"+48(%1) \n\t"
- static int hadamard8_diff_mmx(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
- uint64_t temp[16] __align8;
- int sum=0;
- assert(h==8);
- diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);
- asm volatile(
- LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
- LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)
- HADAMARD48
- "movq %%mm7, 112(%1) nt"
- TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
- STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)
- "movq 112(%1), %%mm7 nt"
- TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
- STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)
- LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
- LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
- HADAMARD48
- "movq %%mm7, 120(%1) nt"
- TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
- STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)
- "movq 120(%1), %%mm7 nt"
- TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
- "movq %%mm7, %%mm5 nt"//FIXME remove
- "movq %%mm6, %%mm7 nt"
- "movq %%mm0, %%mm6 nt"
- // STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove
- LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
- // LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
- HADAMARD48
- "movq %%mm7, 64(%1) nt"
- MMABS(%%mm0, %%mm7)
- MMABS_SUM(%%mm1, %%mm7, %%mm0)
- MMABS_SUM(%%mm2, %%mm7, %%mm0)
- MMABS_SUM(%%mm3, %%mm7, %%mm0)
- MMABS_SUM(%%mm4, %%mm7, %%mm0)
- MMABS_SUM(%%mm5, %%mm7, %%mm0)
- MMABS_SUM(%%mm6, %%mm7, %%mm0)
- "movq 64(%1), %%mm1 nt"
- MMABS_SUM(%%mm1, %%mm7, %%mm0)
- "movq %%mm0, 64(%1) nt"
- LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
- LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)
- HADAMARD48
- "movq %%mm7, (%1) nt"
- MMABS(%%mm0, %%mm7)
- MMABS_SUM(%%mm1, %%mm7, %%mm0)
- MMABS_SUM(%%mm2, %%mm7, %%mm0)
- MMABS_SUM(%%mm3, %%mm7, %%mm0)
- MMABS_SUM(%%mm4, %%mm7, %%mm0)
- MMABS_SUM(%%mm5, %%mm7, %%mm0)
- MMABS_SUM(%%mm6, %%mm7, %%mm0)
- "movq (%1), %%mm1 nt"
- MMABS_SUM(%%mm1, %%mm7, %%mm0)
- "movq 64(%1), %%mm1 nt"
- MMABS_SUM(%%mm1, %%mm7, %%mm0)
- "movq %%mm0, %%mm1 nt"
- "psrlq $32, %%mm0 nt"
- "paddusw %%mm1, %%mm0 nt"
- "movq %%mm0, %%mm1 nt"
- "psrlq $16, %%mm0 nt"
- "paddusw %%mm1, %%mm0 nt"
- "movd %%mm0, %0 nt"
- : "=r" (sum)
- : "r"(temp)
- );
- return sum&0xFFFF;
- }
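- /*
-  * hadamard8_diff computes a SATD-style metric: the 8x8 pixel difference
-  * produced by diff_pixels_mmx() is run through an 8x8 Hadamard transform
-  * (the LBUTTERFLY2/HADAMARD48 passes plus the transposes above) and the
-  * absolute values of the transform coefficients are summed, roughly
-  * sum |H(src1 - src2)|, saturated to 16 bits.
-  */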
- static int hadamard8_diff_mmx2(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
- uint64_t temp[16] __align8;
- int sum=0;
- assert(h==8);
- diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);
- asm volatile(
- LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
- LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)
- HADAMARD48
- "movq %%mm7, 112(%1) nt"
- TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
- STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)
- "movq 112(%1), %%mm7 nt"
- TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
- STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)
- LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
- LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
- HADAMARD48
- "movq %%mm7, 120(%1) nt"
- TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
- STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)
- "movq 120(%1), %%mm7 nt"
- TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
- "movq %%mm7, %%mm5 nt"//FIXME remove
- "movq %%mm6, %%mm7 nt"
- "movq %%mm0, %%mm6 nt"
- // STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove
- LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
- // LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
- HADAMARD48
- "movq %%mm7, 64(%1) nt"
- MMABS_MMX2(%%mm0, %%mm7)
- MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
- MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
- MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
- MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
- MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
- MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
- "movq 64(%1), %%mm1 nt"
- MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
- "movq %%mm0, 64(%1) nt"
- LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
- LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)
- HADAMARD48
- "movq %%mm7, (%1) nt"
- MMABS_MMX2(%%mm0, %%mm7)
- MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
- MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
- MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
- MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
- MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
- MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
- "movq (%1), %%mm1 nt"
- MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
- "movq 64(%1), %%mm1 nt"
- MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
- "movq %%mm0, %%mm1 nt"
- "psrlq $32, %%mm0 nt"
- "paddusw %%mm1, %%mm0 nt"
- "movq %%mm0, %%mm1 nt"
- "psrlq $16, %%mm0 nt"
- "paddusw %%mm1, %%mm0 nt"
- "movd %%mm0, %0 nt"
- : "=r" (sum)
- : "r"(temp)
- );
- return sum&0xFFFF;
- }
- WARPER8_16_SQ(hadamard8_diff_mmx, hadamard8_diff16_mmx)
- WARPER8_16_SQ(hadamard8_diff_mmx2, hadamard8_diff16_mmx2)
- #endif //CONFIG_ENCODERS
- #define put_no_rnd_pixels8_mmx(a,b,c,d) put_pixels8_mmx(a,b,c,d)
- #define put_no_rnd_pixels16_mmx(a,b,c,d) put_pixels16_mmx(a,b,c,d)
- #define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)
- "paddw " #m4 ", " #m3 " nt" /* x1 */
- "movq "MANGLE(ff_pw_20)", %%mm4 nt" /* 20 */
- "pmullw " #m3 ", %%mm4 nt" /* 20x1 */
- "movq "#in7", " #m3 " nt" /* d */
- "movq "#in0", %%mm5 nt" /* D */
- "paddw " #m3 ", %%mm5 nt" /* x4 */
- "psubw %%mm5, %%mm4 nt" /* 20x1 - x4 */
- "movq "#in1", %%mm5 nt" /* C */
- "movq "#in2", %%mm6 nt" /* B */
- "paddw " #m6 ", %%mm5 nt" /* x3 */
- "paddw " #m5 ", %%mm6 nt" /* x2 */
- "paddw %%mm6, %%mm6 nt" /* 2x2 */
- "psubw %%mm6, %%mm5 nt" /* -2x2 + x3 */
- "pmullw "MANGLE(ff_pw_3)", %%mm5 nt" /* -6x2 + 3x3 */
- "paddw " #rnd ", %%mm4 nt" /* x2 */
- "paddw %%mm4, %%mm5 nt" /* 20x1 - 6x2 + 3x3 - x4 */
- "psraw $5, %%mm5 nt"
- "packuswb %%mm5, %%mm5 nt"
- OP(%%mm5, out, %%mm7, d)
- #define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)
- static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){
- uint64_t temp;
- asm volatile(
- "pxor %%mm7, %%mm7 nt"
- "1: nt"
- "movq (%0), %%mm0 nt" /* ABCDEFGH */
- "movq %%mm0, %%mm1 nt" /* ABCDEFGH */
- "movq %%mm0, %%mm2 nt" /* ABCDEFGH */
- "punpcklbw %%mm7, %%mm0 nt" /* 0A0B0C0D */
- "punpckhbw %%mm7, %%mm1 nt" /* 0E0F0G0H */
- "pshufw $0x90, %%mm0, %%mm5 nt" /* 0A0A0B0C */
- "pshufw $0x41, %%mm0, %%mm6 nt" /* 0B0A0A0B */
- "movq %%mm2, %%mm3 nt" /* ABCDEFGH */
- "movq %%mm2, %%mm4 nt" /* ABCDEFGH */
- "psllq $8, %%mm2 nt" /* 0ABCDEFG */
- "psllq $16, %%mm3 nt" /* 00ABCDEF */
- "psllq $24, %%mm4 nt" /* 000ABCDE */
- "punpckhbw %%mm7, %%mm2 nt" /* 0D0E0F0G */
- "punpckhbw %%mm7, %%mm3 nt" /* 0C0D0E0F */
- "punpckhbw %%mm7, %%mm4 nt" /* 0B0C0D0E */
- "paddw %%mm3, %%mm5 nt" /* b */
- "paddw %%mm2, %%mm6 nt" /* c */
- "paddw %%mm5, %%mm5 nt" /* 2b */
- "psubw %%mm5, %%mm6 nt" /* c - 2b */
- "pshufw $0x06, %%mm0, %%mm5 nt" /* 0C0B0A0A */
- "pmullw "MANGLE(ff_pw_3)", %%mm6 nt" /* 3c - 6b */
- "paddw %%mm4, %%mm0 nt" /* a */
- "paddw %%mm1, %%mm5 nt" /* d */
- "pmullw "MANGLE(ff_pw_20)", %%mm0 nt" /* 20a */
- "psubw %%mm5, %%mm0 nt" /* 20a - d */
- "paddw %6, %%mm6 nt"
- "paddw %%mm6, %%mm0 nt" /* 20a - 6b + 3c - d */
- "psraw $5, %%mm0 nt"
- "movq %%mm0, %5 nt"
- /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */
- "movq 5(%0), %%mm0 nt" /* FGHIJKLM */
- "movq %%mm0, %%mm5 nt" /* FGHIJKLM */
- "movq %%mm0, %%mm6 nt" /* FGHIJKLM */
- "psrlq $8, %%mm0 nt" /* GHIJKLM0 */
- "psrlq $16, %%mm5 nt" /* HIJKLM00 */
- "punpcklbw %%mm7, %%mm0 nt" /* 0G0H0I0J */
- "punpcklbw %%mm7, %%mm5 nt" /* 0H0I0J0K */
- "paddw %%mm0, %%mm2 nt" /* b */
- "paddw %%mm5, %%mm3 nt" /* c */
- "paddw %%mm2, %%mm2 nt" /* 2b */
- "psubw %%mm2, %%mm3 nt" /* c - 2b */
- "movq %%mm6, %%mm2 nt" /* FGHIJKLM */
- "psrlq $24, %%mm6 nt" /* IJKLM000 */
- "punpcklbw %%mm7, %%mm2 nt" /* 0F0G0H0I */
- "punpcklbw %%mm7, %%mm6 nt" /* 0I0J0K0L */
- "pmullw "MANGLE(ff_pw_3)", %%mm3 nt" /* 3c - 6b */
- "paddw %%mm2, %%mm1 nt" /* a */
- "paddw %%mm6, %%mm4 nt" /* d */
- "pmullw "MANGLE(ff_pw_20)", %%mm1 nt" /* 20a */
- "psubw %%mm4, %%mm3 nt" /* - 6b +3c - d */
- "paddw %6, %%mm1 nt"
- "paddw %%mm1, %%mm3 nt" /* 20a - 6b +3c - d */
- "psraw $5, %%mm3 nt"
- "movq %5, %%mm1 nt"
- "packuswb %%mm3, %%mm1 nt"
- OP_MMX2(%%mm1, (%1),%%mm4, q)
- /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */
- "movq 9(%0), %%mm1 nt" /* JKLMNOPQ */
- "movq %%mm1, %%mm4 nt" /* JKLMNOPQ */
- "movq %%mm1, %%mm3 nt" /* JKLMNOPQ */
- "psrlq $8, %%mm1 nt" /* KLMNOPQ0 */
- "psrlq $16, %%mm4 nt" /* LMNOPQ00 */
- "punpcklbw %%mm7, %%mm1 nt" /* 0K0L0M0N */
- "punpcklbw %%mm7, %%mm4 nt" /* 0L0M0N0O */
- "paddw %%mm1, %%mm5 nt" /* b */
- "paddw %%mm4, %%mm0 nt" /* c */
- "paddw %%mm5, %%mm5 nt" /* 2b */
- "psubw %%mm5, %%mm0 nt" /* c - 2b */
- "movq %%mm3, %%mm5 nt" /* JKLMNOPQ */
- "psrlq $24, %%mm3 nt" /* MNOPQ000 */
- "pmullw "MANGLE(ff_pw_3)", %%mm0 nt" /* 3c - 6b */
- "punpcklbw %%mm7, %%mm3 nt" /* 0M0N0O0P */
- "paddw %%mm3, %%mm2 nt" /* d */
- "psubw %%mm2, %%mm0 nt" /* -6b + 3c - d */
- "movq %%mm5, %%mm2 nt" /* JKLMNOPQ */
- "punpcklbw %%mm7, %%mm2 nt" /* 0J0K0L0M */
- "punpckhbw %%mm7, %%mm5 nt" /* 0N0O0P0Q */
- "paddw %%mm2, %%mm6 nt" /* a */
- "pmullw "MANGLE(ff_pw_20)", %%mm6 nt" /* 20a */
- "paddw %6, %%mm0 nt"
- "paddw %%mm6, %%mm0 nt" /* 20a - 6b + 3c - d */
- "psraw $5, %%mm0 nt"
- /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */
- "paddw %%mm5, %%mm3 nt" /* a */
- "pshufw $0xF9, %%mm5, %%mm6 nt" /* 0O0P0Q0Q */
- "paddw %%mm4, %%mm6 nt" /* b */
- "pshufw $0xBE, %%mm5, %%mm4 nt" /* 0P0Q0Q0P */
- "pshufw $0x6F, %%mm5, %%mm5 nt" /* 0Q0Q0P0O */
- "paddw %%mm1, %%mm4 nt" /* c */
- "paddw %%mm2, %%mm5 nt" /* d */
- "paddw %%mm6, %%mm6 nt" /* 2b */
- "psubw %%mm6, %%mm4 nt" /* c - 2b */
- "pmullw "MANGLE(ff_pw_20)", %%mm3 nt" /* 20a */
- "pmullw "MANGLE(ff_pw_3)", %%mm4 nt" /* 3c - 6b */
- "psubw %%mm5, %%mm3 nt" /* -6b + 3c - d */
- "paddw %6, %%mm4 nt"
- "paddw %%mm3, %%mm4 nt" /* 20a - 6b + 3c - d */
- "psraw $5, %%mm4 nt"
- "packuswb %%mm4, %%mm0 nt"
- OP_MMX2(%%mm0, 8(%1), %%mm4, q)
- "add %3, %0 nt"
- "add %4, %1 nt"
- "decl %2 nt"
- " jnz 1b nt"
- : "+a"(src), "+c"(dst), "+m"(h)
- : "d"((long)srcStride), "S"((long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)
- : "memory"
- );
- }
- static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){
- int i;
- int16_t temp[16];
- /* quick HACK, XXX FIXME MUST be optimized */
- for(i=0; i<h; i++)
- {
- temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);
- temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);
- temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);
- temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);
- temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);
- temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);
- temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);
- temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);
- temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);
- temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);
- temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);
- temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);
- temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);
- temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);
- temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);
- temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);
- asm volatile(
- "movq (%0), %%mm0 nt"
- "movq 8(%0), %%mm1 nt"
- "paddw %2, %%mm0 nt"
- "paddw %2, %%mm1 nt"
- "psraw $5, %%mm0 nt"
- "psraw $5, %%mm1 nt"
- "packuswb %%mm1, %%mm0 nt"
- OP_3DNOW(%%mm0, (%1), %%mm1, q)
- "movq 16(%0), %%mm0 nt"
- "movq 24(%0), %%mm1 nt"
- "paddw %2, %%mm0 nt"
- "paddw %2, %%mm1 nt"
- "psraw $5, %%mm0 nt"
- "psraw $5, %%mm1 nt"
- "packuswb %%mm1, %%mm0 nt"
- OP_3DNOW(%%mm0, 8(%1), %%mm1, q)
- :: "r"(temp), "r"(dst), "m"(ROUNDER)
- : "memory"
- );
- dst+=dstStride;
- src+=srcStride;
- }
- }
- static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){
- uint64_t temp;
- asm volatile(
- "pxor %%mm7, %%mm7 nt"
- "1: nt"
- "movq (%0), %%mm0 nt" /* ABCDEFGH */
- "movq %%mm0, %%mm1 nt" /* ABCDEFGH */
- "movq %%mm0, %%mm2 nt" /* ABCDEFGH */
- "punpcklbw %%mm7, %%mm0 nt" /* 0A0B0C0D */
- "punpckhbw %%mm7, %%mm1 nt" /* 0E0F0G0H */
- "pshufw $0x90, %%mm0, %%mm5 nt" /* 0A0A0B0C */
- "pshufw $0x41, %%mm0, %%mm6 nt" /* 0B0A0A0B */
- "movq %%mm2, %%mm3 nt" /* ABCDEFGH */
- "movq %%mm2, %%mm4 nt" /* ABCDEFGH */
- "psllq $8, %%mm2 nt" /* 0ABCDEFG */
- "psllq $16, %%mm3 nt" /* 00ABCDEF */
- "psllq $24, %%mm4 nt" /* 000ABCDE */
- "punpckhbw %%mm7, %%mm2 nt" /* 0D0E0F0G */
- "punpckhbw %%mm7, %%mm3 nt" /* 0C0D0E0F */
- "punpckhbw %%mm7, %%mm4 nt" /* 0B0C0D0E */
- "paddw %%mm3, %%mm5 nt" /* b */
- "paddw %%mm2, %%mm6 nt" /* c */
- "paddw %%mm5, %%mm5 nt" /* 2b */
- "psubw %%mm5, %%mm6 nt" /* c - 2b */
- "pshufw $0x06, %%mm0, %%mm5 nt" /* 0C0B0A0A */
- "pmullw "MANGLE(ff_pw_3)", %%mm6 nt" /* 3c - 6b */
- "paddw %%mm4, %%mm0 nt" /* a */
- "paddw %%mm1, %%mm5 nt" /* d */
- "pmullw "MANGLE(ff_pw_20)", %%mm0 nt" /* 20a */
- "psubw %%mm5, %%mm0 nt" /* 20a - d */
- "paddw %6, %%mm6 nt"
- "paddw %%mm6, %%mm0 nt" /* 20a - 6b + 3c - d */
- "psraw $5, %%mm0 nt"
- /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */
- "movd 5(%0), %%mm5 nt" /* FGHI */
- "punpcklbw %%mm7, %%mm5 nt" /* 0F0G0H0I */
- "pshufw $0xF9, %%mm5, %%mm6 nt" /* 0G0H0I0I */
- "paddw %%mm5, %%mm1 nt" /* a */
- "paddw %%mm6, %%mm2 nt" /* b */
- "pshufw $0xBE, %%mm5, %%mm6 nt" /* 0H0I0I0H */
- "pshufw $0x6F, %%mm5, %%mm5 nt" /* 0I0I0H0G */
- "paddw %%mm6, %%mm3 nt" /* c */
- "paddw %%mm5, %%mm4 nt" /* d */
- "paddw %%mm2, %%mm2 nt" /* 2b */
- "psubw %%mm2, %%mm3 nt" /* c - 2b */
- "pmullw "MANGLE(ff_pw_20)", %%mm1 nt" /* 20a */
- "pmullw "MANGLE(ff_pw_3)", %%mm3 nt" /* 3c - 6b */
- "psubw %%mm4, %%mm3 nt" /* -6b + 3c - d */
- "paddw %6, %%mm1 nt"
- "paddw %%mm1, %%mm3 nt" /* 20a - 6b + 3c - d */
- "psraw $5, %%mm3 nt"
- "packuswb %%mm3, %%mm0 nt"
- OP_MMX2(%%mm0, (%1), %%mm4, q)
- "add %3, %0 nt"
- "add %4, %1 nt"
- "decl %2 nt"
- " jnz 1b nt"
- : "+a"(src), "+c"(dst), "+m"(h)
- : "S"((long)srcStride), "D"((long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)
- : "memory"
- );
- }
- static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){
- int i;
- int16_t temp[8];
- /* quick HACK, XXX FIXME MUST be optimized */
- for(i=0; i<h; i++)
- {
- temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);
- temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);
- temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);
- temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);
- temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);
- temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);
- temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);
- temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);
- asm volatile(
- "movq (%0), %%mm0 nt"
- "movq 8(%0), %%mm1 nt"
- "paddw %2, %%mm0 nt"
- "paddw %2, %%mm1 nt"
- "psraw $5, %%mm0 nt"
- "psraw $5, %%mm1 nt"
- "packuswb %%mm1, %%mm0 nt"
- OP_3DNOW(%%mm0, (%1), %%mm1, q)
- :: "r"(temp), "r"(dst), "m"(ROUNDER)
- :"memory"
- );
- dst+=dstStride;
- src+=srcStride;
- }
- }
- #define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)
- static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){
- uint64_t temp[17*4];
- uint64_t *temp_ptr= temp;
- int count= 17;
- /*FIXME unroll */
- asm volatile(
- "pxor %%mm7, %%mm7 nt"
- "1: nt"
- "movq (%0), %%mm0 nt"
- "movq (%0), %%mm1 nt"
- "movq 8(%0), %%mm2 nt"
- "movq 8(%0), %%mm3 nt"
- "punpcklbw %%mm7, %%mm0 nt"
- "punpckhbw %%mm7, %%mm1 nt"
- "punpcklbw %%mm7, %%mm2 nt"
- "punpckhbw %%mm7, %%mm3 nt"
- "movq %%mm0, (%1) nt"
- "movq %%mm1, 17*8(%1) nt"
- "movq %%mm2, 2*17*8(%1) nt"
- "movq %%mm3, 3*17*8(%1) nt"
- "add $8, %1 nt"
- "add %3, %0 nt"
- "decl %2 nt"
- " jnz 1b nt"
- : "+r" (src), "+r" (temp_ptr), "+r"(count)
- : "r" ((long)srcStride)
- : "memory"
- );
- temp_ptr= temp;
- count=4;
- /*FIXME reorder for speed */
- asm volatile(
- /*"pxor %%mm7, %%mm7 nt"*/
- "1: nt"
- "movq (%0), %%mm0 nt"
- "movq 8(%0), %%mm1 nt"
- "movq 16(%0), %%mm2 nt"
- "movq 24(%0), %%mm3 nt"
- QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)
- QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)
- "add %4, %1 nt"
- QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)
- QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)
- "add %4, %1 nt"
- QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)
- QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)
- "add %4, %1 nt"
- QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)
- QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)
- "add %4, %1 nt"
- QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)
- QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)
- "add %4, %1 nt"
- QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)
- QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)
- "add %4, %1 nt"
- QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)
- QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)
- "add %4, %1 nt"
- QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)
- QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)
- "add $136, %0 nt"
- "add %6, %1 nt"
- "decl %2 nt"
- " jnz 1b nt"
- : "+r"(temp_ptr), "+r"(dst), "+g"(count)
- : "r"((long)dstStride), "r"(2*(long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(long)dstStride)
- :"memory"
- );
- }
- static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){
- uint64_t temp[9*2];
- uint64_t *temp_ptr= temp;
- int count= 9;
- /*FIXME unroll */
- asm volatile(
- "pxor %%mm7, %%mm7 nt"
- "1: nt"
- "movq (%0), %%mm0 nt"
- "movq (%0), %%mm1 nt"
- "punpcklbw %%mm7, %%mm0 nt"
- "punpckhbw %%mm7, %%mm1 nt"
- "movq %%mm0, (%1) nt"
- "movq %%mm1, 9*8(%1) nt"
- "add $8, %1 nt"
- "add %3, %0 nt"
- "decl %2 nt"
- " jnz 1b nt"
- : "+r" (src), "+r" (temp_ptr), "+r"(count)
- : "r" ((long)srcStride)
- : "memory"
- );
- temp_ptr= temp;
- count=2;
- /*FIXME reorder for speed */
- asm volatile(
- /*"pxor %%mm7, %%mm7 nt"*/
- "1: nt"
- "movq (%0), %%mm0 nt"
- "movq 8(%0), %%mm1 nt"
- "movq 16(%0), %%mm2 nt"
- "movq 24(%0), %%mm3 nt"
- QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)
- QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)
- "add %4, %1 nt"
- QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)
- QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)
- "add %4, %1 nt"
- QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)
- QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)
- "add %4, %1 nt"
- QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)
- QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)
- "add $72, %0 nt"
- "add %6, %1 nt"
- "decl %2 nt"
- " jnz 1b nt"
- : "+r"(temp_ptr), "+r"(dst), "+g"(count)
- : "r"((long)dstStride), "r"(2*(long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(long)dstStride)
- : "memory"
- );
- }
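- /* Quarter-pel position wrappers for 8x8 blocks: mc00 is a plain copy, the
- mcX0/mc0Y cases blend a lowpass result with the source, and the diagonal
- cases combine the horizontal and vertical lowpass passes through small
- stack buffers (halfH/halfHV). */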
- static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){
- OPNAME ## pixels8_mmx(dst, src, stride, 8);
- }
- static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- uint64_t temp[8];
- uint8_t * const half= (uint8_t*)temp;
- put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);
- OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);
- }
- static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);
- }
- static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- uint64_t temp[8];
- uint8_t * const half= (uint8_t*)temp;
- put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);
- OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);
- }
- static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- uint64_t temp[8];
- uint8_t * const half= (uint8_t*)temp;
- put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);
- OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);
- }
- static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);
- }
- static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- uint64_t temp[8];
- uint8_t * const half= (uint8_t*)temp;
- put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);
- OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);
- }
- static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- uint64_t half[8 + 9];
- uint8_t * const halfH= ((uint8_t*)half) + 64;
- uint8_t * const halfHV= ((uint8_t*)half);
- put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);
- put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);
- put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);
- OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);
- }
- static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- uint64_t half[8 + 9];
- uint8_t * const halfH= ((uint8_t*)half) + 64;
- uint8_t * const halfHV= ((uint8_t*)half);
- put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);
- put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);
- put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);
- OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);
- }
- static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- uint64_t half[8 + 9];
- uint8_t * const halfH= ((uint8_t*)half) + 64;
- uint8_t * const halfHV= ((uint8_t*)half);
- put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);
- put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);
- put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);
- OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);
- }
- static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- uint64_t half[8 + 9];
- uint8_t * const halfH= ((uint8_t*)half) + 64;
- uint8_t * const halfHV= ((uint8_t*)half);
- put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);
- put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);
- put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);
- OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);
- }
- static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- uint64_t half[8 + 9];
- uint8_t * const halfH= ((uint8_t*)half) + 64;
- uint8_t * const halfHV= ((uint8_t*)half);
- put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);
- put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);
- OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);
- }
- static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- uint64_t half[8 + 9];
- uint8_t * const halfH= ((uint8_t*)half) + 64;
- uint8_t * const halfHV= ((uint8_t*)half);
- put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);
- put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);
- OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);
- }
- static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- uint64_t half[8 + 9];
- uint8_t * const halfH= ((uint8_t*)half);
- put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);
- put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);
- OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);
- }
- static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- uint64_t half[8 + 9];
- uint8_t * const halfH= ((uint8_t*)half);
- put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);
- put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);
- OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);
- }
- static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- uint64_t half[9];
- uint8_t * const halfH= ((uint8_t*)half);
- put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);
- OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);
- }
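- /* The same 16 quarter-pel position wrappers, repeated for 16x16 blocks. */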
- static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){
- OPNAME ## pixels16_mmx(dst, src, stride, 16);
- }
- static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- uint64_t temp[32];
- uint8_t * const half= (uint8_t*)temp;
- put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);
- OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);
- }
- static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);
- }
- static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- uint64_t temp[32];
- uint8_t * const half= (uint8_t*)temp;
- put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);
- OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);
- }
- static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- uint64_t temp[32];
- uint8_t * const half= (uint8_t*)temp;
- put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);
- OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);
- }
- static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);
- }
- static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- uint64_t temp[32];
- uint8_t * const half= (uint8_t*)temp;
- put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);
- OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);
- }
- static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- uint64_t half[16*2 + 17*2];
- uint8_t * const halfH= ((uint8_t*)half) + 256;
- uint8_t * const halfHV= ((uint8_t*)half);
- put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);
- put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);
- put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);
- OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);
- }
- static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- uint64_t half[16*2 + 17*2];
- uint8_t * const halfH= ((uint8_t*)half) + 256;
- uint8_t * const halfHV= ((uint8_t*)half);
- put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);
- put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);
- put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);
- OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);
- }
- static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- uint64_t half[16*2 + 17*2];
- uint8_t * const halfH= ((uint8_t*)half) + 256;
- uint8_t * const halfHV= ((uint8_t*)half);
- put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);
- put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);
- put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);
- OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);
- }
- static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- uint64_t half[16*2 + 17*2];
- uint8_t * const halfH= ((uint8_t*)half) + 256;
- uint8_t * const halfHV= ((uint8_t*)half);
- put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);
- put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);
- put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);
- OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);
- }
- static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- uint64_t half[16*2 + 17*2];
- uint8_t * const halfH= ((uint8_t*)half) + 256;
- uint8_t * const halfHV= ((uint8_t*)half);
- put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);
- put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);
- OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);
- }
- static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- uint64_t half[16*2 + 17*2];
- uint8_t * const halfH= ((uint8_t*)half) + 256;
- uint8_t * const halfHV= ((uint8_t*)half);
- put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);
- put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);
- OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);
- }
- static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- uint64_t half[17*2];
- uint8_t * const halfH= ((uint8_t*)half);
- put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);
- put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);
- OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);
- }
- static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- uint64_t half[17*2];
- uint8_t * const halfH= ((uint8_t*)half);
- put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);
- put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);
- OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);
- }
- static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){
- uint64_t half[17*2];
- uint8_t * const halfH= ((uint8_t*)half);
- put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);
- OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);
- }
- #define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
- #define AVG_3DNOW_OP(a,b,temp, size) \
- "mov" #size " " #b ", " #temp " \n\t"\
- "pavgusb " #temp ", " #a " \n\t"\
- "mov" #size " " #a ", " #b " \n\t"
- #define AVG_MMX2_OP(a,b,temp, size) \
- "mov" #size " " #b ", " #temp " \n\t"\
- "pavgb " #temp ", " #a " \n\t"\
- "mov" #size " " #a ", " #b " \n\t"
- QPEL_BASE(put_ , ff_pw_16, _ , PUT_OP, PUT_OP)
- QPEL_BASE(avg_ , ff_pw_16, _ , AVG_MMX2_OP, AVG_3DNOW_OP)
- QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
- QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, 3dnow)
- QPEL_OP(avg_ , ff_pw_16, _ , AVG_3DNOW_OP, 3dnow)
- QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
- QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, mmx2)
- QPEL_OP(avg_ , ff_pw_16, _ , AVG_MMX2_OP, mmx2)
- QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
- #if 0
- static void just_return() { return; }
- #endif
- #define SET_QPEL_FUNC(postfix1, postfix2) \
- c->put_ ## postfix1 = put_ ## postfix2; \
- c->put_no_rnd_ ## postfix1 = put_no_rnd_ ## postfix2; \
- c->avg_ ## postfix1 = avg_ ## postfix2;
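- /* Encoder helper: returns a weighted sum of squared errors for rem plus the
- scaled basis vector (the scalar reference is try_8x8basis_c in dsputil.c);
- it assumes |scale| < 256, as asserted below. */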
- static int try_8x8basis_mmx(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale){
- long i=0;
- assert(ABS(scale) < 256);
- scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT;
- asm volatile(
- "pcmpeqw %%mm6, %%mm6 nt" // -1w
- "psrlw $15, %%mm6 nt" // 1w
- "pxor %%mm7, %%mm7 nt"
- "movd %4, %%mm5 nt"
- "punpcklwd %%mm5, %%mm5 nt"
- "punpcklwd %%mm5, %%mm5 nt"
- "1: nt"
- "movq (%1, %0), %%mm0 nt"
- "movq 8(%1, %0), %%mm1 nt"
- "pmulhw %%mm5, %%mm0 nt"
- "pmulhw %%mm5, %%mm1 nt"
- "paddw %%mm6, %%mm0 nt"
- "paddw %%mm6, %%mm1 nt"
- "psraw $1, %%mm0 nt"
- "psraw $1, %%mm1 nt"
- "paddw (%2, %0), %%mm0 nt"
- "paddw 8(%2, %0), %%mm1 nt"
- "psraw $6, %%mm0 nt"
- "psraw $6, %%mm1 nt"
- "pmullw (%3, %0), %%mm0 nt"
- "pmullw 8(%3, %0), %%mm1 nt"
- "pmaddwd %%mm0, %%mm0 nt"
- "pmaddwd %%mm1, %%mm1 nt"
- "paddd %%mm1, %%mm0 nt"
- "psrld $4, %%mm0 nt"
- "paddd %%mm0, %%mm7 nt"
- "add $16, %0 nt"
- "cmp $128, %0 nt" //FIXME optimize & bench
- " jb 1b nt"
- "movq %%mm7, %%mm6 nt"
- "psrlq $32, %%mm7 nt"
- "paddd %%mm6, %%mm7 nt"
- "psrld $2, %%mm7 nt"
- "movd %%mm7, %0 nt"
- : "+r" (i)
- : "r"(basis), "r"(rem), "r"(weight), "g"(scale)
- );
- return i;
- }
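- /* Companion to try_8x8basis_mmx: adds the scaled basis vector to rem in
- place, with the plain C expression below as a fallback when |scale| >= 256. */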
- static void add_8x8basis_mmx(int16_t rem[64], int16_t basis[64], int scale){
- long i=0;
- if(ABS(scale) < 256){
- scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT;
- asm volatile(
- "pcmpeqw %%mm6, %%mm6 nt" // -1w
- "psrlw $15, %%mm6 nt" // 1w
- "movd %3, %%mm5 nt"
- "punpcklwd %%mm5, %%mm5 nt"
- "punpcklwd %%mm5, %%mm5 nt"
- "1: nt"
- "movq (%1, %0), %%mm0 nt"
- "movq 8(%1, %0), %%mm1 nt"
- "pmulhw %%mm5, %%mm0 nt"
- "pmulhw %%mm5, %%mm1 nt"
- "paddw %%mm6, %%mm0 nt"
- "paddw %%mm6, %%mm1 nt"
- "psraw $1, %%mm0 nt"
- "psraw $1, %%mm1 nt"
- "paddw (%2, %0), %%mm0 nt"
- "paddw 8(%2, %0), %%mm1 nt"
- "movq %%mm0, (%2, %0) nt"
- "movq %%mm1, 8(%2, %0) nt"
- "add $16, %0 nt"
- "cmp $128, %0 nt" //FIXME optimize & bench
- " jb 1b nt"
- : "+r" (i)
- : "r"(basis), "r"(rem), "g"(scale)
- );
- }else{
- for(i=0; i<8*8; i++){
- rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT);
- }
- }
- }
- #include "h264dsp_mmx.c"
- /* external functions, from idct_mmx.c */
- void ff_mmx_idct(DCTELEM *block);
- void ff_mmxext_idct(DCTELEM *block);
- void ff_vp3_idct_sse2(int16_t *input_data);
- void ff_vp3_idct_mmx(int16_t *data);
- void ff_vp3_dsp_init_mmx(void);
- /* XXX: those functions should be suppressed ASAP when all IDCTs are
- converted */
- static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
- {
- ff_mmx_idct (block);
- put_pixels_clamped_mmx(block, dest, line_size);
- }
- static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
- {
- ff_mmx_idct (block);
- add_pixels_clamped_mmx(block, dest, line_size);
- }
- static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
- {
- ff_mmxext_idct (block);
- put_pixels_clamped_mmx(block, dest, line_size);
- }
- static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
- {
- ff_mmxext_idct (block);
- add_pixels_clamped_mmx(block, dest, line_size);
- }
- static void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block)
- {
- ff_vp3_idct_sse2(block);
- put_signed_pixels_clamped_mmx(block, dest, line_size);
- }
- static void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block)
- {
- ff_vp3_idct_sse2(block);
- add_pixels_clamped_mmx(block, dest, line_size);
- }
- static void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block)
- {
- ff_vp3_idct_mmx(block);
- put_signed_pixels_clamped_mmx(block, dest, line_size);
- }
- static void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block)
- {
- ff_vp3_idct_mmx(block);
- add_pixels_clamped_mmx(block, dest, line_size);
- }
- #ifdef CONFIG_GPL
- static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
- {
- ff_idct_xvid_mmx (block);
- put_pixels_clamped_mmx(block, dest, line_size);
- }
- static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
- {
- ff_idct_xvid_mmx (block);
- add_pixels_clamped_mmx(block, dest, line_size);
- }
- static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
- {
- ff_idct_xvid_mmx2 (block);
- put_pixels_clamped_mmx(block, dest, line_size);
- }
- static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
- {
- ff_idct_xvid_mmx2 (block);
- add_pixels_clamped_mmx(block, dest, line_size);
- }
- #endif
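- /* Entry point: query the CPU's MMX/MMX2/3DNow!/SSE capabilities via
- mm_support(), honour avctx->dsp_mask, then fill the DSPContext tables with
- the fastest available routines (IDCT selection, pixel ops, qpel, H.264
- helpers, loop filters and encoder helpers). */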
- void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
- {
- mm_flags = mm_support();
- if (avctx->dsp_mask) {
- if (avctx->dsp_mask & FF_MM_FORCE)
- mm_flags |= (avctx->dsp_mask & 0xffff);
- else
- mm_flags &= ~(avctx->dsp_mask & 0xffff);
- }
- #if 0
- av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
- if (mm_flags & MM_MMX)
- av_log(avctx, AV_LOG_INFO, " mmx");
- if (mm_flags & MM_MMXEXT)
- av_log(avctx, AV_LOG_INFO, " mmxext");
- if (mm_flags & MM_3DNOW)
- av_log(avctx, AV_LOG_INFO, " 3dnow");
- if (mm_flags & MM_SSE)
- av_log(avctx, AV_LOG_INFO, " sse");
- if (mm_flags & MM_SSE2)
- av_log(avctx, AV_LOG_INFO, " sse2");
- av_log(avctx, AV_LOG_INFO, "\n");
- #endif
- if (mm_flags & MM_MMX) {
- const int idct_algo= avctx->idct_algo;
- #ifdef CONFIG_ENCODERS
- const int dct_algo = avctx->dct_algo;
- if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){
- if(mm_flags & MM_SSE2){
- c->fdct = ff_fdct_sse2;
- }else if(mm_flags & MM_MMXEXT){
- c->fdct = ff_fdct_mmx2;
- }else{
- c->fdct = ff_fdct_mmx;
- }
- }
- #endif //CONFIG_ENCODERS
- if(avctx->lowres==0){
- if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
- c->idct_put= ff_simple_idct_put_mmx;
- c->idct_add= ff_simple_idct_add_mmx;
- c->idct = ff_simple_idct_mmx;
- c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
- }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
- if(mm_flags & MM_MMXEXT){
- c->idct_put= ff_libmpeg2mmx2_idct_put;
- c->idct_add= ff_libmpeg2mmx2_idct_add;
- c->idct = ff_mmxext_idct;
- }else{
- c->idct_put= ff_libmpeg2mmx_idct_put;
- c->idct_add= ff_libmpeg2mmx_idct_add;
- c->idct = ff_mmx_idct;
- }
- c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
- }else if(idct_algo==FF_IDCT_VP3){
- if(mm_flags & MM_SSE2){
- c->idct_put= ff_vp3_idct_put_sse2;
- c->idct_add= ff_vp3_idct_add_sse2;
- c->idct = ff_vp3_idct_sse2;
- c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
- }else{
- ff_vp3_dsp_init_mmx();
- c->idct_put= ff_vp3_idct_put_mmx;
- c->idct_add= ff_vp3_idct_add_mmx;
- c->idct = ff_vp3_idct_mmx;
- c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
- }
- #ifdef CONFIG_GPL
- }else if(idct_algo==FF_IDCT_XVIDMMX){
- if(mm_flags & MM_MMXEXT){
- c->idct_put= ff_idct_xvid_mmx2_put;
- c->idct_add= ff_idct_xvid_mmx2_add;
- c->idct = ff_idct_xvid_mmx2;
- }else{
- c->idct_put= ff_idct_xvid_mmx_put;
- c->idct_add= ff_idct_xvid_mmx_add;
- c->idct = ff_idct_xvid_mmx;
- }
- #endif
- }
- }
- #ifdef CONFIG_ENCODERS
- c->get_pixels = get_pixels_mmx;
- c->diff_pixels = diff_pixels_mmx;
- #endif //CONFIG_ENCODERS
- c->put_pixels_clamped = put_pixels_clamped_mmx;
- c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
- c->add_pixels_clamped = add_pixels_clamped_mmx;
- c->clear_blocks = clear_blocks_mmx;
- #ifdef CONFIG_ENCODERS
- c->pix_sum = pix_sum16_mmx;
- #endif //CONFIG_ENCODERS
- c->put_pixels_tab[0][0] = put_pixels16_mmx;
- c->put_pixels_tab[0][1] = put_pixels16_x2_mmx;
- c->put_pixels_tab[0][2] = put_pixels16_y2_mmx;
- c->put_pixels_tab[0][3] = put_pixels16_xy2_mmx;
- c->put_no_rnd_pixels_tab[0][0] = put_pixels16_mmx;
- c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx;
- c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx;
- c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_mmx;
- c->avg_pixels_tab[0][0] = avg_pixels16_mmx;
- c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx;
- c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx;
- c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx;
- c->avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_mmx;
- c->avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x2_mmx;
- c->avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y2_mmx;
- c->avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy2_mmx;
- c->put_pixels_tab[1][0] = put_pixels8_mmx;
- c->put_pixels_tab[1][1] = put_pixels8_x2_mmx;
- c->put_pixels_tab[1][2] = put_pixels8_y2_mmx;
- c->put_pixels_tab[1][3] = put_pixels8_xy2_mmx;
- c->put_no_rnd_pixels_tab[1][0] = put_pixels8_mmx;
- c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx;
- c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx;
- c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_mmx;
- c->avg_pixels_tab[1][0] = avg_pixels8_mmx;
- c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx;
- c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx;
- c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx;
- c->avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels8_mmx;
- c->avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels8_x2_mmx;
- c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels8_y2_mmx;
- c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels8_xy2_mmx;
- c->add_bytes= add_bytes_mmx;
- #ifdef CONFIG_ENCODERS
- c->diff_bytes= diff_bytes_mmx;
- c->hadamard8_diff[0]= hadamard8_diff16_mmx;
- c->hadamard8_diff[1]= hadamard8_diff_mmx;
- c->pix_norm1 = pix_norm1_mmx;
- c->sse[0] = sse16_mmx;
- c->sse[1] = sse8_mmx;
- c->vsad[4]= vsad_intra16_mmx;
- c->nsse[0] = nsse16_mmx;
- c->nsse[1] = nsse8_mmx;
- if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
- c->vsad[0] = vsad16_mmx;
- }
- if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
- c->try_8x8basis= try_8x8basis_mmx;
- }
- c->add_8x8basis= add_8x8basis_mmx;
- #endif //CONFIG_ENCODERS
- c->h263_v_loop_filter= h263_v_loop_filter_mmx;
- c->h263_h_loop_filter= h263_h_loop_filter_mmx;
- c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx;
- if (mm_flags & MM_MMXEXT) {
- c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
- c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;
- c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
- c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
- c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;
- c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
- c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;
- c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
- c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
- c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;
- #ifdef CONFIG_ENCODERS
- c->hadamard8_diff[0]= hadamard8_diff16_mmx2;
- c->hadamard8_diff[1]= hadamard8_diff_mmx2;
- c->vsad[4]= vsad_intra16_mmx2;
- #endif //CONFIG_ENCODERS
- c->h264_idct_add= ff_h264_idct_add_mmx2;
- if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
- c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
- c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
- c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
- c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
- c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
- c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
- #ifdef CONFIG_ENCODERS
- c->vsad[0] = vsad16_mmx2;
- #endif //CONFIG_ENCODERS
- }
- #if 1
- SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_mmx2)
- SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_mmx2)
- #endif
- //FIXME 3dnow too
- #define dspfunc(PFX, IDX, NUM) \
- c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_mmx2; \
- c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_mmx2; \
- c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_mmx2; \
- c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_mmx2; \
- c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_mmx2; \
- c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_mmx2; \
- c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_mmx2; \
- c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_mmx2; \
- c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_mmx2; \
- c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_mmx2; \
- c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_mmx2; \
- c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_mmx2; \
- c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_mmx2; \
- c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_mmx2; \
- c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_mmx2; \
- c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_mmx2
- dspfunc(put_h264_qpel, 0, 16);
- dspfunc(put_h264_qpel, 1, 8);
- dspfunc(put_h264_qpel, 2, 4);
- dspfunc(avg_h264_qpel, 0, 16);
- dspfunc(avg_h264_qpel, 1, 8);
- dspfunc(avg_h264_qpel, 2, 4);
- #undef dspfunc
- c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2;
- c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2;
- c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2;
- c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2;
- c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2;
- c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2;
- c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2;
- #ifdef CONFIG_ENCODERS
- c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2;
- #endif //CONFIG_ENCODERS
- } else if (mm_flags & MM_3DNOW) {
- c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
- c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
- c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
- c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
- c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
- c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
- c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
- c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
- c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
- c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
- if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
- c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
- c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
- c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
- c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
- c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
- c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
- }
- SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_3dnow)
- SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_3dnow)
- #define dspfunc(PFX, IDX, NUM) \
- c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_3dnow; \
- c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_3dnow; \
- c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_3dnow; \
- c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_3dnow; \
- c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_3dnow; \
- c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_3dnow; \
- c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_3dnow; \
- c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_3dnow; \
- c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_3dnow; \
- c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_3dnow; \
- c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_3dnow; \
- c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_3dnow; \
- c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_3dnow; \
- c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_3dnow; \
- c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_3dnow; \
- c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_3dnow
- dspfunc(put_h264_qpel, 0, 16);
- dspfunc(put_h264_qpel, 1, 8);
- dspfunc(put_h264_qpel, 2, 4);
- dspfunc(avg_h264_qpel, 0, 16);
- dspfunc(avg_h264_qpel, 1, 8);
- dspfunc(avg_h264_qpel, 2, 4);
- c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow;
- }
- }
- #ifdef CONFIG_ENCODERS
- dsputil_init_pix_mmx(c, avctx);
- #endif //CONFIG_ENCODERS
- #if 0
- // for speed testing
- get_pixels = just_return;
- put_pixels_clamped = just_return;
- add_pixels_clamped = just_return;
- pix_abs16x16 = just_return;
- pix_abs16x16_x2 = just_return;
- pix_abs16x16_y2 = just_return;
- pix_abs16x16_xy2 = just_return;
- put_pixels_tab[0] = just_return;
- put_pixels_tab[1] = just_return;
- put_pixels_tab[2] = just_return;
- put_pixels_tab[3] = just_return;
- put_no_rnd_pixels_tab[0] = just_return;
- put_no_rnd_pixels_tab[1] = just_return;
- put_no_rnd_pixels_tab[2] = just_return;
- put_no_rnd_pixels_tab[3] = just_return;
- avg_pixels_tab[0] = just_return;
- avg_pixels_tab[1] = just_return;
- avg_pixels_tab[2] = just_return;
- avg_pixels_tab[3] = just_return;
- avg_no_rnd_pixels_tab[0] = just_return;
- avg_no_rnd_pixels_tab[1] = just_return;
- avg_no_rnd_pixels_tab[2] = just_return;
- avg_no_rnd_pixels_tab[3] = just_return;
- //av_fdct = just_return;
- //ff_idct = just_return;
- #endif
- }