49 qadd = (qscale - 1) | 1;
61 "packssdw %%mm6, %%mm6 \n\t"
62 "packssdw %%mm6, %%mm6 \n\t"
64 "pxor %%mm7, %%mm7 \n\t"
65 "packssdw %%mm5, %%mm5 \n\t"
66 "packssdw %%mm5, %%mm5 \n\t"
67 "psubw %%mm5, %%mm7 \n\t"
68 "pxor %%mm4, %%mm4 \n\t"
71 "movq (%0, %3), %%mm0 \n\t"
72 "movq 8(%0, %3), %%mm1 \n\t"
74 "pmullw %%mm6, %%mm0 \n\t"
75 "pmullw %%mm6, %%mm1 \n\t"
77 "movq (%0, %3), %%mm2 \n\t"
78 "movq 8(%0, %3), %%mm3 \n\t"
80 "pcmpgtw %%mm4, %%mm2 \n\t"
81 "pcmpgtw %%mm4, %%mm3 \n\t"
83 "pxor %%mm2, %%mm0 \n\t"
84 "pxor %%mm3, %%mm1 \n\t"
86 "paddw %%mm7, %%mm0 \n\t"
87 "paddw %%mm7, %%mm1 \n\t"
89 "pxor %%mm0, %%mm2 \n\t"
90 "pxor %%mm1, %%mm3 \n\t"
92 "pcmpeqw %%mm7, %%mm0 \n\t"
93 "pcmpeqw %%mm7, %%mm1 \n\t"
95 "pandn %%mm2, %%mm0 \n\t"
96 "pandn %%mm3, %%mm1 \n\t"
98 "movq %%mm0, (%0, %3) \n\t"
99 "movq %%mm1, 8(%0, %3) \n\t"
103 ::
"r" (block+nCoeffs),
"rm"(qmul),
"rm" (qadd),
"r" (2*(-nCoeffs))
/*
 * NOTE(review): mangled excerpt of a second MMX H.263-style dequantizer
 * (presumably dct_unquantize_h263_inter_mmx per the symbol list below).
 * Leading decimals are fused-in line numbers; the asm() opener, loop
 * label/branch and clobber list are missing from this view.
 */
// H.263 reconstruction offset: always odd.
116 qadd = (qscale - 1) | 1;
// Broadcast qmul -> mm6 and qadd -> mm5; then mm7 = -qadd, mm4 = 0.
123 "movd %1, %%mm6 \n\t"
124 "packssdw %%mm6, %%mm6 \n\t"
125 "packssdw %%mm6, %%mm6 \n\t"
126 "movd %2, %%mm5 \n\t"
127 "pxor %%mm7, %%mm7 \n\t"
128 "packssdw %%mm5, %%mm5 \n\t"
129 "packssdw %%mm5, %%mm5 \n\t"
130 "psubw %%mm5, %%mm7 \n\t"
131 "pxor %%mm4, %%mm4 \n\t"
// Loop body (label not visible): level *= qmul for 8 words.
134 "movq (%0, %3), %%mm0 \n\t"
135 "movq 8(%0, %3), %%mm1 \n\t"
137 "pmullw %%mm6, %%mm0 \n\t"
138 "pmullw %%mm6, %%mm1 \n\t"
// Masks: 0xFFFF where the original coefficient is > 0.
140 "movq (%0, %3), %%mm2 \n\t"
141 "movq 8(%0, %3), %%mm3 \n\t"
143 "pcmpgtw %%mm4, %%mm2 \n\t"
144 "pcmpgtw %%mm4, %%mm3 \n\t"
// ((L ^ m) - qadd) ^ m == L+qadd (positive) / L-qadd (otherwise).
146 "pxor %%mm2, %%mm0 \n\t"
147 "pxor %%mm3, %%mm1 \n\t"
149 "paddw %%mm7, %%mm0 \n\t"
150 "paddw %%mm7, %%mm1 \n\t"
152 "pxor %%mm0, %%mm2 \n\t"
153 "pxor %%mm1, %%mm3 \n\t"
// Zero inputs (words now equal to -qadd) are forced back to 0.
155 "pcmpeqw %%mm7, %%mm0 \n\t"
156 "pcmpeqw %%mm7, %%mm1 \n\t"
158 "pandn %%mm2, %%mm0 \n\t"
159 "pandn %%mm3, %%mm1 \n\t"
161 "movq %%mm0, (%0, %3) \n\t"
162 "movq %%mm1, 8(%0, %3) \n\t"
// Operands: %0 = block+nCoeffs, %1 = qmul, %2 = qadd, %3 = 2*(-nCoeffs).
166 ::
"r" (block+nCoeffs),
"rm"(qmul),
"rm" (qadd),
"r" (2*(-nCoeffs))
/*
 * NOTE(review): mangled excerpt of an MMX MPEG-1 intra dequantizer
 * (dct_unquantize_mpeg1_intra_mmx per the symbol list below).  Leading
 * decimals are fused-in line numbers; the asm() opener, nCoeffs setup,
 * loop label/branch and clobbers are missing.  The REG_a macro with the
 * split string literals is ordinary C string-literal pasting.
 */
205 const uint16_t *quant_matrix;
// mm7 = 1 in every word (all-ones logically shifted right by 15).
219 "pcmpeqw %%mm7, %%mm7 \n\t"
220 "psrlw $15, %%mm7 \n\t"
// Broadcast qscale to the four words of mm6.
221 "movd %2, %%mm6 \n\t"
222 "packssdw %%mm6, %%mm6 \n\t"
223 "packssdw %%mm6, %%mm6 \n\t"
// REG_a = negative byte offset (%3), incremented toward 0 by the loop.
224 "mov %3, %%"REG_a
" \n\t"
// Load 8 coefficients (mm0/mm1) and 8 matrix entries (mm4/mm5);
// matrix entries are pre-scaled by qscale.
227 "movq (%0, %%"REG_a
"), %%mm0 \n\t"
228 "movq 8(%0, %%"REG_a
"), %%mm1 \n\t"
229 "movq (%1, %%"REG_a
"), %%mm4 \n\t"
230 "movq 8(%1, %%"REG_a
"), %%mm5 \n\t"
231 "pmullw %%mm6, %%mm4 \n\t"
232 "pmullw %%mm6, %%mm5 \n\t"
// mm2/mm3 = sign masks (0xFFFF where coeff < 0); abs() via xor+sub.
233 "pxor %%mm2, %%mm2 \n\t"
234 "pxor %%mm3, %%mm3 \n\t"
235 "pcmpgtw %%mm0, %%mm2 \n\t"
236 "pcmpgtw %%mm1, %%mm3 \n\t"
237 "pxor %%mm2, %%mm0 \n\t"
238 "pxor %%mm3, %%mm1 \n\t"
239 "psubw %%mm2, %%mm0 \n\t"
240 "psubw %%mm3, %%mm1 \n\t"
// |level| * (qscale * quant_matrix[i]), low 16 bits kept.
241 "pmullw %%mm4, %%mm0 \n\t"
242 "pmullw %%mm5, %%mm1 \n\t"
// mm4/mm5 = 0xFFFF where the input coefficient was zero.
243 "pxor %%mm4, %%mm4 \n\t"
244 "pxor %%mm5, %%mm5 \n\t"
245 "pcmpeqw (%0, %%"REG_a
"), %%mm4 \n\t"
246 "pcmpeqw 8(%0, %%"REG_a
"), %%mm5\n\t"
// >> 3, then the MPEG-1 (x-1)|1 "oddification" via mm7 == 1.
247 "psraw $3, %%mm0 \n\t"
248 "psraw $3, %%mm1 \n\t"
249 "psubw %%mm7, %%mm0 \n\t"
250 "psubw %%mm7, %%mm1 \n\t"
251 "por %%mm7, %%mm0 \n\t"
252 "por %%mm7, %%mm1 \n\t"
// Restore the original sign, keep zero inputs at zero, store.
253 "pxor %%mm2, %%mm0 \n\t"
254 "pxor %%mm3, %%mm1 \n\t"
255 "psubw %%mm2, %%mm0 \n\t"
256 "psubw %%mm3, %%mm1 \n\t"
257 "pandn %%mm0, %%mm4 \n\t"
258 "pandn %%mm1, %%mm5 \n\t"
259 "movq %%mm4, (%0, %%"REG_a
") \n\t"
260 "movq %%mm5, 8(%0, %%"REG_a
") \n\t"
// Advance 16 bytes (8 words); the loop branch is not visible here.
262 "add $16, %%"REG_a
" \n\t"
// Operands: %0=block+nCoeffs, %1=quant_matrix+nCoeffs, %2=qscale,
// %3 = -2*nCoeffs.
264 ::
"r" (block+nCoeffs),
"r"(quant_matrix+nCoeffs),
"rm" (qscale),
"g" (-2*nCoeffs)
/*
 * NOTE(review): mangled excerpt of an MMX MPEG-1 inter dequantizer
 * (dct_unquantize_mpeg1_inter_mmx per the symbol list below).  Leading
 * decimals are fused-in line numbers; asm() opener, loop label/branch
 * and clobbers are missing from this view.
 */
274 const uint16_t *quant_matrix;
// mm7 = 1 per word; mm6 = qscale broadcast; REG_a = negative offset.
282 "pcmpeqw %%mm7, %%mm7 \n\t"
283 "psrlw $15, %%mm7 \n\t"
284 "movd %2, %%mm6 \n\t"
285 "packssdw %%mm6, %%mm6 \n\t"
286 "packssdw %%mm6, %%mm6 \n\t"
287 "mov %3, %%"REG_a
" \n\t"
// Load coefficients and matrix entries; matrix *= qscale.
290 "movq (%0, %%"REG_a
"), %%mm0 \n\t"
291 "movq 8(%0, %%"REG_a
"), %%mm1 \n\t"
292 "movq (%1, %%"REG_a
"), %%mm4 \n\t"
293 "movq 8(%1, %%"REG_a
"), %%mm5 \n\t"
294 "pmullw %%mm6, %%mm4 \n\t"
295 "pmullw %%mm6, %%mm5 \n\t"
// Sign masks and abs() of the coefficients.
296 "pxor %%mm2, %%mm2 \n\t"
297 "pxor %%mm3, %%mm3 \n\t"
298 "pcmpgtw %%mm0, %%mm2 \n\t"
299 "pcmpgtw %%mm1, %%mm3 \n\t"
300 "pxor %%mm2, %%mm0 \n\t"
301 "pxor %%mm3, %%mm1 \n\t"
302 "psubw %%mm2, %%mm0 \n\t"
303 "psubw %%mm3, %%mm1 \n\t"
// Inter formula: (2*|level| + 1) * qscale * quant_matrix[i].
304 "paddw %%mm0, %%mm0 \n\t"
305 "paddw %%mm1, %%mm1 \n\t"
306 "paddw %%mm7, %%mm0 \n\t"
307 "paddw %%mm7, %%mm1 \n\t"
308 "pmullw %%mm4, %%mm0 \n\t"
309 "pmullw %%mm5, %%mm1 \n\t"
// Zero-input masks taken from memory before the result overwrite.
310 "pxor %%mm4, %%mm4 \n\t"
311 "pxor %%mm5, %%mm5 \n\t"
312 "pcmpeqw (%0, %%"REG_a
"), %%mm4 \n\t"
313 "pcmpeqw 8(%0, %%"REG_a
"), %%mm5\n\t"
// >> 4, then (x-1)|1 oddification, then restore sign.
314 "psraw $4, %%mm0 \n\t"
315 "psraw $4, %%mm1 \n\t"
316 "psubw %%mm7, %%mm0 \n\t"
317 "psubw %%mm7, %%mm1 \n\t"
318 "por %%mm7, %%mm0 \n\t"
319 "por %%mm7, %%mm1 \n\t"
320 "pxor %%mm2, %%mm0 \n\t"
321 "pxor %%mm3, %%mm1 \n\t"
322 "psubw %%mm2, %%mm0 \n\t"
323 "psubw %%mm3, %%mm1 \n\t"
// Keep zero inputs at zero and store 8 results.
324 "pandn %%mm0, %%mm4 \n\t"
325 "pandn %%mm1, %%mm5 \n\t"
326 "movq %%mm4, (%0, %%"REG_a
") \n\t"
327 "movq %%mm5, 8(%0, %%"REG_a
") \n\t"
// Loop increment; the branch back is not visible in this excerpt.
329 "add $16, %%"REG_a
" \n\t"
// Operands: %0=block+nCoeffs, %1=quant_matrix+nCoeffs, %2=qscale,
// %3 = -2*nCoeffs.
331 ::
"r" (block+nCoeffs),
"r"(quant_matrix+nCoeffs),
"rm" (qscale),
"g" (-2*nCoeffs)
/*
 * NOTE(review): mangled excerpt of an MMX MPEG-2 intra dequantizer
 * (dct_unquantize_mpeg2_intra_mmx per the symbol list below).  Leading
 * decimals are fused-in line numbers; asm() opener, loop label/branch
 * and clobbers are missing from this view.
 */
340 const uint16_t *quant_matrix;
// mm7 = 1 per word (set up here but not referenced in the visible
// loop lines); mm6 = qscale broadcast; REG_a = negative byte offset.
354 "pcmpeqw %%mm7, %%mm7 \n\t"
355 "psrlw $15, %%mm7 \n\t"
356 "movd %2, %%mm6 \n\t"
357 "packssdw %%mm6, %%mm6 \n\t"
358 "packssdw %%mm6, %%mm6 \n\t"
359 "mov %3, %%"REG_a
" \n\t"
// Load coefficients and matrix entries; matrix *= qscale.
362 "movq (%0, %%"REG_a
"), %%mm0 \n\t"
363 "movq 8(%0, %%"REG_a
"), %%mm1 \n\t"
364 "movq (%1, %%"REG_a
"), %%mm4 \n\t"
365 "movq 8(%1, %%"REG_a
"), %%mm5 \n\t"
366 "pmullw %%mm6, %%mm4 \n\t"
367 "pmullw %%mm6, %%mm5 \n\t"
// Sign masks and abs() of the coefficients.
368 "pxor %%mm2, %%mm2 \n\t"
369 "pxor %%mm3, %%mm3 \n\t"
370 "pcmpgtw %%mm0, %%mm2 \n\t"
371 "pcmpgtw %%mm1, %%mm3 \n\t"
372 "pxor %%mm2, %%mm0 \n\t"
373 "pxor %%mm3, %%mm1 \n\t"
374 "psubw %%mm2, %%mm0 \n\t"
375 "psubw %%mm3, %%mm1 \n\t"
// |level| * qscale * quant_matrix[i].
376 "pmullw %%mm4, %%mm0 \n\t"
377 "pmullw %%mm5, %%mm1 \n\t"
// Zero-input masks from the (still unmodified) block in memory.
378 "pxor %%mm4, %%mm4 \n\t"
379 "pxor %%mm5, %%mm5 \n\t"
380 "pcmpeqw (%0, %%"REG_a
"), %%mm4 \n\t"
381 "pcmpeqw 8(%0, %%"REG_a
"), %%mm5\n\t"
// >> 3; note: unlike the MPEG-1 fragments there is no (x-1)|1 step
// here -- the sign is restored directly after the shift.
382 "psraw $3, %%mm0 \n\t"
383 "psraw $3, %%mm1 \n\t"
384 "pxor %%mm2, %%mm0 \n\t"
385 "pxor %%mm3, %%mm1 \n\t"
386 "psubw %%mm2, %%mm0 \n\t"
387 "psubw %%mm3, %%mm1 \n\t"
// Keep zero inputs at zero and store.
388 "pandn %%mm0, %%mm4 \n\t"
389 "pandn %%mm1, %%mm5 \n\t"
390 "movq %%mm4, (%0, %%"REG_a
") \n\t"
391 "movq %%mm5, 8(%0, %%"REG_a
") \n\t"
// Loop increment; branch not visible in this excerpt.
393 "add $16, %%"REG_a
" \n\t"
// Operands: %0=block+nCoeffs, %1=quant_matrix+nCoeffs, %2=qscale,
// %3 = -2*nCoeffs.
395 ::
"r" (block+nCoeffs),
"r"(quant_matrix+nCoeffs),
"rm" (qscale),
"g" (-2*nCoeffs)
/*
 * NOTE(review): mangled excerpt of an MMX MPEG-2 inter dequantizer with
 * mismatch control (dct_unquantize_mpeg2_inter_mmx per the symbol list
 * below).  Leading decimals are fused-in line numbers; asm() opener,
 * loop label/branch and clobbers are missing from this view.
 */
406 const uint16_t *quant_matrix;
// mm7 starts as 0x000000000000FFFF (all-ones >> 48); it doubles as the
// running XOR accumulator for the MPEG-2 mismatch-control parity.
415 "pcmpeqw %%mm7, %%mm7 \n\t"
416 "psrlq $48, %%mm7 \n\t"
// mm6 = qscale broadcast; REG_a = negative byte offset.
417 "movd %2, %%mm6 \n\t"
418 "packssdw %%mm6, %%mm6 \n\t"
419 "packssdw %%mm6, %%mm6 \n\t"
420 "mov %3, %%"REG_a
" \n\t"
// Load coefficients and matrix entries; matrix *= qscale.
423 "movq (%0, %%"REG_a
"), %%mm0 \n\t"
424 "movq 8(%0, %%"REG_a
"), %%mm1 \n\t"
425 "movq (%1, %%"REG_a
"), %%mm4 \n\t"
426 "movq 8(%1, %%"REG_a
"), %%mm5 \n\t"
427 "pmullw %%mm6, %%mm4 \n\t"
428 "pmullw %%mm6, %%mm5 \n\t"
// Sign masks and abs() of the coefficients.
429 "pxor %%mm2, %%mm2 \n\t"
430 "pxor %%mm3, %%mm3 \n\t"
431 "pcmpgtw %%mm0, %%mm2 \n\t"
432 "pcmpgtw %%mm1, %%mm3 \n\t"
433 "pxor %%mm2, %%mm0 \n\t"
434 "pxor %%mm3, %%mm1 \n\t"
435 "psubw %%mm2, %%mm0 \n\t"
436 "psubw %%mm3, %%mm1 \n\t"
// (2*|level|+1)*w computed as 2*|level|*w + w (the extra paddw of
// mm4/mm5 supplies the "+w" term).
437 "paddw %%mm0, %%mm0 \n\t"
438 "paddw %%mm1, %%mm1 \n\t"
439 "pmullw %%mm4, %%mm0 \n\t"
440 "pmullw %%mm5, %%mm1 \n\t"
441 "paddw %%mm4, %%mm0 \n\t"
442 "paddw %%mm5, %%mm1 \n\t"
// Zero-input masks from memory before the result is stored.
443 "pxor %%mm4, %%mm4 \n\t"
444 "pxor %%mm5, %%mm5 \n\t"
445 "pcmpeqw (%0, %%"REG_a
"), %%mm4 \n\t"
446 "pcmpeqw 8(%0, %%"REG_a
"), %%mm5\n\t"
// >> 4 (logical; the value is non-negative here), restore the sign.
447 "psrlw $4, %%mm0 \n\t"
448 "psrlw $4, %%mm1 \n\t"
449 "pxor %%mm2, %%mm0 \n\t"
450 "pxor %%mm3, %%mm1 \n\t"
451 "psubw %%mm2, %%mm0 \n\t"
452 "psubw %%mm3, %%mm1 \n\t"
// Keep zeros at zero; fold every stored word into mm7 (mismatch XOR).
453 "pandn %%mm0, %%mm4 \n\t"
454 "pandn %%mm1, %%mm5 \n\t"
455 "pxor %%mm4, %%mm7 \n\t"
456 "pxor %%mm5, %%mm7 \n\t"
457 "movq %%mm4, (%0, %%"REG_a
") \n\t"
458 "movq %%mm5, 8(%0, %%"REG_a
") \n\t"
// Loop increment; branch not visible in this excerpt.
460 "add $16, %%"REG_a
" \n\t"
// Mismatch control tail: horizontally XOR-fold mm7 (32 then 16 bit
// shifts), isolate the parity bit (pslld $31 / psrlq $15 leaves it at
// the LSB of the high word of the loaded dword), and XOR it into the
// last coefficient -- byte offset 124 relative to the block start.
462 "movd 124(%0, %3), %%mm0 \n\t"
463 "movq %%mm7, %%mm6 \n\t"
464 "psrlq $32, %%mm7 \n\t"
465 "pxor %%mm6, %%mm7 \n\t"
466 "movq %%mm7, %%mm6 \n\t"
467 "psrlq $16, %%mm7 \n\t"
468 "pxor %%mm6, %%mm7 \n\t"
469 "pslld $31, %%mm7 \n\t"
470 "psrlq $15, %%mm7 \n\t"
471 "pxor %%mm7, %%mm0 \n\t"
472 "movd %%mm0, 124(%0, %3) \n\t"
// Operands: %0=block+nCoeffs, %1=quant_matrix+nCoeffs, %2=qscale,
// %3 = -2*nCoeffs.
474 ::
"r" (block+nCoeffs),
"r"(quant_matrix+nCoeffs),
"rm" (qscale),
"r" (-2*nCoeffs)
/*
 * NOTE(review): mangled excerpt of denoise_dct_mmx (per the symbol list
 * below).  Per 16-byte chunk it: takes abs() of 8 coefficients,
 * saturating-subtracts the per-coefficient offset[], restores the sign
 * and writes the result back, while also accumulating the pre-subtract
 * abs values into 32-bit sum[] counters.  Leading decimals are fused-in
 * line numbers; asm() opener, loop label/branch and the pointer
 * advances are missing from this view.
 */
// mm7 = 0 (zero extension source for punpck*).
487 "pxor %%mm7, %%mm7 \n\t"
// Sign masks in mm0/mm1; abs(block[i]) via xor+sub in mm2/mm3.
489 "pxor %%mm0, %%mm0 \n\t"
490 "pxor %%mm1, %%mm1 \n\t"
491 "movq (%0), %%mm2 \n\t"
492 "movq 8(%0), %%mm3 \n\t"
493 "pcmpgtw %%mm2, %%mm0 \n\t"
494 "pcmpgtw %%mm3, %%mm1 \n\t"
495 "pxor %%mm0, %%mm2 \n\t"
496 "pxor %%mm1, %%mm3 \n\t"
497 "psubw %%mm0, %%mm2 \n\t"
498 "psubw %%mm1, %%mm3 \n\t"
// Save the abs values (mm4/mm5) before thresholding.
499 "movq %%mm2, %%mm4 \n\t"
500 "movq %%mm3, %%mm5 \n\t"
// Saturating subtract of offset[i] clamps small values to 0.
501 "psubusw (%2), %%mm2 \n\t"
502 "psubusw 8(%2), %%mm3 \n\t"
// Re-apply the original sign and store back into the block.
503 "pxor %%mm0, %%mm2 \n\t"
504 "pxor %%mm1, %%mm3 \n\t"
505 "psubw %%mm0, %%mm2 \n\t"
506 "psubw %%mm1, %%mm3 \n\t"
507 "movq %%mm2, (%0) \n\t"
508 "movq %%mm3, 8(%0) \n\t"
// Widen the saved abs values to dwords and add into sum[0..7].
509 "movq %%mm4, %%mm2 \n\t"
510 "movq %%mm5, %%mm3 \n\t"
511 "punpcklwd %%mm7, %%mm4 \n\t"
512 "punpckhwd %%mm7, %%mm2 \n\t"
513 "punpcklwd %%mm7, %%mm5 \n\t"
514 "punpckhwd %%mm7, %%mm3 \n\t"
515 "paddd (%1), %%mm4 \n\t"
516 "paddd 8(%1), %%mm2 \n\t"
517 "paddd 16(%1), %%mm5 \n\t"
518 "paddd 24(%1), %%mm3 \n\t"
519 "movq %%mm4, (%1) \n\t"
520 "movq %%mm2, 8(%1) \n\t"
521 "movq %%mm5, 16(%1) \n\t"
522 "movq %%mm3, 24(%1) \n\t"
// Read-write operands: %0 = block, %1 = sum, %2 = offset.
528 :
"+r" (
block),
"+r" (sum),
"+r" (offset)
/*
 * NOTE(review): mangled excerpt of denoise_dct_sse2 (per the symbol
 * list below) -- the SSE2 twin of the MMX version, processing 16
 * coefficients per iteration: abs(), saturating subtract of offset[],
 * sign restore and store, plus accumulation of the pre-subtract abs
 * values into 32-bit sum[] counters.  Leading decimals are fused-in
 * line numbers; asm() opener, loop label/branch and pointer advances
 * are missing from this view.
 */
// xmm7 = 0 (zero extension source for punpck*).
541 "pxor %%xmm7, %%xmm7 \n\t"
// Sign masks in xmm0/xmm1; abs(block[i]) via xor+sub in xmm2/xmm3.
543 "pxor %%xmm0, %%xmm0 \n\t"
544 "pxor %%xmm1, %%xmm1 \n\t"
545 "movdqa (%0), %%xmm2 \n\t"
546 "movdqa 16(%0), %%xmm3 \n\t"
547 "pcmpgtw %%xmm2, %%xmm0 \n\t"
548 "pcmpgtw %%xmm3, %%xmm1 \n\t"
549 "pxor %%xmm0, %%xmm2 \n\t"
550 "pxor %%xmm1, %%xmm3 \n\t"
551 "psubw %%xmm0, %%xmm2 \n\t"
552 "psubw %%xmm1, %%xmm3 \n\t"
// Save abs values before thresholding.
553 "movdqa %%xmm2, %%xmm4 \n\t"
554 "movdqa %%xmm3, %%xmm5 \n\t"
// Saturating subtract of offset[i] clamps small values to 0.
555 "psubusw (%2), %%xmm2 \n\t"
556 "psubusw 16(%2), %%xmm3 \n\t"
// Re-apply the original sign and store back into the block.
557 "pxor %%xmm0, %%xmm2 \n\t"
558 "pxor %%xmm1, %%xmm3 \n\t"
559 "psubw %%xmm0, %%xmm2 \n\t"
560 "psubw %%xmm1, %%xmm3 \n\t"
561 "movdqa %%xmm2, (%0) \n\t"
562 "movdqa %%xmm3, 16(%0) \n\t"
// Widen the saved abs values to dwords and add into sum[0..15].
563 "movdqa %%xmm4, %%xmm6 \n\t"
564 "movdqa %%xmm5, %%xmm0 \n\t"
565 "punpcklwd %%xmm7, %%xmm4 \n\t"
566 "punpckhwd %%xmm7, %%xmm6 \n\t"
567 "punpcklwd %%xmm7, %%xmm5 \n\t"
568 "punpckhwd %%xmm7, %%xmm0 \n\t"
569 "paddd (%1), %%xmm4 \n\t"
570 "paddd 16(%1), %%xmm6 \n\t"
571 "paddd 32(%1), %%xmm5 \n\t"
572 "paddd 48(%1), %%xmm0 \n\t"
573 "movdqa %%xmm4, (%1) \n\t"
574 "movdqa %%xmm6, 16(%1) \n\t"
575 "movdqa %%xmm5, 32(%1) \n\t"
576 "movdqa %%xmm0, 48(%1) \n\t"
// Read-write operands: %0 = block, %1 = sum, %2 = offset.
582 :
"+r" (
block),
"+r" (sum),
"+r" (offset)
// Tail of the XMM clobber list (its head is missing from this view).
"%xmm4",
"%xmm5",
"%xmm6",
"%xmm7")
/*
 * NOTE(review): fragments of the per-ISA template instantiation
 * machinery.  RENAME/RENAMEl paste an ISA suffix onto function names so
 * the same template body can be compiled once per instruction set; the
 * matching #undef lines, #if guards and template #includes are missing
 * from this view.  Leading decimals are fused-in line numbers.
 */
// Back up the SSSE3 feature macro so it can be restored after the
// non-SSSE3 instantiations (restore site not visible here).
590 #define HAVE_SSSE3_BAK
// MMX variant suffixes.
599 #define RENAME(a) a ## _MMX
600 #define RENAMEl(a) a ## _mmx
// MMX2 variant suffixes.
607 #define RENAME(a) a ## _MMX2
608 #define RENAMEl(a) a ## _mmx2
// SSE2 variant suffixes.
615 #define RENAME(a) a ## _SSE2
616 #define RENAMEl(a) a ## _sse2
// SSSE3 variant, only when the backed-up feature macro was set; note
// RENAMEl still maps to _sse2 (shares the SSE2 low-level helpers).
619 #ifdef HAVE_SSSE3_BAK
624 #define RENAME(a) a ## _SSSE3
625 #define RENAMEl(a) a ## _sse2
// Fragment of the runtime CPU-flag dispatch (MPV_common_init_mmx per
// the symbol list below); the enclosing function is not visible here.
656 if(mm_flags & AV_CPU_FLAG_SSE2){
int(* dct_quantize)(struct MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow)
int dct_algo
DCT algorithm, see FF_DCT_* below.
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, DCTELEM *block, int n, int qscale)
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, DCTELEM *block, int n, int qscale)
static void dct_unquantize_mpeg1_intra_mmx(MpegEncContext *s, DCTELEM *block, int n, int qscale)
static void dct_unquantize_mpeg2_inter_mmx(MpegEncContext *s, DCTELEM *block, int n, int qscale)
#define AV_CPU_FLAG_MMX2
SSE integer functions or AMD MMX ext.
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, DCTELEM *block, int n, int qscale)
void MPV_common_init_mmx(MpegEncContext *s)
int h263_aic
Advanced INTRA Coding (AIC)
uint16_t(* dct_offset)[64]
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, DCTELEM *block, int n, int qscale)
#define AV_CPU_FLAG_SSSE3
Conroe SSSE3 functions.
static void dct_unquantize_mpeg1_inter_mmx(MpegEncContext *s, DCTELEM *block, int n, int qscale)
#define XMM_CLOBBERS_ONLY(...)
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, DCTELEM *block, int n, int qscale)
static void denoise_dct_sse2(MpegEncContext *s, DCTELEM *block)
int block_last_index[12]
last non zero coefficient in block
uint16_t inter_matrix[64]
void(* denoise_dct)(struct MpegEncContext *s, DCTELEM *block)
#define AV_CPU_FLAG_MMX
standard MMX
ScanTable intra_scantable
static void denoise_dct_mmx(MpegEncContext *s, DCTELEM *block)
int av_get_cpu_flags(void)
Return the flags which specify extensions supported by the CPU.
struct AVCodecContext * avctx
static void dct_unquantize_h263_inter_mmx(MpegEncContext *s, DCTELEM *block, int n, int qscale)
static void dct_unquantize_h263_intra_mmx(MpegEncContext *s, DCTELEM *block, int n, int qscale)
int flags
AVCodecContext.flags (HQ, MV4, ...)
uint16_t inv_zigzag_direct16[64]
#define AV_CPU_FLAG_SSE2
PIV SSE2 functions.
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
static void dct_unquantize_mpeg2_intra_mmx(MpegEncContext *s, DCTELEM *block, int n, int qscale)
ScanTable inter_scantable
if inter == intra then intra should be used to reduce the cache usage
#define CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, DCTELEM *block, int n, int qscale)