49 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
53 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9,10,10,10,10,
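The two rows above appear to be the contents of the rem6[] and div6[] lookup tables (QP modulo 6 and QP divided by 6, one entry per supported QP). A minimal sketch of how such tables can be built and what they encode, using hypothetical names and assuming a maximum QP of 51:

    #include <stdio.h>
    #define MY_QP_MAX 51                      /* assumption; the real bound depends on bit depth */
    static unsigned char my_rem6[MY_QP_MAX + 1];
    static unsigned char my_div6[MY_QP_MAX + 1];
    int main(void) {
        for (int q = 0; q <= MY_QP_MAX; q++) {
            my_rem6[q] = q % 6;               /* picks one of six base dequant scale rows */
            my_div6[q] = q / 6;               /* gives the extra left-shift for that QP   */
        }
        printf("qp=23 -> rem6=%d div6=%d\n", my_rem6[23], my_div6[23]);
        return 0;
    }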
70 static const int8_t top [12]= {-1, 0, LEFT_DC_PRED,-1,-1,-1,-1,-1, 0};
87 static const int mask[4]={0x8000,0x2000,0x80,0x20};
151 #if HAVE_FAST_UNALIGNED
154 for(i=0; i+1<length; i+=9){
155 if(!((~AV_RN64A(src+i) & (AV_RN64A(src+i) - 0x0100010001000101ULL)) & 0x8000800080008080ULL))
158 for(i=0; i+1<length; i+=5){
162 if(i>0 && !src[i]) i--;
166 for(i=0; i+1<length; i+=2){
168 if(i>0 && src[i-1]==0) i--;
170 if(i+2<length && src[i+1]==0 && src[i+2]<=3){
200 dst[di++]= src[si++];
201 dst[di++]= src[si++];
202 } else if(src[si]==0 && src[si+1]==0){
212 dst[di++]= src[si++];
215 dst[di++]= src[si++];
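The copy loop above removes H.264 emulation-prevention bytes: every 0x03 that follows a 0x00 0x00 pair in the raw NAL is dropped while the payload is copied to dst. A plain reference version of the same idea, without the fast unaligned scanning used above:

    #include <stdint.h>
    #include <stddef.h>
    /* Copy a NAL payload while dropping the 0x03 escape byte inserted after 00 00. */
    static size_t unescape_nal(uint8_t *dst, const uint8_t *src, size_t length) {
        size_t si = 0, di = 0;
        while (si < length) {
            if (si + 2 < length && src[si] == 0 && src[si + 1] == 0 && src[si + 2] == 3) {
                dst[di++] = src[si++];        /* keep the first 0x00  */
                dst[di++] = src[si++];        /* keep the second 0x00 */
                si++;                         /* skip the 0x03 escape */
            } else {
                dst[di++] = src[si++];
            }
        }
        return di;
    }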
244 int y_offset, int list){
246 int filter_height= (raw_my&3) ? 2 : 0;
247 int full_my= (raw_my>>2) + y_offset;
248 int top = full_my - filter_height, bottom = full_my + height + filter_height;
250 return FFMAX(abs(top), bottom);
254 int y_offset, int list0, int list1, int *nrefs){
270 if (refs[0][ref_n] < 0) nrefs[0] += 1;
271 refs[0][ref_n] = FFMAX(refs[0][ref_n], my);
282 if (refs[1][ref_n] < 0) nrefs[1] += 1;
283 refs[1][ref_n] = FFMAX(refs[1][ref_n], my);
295 const int mb_xy= h->mb_xy;
301 memset(refs, -1, sizeof(refs));
324 int y_offset= (i&2)<<2;
328 IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1), nrefs);
331 IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1), nrefs);
333 IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1), nrefs);
336 IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1), nrefs);
338 IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1), nrefs);
343 int sub_y_offset= y_offset + 2*(j&2);
345 IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1), nrefs);
352 for(ref=0; ref<48 && nrefs[list]; ref++){
353 int row = refs[list][ref];
358 int pic_height = 16*s->mb_height >> ref_field_picture;
387 static const int x_offset[4]={0, 1*stride, 4*stride, 5*stride};
388 static const int y_offset[4]={0, 2*stride, 8*stride, 10*stride};
391 const int offset= y_offset[i];
392 const int z0= block[offset+stride*0] + block[offset+stride*4];
393 const int z1= block[offset+stride*0] - block[offset+stride*4];
394 const int z2= block[offset+stride*1] - block[offset+stride*5];
395 const int z3= block[offset+stride*1] + block[offset+stride*5];
404 const int offset= x_offset[i];
405 const int z0= temp[4*0+i] + temp[4*2+i];
406 const int z1= temp[4*0+i] - temp[4*2+i];
407 const int z2= temp[4*1+i] - temp[4*3+i];
408 const int z3= temp[4*1+i] + temp[4*3+i];
410 block[stride*0 +offset]= (z0 + z3)>>1;
411 block[stride*2 +offset]= (z1 + z2)>>1;
412 block[stride*8 +offset]= (z1 - z2)>>1;
413 block[stride*10+offset]= (z0 - z3)>>1;
422 static void chroma_dc_dct_c(DCTELEM *block){
424 const int xStride= 16;
427 a= block[stride*0 + xStride*0];
428 b= block[stride*0 + xStride*1];
429 c= block[stride*1 + xStride*0];
430 d= block[stride*1 + xStride*1];
437 block[stride*0 + xStride*0]= (a+c);
438 block[stride*0 + xStride*1]= (e+b);
439 block[stride*1 + xStride*0]= (a-c);
440 block[stride*1 + xStride*1]= (e-b);
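chroma_dc_dct_c() applies a 2x2 Hadamard butterfly to the four chroma DC coefficients; the four stores above are sums and differences of the row sums/differences. A standalone sketch of the same arithmetic on a plain 2x2 array (hypothetical helper, not the decoder's strided block[] layout):

    /* 2x2 Hadamard on chroma DC terms, matching the sums/differences written above. */
    static void chroma_dc_hadamard_2x2(const int in[2][2], int out[2][2]) {
        int e = in[0][0] - in[0][1];
        int a = in[0][0] + in[0][1];
        int b = in[1][0] - in[1][1];
        int c = in[1][0] + in[1][1];
        out[0][0] = a + c;
        out[0][1] = e + b;
        out[1][0] = a - c;
        out[1][1] = e - b;
    }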
447 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
448 int src_x_offset, int src_y_offset,
450 int pixel_shift, int chroma_idc)
453 const int mx= h->mv_cache[list][scan8[n] ][0] + src_x_offset*8;
455 const int luma_xy= (mx&3) + ((my&3)<<2);
456 int offset = ((mx>>2) << pixel_shift) + (my>>2)*h->mb_linesize;
457 uint8_t * src_y = pic->f.data[0] + offset;
458 uint8_t * src_cb, * src_cr;
462 const int full_mx= mx>>2;
463 const int full_my= my>>2;
464 const int pic_width = 16*s->mb_width;
468 if(mx&7) extra_width -= 3;
469 if(my&7) extra_height -= 3;
471 if( full_mx < 0-extra_width
472 || full_my < 0-extra_height
473 || full_mx + 16 > pic_width + extra_width
474 || full_my + 16 > pic_height + extra_height){
476 16+5, 16+5, full_mx-2, full_my-2, pic_width, pic_height);
488 if(chroma_idc == 3 ){
489 src_cb = pic->f.data[1] + offset;
492 16+5, 16+5, full_mx-2, full_my-2, pic_width, pic_height);
500 src_cr = pic->f.data[2] + offset;
503 16+5, 16+5, full_mx-2, full_my-2, pic_width, pic_height);
513 ysh = 3 - (chroma_idc == 2 );
517 emu |= (my>>3) < 0 || (my>>3) + 8 >= (pic_height>>1);
520 src_cb = pic->f.data[1] + ((mx >> 3) << pixel_shift) + (my >> ysh) * h->mb_uvlinesize;
521 src_cr = pic->f.data[2] + ((mx >> 3) << pixel_shift) + (my >> ysh) * h->mb_uvlinesize;
525 9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
526 pic_width >> 1, pic_height >> (chroma_idc == 1 ));
529 chroma_op(dest_cb, src_cb, h->mb_uvlinesize, height >> (chroma_idc == 1 ),
530 mx&7, (my << (chroma_idc == 2 )) &7);
534 9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
535 pic_width >> 1, pic_height >> (chroma_idc == 1 ));
538 chroma_op(dest_cr, src_cr, h->mb_uvlinesize, height >> (chroma_idc == 1 ),
539 mx&7, (my << (chroma_idc == 2 )) &7);
544 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
545 int x_offset, int y_offset,
548 int list0, int list1, int pixel_shift, int chroma_idc)
554 dest_y += (2*x_offset << pixel_shift) + 2*y_offset*h->mb_linesize;
555 if (chroma_idc == 3 ) {
556 dest_cb += (2*x_offset << pixel_shift) + 2*y_offset*h->mb_linesize;
557 dest_cr += (2*x_offset << pixel_shift) + 2*y_offset*h->mb_linesize;
558 } else if (chroma_idc == 2 ) {
559 dest_cb += ( x_offset << pixel_shift) + 2*y_offset*h->mb_uvlinesize;
560 dest_cr += ( x_offset << pixel_shift) + 2*y_offset*h->mb_uvlinesize;
562 dest_cb += ( x_offset << pixel_shift) + y_offset*h->mb_uvlinesize;
563 dest_cr += ( x_offset << pixel_shift) + y_offset*h->mb_uvlinesize;
565 x_offset += 8*s->mb_x;
571 dest_y, dest_cb, dest_cr, x_offset, y_offset,
572 qpix_op, chroma_op, pixel_shift, chroma_idc);
575 chroma_op= chroma_avg;
581 dest_y, dest_cb, dest_cr, x_offset, y_offset,
582 qpix_op, chroma_op, pixel_shift, chroma_idc);
588 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
589 int x_offset, int y_offset,
593 int list0, int list1, int pixel_shift, int chroma_idc){
597 dest_y += (2*x_offset << pixel_shift) + 2*y_offset*h->mb_linesize;
598 if (chroma_idc == 3 ) {
600 chroma_weight_avg = luma_weight_avg;
601 chroma_weight_op = luma_weight_op;
602 dest_cb += (2*x_offset << pixel_shift) + 2*y_offset*h->mb_linesize;
603 dest_cr += (2*x_offset << pixel_shift) + 2*y_offset*h->mb_linesize;
604 } else if (chroma_idc == 2 ) {
606 dest_cb += ( x_offset << pixel_shift) + 2*y_offset*h->mb_uvlinesize;
607 dest_cr += ( x_offset << pixel_shift) + 2*y_offset*h->mb_uvlinesize;
609 chroma_height = height >> 1;
610 dest_cb += ( x_offset << pixel_shift) + y_offset*h->mb_uvlinesize;
611 dest_cr += ( x_offset << pixel_shift) + y_offset*h->mb_uvlinesize;
613 x_offset += 8*s->mb_x;
626 dest_y, dest_cb, dest_cr,
627 x_offset, y_offset, qpix_put, chroma_put,
628 pixel_shift, chroma_idc);
630 tmp_y, tmp_cb, tmp_cr,
631 x_offset, y_offset, qpix_put, chroma_put,
632 pixel_shift, chroma_idc);
636 int weight1 = 64 - weight0;
637 luma_weight_avg( dest_y, tmp_y, h-> mb_linesize,
638 height, 5, weight0, weight1, 0);
640 chroma_height, 5, weight0, weight1, 0);
642 chroma_height, 5, weight0, weight1, 0);
655 int list = list1 ? 1 : 0;
658 mc_dir_part(h, ref, n, square, height, delta, list,
659 dest_y, dest_cb, dest_cr, x_offset, y_offset,
660 qpix_put, chroma_put, pixel_shift, chroma_idc);
675 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
676 int x_offset, int y_offset,
680 int list0, int list1, int pixel_shift, int chroma_idc)
686 x_offset, y_offset, qpix_put, chroma_put,
687 weight_op[0], weight_op[1], weight_avg[0],
688 weight_avg[1], list0, list1, pixel_shift, chroma_idc);
690 mc_part_std(h, n, square, height, delta, dest_y, dest_cb, dest_cr,
691 x_offset, y_offset, qpix_put, chroma_put, qpix_avg,
692 chroma_avg, list0, list1, pixel_shift, chroma_idc);
706 int off= (mx << pixel_shift) + (my + (s->mb_x&3)*4)*h->mb_linesize + (64 << pixel_shift);
708 if (chroma_idc == 3 ) {
712 off= ((mx>>1) << pixel_shift) + ((my>>1) + (s->mb_x&7))*s->uvlinesize + (64 << pixel_shift);
722 int pixel_shift, int chroma_idc)
725 const int mb_xy= h->mb_xy;
735 mc_part(h, 0, 1, 16, 0, dest_y, dest_cb, dest_cr, 0, 0,
736 qpix_put[0], chroma_put[0], qpix_avg[0], chroma_avg[0],
737 weight_op, weight_avg,
739 pixel_shift, chroma_idc);
741 mc_part(h, 0, 0, 8, 8 << pixel_shift, dest_y, dest_cb, dest_cr, 0, 0,
742 qpix_put[1], chroma_put[0], qpix_avg[1], chroma_avg[0],
743 weight_op, weight_avg,
745 pixel_shift, chroma_idc);
746 mc_part(h, 8, 0, 8, 8 << pixel_shift, dest_y, dest_cb, dest_cr, 0, 4,
747 qpix_put[1], chroma_put[0], qpix_avg[1], chroma_avg[0],
748 weight_op, weight_avg,
750 pixel_shift, chroma_idc);
752 mc_part(h, 0, 0, 16, 8*h->mb_linesize, dest_y, dest_cb, dest_cr, 0, 0,
753 qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
754 &weight_op[1], &weight_avg[1],
756 pixel_shift, chroma_idc);
757 mc_part(h, 4, 0, 16, 8*h->mb_linesize, dest_y, dest_cb, dest_cr, 4, 0,
758 qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
759 &weight_op[1], &weight_avg[1],
761 pixel_shift, chroma_idc);
770 int x_offset= (i&1)<<2;
771 int y_offset= (i&2)<<1;
774 mc_part(h, n, 1, 8, 0, dest_y, dest_cb, dest_cr, x_offset, y_offset,
775 qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
776 &weight_op[1], &weight_avg[1],
778 pixel_shift, chroma_idc);
780 mc_part(h, n , 0, 4, 4 << pixel_shift, dest_y, dest_cb, dest_cr, x_offset, y_offset,
781 qpix_put[2], chroma_put[1], qpix_avg[2], chroma_avg[1],
782 &weight_op[1], &weight_avg[1],
784 pixel_shift, chroma_idc);
785 mc_part(h, n+2, 0, 4, 4 << pixel_shift, dest_y, dest_cb, dest_cr, x_offset, y_offset+2,
786 qpix_put[2], chroma_put[1], qpix_avg[2], chroma_avg[1],
787 &weight_op[1], &weight_avg[1],
789 pixel_shift, chroma_idc);
791 mc_part(h, n , 0, 8, 4*h->mb_linesize, dest_y, dest_cb, dest_cr, x_offset, y_offset,
792 qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
793 &weight_op[2], &weight_avg[2],
795 pixel_shift, chroma_idc);
796 mc_part(h, n+1, 0, 8, 4*h->mb_linesize, dest_y, dest_cb, dest_cr, x_offset+2, y_offset,
797 qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
798 &weight_op[2], &weight_avg[2],
800 pixel_shift, chroma_idc);
805 int sub_x_offset= x_offset + 2*(j&1);
806 int sub_y_offset= y_offset + (j&2);
807 mc_part(h, n+j, 1, 4, 0, dest_y, dest_cb, dest_cr, sub_x_offset, sub_y_offset,
808 qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
809 &weight_op[2], &weight_avg[2],
811 pixel_shift, chroma_idc);
827 hl_motion(h, dest_y, dest_cb, dest_cr, qpix_put, chroma_put,
828 qpix_avg, chroma_avg, weight_op, weight_avg, pixel_shift, 1);
838 hl_motion(h, dest_y, dest_cb, dest_cr, qpix_put, chroma_put,
839 qpix_avg, chroma_avg, weight_op, weight_avg, pixel_shift, 2);
891 for(q=0; q<max_qp+1; q++){
916 for(q=0; q<max_qp+1; q++){
917 int shift = div6[q] + 2;
970 const int b_xy = 4*x + 4*y*h->b_stride;
1051 int i, cnt, nalsize;
1064 cnt = *(p+5) & 0x1f;
1066 for (i = 0; i < cnt; i++) {
1078 for (i = 0; i < cnt; i++) {
1150 #define IN_RANGE(a, b, size) (((a) >= (b)) && ((a) < ((b)+(size))))
1155 for (i=0; i<count; i++){
1156 assert((IN_RANGE(from[i], old_base, sizeof(*old_base)) ||
1167 for (i=0; i<count; i++){
1168 if (to[i] && !from[i]) av_freep(&to[i]);
1169 else if (from[i] && !to[i]) to[i] = av_malloc(size);
1171 if (from[i]) memcpy(to[i], from[i], size);
1186 #define copy_fields(to, from, start_field, end_field) memcpy(&to->start_field, &from->start_field, (char*)&to->end_field - (char*)&to->start_field)
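copy_fields() bulk-copies every member laid out between start_field (inclusive) and end_field (exclusive) with a single memcpy, relying on the two contexts having identical layout. A small self-contained usage example with a hypothetical struct:

    #include <stdio.h>
    #include <string.h>
    #define copy_fields(to, from, start_field, end_field) \
        memcpy(&to->start_field, &from->start_field, \
               (char*)&to->end_field - (char*)&to->start_field)
    struct ctx { int a, b, c, d; };
    int main(void) {
        struct ctx s = { 1, 2, 3, 4 }, d = { 0, 0, 0, 0 };
        struct ctx *to = &d, *from = &s;
        copy_fields(to, from, b, d);          /* copies b and c; a and d untouched */
        printf("%d %d %d %d\n", d.a, d.b, d.c, d.d);   /* prints: 0 2 3 0 */
        return 0;
    }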
1193 if(dst == src || !s1->context_initialized) return 0;
1241 copy_fields(h, h1, dequant4_buffer, dequant4_coeff);
1300 for(i=0; i<16; i++){
1304 for(i=0; i<16; i++){
1351 int i, pics, out_of_order, out_idx;
1352 int invalid = 0, cnt = 0;
1471 invalid += out->poc == INT_MIN;
1473 if (!h->mmco_reset && !cur->f.key_frame && cnt + invalid == MAX_DELAYED_PIC_COUNT && cnt > 0) {
1482 invalid = MAX_DELAYED_PIC_COUNT;
1486 for (i = 1; i < MAX_DELAYED_PIC_COUNT && h->delayed_pic[i] &&
1502 if (invalid + cnt < MAX_DELAYED_PIC_COUNT) {
1548 uint8_t *src_cb, uint8_t *src_cr,
1549 int linesize, int uvlinesize, int simple)
1552 uint8_t *top_border;
1559 src_cb -= uvlinesize;
1560 src_cr -= uvlinesize;
1568 AV_COPY128(top_border+16, src_y+15*linesize+16);
1572 AV_COPY128(top_border+32, src_cb + 15*uvlinesize);
1573 AV_COPY128(top_border+48, src_cb + 15*uvlinesize+16);
1574 AV_COPY128(top_border+64, src_cr + 15*uvlinesize);
1575 AV_COPY128(top_border+80, src_cr + 15*uvlinesize+16);
1577 AV_COPY128(top_border+16, src_cb + 15*uvlinesize);
1578 AV_COPY128(top_border+32, src_cr + 15*uvlinesize);
1580 } else if(chroma422) {
1582 AV_COPY128(top_border+32, src_cb + 15*uvlinesize);
1583 AV_COPY128(top_border+48, src_cr + 15*uvlinesize);
1585 AV_COPY64(top_border+16, src_cb + 15*uvlinesize);
1586 AV_COPY64(top_border+24, src_cr + 15*uvlinesize);
1590 AV_COPY128(top_border+32, src_cb+7*uvlinesize);
1591 AV_COPY128(top_border+48, src_cr+7*uvlinesize);
1593 AV_COPY64(top_border+16, src_cb+7*uvlinesize);
1594 AV_COPY64(top_border+24, src_cr+7*uvlinesize);
1610 AV_COPY128(top_border+16, src_y+16*linesize+16);
1615 AV_COPY128(top_border+32, src_cb + 16*linesize);
1616 AV_COPY128(top_border+48, src_cb + 16*linesize+16);
1617 AV_COPY128(top_border+64, src_cr + 16*linesize);
1618 AV_COPY128(top_border+80, src_cr + 16*linesize+16);
1620 AV_COPY128(top_border+16, src_cb + 16*linesize);
1621 AV_COPY128(top_border+32, src_cr + 16*linesize);
1623 } else if(chroma422) {
1625 AV_COPY128(top_border+32, src_cb+16*uvlinesize);
1626 AV_COPY128(top_border+48, src_cr+16*uvlinesize);
1628 AV_COPY64(top_border+16, src_cb+16*uvlinesize);
1629 AV_COPY64(top_border+24, src_cr+16*uvlinesize);
1633 AV_COPY128(top_border+32, src_cb+8*uvlinesize);
1634 AV_COPY128(top_border+48, src_cr+8*uvlinesize);
1636 AV_COPY64(top_border+16, src_cb+8*uvlinesize);
1637 AV_COPY64(top_border+24, src_cr+8*uvlinesize);
1644 uint8_t *src_cb, uint8_t *src_cr,
1645 int linesize, int uvlinesize,
1646 int xchg, int chroma444,
1647 int simple, int pixel_shift){
1649 int deblock_topleft;
1652 uint8_t *top_border_m1;
1653 uint8_t *top_border;
1668 deblock_topleft = (s->mb_x > 0);
1672 src_y -= linesize + 1 + pixel_shift;
1673 src_cb -= uvlinesize + 1 + pixel_shift;
1674 src_cr -= uvlinesize + 1 + pixel_shift;
1679 #define XCHG(a,b,xchg)\
1682 AV_SWAP64(b+0,a+0);\
1683 AV_SWAP64(b+8,a+8);\
1688 if (xchg) AV_SWAP64(b,a);\
1689 else AV_COPY64(b,a);
1692 if(deblock_topleft){
1693 XCHG(top_border_m1 + (8 << pixel_shift), src_y - (7 << pixel_shift), 1);
1695 XCHG(top_border + (0 << pixel_shift), src_y + (1 << pixel_shift), xchg);
1696 XCHG(top_border + (8 << pixel_shift), src_y + (9 << pixel_shift), 1);
1703 if(deblock_topleft){
1704 XCHG(top_border_m1 + (24 << pixel_shift), src_cb - (7 << pixel_shift), 1);
1705 XCHG(top_border_m1 + (40 << pixel_shift), src_cr - (7 << pixel_shift), 1);
1707 XCHG(top_border + (16 << pixel_shift), src_cb + (1 << pixel_shift), xchg);
1708 XCHG(top_border + (24 << pixel_shift), src_cb + (9 << pixel_shift), 1);
1709 XCHG(top_border + (32 << pixel_shift), src_cr + (1 << pixel_shift), xchg);
1710 XCHG(top_border + (40 << pixel_shift), src_cr + (9 << pixel_shift), 1);
1712 XCHG(h->top_borders[top_idx][s->mb_x+1] + (16 << pixel_shift), src_cb + (17 << pixel_shift), 1);
1713 XCHG(h->top_borders[top_idx][s->mb_x+1] + (32 << pixel_shift), src_cr + (17 << pixel_shift), 1);
1717 if(deblock_topleft){
1718 XCHG(top_border_m1 + (16 << pixel_shift), src_cb - (7 << pixel_shift), 1);
1719 XCHG(top_border_m1 + (24 << pixel_shift), src_cr - (7 << pixel_shift), 1);
1721 XCHG(top_border + (16 << pixel_shift), src_cb+1+pixel_shift, 1);
1722 XCHG(top_border + (24 << pixel_shift), src_cr+1+pixel_shift, 1);
1729 if (high_bit_depth) {
1730 return AV_RN32A(((int32_t*)mb) + index);
1736 if (high_bit_depth) {
1737 AV_WN32A(((int32_t*)mb) + index, value);
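dctcoef_get()/dctcoef_set() read or write a single coefficient of h->mb, reinterpreting the buffer as int32_t for high-bit-depth streams and as the native 16-bit DCTELEM otherwise; the real code uses the AV_RN32A/AV_WN32A aligned-access macros. A hedged standalone sketch with plain casts and a hypothetical element type:

    #include <stdint.h>
    typedef int16_t MY_DCTELEM;               /* assumption: 8-bit decoding keeps int16_t coefficients */
    static int coef_get(MY_DCTELEM *mb, int high_bit_depth, int index) {
        if (high_bit_depth)
            return ((int32_t *)mb)[index];    /* 32-bit storage for >8-bit depths */
        return mb[index];
    }
    static void coef_set(MY_DCTELEM *mb, int high_bit_depth, int index, int value) {
        if (high_bit_depth)
            ((int32_t *)mb)[index] = value;
        else
            mb[index] = (MY_DCTELEM)value;
    }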
1743 int pixel_shift, int *block_offset, int linesize, uint8_t *dest_y, int p)
1750 block_offset += 16*p;
1754 if(transform_bypass){
1761 for(i=0; i<16; i+=4){
1762 uint8_t * const ptr= dest_y + block_offset[i];
1772 idct_dc_add(ptr, h->mb + (i*16+p*256 << pixel_shift), linesize);
1774 idct_add (ptr, h->mb + (i*16+p*256 << pixel_shift), linesize);
1779 if(transform_bypass){
1786 for(i=0; i<16; i++){
1787 uint8_t * const ptr= dest_y + block_offset[i];
1791 h->hpc.pred4x4_add[dir](ptr, h->mb + (i*16+p*256 << pixel_shift), linesize);
1798 assert(s->mb_y || linesize <= block_offset[i]);
1799 if(!topright_avail){
1801 tr_high= ((uint16_t*)ptr)[3 - linesize/2]*0x0001000100010001ULL;
1802 topright= (uint8_t*) &tr_high;
1804 tr= ptr[3 - linesize]*0x01010101u;
1805 topright= (uint8_t*) &tr;
1808 topright= ptr + (4 << pixel_shift) - linesize;
1812 h->hpc.pred4x4[ dir ](ptr, topright, linesize);
1817 idct_dc_add(ptr, h->mb + (i*16+p*256 << pixel_shift), linesize);
1819 idct_add (ptr, h->mb + (i*16+p*256 << pixel_shift), linesize);
1831 if(!transform_bypass)
1834 static const uint8_t dc_mapping[16] = { 0*16, 1*16, 4*16, 5*16, 2*16, 3*16, 6*16, 7*16,
1835 8*16, 9*16,12*16,13*16,10*16,11*16,14*16,15*16};
1836 for(i = 0; i < 16; i++)
1846 int pixel_shift, int *block_offset, int linesize, uint8_t *dest_y, int p)
1851 block_offset += 16*p;
1855 if(transform_bypass){
1859 for(i=0; i<16; i++){
1861 s->dsp.add_pixels4(dest_y + block_offset[i], h->mb + (i*16+p*256 << pixel_shift), linesize);
1867 } else if(h->cbp&15){
1868 if(transform_bypass){
1869 const int di = IS_8x8DCT(mb_type) ? 4 : 1;
1871 for(i=0; i<16; i+=di){
1873 idct_add(dest_y + block_offset[i], h->mb + (i*16+p*256 << pixel_shift), linesize);
1885 for(i=0; i<16; i++){
1887 uint8_t * const ptr= dest_y + block_offset[i];
1898 const int mb_x= s->mb_x;
1899 const int mb_y= s->mb_y;
1900 const int mb_xy= h->mb_xy;
1902 uint8_t *dest_y, *dest_cb, *dest_cr;
1903 int linesize, uvlinesize ;
1940 for(i=0; i<16; i+=4){
1961 for (i = 0; i < 16; i++) {
1962 uint16_t *tmp_y = (uint16_t*)(dest_y + i*linesize);
1963 for (j = 0; j < 16; j++)
1964 tmp_y[j] = get_bits(&gb, bit_depth);
1968 for (i = 0; i < block_h; i++) {
1969 uint16_t *tmp_cb = (uint16_t*)(dest_cb + i*uvlinesize);
1970 for (j = 0; j < 8; j++) {
1971 tmp_cb[j] = 1 << (bit_depth - 1);
1974 for (i = 0; i < block_h; i++) {
1975 uint16_t *tmp_cr = (uint16_t*)(dest_cr + i*uvlinesize);
1976 for (j = 0; j < 8; j++) {
1977 tmp_cr[j] = 1 << (bit_depth - 1);
1981 for (i = 0; i < block_h; i++) {
1982 uint16_t *tmp_cb = (uint16_t*)(dest_cb + i*uvlinesize);
1983 for (j = 0; j < 8; j++)
1984 tmp_cb[j] = get_bits(&gb, bit_depth);
1986 for (i = 0; i < block_h; i++) {
1987 uint16_t *tmp_cr = (uint16_t*)(dest_cr + i*uvlinesize);
1988 for (j = 0; j < 8; j++)
1989 tmp_cr[j] = get_bits(&gb, bit_depth);
1994 for (i=0; i<16; i++) {
1995 memcpy(dest_y + i* linesize, h->mb + i*8, 16);
1999 for (i = 0; i < block_h; i++) {
2000 memset(dest_cb + i*uvlinesize, 128, 8);
2001 memset(dest_cr + i*uvlinesize, 128, 8);
2004 for (i = 0; i < block_h; i++) {
2005 memcpy(dest_cb + i*uvlinesize, h->mb + 128 + i*4, 8);
2006 memcpy(dest_cr + i*uvlinesize, h->mb + 160 + i*4, 8);
2014 xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 1, 0, simple, pixel_shift);
2021 hl_decode_mb_predict_luma(h, mb_type, is_h264, simple, transform_bypass, pixel_shift, block_offset, linesize, dest_y, 0);
2024 xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 0, 0, simple, pixel_shift);
2043 hl_decode_mb_idct_luma(h, mb_type, is_h264, simple, transform_bypass, pixel_shift, block_offset, linesize, dest_y, 0);
2046 uint8_t *dest[2] = {dest_cb, dest_cr};
2047 if(transform_bypass){
2054 for(i=j*16; i<j*16+4; i++){
2056 idct_add (dest[j-1] + block_offset[i], h->mb + (i*16 << pixel_shift), uvlinesize);
2059 for(i=j*16+4; i<j*16+8; i++){
2061 idct_add (dest[j-1] + block_offset[i+4], h->mb + (i*16 << pixel_shift), uvlinesize);
2087 for(i=j*16; i<j*16+4; i++){
2089 uint8_t * const ptr= dest[j-1] + block_offset[i];
2107 const int mb_x= s->mb_x;
2108 const int mb_y= s->mb_y;
2109 const int mb_xy= h->mb_xy;
2118 for (p = 0; p < plane_count; p++)
2130 for (p = 0; p < 3; p++)
2141 for(i=0; i<16; i+=4){
2159 for (p = 0; p < plane_count; p++) {
2160 for (i = 0; i < 16; i++) {
2161 uint16_t *tmp = (uint16_t*)(dest[p] + i*linesize);
2162 for (j = 0; j < 16; j++)
2167 for (p = 0; p < plane_count; p++) {
2168 for (i = 0; i < 16; i++) {
2169 memcpy(dest[p] + i*linesize, h->mb + p*128 + i*8, 16);
2176 xchg_mb_border(h, dest[0], dest[1], dest[2], linesize, linesize, 1, 1, simple, pixel_shift);
2178 for (p = 0; p < plane_count; p++)
2182 xchg_mb_border(h, dest[0], dest[1], dest[2], linesize, linesize, 0, 1, simple, pixel_shift);
2191 for (p = 0; p < plane_count; p++)
2192 hl_decode_mb_idct_luma(h, mb_type, 1, simple, transform_bypass, pixel_shift, block_offset, linesize, dest[p], p);
2204 #define hl_decode_mb_simple(sh, bits) \
2205 static void hl_decode_mb_simple_ ## bits(H264Context *h){ \
2206 hl_decode_mb_internal(h, 1, sh); \
2228 const int mb_xy= h->mb_xy;
2237 } else if (is_complex) {
2238 hl_decode_mb_complex(h);
2240 hl_decode_mb_simple_16(h);
2242 hl_decode_mb_simple_8(h);
2248 int luma_def, chroma_def;
2258 for(list=0; list<2; list++){
2262 int luma_weight_flag, chroma_weight_flag;
2265 if(luma_weight_flag){
2280 if(chroma_weight_flag){
2313 int ref0, ref1, i, cur_poc, ref_start, ref_count0, ref_count1;
2315 for (i = 0; i < 2; i++) {
2347 for(ref0=ref_start; ref0 < ref_count0; ref0++){
2349 for(ref1=ref_start; ref1 < ref_count1; ref1++){
2353 int td = av_clip(poc1 - poc0, -128, 127);
2355 int tb = av_clip(cur_poc - poc0, -128, 127);
2356 int tx = (16384 + (FFABS(td) >> 1)) / td;
2357 int dist_scale_factor = (tb*tx + 32) >> 8;
2358 if(dist_scale_factor >= -64 && dist_scale_factor <= 128)
2359 w = 64 - dist_scale_factor;
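The implicit-weight arithmetic above turns POC distances into a pair of bi-prediction weights that sum to 64: tx approximates 16384/td, so dist_scale_factor is roughly 64*tb/td, and the stored weight w = 64 - dist_scale_factor favours the temporally closer reference. A worked example with hypothetical POC values:

    #include <stdio.h>
    #include <stdlib.h>
    int main(void) {
        int poc0 = 0, poc1 = 8, cur_poc = 2;              /* hypothetical POCs; clipping omitted */
        int td = poc1 - poc0;
        int tb = cur_poc - poc0;
        int tx = (16384 + (abs(td) >> 1)) / td;            /* ~16384/td, rounded      */
        int dist_scale_factor = (tb * tx + 32) >> 8;       /* ~64*tb/td -> 16 here    */
        int w0 = 64 - dist_scale_factor;                   /* list 0 weight -> 48     */
        int w1 = dist_scale_factor;                        /* list 1 weight -> 16     */
        printf("td=%d tb=%d w0=%d w1=%d\n", td, tb, w0, w1);
        return 0;
    }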
2429 int abs_frame_num, expected_delta_per_poc_cycle, expectedpoc;
2440 expected_delta_per_poc_cycle = 0;
2444 if(abs_frame_num > 0){
2448 expectedpoc = poc_cycle_cnt * expected_delta_per_poc_cycle;
2449 for(i = 0; i <= frame_num_in_poc_cycle; i++)
2457 field_poc[0] = expectedpoc + h->delta_poc[0];
2487 for(i=0; i<16; i++){
2488 #define T(x) (x>>2) | ((x<<2) & 0xF)
2493 for(i=0; i<64; i++){
2494 #define T(x) (x>>3) | ((x&7)<<3)
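Each T(x) macro above transposes a raster index within a 4x4 (respectively 8x8) block by swapping the row and column bit groups; init_scan_tables() appears to use it to turn the reference scan orders into the transposed layout the decoder's IDCT expects. A tiny sketch of the 4x4 case:

    #include <stdio.h>
    /* idx = 4*row + col  ->  4*col + row */
    static int transpose4x4_index(int x) {
        return (x >> 2) | ((x << 2) & 0xF);
    }
    int main(void) {
        for (int x = 0; x < 16; x++)
            printf("%2d -> %2d\n", x, transpose4x4_index(x));   /* e.g. 1 -> 4, 7 -> 13 */
        return 0;
    }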
2635 "Reenabling low delay requires a codec flush.\n");
2649 "VDPAU decoding does not support video colorspace.\n");
2684 unsigned int first_mb_in_slice;
2685 unsigned int pps_id;
2686 int num_ref_idx_active_override_flag;
2687 unsigned int slice_type, tmp, i, j;
2688 int default_ref_list_done = 0;
2689 int last_pic_structure, last_pic_dropable, ret;
2702 if(first_mb_in_slice == 0){
2732 default_ref_list_done = 1;
2948 last_pic_dropable != s->dropable) {
2950 "Changing field mode (%d -> %d) between slices is not allowed\n",
2957 "unset current_picture_ptr on %d. slice\n",
2968 if (unwrap_prev_frame_num > h->frame_num) unwrap_prev_frame_num -= max_frame_num;
2972 if (unwrap_prev_frame_num < 0)
2973 unwrap_prev_frame_num += max_frame_num;
2999 if (!last_pic_dropable && last_pic_structure != PICT_FRAME) {
3009 if (!last_pic_dropable && last_pic_structure != PICT_FRAME) {
3020 "Invalid field mode combination %d/%d\n",
3025 } else if (last_pic_dropable != s->dropable) {
3027 "Cannot combine reference and non-reference fields in the same frame\n");
3131 first_mb_in_slice >= s->mb_num){
3184 num_ref_idx_active_override_flag= get_bits1(&s->gb);
3186 if(num_ref_idx_active_override_flag){
3210 if(!default_ref_list_done){
3235 for (i = 0; i < 2; i++) {
3305 "deblocking filter parameters %d %d out of range\n",
3326 av_log(s->avctx, AV_LOG_INFO, "Cannot parallelize deblocking type 1, decoding such frames in sequential order\n");
3351 for(i=0; i<16; i++){
3372 ref2frm[i+2]= 4*id_list[i]
3376 for(i=16; i<48; i++)
3377 ref2frm[i+4]= 4*id_list[(i-16)>>1]
3386 av_log(h->s.avctx, AV_LOG_DEBUG, "slice:%d %s mb:%d %c%s%s pps:%u frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
3419 int left_xy[LEFT_MBS], int top_type, int left_type[LEFT_MBS], int mb_xy, int list)
3426 const int b_xy= h->mb2b_xy[top_xy] + 3*b_stride;
3427 const int b8_xy= 4*top_xy + 2;
3442 const int b8_xy= 4*left_xy[LTOP] + 1;
3477 uint32_t ref01 = (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101;
3478 uint32_t ref23 = (pack16to32(ref2frm[list][ref[2]],ref2frm[list][ref[3]])&0x00FF00FF)*0x0101;
3487 AV_COPY128(mv_dst + 8*0, mv_src + 0*b_stride);
3488 AV_COPY128(mv_dst + 8*1, mv_src + 1*b_stride);
3489 AV_COPY128(mv_dst + 8*2, mv_src + 2*b_stride);
3490 AV_COPY128(mv_dst + 8*3, mv_src + 3*b_stride);
3500 const int mb_xy= h->mb_xy;
3511 left_xy[LBOT] = left_xy[LTOP] = mb_xy-1;
3516 if (left_mb_field_flag != curr_mb_field_flag) {
3520 if(curr_mb_field_flag){
3523 if (left_mb_field_flag != curr_mb_field_flag) {
3555 if(h->slice_table[top_xy ] == 0xFFFF) top_type= 0;
3579 AV_COPY32(&nnz_cache[4+8*0], &nnz[3*4]);
3582 if(left_type[LTOP]){
3584 nnz_cache[3+8*1]= nnz[3+0*4];
3585 nnz_cache[3+8*2]= nnz[3+1*4];
3586 nnz_cache[3+8*3]= nnz[3+2*4];
3587 nnz_cache[3+8*4]= nnz[3+3*4];
3594 nnz_cache[5+8*0]= (h->cbp_table[top_xy] & 0x4000) >> 12;
3596 nnz_cache[7+8*0]= (h->cbp_table[top_xy] & 0x8000) >> 12;
3600 nnz_cache[3+8*2]= (h->cbp_table[left_xy[LTOP]]&0x2000) >> 12;
3604 nnz_cache[3+8*4]= (h->cbp_table[left_xy[LBOT]]&0x8000) >> 12;
3608 nnz_cache[scan8[0 ]]= nnz_cache[scan8[1 ]]=
3609 nnz_cache[scan8[2 ]]= nnz_cache[scan8[3 ]]= (h->cbp & 0x1000) >> 12;
3611 nnz_cache[scan8[0+ 4]]= nnz_cache[scan8[1+ 4]]=
3612 nnz_cache[scan8[2+ 4]]= nnz_cache[scan8[3+ 4]]= (h->cbp & 0x2000) >> 12;
3614 nnz_cache[scan8[0+ 8]]= nnz_cache[scan8[1+ 8]]=
3615 nnz_cache[scan8[2+ 8]]= nnz_cache[scan8[3+ 8]]= (h->cbp & 0x4000) >> 12;
3617 nnz_cache[scan8[0+12]]= nnz_cache[scan8[1+12]]=
3618 nnz_cache[scan8[2+12]]= nnz_cache[scan8[3+12]]= (h->cbp & 0x8000) >> 12;
3627 uint8_t *dest_y, *dest_cb, *dest_cr;
3628 int linesize, uvlinesize, mb_x, mb_y;
3635 for(mb_x= start_x; mb_x<end_x; mb_x++){
3636 for(mb_y=end_mb_y - FRAME_MBAFF; mb_y<= end_mb_y; mb_y++){
3672 ff_h264_filter_mb (h, mb_x, mb_y, dest_y, dest_cb, dest_cr, linesize, uvlinesize);
3708 if((top + height) >= pic_height)
3709 height += deblock_border;
3711 top -= deblock_border;
3717 height = FFMIN(height, pic_height - top);
3718 if (top < h->emu_edge_height) {
3735 int lf_x_start = s->mb_x;
3785 s->mb_x = lf_x_start = 0;
3825 s->mb_x = lf_x_start = 0;
3880 "Input contains more MB rows than the frame height.\n");
3886 if(context_count == 1) {
3889 for(i = 1; i < context_count; i++) {
3904 for(i = 1; i < context_count; i++)
3931 for(;pass <= 1;pass++){
3934 next_avc = h->is_avc ? 0 : buf_size;
3944 if(buf_index >= next_avc) {
3948 nalsize = (nalsize << 8) | buf[buf_index++];
3949 if(nalsize <= 0 || nalsize > buf_size - buf_index){
3953 next_avc= buf_index + nalsize;
3956 for(; buf_index + 3 < next_avc; buf_index++){
3958 if(buf[buf_index] == 0 && buf[buf_index+1] == 0 && buf[buf_index+2] == 1)
3963 if (buf_index + 3 >= buf_size) {
3964 buf_index = buf_size;
3969 if(buf_index >= next_avc) continue;
3974 ptr= ff_h264_decode_nal(hx, buf + buf_index, &dst_length, &consumed, next_avc - buf_index);
3975 if (ptr == NULL || dst_length < 0) {
3979 i= buf_index + consumed;
3981 buf[i]==0x00 && buf[i+1]==0x00 && buf[i+2]==0x01 && buf[i+3]==0xE0)
3985 while(dst_length > 0 && ptr[dst_length - 1] == 0)
3994 if (h->is_avc && (nalsize != consumed) && nalsize){
3998 buf_index += consumed;
4008 nals_needed = nal_index;
4014 nals_needed = nal_index;
4067 static const uint8_t start_code[] = {0x00, 0x00, 0x01};
4077 "Decoding in chunks is not supported for "
4078 "partitioned slices.\n");
4121 h->is_avc && (nalsize != consumed) && nalsize) {
4123 "try parsing the complete NAL\n");
4159 } else if (err == 1) {
4190 if(pos+10>buf_size) pos=buf_size;
4196 void *data, int *data_size,
4199 const uint8_t *buf = avpkt->data;
4200 int buf_size = avpkt->size;
4214 if (buf_size == 0) {
4272 assert(pict->data[0] || !*data_size);
4302 #define SIZE (COUNT*40)
4315 printf("testing unsigned exp golomb\n");
4316 for(i=0; i<COUNT; i++){
4324 for(i=0; i<COUNT; i++){
4330 printf("mismatch! at %d (%d should be %d) bits:%6X\n", i, j, i, s);
4338 printf("testing signed exp golomb\n");
4339 for(i=0; i<COUNT; i++){
4347 for(i=0; i<COUNT; i++){
4352 if(j != i - COUNT/2){
4353 printf("mismatch! at %d (%d should be %d) bits:%6X\n", i, j, i, s);
4359 printf("Testing RBSP\n");
4428 #if CONFIG_H264_VDPAU_DECODER
4429 AVCodec ff_h264_vdpau_decoder = {
4430 .name = "h264_vdpau",
4439 .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (VDPAU acceleration)"),
Picture default_ref_list[2][32]
base reference list for all slices of a coded picture
#define PICT_BOTTOM_FIELD
#define FF_PROFILE_H264_MAIN
void ff_h264_direct_dist_scale_factor(H264Context *const h)
int video_signal_type_present_flag
void ff_vdpau_h264_set_reference_frames(MpegEncContext *s)
int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb)
int ff_h264_decode_mb_cabac(H264Context *h)
Decode a CABAC coded macroblock.
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
static void clone_tables(H264Context *dst, H264Context *src, int i)
Mimic alloc_tables(), but for every context thread.
void(* emulated_edge_mc)(uint8_t *buf, const uint8_t *src, int linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Motion estimation with emulated edge values.
enum PixelFormat pix_fmt
Pixel format, see PIX_FMT_xxx.
int ff_h264_decode_seq_parameter_set(H264Context *h)
Decode SPS.
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
static av_always_inline void hl_motion_422(H264Context *h, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, qpel_mc_func(*qpix_put)[16], h264_chroma_mc_func(*chroma_put), qpel_mc_func(*qpix_avg)[16], h264_chroma_mc_func(*chroma_avg), h264_weight_func *weight_op, h264_biweight_func *weight_avg, int pixel_shift)
unsigned int top_samples_available
#define FF_PROFILE_H264_CAVLC_444
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
unsigned int topleft_samples_available
int single_decode_warning
1 if the single thread fallback warning has already been displayed, 0 otherwise.
5: top field, bottom field, top field repeated, in that order
static av_always_inline void hl_decode_mb_predict_luma(H264Context *h, int mb_type, int is_h264, int simple, int transform_bypass, int pixel_shift, int *block_offset, int linesize, uint8_t *dest_y, int p)
const uint8_t ff_zigzag_direct[64]
AV_WL32 AV_WL24 AV_WL16 AV_WB32 AV_WB24 AV_RB16
GetBitContext * intra_gb_ptr
void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
void ff_vdpau_h264_picture_complete(MpegEncContext *s)
static void copy_parameter_set(void **to, void **from, int count, int size)
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
3: top field, bottom field, in that order
const uint8_t * ff_h264_decode_nal(H264Context *h, const uint8_t *src, int *dst_length, int *consumed, int length)
Decode a network abstraction layer unit.
void(* prefetch)(void *mem, int stride, int h)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
int chroma_qp_index_offset[2]
const uint8_t * bytestream_end
#define CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
void(* pred16x16[4+3+2])(uint8_t *src, int stride)
static void set_ue_golomb(PutBitContext *pb, int i)
write unsigned exp golomb code.
static int ff_h264_decode_rbsp_trailing(H264Context *h, const uint8_t *src)
Identify the exact end of the bitstream.
enum PixelFormat(* get_format)(struct AVCodecContext *s, const enum PixelFormat *fmt)
callback to negotiate the pixelFormat
av_cold int ff_h264_decode_init(AVCodecContext *avctx)
int luma_weight_flag[2]
7.4.3.2 luma_weight_lX_flag
#define AV_LOG_WARNING
Something somehow does not look correct.
MMCO mmco[MAX_MMCO_COUNT]
memory management control operations buffer.
static void align_get_bits(GetBitContext *s)
static av_always_inline void mc_part_weighted(H264Context *h, int n, int square, int height, int delta, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int x_offset, int y_offset, qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put, h264_weight_func luma_weight_op, h264_weight_func chroma_weight_op, h264_biweight_func luma_weight_avg, h264_biweight_func chroma_weight_avg, int list0, int list1, int pixel_shift, int chroma_idc)
enum AVColorRange color_range
MPEG vs JPEG YUV range.
hardware decoding through VDA
static const uint8_t field_scan8x8[64]
static void init_dequant_tables(H264Context *h)
int bitstream_restriction_flag
#define FF_PROFILE_H264_INTRA
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
void(* h264_idct_add16intra)(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[15 *8])
int repeat_pict
When decoding, this signals how much the picture must be delayed.
the normal 2^n-1 "JPEG" YUV ranges
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_...
void * thread_opaque
used by multithreading to store frame-specific info
static av_always_inline int dctcoef_get(DCTELEM *mb, int high_bit_depth, int index)
void(* h264_chroma_dc_dequant_idct)(DCTELEM *block, int qmul)
int field_picture
whether or not the picture was encoded in separate fields
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
const uint8_t * field_scan8x8_q0
void ff_generate_sliding_window_mmcos(H264Context *h)
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
discard all bidirectional frames
int is_avc
Used to parse AVC variant of h264.
void(* h264_idct_add8)(uint8_t **dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[15 *8])
static int decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
uint8_t zigzag_scan8x8_cavlc[64]
int ff_h264_get_profile(SPS *sps)
Compute profile from profile_idc and constraint_set?_flags.
uint32_t dequant8_buffer[6][QP_MAX_NUM+1][64]
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
static int context_init(H264Context *h)
Init context Allocate buffers which are not shared amongst multiple threads.
int mmco_reset
h264 MMCO_RESET set this 1. Reordering code must not mix pictures before and after MMCO_RESET...
int prev_poc_msb
poc_msb of the last reference pic for POC type 0
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
uint32_t num_units_in_tick
static av_always_inline void mc_part(H264Context *h, int n, int square, int height, int delta, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int x_offset, int y_offset, qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put, qpel_mc_func *qpix_avg, h264_chroma_mc_func chroma_avg, h264_weight_func *weight_op, h264_biweight_func *weight_avg, int list0, int list1, int pixel_shift, int chroma_idc)
4: bottom field, top field, in that order
int mb_num
number of MBs of a picture
static int get_lowest_part_list_y(H264Context *h, Picture *pic, int n, int height, int y_offset, int list)
void ff_svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp, int dc)
int slice_type_nos
S free slice type (SI/SP are remapped to I/P)
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
static const uint8_t rem6[QP_MAX_NUM+1]
unsigned current_sps_id
id of the current SPS
static av_always_inline void mc_dir_part(H264Context *h, Picture *pic, int n, int square, int height, int delta, int list, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int src_x_offset, int src_y_offset, qpel_mc_func *qpix_op, h264_chroma_mc_func chroma_op, int pixel_shift, int chroma_idc)
static av_always_inline uint32_t pack16to32(int a, int b)
int encoding
true if we are encoding (vs decoding)
static const uint8_t zigzag_scan[16]
void ff_h264_init_cabac_states(H264Context *h)
#define USES_LIST(a, list)
does this mb use listX, note does not work if subMBs
#define FF_PROFILE_H264_HIGH_444_PREDICTIVE
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
AVHWAccel * ff_find_hwaccel(enum CodecID codec_id, enum PixelFormat pix_fmt)
Return the hardware accelerated codec for codec codec_id and pixel format pix_fmt.
uint8_t * chroma_pred_mode_table
#define IS_DIR(a, part, list)
static const uint8_t div6[QP_MAX_NUM+1]
enum AVDiscard skip_frame
int ff_h264_decode_ref_pic_list_reordering(H264Context *h)
static const uint8_t golomb_to_pict_type[5]
void(* pred8x8_add[3])(uint8_t *pix, const int *block_offset, const DCTELEM *block, int stride)
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_...
struct AVHWAccel * hwaccel
Hardware accelerator in use.
int long_ref
1->long term reference 0->short term reference
static av_always_inline void hl_decode_mb_idct_luma(H264Context *h, int mb_type, int is_h264, int simple, int transform_bypass, int pixel_shift, int *block_offset, int linesize, uint8_t *dest_y, int p)
static int decode_init_thread_copy(AVCodecContext *avctx)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
uint8_t scaling_matrix4[6][16]
#define PIX_FMT_YUV420P10
const uint8_t * bytestream
int ref2frm[MAX_SLICES][2][64]
reference to frame number lists, used in the loop filter, the first 2 are for -2,-1 ...
int deblocking_filter_parameters_present
deblocking_filter_parameters_present_flag
static int decode(MimicContext *ctx, int quality, int num_coeffs, int is_iframe)
#define FF_PROFILE_H264_BASELINE
DCTELEM mb_luma_dc[3][16 *2]
uint32_t(*[6] dequant4_coeff)[16]
static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, int pixel_shift)
int prev_frame_num_offset
for POC type 2
void(* h264_luma_dc_dequant_idct)(DCTELEM *output, DCTELEM *input, int qmul)
int offset_for_non_ref_pic
void ff_h264_reset_sei(H264Context *h)
Reset SEI values at the beginning of the frame.
int luma_weight[48][2][2]
enum OutputFormat out_format
output format
#define FF_DEBUG_PICT_INFO
struct H264Context H264Context
H264Context.
void(* qpel_mc_func)(uint8_t *dst, uint8_t *src, int stride)
h264_chroma_mc_func avg_h264_chroma_pixels_tab[3]
enum AVColorPrimaries color_primaries
enum PixelFormat ff_hwaccel_pixfmt_list_420[]
av_cold void ff_h264_decode_init_vlc(void)
void(* h264_idct_add)(uint8_t *dst, DCTELEM *block, int stride)
DCTELEM mb[16 *48 *2]
as a dct coeffecient is int32_t in high depth, we need to reserve twice the space.
Picture ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
int cabac
entropy_coding_mode_flag
qpel_mc_func(* qpel_put)[16]
#define LUMA_DC_BLOCK_INDEX
#define DIAG_DOWN_LEFT_PRED
static const uint8_t dequant8_coeff_init[6][6]
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
unsigned int crop_right
frame_cropping_rect_right_offset
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
const AVClass * av_class
information on struct for av_log
Picture current_picture
copy of the current picture structure.
void ff_h264_fill_mbaff_ref_list(H264Context *h)
static void clone_slice(H264Context *dst, H264Context *src)
Replicate H264 "master" context to thread contexts.
h264_weight_func weight_h264_pixels_tab[4]
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_...
int transform_bypass
qpprime_y_zero_transform_bypass_flag
static int init_poc(H264Context *h)
static int get_bits_count(const GetBitContext *s)
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
int ff_h264_get_slice_type(const H264Context *h)
Reconstruct bitstream slice_type.
int redundant_pic_cnt_present
redundant_pic_cnt_present_flag
H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstre...
#define FF_PROFILE_H264_EXTENDED
static av_always_inline void hl_decode_mb_444_internal(H264Context *h, int simple, int pixel_shift)
static const uint8_t dequant8_coeff_init_scan[16]
int flags2
AVCodecContext.flags2.
int interlaced_frame
The content of the picture is interlaced.
#define MAX_DELAYED_PIC_COUNT
int mb_height
number of MBs horizontally & vertically
static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
void MPV_common_end(MpegEncContext *s)
Picture * next_output_pic
const uint8_t ff_h264_chroma_qp[3][QP_MAX_NUM+1]
One chroma qp table for each supported bit depth (8, 9, 10).
the normal 219*2^(n-8) "MPEG" YUV ranges
static av_cold void common_init(H264Context *h)
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
int luma_log2_weight_denom
av_cold void dsputil_init(DSPContext *c, AVCodecContext *avctx)
void avcodec_set_dimensions(AVCodecContext *s, int width, int height)
static int h264_set_parameter_from_sps(H264Context *h)
int chroma_weight[48][2][2][2]
static int init(AVCodecParserContext *s)
int last_pocs[MAX_DELAYED_PIC_COUNT]
#define CODEC_FLAG_GRAY
Only decode/encode grayscale.
static void init_dequant4_coeff_table(H264Context *h)
#define CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
const uint8_t * zigzag_scan8x8_cavlc_q0
H.264 / AVC / MPEG4 part10 codec.
static int get_bits_left(GetBitContext *gb)
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
int slice_context_count
number of used thread_contexts
int mb_aff
mb_adaptive_frame_field_flag
enum AVColorTransferCharacteristic color_trc
void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, const int chroma_format_idc)
Set the intra prediction function pointers.
int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
int has_b_frames
Size of the frame reordering buffer in the decoder.
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer. ...
static int get_ue_golomb(GetBitContext *gb)
read unsigned exp golomb code.
static int decode_slice_header(H264Context *h, H264Context *h0)
Decode a slice header.
void(* pred16x16_add[3])(uint8_t *pix, const int *block_offset, const DCTELEM *block, int stride)
const uint8_t * zigzag_scan_q0
int poc_type
pic_order_cnt_type
Multithreading support functions.
void(* h264_biweight_func)(uint8_t *dst, uint8_t *src, int stride, int height, int log2_denom, int weightd, int weights, int offset)
static const uint16_t mask[17]
void ff_h264_hl_decode_mb(H264Context *h)
int reference
is this picture used as reference The values for this are the same as the MpegEncContext.picture_structure variable, that is 1->top field, 2->bottom field, 3->frame/both fields.
int partitioned_frame
is current frame partitioned
int is_copy
Whether the parent AVCodecContext is a copy of the context which had init() called on it...
void ff_h264_direct_ref_list_init(H264Context *const h)
#define CHROMA_DC_BLOCK_INDEX
static av_always_inline int get_chroma_qp(H264Context *h, int t, int qscale)
Get the chroma qp.
GetBitContext * inter_gb_ptr
void ff_svq3_luma_dc_dequant_idct_c(DCTELEM *output, DCTELEM *input, int qp)
static void copy_picture_range(Picture **to, Picture **from, int count, MpegEncContext *new_base, MpegEncContext *old_base)
#define ALZHEIMER_DC_L0T_PRED8x8
int unrestricted_mv
mv can point outside of the coded picture
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Picture * delayed_pic[MAX_DELAYED_PIC_COUNT+2]
int active_thread_type
Which multithreading methods are in use by the codec.
int mb_field_decoding_flag
static void flush_dpb(AVCodecContext *avctx)
int capabilities
Codec capabilities.
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
uint8_t * base[AV_NUM_DATA_POINTERS]
pointer to the first allocated byte of the picture.
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
uint8_t(*[2] mvd_table)[2]
#define FF_PROFILE_H264_HIGH_422
int prev_interlaced_frame
Complement sei_pic_struct SEI_PIC_STRUCT_TOP_BOTTOM and SEI_PIC_STRUCT_BOTTOM_TOP indicate interlaced...
#define FF_PROFILE_H264_HIGH
int direct_spatial_mv_pred
simple assert() macros that are a bit more flexible than ISO C assert().
int weighted_pred
weighted_pred_flag
void av_log(void *avcl, int level, const char *fmt,...)
int main(int argc, char **argv)
static int get_consumed_bytes(MpegEncContext *s, int pos, int buf_size)
Return the number of bytes consumed for building the current frame.
const char * name
Name of the codec implementation.
void(* h264_chroma_mc_func)(uint8_t *dst, uint8_t *src, int srcStride, int h, int x, int y)
static void predict_field_decoding_flag(H264Context *h)
int quarter_sample
1->qpel, 0->half pel ME/MC
int ff_h264_decode_mb_cavlc(H264Context *h)
Decode a macroblock.
int low_delay
no reordering needed / has no b-frames
uint8_t * list_counts
Array of list_count per MB specifying the slice type.
void(* pred8x8[4+3+4])(uint8_t *src, int stride)
int delta_pic_order_always_zero_flag
int new
flag to keep track if the decoder context needs re-init due to changed SPS
int dct_bits
Size of DCT coefficients.
int offset_for_top_to_bottom_field
#define IN_RANGE(a, b, size)
h264_chroma_mc_func put_h264_chroma_pixels_tab[3]
h264 Chroma MC
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
#define CODEC_FLAG_EMU_EDGE
Don't draw edges.
uint8_t zigzag_scan8x8[64]
void MPV_frame_end(MpegEncContext *s)
int picture_count
number of allocated pictures (MAX_PICTURE_COUNT * avctx->thread_count)
static const uint8_t scan8[16 *3+3]
void ff_mpeg_flush(AVCodecContext *avctx)
void(* add_pixels8)(uint8_t *pixels, DCTELEM *block, int line_size)
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
int resync_mb_x
x position of last resync marker
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
h264_biweight_func biweight_h264_pixels_tab[4]
static av_always_inline void xchg_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int xchg, int chroma444, int simple, int pixel_shift)
uint8_t scaling_matrix8[6][64]
int nal_length_size
Number of bytes used for nal length (1, 2 or 4)
useful rectangle filling function
void ff_er_add_slice(MpegEncContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
int refs
number of reference frames
unsigned int left_samples_available
int ref_frame_count
num_ref_frames
enum AVPictureType pict_type
Picture type of the frame, see ?_TYPE below.
void ff_h264_filter_mb(H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
const AVClass * avcodec_get_class(void)
Get the AVClass for AVCodecContext.
int frame_num_offset
for POC type 2
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
#define FF_THREAD_FRAME
Decode more than one frame at once.
uint16_t * slice_table
slice_table_base + 2*mb_stride + 1
uint8_t field_scan8x8_cavlc[64]
int poc_cycle_length
num_ref_frames_in_pic_order_cnt_cycle
int colour_description_present_flag
void MPV_decode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for decoding.
static const uint8_t field_scan8x8_cavlc[64]
int width
picture width / height.
static av_always_inline void fill_filter_caches_inter(H264Context *h, MpegEncContext *const s, int mb_type, int top_xy, int left_xy[LEFT_MBS], int top_type, int left_type[LEFT_MBS], int mb_xy, int list)
struct AVFrame AVFrame
Audio Video Frame.
Picture * current_picture_ptr
pointer to the current picture
#define FF_PROFILE_UNKNOWN
int long_ref_count
number of actual long term references
void ff_copy_picture(Picture *dst, Picture *src)
qpel_mc_func avg_2tap_qpel_pixels_tab[4][16]
void(* pred8x8l_add[2])(uint8_t *pix, const DCTELEM *block, int stride)
static void implicit_weight_table(H264Context *h, int field)
Initialize implicit_weight table.
PPS * pps_buffers[MAX_PPS_COUNT]
static av_always_inline void prefetch_motion(H264Context *h, int list, int pixel_shift, int chroma_idc)
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
static void fill_rectangle(SDL_Surface *screen, int x, int y, int w, int h, int color)
void ff_er_frame_end(MpegEncContext *s)
int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length)
Decode PPS.
void ff_vdpau_h264_picture_start(MpegEncContext *s)
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], enum PixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Context Adaptive Binary Arithmetic Coder inline functions.
int init_qp
pic_init_qp_minus26 + 26
int frame_num
h264 frame_num (raw frame_num from slice header)
int8_t intra4x4_pred_mode_cache[5 *8]
int max_pic_num
max_frame_num or 2*max_frame_num for field pics.
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
static const uint8_t field_scan[16]
static int decode_slice(struct AVCodecContext *avctx, void *arg)
unsigned int topright_samples_available
static void av_noinline hl_decode_mb_444_complex(H264Context *h)
const uint8_t * zigzag_scan8x8_q0
int curr_pic_num
frame_num for frames or 2*frame_num+1 for field pics.
static void init_scan_tables(H264Context *h)
initialize scan tables
static int av_unused get_cabac_terminate(CABACContext *c)
static void loop_filter(H264Context *h, int start_x, int end_x)
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
uint32_t dequant4_buffer[6][QP_MAX_NUM+1][16]
void(* h264_idct8_dc_add)(uint8_t *dst, DCTELEM *block, int stride)
void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
av_cold int MPV_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
#define PART_NOT_AVAILABLE
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
void ff_vdpau_add_data_chunk(MpegEncContext *s, const uint8_t *buf, int buf_size)
int first_field
is 1 for the first field of a field picture 0 otherwise
int dequant_coeff_pps
reinit tables when pps changes
#define AVERROR_PATCHWELCOME
Not yet implemented in Libav, patches welcome.
int pic_order_present
pic_order_present_flag
static void idct_add(uint8_t *dest, int line_size, DCTELEM *block)
SPS * sps_buffers[MAX_SPS_COUNT]
void(* h264_idct8_add)(uint8_t *dst, DCTELEM *block, int stride)
struct H264Context * thread_context[MAX_THREADS]
int chroma_log2_weight_denom
uint32_t * mb_type
macroblock type table mb_type_base + mb_width + 2
short offset_for_ref_frame[256]
#define CODEC_FLAG_LOW_DELAY
Force low delay.
int timing_info_present_flag
static void decode_finish_row(H264Context *h)
Draw edges and report progress for the last MB row.
int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count)
Execute the reference picture marking (memory management control operations).
static av_always_inline void hl_motion(H264Context *h, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, qpel_mc_func(*qpix_put)[16], h264_chroma_mc_func(*chroma_put), qpel_mc_func(*qpix_avg)[16], h264_chroma_mc_func(*chroma_avg), h264_weight_func *weight_op, h264_biweight_func *weight_avg, int pixel_shift, int chroma_idc)
struct MpegEncContext * thread_context[MAX_THREADS]
H264 / AVC / MPEG4 part10 codec data table
int ff_h264_frame_start(H264Context *h)
static void set_se_golomb(PutBitContext *pb, int i)
write signed exp golomb code.
void ff_thread_await_progress(AVFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
#define FF_PROFILE_H264_HIGH_422_INTRA
int slice_alpha_c0_offset
static int get_ue_golomb_31(GetBitContext *gb)
read unsigned exp golomb code, constraint to a max of 31.
void ff_h264_remove_all_refs(H264Context *h)
int prev_frame_num
frame_num of the last pic for POC type 1/2
int linesize[AV_NUM_DATA_POINTERS]
Size, in bytes, of the data for each picture/channel plane.
static void get_lowest_part_y(H264Context *h, int refs[2][48], int n, int height, int y_offset, int list0, int list1, int *nrefs)
int ff_h264_decode_sei(H264Context *h)
Decode SEI.
#define PIX_FMT_YUV422P10
discard all non reference
void av_log_ask_for_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message asking for a sample.
int implicit_weight[48][48][2]
int max_contexts
Max number of threads / contexts.
main external API structure.
static void(WINAPI *cond_broadcast)(pthread_cond_t *cond)
static void close(AVCodecParserContext *s)
int ff_h264_check_intra4x4_pred_mode(H264Context *h)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
static void decode_postinit(H264Context *h, int setup_finished)
Run setup operations that must be run after slice header decoding.
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
int height
picture size. must be a multiple of 16
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int16_t(*[2] motion_val)[2]
motion vector table
static void init_dequant8_coeff_table(H264Context *h)
qpel_mc_func put_2tap_qpel_pixels_tab[4][16]
Picture * picture
main picture buffer
int data_partitioning
data partitioning flag from header
int constraint_set_flags
constraint_set[0-3]_flag
static unsigned int get_bits1(GetBitContext *s)
SEI_PicStructType sei_pic_struct
pic_struct in picture timing SEI message
#define FF_PROFILE_H264_HIGH_10_INTRA
void(* h264_idct_dc_add)(uint8_t *dst, DCTELEM *block, int stride)
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
const uint8_t * field_scan8x8_cavlc_q0
void ff_draw_horiz_band(MpegEncContext *s, int y, int h)
uint32_t(*[6] dequant8_coeff)[64]
int qp_thresh
QP threshold to skip loopfilter.
enum AVColorSpace colorspace
YUV colorspace type.
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
static av_always_inline void hl_motion_420(H264Context *h, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, qpel_mc_func(*qpix_put)[16], h264_chroma_mc_func(*chroma_put), qpel_mc_func(*qpix_avg)[16], h264_chroma_mc_func(*chroma_avg), h264_weight_func *weight_op, h264_biweight_func *weight_avg, int pixel_shift)
int pixel_shift
0 for 8-bit H264, 1 for high-bit-depth H264
void ff_er_frame_start(MpegEncContext *s)
static void await_references(H264Context *h)
Wait until all reference frames are available for MC operations.
void(* h264_weight_func)(uint8_t *block, int stride, int height, int log2_denom, int weight, int offset)
int8_t * ref_index[2]
motion reference frame index the order in which these are stored can depend on the codec...
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output...
DSPContext dsp
pointers for accelerated dsp functions
#define FF_PROFILE_H264_HIGH_444
#define FF_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
#define FF_COMPLIANCE_STRICT
Strictly conform to all the things in the spec no matter what consequences.
int log2_max_poc_lsb
log2_max_pic_order_cnt_lsb_minus4
6: bottom field, top field, bottom field repeated, in that order
#define CODEC_CAP_HWACCEL_VDPAU
Codec can export data for HW decoding (VDPAU).
void(* h264_idct8_add4)(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[15 *8])
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
int block_offset[2 *(16 *3)]
block_offset[ 0..23] for frame macroblocks block_offset[24..47] for field macroblocks ...
int field_poc[2]
h264 top/bottom POC
#define FF_BUG_AUTODETECT
autodetection
int transform_8x8_mode
transform_8x8_mode_flag
static int pred_weight_table(H264Context *h)
void(* pred4x4[9+3+3])(uint8_t *src, const uint8_t *topright, int stride)
int pic_struct_present_flag
av_cold void ff_h264_free_context(H264Context *h)
Free any data that may have been allocated in the H264 context like SPS, PPS etc. ...
qpel_mc_func put_h264_qpel_pixels_tab[4][16]
#define FIELD_OR_MBAFF_PICTURE
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
int8_t * qscale_table
QP table.
enum PixelFormat * pix_fmts
array of supported pixel formats, or NULL if unknown, array is terminated by -1
qpel_mc_func(* qpel_avg)[16]
void ff_h264_filter_mb_fast(H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
void(* clear_blocks)(DCTELEM *blocks)
struct MpegEncContext * owner2
pointer to the MpegEncContext that allocated this picture
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
struct AVCodecContext * avctx
static void idr(H264Context *h)
instantaneous decoder refresh.
static int field_end(H264Context *h, int in_setup)
int mb_height
pic_height_in_map_units_minus1 + 1
qpel_mc_func avg_h264_qpel_pixels_tab[4][16]
static const uint8_t dequant4_coeff_init[6][3]
#define hl_decode_mb_simple(sh, bits)
Process a macroblock; this case avoids checks for expensive uncommon cases.
Common internal API header.
int mb_stride
mb_width + 1; used for some arrays to allow simple addressing of the left & top MBs without out-of-bounds accesses (sig11) ...
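A sketch of the addressing this enables, ignoring MBAFF/field adjustments; mb_x and mb_y are assumed macroblock coordinates:
/* With mb_stride = mb_width + 1 there is a spare column, so the left and
 * top neighbours of any macroblock map to valid array slots. */
int mb_xy   = mb_x + mb_y * s->mb_stride;
int left_xy = mb_xy - 1;
int top_xy  = mb_xy - s->mb_stride;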
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
static av_cold void flush(AVCodecContext *avctx)
Flush (reset) the frame ID after seeking.
uint16_t * slice_table_base
int log2_max_frame_num
log2_max_frame_num_minus4 + 4
H.264 / AVC / MPEG4 part10 motion vector prediction.
Picture last_picture
copy of the previous picture structure.
uint8_t * obmc_scratchpad
Picture * last_picture_ptr
pointer to the previous picture.
static int execute_decode_slices(H264Context *h, int context_count)
Call decode_slice() for each context.
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
int cur_chroma_format_idc
#define CODEC_FLAG2_CHUNKS
Input bitstream might be truncated at packet boundaries instead of only at frame boundaries...
enum AVDiscard skip_loop_filter
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
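A minimal writer sketch pairing this with put_bits() and the flush_put_bits() entry above; outbuf, outbuf_size and nal_type are assumed caller-provided values:
PutBitContext pb;
init_put_bits(&pb, outbuf, outbuf_size);
put_bits(&pb, 1, 1);          /* one flag bit */
put_bits(&pb, 5, nal_type);   /* a five-bit field */
flush_put_bits(&pb);          /* pad the final byte with zero bits */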
int sei_ct_type
Bit set of clock types for fields/frames in picture timing SEI message.
int bit_depth_luma
bit_depth_luma_minus8 + 8
void(* pred4x4_add[2])(uint8_t *pix, const DCTELEM *block, int stride)
static enum PixelFormat hwaccel_pixfmt_list_h264_jpeg_420[]
int prev_poc_lsb
poc_lsb of the last reference pic for POC type 0
#define PIX_FMT_YUV444P10
#define FF_DEBUG_STARTCODE
void ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size)
static av_always_inline void backup_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int simple)
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
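A sketch of slice threading built on this callback; decode_slice_worker, SliceContext, decode_one_slice and slice_ctx are hypothetical names, but the worker signature matches the callback above:
static int decode_slice_worker(AVCodecContext *avctx, void *arg)
{
    SliceContext *sl = arg;       /* hypothetical per-slice state */
    return decode_one_slice(sl);  /* hypothetical helper */
}

/* Dispatch slice_count independent jobs, each sizeof(*slice_ctx) bytes apart;
 * per-job return codes are not collected here (ret is NULL). */
avctx->execute(avctx, decode_slice_worker, slice_ctx,
               NULL, slice_count, sizeof(*slice_ctx));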
int top_field_first
If the content is interlaced, whether the top field is displayed first.
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
Print debugging info for the given picture.
void(* h264_idct_add16)(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[15 *8])
uint8_t non_zero_count_cache[15 *8]
non zero coeff count cache.
int ff_h264_decode_extradata(H264Context *h)
uint8_t (*top_borders[2])[(16 * 3) * 2]
int resync_mb_y
y position of last resync marker
struct AVCodecInternal * internal
Private context used for internal data.
void ff_init_cabac_states(CABACContext *c)
static int fill_filter_caches(H264Context *h, int mb_type)
#define CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
void(* add_pixels4)(uint8_t *pixels, DCTELEM *block, int line_size)
Picture next_picture
copy of the next picture structure.
int key_frame
1 -> keyframe, 0 -> not
int linesize
line size, in bytes, may be different from width
int current_slice
current slice number, used to initialize slice_num of each thread/context
void(* pred8x8l[9+3])(uint8_t *src, int topleft, int topright, int stride)
int mb_width
pic_width_in_mbs_minus1 + 1
#define FF_PROFILE_H264_HIGH_444_INTRA
const uint8_t * field_scan_q0
static void free_tables(H264Context *h, int free_rbsp)
#define CODEC_FLAG2_FAST
Allow non-spec-compliant speedup tricks.
int ff_h264_fill_default_ref_list(H264Context *h)
Fill the default_ref_list.
int flags
AVCodecContext.flags (HQ, MV4, ...)
uint8_t field_scan8x8[64]
int workaround_bugs
workaround bugs in encoders which cannot be detected automatically
#define copy_fields(to, from, start_field, end_field)
int chroma_weight_flag[2]
7.4.3.2 chroma_weight_lX_flag
static av_always_inline void mc_part_std(H264Context *h, int n, int square, int height, int delta, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int x_offset, int y_offset, qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put, qpel_mc_func *qpix_avg, h264_chroma_mc_func chroma_avg, int list0, int list1, int pixel_shift, int chroma_idc)
int8_t * intra4x4_pred_mode
static av_always_inline void dctcoef_set(DCTELEM *mb, int high_bit_depth, int index, int value)
void ff_thread_report_progress(AVFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
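A sketch of the producer/consumer pairing with ff_thread_await_progress(); cur_frame and ref_frame are assumed AVFrame pointers, and the progress values only need to be consistent between the two sides:
/* Producer side: the decoding thread publishes how far the current frame
 * has been reconstructed. */
ff_thread_report_progress(cur_frame, progress, 0);

/* Consumer side: a later thread blocks until the part of the reference it
 * is about to read has been reported as ready. */
ff_thread_await_progress(ref_frame, needed_progress, 0);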
HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the b...
discard all frames except keyframes
#define FF_PROFILE_H264_CONSTRAINED
#define CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
int mb_linesize
may be equal to s->linesize or s->linesize*2, for mbaff
static void av_noinline hl_decode_mb_444_simple(H264Context *h)
#define FF_PROFILE_H264_CONSTRAINED_BASELINE
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
int deblocking_filter
disable_deblocking_filter_idc with 1<->0
void av_log_missing_feature(void *avc, const char *feature, int want_sample)
Log a generic warning message about a missing feature.
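A usage sketch; the feature string is illustrative, and the last argument selects whether a sample file is requested as well:
/* Illustrative only: report an unimplemented bitstream feature and ask
 * for a sample (want_sample = 1). */
av_log_missing_feature(avctx, "SP/SI slices", 1);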
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function for encode/decode called after coding/decoding the header and before a frame is code...
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
uint8_t(* non_zero_count)[48]
#define FF_PROFILE_H264_HIGH_10
unsigned int crop_bottom
frame_cropping_rect_bottom_offset
int uvlinesize
line size, for chroma in bytes, may be different from width
int sei_recovery_frame_cnt
recovery_frame_cnt from SEI message
av_cold int ff_h264_decode_end(AVCodecContext *avctx)
static const uint8_t zigzag_scan8x8_cavlc[64]
int strict_std_compliance
strictly follow the standard (MPEG4, ...).
#define FF_QSCALE_TYPE_H264
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
unsigned int rbsp_buffer_size[2]
Context Adaptive Binary Arithmetic Coder.
int8_t ref_cache[2][5 *8]
int mb_mbaff
mb_aff_frame && mb_field_decoding_flag
int short_ref_count
number of actual short term references
static const AVProfile profiles[]
enum AVColorSpace colorspace