46 #define MB_INTRA_VLC_BITS 9
52 static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
82 int topleft_mb_pos, top_mb_pos;
83 int stride_y, fieldtx;
98 v_dist = (16 - fieldtx) >> (fieldtx == 0);
122 v_dist = fieldtx ? 15 : 8;
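/* Both expressions above yield v_dist = 15 when fieldtx is set and 8 otherwise:
 * with the field transform the two fields of the macroblock are interleaved
 * line by line, so vertically adjacent blocks of the same field end up roughly
 * twice as far apart in memory. (Reading of the two lines above only.) */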
144 #define inc_blk_idx(idx) do { \
146 if (idx >= v->n_allocated_blks) \
165 for (j = 0; j < 2; j++) {
198 for (j = 0; j < 2; j++) {
215 for (j = 0; j < 2; j++) {
231 for (j = 0; j < 2; j++) {
242 for (j = 0; j < 2; j++) {
335 uint8_t *srcY, *srcU, *srcV;
336 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
345 mx = s->mv[dir][0][0];
346 my = s->mv[dir][0][1];
354 uvmx = (mx + ((mx & 3) == 3)) >> 1;
355 uvmy = (my + ((my & 3) == 3)) >> 1;
367 uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
368 uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
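/* Chroma MV derivation as shown above: the luma MV is halved, with quarter-pel
 * values ending in 3 biased by +1 before the halving; with fastuvmc the result
 * is additionally rounded towards zero to the nearest even (half-pel) position,
 * e.g. mx = 7 -> uvmx = (7 + 1) >> 1 = 4, and uvmx = 5 -> 4, uvmx = -5 -> -4. */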
398 if (!srcY || !srcU) {
403 src_x = s->mb_x * 16 + (mx >> 2);
404 src_y = s->mb_y * 16 + (my >> 2);
405 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
406 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
409 src_x = av_clip( src_x, -16, s->mb_width * 16);
410 src_y = av_clip( src_y, -16, s->mb_height * 16);
411 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
412 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
420 srcY += src_y * s->linesize + src_x;
439 || (unsigned)(src_y - s->mspel) > v_edge_pos - (my&3) - 16 - s->mspel * 3) {
449 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
451 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
460 for (j = 0; j < 17 + s->mspel * 2; j++) {
461 for (i = 0; i < 17 + s->mspel * 2; i++)
462 src[i] = ((src[i] - 128) >> 1) + 128;
467 for (j = 0; j < 9; j++) {
468 for (i = 0; i < 9; i++) {
469 src[i] = ((src[i] - 128) >> 1) + 128;
470 src2[i] = ((src2[i] - 128) >> 1) + 128;
482 for (j = 0; j < 17 + s->mspel * 2; j++) {
483 for (i = 0; i < 17 + s->mspel * 2; i++)
484 src[i] = v->luty[src[i]];
489 for (j = 0; j < 9; j++) {
490 for (i = 0; i < 9; i++) {
491 src[i] = v->lutuv[src[i]];
492 src2[i] = v->lutuv[src2[i]];
509 dxy = ((my & 3) << 2) | (mx & 3);
516 dxy = (my & 2) | ((mx & 2) >> 1);
525 uvmx = (uvmx & 3) << 1;
526 uvmy = (uvmy & 3) << 1;
539 if (c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
542 if (c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
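/* median4() above returns the mean of the two middle values of its four
 * arguments, e.g. median4(1, 9, 4, 7) = (FFMIN(9, 7) + FFMAX(1, 4)) / 2 = 5. */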
554 int dxy, mx, my, src_x, src_y;
564 mx = s->mv[dir][n][0];
565 my = s->mv[dir][n][1];
589 int same_count = 0, opp_count = 0, k;
590 int chosen_mv[2][4][2], f;
592 for (k = 0; k < 4; k++) {
594 chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
595 chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
599 f = opp_count > same_count;
600 switch (f ? opp_count : same_count) {
602 tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
603 chosen_mv[f][2][0], chosen_mv[f][3][0]);
604 ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
605 chosen_mv[f][2][1], chosen_mv[f][3][1]);
608 tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
609 ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
612 tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
613 ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
618 for (k = 0; k < 4; k++)
626 qx = (s->mb_x * 16) + (mx >> 2);
627 qy = (s->mb_y * 8) + (my >> 3);
632 mx -= 4 * (qx - width);
635 else if (qy > height + 1)
636 my -= 8 * (qy - height - 1);
640 off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
642 off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
646 src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
648 src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
650 src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
653 src_x = av_clip(src_x, -16, s->mb_width * 16);
654 src_y = av_clip(src_y, -16, s->mb_height * 16);
667 srcY += src_y * s->linesize + src_x;
671 if (fieldmv && !(src_y & 1))
673 if (fieldmv && (src_y & 1) && src_y < 4)
678 || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
682 9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
683 src_x - s->mspel, src_y - (s->mspel << fieldmv),
692 for (j = 0; j < 9 + s->mspel * 2; j++) {
693 for (i = 0; i < 9 + s->mspel * 2; i++)
694 src[i] = ((src[i] - 128) >> 1) + 128;
704 for (j = 0; j < 9 + s->mspel * 2; j++) {
705 for (i = 0; i < 9 + s->mspel * 2; i++)
706 src[i] = v->luty[src[i]];
714 dxy = ((my & 3) << 2) | (mx & 3);
717 dxy = (my & 2) | ((mx & 2) >> 1);
728 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
730 idx = ((a[3] != flag) << 3)
731 | ((a[2] != flag) << 2)
732 | ((a[1] != flag) << 1)
735 *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
736 *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
738 } else if (count[idx] == 1) {
741 *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
742 *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
745 *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
746 *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
749 *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
750 *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
753 *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
754 *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
757 } else if (count[idx] == 2) {
759 for (i = 0; i < 3; i++)
764 for (i = t1 + 1; i < 4; i++)
769 *tx = (mvx[t1] + mvx[t2]) / 2;
770 *ty = (mvy[t1] + mvy[t2]) / 2;
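/* get_chroma_mv() above: of the four luma MVs, only those whose field flag
 * a[i] matches 'flag' (the dominant field) are kept; count[idx] counts them,
 * and the survivors are reduced to one chroma prediction by median4 (4 MVs),
 * mid_pred (3) or a simple average (2). */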
784 uint8_t *srcU, *srcV;
785 int uvmx, uvmy, uvsrc_x, uvsrc_y;
786 int k, tx = 0, ty = 0;
787 int mvx[4], mvy[4], intra[4], mv_f[4];
797 for (k = 0; k < 4; k++) {
798 mvx[k] = s->mv[dir][k][0];
799 mvy[k] = s->mv[dir][k][1];
816 if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
818 valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
826 uvmx = (tx + ((tx & 3) == 3)) >> 1;
827 uvmy = (ty + ((ty & 3) == 3)) >> 1;
833 uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
834 uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
838 uvmy += 2 - 4 * chroma_ref_type;
840 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
841 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
844 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
845 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
875 if (chroma_ref_type) {
884 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
885 || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
887 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
890 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
902 for (j = 0; j < 9; j++) {
903 for (i = 0; i < 9; i++) {
904 src[i] = ((src[i] - 128) >> 1) + 128;
905 src2[i] = ((src2[i] - 128) >> 1) + 128;
918 for (j = 0; j < 9; j++) {
919 for (i = 0; i < 9; i++) {
920 src[i] = v->lutuv[src[i]];
921 src2[i] = v->lutuv[src2[i]];
930 uvmx = (uvmx & 3) << 1;
931 uvmy = (uvmy & 3) << 1;
947 uint8_t *srcU, *srcV;
948 int uvsrc_x, uvsrc_y;
949 int uvmx_field[4], uvmy_field[4];
952 static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
953 int v_dist = fieldmv ? 1 : 4;
961 for (i = 0; i < 4; i++) {
963 uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
966 uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
968 uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
971 for (i = 0; i < 4; i++) {
972 off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
973 uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
974 uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
980 uvmx_field[i] = (uvmx_field[i] & 3) << 1;
981 uvmy_field[i] = (uvmy_field[i] & 3) << 1;
983 if (fieldmv && !(uvsrc_y & 1))
985 if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
988 || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
989 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
990 || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
992 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
995 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1003 uint8_t *src, *src2;
1007 for (j = 0; j < 5; j++) {
1008 for (i = 0; i < 5; i++) {
1009 src[i] = v->lutuv[src[i]];
1010 src2[i] = v->lutuv[src2[i]];
1038 #define GET_MQUANT() \
1039 if (v->dquantfrm) { \
1041 if (v->dqprofile == DQPROFILE_ALL_MBS) { \
1042 if (v->dqbilevel) { \
1043 mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
1045 mqdiff = get_bits(gb, 3); \
1047 mquant = v->pq + mqdiff; \
1049 mquant = get_bits(gb, 5); \
1052 if (v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1053 edges = 1 << v->dqsbedge; \
1054 else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1055 edges = (3 << v->dqsbedge) % 15; \
1056 else if (v->dqprofile == DQPROFILE_FOUR_EDGES) \
1058 if ((edges&1) && !s->mb_x) \
1059 mquant = v->altpq; \
1060 if ((edges&2) && s->first_slice_line) \
1061 mquant = v->altpq; \
1062 if ((edges&4) && s->mb_x == (s->mb_width - 1)) \
1063 mquant = v->altpq; \
1064 if ((edges&8) && s->mb_y == (s->mb_height - 1)) \
1065 mquant = v->altpq; \
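/* GET_MQUANT() (excerpted above): with DQPROFILE_ALL_MBS the MB quantizer is
 * either switched between PQUANT and ALTPQUANT by one bit (dqbilevel) or sent
 * as a 3-bit MQDIFF, with the escape value falling back to a full 5-bit
 * quantizer; with the edge profiles ALTPQUANT simply replaces PQUANT on the
 * picture edges selected by dqprofile/dqsbedge. */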
1075 #define GET_MVDATA(_dmv_x, _dmv_y) \
1076 index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
1077 VC1_MV_DIFF_VLC_BITS, 2); \
1079 mb_has_coeffs = 1; \
1082 mb_has_coeffs = 0; \
1085 _dmv_x = _dmv_y = 0; \
1086 } else if (index == 35) { \
1087 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1088 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1089 } else if (index == 36) { \
1094 index1 = index % 6; \
1095 if (!s->quarter_sample && index1 == 5) val = 1; \
1097 if (size_table[index1] - val > 0) \
1098 val = get_bits(gb, size_table[index1] - val); \
1100 sign = 0 - (val&1); \
1101 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1103 index1 = index / 6; \
1104 if (!s->quarter_sample && index1 == 5) val = 1; \
1106 if (size_table[index1] - val > 0) \
1107 val = get_bits(gb, size_table[index1] - val); \
1109 sign = 0 - (val & 1); \
1110 _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign; \
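/* The '(sign ^ x) - sign' pattern above is a branchless conditional negate:
 * sign is 0 or -1 depending on the low bit of val, so with an offset of 0 a
 * decoded val of 4 gives _dmv = +2 while val = 5 gives _dmv = -2. */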
1114 int *dmv_y, int *pred_flag)
1117 int extend_x = 0, extend_y = 0;
1121 const int* offs_tab;
1138 extend_x = extend_y = 1;
1146 *pred_flag = *dmv_y & 1;
1147 *dmv_y = (*dmv_y + *pred_flag) >> 1;
1155 index1 = (index + 1) % 9;
1157 val = get_bits(gb, index1 + extend_x);
1158 sign = 0 -(val & 1);
1159 *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
1166 index1 = (index + 1) / 9;
1167 if (index1 > v->numref) {
1169 sign = 0 - (val & 1);
1170 *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
1174 *pred_flag = index1 & 1;
1180 int scaledvalue, refdist;
1181 int scalesame1, scalesame2;
1182 int scalezone1_x, zone1offset_x;
1199 if (FFABS(n) < scalezone1_x)
1200 scaledvalue = (n * scalesame1) >> 8;
1203 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1205 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1213 int scaledvalue, refdist;
1214 int scalesame1, scalesame2;
1215 int scalezone1_y, zone1offset_y;
1232 if (FFABS(n) < scalezone1_y)
1233 scaledvalue = (n * scalesame1) >> 8;
1236 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1238 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
1243 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1245 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1250 int scalezone1_x, zone1offset_x;
1251 int scaleopp1, scaleopp2, brfd;
1263 if (FFABS(n) < scalezone1_x)
1264 scaledvalue = (n * scaleopp1) >> 8;
1267 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1269 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1277 int scalezone1_y, zone1offset_y;
1278 int scaleopp1, scaleopp2, brfd;
1290 if (FFABS(n) < scalezone1_y)
1291 scaledvalue = (n * scaleopp1) >> 8;
1294 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1296 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1300 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1302 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1309 int brfd, scalesame;
1323 n = (n * scalesame >> 8) << hpel;
1330 int refdist, scaleopp;
1347 n = (n * scaleopp >> 8) << hpel;
1354 int mv1, int r_x, int r_y, uint8_t* is_intra,
1355 int pred_flag, int dir)
1362 int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
1363 int opposit, a_f, b_f, c_f;
1364 int16_t field_predA[2];
1365 int16_t field_predB[2];
1366 int16_t field_predC[2];
1367 int a_valid, b_valid, c_valid;
1368 int hybridmv_thresh, y_bias = 0;
1416 off = (s->mb_x > 0) ? -1 : 1;
1431 b_valid = a_valid && (s->mb_width > 1);
1432 c_valid = s->mb_x || (n == 1 || n == 3);
1434 a_valid = a_valid && !is_intra[xy - wrap];
1435 b_valid = b_valid && !is_intra[xy - wrap + off];
1436 c_valid = c_valid && !is_intra[xy - 1];
1441 num_oppfield += a_f;
1442 num_samefield += 1 - a_f;
1443 field_predA[0] = A[0];
1444 field_predA[1] = A[1];
1446 field_predA[0] = field_predA[1] = 0;
1451 num_oppfield += b_f;
1452 num_samefield += 1 - b_f;
1453 field_predB[0] = B[0];
1454 field_predB[1] = B[1];
1456 field_predB[0] = field_predB[1] = 0;
1461 num_oppfield += c_f;
1462 num_samefield += 1 - c_f;
1463 field_predC[0] = C[0];
1464 field_predC[1] = C[1];
1466 field_predC[0] = field_predC[1] = 0;
1471 if (num_samefield <= num_oppfield)
1472 opposit = 1 - pred_flag;
1474 opposit = pred_flag;
1478 if (a_valid && !a_f) {
1479 field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
1480 field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
1482 if (b_valid && !b_f) {
1483 field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
1484 field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
1486 if (c_valid && !c_f) {
1487 field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
1488 field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
1493 if (a_valid && a_f) {
1494 field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
1495 field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
1497 if (b_valid && b_f) {
1498 field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
1499 field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
1501 if (c_valid && c_f) {
1502 field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
1503 field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
1510 px = field_predA[0];
1511 py = field_predA[1];
1512 } else if (c_valid) {
1513 px = field_predC[0];
1514 py = field_predC[1];
1515 } else if (b_valid) {
1516 px = field_predB[0];
1517 py = field_predB[1];
1523 if (num_samefield + num_oppfield > 1) {
1524 px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
1525 py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
1531 qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1532 qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1536 if (qx + px < -60) px = -60 - qx;
1537 if (qy + py < -60) py = -60 - qy;
1539 if (qx + px < -28) px = -28 - qx;
1540 if (qy + py < -28) py = -28 - qy;
1542 if (qx + px > X) px = X - qx;
1543 if (qy + py > Y) py = Y - qy;
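/* MV pull-back (above): qx/qy is the block position in quarter-pel units and
 * the predictor is clamped so the referenced block cannot leave the picture
 * by more than its own size minus one pixel, i.e. at most 15 pixels for a
 * 16x16 block (the -60 bounds) and 7 pixels for an 8x8 block (the -28 bounds). */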
1548 hybridmv_thresh = 32;
1549 if (a_valid && c_valid) {
1550 if (is_intra[xy - wrap])
1553 sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
1554 if (sum > hybridmv_thresh) {
1556 px = field_predA[0];
1557 py = field_predA[1];
1559 px = field_predC[0];
1560 py = field_predC[1];
1563 if (is_intra[xy - 1])
1566 sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
1567 if (sum > hybridmv_thresh) {
1569 px = field_predA[0];
1570 py = field_predA[1];
1572 px = field_predC[0];
1573 py = field_predC[1];
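/* Hybrid MV prediction (block above): when both the top (A) and left (C)
 * candidates exist and the median prediction is more than hybridmv_thresh
 * quarter-pel units (L1 distance) away from A or C, the predictor falls back
 * to A or C directly; in the full function the choice between the two is
 * signalled by an explicit bit in the bitstream. */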
1606 int mvn, int r_x, int r_y, uint8_t* is_intra)
1610 int A[2], B[2], C[2];
1612 int a_valid = 0, b_valid = 0, c_valid = 0;
1613 int field_a, field_b, field_c;
1614 int total_valid, num_samefield, num_oppfield;
1615 int pos_c, pos_b, n_adj;
1643 off = ((n == 0) || (n == 1)) ? 1 : -1;
1645 if (s->mb_x || (n == 1) || (n == 3)) {
1665 B[0] = B[1] = C[0] = C[1] = 0;
1673 n_adj = (n & 2) | (n & 1);
1727 total_valid = a_valid + b_valid + c_valid;
1729 if (!s->mb_x && !(n == 1 || n == 3)) {
1734 B[0] = B[1] = C[0] = C[1] = 0;
1741 if (total_valid >= 2) {
1744 } else if (total_valid) {
1745 if (a_valid) { px = A[0]; py = A[1]; }
1746 if (b_valid) { px = B[0]; py = B[1]; }
1747 if (c_valid) { px = C[0]; py = C[1]; }
1753 field_a = (A[1] & 4) ? 1 : 0;
1757 field_b = (B[1] & 4) ? 1 : 0;
1761 field_c = (C[1] & 4) ? 1 : 0;
1765 num_oppfield = field_a + field_b + field_c;
1766 num_samefield = total_valid - num_oppfield;
1767 if (total_valid == 3) {
1768 if ((num_samefield == 3) || (num_oppfield == 3)) {
1771 } else if (num_samefield >= num_oppfield) {
1774 px = !field_a ? A[0] : B[0];
1775 py = !field_a ? A[1] : B[1];
1777 px = field_a ? A[0] : B[0];
1778 py = field_a ? A[1] : B[1];
1780 } else if (total_valid == 2) {
1781 if (num_samefield >= num_oppfield) {
1782 if (!field_a && a_valid) {
1785 } else if (!field_b && b_valid) {
1788 } else if (c_valid) {
1793 if (field_a && a_valid) {
1796 } else if (field_b && b_valid) {
1799 } else if (c_valid) {
1804 } else if (total_valid == 1) {
1805 px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
1806 py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
1821 } else if (mvn == 2) {
1824 s->mv[0][n + 1][0] = s->mv[0][n][0];
1825 s->mv[0][n + 1][1] = s->mv[0][n][1];
1835 uint8_t *srcY, *srcU, *srcV;
1836 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1843 mx = s->mv[1][0][0];
1844 my = s->mv[1][0][1];
1845 uvmx = (mx + ((mx & 3) == 3)) >> 1;
1846 uvmy = (my + ((my & 3) == 3)) >> 1;
1853 uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
1854 uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
1860 src_x = s->mb_x * 16 + (mx >> 2);
1861 src_y = s->mb_y * 16 + (my >> 2);
1862 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1863 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1866 src_x = av_clip( src_x, -16, s->mb_width * 16);
1867 src_y = av_clip( src_y, -16, s->mb_height * 16);
1868 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1869 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1877 srcY += src_y * s->linesize + src_x;
1895 || (unsigned)(src_y - s->mspel) > v_edge_pos - (my & 3) - 16 - s->mspel * 3) {
1905 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
1907 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
1913 uint8_t *src, *src2;
1916 for (j = 0; j < 17 + s->mspel * 2; j++) {
1917 for (i = 0; i < 17 + s->mspel * 2; i++)
1918 src[i] = ((src[i] - 128) >> 1) + 128;
1923 for (j = 0; j < 9; j++) {
1924 for (i = 0; i < 9; i++) {
1925 src[i] = ((src[i] - 128) >> 1) + 128;
1926 src2[i] = ((src2[i] - 128) >> 1) + 128;
1944 dxy = ((my & 3) << 2) | (mx & 3);
1951 dxy = (my & 2) | ((mx & 2) >> 1);
1961 uvmx = (uvmx & 3) << 1;
1962 uvmy = (uvmy & 3) << 1;
1976 #if B_FRACTION_DEN==256
1980 return 2 * ((value * n + 255) >> 9);
1981 return (value * n + 128) >> 8;
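/* Both branches above compute value * n / 256 (B_FRACTION_DEN) with rounding;
 * the first one additionally forces the result to be even so it stays on a
 * half-pel position. Example: (36 * 128 + 128) >> 8 = 18. */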
1992 int qs, int qs_last)
2000 return (value * n + 255) >> 9;
2002 return (value * n + 128) >> 8;
2008 int direct, int mode)
2037 int direct, int mvtype)
2045 const uint8_t *is_intra = v->mb_type[0];
2072 s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2073 s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2074 s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2075 s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2091 if (!s->mb_x) C[0] = C[1] = 0;
2100 } else if (s->mb_x) {
2110 qx = (s->mb_x << 5);
2111 qy = (s->mb_y << 5);
2114 if (qx + px < -28) px = -28 - qx;
2115 if (qy + py < -28) py = -28 - qy;
2116 if (qx + px > X) px = X - qx;
2117 if (qy + py > Y) py = Y - qy;
2119 qx = (s->mb_x << 6);
2120 qy = (s->mb_y << 6);
2123 if (qx + px < -60) px = -60 - qx;
2124 if (qy + py < -60) py = -60 - qy;
2125 if (qx + px > X) px = X - qx;
2126 if (qy + py > Y) py = Y - qy;
2131 if (is_intra[xy - wrap])
2144 if (is_intra[xy - 2])
2160 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2161 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
2179 } else if (s->mb_x) {
2189 qx = (s->mb_x << 5);
2190 qy = (s->mb_y << 5);
2193 if (qx + px < -28) px = -28 - qx;
2194 if (qy + py < -28) py = -28 - qy;
2195 if (qx + px > X) px = X - qx;
2196 if (qy + py > Y) py = Y - qy;
2198 qx = (s->mb_x << 6);
2199 qy = (s->mb_y << 6);
2202 if (qx + px < -60) px = -60 - qx;
2203 if (qy + py < -60) py = -60 - qy;
2204 if (qx + px > X) px = X - qx;
2205 if (qy + py > Y) py = Y - qy;
2210 if (is_intra[xy - wrap])
2223 if (is_intra[xy - 2])
2240 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2241 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
2256 int total_opp, k, f;
2271 f = (total_opp > 2) ? 1 : 0;
2273 s->mv[0][0][0] = s->mv[0][0][1] = 0;
2274 s->mv[1][0][0] = s->mv[1][0][1] = 0;
2278 for (k = 0; k < 4; k++) {
2295 if (n == 3 || mv1) {
2300 if (n == 3 || mv1) {
2316 int16_t **dc_val_ptr, int *dir_ptr)
2320 static const uint16_t dcpred[32] = {
2321 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2322 114, 102, 93, 85, 79, 73, 68, 64,
2323 60, 57, 54, 51, 49, 47, 45, 43,
2324 41, 39, 38, 37, 35, 34, 33
2338 b = dc_val[ - 1 - wrap];
2339 a = dc_val[ - wrap];
2341 if (pq < 9 || !overlap) {
2344 b = a = dcpred[scale];
2345 if (s->mb_x == 0 && (n != 1 && n != 3))
2346 b = c = dcpred[scale];
2351 if (s->mb_x == 0 && (n != 1 && n != 3))
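/* Prediction direction comes from the DC gradient around the block: b is the
 * top-left, a the top and c the left neighbour, and the |a - b| <= |b - c|
 * test below picks the left neighbour as predictor when it holds and the top
 * neighbour otherwise (dir 0 = left, 1 = top). */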
2355 if (abs(a - b) <= abs(b - c)) {
2364 *dc_val_ptr = &dc_val[0];
2381 int a_avail, int c_avail,
2382 int16_t **dc_val_ptr, int *dir_ptr)
2397 b = dc_val[ - 1 - wrap];
2398 a = dc_val[ - wrap];
2402 if (dqscale_index < 0)
2404 if (c_avail && (n != 1 && n != 3)) {
2409 if (a_avail && (n != 2 && n != 3)) {
2414 if (a_avail && c_avail && (n != 3)) {
2425 if (a_avail && c_avail) {
2426 if (abs(a - b) <= abs(b - c)) {
2433 } else if (a_avail) {
2436 } else if (c_avail) {
2445 *dc_val_ptr = &dc_val[0];
2458 uint8_t **coded_block_ptr)
2494 int *value, int codingset)
2560 int coded, int codingset)
2564 int dc_pred_dir = 0;
2567 int16_t *ac_val, *ac_val2;
2581 if (dcdiff == 119 ) {
2584 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2588 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2589 else if (v->pq == 2)
2590 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2615 int last = 0, skip, value;
2616 const uint8_t *zz_table;
2642 block[zz_table[i++]] = value;
2648 for (k = 1; k < 8; k++)
2651 for (k = 1; k < 8; k++)
2656 for (k = 1; k < 8; k++) {
2662 for (k = 1; k < 64; k++)
2666 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2680 memset(ac_val2, 0, 16 * 2);
2684 memcpy(ac_val2, ac_val, 8 * 2);
2688 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2694 for (k = 1; k < 8; k++) {
2700 for (k = 1; k < 8; k++) {
2723 int coded, int codingset, int mquant)
2727 int dc_pred_dir = 0;
2730 int16_t *ac_val, *ac_val2;
2749 if (dcdiff == 119 ) {
2751 if (mquant == 1) dcdiff = get_bits(gb, 10);
2752 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2756 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2757 else if (mquant == 2)
2758 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2779 if (!a_avail && !c_avail)
2784 scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2792 if ( dc_pred_dir && c_avail && mb_pos)
2794 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2796 if ( dc_pred_dir && n == 1)
2798 if (!dc_pred_dir && n == 2)
2804 int last = 0, skip, value;
2805 const uint8_t *zz_table;
2829 block[zz_table[i++]] = value;
2835 if (q2 && q1 != q2) {
2836 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2837 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2842 for (k = 1; k < 8; k++)
2845 for (k = 1; k < 8; k++)
2850 for (k = 1; k < 8; k++)
2853 for (k = 1; k < 8; k++)
2859 for (k = 1; k < 8; k++) {
2865 for (k = 1; k < 64; k++)
2869 block[k] += (block[k] < 0) ? -mquant : mquant;
2872 if (use_pred) i = 63;
2876 memset(ac_val2, 0, 16 * 2);
2879 memcpy(ac_val2, ac_val, 8 * 2);
2880 if (q2 && q1 != q2) {
2881 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2882 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2885 for (k = 1; k < 8; k++)
2886 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
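/* AC prediction rescale: when the neighbouring block used a different
 * quantizer, its stored AC values are scaled by q2/q1;
 * ff_vc1_dqscale[q1 - 1] is roughly (1 << 18) / q1, so the expression above
 * is x * q2 / q1 with rounding (0x20000 = 1 << 17). */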
2891 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2892 if (q2 && q1 != q2) {
2893 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2894 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2897 for (k = 1; k < 8; k++)
2898 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2906 for (k = 1; k < 8; k++) {
2912 for (k = 1; k < 8; k++) {
2935 int coded, int mquant, int codingset)
2939 int dc_pred_dir = 0;
2942 int16_t *ac_val, *ac_val2;
2953 mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
2970 if (dcdiff == 119 ) {
2972 if (mquant == 1) dcdiff = get_bits(gb, 10);
2973 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2977 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2978 else if (mquant == 2)
2979 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2986 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
3001 if (!a_avail) dc_pred_dir = 1;
3002 if (!c_avail) dc_pred_dir = 0;
3003 if (!a_avail && !c_avail) use_pred = 0;
3007 scale = mquant * 2 + v->halfpq;
3015 if (dc_pred_dir && c_avail && mb_pos)
3017 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
3019 if ( dc_pred_dir && n == 1)
3021 if (!dc_pred_dir && n == 2)
3023 if (n == 3) q2 = q1;
3026 int last = 0, skip, value;
3035 block[v->zz_8x8[0][i++]] = value;
3039 block[v->zz_8x8[2][i++]] = value;
3041 block[v->zz_8x8[3][i++]] = value;
3043 block[v->zzi_8x8[i++]] = value;
3051 if (q2 && q1 != q2) {
3052 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3053 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3058 for (k = 1; k < 8; k++)
3061 for (k = 1; k < 8; k++)
3066 for (k = 1; k < 8; k++)
3069 for (k = 1; k < 8; k++)
3075 for (k = 1; k < 8; k++) {
3081 for (k = 1; k < 64; k++)
3085 block[k] += (block[k] < 0) ? -mquant : mquant;
3088 if (use_pred) i = 63;
3092 memset(ac_val2, 0, 16 * 2);
3095 memcpy(ac_val2, ac_val, 8 * 2);
3096 if (q2 && q1 != q2) {
3097 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3098 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3101 for (k = 1; k < 8; k++)
3102 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3107 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3108 if (q2 && q1 != q2) {
3109 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3110 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3113 for (k = 1; k < 8; k++)
3114 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3122 for (k = 1; k < 8; k++) {
3128 for (k = 1; k < 8; k++) {
3145 int mquant, int ttmb, int first_block,
3146 uint8_t *dst, int linesize, int skip_block,
3153 int scale, off, idx, last, skip, value;
3154 int ttblk = ttmb & 7;
3166 && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
3176 scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
3201 block[idx] = value * scale;
3203 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3215 pat = ~subblkpat & 0xF;
3216 for (j = 0; j < 4; j++) {
3217 last = subblkpat & (1 << (3 - j));
3219 off = (j & 1) * 4 + (j & 2) * 16;
3231 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3233 if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
3242 pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
3243 for (j = 0; j < 2; j++) {
3244 last = subblkpat & (1 << (1 - j));
3256 block[idx] = value * scale;
3258 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3260 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3269 pat = ~(subblkpat * 5) & 0xF;
3270 for (j = 0; j < 2; j++) {
3271 last = subblkpat & (1 << (1 - j));
3283 block[idx] = value * scale;
3285 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3287 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3297 *ttmb_out |= ttblk << (n * 4);
3310 block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
3312 block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
3316 if (block_num > 3) {
3317 dst = s->dest[block_num - 3];
3319 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
3325 if (block_num > 3) {
3326 bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
3327 bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
3331 bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
3332 : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
3333 bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
3339 if (bottom_is_intra & 1 || block_is_intra & 1 ||
3340 mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
3343 idx = ((bottom_cbp >> 2) | block_cbp) & 3;
3355 dst -= 4 * linesize;
3358 idx = (block_cbp | (block_cbp >> 2)) & 3;
3374 block_cbp = mb_cbp >> (block_num * 4), right_cbp,
3376 block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
3380 if (block_num > 3) {
3381 dst = s->dest[block_num - 3] - 8 * linesize;
3383 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
3389 if (block_num > 3) {
3394 right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3395 : (mb_cbp >> ((block_num + 1) * 4));
3397 : (mb_is_intra >> ((block_num + 1) * 4));
3400 if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
3403 idx = ((right_cbp >> 1) | block_cbp) & 5;
3418 idx = (block_cbp | (block_cbp >> 1)) & 5;
3435 for (i = 0; i < 6; i++) {
3442 for (i = 0; i < 6; i++) {
3448 for (i = 0; i < 6; i++) {
3465 int ttmb = v->ttfrm;
3467 int mb_has_coeffs = 1;
3471 int first_block = 1;
3473 int skipped, fourmv;
3474 int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
3499 if (s->mb_intra && !mb_has_coeffs) {
3503 } else if (mb_has_coeffs) {
3519 for (i = 0; i < 6; i++) {
3522 val = ((cbp >> (5 - i)) & 1);
3523 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3530 if (i == 1 || i == 3 || s->mb_x)
3539 for (j = 0; j < 64; j++)
3540 s->block[i][j] <<= 1;
3548 block_cbp |= 0xF << (i << 2);
3549 block_intra |= 1 << i;
3554 block_cbp |= pat << (i << 2);
3555 if (!v->ttmbf && ttmb < 8)
3562 for (i = 0; i < 6; i++) {
3573 int intra_count = 0, coded_inter = 0;
3574 int is_intra[6], is_coded[6];
3577 for (i = 0; i < 6; i++) {
3578 val = ((cbp >> (5 - i)) & 1);
3593 is_coded[i] = mb_has_coeffs;
3596 is_intra[i] = (intra_count >= 3);
3603 coded_inter = !is_intra[i] & is_coded[i];
3607 if (!intra_count && !coded_inter)
3614 for (i = 0; i < 6; i++)
3627 if (!v->ttmbf && coded_inter)
3629 for (i = 0; i < 6; i++) {
3631 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3638 if (i == 1 || i == 3 || s->mb_x)
3647 for (j = 0; j < 64; j++)
3648 s->block[i][j] <<= 1;
3657 block_cbp |= 0xF << (i << 2);
3658 block_intra |= 1 << i;
3659 } else if (is_coded[i]) {
3661 first_block, s->dest[dst_idx] + off,
3665 block_cbp |= pat << (i << 2);
3666 if (!v->ttmbf && ttmb < 8)
3674 for (i = 0; i < 6; i++) {
3678 for (i = 0; i < 4; i++) {
3704 int ttmb = v->ttfrm;
3706 int mb_has_coeffs = 1;
3709 int first_block = 1;
3711 int skipped, fourmv = 0, twomv = 0;
3712 int block_cbp = 0, pat, block_tt = 0;
3713 int idx_mbmode = 0, mvbp;
3714 int stride_y, fieldtx;
3762 for (i = 0; i < 6; i++)
3775 for (i = 0; i < 6; i++) {
3778 val = ((cbp >> (5 - i)) & 1);
3783 if (i == 1 || i == 3 || s->mb_x)
3792 off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3814 for (i = 0; i < 6; i++)
3821 for (i = 0; i < 6; i++) {
3824 val = ((mvbp >> (3 - i)) & 1);
3830 } else if (i == 4) {
3862 if (!v->ttmbf && cbp)
3864 for (i = 0; i < 6; i++) {
3867 val = ((cbp >> (5 - i)) & 1);
3869 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3871 off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
3874 first_block, s->dest[dst_idx] + off,
3877 block_cbp |= pat << (i << 2);
3878 if (!v->ttmbf && ttmb < 8)
3886 for (i = 0; i < 6; i++) {
3912 int ttmb = v->ttfrm;
3914 int mb_has_coeffs = 1;
3917 int first_block = 1;
3920 int block_cbp = 0, pat, block_tt = 0;
3926 if (idx_mbmode <= 1) {
3937 mb_has_coeffs = idx_mbmode & 1;
3941 for (i = 0; i < 6; i++) {
3945 val = ((cbp >> (5 - i)) & 1);
3949 if (i == 1 || i == 3 || s->mb_x)
3957 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3966 if (idx_mbmode <= 5) {
3968 if (idx_mbmode & 1) {
3973 mb_has_coeffs = !(idx_mbmode & 2);
3976 for (i = 0; i < 6; i++) {
3978 dmv_x = dmv_y = pred_flag = 0;
3979 val = ((v->fourmvbp >> (3 - i)) & 1);
3988 mb_has_coeffs = idx_mbmode & 1;
3996 if (!v->ttmbf && cbp) {
4000 for (i = 0; i < 6; i++) {
4003 val = ((cbp >> (5 - i)) & 1);
4004 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4009 first_block, s->dest[dst_idx] + off,
4013 block_cbp |= pat << (i << 2);
4014 if (!v->ttmbf && ttmb < 8) ttmb = -1;
4034 int ttmb = v->ttfrm;
4035 int mb_has_coeffs = 0;
4038 int first_block = 1;
4040 int skipped, direct;
4041 int dmv_x[2], dmv_y[2];
4056 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4057 for (i = 0; i < 6; i++) {
4066 dmv_x[1] = dmv_x[0];
4067 dmv_y[1] = dmv_y[0];
4080 dmv_x[0] = dmv_y[0] = 0;
4084 for (i = 0; i < 6; i++)
4091 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4101 dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4103 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4105 if (!mb_has_coeffs && !s->mb_intra) {
4108 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4111 if (s->mb_intra && !mb_has_coeffs) {
4120 if (!mb_has_coeffs) {
4123 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4129 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4141 for (i = 0; i < 6; i++) {
4144 val = ((cbp >> (5 - i)) & 1);
4145 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4152 if (i == 1 || i == 3 || s->mb_x)
4161 for (j = 0; j < 64; j++)
4162 s->block[i][j] <<= 1;
4166 first_block, s->dest[dst_idx] + off,
4169 if (!v->ttmbf && ttmb < 8)
4186 int ttmb = v->ttfrm;
4187 int mb_has_coeffs = 0;
4189 int first_block = 1;
4192 int dmv_x[2], dmv_y[2], pred_flag[2];
4194 int idx_mbmode, interpmvp;
4200 if (idx_mbmode <= 1) {
4211 mb_has_coeffs = idx_mbmode & 1;
4215 for (i = 0; i < 6; i++) {
4218 val = ((cbp >> (5 - i)) & 1);
4223 if (i == 1 || i == 3 || s->mb_x)
4232 for (j = 0; j < 64; j++)
4233 s->block[i][j] <<= 1;
4234 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4247 if (idx_mbmode <= 5) {
4248 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4249 pred_flag[0] = pred_flag[1] = 0;
4274 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4275 dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
4279 mb_has_coeffs = !(idx_mbmode & 2);
4285 for (i = 0; i < 6; i++) {
4287 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4288 dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4289 val = ((v->fourmvbp >> (3 - i)) & 1);
4300 mb_has_coeffs = idx_mbmode & 1;
4308 if (!v->ttmbf && cbp) {
4312 for (i = 0; i < 6; i++) {
4315 val = ((cbp >> (5 - i)) & 1);
4316 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4321 first_block, s->dest[dst_idx] + off,
4324 if (!v->ttmbf && ttmb < 8)
4381 dst[0] = s->dest[0];
4382 dst[1] = dst[0] + 8;
4384 dst[3] = dst[2] + 8;
4385 dst[4] = s->dest[1];
4386 dst[5] = s->dest[2];
4398 for (k = 0; k < 6; k++) {
4399 val = ((cbp >> (5 - k)) & 1);
4406 cbp |= val << (5 - k);
4415 for (j = 0; j < 64; j++)
4416 s->block[k][j] <<= 1;
4420 for (j = 0; j < 64; j++)
4549 for (k = 0; k < 6; k++) {
4550 val = ((cbp >> (5 - k)) & 1);
4557 cbp |= val << (5 - k);
4665 if (apply_loop_filter) {
4799 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
4815 int effect_type, effect_flag;
4816 int effect_pcount1, effect_pcount2;
4817 int effect_params1[15], effect_params2[10];
4825 static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
4832 c[2] = get_fp_val(gb);
4836 c[0] = c[4] = get_fp_val(gb);
4837 c[2] = get_fp_val(gb);
4840 c[0] = get_fp_val(gb);
4841 c[2] = get_fp_val(gb);
4842 c[4] = get_fp_val(gb);
4845 c[0] = get_fp_val(gb);
4846 c[1] = get_fp_val(gb);
4847 c[2] = get_fp_val(gb);
4848 c[3] = get_fp_val(gb);
4849 c[4] = get_fp_val(gb);
4852 c[5] = get_fp_val(gb);
4854 c[6] = get_fp_val(gb);
4864 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
4865 vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
4866 if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
4869 for (i = 0; i < 7; i++)
4871 sd->coefs[sprite][i] / (1<<16),
4872 (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
4878 switch (sd->effect_pcount1 = get_bits(gb, 4)) {
4880 vc1_sprite_parse_transform(gb, sd->effect_params1);
4883 vc1_sprite_parse_transform(gb, sd->effect_params1);
4884 vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
4887 for (i = 0; i < sd->effect_pcount1; i++)
4888 sd->effect_params1[i] = get_fp_val(gb);
4890 if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
4893 for (i = 0; i < sd->effect_pcount1; i++)
4895 sd->effect_params1[i] / (1 << 16),
4896 (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
4900 sd->effect_pcount2 = get_bits(gb, 16);
4901 if (sd->effect_pcount2 > 10) {
4904 } else if (sd->effect_pcount2) {
4907 while (++i < sd->effect_pcount2) {
4908 sd->effect_params2[i] = get_fp_val(gb);
4910 sd->effect_params2[i] / (1 << 16),
4911 (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
4926 static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
4928 int i, plane, row, sprite;
4929 int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
4930 uint8_t* src_h[2][2];
4931 int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
4935 for (i = 0; i < 2; i++) {
4936 xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
4937 xadv[i] = sd->coefs[i][0];
4941 yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
4944 alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
4953 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
4956 int ycoord = yoff[sprite] + yadv[sprite] * row;
4957 int yline = ycoord >> 16;
4958 ysub[sprite] = ycoord & 0xFFFF;
4963 if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
4964 src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
4966 src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + (yline + 1) * iline;
4968 if (sr_cache[sprite][0] != yline) {
4969 if (sr_cache[sprite][1] == yline) {
4971 FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
4973 v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
4974 sr_cache[sprite][0] = yline;
4977 if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
4978 v->vc1dsp.sprite_h(v->sr_rows[sprite][1], iplane + (yline + 1) * iline, xoff[sprite], xadv[sprite], width);
4979 sr_cache[sprite][1] = yline + 1;
4981 src_h[sprite][0] = v->sr_rows[sprite][0];
4982 src_h[sprite][1] = v->sr_rows[sprite][1];
4990 memcpy(dst, src_h[0][0], width);
4993 if (ysub[0] && ysub[1]) {
4995 src_h[1][0], src_h[1][1], ysub[1], alpha, width);
4996 } else if (ysub[0]) {
4998 src_h[1][0], alpha, width);
4999 } else if (ysub[1]) {
5001 src_h[0][0], (1<<16)-1-alpha, width);
5009 for (i = 0; i < 2; i++) {
5025 vc1_parse_sprites(v, gb, &sd);
5047 vc1_draw_sprites(v, &sd);
5067 plane ? 128 : 0, f->linesize[plane]);
5126 for (i = 0; i < 4; i++)
5200 } else if (count < 0) {
5204 const uint8_t *start = avctx->extradata;
5206 const uint8_t *next;
5207 int size, buf2_size;
5208 uint8_t *buf2 = NULL;
5209 int seq_initialized = 0, ep_initialized = 0;
5219 for (; next < end; start = next) {
5221 size = next - start - 4;
5232 seq_initialized = 1;
5244 if (!seq_initialized || !ep_initialized) {
5261 for (i = 0; i < 64; i++) {
5262 #define transpose(x) ((x >> 3) | ((x & 7) << 3))
5304 for (i = 0; i < 4; i++)
5336 const uint8_t *buf = avpkt->data;
5337 int buf_size = avpkt->size, n_slices = 0, i;
5341 uint8_t *buf2 = NULL;
5342 const uint8_t *buf_start = buf;
5343 int mb_height, n_slices1;
5348 } *slices = NULL, *tmp;
5376 const uint8_t *start, *end, *next;
5380 for (start = buf, end = buf + buf_size; next < end; start = next) {
5382 size = next - start - 4;
5383 if (size <= 0) continue;
5393 slices = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5397 if (!slices[n_slices].buf)
5400 slices[n_slices].buf);
5405 slices[n_slices].mby_start = s->mb_height >> 1;
5406 n_slices1 = n_slices - 1;
5417 slices = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5421 if (!slices[n_slices].buf)
5424 slices[n_slices].buf);
5427 slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
5433 } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) {
5434 const uint8_t *divider;
5442 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5447 if (!slices[n_slices].buf)
5449 buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
5452 slices[n_slices].mby_start = s->mb_height >> 1;
5453 n_slices1 = n_slices - 1;
5586 v->bits = buf_size * 8;
5600 v->mv_f[0] = tmp[0];
5601 v->mv_f[1] = tmp[1];
5610 for (i = 0; i <= n_slices; i++) {
5611 if (i > 0 && slices[i - 1].mby_start >= mb_height) {
5614 "picture boundary (%d >= %d)\n", i,
5615 slices[i - 1].mby_start, mb_height);
5643 s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
5645 s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5647 s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5650 s->gb = slices[i].gb;
5678 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5679 if (vc1_decode_sprites(v, &s->gb))
5698 for (i = 0; i < n_slices; i++)
5705 for (i = 0; i < n_slices; i++)
5735 #if CONFIG_WMV3_DECODER
5752 #if CONFIG_WMV3_VDPAU_DECODER
5753 AVCodec ff_wmv3_vdpau_decoder = {
5754 .name = "wmv3_vdpau",
5768 #if CONFIG_VC1_VDPAU_DECODER
5769 AVCodec ff_vc1_vdpau_decoder = {
5770 .name = "vc1_vdpau",
5784 #if CONFIG_WMV3IMAGE_DECODER
5785 AVCodec ff_wmv3image_decoder = {
5786 .name = "wmv3image",
5794 .flush = vc1_sprite_flush,
5800 #if CONFIG_VC1IMAGE_DECODER
5801 AVCodec ff_vc1image_decoder = {
5810 .flush = vc1_sprite_flush,
static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
in the bitstream is reported as 00b
const int ff_vc1_ttblk_to_tt[3][8]
Table for conversion between TTBLK and TTMB.
int vc1_decode_entry_point(AVCodecContext *avctx, VC1Context *v, GetBitContext *gb)
op_pixels_func avg_vc1_mspel_pixels_tab[16]
int use_ic
use intensity compensation in B-frames
#define VC1_TTBLK_VLC_BITS
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
int(* get_buffer)(struct AVCodecContext *c, AVFrame *pic)
Called at the beginning of each frame to get a buffer for it.
void(* vc1_h_overlap)(uint8_t *src, int stride)
void(* emulated_edge_mc)(uint8_t *buf, const uint8_t *src, int linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Motion estimation with emulated edge values.
enum PixelFormat pix_fmt
Pixel format, see PIX_FMT_xxx.
void ff_init_block_index(MpegEncContext *s)
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
#define VC1_IF_MBMODE_VLC_BITS
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext *gb)
#define FF_BUFFER_HINTS_VALID
#define VC1_ICBPCY_VLC_BITS
void(* vc1_inv_trans_4x8)(uint8_t *dest, int line_size, DCTELEM *block)
static int vc1_decode_p_mb(VC1Context *v)
Decode one P-frame MB.
AV_WL32 AV_WL24 AV_WL16 AV_RB32
int k_x
Number of bits for MVs (depends on MV range)
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
const uint8_t * y_dc_scale_table
qscale -> y_dc_scale table
int coded_width
Bitstream width / height, may be different from width/height if lowres enabled.
int mv_type_is_raw
mv type mb plane is not coded
int buffer_hints
codec suggestion on buffer type if != 0
void(* release_buffer)(struct AVCodecContext *c, AVFrame *pic)
Called to release buffers which were allocated with get_buffer.
static av_always_inline int scaleforsame(VC1Context *v, int i, int n, int dim, int dir)
uint8_t dmvrange
Frame decoding info for interlaced picture.
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
static av_always_inline int scaleforopp_y(VC1Context *v, int n, int dir)
static const uint8_t vc1_index_decode_table[AC_MODES][185][2]
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
enum PixelFormat(* get_format)(struct AVCodecContext *s, const enum PixelFormat *fmt)
callback to negotiate the pixelFormat
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
int16_t(*[3] ac_val)[16]
used for for mpeg4 AC prediction, all 3 arrays must be continuous
static const int vc1_last_decode_table[AC_MODES]
#define AV_LOG_WARNING
Something somehow does not look correct.
int tt_index
Index for Transform Type tables (to decode TTMB)
static void vc1_decode_p_blocks(VC1Context *v)
static void vc1_put_signed_blocks_clamped(VC1Context *v)
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
#define GET_MVDATA(_dmv_x, _dmv_y)
Get MV differentials.
#define VC1_2REF_MVDATA_VLC_BITS
void(* sprite_v_double_onescale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1, const uint8_t *src2a, int alpha, int width)
int repeat_pict
When decoding, this signals how much the picture must be delayed.
static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int coded, int mquant, int codingset)
Decode intra block in inter frames - more generic version than vc1_decode_i_block.
uint8_t rangeredfrm
Frame decoding info for S/M profiles only.
int ff_msmpeg4_decode_init(AVCodecContext *avctx)
uint8_t zz_8x8[4][64]
Zigzag table for TT_8x8, permuted for IDCT.
static void vc1_decode_b_blocks(VC1Context *v)
void * av_realloc(void *ptr, size_t size)
Allocate or reallocate a block of memory.
uint8_t * mv_f[2]
0: MV obtained from same field, 1: opposite field
int fastuvmc
Rounding of qpel vector to hpel ? (not in Simple)
struct VC1Context VC1Context
The VC1 Context.
const uint16_t vc1_field_mvpred_scales[2][7][4]
void(* vc1_v_loop_filter8)(uint8_t *src, int stride, int pq)
static void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
AVHWAccel * ff_find_hwaccel(enum CodecID codec_id, enum PixelFormat pix_fmt)
Return the hardware accelerated codec for codec codec_id and pixel format pix_fmt.
static int vc1_coded_block_pred(MpegEncContext *s, int n, uint8_t **coded_block_ptr)
enum AVDiscard skip_frame
#define VC1_4MV_BLOCK_PATTERN_VLC_BITS
static void vc1_apply_p_loop_filter(VC1Context *v)
struct AVHWAccel * hwaccel
Hardware accelerator in use.
int refdist
distance of the current picture from reference
uint8_t * acpred_plane
AC prediction flags bitplane.
static int decode(MimicContext *ctx, int quality, int num_coeffs, int is_iframe)
static const AVProfile profiles[]
static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset)
Decode intra block in intra frames - should be faster than decode_intra_block.
void(* vc1_v_loop_filter4)(uint8_t *src, int stride, int pq)
void(* sprite_v_double_twoscale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1, const uint8_t *src2a, const uint8_t *src2b, int offset2, int alpha, int width)
void(* vc1_inv_trans_8x4_dc)(uint8_t *dest, int line_size, DCTELEM *block)
h264_chroma_mc_func avg_h264_chroma_pixels_tab[3]
enum PixelFormat ff_hwaccel_pixfmt_list_420[]
static void vc1_mc_4mv_chroma4(VC1Context *v)
Do motion compensation for 4-MV field chroma macroblock (both U and V)
void(* vc1_inv_trans_4x8_dc)(uint8_t *dest, int line_size, DCTELEM *block)
int interlace
Progressive/interlaced (RPTFTM syntax element)
int y_ac_table_index
Luma index from AC2FRM element.
qpel_mc_func(* qpel_put)[16]
void(* vc1_inv_trans_8x8)(DCTELEM *b)
int c_ac_table_index
AC coding set indexes.
const int ff_vc1_ac_sizes[AC_MODES]
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
int ttfrm
Transform type info present at frame level.
Picture current_picture
copy of the current picture structure.
uint8_t lutuv[256]
lookup tables used for intensity compensation
int codingset2
index of current table set from 11.8 to use for chroma block decoding
int16_t bfraction
Relative position % anchors=> how to scale MVs.
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
int profile
Sequence header data for all Profiles TODO: choose between ints, uint8_ts and monobit flags...
static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
static int get_bits_count(const GetBitContext *s)
void(* vc1_h_loop_filter8)(uint8_t *src, int stride, int pq)
static av_always_inline int scaleforsame_x(VC1Context *v, int n, int dir)
void(* vc1_inv_trans_8x8_dc)(uint8_t *dest, int line_size, DCTELEM *block)
uint8_t * forward_mb_plane
bitplane for "forward" MBs
int mb_height
number of MBs horizontally & vertically
static void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode)
Reconstruct motion vector for B-frame and do motion compensation.
enum PixelFormat ff_pixfmt_list_420[]
void MPV_common_end(MpegEncContext *s)
static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
Do motion compensation for 4-MV macroblock - luminance block.
uint8_t * over_flags_plane
Overflags bitplane.
static void vc1_decode_b_mb(VC1Context *v)
Decode one B-frame MB (in Main profile)
const int8_t ff_vc1_adv_interlaced_4x8_zz[32]
static void ff_update_block_index(MpegEncContext *s)
static int init(AVCodecParserContext *s)
qpel_mc_func put_qpel_pixels_tab[2][16]
uint8_t ttmbf
Transform type flag.
#define CODEC_FLAG_GRAY
Only decode/encode grayscale.
int k_y
Number of bits for MVs (depends on MV range)
void(* add_pixels_clamped)(const DCTELEM *block, uint8_t *pixels, int line_size)
#define CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
static int get_bits_left(GetBitContext *gb)
int16_t * dc_val[3]
used for mpeg4 DC prediction, all 3 arrays must be continuous
void(* put_pixels_clamped)(const DCTELEM *block, uint8_t *pixels, int line_size)
int dmb_is_raw
direct mb plane is raw
static int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n, int16_t **dc_val_ptr, int *dir_ptr)
Get predicted DC value for I-frames only prediction dir: left=0, top=1.
#define VC1_CBPCY_P_VLC_BITS
int has_b_frames
Size of the frame reordering buffer in the decoder.
void av_free(void *ptr)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc(). ...
int overlap
overlapped transforms in use
in the bitstream is reported as 11b
int reference
is this picture used as reference The values for this are the same as the MpegEncContext.picture_structure variable, that is 1->top field, 2->bottom field, 3->frame/both fields.
const int8_t ff_vc1_simple_progressive_4x4_zz[16]
int qs_last
if qpel has been used in the previous (tr.) picture
void(* vc1_inv_trans_4x4_dc)(uint8_t *dest, int line_size, DCTELEM *block)
qpel_mc_func avg_qpel_pixels_tab[2][16]
static void vc1_mc_1mv(VC1Context *v, int dir)
Do motion compensation over 1 macroblock Mostly adapted hpel_motion and qpel_motion from mpegvideo...
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
static av_cold int vc1_decode_init(AVCodecContext *avctx)
Initialize a VC1/WMV3 decoder.
#define GET_MQUANT()
Get macroblock-level quantizer scale.
AVFrame sprite_output_frame
int capabilities
Codec capabilities.
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset, int mquant)
Decode intra block in intra frames - should be faster than decode_intra_block.
void(* vc1_v_s_overlap)(DCTELEM *top, DCTELEM *bottom)
static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n, int mquant, int ttmb, int first_block, uint8_t *dst, int linesize, int skip_block, int *ttmb_out)
Decode P block.
int vc1_parse_frame_header(VC1Context *v, GetBitContext *gb)
void av_log(void *avcl, int level, const char *fmt,...)
const char * name
Name of the codec implementation.
#define IS_MARKER(state, i, buf, buf_size)
int quarter_sample
1->qpel, 0->half pel ME/MC
static void vc1_decode_blocks(VC1Context *v)
int low_delay
no reordering needed / has no b-frames
static av_cold int vc1_decode_end(AVCodecContext *avctx)
Close a VC1/WMV3 decoder.
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
void(* clear_block)(DCTELEM *block)
void(* vc1_v_loop_filter16)(uint8_t *src, int stride, int pq)
static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n, int dir)
const uint8_t * zz_8x4
Zigzag scan table for TT_8x4 coding mode.
int res_rtm_flag
reserved, set to 1
h264_chroma_mc_func put_h264_chroma_pixels_tab[3]
h264 Chroma MC
static int vc1_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
Decode a VC1/WMV3 frame.
#define CODEC_FLAG_EMU_EDGE
Don't draw edges.
void MPV_frame_end(MpegEncContext *s)
uint8_t * blk_mv_type
0: frame MV, 1: field MV (interlaced frame)
static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip, int *value, int codingset)
Decode one AC coefficient.
void ff_mpeg_flush(AVCodecContext *avctx)
const int8_t ff_vc1_adv_interlaced_4x4_zz[16]
static av_always_inline int scaleforopp(VC1Context *v, int n, int dim, int dir)
void ff_er_add_slice(MpegEncContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
int cur_field_type
0: top, 1: bottom
static av_cold int vc1_decode_init_alloc_tables(VC1Context *v)
const uint8_t * zz_4x8
Zigzag scan table for TT_4x8 coding mode.
static av_always_inline int scaleforopp_x(VC1Context *v, int n)
enum AVPictureType pict_type
Picture type of the frame, see ?_TYPE below.
int next_p_frame_damaged
set if the next p frame is damaged, to avoid showing trashed b frames
uint8_t * blk_mv_type_base
av_cold void ff_intrax8_common_init(IntraX8Context *w, MpegEncContext *const s)
Initialize IntraX8 frame decoder.
int field_mode
1 for interlaced field pictures
av_cold void ff_intrax8_common_end(IntraX8Context *w)
Destroy IntraX8 frame structure.
int width
picture width / height.
#define VC1_SUBBLKPAT_VLC_BITS
struct AVFrame AVFrame
Audio Video Frame.
uint8_t * mbskip_table
used to avoid copy if macroblock skipped (for black regions for example) and used for b-frame encodin...
int idct_algo
IDCT algorithm, see FF_IDCT_* below.
uint8_t mv_mode
Frame decoding info for all profiles.
#define FF_PROFILE_VC1_MAIN
Picture * current_picture_ptr
pointer to the current picture
#define FF_PROFILE_UNKNOWN
static void vc1_decode_skip_blocks(VC1Context *v)
static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
void ff_er_frame_end(MpegEncContext *s)
static const int offset_table[6]
static int median4(int a, int b, int c, int d)
static int vc1_decode_p_mb_intfr(VC1Context *v)
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
int block_last_index[12]
last non zero coefficient in block
static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x, int *dmv_y, int *pred_flag)
static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
#define FF_PROFILE_VC1_SIMPLE
int16_t(* luma_mv_base)[2]
int block_index[6]
index to current MB in block based arrays with edges
VLC * cbpcy_vlc
CBPCY VLC table.
static int decode210(GetBitContext *gb)
static const float pred[4]
uint8_t * sr_rows[2][2]
Sprite resizer line cache.
static const int8_t mv[256][2]
h264_chroma_mc_func avg_no_rnd_vc1_chroma_pixels_tab[3]
uint32_t * mb_type
macroblock type table mb_type_base + mb_width + 2
static void vc1_loop_filter_iblk(VC1Context *v, int pq)
static void vc1_interp_mc(VC1Context *v)
Motion compensation for direct or interpolated blocks in B-frames.
int first_slice_line
used in mpeg4 too to handle resync markers
static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend)
static const int offset_table1[9]
const uint8_t wmv1_scantable[WMV1_SCANTABLE_COUNT][64]
int res_sprite
Simple/Main Profile sequence header.
void(* vc1_h_loop_filter4)(uint8_t *src, int stride, int pq)
int top_blk_sh
Either 3 or 0, positions of l/t in blk[].
op_pixels_func avg_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
int linesize[AV_NUM_DATA_POINTERS]
Size, in bytes, of the data for each picture/channel plane.
const int8_t ff_vc1_adv_interlaced_8x8_zz[64]
static const uint8_t vc1_delta_run_table[AC_MODES][57]
discard all non reference
int ff_vc1_init_common(VC1Context *v)
Init VC-1 specific tables and VC1Context members.
void av_log_ask_for_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message asking for a sample.
main external API structure.
static void close(AVCodecParserContext *s)
static int vc1_decode_p_mb_intfi(VC1Context *v)
uint8_t * coded_block
used for coded block pattern prediction (msmpeg4v3, wmv1)
int height
picture size; must be a multiple of 16
static void vc1_decode_b_mb_intfi(VC1Context *v)
Decode one B-frame MB (in interlaced field B picture)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
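A self-contained check of that packing, using the same least-significant-byte-first layout as MKTAG() (listed further below); the WMV3 tag is the fourcc commonly used for VC-1 Simple/Main streams and is shown purely as an example.

#include <stdint.h>
#include <stdio.h>

#define MKTAG(a, b, c, d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24))

int main(void)
{
    uint32_t tag = MKTAG('W', 'M', 'V', '3');
    printf("0x%08X = %c%c%c%c\n", (unsigned)tag,
           (int)(tag & 0xFF), (int)((tag >> 8) & 0xFF),
           (int)((tag >> 16) & 0xFF), (int)((tag >> 24) & 0xFF));
    return 0;
}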
int16_t(*[2] motion_val)[2]
motion vector table
void(* sprite_v_double_noscale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src2a, int alpha, int width)
Picture * picture
main picture buffer
const uint8_t ff_vc1_mbmode_intfrp[2][15][4]
static unsigned int get_bits1(GetBitContext *s)
int fmb_is_raw
forward mb plane is raw
void(* vc1_inv_trans_4x4)(uint8_t *dest, int line_size, DCTELEM *block)
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
static void skip_bits(GetBitContext *s, int n)
#define MB_INTRA_VLC_BITS
void ff_draw_horiz_band(MpegEncContext *s, int y, int h)
static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
#define VC1_2MV_BLOCK_PATTERN_VLC_BITS
static int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n, int a_avail, int c_avail, int16_t **dc_val_ptr, int *dir_ptr)
Get predicted DC value. Prediction dir: left=0, top=1.
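A sketch of the usual gradient rule behind this kind of DC prediction, using the dir convention above (left=0, top=1); illustrative only, not the actual vc1_pred_dc() body.

#include <stdlib.h>

/* left/top/topleft are DC values of already-decoded neighbouring blocks. */
static int predict_dc_sketch(int left, int top, int topleft, int *dir_ptr)
{
    if (abs(top - topleft) <= abs(topleft - left)) {
        *dir_ptr = 0;       /* horizontal: predict from the left block */
        return left;
    } else {
        *dir_ptr = 1;       /* vertical: predict from the top block */
        return top;
    }
}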
void ff_er_frame_start(MpegEncContext *s)
DSPContext dsp
pointers for accelerated dsp functions
int skip_is_raw
skip mb plane is not coded
#define FF_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
int ff_intrax8_decode_picture(IntraX8Context *const w, int dquant, int quant_offset)
Decode single IntraX8 frame.
#define FF_PROFILE_VC1_COMPLEX
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
int ref_field_type[2]
forward and backward reference field type (top or bottom)
int vc1_decode_sequence_header(AVCodecContext *avctx, VC1Context *v, GetBitContext *gb)
Decode Simple/Main Profiles sequence header.
uint8_t * direct_mb_plane
bitplane for "direct" MBs
#define CODEC_CAP_HWACCEL_VDPAU
Codec can export data for HW decoding (VDPAU).
static const uint8_t vc1_last_delta_run_table[AC_MODES][10]
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
DCTELEM(* block)[64]
points to one of the following blocks
uint8_t * mv_type_mb_plane
bitplane for mv_type == (4MV)
int numref
number of past field pictures used as reference
const int32_t ff_vc1_dqscale[63]
h264_chroma_mc_func put_no_rnd_vc1_chroma_pixels_tab[3]
static const uint16_t scale[4]
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
const uint8_t * c_dc_scale_table
qscale -> c_dc_scale table
enum PixelFormat * pix_fmts
array of supported pixel formats, or NULL if unknown, array is terminated by -1
int8_t * qscale_table
QP table.
qpel_mc_func(* qpel_avg)[16]
int mv[2][4][2]
motion vectors for a macroblock; first coordinate: 0 = forward, 1 = backward; second coordinate: depend...
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
void(* clear_blocks)(DCTELEM *blocks)
in the bitstream is reported as 10b
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
struct AVCodecContext * avctx
static const int offset_table2[9]
static void vc1_decode_i_blocks(VC1Context *v)
Decode blocks of I-frame.
void(* put_signed_pixels_clamped)(const DCTELEM *block, uint8_t *pixels, int line_size)
int pqindex
raw pqindex used in coding set selection
static const uint8_t vc1_last_delta_level_table[AC_MODES][44]
#define VC1_1REF_MVDATA_VLC_BITS
common internal api header.
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
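A small illustration of why the extra column matters: with mb_stride = mb_width + 1 the left and top neighbours of any in-picture macroblock can be addressed with plain offsets, no per-access bounds checks.

/* Index of a macroblock in an array padded to mb_stride = mb_width + 1. */
static int mb_index(int mb_x, int mb_y, int mb_width)
{
    int mb_stride = mb_width + 1;       /* one spare column as padding */
    return mb_x + mb_y * mb_stride;
}
/* left neighbour: mb_index(...) - 1
   top  neighbour: mb_index(...) - (mb_width + 1) */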
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstrea...
#define VC1_TTMB_VLC_BITS
static int get_unary(GetBitContext *gb, int stop, int len)
Get unary code of limited length.
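A sketch of limited-length unary decoding, assuming libavcodec's GetBitContext/get_bits1(): bits are counted until the stop bit appears or len bits have been read.

static int get_unary_sketch(GetBitContext *gb, int stop, int len)
{
    int i;
    for (i = 0; i < len && get_bits1(gb) != stop; i++)
        ;                       /* keep counting leading non-stop bits */
    return i;
}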
static av_cold void flush(AVCodecContext *avctx)
Flush (reset) the frame ID after seeking.
static const int size_table[6]
enum FrameCodingMode fcm
Frame decoding info for Advanced profile.
static void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y, int mvn, int r_x, int r_y, uint8_t *is_intra)
Predict and set motion vector for interlaced frame picture MBs.
Picture last_picture
copy of the previous picture structure.
uint8_t dquantfrm
pquant parameters
Picture * last_picture_ptr
pointer to the previous picture.
int res_fasttx
reserved, always 1
enum AVDiscard skip_loop_filter
int * ttblk
Transform type at the block level.
VLC ff_vc1_ac_coeff_table[8]
WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstrea...
#define VC1_INTFR_4MV_MBMODE_VLC_BITS
#define FF_PROFILE_VC1_ADVANCED
uint8_t pquantizer
Uniform (over sequence) quantizer in use.
static void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y, int mv1, int r_x, int r_y, uint8_t *is_intra, int pred_flag, int dir)
Predict and set motion vector.
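The core of this kind of MV prediction is a component-wise median of the left, top and top-right candidates; a sketch using FFmpeg's mid_pred() (the real vc1_pred_mv() additionally handles candidate availability, field pictures, pull-back and hybrid prediction).

/* A, B, C are candidate motion vectors; pred receives the predictor. */
static void pred_mv_sketch(const int A[2], const int B[2], const int C[2],
                           int pred[2])
{
    pred[0] = mid_pred(A[0], B[0], C[0]);   /* x component */
    pred[1] = mid_pred(A[1], B[1], C[1]);   /* y component */
}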
const uint16_t vc1_b_field_mvpred_scales[7][4]
void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf, int buf_size)
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
Print debugging info for the given picture.
void(* sprite_v_single)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset, int width)
const int8_t ff_vc1_adv_interlaced_8x4_zz[32]
static int decode012(GetBitContext *gb)
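decode012() and decode210() (listed above) are tiny bitstream helpers; their usual semantics, sketched under the assumption that libavcodec's get_bits1() is available:

/* decode012: '0' -> 0, '10' -> 1, '11' -> 2 */
static int decode012_sketch(GetBitContext *gb)
{
    if (!get_bits1(gb))
        return 0;
    return get_bits1(gb) + 1;
}

/* decode210: '1' -> 0, '01' -> 1, '00' -> 2 */
static int decode210_sketch(GetBitContext *gb)
{
    if (get_bits1(gb))
        return 0;
    return 2 - get_bits1(gb);
}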
VLC_TYPE(* table)[2]
code, bits
Picture next_picture
copy of the next picture structure.
static av_always_inline int scale_mv_intfi(int value, int bfrac, int inv, int qs, int qs_last)
int key_frame
1 -> keyframe, 0 -> not
static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
Do motion compensation for 4-MV macroblock - both chroma blocks.
int linesize
line size, in bytes, may be different from width
static void vc1_decode_i_blocks_adv(VC1Context *v)
Decode blocks of I-frame for advanced profile.
static av_always_inline int vc1_unescape_buffer(const uint8_t *src, int size, uint8_t *dst)
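VC-1 Advanced Profile bitstreams use start-code emulation prevention; a simplified sketch of the unescaping idea (a 0x03 byte following two zero bytes is dropped). The real routine also restricts the escape to cases where the following byte is small, so treat this as illustration, not the library code.

#include <stdint.h>

static int unescape_sketch(const uint8_t *src, int size, uint8_t *dst)
{
    int i, dsize = 0, zeros = 0;

    for (i = 0; i < size; i++) {
        if (zeros >= 2 && src[i] == 0x03) {
            zeros = 0;              /* drop the escape byte */
            continue;
        }
        zeros = src[i] ? 0 : zeros + 1;
        dst[dsize++] = src[i];
    }
    return dsize;                   /* number of unescaped bytes */
}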
int level
Advanced Profile.
int flags
AVCodecContext.flags (HQ, MV4, ...)
int brfd
reference frame distance (forward or backward)
uint8_t mv_mode2
Secondary MV coding mode (B frames)
int new_sprite
Frame decoding info for sprite modes.
#define FFSWAP(type, a, b)
discard all frames except keyframes
#define CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
int codingset
index of current table set from 11.8 to use for luma block decoding
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
int ff_find_unused_picture(MpegEncContext *s, int shared)
#define MKTAG(a, b, c, d)
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function for encode/decode called after coding/decoding the header and before a frame is code...
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
int uvlinesize
line size, for chroma in bytes, may be different from width
#define VC1_INTFR_NON4MV_MBMODE_VLC_BITS
void(* vc1_inv_trans_8x4)(uint8_t *dest, int line_size, DCTELEM *block)
void(* vc1_v_overlap)(uint8_t *src, int stride)
av_cold void ff_vc1dsp_init(VC1DSPContext *dsp)
void(* vc1_h_s_overlap)(DCTELEM *left, DCTELEM *right)
VLC ff_msmp4_dc_luma_vlc[2]
VLC ff_vc1_subblkpat_vlc[3]
uint8_t halfpq
Uniform quant over image and qp+.5.
static void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype)
static const uint8_t vc1_delta_level_table[AC_MODES][31]
static av_always_inline const uint8_t * find_next_marker(const uint8_t *src, const uint8_t *end)
Find VC-1 marker in buffer.
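A sketch of the marker search: VC-1 Advanced Profile start codes use the usual 0x000001 prefix followed by one code byte; illustrative only, not the exact library routine.

#include <stdint.h>

static const uint8_t *find_marker_sketch(const uint8_t *src, const uint8_t *end)
{
    while (end - src >= 4) {
        if (src[0] == 0x00 && src[1] == 0x00 && src[2] == 0x01)
            return src;             /* start of the 4-byte marker */
        src++;
    }
    return end;                     /* no marker before end */
}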
VLC ff_msmp4_dc_chroma_vlc[2]
op_pixels_func put_vc1_mspel_pixels_tab[16]
void(* sprite_h)(uint8_t *dst, const uint8_t *src, int offset, int advance, int count)
void(* vc1_h_loop_filter16)(uint8_t *src, int stride, int pq)