vc1dec.c
1 /*
2  * VC-1 and WMV3 decoder
3  * Copyright (c) 2011 Mashiat Sarker Shakkhar
4  * Copyright (c) 2006-2007 Konstantin Shishkov
5  * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
6  *
7  * This file is part of Libav.
8  *
9  * Libav is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * Libav is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with Libav; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 /**
25  * @file
26  * VC-1 and WMV3 decoder
27  */
28 
29 #include "internal.h"
30 #include "dsputil.h"
31 #include "avcodec.h"
32 #include "mpegvideo.h"
33 #include "h263.h"
34 #include "vc1.h"
35 #include "vc1data.h"
36 #include "vc1acdata.h"
37 #include "msmpeg4data.h"
38 #include "unary.h"
39 #include "simple_idct.h"
40 #include "mathops.h"
41 #include "vdpau_internal.h"
42 
43 #undef NDEBUG
44 #include <assert.h>
45 
46 #define MB_INTRA_VLC_BITS 9
47 #define DC_VLC_BITS 9
48 #define AC_VLC_BITS 9
49 
50 
51 static const uint16_t vlc_offs[] = {
52  0, 520, 552, 616, 1128, 1160, 1224, 1740, 1772, 1836, 1900, 2436,
53  2986, 3050, 3610, 4154, 4218, 4746, 5326, 5390, 5902, 6554, 7658, 8342,
54  9304, 9988, 10630, 11234, 12174, 13006, 13560, 14232, 14786, 15432, 16350, 17522,
55  20372, 21818, 22330, 22394, 23166, 23678, 23742, 24820, 25332, 25396, 26460, 26980,
56  27048, 27592, 27600, 27608, 27616, 27624, 28224, 28258, 28290, 28802, 28834, 28866,
57  29378, 29412, 29444, 29960, 29994, 30026, 30538, 30572, 30604, 31120, 31154, 31186,
58  31714, 31746, 31778, 32306, 32340, 32372
59 };
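// Each VLC initialized below points its .table into the single static
// vlc_table[] at vlc_offs[k] and gets vlc_offs[k + 1] - vlc_offs[k]
// entries of storage.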
60 
61 // offset tables for interlaced picture MVDATA decoding
62 static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
63 static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
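// offset_table2 (the wider steps) is selected per MV component when the
// corresponding dmvrange extension flag is set; see the interlaced MVDATA
// parsing further below.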
64 
70 static int vc1_init_common(VC1Context *v)
71 {
72  static int done = 0;
73  int i = 0;
74  static VLC_TYPE vlc_table[32372][2];
75 
76  v->hrd_rate = v->hrd_buffer = NULL;
77 
78  /* VLC tables */
79  if (!done) {
80         INIT_VLC_STATIC(&ff_vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23,
81                         ff_vc1_bfraction_bits, 1, 1,
82                         ff_vc1_bfraction_codes, 1, 1, 1 << VC1_BFRACTION_VLC_BITS);
83         INIT_VLC_STATIC(&ff_vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4,
84                         ff_vc1_norm2_bits, 1, 1,
85                         ff_vc1_norm2_codes, 1, 1, 1 << VC1_NORM2_VLC_BITS);
86         INIT_VLC_STATIC(&ff_vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64,
87                         ff_vc1_norm6_bits, 1, 1,
88                         ff_vc1_norm6_codes, 2, 2, 556);
89         INIT_VLC_STATIC(&ff_vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7,
90                         ff_vc1_imode_bits, 1, 1,
91                         ff_vc1_imode_codes, 1, 1, 1 << VC1_IMODE_VLC_BITS);
92  for (i = 0; i < 3; i++) {
93  ff_vc1_ttmb_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 0]];
94  ff_vc1_ttmb_vlc[i].table_allocated = vlc_offs[i * 3 + 1] - vlc_offs[i * 3 + 0];
96  ff_vc1_ttmb_bits[i], 1, 1,
98  ff_vc1_ttblk_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 1]];
99  ff_vc1_ttblk_vlc[i].table_allocated = vlc_offs[i * 3 + 2] - vlc_offs[i * 3 + 1];
101  ff_vc1_ttblk_bits[i], 1, 1,
103  ff_vc1_subblkpat_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 2]];
104  ff_vc1_subblkpat_vlc[i].table_allocated = vlc_offs[i * 3 + 3] - vlc_offs[i * 3 + 2];
106  ff_vc1_subblkpat_bits[i], 1, 1,
108  }
109  for (i = 0; i < 4; i++) {
110  ff_vc1_4mv_block_pattern_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 9]];
111  ff_vc1_4mv_block_pattern_vlc[i].table_allocated = vlc_offs[i * 3 + 10] - vlc_offs[i * 3 + 9];
115  ff_vc1_cbpcy_p_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 10]];
116  ff_vc1_cbpcy_p_vlc[i].table_allocated = vlc_offs[i * 3 + 11] - vlc_offs[i * 3 + 10];
118  ff_vc1_cbpcy_p_bits[i], 1, 1,
120  ff_vc1_mv_diff_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 11]];
121  ff_vc1_mv_diff_vlc[i].table_allocated = vlc_offs[i * 3 + 12] - vlc_offs[i * 3 + 11];
123  ff_vc1_mv_diff_bits[i], 1, 1,
125  }
126  for (i = 0; i < 8; i++) {
127  ff_vc1_ac_coeff_table[i].table = &vlc_table[vlc_offs[i * 2 + 21]];
128  ff_vc1_ac_coeff_table[i].table_allocated = vlc_offs[i * 2 + 22] - vlc_offs[i * 2 + 21];
129             init_vlc(&ff_vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i],
130                      &vc1_ac_tables[i][0][1], 8, 4,
131  &vc1_ac_tables[i][0][0], 8, 4, INIT_VLC_USE_NEW_STATIC);
132  /* initialize interlaced MVDATA tables (2-Ref) */
133  ff_vc1_2ref_mvdata_vlc[i].table = &vlc_table[vlc_offs[i * 2 + 22]];
134  ff_vc1_2ref_mvdata_vlc[i].table_allocated = vlc_offs[i * 2 + 23] - vlc_offs[i * 2 + 22];
135             init_vlc(&ff_vc1_2ref_mvdata_vlc[i], VC1_2REF_MVDATA_VLC_BITS, 126,
136                      ff_vc1_2ref_mvdata_bits[i], 1, 1,
137                      ff_vc1_2ref_mvdata_codes[i], 4, 4, INIT_VLC_USE_NEW_STATIC);
138  }
139  for (i = 0; i < 4; i++) {
140  /* initialize 4MV MBMODE VLC tables for interlaced frame P picture */
141  ff_vc1_intfr_4mv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 37]];
142  ff_vc1_intfr_4mv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 38] - vlc_offs[i * 3 + 37];
146  /* initialize NON-4MV MBMODE VLC tables for the same */
147  ff_vc1_intfr_non4mv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 38]];
148  ff_vc1_intfr_non4mv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 39] - vlc_offs[i * 3 + 38];
152  /* initialize interlaced MVDATA tables (1-Ref) */
153  ff_vc1_1ref_mvdata_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 39]];
154  ff_vc1_1ref_mvdata_vlc[i].table_allocated = vlc_offs[i * 3 + 40] - vlc_offs[i * 3 + 39];
155             init_vlc(&ff_vc1_1ref_mvdata_vlc[i], VC1_1REF_MVDATA_VLC_BITS, 72,
156                      ff_vc1_1ref_mvdata_bits[i], 1, 1,
157                      ff_vc1_1ref_mvdata_codes[i], 4, 4, INIT_VLC_USE_NEW_STATIC);
158  }
159  for (i = 0; i < 4; i++) {
160  /* Initialize 2MV Block pattern VLC tables */
161  ff_vc1_2mv_block_pattern_vlc[i].table = &vlc_table[vlc_offs[i + 49]];
162  ff_vc1_2mv_block_pattern_vlc[i].table_allocated = vlc_offs[i + 50] - vlc_offs[i + 49];
166  }
167  for (i = 0; i < 8; i++) {
168  /* Initialize interlaced CBPCY VLC tables (Table 124 - Table 131) */
169  ff_vc1_icbpcy_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 53]];
170  ff_vc1_icbpcy_vlc[i].table_allocated = vlc_offs[i * 3 + 54] - vlc_offs[i * 3 + 53];
172  ff_vc1_icbpcy_p_bits[i], 1, 1,
174  /* Initialize interlaced field picture MBMODE VLC tables */
175  ff_vc1_if_mmv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 54]];
176  ff_vc1_if_mmv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 55] - vlc_offs[i * 3 + 54];
178  ff_vc1_if_mmv_mbmode_bits[i], 1, 1,
180  ff_vc1_if_1mv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 55]];
181  ff_vc1_if_1mv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 56] - vlc_offs[i * 3 + 55];
183  ff_vc1_if_1mv_mbmode_bits[i], 1, 1,
185  }
186  done = 1;
187  }
188 
189  /* Other defaults */
190  v->pq = -1;
191  v->mvrange = 0; /* 7.1.1.18, p80 */
192 
193  return 0;
194 }
195 
196 /***********************************************************************/
207 enum Imode {
208     IMODE_RAW,
209     IMODE_NORM2,
210     IMODE_DIFF2,
211     IMODE_ROWSKIP,
212     IMODE_COLSKIP,
213     IMODE_NORM6,
214     IMODE_DIFF6
215 }; //imode defines
217 
218  //Bitplane group
220 
221 static void vc1_put_signed_blocks_clamped(VC1Context *v)
222 {
223  MpegEncContext *s = &v->s;
224  int topleft_mb_pos, top_mb_pos;
225  int stride_y, fieldtx;
226  int v_dist;
227 
228  /* The put pixels loop is always one MB row behind the decoding loop,
229  * because we can only put pixels when overlap filtering is done, and
230  * for filtering of the bottom edge of a MB, we need the next MB row
231  * present as well.
232  * Within the row, the put pixels loop is also one MB col behind the
233  * decoding loop. The reason for this is again, because for filtering
234  * of the right MB edge, we need the next MB present. */
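    /* Example: while MB (mb_x, mb_y) is being decoded, the blocks written out
     * here belong to MB (mb_x - 1, mb_y - 1), whose right and bottom edges
     * have had their overlap filtering completed by now. */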
235  if (!s->first_slice_line) {
236  if (s->mb_x) {
237  topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
238  fieldtx = v->fieldtx_plane[topleft_mb_pos];
239  stride_y = s->linesize << fieldtx;
240  v_dist = (16 - fieldtx) >> (fieldtx == 0);
241             s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
242                                              s->dest[0] - 16 * s->linesize - 16,
243                                              stride_y);
244             s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
245                                              s->dest[0] - 16 * s->linesize - 8,
246                                              stride_y);
247             s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
248                                              s->dest[0] - v_dist * s->linesize - 16,
249                                              stride_y);
250             s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
251                                              s->dest[0] - v_dist * s->linesize - 8,
252                                              stride_y);
253             s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
254                                              s->dest[1] - 8 * s->uvlinesize - 8,
255                                              s->uvlinesize);
256             s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
257                                              s->dest[2] - 8 * s->uvlinesize - 8,
258                                              s->uvlinesize);
259  }
260  if (s->mb_x == s->mb_width - 1) {
261  top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
262  fieldtx = v->fieldtx_plane[top_mb_pos];
263  stride_y = s->linesize << fieldtx;
264  v_dist = fieldtx ? 15 : 8;
265             s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
266                                              s->dest[0] - 16 * s->linesize,
267                                              stride_y);
268             s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
269                                              s->dest[0] - 16 * s->linesize + 8,
270                                              stride_y);
271             s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
272                                              s->dest[0] - v_dist * s->linesize,
273                                              stride_y);
274             s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
275                                              s->dest[0] - v_dist * s->linesize + 8,
276                                              stride_y);
277             s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
278                                              s->dest[1] - 8 * s->uvlinesize,
279                                              s->uvlinesize);
280             s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
281                                              s->dest[2] - 8 * s->uvlinesize,
282                                              s->uvlinesize);
283  }
284  }
285 
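/* Advance a delayed-block index, wrapping around the ring of
 * v->n_allocated_blks block buffers used by the lagged put-pixels loop. */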
286 #define inc_blk_idx(idx) do { \
287  idx++; \
288  if (idx >= v->n_allocated_blks) \
289  idx = 0; \
290  } while (0)
291 
292     inc_blk_idx(v->topleft_blk_idx);
293     inc_blk_idx(v->top_blk_idx);
294     inc_blk_idx(v->left_blk_idx);
295     inc_blk_idx(v->cur_blk_idx);
296 }
297 
298 static void vc1_loop_filter_iblk(VC1Context *v, int pq)
299 {
300  MpegEncContext *s = &v->s;
301  int j;
302  if (!s->first_slice_line) {
303  v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
304  if (s->mb_x)
305  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
306  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
307  for (j = 0; j < 2; j++) {
308  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
309  if (s->mb_x)
310  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
311  }
312  }
313  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);
314 
315  if (s->mb_y == s->end_mb_y - 1) {
316  if (s->mb_x) {
317  v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
318  v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
319  v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
320  }
321  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
322  }
323 }
324 
325 static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
326 {
327  MpegEncContext *s = &v->s;
328  int j;
329 
330  /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
331  * means it runs two rows/cols behind the decoding loop. */
332  if (!s->first_slice_line) {
333  if (s->mb_x) {
334  if (s->mb_y >= s->start_mb_y + 2) {
335  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
336 
337  if (s->mb_x >= 2)
338  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
339  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
340  for (j = 0; j < 2; j++) {
341  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
342  if (s->mb_x >= 2) {
343  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
344  }
345  }
346  }
347  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
348  }
349 
350  if (s->mb_x == s->mb_width - 1) {
351  if (s->mb_y >= s->start_mb_y + 2) {
352  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
353 
354  if (s->mb_x)
355  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
356  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
357  for (j = 0; j < 2; j++) {
358  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
359  if (s->mb_x >= 2) {
360  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
361  }
362  }
363  }
364  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
365  }
366 
367  if (s->mb_y == s->end_mb_y) {
368  if (s->mb_x) {
369  if (s->mb_x >= 2)
370  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
371  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
372  if (s->mb_x >= 2) {
373  for (j = 0; j < 2; j++) {
374  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
375  }
376  }
377  }
378 
379  if (s->mb_x == s->mb_width - 1) {
380  if (s->mb_x)
381  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
382  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
383  if (s->mb_x) {
384  for (j = 0; j < 2; j++) {
385  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
386  }
387  }
388  }
389  }
390  }
391 }
392 
393 static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
394 {
395  MpegEncContext *s = &v->s;
396  int mb_pos;
397 
398  if (v->condover == CONDOVER_NONE)
399  return;
400 
401  mb_pos = s->mb_x + s->mb_y * s->mb_stride;
402 
403  /* Within a MB, the horizontal overlap always runs before the vertical.
404  * To accomplish that, we run the H on left and internal borders of the
405  * currently decoded MB. Then, we wait for the next overlap iteration
406  * to do H overlap on the right edge of this MB, before moving over and
407  * running the V overlap. Therefore, the V overlap makes us trail by one
408  * MB col and the H overlap filter makes us trail by one MB row. This
409  * is reflected in the time at which we run the put_pixels loop. */
410  if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
411  if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
412  v->over_flags_plane[mb_pos - 1])) {
414  v->block[v->cur_blk_idx][0]);
416  v->block[v->cur_blk_idx][2]);
417  if (!(s->flags & CODEC_FLAG_GRAY)) {
419  v->block[v->cur_blk_idx][4]);
421  v->block[v->cur_blk_idx][5]);
422  }
423  }
425  v->block[v->cur_blk_idx][1]);
427  v->block[v->cur_blk_idx][3]);
428 
429  if (s->mb_x == s->mb_width - 1) {
430  if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
431  v->over_flags_plane[mb_pos - s->mb_stride])) {
433  v->block[v->cur_blk_idx][0]);
435  v->block[v->cur_blk_idx][1]);
436  if (!(s->flags & CODEC_FLAG_GRAY)) {
438  v->block[v->cur_blk_idx][4]);
440  v->block[v->cur_blk_idx][5]);
441  }
442  }
444  v->block[v->cur_blk_idx][2]);
446  v->block[v->cur_blk_idx][3]);
447  }
448  }
449  if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
450  if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
451  v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
453  v->block[v->left_blk_idx][0]);
455  v->block[v->left_blk_idx][1]);
456  if (!(s->flags & CODEC_FLAG_GRAY)) {
458  v->block[v->left_blk_idx][4]);
460  v->block[v->left_blk_idx][5]);
461  }
462  }
464  v->block[v->left_blk_idx][2]);
466  v->block[v->left_blk_idx][3]);
467  }
468 }
469 
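/** Do motion compensation over 1 MV macroblock
 *  (one luma MV for the whole 16x16 block plus a derived chroma MV).
 */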
473 static void vc1_mc_1mv(VC1Context *v, int dir)
474 {
475  MpegEncContext *s = &v->s;
476  DSPContext *dsp = &v->s.dsp;
477  uint8_t *srcY, *srcU, *srcV;
478  int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
479  int off, off_uv;
480  int v_edge_pos = s->v_edge_pos >> v->field_mode;
481 
482  if ((!v->field_mode ||
483  (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
484  !v->s.last_picture.f.data[0])
485  return;
486 
487  mx = s->mv[dir][0][0];
488  my = s->mv[dir][0][1];
489 
490  // store motion vectors for further use in B frames
491  if (s->pict_type == AV_PICTURE_TYPE_P) {
492  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = mx;
493  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = my;
494  }
495 
496  uvmx = (mx + ((mx & 3) == 3)) >> 1;
497  uvmy = (my + ((my & 3) == 3)) >> 1;
498  v->luma_mv[s->mb_x][0] = uvmx;
499  v->luma_mv[s->mb_x][1] = uvmy;
500 
501  if (v->field_mode &&
502  v->cur_field_type != v->ref_field_type[dir]) {
503  my = my - 2 + 4 * v->cur_field_type;
504  uvmy = uvmy - 2 + 4 * v->cur_field_type;
505  }
506 
507  // fastuvmc shall be ignored for interlaced frame picture
508  if (v->fastuvmc && (v->fcm != ILACE_FRAME)) {
509  uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
510  uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
511  }
512  if (v->field_mode) { // interlaced field picture
513  if (!dir) {
514  if ((v->cur_field_type != v->ref_field_type[dir]) && v->cur_field_type) {
515  srcY = s->current_picture.f.data[0];
516  srcU = s->current_picture.f.data[1];
517  srcV = s->current_picture.f.data[2];
518  } else {
519  srcY = s->last_picture.f.data[0];
520  srcU = s->last_picture.f.data[1];
521  srcV = s->last_picture.f.data[2];
522  }
523  } else {
524  srcY = s->next_picture.f.data[0];
525  srcU = s->next_picture.f.data[1];
526  srcV = s->next_picture.f.data[2];
527  }
528  } else {
529  if (!dir) {
530  srcY = s->last_picture.f.data[0];
531  srcU = s->last_picture.f.data[1];
532  srcV = s->last_picture.f.data[2];
533  } else {
534  srcY = s->next_picture.f.data[0];
535  srcU = s->next_picture.f.data[1];
536  srcV = s->next_picture.f.data[2];
537  }
538  }
539 
540  src_x = s->mb_x * 16 + (mx >> 2);
541  src_y = s->mb_y * 16 + (my >> 2);
542  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
543  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
544 
545  if (v->profile != PROFILE_ADVANCED) {
546  src_x = av_clip( src_x, -16, s->mb_width * 16);
547  src_y = av_clip( src_y, -16, s->mb_height * 16);
548  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
549  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
550  } else {
551  src_x = av_clip( src_x, -17, s->avctx->coded_width);
552  src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
553  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
554  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
555  }
556 
557  srcY += src_y * s->linesize + src_x;
558  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
559  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
560 
561  if (v->field_mode && v->ref_field_type[dir]) {
562  srcY += s->current_picture_ptr->f.linesize[0];
563  srcU += s->current_picture_ptr->f.linesize[1];
564  srcV += s->current_picture_ptr->f.linesize[2];
565  }
566 
567  /* for grayscale we should not try to read from unknown area */
568  if (s->flags & CODEC_FLAG_GRAY) {
569  srcU = s->edge_emu_buffer + 18 * s->linesize;
570  srcV = s->edge_emu_buffer + 18 * s->linesize;
571  }
572 
573     if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
574         || s->h_edge_pos < 22 || v_edge_pos < 22
575  || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
576  || (unsigned)(src_y - s->mspel) > v_edge_pos - (my&3) - 16 - s->mspel * 3) {
577  uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
578 
579  srcY -= s->mspel * (1 + s->linesize);
580         s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
581                                 17 + s->mspel * 2, 17 + s->mspel * 2,
582  src_x - s->mspel, src_y - s->mspel,
583  s->h_edge_pos, v_edge_pos);
584  srcY = s->edge_emu_buffer;
585  s->dsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
586  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
587  s->dsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
588  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
589  srcU = uvbuf;
590  srcV = uvbuf + 16;
591  /* if we deal with range reduction we need to scale source blocks */
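        /* Range reduction: pull the reference samples towards 128 (halve
         * their deviation from 128) before interpolation. */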
592  if (v->rangeredfrm) {
593  int i, j;
594  uint8_t *src, *src2;
595 
596  src = srcY;
597  for (j = 0; j < 17 + s->mspel * 2; j++) {
598  for (i = 0; i < 17 + s->mspel * 2; i++)
599  src[i] = ((src[i] - 128) >> 1) + 128;
600  src += s->linesize;
601  }
602  src = srcU;
603  src2 = srcV;
604  for (j = 0; j < 9; j++) {
605  for (i = 0; i < 9; i++) {
606  src[i] = ((src[i] - 128) >> 1) + 128;
607  src2[i] = ((src2[i] - 128) >> 1) + 128;
608  }
609  src += s->uvlinesize;
610  src2 += s->uvlinesize;
611  }
612  }
613  /* if we deal with intensity compensation we need to scale source blocks */
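        /* Intensity compensation: remap reference samples through the
         * v->luty / v->lutuv lookup tables (filled in during picture header
         * parsing from the intensity-compensation parameters) before
         * interpolation. */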
614  if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
615  int i, j;
616  uint8_t *src, *src2;
617 
618  src = srcY;
619  for (j = 0; j < 17 + s->mspel * 2; j++) {
620  for (i = 0; i < 17 + s->mspel * 2; i++)
621  src[i] = v->luty[src[i]];
622  src += s->linesize;
623  }
624  src = srcU;
625  src2 = srcV;
626  for (j = 0; j < 9; j++) {
627  for (i = 0; i < 9; i++) {
628  src[i] = v->lutuv[src[i]];
629  src2[i] = v->lutuv[src2[i]];
630  }
631  src += s->uvlinesize;
632  src2 += s->uvlinesize;
633  }
634  }
635  srcY += s->mspel * (1 + s->linesize);
636  }
637 
638  if (v->field_mode && v->cur_field_type) {
639  off = s->current_picture_ptr->f.linesize[0];
640  off_uv = s->current_picture_ptr->f.linesize[1];
641  } else {
642  off = 0;
643  off_uv = 0;
644  }
645  if (s->mspel) {
646  dxy = ((my & 3) << 2) | (mx & 3);
647  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
648  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
649  srcY += s->linesize * 8;
650  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
651  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
652  } else { // hpel mc - always used for luma
653  dxy = (my & 2) | ((mx & 2) >> 1);
654  if (!v->rnd)
655  dsp->put_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
656  else
657  dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
658  }
659 
660  if (s->flags & CODEC_FLAG_GRAY) return;
661  /* Chroma MC always uses qpel bilinear */
662  uvmx = (uvmx & 3) << 1;
663  uvmy = (uvmy & 3) << 1;
664  if (!v->rnd) {
665  dsp->put_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
666  dsp->put_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
667  } else {
668  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
669  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
670  }
671 }
672 
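/* median4() returns the average of the two middle values of its four
 * arguments, e.g. median4(1, 5, 2, 9) = (2 + 5) / 2 = 3. */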
673 static inline int median4(int a, int b, int c, int d)
674 {
675  if (a < b) {
676  if (c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
677  else return (FFMIN(b, c) + FFMAX(a, d)) / 2;
678  } else {
679  if (c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
680  else return (FFMIN(a, c) + FFMAX(b, d)) / 2;
681  }
682 }
683 
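/** Do motion compensation for 4-MV macroblock - luminance block n
 */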
686 static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
687 {
688  MpegEncContext *s = &v->s;
689  DSPContext *dsp = &v->s.dsp;
690  uint8_t *srcY;
691  int dxy, mx, my, src_x, src_y;
692  int off;
693  int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
694  int v_edge_pos = s->v_edge_pos >> v->field_mode;
695 
696  if ((!v->field_mode ||
697  (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
698  !v->s.last_picture.f.data[0])
699  return;
700 
701  mx = s->mv[dir][n][0];
702  my = s->mv[dir][n][1];
703 
704  if (!dir) {
705  if (v->field_mode) {
706  if ((v->cur_field_type != v->ref_field_type[dir]) && v->cur_field_type)
707  srcY = s->current_picture.f.data[0];
708  else
709  srcY = s->last_picture.f.data[0];
710  } else
711  srcY = s->last_picture.f.data[0];
712  } else
713  srcY = s->next_picture.f.data[0];
714 
715  if (v->field_mode) {
716  if (v->cur_field_type != v->ref_field_type[dir])
717  my = my - 2 + 4 * v->cur_field_type;
718  }
719 
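    /* For the last luma block of a 4-MV MB in a field P-picture, derive one
     * representative MV from whichever field (same or opposite) the majority
     * of the four block MVs reference: median of 4, median of 3, or average
     * of 2, and store it as the co-located MV for later B-frame prediction. */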
720  if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
721  int same_count = 0, opp_count = 0, k;
722  int chosen_mv[2][4][2], f;
723  int tx, ty;
724  for (k = 0; k < 4; k++) {
725  f = v->mv_f[0][s->block_index[k] + v->blocks_off];
726  chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
727  chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
728  opp_count += f;
729  same_count += 1 - f;
730  }
731  f = opp_count > same_count;
732  switch (f ? opp_count : same_count) {
733  case 4:
734  tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
735  chosen_mv[f][2][0], chosen_mv[f][3][0]);
736  ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
737  chosen_mv[f][2][1], chosen_mv[f][3][1]);
738  break;
739  case 3:
740  tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
741  ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
742  break;
743  case 2:
744  tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
745  ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
746  break;
747  }
748  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
749  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
750  for (k = 0; k < 4; k++)
751  v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
752  }
753 
754  if (v->fcm == ILACE_FRAME) { // not sure if needed for other types of picture
755  int qx, qy;
756  int width = s->avctx->coded_width;
757  int height = s->avctx->coded_height >> 1;
758  qx = (s->mb_x * 16) + (mx >> 2);
759  qy = (s->mb_y * 8) + (my >> 3);
760 
761  if (qx < -17)
762  mx -= 4 * (qx + 17);
763  else if (qx > width)
764  mx -= 4 * (qx - width);
765  if (qy < -18)
766  my -= 8 * (qy + 18);
767  else if (qy > height + 1)
768  my -= 8 * (qy - height - 1);
769  }
770 
771  if ((v->fcm == ILACE_FRAME) && fieldmv)
772  off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
773  else
774  off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
775  if (v->field_mode && v->cur_field_type)
776  off += s->current_picture_ptr->f.linesize[0];
777 
778  src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
779  if (!fieldmv)
780  src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
781  else
782  src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
783 
784  if (v->profile != PROFILE_ADVANCED) {
785  src_x = av_clip(src_x, -16, s->mb_width * 16);
786  src_y = av_clip(src_y, -16, s->mb_height * 16);
787  } else {
788  src_x = av_clip(src_x, -17, s->avctx->coded_width);
789  if (v->fcm == ILACE_FRAME) {
790  if (src_y & 1)
791  src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
792  else
793  src_y = av_clip(src_y, -18, s->avctx->coded_height);
794  } else {
795  src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
796  }
797  }
798 
799  srcY += src_y * s->linesize + src_x;
800  if (v->field_mode && v->ref_field_type[dir])
801  srcY += s->current_picture_ptr->f.linesize[0];
802 
803  if (fieldmv && !(src_y & 1))
804  v_edge_pos--;
805  if (fieldmv && (src_y & 1) && src_y < 4)
806  src_y--;
807     if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
808         || s->h_edge_pos < 13 || v_edge_pos < 23
809  || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
810  || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
811  srcY -= s->mspel * (1 + (s->linesize << fieldmv));
812  /* check emulate edge stride and offset */
813         s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
814                                 9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
815  src_x - s->mspel, src_y - (s->mspel << fieldmv),
816  s->h_edge_pos, v_edge_pos);
817  srcY = s->edge_emu_buffer;
818  /* if we deal with range reduction we need to scale source blocks */
819  if (v->rangeredfrm) {
820  int i, j;
821  uint8_t *src;
822 
823  src = srcY;
824  for (j = 0; j < 9 + s->mspel * 2; j++) {
825  for (i = 0; i < 9 + s->mspel * 2; i++)
826  src[i] = ((src[i] - 128) >> 1) + 128;
827  src += s->linesize << fieldmv;
828  }
829  }
830  /* if we deal with intensity compensation we need to scale source blocks */
831  if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
832  int i, j;
833  uint8_t *src;
834 
835  src = srcY;
836  for (j = 0; j < 9 + s->mspel * 2; j++) {
837  for (i = 0; i < 9 + s->mspel * 2; i++)
838  src[i] = v->luty[src[i]];
839  src += s->linesize << fieldmv;
840  }
841  }
842  srcY += s->mspel * (1 + (s->linesize << fieldmv));
843  }
844 
845  if (s->mspel) {
846  dxy = ((my & 3) << 2) | (mx & 3);
847  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
848  } else { // hpel mc - always used for luma
849  dxy = (my & 2) | ((mx & 2) >> 1);
850  if (!v->rnd)
851  dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
852  else
853  dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
854  }
855 }
856 
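/* Derive a single chroma MV from up to four luma MVs. a[] marks blocks to
 * exclude (those whose entry differs from `flag`): with 4 usable MVs the
 * component-wise median of four is taken, with 3 the median of three, with 2
 * their average; otherwise no MV is produced. Returns the number of MVs used. */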
857 static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
858 {
859  int idx, i;
860  static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
861 
862  idx = ((a[3] != flag) << 3)
863  | ((a[2] != flag) << 2)
864  | ((a[1] != flag) << 1)
865  | (a[0] != flag);
866  if (!idx) {
867  *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
868  *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
869  return 4;
870  } else if (count[idx] == 1) {
871  switch (idx) {
872  case 0x1:
873  *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
874  *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
875  return 3;
876  case 0x2:
877  *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
878  *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
879  return 3;
880  case 0x4:
881  *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
882  *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
883  return 3;
884  case 0x8:
885  *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
886  *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
887  return 3;
888  }
889  } else if (count[idx] == 2) {
890  int t1 = 0, t2 = 0;
891  for (i = 0; i < 3; i++)
892  if (!a[i]) {
893  t1 = i;
894  break;
895  }
896  for (i = t1 + 1; i < 4; i++)
897  if (!a[i]) {
898  t2 = i;
899  break;
900  }
901  *tx = (mvx[t1] + mvx[t2]) / 2;
902  *ty = (mvy[t1] + mvy[t2]) / 2;
903  return 2;
904  } else {
905  return 0;
906  }
907  return -1;
908 }
909 
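/** Do motion compensation for 4-MV macroblock - both chroma blocks,
 *  using the chroma MV derived from the luma MVs by get_chroma_mv().
 */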
912 static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
913 {
914  MpegEncContext *s = &v->s;
915  DSPContext *dsp = &v->s.dsp;
916  uint8_t *srcU, *srcV;
917  int uvmx, uvmy, uvsrc_x, uvsrc_y;
918  int k, tx = 0, ty = 0;
919  int mvx[4], mvy[4], intra[4], mv_f[4];
920  int valid_count;
921  int chroma_ref_type = v->cur_field_type, off = 0;
922  int v_edge_pos = s->v_edge_pos >> v->field_mode;
923 
924  if (!v->field_mode && !v->s.last_picture.f.data[0])
925  return;
926  if (s->flags & CODEC_FLAG_GRAY)
927  return;
928 
929  for (k = 0; k < 4; k++) {
930  mvx[k] = s->mv[dir][k][0];
931  mvy[k] = s->mv[dir][k][1];
932  intra[k] = v->mb_type[0][s->block_index[k]];
933  if (v->field_mode)
934  mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
935  }
936 
937  /* calculate chroma MV vector from four luma MVs */
938  if (!v->field_mode || (v->field_mode && !v->numref)) {
939  valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
940  if (!valid_count) {
941  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
942  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
943  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
944  return; //no need to do MC for intra blocks
945  }
946  } else {
947  int dominant = 0;
948  if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
949  dominant = 1;
950  valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
951  if (dominant)
952  chroma_ref_type = !v->cur_field_type;
953  }
954  if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f.data[0])
955  return;
956  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
957  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
958  uvmx = (tx + ((tx & 3) == 3)) >> 1;
959  uvmy = (ty + ((ty & 3) == 3)) >> 1;
960 
961  v->luma_mv[s->mb_x][0] = uvmx;
962  v->luma_mv[s->mb_x][1] = uvmy;
963 
964  if (v->fastuvmc) {
965  uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
966  uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
967  }
968  // Field conversion bias
969  if (v->cur_field_type != chroma_ref_type)
970  uvmy += 2 - 4 * chroma_ref_type;
971 
972  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
973  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
974 
975  if (v->profile != PROFILE_ADVANCED) {
976  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
977  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
978  } else {
979  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
980  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
981  }
982 
983  if (!dir) {
984  if (v->field_mode) {
985  if ((v->cur_field_type != chroma_ref_type) && v->cur_field_type) {
986  srcU = s->current_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
987  srcV = s->current_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
988  } else {
989  srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
990  srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
991  }
992  } else {
993  srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
994  srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
995  }
996  } else {
997  srcU = s->next_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
998  srcV = s->next_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
999  }
1000 
1001  if (v->field_mode) {
1002  if (chroma_ref_type) {
1003  srcU += s->current_picture_ptr->f.linesize[1];
1004  srcV += s->current_picture_ptr->f.linesize[2];
1005  }
1006  off = v->cur_field_type ? s->current_picture_ptr->f.linesize[1] : 0;
1007  }
1008 
1009  if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1010  || s->h_edge_pos < 18 || v_edge_pos < 18
1011  || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
1012  || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
1013         s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcU, s->uvlinesize,
1014                                 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
1015  s->h_edge_pos >> 1, v_edge_pos >> 1);
1016  s->dsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
1017  8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
1018  s->h_edge_pos >> 1, v_edge_pos >> 1);
1019  srcU = s->edge_emu_buffer;
1020  srcV = s->edge_emu_buffer + 16;
1021 
1022  /* if we deal with range reduction we need to scale source blocks */
1023  if (v->rangeredfrm) {
1024  int i, j;
1025  uint8_t *src, *src2;
1026 
1027  src = srcU;
1028  src2 = srcV;
1029  for (j = 0; j < 9; j++) {
1030  for (i = 0; i < 9; i++) {
1031  src[i] = ((src[i] - 128) >> 1) + 128;
1032  src2[i] = ((src2[i] - 128) >> 1) + 128;
1033  }
1034  src += s->uvlinesize;
1035  src2 += s->uvlinesize;
1036  }
1037  }
1038  /* if we deal with intensity compensation we need to scale source blocks */
1039  if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1040  int i, j;
1041  uint8_t *src, *src2;
1042 
1043  src = srcU;
1044  src2 = srcV;
1045  for (j = 0; j < 9; j++) {
1046  for (i = 0; i < 9; i++) {
1047  src[i] = v->lutuv[src[i]];
1048  src2[i] = v->lutuv[src2[i]];
1049  }
1050  src += s->uvlinesize;
1051  src2 += s->uvlinesize;
1052  }
1053  }
1054  }
1055 
1056  /* Chroma MC always uses qpel bilinear */
1057  uvmx = (uvmx & 3) << 1;
1058  uvmy = (uvmy & 3) << 1;
1059  if (!v->rnd) {
1060  dsp->put_h264_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
1061  dsp->put_h264_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
1062  } else {
1063  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
1064  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
1065  }
1066 }
1067 
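/* Chroma MC for a 4-MV macroblock in an interlaced frame picture: each of the
 * four 4x4 chroma sub-blocks gets its own MV derived from the corresponding
 * luma MV (with field-based rounding when the block uses field MVs). */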
1070 static void vc1_mc_4mv_chroma4(VC1Context *v)
1071 {
1072  MpegEncContext *s = &v->s;
1073  DSPContext *dsp = &v->s.dsp;
1074  uint8_t *srcU, *srcV;
1075  int uvsrc_x, uvsrc_y;
1076  int uvmx_field[4], uvmy_field[4];
1077  int i, off, tx, ty;
1078  int fieldmv = v->blk_mv_type[s->block_index[0]];
1079  static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
1080  int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
1081  int v_edge_pos = s->v_edge_pos >> 1;
1082 
1083  if (!v->s.last_picture.f.data[0])
1084  return;
1085  if (s->flags & CODEC_FLAG_GRAY)
1086  return;
1087 
1088  for (i = 0; i < 4; i++) {
1089  tx = s->mv[0][i][0];
1090  uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
1091  ty = s->mv[0][i][1];
1092  if (fieldmv)
1093  uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
1094  else
1095  uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
1096  }
1097 
1098  for (i = 0; i < 4; i++) {
1099  off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
1100  uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
1101  uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
1102  // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
1103  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1104  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1105  srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1106  srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
1107  uvmx_field[i] = (uvmx_field[i] & 3) << 1;
1108  uvmy_field[i] = (uvmy_field[i] & 3) << 1;
1109 
1110  if (fieldmv && !(uvsrc_y & 1))
1111  v_edge_pos--;
1112  if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
1113  uvsrc_y--;
1114  if ((v->mv_mode == MV_PMODE_INTENSITY_COMP)
1115  || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
1116  || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
1117  || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
1118         s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcU, s->uvlinesize,
1119                                 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1120  s->h_edge_pos >> 1, v_edge_pos);
1121  s->dsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
1122  5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1123  s->h_edge_pos >> 1, v_edge_pos);
1124  srcU = s->edge_emu_buffer;
1125  srcV = s->edge_emu_buffer + 16;
1126 
1127  /* if we deal with intensity compensation we need to scale source blocks */
1128  if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1129  int i, j;
1130  uint8_t *src, *src2;
1131 
1132  src = srcU;
1133  src2 = srcV;
1134  for (j = 0; j < 5; j++) {
1135  for (i = 0; i < 5; i++) {
1136  src[i] = v->lutuv[src[i]];
1137  src2[i] = v->lutuv[src2[i]];
1138  }
1139  src += s->uvlinesize << 1;
1140  src2 += s->uvlinesize << 1;
1141  }
1142  }
1143  }
1144  if (!v->rnd) {
1145  dsp->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1146  dsp->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1147  } else {
1148  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1149  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1150  }
1151  }
1152 }
1153 
1154 /***********************************************************************/
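/* GET_MQUANT(): set mquant for the current macroblock when DQUANT is in use.
 * With DQPROFILE_ALL_MBS it is signalled per MB (a PQ/ALTPQ choice when
 * dqbilevel, otherwise a 3-bit delta to PQ or an escaped 5-bit value);
 * otherwise `edges` is a left/top/right/bottom bitmask (1/2/4/8) of frame
 * edges whose macroblocks use ALTPQ instead of PQ. */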
1165 #define GET_MQUANT() \
1166  if (v->dquantfrm) { \
1167  int edges = 0; \
1168  if (v->dqprofile == DQPROFILE_ALL_MBS) { \
1169  if (v->dqbilevel) { \
1170  mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
1171  } else { \
1172  mqdiff = get_bits(gb, 3); \
1173  if (mqdiff != 7) \
1174  mquant = v->pq + mqdiff; \
1175  else \
1176  mquant = get_bits(gb, 5); \
1177  } \
1178  } \
1179  if (v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1180  edges = 1 << v->dqsbedge; \
1181  else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1182  edges = (3 << v->dqsbedge) % 15; \
1183  else if (v->dqprofile == DQPROFILE_FOUR_EDGES) \
1184  edges = 15; \
1185  if ((edges&1) && !s->mb_x) \
1186  mquant = v->altpq; \
1187  if ((edges&2) && s->first_slice_line) \
1188  mquant = v->altpq; \
1189  if ((edges&4) && s->mb_x == (s->mb_width - 1)) \
1190  mquant = v->altpq; \
1191  if ((edges&8) && s->mb_y == (s->mb_height - 1)) \
1192  mquant = v->altpq; \
1193  }
1194 
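/* GET_MVDATA(): decode one MV-differential codeword. index 0 means a zero
 * differential, index 35 is the escape (raw (k_x - 1 + quarter_sample)-bit
 * and (k_y - 1 + quarter_sample)-bit components), index 36 marks an intra MB;
 * all other indices split into index % 6 and index / 6, each indexing the
 * size/offset tables to rebuild the signed horizontal/vertical differentials. */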
1202 #define GET_MVDATA(_dmv_x, _dmv_y) \
1203  index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
1204  VC1_MV_DIFF_VLC_BITS, 2); \
1205  if (index > 36) { \
1206  mb_has_coeffs = 1; \
1207  index -= 37; \
1208  } else \
1209  mb_has_coeffs = 0; \
1210  s->mb_intra = 0; \
1211  if (!index) { \
1212  _dmv_x = _dmv_y = 0; \
1213  } else if (index == 35) { \
1214  _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1215  _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1216  } else if (index == 36) { \
1217  _dmv_x = 0; \
1218  _dmv_y = 0; \
1219  s->mb_intra = 1; \
1220  } else { \
1221  index1 = index % 6; \
1222  if (!s->quarter_sample && index1 == 5) val = 1; \
1223  else val = 0; \
1224  if (size_table[index1] - val > 0) \
1225  val = get_bits(gb, size_table[index1] - val); \
1226  else val = 0; \
1227  sign = 0 - (val&1); \
1228  _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1229  \
1230  index1 = index / 6; \
1231  if (!s->quarter_sample && index1 == 5) val = 1; \
1232  else val = 0; \
1233  if (size_table[index1] - val > 0) \
1234  val = get_bits(gb, size_table[index1] - val); \
1235  else val = 0; \
1236  sign = 0 - (val & 1); \
1237  _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign; \
1238  }
1239 
1240 static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
1241                                                    int *dmv_y, int *pred_flag)
1242 {
1243  int index, index1;
1244  int extend_x = 0, extend_y = 0;
1245  GetBitContext *gb = &v->s.gb;
1246  int bits, esc;
1247  int val, sign;
1248  const int* offs_tab;
1249 
1250  if (v->numref) {
1251  bits = VC1_2REF_MVDATA_VLC_BITS;
1252  esc = 125;
1253  } else {
1254  bits = VC1_1REF_MVDATA_VLC_BITS;
1255  esc = 71;
1256  }
1257  switch (v->dmvrange) {
1258  case 1:
1259  extend_x = 1;
1260  break;
1261  case 2:
1262  extend_y = 1;
1263  break;
1264  case 3:
1265  extend_x = extend_y = 1;
1266  break;
1267  }
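    /* A single joint VLC codes both MV components: (index + 1) % 9 selects the
     * horizontal magnitude class and (index + 1) / 9 the vertical one, while
     * `esc` instead signals raw k_x/k_y-bit components (with the low bit of
     * dmv_y carrying the prediction flag in 2-reference pictures). */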
1268  index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
1269  if (index == esc) {
1270  *dmv_x = get_bits(gb, v->k_x);
1271  *dmv_y = get_bits(gb, v->k_y);
1272  if (v->numref) {
1273  *pred_flag = *dmv_y & 1;
1274  *dmv_y = (*dmv_y + *pred_flag) >> 1;
1275  }
1276  }
1277  else {
1278  if (extend_x)
1279  offs_tab = offset_table2;
1280  else
1281  offs_tab = offset_table1;
1282  index1 = (index + 1) % 9;
1283  if (index1 != 0) {
1284  val = get_bits(gb, index1 + extend_x);
1285  sign = 0 -(val & 1);
1286  *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
1287  } else
1288  *dmv_x = 0;
1289  if (extend_y)
1290  offs_tab = offset_table2;
1291  else
1292  offs_tab = offset_table1;
1293  index1 = (index + 1) / 9;
1294  if (index1 > v->numref) {
1295  val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
1296  sign = 0 - (val & 1);
1297  *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
1298  } else
1299  *dmv_y = 0;
1300  if (v->numref)
1301  *pred_flag = index1 & 1;
1302  }
1303 }
1304 
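/* The scalefor*() helpers below rescale an MV predictor taken from a
 * neighbouring block that references a different field than the current MV,
 * so it can still be used as a prediction. Scale factors and zone offsets come
 * from the refdist-dependent tables vc1_field_mvpred_scales[] /
 * vc1_b_field_mvpred_scales[]: small MVs (inside "zone 1") use one scale
 * factor, larger ones a second factor plus an offset. */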
1305 static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
1306 {
1307  int scaledvalue, refdist;
1308  int scalesame1, scalesame2;
1309  int scalezone1_x, zone1offset_x;
1310  int table_index = dir ^ v->second_field;
1311 
1312  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1313  refdist = v->refdist;
1314  else
1315  refdist = dir ? v->brfd : v->frfd;
1316  if (refdist > 3)
1317  refdist = 3;
1318  scalesame1 = vc1_field_mvpred_scales[table_index][1][refdist];
1319  scalesame2 = vc1_field_mvpred_scales[table_index][2][refdist];
1320  scalezone1_x = vc1_field_mvpred_scales[table_index][3][refdist];
1321  zone1offset_x = vc1_field_mvpred_scales[table_index][5][refdist];
1322 
1323  if (FFABS(n) > 255)
1324  scaledvalue = n;
1325  else {
1326  if (FFABS(n) < scalezone1_x)
1327  scaledvalue = (n * scalesame1) >> 8;
1328  else {
1329  if (n < 0)
1330  scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1331  else
1332  scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1333  }
1334  }
1335  return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1336 }
1337 
1338 static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
1339 {
1340  int scaledvalue, refdist;
1341  int scalesame1, scalesame2;
1342  int scalezone1_y, zone1offset_y;
1343  int table_index = dir ^ v->second_field;
1344 
1345  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1346  refdist = v->refdist;
1347  else
1348  refdist = dir ? v->brfd : v->frfd;
1349  if (refdist > 3)
1350  refdist = 3;
1351  scalesame1 = vc1_field_mvpred_scales[table_index][1][refdist];
1352  scalesame2 = vc1_field_mvpred_scales[table_index][2][refdist];
1353  scalezone1_y = vc1_field_mvpred_scales[table_index][4][refdist];
1354  zone1offset_y = vc1_field_mvpred_scales[table_index][6][refdist];
1355 
1356  if (FFABS(n) > 63)
1357  scaledvalue = n;
1358  else {
1359  if (FFABS(n) < scalezone1_y)
1360  scaledvalue = (n * scalesame1) >> 8;
1361  else {
1362  if (n < 0)
1363  scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1364  else
1365  scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
1366  }
1367  }
1368 
1369  if (v->cur_field_type && !v->ref_field_type[dir])
1370  return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1371  else
1372  return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1373 }
1374 
1375 static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
1376 {
1377  int scalezone1_x, zone1offset_x;
1378  int scaleopp1, scaleopp2, brfd;
1379  int scaledvalue;
1380 
1381  brfd = FFMIN(v->brfd, 3);
1382  scalezone1_x = vc1_b_field_mvpred_scales[3][brfd];
1383  zone1offset_x = vc1_b_field_mvpred_scales[5][brfd];
1384  scaleopp1 = vc1_b_field_mvpred_scales[1][brfd];
1385  scaleopp2 = vc1_b_field_mvpred_scales[2][brfd];
1386 
1387  if (FFABS(n) > 255)
1388  scaledvalue = n;
1389  else {
1390  if (FFABS(n) < scalezone1_x)
1391  scaledvalue = (n * scaleopp1) >> 8;
1392  else {
1393  if (n < 0)
1394  scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1395  else
1396  scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1397  }
1398  }
1399  return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1400 }
1401 
1402 static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
1403 {
1404  int scalezone1_y, zone1offset_y;
1405  int scaleopp1, scaleopp2, brfd;
1406  int scaledvalue;
1407 
1408  brfd = FFMIN(v->brfd, 3);
1409  scalezone1_y = vc1_b_field_mvpred_scales[4][brfd];
1410  zone1offset_y = vc1_b_field_mvpred_scales[6][brfd];
1411  scaleopp1 = vc1_b_field_mvpred_scales[1][brfd];
1412  scaleopp2 = vc1_b_field_mvpred_scales[2][brfd];
1413 
1414  if (FFABS(n) > 63)
1415  scaledvalue = n;
1416  else {
1417  if (FFABS(n) < scalezone1_y)
1418  scaledvalue = (n * scaleopp1) >> 8;
1419  else {
1420  if (n < 0)
1421  scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1422  else
1423  scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1424  }
1425  }
1426  if (v->cur_field_type && !v->ref_field_type[dir]) {
1427  return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1428  } else {
1429  return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1430  }
1431 }
1432 
1433 static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
1434  int dim, int dir)
1435 {
1436  int brfd, scalesame;
1437  int hpel = 1 - v->s.quarter_sample;
1438 
1439  n >>= hpel;
1440  if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
1441  if (dim)
1442  n = scaleforsame_y(v, i, n, dir) << hpel;
1443  else
1444  n = scaleforsame_x(v, n, dir) << hpel;
1445  return n;
1446  }
1447  brfd = FFMIN(v->brfd, 3);
1448  scalesame = vc1_b_field_mvpred_scales[0][brfd];
1449 
1450  n = (n * scalesame >> 8) << hpel;
1451  return n;
1452 }
1453 
1454 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
1455  int dim, int dir)
1456 {
1457  int refdist, scaleopp;
1458  int hpel = 1 - v->s.quarter_sample;
1459 
1460  n >>= hpel;
1461  if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
1462  if (dim)
1463  n = scaleforopp_y(v, n, dir) << hpel;
1464  else
1465  n = scaleforopp_x(v, n) << hpel;
1466  return n;
1467  }
1468  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1469  refdist = FFMIN(v->refdist, 3);
1470  else
1471  refdist = dir ? v->brfd : v->frfd;
1472  scaleopp = vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
1473 
1474  n = (n * scaleopp >> 8) << hpel;
1475  return n;
1476 }
1477 
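/** Predict and set motion vector for block n: the predictor is derived from
 *  neighbours A (above), B (above, offset) and C (left), with field-mode
 *  scaling, MV pullback and hybrid prediction applied before the differential
 *  is added.
 */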
1480 static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
1481  int mv1, int r_x, int r_y, uint8_t* is_intra,
1482  int pred_flag, int dir)
1483 {
1484  MpegEncContext *s = &v->s;
1485  int xy, wrap, off = 0;
1486  int16_t *A, *B, *C;
1487  int px, py;
1488  int sum;
1489  int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
1490  int opposit, a_f, b_f, c_f;
1491  int16_t field_predA[2];
1492  int16_t field_predB[2];
1493  int16_t field_predC[2];
1494  int a_valid, b_valid, c_valid;
1495  int hybridmv_thresh, y_bias = 0;
1496 
1497     if (v->mv_mode == MV_PMODE_MIXED_MV ||
1498         (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_MIXED_MV))
1499         mixedmv_pic = 1;
1500  else
1501  mixedmv_pic = 0;
1502  /* scale MV difference to be quad-pel */
1503  dmv_x <<= 1 - s->quarter_sample;
1504  dmv_y <<= 1 - s->quarter_sample;
1505 
1506  wrap = s->b8_stride;
1507  xy = s->block_index[n];
1508 
1509  if (s->mb_intra) {
1510  s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = 0;
1511  s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = 0;
1512  s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = 0;
1513  s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
1514  if (mv1) { /* duplicate motion data for 1-MV block */
1515  s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
1516  s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
1517  s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
1518  s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
1519  s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
1520  s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
1521  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1522  s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
1523  s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
1524  s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
1525  s->current_picture.f.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
1526  s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
1527  s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
1528  }
1529  return;
1530  }
1531 
1532  C = s->current_picture.f.motion_val[dir][xy - 1 + v->blocks_off];
1533  A = s->current_picture.f.motion_val[dir][xy - wrap + v->blocks_off];
1534  if (mv1) {
1535  if (v->field_mode && mixedmv_pic)
1536  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1537  else
1538  off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1539  } else {
1540  //in 4-MV mode different blocks have different B predictor position
1541  switch (n) {
1542  case 0:
1543  off = (s->mb_x > 0) ? -1 : 1;
1544  break;
1545  case 1:
1546  off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1547  break;
1548  case 2:
1549  off = 1;
1550  break;
1551  case 3:
1552  off = -1;
1553  }
1554  }
1555  B = s->current_picture.f.motion_val[dir][xy - wrap + off + v->blocks_off];
1556 
1557  a_valid = !s->first_slice_line || (n == 2 || n == 3);
1558  b_valid = a_valid && (s->mb_width > 1);
1559  c_valid = s->mb_x || (n == 1 || n == 3);
1560  if (v->field_mode) {
1561  a_valid = a_valid && !is_intra[xy - wrap];
1562  b_valid = b_valid && !is_intra[xy - wrap + off];
1563  c_valid = c_valid && !is_intra[xy - 1];
1564  }
1565 
1566  if (a_valid) {
1567  a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
1568  num_oppfield += a_f;
1569  num_samefield += 1 - a_f;
1570  field_predA[0] = A[0];
1571  field_predA[1] = A[1];
1572  } else {
1573  field_predA[0] = field_predA[1] = 0;
1574  a_f = 0;
1575  }
1576  if (b_valid) {
1577  b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
1578  num_oppfield += b_f;
1579  num_samefield += 1 - b_f;
1580  field_predB[0] = B[0];
1581  field_predB[1] = B[1];
1582  } else {
1583  field_predB[0] = field_predB[1] = 0;
1584  b_f = 0;
1585  }
1586  if (c_valid) {
1587  c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
1588  num_oppfield += c_f;
1589  num_samefield += 1 - c_f;
1590  field_predC[0] = C[0];
1591  field_predC[1] = C[1];
1592  } else {
1593  field_predC[0] = field_predC[1] = 0;
1594  c_f = 0;
1595  }
1596 
1597  if (v->field_mode) {
1598  if (num_samefield <= num_oppfield)
1599  opposit = 1 - pred_flag;
1600  else
1601  opposit = pred_flag;
1602  } else
1603  opposit = 0;
1604  if (opposit) {
1605  if (a_valid && !a_f) {
1606  field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
1607  field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
1608  }
1609  if (b_valid && !b_f) {
1610  field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
1611  field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
1612  }
1613  if (c_valid && !c_f) {
1614  field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
1615  field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
1616  }
1617  v->mv_f[dir][xy + v->blocks_off] = 1;
1618  v->ref_field_type[dir] = !v->cur_field_type;
1619  } else {
1620  if (a_valid && a_f) {
1621  field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
1622  field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
1623  }
1624  if (b_valid && b_f) {
1625  field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
1626  field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
1627  }
1628  if (c_valid && c_f) {
1629  field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
1630  field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
1631  }
1632  v->mv_f[dir][xy + v->blocks_off] = 0;
1633  v->ref_field_type[dir] = v->cur_field_type;
1634  }
1635 
1636  if (a_valid) {
1637  px = field_predA[0];
1638  py = field_predA[1];
1639  } else if (c_valid) {
1640  px = field_predC[0];
1641  py = field_predC[1];
1642  } else if (b_valid) {
1643  px = field_predB[0];
1644  py = field_predB[1];
1645  } else {
1646  px = 0;
1647  py = 0;
1648  }
1649 
1650  if (num_samefield + num_oppfield > 1) {
1651  px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
1652  py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
1653  }
1654 
1655  /* Pullback MV as specified in 8.3.5.3.4 */
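    /* qx/qy are the block position in quarter-pel units (1 MB = 64); the
     * predictor is clamped so the referenced block cannot start more than
     * roughly one MB (one 8x8 block in 4-MV mode) beyond the left/top edge,
     * nor beyond the coded frame at the right/bottom. */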
1656  if (!v->field_mode) {
1657  int qx, qy, X, Y;
1658  qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1659  qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1660  X = (s->mb_width << 6) - 4;
1661  Y = (s->mb_height << 6) - 4;
1662  if (mv1) {
1663  if (qx + px < -60) px = -60 - qx;
1664  if (qy + py < -60) py = -60 - qy;
1665  } else {
1666  if (qx + px < -28) px = -28 - qx;
1667  if (qy + py < -28) py = -28 - qy;
1668  }
1669  if (qx + px > X) px = X - qx;
1670  if (qy + py > Y) py = Y - qy;
1671  }
1672 
1673  if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
1674  /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
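    /* If the chosen (median) predictor differs from A, or failing that from C,
     * by more than hybridmv_thresh quarter-pel units, an explicit HYBRIDPRED
     * bit selects either A or C directly instead of the median. */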
1675  hybridmv_thresh = 32;
1676  if (a_valid && c_valid) {
1677  if (is_intra[xy - wrap])
1678  sum = FFABS(px) + FFABS(py);
1679  else
1680  sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
1681  if (sum > hybridmv_thresh) {
1682  if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
1683  px = field_predA[0];
1684  py = field_predA[1];
1685  } else {
1686  px = field_predC[0];
1687  py = field_predC[1];
1688  }
1689  } else {
1690  if (is_intra[xy - 1])
1691  sum = FFABS(px) + FFABS(py);
1692  else
1693  sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
1694  if (sum > hybridmv_thresh) {
1695  if (get_bits1(&s->gb)) {
1696  px = field_predA[0];
1697  py = field_predA[1];
1698  } else {
1699  px = field_predC[0];
1700  py = field_predC[1];
1701  }
1702  }
1703  }
1704  }
1705  }
1706 
1707  if (v->field_mode && !s->quarter_sample) {
1708  r_x <<= 1;
1709  r_y <<= 1;
1710  }
1711  if (v->field_mode && v->numref)
1712  r_y >>= 1;
1713  if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
1714  y_bias = 1;
1715  /* store MV using signed modulus of MV range defined in 4.11 */
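    /* ((x + r) & (2 * r - 1)) - r wraps the predictor + differential into the
     * signed range [-r, r - 1]; r_x/r_y are powers of two. */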
1716  s->mv[dir][n][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1717  s->mv[dir][n][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
1718  if (mv1) { /* duplicate motion data for 1-MV block */
1719  s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1720  s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1721  s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1722  s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1723  s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1724  s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1725  v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1726  v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1727  }
1728 }
1729 
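/** Predict and set motion vector(s) for an interlaced frame picture MB:
 *  neighbouring predictors are averaged when the current and candidate blocks
 *  use different MV types (field vs. frame).
 */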
1732 static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
1733  int mvn, int r_x, int r_y, uint8_t* is_intra)
1734 {
1735  MpegEncContext *s = &v->s;
1736  int xy, wrap, off = 0;
1737  int A[2], B[2], C[2];
1738  int px, py;
1739  int a_valid = 0, b_valid = 0, c_valid = 0;
1740     int field_a, field_b, field_c; // 0: same, 1: opposite
1741  int total_valid, num_samefield, num_oppfield;
1742  int pos_c, pos_b, n_adj;
1743 
1744  wrap = s->b8_stride;
1745  xy = s->block_index[n];
1746 
1747  if (s->mb_intra) {
1748  s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = 0;
1749  s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = 0;
1750  s->current_picture.f.motion_val[1][xy][0] = 0;
1751  s->current_picture.f.motion_val[1][xy][1] = 0;
1752  if (mvn == 1) { /* duplicate motion data for 1-MV block */
1753  s->current_picture.f.motion_val[0][xy + 1][0] = 0;
1754  s->current_picture.f.motion_val[0][xy + 1][1] = 0;
1755  s->current_picture.f.motion_val[0][xy + wrap][0] = 0;
1756  s->current_picture.f.motion_val[0][xy + wrap][1] = 0;
1757  s->current_picture.f.motion_val[0][xy + wrap + 1][0] = 0;
1758  s->current_picture.f.motion_val[0][xy + wrap + 1][1] = 0;
1759  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1760  s->current_picture.f.motion_val[1][xy + 1][0] = 0;
1761  s->current_picture.f.motion_val[1][xy + 1][1] = 0;
1762  s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
1763  s->current_picture.f.motion_val[1][xy + wrap][1] = 0;
1764  s->current_picture.f.motion_val[1][xy + wrap + 1][0] = 0;
1765  s->current_picture.f.motion_val[1][xy + wrap + 1][1] = 0;
1766  }
1767  return;
1768  }
1769 
1770  off = ((n == 0) || (n == 1)) ? 1 : -1;
1771  /* predict A */
1772  if (s->mb_x || (n == 1) || (n == 3)) {
1773  if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
1774  || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
1775  A[0] = s->current_picture.f.motion_val[0][xy - 1][0];
1776  A[1] = s->current_picture.f.motion_val[0][xy - 1][1];
1777  a_valid = 1;
1778  } else { // current block has frame mv and cand. has field MV (so average)
1779  A[0] = (s->current_picture.f.motion_val[0][xy - 1][0]
1780  + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][0] + 1) >> 1;
1781  A[1] = (s->current_picture.f.motion_val[0][xy - 1][1]
1782  + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][1] + 1) >> 1;
1783  a_valid = 1;
1784  }
1785  if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
1786  a_valid = 0;
1787  A[0] = A[1] = 0;
1788  }
1789  } else
1790  A[0] = A[1] = 0;
1791  /* Predict B and C */
1792  B[0] = B[1] = C[0] = C[1] = 0;
1793  if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
1794  if (!s->first_slice_line) {
1795  if (!v->is_intra[s->mb_x - s->mb_stride]) {
1796  b_valid = 1;
1797  n_adj = n | 2;
1798  pos_b = s->block_index[n_adj] - 2 * wrap;
1799  if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
1800  n_adj = (n & 2) | (n & 1);
1801  }
1802  B[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][0];
1803  B[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][1];
1804  if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
1805  B[0] = (B[0] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
1806  B[1] = (B[1] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
1807  }
1808  }
1809  if (s->mb_width > 1) {
1810  if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
1811  c_valid = 1;
1812  n_adj = 2;
1813  pos_c = s->block_index[2] - 2 * wrap + 2;
1814  if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1815  n_adj = n & 2;
1816  }
1817  C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][0];
1818  C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][1];
1819  if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1820  C[0] = (1 + C[0] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
1821  C[1] = (1 + C[1] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
1822  }
1823  if (s->mb_x == s->mb_width - 1) {
1824  if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
1825  c_valid = 1;
1826  n_adj = 3;
1827  pos_c = s->block_index[3] - 2 * wrap - 2;
1828  if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1829  n_adj = n | 1;
1830  }
1831  C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][0];
1832  C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][1];
1833  if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1834  C[0] = (1 + C[0] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
1835  C[1] = (1 + C[1] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
1836  }
1837  } else
1838  c_valid = 0;
1839  }
1840  }
1841  }
1842  }
1843  } else {
1844  pos_b = s->block_index[1];
1845  b_valid = 1;
1846  B[0] = s->current_picture.f.motion_val[0][pos_b][0];
1847  B[1] = s->current_picture.f.motion_val[0][pos_b][1];
1848  pos_c = s->block_index[0];
1849  c_valid = 1;
1850  C[0] = s->current_picture.f.motion_val[0][pos_c][0];
1851  C[1] = s->current_picture.f.motion_val[0][pos_c][1];
1852  }
1853 
1854  total_valid = a_valid + b_valid + c_valid;
1855  // check if predictor A is out of bounds
1856  if (!s->mb_x && !(n == 1 || n == 3)) {
1857  A[0] = A[1] = 0;
1858  }
1859  // check if predictor B is out of bounds
1860  if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
1861  B[0] = B[1] = C[0] = C[1] = 0;
1862  }
1863  if (!v->blk_mv_type[xy]) {
1864  if (s->mb_width == 1) {
1865  px = B[0];
1866  py = B[1];
1867  } else {
1868  if (total_valid >= 2) {
1869  px = mid_pred(A[0], B[0], C[0]);
1870  py = mid_pred(A[1], B[1], C[1]);
1871  } else if (total_valid) {
1872  if (a_valid) { px = A[0]; py = A[1]; }
1873  if (b_valid) { px = B[0]; py = B[1]; }
1874  if (c_valid) { px = C[0]; py = C[1]; }
1875  } else
1876  px = py = 0;
1877  }
1878  } else {
1879  if (a_valid)
1880  field_a = (A[1] & 4) ? 1 : 0;
1881  else
1882  field_a = 0;
1883  if (b_valid)
1884  field_b = (B[1] & 4) ? 1 : 0;
1885  else
1886  field_b = 0;
1887  if (c_valid)
1888  field_c = (C[1] & 4) ? 1 : 0;
1889  else
1890  field_c = 0;
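 /* Field MV candidates carry their field parity in bit 2 of the stored
  * vertical component; a set bit marks a candidate pointing to the opposite field. */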
1891 
1892  num_oppfield = field_a + field_b + field_c;
1893  num_samefield = total_valid - num_oppfield;
1894  if (total_valid == 3) {
1895  if ((num_samefield == 3) || (num_oppfield == 3)) {
1896  px = mid_pred(A[0], B[0], C[0]);
1897  py = mid_pred(A[1], B[1], C[1]);
1898  } else if (num_samefield >= num_oppfield) {
1899  /* take one MV from the same-field set depending on priority;
1900  the check for B may not be necessary */
1901  px = !field_a ? A[0] : B[0];
1902  py = !field_a ? A[1] : B[1];
1903  } else {
1904  px = field_a ? A[0] : B[0];
1905  py = field_a ? A[1] : B[1];
1906  }
1907  } else if (total_valid == 2) {
1908  if (num_samefield >= num_oppfield) {
1909  if (!field_a && a_valid) {
1910  px = A[0];
1911  py = A[1];
1912  } else if (!field_b && b_valid) {
1913  px = B[0];
1914  py = B[1];
1915  } else if (c_valid) {
1916  px = C[0];
1917  py = C[1];
1918  } else px = py = 0;
1919  } else {
1920  if (field_a && a_valid) {
1921  px = A[0];
1922  py = A[1];
1923  } else if (field_b && b_valid) {
1924  px = B[0];
1925  py = B[1];
1926  } else if (c_valid) {
1927  px = C[0];
1928  py = C[1];
1929  }
1930  }
1931  } else if (total_valid == 1) {
1932  px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
1933  py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
1934  } else
1935  px = py = 0;
1936  }
1937 
1938  /* store MV using signed modulus of MV range defined in 4.11 */
1939  s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1940  s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
1941  if (mvn == 1) { /* duplicate motion data for 1-MV block */
1942  s->current_picture.f.motion_val[0][xy + 1 ][0] = s->current_picture.f.motion_val[0][xy][0];
1943  s->current_picture.f.motion_val[0][xy + 1 ][1] = s->current_picture.f.motion_val[0][xy][1];
1944  s->current_picture.f.motion_val[0][xy + wrap ][0] = s->current_picture.f.motion_val[0][xy][0];
1945  s->current_picture.f.motion_val[0][xy + wrap ][1] = s->current_picture.f.motion_val[0][xy][1];
1946  s->current_picture.f.motion_val[0][xy + wrap + 1][0] = s->current_picture.f.motion_val[0][xy][0];
1947  s->current_picture.f.motion_val[0][xy + wrap + 1][1] = s->current_picture.f.motion_val[0][xy][1];
1948  } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
1949  s->current_picture.f.motion_val[0][xy + 1][0] = s->current_picture.f.motion_val[0][xy][0];
1950  s->current_picture.f.motion_val[0][xy + 1][1] = s->current_picture.f.motion_val[0][xy][1];
1951  s->mv[0][n + 1][0] = s->mv[0][n][0];
1952  s->mv[0][n + 1][1] = s->mv[0][n][1];
1953  }
1954 }
1955 
1956 /** Motion compensation for direct or interpolated blocks in B-frames
1957  */
1958 static void vc1_interp_mc(VC1Context *v)
1959 {
1960  MpegEncContext *s = &v->s;
1961  DSPContext *dsp = &v->s.dsp;
1962  uint8_t *srcY, *srcU, *srcV;
1963  int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1964  int off, off_uv;
1965  int v_edge_pos = s->v_edge_pos >> v->field_mode;
1966 
1967  if (!v->field_mode && !v->s.next_picture.f.data[0])
1968  return;
1969 
1970  mx = s->mv[1][0][0];
1971  my = s->mv[1][0][1];
1972  uvmx = (mx + ((mx & 3) == 3)) >> 1;
1973  uvmy = (my + ((my & 3) == 3)) >> 1;
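 /* Chroma MVs are the luma MV halved; the ((mx & 3) == 3) term nudges 3/4-pel
  * phases up by one before the shift, matching VC-1 chroma MV rounding. */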
1974  if (v->field_mode) {
1975  if (v->cur_field_type != v->ref_field_type[1])
1976  my = my - 2 + 4 * v->cur_field_type;
1977  uvmy = uvmy - 2 + 4 * v->cur_field_type;
1978  }
1979  if (v->fastuvmc) {
1980  uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
1981  uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
1982  }
1983  srcY = s->next_picture.f.data[0];
1984  srcU = s->next_picture.f.data[1];
1985  srcV = s->next_picture.f.data[2];
1986 
1987  src_x = s->mb_x * 16 + (mx >> 2);
1988  src_y = s->mb_y * 16 + (my >> 2);
1989  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1990  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1991 
1992  if (v->profile != PROFILE_ADVANCED) {
1993  src_x = av_clip( src_x, -16, s->mb_width * 16);
1994  src_y = av_clip( src_y, -16, s->mb_height * 16);
1995  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1996  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1997  } else {
1998  src_x = av_clip( src_x, -17, s->avctx->coded_width);
1999  src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
2000  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
2001  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
2002  }
2003 
2004  srcY += src_y * s->linesize + src_x;
2005  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
2006  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
2007 
2008  if (v->field_mode && v->ref_field_type[1]) {
2009  srcY += s->current_picture_ptr->f.linesize[0];
2010  srcU += s->current_picture_ptr->f.linesize[1];
2011  srcV += s->current_picture_ptr->f.linesize[2];
2012  }
2013 
2014  /* for grayscale we should not try to read from unknown area */
2015  if (s->flags & CODEC_FLAG_GRAY) {
2016  srcU = s->edge_emu_buffer + 18 * s->linesize;
2017  srcV = s->edge_emu_buffer + 18 * s->linesize;
2018  }
2019 
2020  if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22
2021  || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 16 - s->mspel * 3
2022  || (unsigned)(src_y - s->mspel) > v_edge_pos - (my & 3) - 16 - s->mspel * 3) {
2023  uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
2024 
2025  srcY -= s->mspel * (1 + s->linesize);
2026  s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
2027  17 + s->mspel * 2, 17 + s->mspel * 2,
2028  src_x - s->mspel, src_y - s->mspel,
2029  s->h_edge_pos, v_edge_pos);
2030  srcY = s->edge_emu_buffer;
2031  s->dsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
2032  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
2033  s->dsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
2034  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
2035  srcU = uvbuf;
2036  srcV = uvbuf + 16;
2037  /* if we deal with range reduction we need to scale source blocks */
2038  if (v->rangeredfrm) {
2039  int i, j;
2040  uint8_t *src, *src2;
2041 
2042  src = srcY;
2043  for (j = 0; j < 17 + s->mspel * 2; j++) {
2044  for (i = 0; i < 17 + s->mspel * 2; i++)
2045  src[i] = ((src[i] - 128) >> 1) + 128;
2046  src += s->linesize;
2047  }
2048  src = srcU;
2049  src2 = srcV;
2050  for (j = 0; j < 9; j++) {
2051  for (i = 0; i < 9; i++) {
2052  src[i] = ((src[i] - 128) >> 1) + 128;
2053  src2[i] = ((src2[i] - 128) >> 1) + 128;
2054  }
2055  src += s->uvlinesize;
2056  src2 += s->uvlinesize;
2057  }
2058  }
2059  srcY += s->mspel * (1 + s->linesize);
2060  }
2061 
2062  if (v->field_mode && v->cur_field_type) {
2063  off = s->current_picture_ptr->f.linesize[0];
2064  off_uv = s->current_picture_ptr->f.linesize[1];
2065  } else {
2066  off = 0;
2067  off_uv = 0;
2068  }
2069 
2070  if (s->mspel) {
2071  dxy = ((my & 3) << 2) | (mx & 3);
2072  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
2073  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
2074  srcY += s->linesize * 8;
2075  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
2076  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
2077  } else { // hpel mc
2078  dxy = (my & 2) | ((mx & 2) >> 1);
2079 
2080  if (!v->rnd)
2081  dsp->avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
2082  else
2083  dsp->avg_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
2084  }
2085 
2086  if (s->flags & CODEC_FLAG_GRAY) return;
2087  /* Chroma MC always uses qpel bilinear */
2088  uvmx = (uvmx & 3) << 1;
2089  uvmy = (uvmy & 3) << 1;
2090  if (!v->rnd) {
2091  dsp->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2092  dsp->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2093  } else {
2094  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2095  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2096  }
2097 }
2098 
2099 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2100 {
2101  int n = bfrac;
2102 
2103 #if B_FRACTION_DEN==256
2104  if (inv)
2105  n -= 256;
2106  if (!qs)
2107  return 2 * ((value * n + 255) >> 9);
2108  return (value * n + 128) >> 8;
2109 #else
2110  if (inv)
2111  n -= B_FRACTION_DEN;
2112  if (!qs)
2113  return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2114  return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
2115 #endif
2116 }
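/* scale_mv() splits the co-located MV of the next anchor picture according to
 * BFRACTION: e.g. with bfrac = 85 (roughly 1/3 of 256) and a co-located
 * component of 12, quarter-pel mode yields 4 for the forward part and -8 for
 * the backward part, so the two parts still differ by the original 12. */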
2117 
2118 static av_always_inline int scale_mv_intfi(int value, int bfrac, int inv,
2119  int qs, int qs_last)
2120 {
2121  int n = bfrac;
2122 
2123  if (inv)
2124  n -= 256;
2125  n <<= !qs_last;
2126  if (!qs)
2127  return (value * n + 255) >> 9;
2128  else
2129  return (value * n + 128) >> 8;
2130 }
2131 
2132 /** Reconstruct motion vector for B-frame and do motion compensation
2133  */
2134 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
2135  int direct, int mode)
2136 {
2137  if (v->use_ic) {
2138  v->mv_mode2 = v->mv_mode;
2139  v->mv_mode = MV_PMODE_INTENSITY_COMP;
2140  }
2141  if (direct) {
2142  vc1_mc_1mv(v, 0);
2143  vc1_interp_mc(v);
2144  if (v->use_ic)
2145  v->mv_mode = v->mv_mode2;
2146  return;
2147  }
2148  if (mode == BMV_TYPE_INTERPOLATED) {
2149  vc1_mc_1mv(v, 0);
2150  vc1_interp_mc(v);
2151  if (v->use_ic)
2152  v->mv_mode = v->mv_mode2;
2153  return;
2154  }
2155 
2156  if (v->use_ic && (mode == BMV_TYPE_BACKWARD))
2157  v->mv_mode = v->mv_mode2;
2158  vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2159  if (v->use_ic)
2160  v->mv_mode = v->mv_mode2;
2161 }
2162 
2163 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
2164  int direct, int mvtype)
2165 {
2166  MpegEncContext *s = &v->s;
2167  int xy, wrap, off = 0;
2168  int16_t *A, *B, *C;
2169  int px, py;
2170  int sum;
2171  int r_x, r_y;
2172  const uint8_t *is_intra = v->mb_type[0];
2173 
2174  r_x = v->range_x;
2175  r_y = v->range_y;
2176  /* scale MV difference to be quad-pel */
2177  dmv_x[0] <<= 1 - s->quarter_sample;
2178  dmv_y[0] <<= 1 - s->quarter_sample;
2179  dmv_x[1] <<= 1 - s->quarter_sample;
2180  dmv_y[1] <<= 1 - s->quarter_sample;
2181 
2182  wrap = s->b8_stride;
2183  xy = s->block_index[0];
2184 
2185  if (s->mb_intra) {
2186  s->current_picture.f.motion_val[0][xy + v->blocks_off][0] =
2187  s->current_picture.f.motion_val[0][xy + v->blocks_off][1] =
2188  s->current_picture.f.motion_val[1][xy + v->blocks_off][0] =
2189  s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
2190  return;
2191  }
2192  if (!v->field_mode) {
2193  s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2194  s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2195  s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2196  s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2197 
2198  /* Pullback predicted motion vectors as specified in 8.4.5.4 */
2199  s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2200  s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2201  s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2202  s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2203  }
2204  if (direct) {
2205  s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
2206  s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
2207  s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
2208  s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
2209  return;
2210  }
2211 
2212  if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2213  C = s->current_picture.f.motion_val[0][xy - 2];
2214  A = s->current_picture.f.motion_val[0][xy - wrap * 2];
2215  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2216  B = s->current_picture.f.motion_val[0][xy - wrap * 2 + off];
2217 
2218  if (!s->mb_x) C[0] = C[1] = 0;
2219  if (!s->first_slice_line) { // predictor A is not out of bounds
2220  if (s->mb_width == 1) {
2221  px = A[0];
2222  py = A[1];
2223  } else {
2224  px = mid_pred(A[0], B[0], C[0]);
2225  py = mid_pred(A[1], B[1], C[1]);
2226  }
2227  } else if (s->mb_x) { // predictor C is not out of bounds
2228  px = C[0];
2229  py = C[1];
2230  } else {
2231  px = py = 0;
2232  }
2233  /* Pullback MV as specified in 8.3.5.3.4 */
2234  {
2235  int qx, qy, X, Y;
2236  if (v->profile < PROFILE_ADVANCED) {
2237  qx = (s->mb_x << 5);
2238  qy = (s->mb_y << 5);
2239  X = (s->mb_width << 5) - 4;
2240  Y = (s->mb_height << 5) - 4;
2241  if (qx + px < -28) px = -28 - qx;
2242  if (qy + py < -28) py = -28 - qy;
2243  if (qx + px > X) px = X - qx;
2244  if (qy + py > Y) py = Y - qy;
2245  } else {
2246  qx = (s->mb_x << 6);
2247  qy = (s->mb_y << 6);
2248  X = (s->mb_width << 6) - 4;
2249  Y = (s->mb_height << 6) - 4;
2250  if (qx + px < -60) px = -60 - qx;
2251  if (qy + py < -60) py = -60 - qy;
2252  if (qx + px > X) px = X - qx;
2253  if (qy + py > Y) py = Y - qy;
2254  }
2255  }
2256  /* Calculate hybrid prediction as specified in 8.3.5.3.5; the constant 0 in the condition below keeps this branch disabled */
2257  if (0 && !s->first_slice_line && s->mb_x) {
2258  if (is_intra[xy - wrap])
2259  sum = FFABS(px) + FFABS(py);
2260  else
2261  sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2262  if (sum > 32) {
2263  if (get_bits1(&s->gb)) {
2264  px = A[0];
2265  py = A[1];
2266  } else {
2267  px = C[0];
2268  py = C[1];
2269  }
2270  } else {
2271  if (is_intra[xy - 2])
2272  sum = FFABS(px) + FFABS(py);
2273  else
2274  sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2275  if (sum > 32) {
2276  if (get_bits1(&s->gb)) {
2277  px = A[0];
2278  py = A[1];
2279  } else {
2280  px = C[0];
2281  py = C[1];
2282  }
2283  }
2284  }
2285  }
2286  /* store MV using signed modulus of MV range defined in 4.11 */
2287  s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2288  s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
2289  }
2290  if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2291  C = s->current_picture.f.motion_val[1][xy - 2];
2292  A = s->current_picture.f.motion_val[1][xy - wrap * 2];
2293  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2294  B = s->current_picture.f.motion_val[1][xy - wrap * 2 + off];
2295 
2296  if (!s->mb_x)
2297  C[0] = C[1] = 0;
2298  if (!s->first_slice_line) { // predictor A is not out of bounds
2299  if (s->mb_width == 1) {
2300  px = A[0];
2301  py = A[1];
2302  } else {
2303  px = mid_pred(A[0], B[0], C[0]);
2304  py = mid_pred(A[1], B[1], C[1]);
2305  }
2306  } else if (s->mb_x) { // predictor C is not out of bounds
2307  px = C[0];
2308  py = C[1];
2309  } else {
2310  px = py = 0;
2311  }
2312  /* Pullback MV as specified in 8.3.5.3.4 */
2313  {
2314  int qx, qy, X, Y;
2315  if (v->profile < PROFILE_ADVANCED) {
2316  qx = (s->mb_x << 5);
2317  qy = (s->mb_y << 5);
2318  X = (s->mb_width << 5) - 4;
2319  Y = (s->mb_height << 5) - 4;
2320  if (qx + px < -28) px = -28 - qx;
2321  if (qy + py < -28) py = -28 - qy;
2322  if (qx + px > X) px = X - qx;
2323  if (qy + py > Y) py = Y - qy;
2324  } else {
2325  qx = (s->mb_x << 6);
2326  qy = (s->mb_y << 6);
2327  X = (s->mb_width << 6) - 4;
2328  Y = (s->mb_height << 6) - 4;
2329  if (qx + px < -60) px = -60 - qx;
2330  if (qy + py < -60) py = -60 - qy;
2331  if (qx + px > X) px = X - qx;
2332  if (qy + py > Y) py = Y - qy;
2333  }
2334  }
2335  /* Calculate hybrid prediction as specified in 8.3.5.3.5; the constant 0 in the condition below keeps this branch disabled */
2336  if (0 && !s->first_slice_line && s->mb_x) {
2337  if (is_intra[xy - wrap])
2338  sum = FFABS(px) + FFABS(py);
2339  else
2340  sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2341  if (sum > 32) {
2342  if (get_bits1(&s->gb)) {
2343  px = A[0];
2344  py = A[1];
2345  } else {
2346  px = C[0];
2347  py = C[1];
2348  }
2349  } else {
2350  if (is_intra[xy - 2])
2351  sum = FFABS(px) + FFABS(py);
2352  else
2353  sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2354  if (sum > 32) {
2355  if (get_bits1(&s->gb)) {
2356  px = A[0];
2357  py = A[1];
2358  } else {
2359  px = C[0];
2360  py = C[1];
2361  }
2362  }
2363  }
2364  }
2365  /* store MV using signed modulus of MV range defined in 4.11 */
2366 
2367  s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2368  s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
2369  }
2370  s->current_picture.f.motion_val[0][xy][0] = s->mv[0][0][0];
2371  s->current_picture.f.motion_val[0][xy][1] = s->mv[0][0][1];
2372  s->current_picture.f.motion_val[1][xy][0] = s->mv[1][0][0];
2373  s->current_picture.f.motion_val[1][xy][1] = s->mv[1][0][1];
2374 }
2375 
2376 static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
2377 {
2378  int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
2379  MpegEncContext *s = &v->s;
2380  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2381 
2382  if (v->bmvtype == BMV_TYPE_DIRECT) {
2383  int total_opp, k, f;
2384  if (s->next_picture.f.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
2385  s->mv[0][0][0] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
2386  v->bfraction, 0, s->quarter_sample, v->qs_last);
2387  s->mv[0][0][1] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
2388  v->bfraction, 0, s->quarter_sample, v->qs_last);
2389  s->mv[1][0][0] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
2390  v->bfraction, 1, s->quarter_sample, v->qs_last);
2391  s->mv[1][0][1] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
2392  v->bfraction, 1, s->quarter_sample, v->qs_last);
2393 
2394  total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
2395  + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
2396  + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
2397  + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
2398  f = (total_opp > 2) ? 1 : 0;
2399  } else {
2400  s->mv[0][0][0] = s->mv[0][0][1] = 0;
2401  s->mv[1][0][0] = s->mv[1][0][1] = 0;
2402  f = 0;
2403  }
2404  v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
2405  for (k = 0; k < 4; k++) {
2406  s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
2407  s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
2408  s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
2409  s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
2410  v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
2411  v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
2412  }
2413  return;
2414  }
2415  if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
2416  vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2417  vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2418  return;
2419  }
2420  if (dir) { // backward
2421  vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2422  if (n == 3 || mv1) {
2423  vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
2424  }
2425  } else { // forward
2426  vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2427  if (n == 3 || mv1) {
2428  vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
2429  }
2430  }
2431 }
2432 
2433 /** Get predicted DC value for I-frames only
2434  * prediction dir: left=0, top=1
2435  * @param s MpegEncContext
2436  * @param overlap flag indicating that overlap filtering is used
2437  * @param pq integer part of picture quantizer
2438  * @param[in] n block index in the current MB
2439  * @param dc_val_ptr Pointer to DC predictor
2440  * @param dir_ptr Prediction direction for use in AC prediction
2441  */
2442 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2443  int16_t **dc_val_ptr, int *dir_ptr)
2444 {
2445  int a, b, c, wrap, pred, scale;
2446  int16_t *dc_val;
2447  static const uint16_t dcpred[32] = {
2448  -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2449  114, 102, 93, 85, 79, 73, 68, 64,
2450  60, 57, 54, 51, 49, 47, 45, 43,
2451  41, 39, 38, 37, 35, 34, 33
2452  };
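 /* dcpred[i] is 1024/i rounded to nearest, i.e. roughly the quantized DC of a
  * flat mid-grey (128) block; it seeds prediction when the neighbours lie
  * outside the picture or slice. */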
2453 
2454  /* find prediction - wmv3_dc_scale always used here in fact */
2455  if (n < 4) scale = s->y_dc_scale;
2456  else scale = s->c_dc_scale;
2457 
2458  wrap = s->block_wrap[n];
2459  dc_val = s->dc_val[0] + s->block_index[n];
2460 
2461  /* B A
2462  * C X
2463  */
2464  c = dc_val[ - 1];
2465  b = dc_val[ - 1 - wrap];
2466  a = dc_val[ - wrap];
2467 
2468  if (pq < 9 || !overlap) {
2469  /* Set outer values */
2470  if (s->first_slice_line && (n != 2 && n != 3))
2471  b = a = dcpred[scale];
2472  if (s->mb_x == 0 && (n != 1 && n != 3))
2473  b = c = dcpred[scale];
2474  } else {
2475  /* Set outer values */
2476  if (s->first_slice_line && (n != 2 && n != 3))
2477  b = a = 0;
2478  if (s->mb_x == 0 && (n != 1 && n != 3))
2479  b = c = 0;
2480  }
2481 
2482  if (abs(a - b) <= abs(b - c)) {
2483  pred = c;
2484  *dir_ptr = 1; // left
2485  } else {
2486  pred = a;
2487  *dir_ptr = 0; // top
2488  }
2489 
2490  /* update predictor */
2491  *dc_val_ptr = &dc_val[0];
2492  return pred;
2493 }
2494 
2495 
2496 /** Get predicted DC value
2497  * prediction dir: left=0, top=1
2498  * @param s MpegEncContext
2499  * @param overlap flag indicating that overlap filtering is used
2500  * @param pq integer part of picture quantizer
2501  * @param[in] n block index in the current MB
2502  * @param a_avail flag indicating top block availability
2503  * @param c_avail flag indicating left block availability
2504  * @param dc_val_ptr Pointer to DC predictor
2505  * @param dir_ptr Prediction direction for use in AC prediction
2506  */
2507 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2508  int a_avail, int c_avail,
2509  int16_t **dc_val_ptr, int *dir_ptr)
2510 {
2511  int a, b, c, wrap, pred;
2512  int16_t *dc_val;
2513  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2514  int q1, q2 = 0;
2515  int dqscale_index;
2516 
2517  wrap = s->block_wrap[n];
2518  dc_val = s->dc_val[0] + s->block_index[n];
2519 
2520  /* B A
2521  * C X
2522  */
2523  c = dc_val[ - 1];
2524  b = dc_val[ - 1 - wrap];
2525  a = dc_val[ - wrap];
2526  /* scale predictors if needed */
2527  q1 = s->current_picture.f.qscale_table[mb_pos];
2528  dqscale_index = s->y_dc_scale_table[q1] - 1;
2529  if (dqscale_index < 0)
2530  return 0;
2531  if (c_avail && (n != 1 && n != 3)) {
2532  q2 = s->current_picture.f.qscale_table[mb_pos - 1];
2533  if (q2 && q2 != q1)
2534  c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2535  }
2536  if (a_avail && (n != 2 && n != 3)) {
2537  q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
2538  if (q2 && q2 != q1)
2539  a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2540  }
2541  if (a_avail && c_avail && (n != 3)) {
2542  int off = mb_pos;
2543  if (n != 1)
2544  off--;
2545  if (n != 2)
2546  off -= s->mb_stride;
2547  q2 = s->current_picture.f.qscale_table[off];
2548  if (q2 && q2 != q1)
2549  b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2550  }
2551 
2552  if (a_avail && c_avail) {
2553  if (abs(a - b) <= abs(b - c)) {
2554  pred = c;
2555  *dir_ptr = 1; // left
2556  } else {
2557  pred = a;
2558  *dir_ptr = 0; // top
2559  }
2560  } else if (a_avail) {
2561  pred = a;
2562  *dir_ptr = 0; // top
2563  } else if (c_avail) {
2564  pred = c;
2565  *dir_ptr = 1; // left
2566  } else {
2567  pred = 0;
2568  *dir_ptr = 1; // left
2569  }
2570 
2571  /* update predictor */
2572  *dc_val_ptr = &dc_val[0];
2573  return pred;
2574 }
2575  // Block group
2577 
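/* Prediction of the per-block "coded" flag used in I frames: if the top-left (B)
 * and top (C) neighbours agree, the left neighbour (A) is used, otherwise C. */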
2584 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2585  uint8_t **coded_block_ptr)
2586 {
2587  int xy, wrap, pred, a, b, c;
2588 
2589  xy = s->block_index[n];
2590  wrap = s->b8_stride;
2591 
2592  /* B C
2593  * A X
2594  */
2595  a = s->coded_block[xy - 1 ];
2596  b = s->coded_block[xy - 1 - wrap];
2597  c = s->coded_block[xy - wrap];
2598 
2599  if (b == c) {
2600  pred = a;
2601  } else {
2602  pred = c;
2603  }
2604 
2605  /* store value */
2606  *coded_block_ptr = &s->coded_block[xy];
2607 
2608  return pred;
2609 }
2610 
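/* vc1_decode_ac_coeff() below returns one (run, level, last) triple: either a
 * regular VLC entry, an escape that adds a delta to the run or level, or the
 * fully explicit escape-3 coding with fixed-length run and level fields. */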
2620 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
2621  int *value, int codingset)
2622 {
2623  GetBitContext *gb = &v->s.gb;
2624  int index, escape, run = 0, level = 0, lst = 0;
2625 
2626  index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2627  if (index != vc1_ac_sizes[codingset] - 1) {
2628  run = vc1_index_decode_table[codingset][index][0];
2629  level = vc1_index_decode_table[codingset][index][1];
2630  lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
2631  if (get_bits1(gb))
2632  level = -level;
2633  } else {
2634  escape = decode210(gb);
2635  if (escape != 2) {
2636  index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2637  run = vc1_index_decode_table[codingset][index][0];
2638  level = vc1_index_decode_table[codingset][index][1];
2639  lst = index >= vc1_last_decode_table[codingset];
2640  if (escape == 0) {
2641  if (lst)
2642  level += vc1_last_delta_level_table[codingset][run];
2643  else
2644  level += vc1_delta_level_table[codingset][run];
2645  } else {
2646  if (lst)
2647  run += vc1_last_delta_run_table[codingset][level] + 1;
2648  else
2649  run += vc1_delta_run_table[codingset][level] + 1;
2650  }
2651  if (get_bits1(gb))
2652  level = -level;
2653  } else {
2654  int sign;
2655  lst = get_bits1(gb);
2656  if (v->s.esc3_level_length == 0) {
2657  if (v->pq < 8 || v->dquantfrm) { // table 59
2658  v->s.esc3_level_length = get_bits(gb, 3);
2659  if (!v->s.esc3_level_length)
2660  v->s.esc3_level_length = get_bits(gb, 2) + 8;
2661  } else { // table 60
2662  v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
2663  }
2664  v->s.esc3_run_length = 3 + get_bits(gb, 2);
2665  }
2666  run = get_bits(gb, v->s.esc3_run_length);
2667  sign = get_bits1(gb);
2668  level = get_bits(gb, v->s.esc3_level_length);
2669  if (sign)
2670  level = -level;
2671  }
2672  }
2673 
2674  *last = lst;
2675  *skip = run;
2676  *value = level;
2677 }
2678 
2679 /** Decode intra block in intra frames - should be faster than decode_intra_block
2680  * @param v VC1Context
2681  * @param block block to decode
2682  * @param[in] n subblock index
2683  * @param coded are AC coeffs present or not
2684  * @param codingset set of VLC to decode data
2685  */
2686 static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n,
2687  int coded, int codingset)
2688 {
2689  GetBitContext *gb = &v->s.gb;
2690  MpegEncContext *s = &v->s;
2691  int dc_pred_dir = 0; /* Direction of the DC prediction used */
2692  int i;
2693  int16_t *dc_val;
2694  int16_t *ac_val, *ac_val2;
2695  int dcdiff;
2696 
2697  /* Get DC differential */
2698  if (n < 4) {
2699  dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2700  } else {
2701  dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2702  }
2703  if (dcdiff < 0) {
2704  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2705  return -1;
2706  }
2707  if (dcdiff) {
2708  if (dcdiff == 119 /* ESC index value */) {
2709  /* TODO: Optimize */
2710  if (v->pq == 1) dcdiff = get_bits(gb, 10);
2711  else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2712  else dcdiff = get_bits(gb, 8);
2713  } else {
2714  if (v->pq == 1)
2715  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2716  else if (v->pq == 2)
2717  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2718  }
2719  if (get_bits1(gb))
2720  dcdiff = -dcdiff;
2721  }
2722 
2723  /* Prediction */
2724  dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2725  *dc_val = dcdiff;
2726 
2727  /* Store the quantized DC coeff, used for prediction */
2728  if (n < 4) {
2729  block[0] = dcdiff * s->y_dc_scale;
2730  } else {
2731  block[0] = dcdiff * s->c_dc_scale;
2732  }
2733  /* Skip ? */
2734  if (!coded) {
2735  goto not_coded;
2736  }
2737 
2738  // AC Decoding
2739  i = 1;
2740 
2741  {
2742  int last = 0, skip, value;
2743  const uint8_t *zz_table;
2744  int scale;
2745  int k;
2746 
2747  scale = v->pq * 2 + v->halfpq;
2748 
2749  if (v->s.ac_pred) {
2750  if (!dc_pred_dir)
2751  zz_table = v->zz_8x8[2];
2752  else
2753  zz_table = v->zz_8x8[3];
2754  } else
2755  zz_table = v->zz_8x8[1];
2756 
2757  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2758  ac_val2 = ac_val;
2759  if (dc_pred_dir) // left
2760  ac_val -= 16;
2761  else // top
2762  ac_val -= 16 * s->block_wrap[n];
2763 
2764  while (!last) {
2765  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2766  i += skip;
2767  if (i > 63)
2768  break;
2769  block[zz_table[i++]] = value;
2770  }
2771 
2772  /* apply AC prediction if needed */
2773  if (s->ac_pred) {
2774  if (dc_pred_dir) { // left
2775  for (k = 1; k < 8; k++)
2776  block[k << v->left_blk_sh] += ac_val[k];
2777  } else { // top
2778  for (k = 1; k < 8; k++)
2779  block[k << v->top_blk_sh] += ac_val[k + 8];
2780  }
2781  }
2782  /* save AC coeffs for further prediction */
2783  for (k = 1; k < 8; k++) {
2784  ac_val2[k] = block[k << v->left_blk_sh];
2785  ac_val2[k + 8] = block[k << v->top_blk_sh];
2786  }
2787 
2788  /* scale AC coeffs */
2789  for (k = 1; k < 64; k++)
2790  if (block[k]) {
2791  block[k] *= scale;
2792  if (!v->pquantizer)
2793  block[k] += (block[k] < 0) ? -v->pq : v->pq;
2794  }
2795 
2796  if (s->ac_pred) i = 63;
2797  }
2798 
2799 not_coded:
2800  if (!coded) {
2801  int k, scale;
2802  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2803  ac_val2 = ac_val;
2804 
2805  i = 0;
2806  scale = v->pq * 2 + v->halfpq;
2807  memset(ac_val2, 0, 16 * 2);
2808  if (dc_pred_dir) { // left
2809  ac_val -= 16;
2810  if (s->ac_pred)
2811  memcpy(ac_val2, ac_val, 8 * 2);
2812  } else { // top
2813  ac_val -= 16 * s->block_wrap[n];
2814  if (s->ac_pred)
2815  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2816  }
2817 
2818  /* apply AC prediction if needed */
2819  if (s->ac_pred) {
2820  if (dc_pred_dir) { //left
2821  for (k = 1; k < 8; k++) {
2822  block[k << v->left_blk_sh] = ac_val[k] * scale;
2823  if (!v->pquantizer && block[k << v->left_blk_sh])
2824  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
2825  }
2826  } else { // top
2827  for (k = 1; k < 8; k++) {
2828  block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
2829  if (!v->pquantizer && block[k << v->top_blk_sh])
2830  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
2831  }
2832  }
2833  i = 63;
2834  }
2835  }
2836  s->block_last_index[n] = i;
2837 
2838  return 0;
2839 }
2840 
2841 /** Decode intra block in intra frames - should be faster than decode_intra_block
2842  * @param v VC1Context
2843  * @param block block to decode
2844  * @param[in] n subblock index
2845  * @param coded are AC coeffs present or not
2846  * @param codingset set of VLC to decode data
2847  * @param mquant quantizer value for this macroblock
2848  */
2849 static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n,
2850  int coded, int codingset, int mquant)
2851 {
2851 {
2852  GetBitContext *gb = &v->s.gb;
2853  MpegEncContext *s = &v->s;
2854  int dc_pred_dir = 0; /* Direction of the DC prediction used */
2855  int i;
2856  int16_t *dc_val;
2857  int16_t *ac_val, *ac_val2;
2858  int dcdiff;
2859  int a_avail = v->a_avail, c_avail = v->c_avail;
2860  int use_pred = s->ac_pred;
2861  int scale;
2862  int q1, q2 = 0;
2863  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2864 
2865  /* Get DC differential */
2866  if (n < 4) {
2867  dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2868  } else {
2869  dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2870  }
2871  if (dcdiff < 0) {
2872  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2873  return -1;
2874  }
2875  if (dcdiff) {
2876  if (dcdiff == 119 /* ESC index value */) {
2877  /* TODO: Optimize */
2878  if (mquant == 1) dcdiff = get_bits(gb, 10);
2879  else if (mquant == 2) dcdiff = get_bits(gb, 9);
2880  else dcdiff = get_bits(gb, 8);
2881  } else {
2882  if (mquant == 1)
2883  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2884  else if (mquant == 2)
2885  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2886  }
2887  if (get_bits1(gb))
2888  dcdiff = -dcdiff;
2889  }
2890 
2891  /* Prediction */
2892  dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2893  *dc_val = dcdiff;
2894 
2895  /* Store the quantized DC coeff, used for prediction */
2896  if (n < 4) {
2897  block[0] = dcdiff * s->y_dc_scale;
2898  } else {
2899  block[0] = dcdiff * s->c_dc_scale;
2900  }
2901 
2902  //AC Decoding
2903  i = 1;
2904 
2905  /* check if AC is needed at all */
2906  if (!a_avail && !c_avail)
2907  use_pred = 0;
2908  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2909  ac_val2 = ac_val;
2910 
2911  scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2912 
2913  if (dc_pred_dir) // left
2914  ac_val -= 16;
2915  else // top
2916  ac_val -= 16 * s->block_wrap[n];
2917 
2918  q1 = s->current_picture.f.qscale_table[mb_pos];
2919  if ( dc_pred_dir && c_avail && mb_pos)
2920  q2 = s->current_picture.f.qscale_table[mb_pos - 1];
2921  if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2922  q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
2923  if ( dc_pred_dir && n == 1)
2924  q2 = q1;
2925  if (!dc_pred_dir && n == 2)
2926  q2 = q1;
2927  if (n == 3)
2928  q2 = q1;
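 /* For blocks 1-3, whenever the neighbour selected by the prediction direction
  * lies in the current macroblock it shares the current quantizer, so q2 is
  * forced to q1 and no predictor rescaling takes place. */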
2929 
2930  if (coded) {
2931  int last = 0, skip, value;
2932  const uint8_t *zz_table;
2933  int k;
2934 
2935  if (v->s.ac_pred) {
2936  if (!use_pred && v->fcm == ILACE_FRAME) {
2937  zz_table = v->zzi_8x8;
2938  } else {
2939  if (!dc_pred_dir) // top
2940  zz_table = v->zz_8x8[2];
2941  else // left
2942  zz_table = v->zz_8x8[3];
2943  }
2944  } else {
2945  if (v->fcm != ILACE_FRAME)
2946  zz_table = v->zz_8x8[1];
2947  else
2948  zz_table = v->zzi_8x8;
2949  }
2950 
2951  while (!last) {
2952  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2953  i += skip;
2954  if (i > 63)
2955  break;
2956  block[zz_table[i++]] = value;
2957  }
2958 
2959  /* apply AC prediction if needed */
2960  if (use_pred) {
2961  /* scale predictors if needed*/
2962  if (q2 && q1 != q2) {
2963  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2964  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2965 
2966  if (q1 < 1)
2967  return AVERROR_INVALIDDATA;
2968  if (dc_pred_dir) { // left
2969  for (k = 1; k < 8; k++)
2970  block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2971  } else { // top
2972  for (k = 1; k < 8; k++)
2973  block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2974  }
2975  } else {
2976  if (dc_pred_dir) { //left
2977  for (k = 1; k < 8; k++)
2978  block[k << v->left_blk_sh] += ac_val[k];
2979  } else { //top
2980  for (k = 1; k < 8; k++)
2981  block[k << v->top_blk_sh] += ac_val[k + 8];
2982  }
2983  }
2984  }
2985  /* save AC coeffs for further prediction */
2986  for (k = 1; k < 8; k++) {
2987  ac_val2[k ] = block[k << v->left_blk_sh];
2988  ac_val2[k + 8] = block[k << v->top_blk_sh];
2989  }
2990 
2991  /* scale AC coeffs */
2992  for (k = 1; k < 64; k++)
2993  if (block[k]) {
2994  block[k] *= scale;
2995  if (!v->pquantizer)
2996  block[k] += (block[k] < 0) ? -mquant : mquant;
2997  }
2998 
2999  if (use_pred) i = 63;
3000  } else { // no AC coeffs
3001  int k;
3002 
3003  memset(ac_val2, 0, 16 * 2);
3004  if (dc_pred_dir) { // left
3005  if (use_pred) {
3006  memcpy(ac_val2, ac_val, 8 * 2);
3007  if (q2 && q1 != q2) {
3008  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3009  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3010  if (q1 < 1)
3011  return AVERROR_INVALIDDATA;
3012  for (k = 1; k < 8; k++)
3013  ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3014  }
3015  }
3016  } else { // top
3017  if (use_pred) {
3018  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3019  if (q2 && q1 != q2) {
3020  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3021  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3022  if (q1 < 1)
3023  return AVERROR_INVALIDDATA;
3024  for (k = 1; k < 8; k++)
3025  ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3026  }
3027  }
3028  }
3029 
3030  /* apply AC prediction if needed */
3031  if (use_pred) {
3032  if (dc_pred_dir) { // left
3033  for (k = 1; k < 8; k++) {
3034  block[k << v->left_blk_sh] = ac_val2[k] * scale;
3035  if (!v->pquantizer && block[k << v->left_blk_sh])
3036  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3037  }
3038  } else { // top
3039  for (k = 1; k < 8; k++) {
3040  block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3041  if (!v->pquantizer && block[k << v->top_blk_sh])
3042  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3043  }
3044  }
3045  i = 63;
3046  }
3047  }
3048  s->block_last_index[n] = i;
3049 
3050  return 0;
3051 }
3052 
3053 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
3054  * @param v VC1Context
3055  * @param block block to decode
3056  * @param[in] n subblock index
3057  * @param coded are AC coeffs present or not
3058  * @param mquant block quantizer
3059  * @param codingset set of VLC to decode data
3060  */
3061 static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n,
3062  int coded, int mquant, int codingset)
3063 {
3064  GetBitContext *gb = &v->s.gb;
3065  MpegEncContext *s = &v->s;
3066  int dc_pred_dir = 0; /* Direction of the DC prediction used */
3067  int i;
3068  int16_t *dc_val;
3069  int16_t *ac_val, *ac_val2;
3070  int dcdiff;
3071  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3072  int a_avail = v->a_avail, c_avail = v->c_avail;
3073  int use_pred = s->ac_pred;
3074  int scale;
3075  int q1, q2 = 0;
3076 
3077  s->dsp.clear_block(block);
3078 
3079  /* XXX: Guard against dumb values of mquant */
3080  mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
3081 
3082  /* Set DC scale - y and c use the same */
3083  s->y_dc_scale = s->y_dc_scale_table[mquant];
3084  s->c_dc_scale = s->c_dc_scale_table[mquant];
3085 
3086  /* Get DC differential */
3087  if (n < 4) {
3088  dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3089  } else {
3090  dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3091  }
3092  if (dcdiff < 0) {
3093  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
3094  return -1;
3095  }
3096  if (dcdiff) {
3097  if (dcdiff == 119 /* ESC index value */) {
3098  /* TODO: Optimize */
3099  if (mquant == 1) dcdiff = get_bits(gb, 10);
3100  else if (mquant == 2) dcdiff = get_bits(gb, 9);
3101  else dcdiff = get_bits(gb, 8);
3102  } else {
3103  if (mquant == 1)
3104  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
3105  else if (mquant == 2)
3106  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
3107  }
3108  if (get_bits1(gb))
3109  dcdiff = -dcdiff;
3110  }
3111 
3112  /* Prediction */
3113  dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
3114  *dc_val = dcdiff;
3115 
3116  /* Store the quantized DC coeff, used for prediction */
3117 
3118  if (n < 4) {
3119  block[0] = dcdiff * s->y_dc_scale;
3120  } else {
3121  block[0] = dcdiff * s->c_dc_scale;
3122  }
3123 
3124  //AC Decoding
3125  i = 1;
3126 
3127  /* check if AC is needed at all and adjust direction if needed */
3128  if (!a_avail) dc_pred_dir = 1;
3129  if (!c_avail) dc_pred_dir = 0;
3130  if (!a_avail && !c_avail) use_pred = 0;
3131  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3132  ac_val2 = ac_val;
3133 
3134  scale = mquant * 2 + v->halfpq;
3135 
3136  if (dc_pred_dir) //left
3137  ac_val -= 16;
3138  else //top
3139  ac_val -= 16 * s->block_wrap[n];
3140 
3141  q1 = s->current_picture.f.qscale_table[mb_pos];
3142  if (dc_pred_dir && c_avail && mb_pos)
3143  q2 = s->current_picture.f.qscale_table[mb_pos - 1];
3144  if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
3145  q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
3146  if ( dc_pred_dir && n == 1)
3147  q2 = q1;
3148  if (!dc_pred_dir && n == 2)
3149  q2 = q1;
3150  if (n == 3) q2 = q1;
3151 
3152  if (coded) {
3153  int last = 0, skip, value;
3154  int k;
3155 
3156  while (!last) {
3157  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
3158  i += skip;
3159  if (i > 63)
3160  break;
3161  if (v->fcm == PROGRESSIVE)
3162  block[v->zz_8x8[0][i++]] = value;
3163  else {
3164  if (use_pred && (v->fcm == ILACE_FRAME)) {
3165  if (!dc_pred_dir) // top
3166  block[v->zz_8x8[2][i++]] = value;
3167  else // left
3168  block[v->zz_8x8[3][i++]] = value;
3169  } else {
3170  block[v->zzi_8x8[i++]] = value;
3171  }
3172  }
3173  }
3174 
3175  /* apply AC prediction if needed */
3176  if (use_pred) {
3177  /* scale predictors if needed*/
3178  if (q2 && q1 != q2) {
3179  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3180  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3181 
3182  if (q1 < 1)
3183  return AVERROR_INVALIDDATA;
3184  if (dc_pred_dir) { // left
3185  for (k = 1; k < 8; k++)
3186  block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3187  } else { //top
3188  for (k = 1; k < 8; k++)
3189  block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3190  }
3191  } else {
3192  if (dc_pred_dir) { // left
3193  for (k = 1; k < 8; k++)
3194  block[k << v->left_blk_sh] += ac_val[k];
3195  } else { // top
3196  for (k = 1; k < 8; k++)
3197  block[k << v->top_blk_sh] += ac_val[k + 8];
3198  }
3199  }
3200  }
3201  /* save AC coeffs for further prediction */
3202  for (k = 1; k < 8; k++) {
3203  ac_val2[k ] = block[k << v->left_blk_sh];
3204  ac_val2[k + 8] = block[k << v->top_blk_sh];
3205  }
3206 
3207  /* scale AC coeffs */
3208  for (k = 1; k < 64; k++)
3209  if (block[k]) {
3210  block[k] *= scale;
3211  if (!v->pquantizer)
3212  block[k] += (block[k] < 0) ? -mquant : mquant;
3213  }
3214 
3215  if (use_pred) i = 63;
3216  } else { // no AC coeffs
3217  int k;
3218 
3219  memset(ac_val2, 0, 16 * 2);
3220  if (dc_pred_dir) { // left
3221  if (use_pred) {
3222  memcpy(ac_val2, ac_val, 8 * 2);
3223  if (q2 && q1 != q2) {
3224  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3225  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3226  if (q1 < 1)
3227  return AVERROR_INVALIDDATA;
3228  for (k = 1; k < 8; k++)
3229  ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3230  }
3231  }
3232  } else { // top
3233  if (use_pred) {
3234  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3235  if (q2 && q1 != q2) {
3236  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3237  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3238  if (q1 < 1)
3239  return AVERROR_INVALIDDATA;
3240  for (k = 1; k < 8; k++)
3241  ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3242  }
3243  }
3244  }
3245 
3246  /* apply AC prediction if needed */
3247  if (use_pred) {
3248  if (dc_pred_dir) { // left
3249  for (k = 1; k < 8; k++) {
3250  block[k << v->left_blk_sh] = ac_val2[k] * scale;
3251  if (!v->pquantizer && block[k << v->left_blk_sh])
3252  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3253  }
3254  } else { // top
3255  for (k = 1; k < 8; k++) {
3256  block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3257  if (!v->pquantizer && block[k << v->top_blk_sh])
3258  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3259  }
3260  }
3261  i = 63;
3262  }
3263  }
3264  s->block_last_index[n] = i;
3265 
3266  return 0;
3267 }
3268 
3269 /** Decode P block
3270  */
3271 static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n,
3272  int mquant, int ttmb, int first_block,
3273  uint8_t *dst, int linesize, int skip_block,
3274  int *ttmb_out)
3275 {
3276  MpegEncContext *s = &v->s;
3277  GetBitContext *gb = &s->gb;
3278  int i, j;
3279  int subblkpat = 0;
3280  int scale, off, idx, last, skip, value;
3281  int ttblk = ttmb & 7;
3282  int pat = 0;
3283 
3284  s->dsp.clear_block(block);
3285 
3286  if (ttmb == -1) {
3287  ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3288  }
3289  if (ttblk == TT_4X4) {
3290  subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3291  }
3292  if ((ttblk != TT_8X8 && ttblk != TT_4X4)
3293  && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
3294  || (!v->res_rtm_flag && !first_block))) {
3295  subblkpat = decode012(gb);
3296  if (subblkpat)
3297  subblkpat ^= 3; // swap decoded pattern bits
3298  if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
3299  ttblk = TT_8X4;
3300  if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
3301  ttblk = TT_4X8;
3302  }
3303  scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
3304 
3305  // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3306  if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3307  subblkpat = 2 - (ttblk == TT_8X4_TOP);
3308  ttblk = TT_8X4;
3309  }
3310  if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3311  subblkpat = 2 - (ttblk == TT_4X8_LEFT);
3312  ttblk = TT_4X8;
3313  }
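 /* At this point ttblk is one of TT_8X8/8X4/4X8/4X4 and subblkpat carries one
  * bit per sub-block that holds no coefficients; the coded pattern accumulated
  * in pat is returned to the caller and later feeds the in-loop filter via v->cbp. */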
3314  switch (ttblk) {
3315  case TT_8X8:
3316  pat = 0xF;
3317  i = 0;
3318  last = 0;
3319  while (!last) {
3320  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3321  i += skip;
3322  if (i > 63)
3323  break;
3324  if (!v->fcm)
3325  idx = v->zz_8x8[0][i++];
3326  else
3327  idx = v->zzi_8x8[i++];
3328  block[idx] = value * scale;
3329  if (!v->pquantizer)
3330  block[idx] += (block[idx] < 0) ? -mquant : mquant;
3331  }
3332  if (!skip_block) {
3333  if (i == 1)
3334  v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
3335  else {
3336  v->vc1dsp.vc1_inv_trans_8x8(block);
3337  s->dsp.add_pixels_clamped(block, dst, linesize);
3338  }
3339  }
3340  break;
3341  case TT_4X4:
3342  pat = ~subblkpat & 0xF;
3343  for (j = 0; j < 4; j++) {
3344  last = subblkpat & (1 << (3 - j));
3345  i = 0;
3346  off = (j & 1) * 4 + (j & 2) * 16;
3347  while (!last) {
3348  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3349  i += skip;
3350  if (i > 15)
3351  break;
3352  if (!v->fcm)
3353  idx = ff_vc1_simple_progressive_4x4_zz[i++];
3354  else
3355  idx = ff_vc1_adv_interlaced_4x4_zz[i++];
3356  block[idx + off] = value * scale;
3357  if (!v->pquantizer)
3358  block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3359  }
3360  if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
3361  if (i == 1)
3362  v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3363  else
3364  v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3365  }
3366  }
3367  break;
3368  case TT_8X4:
3369  pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
3370  for (j = 0; j < 2; j++) {
3371  last = subblkpat & (1 << (1 - j));
3372  i = 0;
3373  off = j * 32;
3374  while (!last) {
3375  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3376  i += skip;
3377  if (i > 31)
3378  break;
3379  if (!v->fcm)
3380  idx = v->zz_8x4[i++] + off;
3381  else
3382  idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
3383  block[idx] = value * scale;
3384  if (!v->pquantizer)
3385  block[idx] += (block[idx] < 0) ? -mquant : mquant;
3386  }
3387  if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3388  if (i == 1)
3389  v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
3390  else
3391  v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
3392  }
3393  }
3394  break;
3395  case TT_4X8:
3396  pat = ~(subblkpat * 5) & 0xF;
3397  for (j = 0; j < 2; j++) {
3398  last = subblkpat & (1 << (1 - j));
3399  i = 0;
3400  off = j * 4;
3401  while (!last) {
3402  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3403  i += skip;
3404  if (i > 31)
3405  break;
3406  if (!v->fcm)
3407  idx = v->zz_4x8[i++] + off;
3408  else
3409  idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
3410  block[idx] = value * scale;
3411  if (!v->pquantizer)
3412  block[idx] += (block[idx] < 0) ? -mquant : mquant;
3413  }
3414  if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3415  if (i == 1)
3416  v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
3417  else
3418  v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
3419  }
3420  }
3421  break;
3422  }
3423  if (ttmb_out)
3424  *ttmb_out |= ttblk << (n * 4);
3425  return pat;
3426 }
3427  // Macroblock group
3429 
3430 static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
3431 static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
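/* Lookup tables for MV differential escape decoding (GET_MVDATA()): the VLC
 * index selects how many extra bits to read (size_table) and the magnitude
 * offset to add (offset_table). */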
3432 
3433 static void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
3434 {
3435  MpegEncContext *s = &v->s;
3436  int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
3437  block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
3438  mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
3439  block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
3440  int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3441  uint8_t *dst;
3442 
3443  if (block_num > 3) {
3444  dst = s->dest[block_num - 3];
3445  } else {
3446  dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
3447  }
3448  if (s->mb_y != s->end_mb_y || block_num < 2) {
3449  int16_t (*mv)[2];
3450  int mv_stride;
3451 
3452  if (block_num > 3) {
3453  bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
3454  bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
3455  mv = &v->luma_mv[s->mb_x - s->mb_stride];
3456  mv_stride = s->mb_stride;
3457  } else {
3458  bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
3459  : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
3460  bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
3461  : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
3462  mv_stride = s->b8_stride;
3463  mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
3464  }
3465 
3466  if (bottom_is_intra & 1 || block_is_intra & 1 ||
3467  mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
3468  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3469  } else {
3470  idx = ((bottom_cbp >> 2) | block_cbp) & 3;
3471  if (idx == 3) {
3472  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3473  } else if (idx) {
3474  if (idx == 1)
3475  v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3476  else
3477  v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
3478  }
3479  }
3480  }
3481 
3482  dst -= 4 * linesize;
3483  ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
3484  if (ttblk == TT_4X4 || ttblk == TT_8X4) {
3485  idx = (block_cbp | (block_cbp >> 2)) & 3;
3486  if (idx == 3) {
3487  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3488  } else if (idx) {
3489  if (idx == 1)
3490  v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3491  else
3492  v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
3493  }
3494  }
3495 }
3496 
3497 static void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
3498 {
3499  MpegEncContext *s = &v->s;
3500  int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
3501  block_cbp = mb_cbp >> (block_num * 4), right_cbp,
3502  mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
3503  block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
3504  int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3505  uint8_t *dst;
3506 
3507  if (block_num > 3) {
3508  dst = s->dest[block_num - 3] - 8 * linesize;
3509  } else {
3510  dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
3511  }
3512 
3513  if (s->mb_x != s->mb_width || !(block_num & 5)) {
3514  int16_t (*mv)[2];
3515 
3516  if (block_num > 3) {
3517  right_cbp = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4);
3518  right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
3519  mv = &v->luma_mv[s->mb_x - s->mb_stride - 1];
3520  } else {
3521  right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3522  : (mb_cbp >> ((block_num + 1) * 4));
3523  right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3524  : (mb_is_intra >> ((block_num + 1) * 4));
3525  mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
3526  }
3527  if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
3528  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3529  } else {
3530  idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
3531  if (idx == 5) {
3532  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3533  } else if (idx) {
3534  if (idx == 1)
3535  v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
3536  else
3537  v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
3538  }
3539  }
3540  }
3541 
3542  dst -= 4;
3543  ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
3544  if (ttblk == TT_4X4 || ttblk == TT_4X8) {
3545  idx = (block_cbp | (block_cbp >> 1)) & 5;
3546  if (idx == 5) {
3547  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3548  } else if (idx) {
3549  if (idx == 1)
3550  v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
3551  else
3552  v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
3553  }
3554  }
3555 }
3556 
3557 static void vc1_apply_p_loop_filter(VC1Context *v)
3558 {
3559  MpegEncContext *s = &v->s;
3560  int i;
3561 
3562  for (i = 0; i < 6; i++) {
3563  vc1_apply_p_v_loop_filter(v, i);
3564  }
3565 
3566  /* V always precedes H, therefore we run H one MB before V;
3567  * at the end of a row, we catch up to complete the row */
3568  if (s->mb_x) {
3569  for (i = 0; i < 6; i++) {
3570  vc1_apply_p_h_loop_filter(v, i);
3571  }
3572  if (s->mb_x == s->mb_width - 1) {
3573  s->mb_x++;
3575  for (i = 0; i < 6; i++) {
3576  vc1_apply_p_h_loop_filter(v, i);
3577  }
3578  }
3579  }
3580 }
3581 
3584 static int vc1_decode_p_mb(VC1Context *v)
3585 {
3586  MpegEncContext *s = &v->s;
3587  GetBitContext *gb = &s->gb;
3588  int i, j;
3589  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3590  int cbp; /* cbp decoding stuff */
3591  int mqdiff, mquant; /* MB quantization */
3592  int ttmb = v->ttfrm; /* MB Transform type */
3593 
3594  int mb_has_coeffs = 1; /* last_flag */
3595  int dmv_x, dmv_y; /* Differential MV components */
3596  int index, index1; /* LUT indexes */
3597  int val, sign; /* temp values */
3598  int first_block = 1;
3599  int dst_idx, off;
3600  int skipped, fourmv;
3601  int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
3602 
3603  mquant = v->pq; /* lossy initialization */
3604 
3605  if (v->mv_type_is_raw)
3606  fourmv = get_bits1(gb);
3607  else
3608  fourmv = v->mv_type_mb_plane[mb_pos];
3609  if (v->skip_is_raw)
3610  skipped = get_bits1(gb);
3611  else
3612  skipped = v->s.mbskip_table[mb_pos];
3613 
3614  if (!fourmv) { /* 1MV mode */
3615  if (!skipped) {
3616  GET_MVDATA(dmv_x, dmv_y);
3617 
3618  if (s->mb_intra) {
3619  s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
3620  s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
3621  }
3623  vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3624 
3625  /* FIXME Set DC val for inter block ? */
3626  if (s->mb_intra && !mb_has_coeffs) {
3627  GET_MQUANT();
3628  s->ac_pred = get_bits1(gb);
3629  cbp = 0;
3630  } else if (mb_has_coeffs) {
3631  if (s->mb_intra)
3632  s->ac_pred = get_bits1(gb);
3633  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3634  GET_MQUANT();
3635  } else {
3636  mquant = v->pq;
3637  cbp = 0;
3638  }
3639  s->current_picture.f.qscale_table[mb_pos] = mquant;
3640 
3641  if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3642  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
3643  VC1_TTMB_VLC_BITS, 2);
3644  if (!s->mb_intra) vc1_mc_1mv(v, 0);
3645  dst_idx = 0;
3646  for (i = 0; i < 6; i++) {
3647  s->dc_val[0][s->block_index[i]] = 0;
3648  dst_idx += i >> 2;
3649  val = ((cbp >> (5 - i)) & 1);
3650  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3651  v->mb_type[0][s->block_index[i]] = s->mb_intra;
3652  if (s->mb_intra) {
3653  /* check if prediction blocks A and C are available */
3654  v->a_avail = v->c_avail = 0;
3655  if (i == 2 || i == 3 || !s->first_slice_line)
3656  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3657  if (i == 1 || i == 3 || s->mb_x)
3658  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3659 
3660  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3661  (i & 4) ? v->codingset2 : v->codingset);
3662  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3663  continue;
3664  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3665  if (v->rangeredfrm)
3666  for (j = 0; j < 64; j++)
3667  s->block[i][j] <<= 1;
3668  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3669  if (v->pq >= 9 && v->overlap) {
3670  if (v->c_avail)
3671  v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3672  if (v->a_avail)
3673  v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3674  }
3675  block_cbp |= 0xF << (i << 2);
3676  block_intra |= 1 << i;
3677  } else if (val) {
3678  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
3679  s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
3680  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3681  block_cbp |= pat << (i << 2);
3682  if (!v->ttmbf && ttmb < 8)
3683  ttmb = -1;
3684  first_block = 0;
3685  }
3686  }
3687  } else { // skipped
3688  s->mb_intra = 0;
3689  for (i = 0; i < 6; i++) {
3690  v->mb_type[0][s->block_index[i]] = 0;
3691  s->dc_val[0][s->block_index[i]] = 0;
3692  }
3693  s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
3694  s->current_picture.f.qscale_table[mb_pos] = 0;
3695  vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3696  vc1_mc_1mv(v, 0);
3697  }
3698  } else { // 4MV mode
3699  if (!skipped /* unskipped MB */) {
3700  int intra_count = 0, coded_inter = 0;
3701  int is_intra[6], is_coded[6];
3702  /* Get CBPCY */
3703  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3704  for (i = 0; i < 6; i++) {
3705  val = ((cbp >> (5 - i)) & 1);
3706  s->dc_val[0][s->block_index[i]] = 0;
3707  s->mb_intra = 0;
3708  if (i < 4) {
3709  dmv_x = dmv_y = 0;
3710  s->mb_intra = 0;
3711  mb_has_coeffs = 0;
3712  if (val) {
3713  GET_MVDATA(dmv_x, dmv_y);
3714  }
3715  vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3716  if (!s->mb_intra)
3717  vc1_mc_4mv_luma(v, i, 0);
3718  intra_count += s->mb_intra;
3719  is_intra[i] = s->mb_intra;
3720  is_coded[i] = mb_has_coeffs;
3721  }
3722  if (i & 4) {
3723  is_intra[i] = (intra_count >= 3);
3724  is_coded[i] = val;
3725  }
3726  if (i == 4)
3727  vc1_mc_4mv_chroma(v, 0);
3728  v->mb_type[0][s->block_index[i]] = is_intra[i];
3729  if (!coded_inter)
3730  coded_inter = !is_intra[i] & is_coded[i];
3731  }
3732  // if there are no coded blocks then don't do anything more
3733  dst_idx = 0;
3734  if (!intra_count && !coded_inter)
3735  goto end;
3736  GET_MQUANT();
3737  s->current_picture.f.qscale_table[mb_pos] = mquant;
3738  /* test if block is intra and has pred */
3739  {
3740  int intrapred = 0;
3741  for (i = 0; i < 6; i++)
3742  if (is_intra[i]) {
3743  if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3744  || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
3745  intrapred = 1;
3746  break;
3747  }
3748  }
3749  if (intrapred)
3750  s->ac_pred = get_bits1(gb);
3751  else
3752  s->ac_pred = 0;
3753  }
3754  if (!v->ttmbf && coded_inter)
3756  for (i = 0; i < 6; i++) {
3757  dst_idx += i >> 2;
3758  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3759  s->mb_intra = is_intra[i];
3760  if (is_intra[i]) {
3761  /* check if prediction blocks A and C are available */
3762  v->a_avail = v->c_avail = 0;
3763  if (i == 2 || i == 3 || !s->first_slice_line)
3764  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3765  if (i == 1 || i == 3 || s->mb_x)
3766  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3767 
3768  vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
3769  (i & 4) ? v->codingset2 : v->codingset);
3770  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3771  continue;
3772  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3773  if (v->rangeredfrm)
3774  for (j = 0; j < 64; j++)
3775  s->block[i][j] <<= 1;
3776  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off,
3777  (i & 4) ? s->uvlinesize : s->linesize);
3778  if (v->pq >= 9 && v->overlap) {
3779  if (v->c_avail)
3780  v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3781  if (v->a_avail)
3782  v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3783  }
3784  block_cbp |= 0xF << (i << 2);
3785  block_intra |= 1 << i;
3786  } else if (is_coded[i]) {
3787  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3788  first_block, s->dest[dst_idx] + off,
3789  (i & 4) ? s->uvlinesize : s->linesize,
3790  (i & 4) && (s->flags & CODEC_FLAG_GRAY),
3791  &block_tt);
3792  block_cbp |= pat << (i << 2);
3793  if (!v->ttmbf && ttmb < 8)
3794  ttmb = -1;
3795  first_block = 0;
3796  }
3797  }
3798  } else { // skipped MB
3799  s->mb_intra = 0;
3800  s->current_picture.f.qscale_table[mb_pos] = 0;
3801  for (i = 0; i < 6; i++) {
3802  v->mb_type[0][s->block_index[i]] = 0;
3803  s->dc_val[0][s->block_index[i]] = 0;
3804  }
3805  for (i = 0; i < 4; i++) {
3806  vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3807  vc1_mc_4mv_luma(v, i, 0);
3808  }
3809  vc1_mc_4mv_chroma(v, 0);
3810  s->current_picture.f.qscale_table[mb_pos] = 0;
3811  }
3812  }
3813 end:
3814  v->cbp[s->mb_x] = block_cbp;
3815  v->ttblk[s->mb_x] = block_tt;
3816  v->is_intra[s->mb_x] = block_intra;
3817 
3818  return 0;
3819 }
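/* [Editor's note - illustrative sketch, not part of the original file]
 * In the block loops above, bit (5 - i) of the decoded CBPCY value marks
 * block i as coded (i = 0..3 are the four 8x8 luma blocks in raster order,
 * i = 4 is Cb, i = 5 is Cr), and the luma destination offset is derived from
 * the block index alone via "off = (i & 1) * 8 + (i & 2) * 4 * linesize".
 * A standalone sketch of that mapping (function name is hypothetical): */
#include <stdio.h>

static void show_p_mb_block_layout(int cbp, int linesize)
{
    int i;
    for (i = 0; i < 6; i++) {
        int coded = (cbp >> (5 - i)) & 1;
        int off   = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * linesize;
        /* luma offsets: block 0 -> 0, 1 -> 8, 2 -> 8*linesize, 3 -> 8*linesize+8 */
        printf("block %d: coded=%d dest offset=%d\n", i, coded, off);
    }
}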
3820 
3821 /* Decode one macroblock in an interlaced frame P picture */
3822 
3823 static int vc1_decode_p_mb_intfr(VC1Context *v)
3824 {
3825  MpegEncContext *s = &v->s;
3826  GetBitContext *gb = &s->gb;
3827  int i;
3828  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3829  int cbp = 0; /* cbp decoding stuff */
3830  int mqdiff, mquant; /* MB quantization */
3831  int ttmb = v->ttfrm; /* MB Transform type */
3832 
3833  int mb_has_coeffs = 1; /* last_flag */
3834  int dmv_x, dmv_y; /* Differential MV components */
3835  int val; /* temp value */
3836  int first_block = 1;
3837  int dst_idx, off;
3838  int skipped, fourmv = 0, twomv = 0;
3839  int block_cbp = 0, pat, block_tt = 0;
3840  int idx_mbmode = 0, mvbp;
3841  int stride_y, fieldtx;
3842 
3843  mquant = v->pq; /* Lossy initialization */
3844 
3845  if (v->skip_is_raw)
3846  skipped = get_bits1(gb);
3847  else
3848  skipped = v->s.mbskip_table[mb_pos];
3849  if (!skipped) {
3850  if (v->fourmvswitch)
3851  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
3852  else
3853  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
3854  switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
3855  /* store the motion vector type in a flag (useful later) */
3856  case MV_PMODE_INTFR_4MV:
3857  fourmv = 1;
3858  v->blk_mv_type[s->block_index[0]] = 0;
3859  v->blk_mv_type[s->block_index[1]] = 0;
3860  v->blk_mv_type[s->block_index[2]] = 0;
3861  v->blk_mv_type[s->block_index[3]] = 0;
3862  break;
3863  case MV_PMODE_INTFR_4MV_FIELD:
3864  fourmv = 1;
3865  v->blk_mv_type[s->block_index[0]] = 1;
3866  v->blk_mv_type[s->block_index[1]] = 1;
3867  v->blk_mv_type[s->block_index[2]] = 1;
3868  v->blk_mv_type[s->block_index[3]] = 1;
3869  break;
3870  case MV_PMODE_INTFR_2MV_FIELD:
3871  twomv = 1;
3872  v->blk_mv_type[s->block_index[0]] = 1;
3873  v->blk_mv_type[s->block_index[1]] = 1;
3874  v->blk_mv_type[s->block_index[2]] = 1;
3875  v->blk_mv_type[s->block_index[3]] = 1;
3876  break;
3877  case MV_PMODE_INTFR_1MV:
3878  v->blk_mv_type[s->block_index[0]] = 0;
3879  v->blk_mv_type[s->block_index[1]] = 0;
3880  v->blk_mv_type[s->block_index[2]] = 0;
3881  v->blk_mv_type[s->block_index[3]] = 0;
3882  break;
3883  }
3884  if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
3885  s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
3886  s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
3887  s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
3888  s->mb_intra = v->is_intra[s->mb_x] = 1;
3889  for (i = 0; i < 6; i++)
3890  v->mb_type[0][s->block_index[i]] = 1;
3891  fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
3892  mb_has_coeffs = get_bits1(gb);
3893  if (mb_has_coeffs)
3894  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3895  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3896  GET_MQUANT();
3897  s->current_picture.f.qscale_table[mb_pos] = mquant;
3898  /* Set DC scale - y and c use the same (not sure if necessary here) */
3899  s->y_dc_scale = s->y_dc_scale_table[mquant];
3900  s->c_dc_scale = s->c_dc_scale_table[mquant];
3901  dst_idx = 0;
3902  for (i = 0; i < 6; i++) {
3903  s->dc_val[0][s->block_index[i]] = 0;
3904  dst_idx += i >> 2;
3905  val = ((cbp >> (5 - i)) & 1);
3906  v->mb_type[0][s->block_index[i]] = s->mb_intra;
3907  v->a_avail = v->c_avail = 0;
3908  if (i == 2 || i == 3 || !s->first_slice_line)
3909  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3910  if (i == 1 || i == 3 || s->mb_x)
3911  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3912 
3913  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3914  (i & 4) ? v->codingset2 : v->codingset);
3915  if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3916  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3917  if (i < 4) {
3918  stride_y = s->linesize << fieldtx;
3919  off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3920  } else {
3921  stride_y = s->uvlinesize;
3922  off = 0;
3923  }
3924  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
3925  //TODO: loop filter
3926  }
3927 
3928  } else { // inter MB
3929  mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
3930  if (mb_has_coeffs)
3931  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3932  if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
3934  } else {
3935  if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
3936  || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
3938  }
3939  }
3940  s->mb_intra = v->is_intra[s->mb_x] = 0;
3941  for (i = 0; i < 6; i++)
3942  v->mb_type[0][s->block_index[i]] = 0;
3943  fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
3944  /* for each motion vector, read MVDATA and motion-compensate the corresponding block(s) */
3945  dst_idx = 0;
3946  if (fourmv) {
3947  mvbp = v->fourmvbp;
3948  for (i = 0; i < 6; i++) {
3949  if (i < 4) {
3950  dmv_x = dmv_y = 0;
3951  val = ((mvbp >> (3 - i)) & 1);
3952  if (val) {
3953  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3954  }
3955  vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
3956  vc1_mc_4mv_luma(v, i, 0);
3957  } else if (i == 4) {
3958  vc1_mc_4mv_chroma4(v);
3959  }
3960  }
3961  } else if (twomv) {
3962  mvbp = v->twomvbp;
3963  dmv_x = dmv_y = 0;
3964  if (mvbp & 2) {
3965  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3966  }
3967  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
3968  vc1_mc_4mv_luma(v, 0, 0);
3969  vc1_mc_4mv_luma(v, 1, 0);
3970  dmv_x = dmv_y = 0;
3971  if (mvbp & 1) {
3972  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3973  }
3974  vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
3975  vc1_mc_4mv_luma(v, 2, 0);
3976  vc1_mc_4mv_luma(v, 3, 0);
3977  vc1_mc_4mv_chroma4(v);
3978  } else {
3979  mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
3980  if (mvbp) {
3981  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3982  }
3983  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
3984  vc1_mc_1mv(v, 0);
3985  }
3986  if (cbp)
3987  GET_MQUANT(); // p. 227
3988  s->current_picture.f.qscale_table[mb_pos] = mquant;
3989  if (!v->ttmbf && cbp)
3991  for (i = 0; i < 6; i++) {
3992  s->dc_val[0][s->block_index[i]] = 0;
3993  dst_idx += i >> 2;
3994  val = ((cbp >> (5 - i)) & 1);
3995  if (!fieldtx)
3996  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3997  else
3998  off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
3999  if (val) {
4000  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4001  first_block, s->dest[dst_idx] + off,
4002  (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
4003  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
4004  block_cbp |= pat << (i << 2);
4005  if (!v->ttmbf && ttmb < 8)
4006  ttmb = -1;
4007  first_block = 0;
4008  }
4009  }
4010  }
4011  } else { // skipped
4012  s->mb_intra = v->is_intra[s->mb_x] = 0;
4013  for (i = 0; i < 6; i++) {
4014  v->mb_type[0][s->block_index[i]] = 0;
4015  s->dc_val[0][s->block_index[i]] = 0;
4016  }
4017  s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
4018  s->current_picture.f.qscale_table[mb_pos] = 0;
4019  v->blk_mv_type[s->block_index[0]] = 0;
4020  v->blk_mv_type[s->block_index[1]] = 0;
4021  v->blk_mv_type[s->block_index[2]] = 0;
4022  v->blk_mv_type[s->block_index[3]] = 0;
4023  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
4024  vc1_mc_1mv(v, 0);
4025  }
4026  if (s->mb_x == s->mb_width - 1)
4027  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
4028  return 0;
4029 }
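/* [Editor's note - illustrative sketch, not part of the original file]
 * With FIELDTX set, the luma blocks of this MB are field-coded: the effective
 * stride doubles (s->linesize << fieldtx) and the bottom pair of 8x8 blocks
 * starts one picture line below the top pair instead of eight lines below,
 * which is what the "off"/"stride_y" computations above encode.  A sketch of
 * both layouts (hypothetical helper; linesize is an example parameter): */
#include <stdio.h>

static void show_intfr_luma_layout(int linesize)
{
    int fieldtx, i;
    for (fieldtx = 0; fieldtx <= 1; fieldtx++) {
        int stride = linesize << fieldtx;
        for (i = 0; i < 4; i++) {
            int off = fieldtx ? (i & 1) * 8 + ((i & 2) >> 1) * linesize
                              : (i & 1) * 8 + (i & 2) * 4 * linesize;
            printf("fieldtx=%d block %d: offset=%d stride=%d\n",
                   fieldtx, i, off, stride);
        }
    }
}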
4030 
4031 static int vc1_decode_p_mb_intfi(VC1Context *v)
4032 {
4033  MpegEncContext *s = &v->s;
4034  GetBitContext *gb = &s->gb;
4035  int i;
4036  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4037  int cbp = 0; /* cbp decoding stuff */
4038  int mqdiff, mquant; /* MB quantization */
4039  int ttmb = v->ttfrm; /* MB Transform type */
4040 
4041  int mb_has_coeffs = 1; /* last_flag */
4042  int dmv_x, dmv_y; /* Differential MV components */
4043  int val; /* temp values */
4044  int first_block = 1;
4045  int dst_idx, off;
4046  int pred_flag;
4047  int block_cbp = 0, pat, block_tt = 0;
4048  int idx_mbmode = 0;
4049 
4050  mquant = v->pq; /* Lossy initialization */
4051 
4052  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4053  if (idx_mbmode <= 1) { // intra MB
4054  s->mb_intra = v->is_intra[s->mb_x] = 1;
4055  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4056  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4057  s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4058  GET_MQUANT();
4059  s->current_picture.f.qscale_table[mb_pos] = mquant;
4060  /* Set DC scale - y and c use the same (not sure if necessary here) */
4061  s->y_dc_scale = s->y_dc_scale_table[mquant];
4062  s->c_dc_scale = s->c_dc_scale_table[mquant];
4063  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4064  mb_has_coeffs = idx_mbmode & 1;
4065  if (mb_has_coeffs)
4066  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4067  dst_idx = 0;
4068  for (i = 0; i < 6; i++) {
4069  s->dc_val[0][s->block_index[i]] = 0;
4070  v->mb_type[0][s->block_index[i]] = 1;
4071  dst_idx += i >> 2;
4072  val = ((cbp >> (5 - i)) & 1);
4073  v->a_avail = v->c_avail = 0;
4074  if (i == 2 || i == 3 || !s->first_slice_line)
4075  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4076  if (i == 1 || i == 3 || s->mb_x)
4077  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4078 
4079  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4080  (i & 4) ? v->codingset2 : v->codingset);
4081  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4082  continue;
4083  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4084  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4085  off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
4086  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4087  // TODO: loop filter
4088  }
4089  } else {
4090  s->mb_intra = v->is_intra[s->mb_x] = 0;
4091  s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4092  for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4093  if (idx_mbmode <= 5) { // 1-MV
4094  dmv_x = dmv_y = 0;
4095  if (idx_mbmode & 1) {
4096  get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4097  }
4098  vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4099  vc1_mc_1mv(v, 0);
4100  mb_has_coeffs = !(idx_mbmode & 2);
4101  } else { // 4-MV
4103  for (i = 0; i < 6; i++) {
4104  if (i < 4) {
4105  dmv_x = dmv_y = pred_flag = 0;
4106  val = ((v->fourmvbp >> (3 - i)) & 1);
4107  if (val) {
4108  get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4109  }
4110  vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4111  vc1_mc_4mv_luma(v, i, 0);
4112  } else if (i == 4)
4113  vc1_mc_4mv_chroma(v, 0);
4114  }
4115  mb_has_coeffs = idx_mbmode & 1;
4116  }
4117  if (mb_has_coeffs)
4118  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4119  if (cbp) {
4120  GET_MQUANT();
4121  }
4122  s->current_picture.f.qscale_table[mb_pos] = mquant;
4123  if (!v->ttmbf && cbp) {
4125  }
4126  dst_idx = 0;
4127  for (i = 0; i < 6; i++) {
4128  s->dc_val[0][s->block_index[i]] = 0;
4129  dst_idx += i >> 2;
4130  val = ((cbp >> (5 - i)) & 1);
4131  off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4132  if (v->cur_field_type)
4133  off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
4134  if (val) {
4135  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4136  first_block, s->dest[dst_idx] + off,
4137  (i & 4) ? s->uvlinesize : s->linesize,
4138  (i & 4) && (s->flags & CODEC_FLAG_GRAY),
4139  &block_tt);
4140  block_cbp |= pat << (i << 2);
4141  if (!v->ttmbf && ttmb < 8) ttmb = -1;
4142  first_block = 0;
4143  }
4144  }
4145  }
4146  if (s->mb_x == s->mb_width - 1)
4147  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4148  return 0;
4149 }
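/* [Editor's note] In the field-interlaced path above, "off" is additionally
 * advanced by one picture line (f.linesize[0] for luma, f.linesize[1] for
 * chroma) when the second field is being decoded (v->cur_field_type != 0):
 * both fields of a field picture share one frame buffer with their lines
 * interleaved, so the second field's samples start one picture line below
 * the first field's. */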
4150 
4153 static void vc1_decode_b_mb(VC1Context *v)
4154 {
4155  MpegEncContext *s = &v->s;
4156  GetBitContext *gb = &s->gb;
4157  int i, j;
4158  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4159  int cbp = 0; /* cbp decoding stuff */
4160  int mqdiff, mquant; /* MB quantization */
4161  int ttmb = v->ttfrm; /* MB Transform type */
4162  int mb_has_coeffs = 0; /* last_flag */
4163  int index, index1; /* LUT indexes */
4164  int val, sign; /* temp values */
4165  int first_block = 1;
4166  int dst_idx, off;
4167  int skipped, direct;
4168  int dmv_x[2], dmv_y[2];
4169  int bmvtype = BMV_TYPE_BACKWARD;
4170 
4171  mquant = v->pq; /* lossy initialization */
4172  s->mb_intra = 0;
4173 
4174  if (v->dmb_is_raw)
4175  direct = get_bits1(gb);
4176  else
4177  direct = v->direct_mb_plane[mb_pos];
4178  if (v->skip_is_raw)
4179  skipped = get_bits1(gb);
4180  else
4181  skipped = v->s.mbskip_table[mb_pos];
4182 
4183  dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4184  for (i = 0; i < 6; i++) {
4185  v->mb_type[0][s->block_index[i]] = 0;
4186  s->dc_val[0][s->block_index[i]] = 0;
4187  }
4188  s->current_picture.f.qscale_table[mb_pos] = 0;
4189 
4190  if (!direct) {
4191  if (!skipped) {
4192  GET_MVDATA(dmv_x[0], dmv_y[0]);
4193  dmv_x[1] = dmv_x[0];
4194  dmv_y[1] = dmv_y[0];
4195  }
4196  if (skipped || !s->mb_intra) {
4197  bmvtype = decode012(gb);
4198  switch (bmvtype) {
4199  case 0:
4200  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4201  break;
4202  case 1:
4203  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4204  break;
4205  case 2:
4206  bmvtype = BMV_TYPE_INTERPOLATED;
4207  dmv_x[0] = dmv_y[0] = 0;
4208  }
4209  }
4210  }
4211  for (i = 0; i < 6; i++)
4212  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4213 
4214  if (skipped) {
4215  if (direct)
4216  bmvtype = BMV_TYPE_INTERPOLATED;
4217  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4218  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4219  return;
4220  }
4221  if (direct) {
4222  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4223  GET_MQUANT();
4224  s->mb_intra = 0;
4225  s->current_picture.f.qscale_table[mb_pos] = mquant;
4226  if (!v->ttmbf)
4228  dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4229  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4230  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4231  } else {
4232  if (!mb_has_coeffs && !s->mb_intra) {
4233  /* no coded blocks - effectively skipped */
4234  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4235  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4236  return;
4237  }
4238  if (s->mb_intra && !mb_has_coeffs) {
4239  GET_MQUANT();
4240  s->current_picture.f.qscale_table[mb_pos] = mquant;
4241  s->ac_pred = get_bits1(gb);
4242  cbp = 0;
4243  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4244  } else {
4245  if (bmvtype == BMV_TYPE_INTERPOLATED) {
4246  GET_MVDATA(dmv_x[0], dmv_y[0]);
4247  if (!mb_has_coeffs) {
4248  /* interpolated skipped block */
4249  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4250  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4251  return;
4252  }
4253  }
4254  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4255  if (!s->mb_intra) {
4256  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4257  }
4258  if (s->mb_intra)
4259  s->ac_pred = get_bits1(gb);
4260  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4261  GET_MQUANT();
4262  s->current_picture.f.qscale_table[mb_pos] = mquant;
4263  if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
4265  }
4266  }
4267  dst_idx = 0;
4268  for (i = 0; i < 6; i++) {
4269  s->dc_val[0][s->block_index[i]] = 0;
4270  dst_idx += i >> 2;
4271  val = ((cbp >> (5 - i)) & 1);
4272  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4273  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4274  if (s->mb_intra) {
4275  /* check if prediction blocks A and C are available */
4276  v->a_avail = v->c_avail = 0;
4277  if (i == 2 || i == 3 || !s->first_slice_line)
4278  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4279  if (i == 1 || i == 3 || s->mb_x)
4280  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4281 
4282  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4283  (i & 4) ? v->codingset2 : v->codingset);
4284  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4285  continue;
4286  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4287  if (v->rangeredfrm)
4288  for (j = 0; j < 64; j++)
4289  s->block[i][j] <<= 1;
4290  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
4291  } else if (val) {
4292  vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4293  first_block, s->dest[dst_idx] + off,
4294  (i & 4) ? s->uvlinesize : s->linesize,
4295  (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4296  if (!v->ttmbf && ttmb < 8)
4297  ttmb = -1;
4298  first_block = 0;
4299  }
4300  }
4301 }
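/* [Editor's note - illustrative sketch, not part of the original file]
 * The decode012() value read above selects the B MV type relative to
 * BFRACTION: code 0 picks the temporally closer anchor (backward prediction
 * when bfraction >= 1/2, i.e. the picture lies in the second half of the
 * interval), code 1 picks the farther anchor, and code 2 selects interpolated
 * prediction.  A hypothetical helper mirroring that switch: */
enum { EX_BMV_FORWARD, EX_BMV_BACKWARD, EX_BMV_INTERPOLATED };

static int example_bmv_type(int code012, int bfraction, int bfraction_den)
{
    if (code012 == 2)
        return EX_BMV_INTERPOLATED;
    /* code 0 = the default direction, code 1 = the opposite one */
    if ((bfraction >= bfraction_den / 2) ^ code012)
        return EX_BMV_BACKWARD;
    return EX_BMV_FORWARD;
}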
4302 
4305 static void vc1_decode_b_mb_intfi(VC1Context *v)
4306 {
4307  MpegEncContext *s = &v->s;
4308  GetBitContext *gb = &s->gb;
4309  int i, j;
4310  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4311  int cbp = 0; /* cbp decoding stuff */
4312  int mqdiff, mquant; /* MB quantization */
4313  int ttmb = v->ttfrm; /* MB Transform type */
4314  int mb_has_coeffs = 0; /* last_flag */
4315  int val; /* temp value */
4316  int first_block = 1;
4317  int dst_idx, off;
4318  int fwd;
4319  int dmv_x[2], dmv_y[2], pred_flag[2];
4320  int bmvtype = BMV_TYPE_BACKWARD;
4321  int idx_mbmode, interpmvp;
4322 
4323  mquant = v->pq; /* Lossy initialization */
4324  s->mb_intra = 0;
4325 
4326  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4327  if (idx_mbmode <= 1) { // intra MB
4328  s->mb_intra = v->is_intra[s->mb_x] = 1;
4329  s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
4330  s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
4331  s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4332  GET_MQUANT();
4333  s->current_picture.f.qscale_table[mb_pos] = mquant;
4334  /* Set DC scale - y and c use the same (not sure if necessary here) */
4335  s->y_dc_scale = s->y_dc_scale_table[mquant];
4336  s->c_dc_scale = s->c_dc_scale_table[mquant];
4337  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4338  mb_has_coeffs = idx_mbmode & 1;
4339  if (mb_has_coeffs)
4340  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4341  dst_idx = 0;
4342  for (i = 0; i < 6; i++) {
4343  s->dc_val[0][s->block_index[i]] = 0;
4344  dst_idx += i >> 2;
4345  val = ((cbp >> (5 - i)) & 1);
4346  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4347  v->a_avail = v->c_avail = 0;
4348  if (i == 2 || i == 3 || !s->first_slice_line)
4349  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4350  if (i == 1 || i == 3 || s->mb_x)
4351  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4352 
4353  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4354  (i & 4) ? v->codingset2 : v->codingset);
4355  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4356  continue;
4357  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4358  if (v->rangeredfrm)
4359  for (j = 0; j < 64; j++)
4360  s->block[i][j] <<= 1;
4361  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4362  off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
4363  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4364  // TODO: loop filter not yet applied here
4365  }
4366  } else {
4367  s->mb_intra = v->is_intra[s->mb_x] = 0;
4368  s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4369  for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4370  if (v->fmb_is_raw)
4371  fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
4372  else
4373  fwd = v->forward_mb_plane[mb_pos];
4374  if (idx_mbmode <= 5) { // 1-MV
4375  dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4376  pred_flag[0] = pred_flag[1] = 0;
4377  if (fwd)
4378  bmvtype = BMV_TYPE_FORWARD;
4379  else {
4380  bmvtype = decode012(gb);
4381  switch (bmvtype) {
4382  case 0:
4383  bmvtype = BMV_TYPE_BACKWARD;
4384  break;
4385  case 1:
4386  bmvtype = BMV_TYPE_DIRECT;
4387  break;
4388  case 2:
4389  bmvtype = BMV_TYPE_INTERPOLATED;
4390  interpmvp = get_bits1(gb);
4391  }
4392  }
4393  v->bmvtype = bmvtype;
4394  if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
4395  get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4396  }
4397  if (bmvtype == BMV_TYPE_INTERPOLATED && interpmvp) {
4398  get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
4399  }
4400  if (bmvtype == BMV_TYPE_DIRECT) {
4401  dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4402  dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4403  }
4404  vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
4405  vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
4406  mb_has_coeffs = !(idx_mbmode & 2);
4407  } else { // 4-MV
4408  if (fwd)
4409  bmvtype = BMV_TYPE_FORWARD;
4410  v->bmvtype = bmvtype;
4412  for (i = 0; i < 6; i++) {
4413  if (i < 4) {
4414  dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4415  dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4416  val = ((v->fourmvbp >> (3 - i)) & 1);
4417  if (val) {
4418  get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
4419  &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
4420  &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4421  }
4422  vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
4423  vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD);
4424  } else if (i == 4)
4425  vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
4426  }
4427  mb_has_coeffs = idx_mbmode & 1;
4428  }
4429  if (mb_has_coeffs)
4430  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4431  if (cbp) {
4432  GET_MQUANT();
4433  }
4434  s->current_picture.f.qscale_table[mb_pos] = mquant;
4435  if (!v->ttmbf && cbp) {
4437  }
4438  dst_idx = 0;
4439  for (i = 0; i < 6; i++) {
4440  s->dc_val[0][s->block_index[i]] = 0;
4441  dst_idx += i >> 2;
4442  val = ((cbp >> (5 - i)) & 1);
4443  off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4444  if (v->cur_field_type)
4445  off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
4446  if (val) {
4447  vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4448  first_block, s->dest[dst_idx] + off,
4449  (i & 4) ? s->uvlinesize : s->linesize,
4450  (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4451  if (!v->ttmbf && ttmb < 8)
4452  ttmb = -1;
4453  first_block = 0;
4454  }
4455  }
4456  }
4457 }
4458 
4461 static void vc1_decode_i_blocks(VC1Context *v)
4462 {
4463  int k, j;
4464  MpegEncContext *s = &v->s;
4465  int cbp, val;
4466  uint8_t *coded_val;
4467  int mb_pos;
4468 
4469  /* select coding mode used for VLC table selection */
4470  switch (v->y_ac_table_index) {
4471  case 0:
4473  break;
4474  case 1:
4476  break;
4477  case 2:
4479  break;
4480  }
4481 
4482  switch (v->c_ac_table_index) {
4483  case 0:
4485  break;
4486  case 1:
4488  break;
4489  case 2:
4491  break;
4492  }
4493 
4494  /* Set DC scale - y and c use the same */
4495  s->y_dc_scale = s->y_dc_scale_table[v->pq];
4496  s->c_dc_scale = s->c_dc_scale_table[v->pq];
4497 
4498  //do frame decode
4499  s->mb_x = s->mb_y = 0;
4500  s->mb_intra = 1;
4501  s->first_slice_line = 1;
4502  for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
4503  s->mb_x = 0;
4505  for (; s->mb_x < s->mb_width; s->mb_x++) {
4506  uint8_t *dst[6];
4508  dst[0] = s->dest[0];
4509  dst[1] = dst[0] + 8;
4510  dst[2] = s->dest[0] + s->linesize * 8;
4511  dst[3] = dst[2] + 8;
4512  dst[4] = s->dest[1];
4513  dst[5] = s->dest[2];
4514  s->dsp.clear_blocks(s->block[0]);
4515  mb_pos = s->mb_x + s->mb_y * s->mb_width;
4516  s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
4517  s->current_picture.f.qscale_table[mb_pos] = v->pq;
4518  s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
4519  s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
4520 
4521  // do actual MB decoding and displaying
4523  v->s.ac_pred = get_bits1(&v->s.gb);
4524 
4525  for (k = 0; k < 6; k++) {
4526  val = ((cbp >> (5 - k)) & 1);
4527 
4528  if (k < 4) {
4529  int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4530  val = val ^ pred;
4531  *coded_val = val;
4532  }
4533  cbp |= val << (5 - k);
4534 
4535  vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
4536 
4537  if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4538  continue;
4539  v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
4540  if (v->pq >= 9 && v->overlap) {
4541  if (v->rangeredfrm)
4542  for (j = 0; j < 64; j++)
4543  s->block[k][j] <<= 1;
4544  s->dsp.put_signed_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
4545  } else {
4546  if (v->rangeredfrm)
4547  for (j = 0; j < 64; j++)
4548  s->block[k][j] = (s->block[k][j] - 64) << 1;
4549  s->dsp.put_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
4550  }
4551  }
4552 
4553  if (v->pq >= 9 && v->overlap) {
4554  if (s->mb_x) {
4555  v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
4556  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4557  if (!(s->flags & CODEC_FLAG_GRAY)) {
4558  v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
4559  v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
4560  }
4561  }
4562  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
4563  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4564  if (!s->first_slice_line) {
4565  v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
4566  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
4567  if (!(s->flags & CODEC_FLAG_GRAY)) {
4568  v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
4569  v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
4570  }
4571  }
4572  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4573  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4574  }
4575  if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
4576 
4577  if (get_bits_count(&s->gb) > v->bits) {
4578  ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
4579  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4580  get_bits_count(&s->gb), v->bits);
4581  return;
4582  }
4583  }
4584  if (!v->s.loop_filter)
4585  ff_draw_horiz_band(s, s->mb_y * 16, 16);
4586  else if (s->mb_y)
4587  ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4588 
4589  s->first_slice_line = 0;
4590  }
4591  if (v->s.loop_filter)
4592  ff_draw_horiz_band(s, (s->mb_height - 1) * 16, 16);
4593  ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
4594 }
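/* [Editor's note] In the simple/main-profile I-frame loop above, overlap
 * smoothing runs only when v->pq >= 9 and the sequence-level OVERLAP flag is
 * set: vc1_h_overlap() smooths across the left MB boundary (guarded by
 * s->mb_x) and across the internal edge between the left and right 8x8
 * blocks, while vc1_v_overlap() smooths across the top MB boundary (guarded
 * by !s->first_slice_line) and the internal edge between the top and bottom
 * 8x8 blocks, always on already reconstructed pixels. */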
4595 
4598 static void vc1_decode_i_blocks_adv(VC1Context *v)
4599 {
4600  int k;
4601  MpegEncContext *s = &v->s;
4602  int cbp, val;
4603  uint8_t *coded_val;
4604  int mb_pos;
4605  int mquant = v->pq;
4606  int mqdiff;
4607  GetBitContext *gb = &s->gb;
4608 
4609  /* select coding mode used for VLC table selection */
4610  switch (v->y_ac_table_index) {
4611  case 0:
4613  break;
4614  case 1:
4616  break;
4617  case 2:
4619  break;
4620  }
4621 
4622  switch (v->c_ac_table_index) {
4623  case 0:
4625  break;
4626  case 1:
4628  break;
4629  case 2:
4631  break;
4632  }
4633 
4634  // do frame decode
4635  s->mb_x = s->mb_y = 0;
4636  s->mb_intra = 1;
4637  s->first_slice_line = 1;
4638  s->mb_y = s->start_mb_y;
4639  if (s->start_mb_y) {
4640  s->mb_x = 0;
4642  memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
4643  (1 + s->b8_stride) * sizeof(*s->coded_block));
4644  }
4645  for (; s->mb_y < s->end_mb_y; s->mb_y++) {
4646  s->mb_x = 0;
4648  for (;s->mb_x < s->mb_width; s->mb_x++) {
4649  DCTELEM (*block)[64] = v->block[v->cur_blk_idx];
4651  s->dsp.clear_blocks(block[0]);
4652  mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4653  s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4654  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4655  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4656 
4657  // do actual MB decoding and displaying
4658  if (v->fieldtx_is_raw)
4659  v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
4661  if ( v->acpred_is_raw)
4662  v->s.ac_pred = get_bits1(&v->s.gb);
4663  else
4664  v->s.ac_pred = v->acpred_plane[mb_pos];
4665 
4666  if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
4667  v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
4668 
4669  GET_MQUANT();
4670 
4671  s->current_picture.f.qscale_table[mb_pos] = mquant;
4672  /* Set DC scale - y and c use the same */
4673  s->y_dc_scale = s->y_dc_scale_table[mquant];
4674  s->c_dc_scale = s->c_dc_scale_table[mquant];
4675 
4676  for (k = 0; k < 6; k++) {
4677  val = ((cbp >> (5 - k)) & 1);
4678 
4679  if (k < 4) {
4680  int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4681  val = val ^ pred;
4682  *coded_val = val;
4683  }
4684  cbp |= val << (5 - k);
4685 
4686  v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
4687  v->c_avail = !!s->mb_x || (k == 1 || k == 3);
4688 
4689  vc1_decode_i_block_adv(v, block[k], k, val,
4690  (k < 4) ? v->codingset : v->codingset2, mquant);
4691 
4692  if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4693  continue;
4695  }
4696 
4700 
4701  if (get_bits_count(&s->gb) > v->bits) {
4702  // TODO: may need modification to handle slice coding
4703  ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4704  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4705  get_bits_count(&s->gb), v->bits);
4706  return;
4707  }
4708  }
4709  if (!v->s.loop_filter)
4710  ff_draw_horiz_band(s, s->mb_y * 16, 16);
4711  else if (s->mb_y)
4712  ff_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
4713  s->first_slice_line = 0;
4714  }
4715 
4716  /* raw bottom MB row */
4717  s->mb_x = 0;
4719  for (;s->mb_x < s->mb_width; s->mb_x++) {
4722  if (v->s.loop_filter)
4724  }
4725  if (v->s.loop_filter)
4726  ff_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
4727  ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4728  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
4729 }
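/* [Editor's note] The advanced-profile I-frame loop above decodes each MB
 * into v->block[v->cur_blk_idx] rather than straight into the frame buffer:
 * v->block holds s->mb_width + 2 sets of blocks (see the allocation further
 * below) and the cur/left/topleft/top_blk_idx indices rotate through them,
 * which appears to exist so that overlap smoothing and in-loop deblocking can
 * be applied to a macroblock only after its right and bottom neighbours have
 * been decoded. */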
4730 
4731 static void vc1_decode_p_blocks(VC1Context *v)
4732 {
4733  MpegEncContext *s = &v->s;
4734  int apply_loop_filter;
4735 
4736  /* select coding mode used for VLC table selection */
4737  switch (v->c_ac_table_index) {
4738  case 0:
4740  break;
4741  case 1:
4743  break;
4744  case 2:
4746  break;
4747  }
4748 
4749  switch (v->c_ac_table_index) {
4750  case 0:
4752  break;
4753  case 1:
4755  break;
4756  case 2:
4758  break;
4759  }
4760 
4761  apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);
4762  s->first_slice_line = 1;
4763  memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
4764  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4765  s->mb_x = 0;
4767  for (; s->mb_x < s->mb_width; s->mb_x++) {
4769 
4770  if (v->fcm == ILACE_FIELD)
4772  else if (v->fcm == ILACE_FRAME)
4774  else vc1_decode_p_mb(v);
4775  if (s->mb_y != s->start_mb_y && apply_loop_filter && v->fcm == PROGRESSIVE)
4777  if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4778  // TODO: may need modification to handle slice coding
4779  ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4780  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
4781  get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
4782  return;
4783  }
4784  }
4785  memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
4786  memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
4787  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4788  memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
4789  if (s->mb_y != s->start_mb_y) ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4790  s->first_slice_line = 0;
4791  }
4792  if (apply_loop_filter) {
4793  s->mb_x = 0;
4795  for (; s->mb_x < s->mb_width; s->mb_x++) {
4798  }
4799  }
4800  if (s->end_mb_y >= s->start_mb_y)
4801  ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4802  ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4803  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
4804 }
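/* [Editor's note - illustrative sketch, not part of the original file]
 * Each per-block history array used by the deferred P-frame loop filter
 * (v->cbp, v->ttblk, v->is_intra, v->luma_mv) is allocated as two rows of
 * s->mb_stride entries with the "current" pointer aimed at the second row, so
 * indexing with [s->mb_x - s->mb_stride] reaches the previous row.  The
 * memmove() calls at the end of each row above rotate the freshly decoded row
 * into the "previous row" half.  A self-contained sketch of that scheme
 * (names and the fixed stride are hypothetical): */
#include <string.h>

#define EX_MB_STRIDE 64

typedef struct {
    unsigned base[2 * EX_MB_STRIDE]; /* [0..stride-1]: previous row */
    unsigned *cur;                   /* initialized to base + EX_MB_STRIDE */
} ExRowHistory;

static void ex_end_of_row(ExRowHistory *h)
{
    /* the row just decoded becomes the "previous row" for the next pass */
    memmove(h->base, h->cur, sizeof(h->base[0]) * EX_MB_STRIDE);
}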
4805 
4806 static void vc1_decode_b_blocks(VC1Context *v)
4807 {
4808  MpegEncContext *s = &v->s;
4809 
4810  /* select coding mode used for VLC table selection */
4811  switch (v->c_ac_table_index) {
4812  case 0:
4814  break;
4815  case 1:
4817  break;
4818  case 2:
4820  break;
4821  }
4822 
4823  switch (v->c_ac_table_index) {
4824  case 0:
4826  break;
4827  case 1:
4829  break;
4830  case 2:
4832  break;
4833  }
4834 
4835  s->first_slice_line = 1;
4836  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4837  s->mb_x = 0;
4839  for (; s->mb_x < s->mb_width; s->mb_x++) {
4841 
4842  if (v->fcm == ILACE_FIELD)
4844  else
4845  vc1_decode_b_mb(v);
4846  if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4847  // TODO: may need modification to handle slice coding
4848  ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4849  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
4850  get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
4851  return;
4852  }
4853  if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
4854  }
4855  if (!v->s.loop_filter)
4856  ff_draw_horiz_band(s, s->mb_y * 16, 16);
4857  else if (s->mb_y)
4858  ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4859  s->first_slice_line = 0;
4860  }
4861  if (v->s.loop_filter)
4862  ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4863  ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4864  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
4865 }
4866 
4867 static void vc1_decode_skip_blocks(VC1Context *v)
4868 {
4869  MpegEncContext *s = &v->s;
4870 
4871  ff_er_add_slice(s, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
4872  s->first_slice_line = 1;
4873  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4874  s->mb_x = 0;
4877  memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
4878  memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4879  memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4880  ff_draw_horiz_band(s, s->mb_y * 16, 16);
4881  s->first_slice_line = 0;
4882  }
4884 }
4885 
4886 static void vc1_decode_blocks(VC1Context *v)
4887 {
4888 
4889  v->s.esc3_level_length = 0;
4890  if (v->x8_type) {
4891  ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
4892  } else {
4893  v->cur_blk_idx = 0;
4894  v->left_blk_idx = -1;
4895  v->topleft_blk_idx = 1;
4896  v->top_blk_idx = 2;
4897  switch (v->s.pict_type) {
4898  case AV_PICTURE_TYPE_I:
4899  if (v->profile == PROFILE_ADVANCED)
4901  else
4903  break;
4904  case AV_PICTURE_TYPE_P:
4905  if (v->p_frame_skipped)
4907  else
4909  break;
4910  case AV_PICTURE_TYPE_B:
4911  if (v->bi_type) {
4912  if (v->profile == PROFILE_ADVANCED)
4914  else
4916  } else
4918  break;
4919  }
4920  }
4921 }
4922 
4923 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
4924 
4925 typedef struct {
4937  int coefs[2][7];
4938 
4939  int effect_type, effect_flag;
4940  int effect_pcount1, effect_pcount2;
4941  int effect_params1[15], effect_params2[10];
4942 } SpriteData;
4943 
4944 static inline int get_fp_val(GetBitContext* gb)
4945 {
4946  return (get_bits_long(gb, 30) - (1 << 29)) << 1;
4947 }
4948 
4949 static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
4950 {
4951  c[1] = c[3] = 0;
4952 
4953  switch (get_bits(gb, 2)) {
4954  case 0:
4955  c[0] = 1 << 16;
4956  c[2] = get_fp_val(gb);
4957  c[4] = 1 << 16;
4958  break;
4959  case 1:
4960  c[0] = c[4] = get_fp_val(gb);
4961  c[2] = get_fp_val(gb);
4962  break;
4963  case 2:
4964  c[0] = get_fp_val(gb);
4965  c[2] = get_fp_val(gb);
4966  c[4] = get_fp_val(gb);
4967  break;
4968  case 3:
4969  c[0] = get_fp_val(gb);
4970  c[1] = get_fp_val(gb);
4971  c[2] = get_fp_val(gb);
4972  c[3] = get_fp_val(gb);
4973  c[4] = get_fp_val(gb);
4974  break;
4975  }
4976  c[5] = get_fp_val(gb);
4977  if (get_bits1(gb))
4978  c[6] = get_fp_val(gb);
4979  else
4980  c[6] = 1 << 16;
4981 }
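/* [Editor's note - illustrative sketch, not part of the original file]
 * get_fp_val() above reads a 30-bit field, recentres it around zero and
 * shifts it left by one, yielding a signed 16.16 fixed-point value; that is
 * also why the debug output later prints "integer.thousandths" by dividing by
 * (1 << 16).  Converting such a value for inspection (hypothetical helper): */
static double ex_fp16_to_double(int fp16)
{
    return fp16 / 65536.0; /* 1 << 16 fractional steps per unit */
}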
4982 
4983 static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
4984 {
4985  AVCodecContext *avctx = v->s.avctx;
4986  int sprite, i;
4987 
4988  for (sprite = 0; sprite <= v->two_sprites; sprite++) {
4989  vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
4990  if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
4991  av_log_ask_for_sample(avctx, "Rotation coefficients are not zero");
4992  av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
4993  for (i = 0; i < 7; i++)
4994  av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
4995  sd->coefs[sprite][i] / (1<<16),
4996  (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
4997  av_log(avctx, AV_LOG_DEBUG, "\n");
4998  }
4999 
5000  skip_bits(gb, 2);
5001  if (sd->effect_type = get_bits_long(gb, 30)) {
5002  switch (sd->effect_pcount1 = get_bits(gb, 4)) {
5003  case 7:
5004  vc1_sprite_parse_transform(gb, sd->effect_params1);
5005  break;
5006  case 14:
5007  vc1_sprite_parse_transform(gb, sd->effect_params1);
5008  vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
5009  break;
5010  default:
5011  for (i = 0; i < sd->effect_pcount1; i++)
5012  sd->effect_params1[i] = get_fp_val(gb);
5013  }
5014  if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
5015  // effect 13 is simple alpha blending and matches the opacity above
5016  av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
5017  for (i = 0; i < sd->effect_pcount1; i++)
5018  av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5019  sd->effect_params1[i] / (1 << 16),
5020  (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
5021  av_log(avctx, AV_LOG_DEBUG, "\n");
5022  }
5023 
5024  sd->effect_pcount2 = get_bits(gb, 16);
5025  if (sd->effect_pcount2 > 10) {
5026  av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
5027  return;
5028  } else if (sd->effect_pcount2) {
5029  i = -1;
5030  av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
5031  while (++i < sd->effect_pcount2) {
5032  sd->effect_params2[i] = get_fp_val(gb);
5033  av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5034  sd->effect_params2[i] / (1 << 16),
5035  (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
5036  }
5037  av_log(avctx, AV_LOG_DEBUG, "\n");
5038  }
5039  }
5040  if (sd->effect_flag = get_bits1(gb))
5041  av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
5042 
5043  if (get_bits_count(gb) >= gb->size_in_bits +
5044  (avctx->codec_id == CODEC_ID_WMV3IMAGE ? 64 : 0))
5045  av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
5046  if (get_bits_count(gb) < gb->size_in_bits - 8)
5047  av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
5048 }
5049 
5050 static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
5051 {
5052  int i, plane, row, sprite;
5053  int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
5054  uint8_t* src_h[2][2];
5055  int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
5056  int ysub[2];
5057  MpegEncContext *s = &v->s;
5058 
5059  for (i = 0; i < 2; i++) {
5060  xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
5061  xadv[i] = sd->coefs[i][0];
5062  if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
5063  xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
5064 
5065  yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
5066  yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
5067  }
5068  alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
5069 
5070  for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
5071  int width = v->output_width>>!!plane;
5072 
5073  for (row = 0; row < v->output_height>>!!plane; row++) {
5074  uint8_t *dst = v->sprite_output_frame.data[plane] +
5075  v->sprite_output_frame.linesize[plane] * row;
5076 
5077  for (sprite = 0; sprite <= v->two_sprites; sprite++) {
5078  uint8_t *iplane = s->current_picture.f.data[plane];
5079  int iline = s->current_picture.f.linesize[plane];
5080  int ycoord = yoff[sprite] + yadv[sprite] * row;
5081  int yline = ycoord >> 16;
5082  ysub[sprite] = ycoord & 0xFFFF;
5083  if (sprite) {
5084  iplane = s->last_picture.f.data[plane];
5085  iline = s->last_picture.f.linesize[plane];
5086  }
5087  if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
5088  src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
5089  if (ysub[sprite])
5090  src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + (yline + 1) * iline;
5091  } else {
5092  if (sr_cache[sprite][0] != yline) {
5093  if (sr_cache[sprite][1] == yline) {
5094  FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
5095  FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
5096  } else {
5097  v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
5098  sr_cache[sprite][0] = yline;
5099  }
5100  }
5101  if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
5102  v->vc1dsp.sprite_h(v->sr_rows[sprite][1], iplane + (yline + 1) * iline, xoff[sprite], xadv[sprite], width);
5103  sr_cache[sprite][1] = yline + 1;
5104  }
5105  src_h[sprite][0] = v->sr_rows[sprite][0];
5106  src_h[sprite][1] = v->sr_rows[sprite][1];
5107  }
5108  }
5109 
5110  if (!v->two_sprites) {
5111  if (ysub[0]) {
5112  v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
5113  } else {
5114  memcpy(dst, src_h[0][0], width);
5115  }
5116  } else {
5117  if (ysub[0] && ysub[1]) {
5118  v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
5119  src_h[1][0], src_h[1][1], ysub[1], alpha, width);
5120  } else if (ysub[0]) {
5121  v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
5122  src_h[1][0], alpha, width);
5123  } else if (ysub[1]) {
5124  v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
5125  src_h[0][0], (1<<16)-1-alpha, width);
5126  } else {
5127  v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
5128  }
5129  }
5130  }
5131 
5132  if (!plane) {
5133  for (i = 0; i < 2; i++) {
5134  xoff[i] >>= 1;
5135  yoff[i] >>= 1;
5136  }
5137  }
5138 
5139  }
5140 }
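/* [Editor's note - illustrative sketch, not part of the original file]
 * vc1_draw_sprites() scales in two passes: sprite_h() resamples one source
 * row horizontally into the v->sr_rows cache (keyed by sr_cache[]), and the
 * sprite_v_*() calls then blend vertically, with ysub[] as a 16-bit
 * fractional row position and "alpha" mixing the two sprites when present.
 * A plain scalar version of the single-sprite vertical blend could look like
 * the following; the rounding of the real DSP routine may differ: */
#include <stdint.h>

static uint8_t ex_sprite_v_blend(uint8_t top, uint8_t bottom, int ysub)
{
    /* linear interpolation between two source rows, ysub in [0, 65536) */
    return (uint8_t)((top * (65536 - ysub) + bottom * ysub + 32768) >> 16);
}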
5141 
5142 
5143 static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
5144 {
5145  MpegEncContext *s = &v->s;
5146  AVCodecContext *avctx = s->avctx;
5147  SpriteData sd;
5148 
5149  vc1_parse_sprites(v, gb, &sd);
5150 
5151  if (!s->current_picture.f.data[0]) {
5152  av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
5153  return -1;
5154  }
5155 
5156  if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
5157  av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
5158  v->two_sprites = 0;
5159  }
5160 
5161  if (v->sprite_output_frame.data[0])
5162  avctx->release_buffer(avctx, &v->sprite_output_frame);
5163 
5166  if (avctx->get_buffer(avctx, &v->sprite_output_frame) < 0) {
5167  av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
5168  return -1;
5169  }
5170 
5171  vc1_draw_sprites(v, &sd);
5172 
5173  return 0;
5174 }
5175 
5176 static void vc1_sprite_flush(AVCodecContext *avctx)
5177 {
5178  VC1Context *v = avctx->priv_data;
5179  MpegEncContext *s = &v->s;
5180  AVFrame *f = &s->current_picture.f;
5181  int plane, i;
5182 
5183  /* Windows Media Image codecs have a convergence interval of two keyframes.
5184  Since we can't enforce it, clear to black the missing sprite. This is
5185  wrong but it looks better than doing nothing. */
5186 
5187  if (f->data[0])
5188  for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
5189  for (i = 0; i < v->sprite_height>>!!plane; i++)
5190  memset(f->data[plane] + i * f->linesize[plane],
5191  plane ? 128 : 0, f->linesize[plane]);
5192 }
5193 
5194 #endif
5195 
5196 static int vc1_decode_init_alloc_tables(VC1Context *v)
5197 {
5198  MpegEncContext *s = &v->s;
5199  int i;
5200 
5201  /* Allocate mb bitplanes */
5206  v->acpred_plane = av_malloc (s->mb_stride * s->mb_height);
5208 
5209  v->n_allocated_blks = s->mb_width + 2;
5210  v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
5211  v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
5212  v->cbp = v->cbp_base + s->mb_stride;
5213  v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
5214  v->ttblk = v->ttblk_base + s->mb_stride;
5215  v->is_intra_base = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
5216  v->is_intra = v->is_intra_base + s->mb_stride;
5217  v->luma_mv_base = av_malloc(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
5218  v->luma_mv = v->luma_mv_base + s->mb_stride;
5219 
5220  /* allocate block type info in such a way that it can be indexed with s->block_index[] */
5221  v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5222  v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
5223  v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
5224  v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);
5225 
5226  /* allocate memory to store block level MV info */
5227  v->blk_mv_type_base = av_mallocz( s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5228  v->blk_mv_type = v->blk_mv_type_base + s->b8_stride + 1;
5229  v->mv_f_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5230  v->mv_f[0] = v->mv_f_base + s->b8_stride + 1;
5231  v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5232  v->mv_f_last_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5233  v->mv_f_last[0] = v->mv_f_last_base + s->b8_stride + 1;
5234  v->mv_f_last[1] = v->mv_f_last[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5235  v->mv_f_next_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5236  v->mv_f_next[0] = v->mv_f_next_base + s->b8_stride + 1;
5237  v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5238 
5239  /* Init coded blocks info */
5240  if (v->profile == PROFILE_ADVANCED) {
5241 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
5242 // return -1;
5243 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
5244 // return -1;
5245  }
5246 
5247  ff_intrax8_common_init(&v->x8,s);
5248 
5250  for (i = 0; i < 4; i++)
5251  if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width))) return -1;
5252  }
5253 
5254  if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
5255  !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
5256  !v->mb_type_base)
5257  return -1;
5258 
5259  return 0;
5260 }
5261 
5266 static av_cold int vc1_decode_init(AVCodecContext *avctx)
5267 {
5268  VC1Context *v = avctx->priv_data;
5269  MpegEncContext *s = &v->s;
5270  GetBitContext gb;
5271  int i;
5272 
5273  /* save the container output size for WMImage */
5274  v->output_width = avctx->width;
5275  v->output_height = avctx->height;
5276 
5277  if (!avctx->extradata_size || !avctx->extradata)
5278  return -1;
5279  if (!(avctx->flags & CODEC_FLAG_GRAY))
5280  avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
5281  else
5282  avctx->pix_fmt = PIX_FMT_GRAY8;
5283  avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
5284  v->s.avctx = avctx;
5285  avctx->flags |= CODEC_FLAG_EMU_EDGE;
5286  v->s.flags |= CODEC_FLAG_EMU_EDGE;
5287 
5288  if (avctx->idct_algo == FF_IDCT_AUTO) {
5289  avctx->idct_algo = FF_IDCT_WMV2;
5290  }
5291 
5292  if (ff_vc1_init_common(v) < 0)
5293  return -1;
5294  ff_vc1dsp_init(&v->vc1dsp);
5295 
5296  if (avctx->codec_id == CODEC_ID_WMV3 || avctx->codec_id == CODEC_ID_WMV3IMAGE) {
5297  int count = 0;
5298 
5299  // WMV3 stores a sequence header in the extradata;
5300  // an advanced-profile sequence header may instead appear before the first frame.
5301  // The last byte of the extradata is a version number; it is 1 for the
5302  // samples we can decode.
5303 
5304  init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
5305 
5306  if (vc1_decode_sequence_header(avctx, v, &gb) < 0)
5307  return -1;
5308 
5309  count = avctx->extradata_size*8 - get_bits_count(&gb);
5310  if (count > 0) {
5311  av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
5312  count, get_bits(&gb, count));
5313  } else if (count < 0) {
5314  av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
5315  }
5316  } else { // VC1/WVC1/WVP2
5317  const uint8_t *start = avctx->extradata;
5318  uint8_t *end = avctx->extradata + avctx->extradata_size;
5319  const uint8_t *next;
5320  int size, buf2_size;
5321  uint8_t *buf2 = NULL;
5322  int seq_initialized = 0, ep_initialized = 0;
5323 
5324  if (avctx->extradata_size < 16) {
5325  av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
5326  return -1;
5327  }
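/* Advanced-profile (WVC1/WVP2) extradata is a series of start-code prefixed
 * units; the loop below walks them and picks up the sequence header and the
 * entry point header. */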
5328 
5329  buf2 = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
5330  start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
5331  next = start;
5332  for (; next < end; start = next) {
5333  next = find_next_marker(start + 4, end);
5334  size = next - start - 4;
5335  if (size <= 0)
5336  continue;
5337  buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
5338  init_get_bits(&gb, buf2, buf2_size * 8);
5339  switch (AV_RB32(start)) {
5340  case VC1_CODE_SEQHDR:
5341  if (vc1_decode_sequence_header(avctx, v, &gb) < 0) {
5342  av_free(buf2);
5343  return -1;
5344  }
5345  seq_initialized = 1;
5346  break;
5347  case VC1_CODE_ENTRYPOINT:
5348  if (vc1_decode_entry_point(avctx, v, &gb) < 0) {
5349  av_free(buf2);
5350  return -1;
5351  }
5352  ep_initialized = 1;
5353  break;
5354  }
5355  }
5356  av_free(buf2);
5357  if (!seq_initialized || !ep_initialized) {
5358  av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
5359  return -1;
5360  }
5361  v->res_sprite = (avctx->codec_tag == MKTAG('W','V','P','2'));
5362  }
5363 
5364  avctx->profile = v->profile;
5365  if (v->profile == PROFILE_ADVANCED)
5366  avctx->level = v->level;
5367 
5368  avctx->has_b_frames = !!avctx->max_b_frames;
5369 
5370  s->mb_width = (avctx->coded_width + 15) >> 4;
5371  s->mb_height = (avctx->coded_height + 15) >> 4;
5372 
5373  if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
5374  for (i = 0; i < 64; i++) {
5375 #define transpose(x) ((x >> 3) | ((x & 7) << 3))
5376  v->zz_8x8[0][i] = transpose(wmv1_scantable[0][i]);
5377  v->zz_8x8[1][i] = transpose(wmv1_scantable[1][i]);
5378  v->zz_8x8[2][i] = transpose(wmv1_scantable[2][i]);
5379  v->zz_8x8[3][i] = transpose(wmv1_scantable[3][i]);
5380  v->zzi_8x8[i] = transpose(ff_vc1_adv_interlaced_8x8_zz[i]);
5381  }
5382  v->left_blk_sh = 0;
5383  v->top_blk_sh = 3;
5384  } else {
5385  memcpy(v->zz_8x8, wmv1_scantable, 4*64);
5386  v->left_blk_sh = 3;
5387  v->top_blk_sh = 0;
5388  }
5389 
5390  if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5391  v->sprite_width = avctx->coded_width;
5392  v->sprite_height = avctx->coded_height;
5393 
5394  avctx->coded_width = avctx->width = v->output_width;
5395  avctx->coded_height = avctx->height = v->output_height;
5396 
5397  // prevent 16.16 overflows
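// (sprite coordinates are computed in 16.16 fixed point; limiting every dimension
//  to below 1 << 14 keeps values such as width << 16 under 1 << 30, safely inside
//  a signed 32-bit integer)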
5398  if (v->sprite_width > 1 << 14 ||
5399  v->sprite_height > 1 << 14 ||
5400  v->output_width > 1 << 14 ||
5401  v->output_height > 1 << 14) return -1;
5402  }
5403  return 0;
5404 }
5405 
5409 static av_cold int vc1_decode_end(AVCodecContext *avctx)
5410 {
5411  VC1Context *v = avctx->priv_data;
5412  int i;
5413 
5414  if ((avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE)
5415  && v->sprite_output_frame.data[0])
5416  avctx->release_buffer(avctx, &v->sprite_output_frame);
5417  for (i = 0; i < 4; i++)
5418  av_freep(&v->sr_rows[i >> 1][i & 1]);
5419  av_freep(&v->hrd_rate);
5420  av_freep(&v->hrd_buffer);
5421  MPV_common_end(&v->s);
5422  av_freep(&v->mv_type_mb_plane);
5423  av_freep(&v->direct_mb_plane);
5424  av_freep(&v->forward_mb_plane);
5425  av_freep(&v->fieldtx_plane);
5426  av_freep(&v->acpred_plane);
5427  av_freep(&v->over_flags_plane);
5428  av_freep(&v->mb_type_base);
5429  av_freep(&v->blk_mv_type_base);
5430  av_freep(&v->mv_f_base);
5431  av_freep(&v->mv_f_last_base);
5432  av_freep(&v->mv_f_next_base);
5433  av_freep(&v->block);
5434  av_freep(&v->cbp_base);
5435  av_freep(&v->ttblk_base);
5436  av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
5437  av_freep(&v->luma_mv_base);
5438  ff_intrax8_common_end(&v->x8);
5439  return 0;
5440 }
5441 
5442 
5446 static int vc1_decode_frame(AVCodecContext *avctx, void *data,
5447  int *data_size, AVPacket *avpkt)
5448 {
5449  const uint8_t *buf = avpkt->data;
5450  int buf_size = avpkt->size, n_slices = 0, i;
5451  VC1Context *v = avctx->priv_data;
5452  MpegEncContext *s = &v->s;
5453  AVFrame *pict = data;
5454  uint8_t *buf2 = NULL;
5455  const uint8_t *buf_start = buf;
5456  int mb_height, n_slices1 = -1;
5457  struct {
5458  uint8_t *buf;
5459  GetBitContext gb;
5460  int mby_start;
5461  } *slices = NULL, *tmp;
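/* Each slices[] entry holds the unescaped payload of one slice or field, a bit
 * reader positioned at its start, and the macroblock row where it begins. */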
5462 
5463  /* no supplementary picture */
5464  if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
5465  /* special case for last picture */
5466  if (s->low_delay == 0 && s->next_picture_ptr) {
5467  *pict = *(AVFrame*)s->next_picture_ptr;
5468  s->next_picture_ptr = NULL;
5469 
5470  *data_size = sizeof(AVFrame);
5471  }
5472 
5473  return 0;
5474  }
5475 
5476  if (s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) {
5477  if (v->profile < PROFILE_ADVANCED)
5478  avctx->pix_fmt = PIX_FMT_VDPAU_WMV3;
5479  else
5480  avctx->pix_fmt = PIX_FMT_VDPAU_VC1;
5481  }
5482 
5483  //for advanced profile we may need to parse and unescape data
5484  if (avctx->codec_id == CODEC_ID_VC1 || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5485  int buf_size2 = 0;
5486  buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5487 
5488  if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
5489  const uint8_t *start, *end, *next;
5490  int size;
5491 
5492  next = buf;
5493  for (start = buf, end = buf + buf_size; next < end; start = next) {
5494  next = find_next_marker(start + 4, end);
5495  size = next - start - 4;
5496  if (size <= 0) continue;
5497  switch (AV_RB32(start)) {
5498  case VC1_CODE_FRAME:
5499  if (avctx->hwaccel ||
5500  s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
5501  buf_start = start;
5502  buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5503  break;
5504  case VC1_CODE_FIELD: {
5505  int buf_size3;
5506  if (!(tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1))))
5507  goto err;
5508  slices = tmp;
5509  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5510  if (!slices[n_slices].buf)
5511  goto err;
5512  buf_size3 = vc1_unescape_buffer(start + 4, size,
5513  slices[n_slices].buf);
5514  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5515  buf_size3 << 3);
5516  /* assume the field marker sits at the exact middle of the frame;
5517  this is not guaranteed to be correct */
5518  slices[n_slices].mby_start = s->mb_height >> 1;
5519  n_slices1 = n_slices - 1; // index of the last slice of the first field
5520  n_slices++;
5521  break;
5522  }
5523  case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
5524  buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5525  init_get_bits(&s->gb, buf2, buf_size2 * 8);
5526  vc1_decode_entry_point(avctx, v, &s->gb);
5527  break;
5528  case VC1_CODE_SLICE: {
5529  int buf_size3;
5530  if (!(tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1))))
5531  goto err;
5532  slices = tmp;
5533  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5534  if (!slices[n_slices].buf)
5535  goto err;
5536  buf_size3 = vc1_unescape_buffer(start + 4, size,
5537  slices[n_slices].buf);
5538  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5539  buf_size3 << 3);
5540  slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
5541  n_slices++;
5542  break;
5543  }
5544  }
5545  }
5546  } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
5547  const uint8_t *divider;
5548  int buf_size3;
5549 
5550  divider = find_next_marker(buf, buf + buf_size);
5551  if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
5552  av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
5553  goto err;
5554  } else { // found field marker, unescape second field
5555  tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5556  if (!tmp)
5557  goto err;
5558  slices = tmp;
5559  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5560  if (!slices[n_slices].buf)
5561  goto err;
5562  buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
5563  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5564  buf_size3 << 3);
5565  slices[n_slices].mby_start = s->mb_height >> 1;
5566  n_slices1 = n_slices - 1;
5567  n_slices++;
5568  }
5569  buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
5570  } else {
5571  buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
5572  }
5573  init_get_bits(&s->gb, buf2, buf_size2*8);
5574  } else
5575  init_get_bits(&s->gb, buf, buf_size*8);
5576 
5577  if (v->res_sprite) {
5578  v->new_sprite = !get_bits1(&s->gb);
5579  v->two_sprites = get_bits1(&s->gb);
5580  /* res_sprite means a Windows Media Image stream, CODEC_ID_*IMAGE means
5581  we're using the sprite compositor. These are intentionally kept separate
5582  so you can get the raw sprites by using the wmv3 decoder for WMVP or
5583  the vc1 one for WVP2 */
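/* (For example, one way an application could do this is to force the plain
 *  decoder explicitly, e.g. avcodec_find_decoder_by_name("wmv3") on a WMVP
 *  stream, which then yields the raw sprite frames instead of the composited
 *  output -- illustrative only.) */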
5584  if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5585  if (v->new_sprite) {
5586  // switch AVCodecContext parameters to those of the sprites
5587  avctx->width = avctx->coded_width = v->sprite_width;
5588  avctx->height = avctx->coded_height = v->sprite_height;
5589  } else {
5590  goto image;
5591  }
5592  }
5593  }
5594 
5595  if (s->context_initialized &&
5596  (s->width != avctx->coded_width ||
5597  s->height != avctx->coded_height)) {
5598  vc1_decode_end(avctx);
5599  }
5600 
5601  if (!s->context_initialized) {
5602  if (ff_msmpeg4_decode_init(avctx) < 0 || vc1_decode_init_alloc_tables(v) < 0)
5603  return -1;
5604 
5605  s->low_delay = !avctx->has_b_frames || v->res_sprite;
5606 
5607  if (v->profile == PROFILE_ADVANCED) {
5608  s->h_edge_pos = avctx->coded_width;
5609  s->v_edge_pos = avctx->coded_height;
5610  }
5611  }
5612 
5613  /* We need to set current_picture_ptr before reading the header,
5614  * otherwise we cannot store anything in there. */
5615  if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
5616  int i = ff_find_unused_picture(s, 0);
5617  if (i < 0)
5618  goto err;
5619  s->current_picture_ptr = &s->picture[i];
5620  }
5621 
5622  // do parse frame header
5623  v->pic_header_flag = 0;
5624  if (v->profile < PROFILE_ADVANCED) {
5625  if (vc1_parse_frame_header(v, &s->gb) == -1) {
5626  goto err;
5627  }
5628  } else {
5629  if (vc1_parse_frame_header_adv(v, &s->gb) == -1) {
5630  goto err;
5631  }
5632  }
5633 
5634  if ((avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE)
5635  && s->pict_type != AV_PICTURE_TYPE_I) {
5636  av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
5637  goto err;
5638  }
5639 
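// repeat_pict counts extra field periods: a repeated field adds 1, while
// rptfrm repeated frames add rptfrm * 2 (two fields per frame).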
5640  // process pulldown flags
5641  s->current_picture_ptr->f.repeat_pict = 0;
5642  // Pulldown flags are only valid when 'broadcast' has been set,
5643  // in which case ticks_per_frame will be 2.
5644  if (v->rff) {
5645  // repeat field
5646  s->current_picture_ptr->f.repeat_pict = 1;
5647  } else if (v->rptfrm) {
5648  // repeat frames
5649  s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2;
5650  }
5651 
5652  // for skipping the frame
5653  s->current_picture.f.pict_type = s->pict_type;
5654  s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
5655 
5656  /* skip B-frames if we don't have reference frames */
5657  if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->dropable)) {
5658  goto err;
5659  }
5660  if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
5661  (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
5662  avctx->skip_frame >= AVDISCARD_ALL) {
5663  goto end;
5664  }
5665 
5666  if (s->next_p_frame_damaged) {
5667  if (s->pict_type == AV_PICTURE_TYPE_B)
5668  goto end;
5669  else
5670  s->next_p_frame_damaged = 0;
5671  }
5672 
5673  if (MPV_frame_start(s, avctx) < 0) {
5674  goto err;
5675  }
5676 
5679 
5680  if ((CONFIG_VC1_VDPAU_DECODER) &&
5681  s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
5682  ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
5683  else if (avctx->hwaccel) {
5684  if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
5685  goto err;
5686  if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
5687  goto err;
5688  if (avctx->hwaccel->end_frame(avctx) < 0)
5689  goto err;
5690  } else {
5691  ff_er_frame_start(s);
5692 
5693  v->bits = buf_size * 8;
5694  if (v->field_mode) {
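/* Field pictures: double the line sizes so each field is addressed as its own
 * half-height picture, and rotate the per-block field-MV buffers so that
 * last <- next, next <- current, and current reuses the old last buffer. */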
5695  uint8_t *tmp[2];
5696  s->current_picture.f.linesize[0] <<= 1;
5697  s->current_picture.f.linesize[1] <<= 1;
5698  s->current_picture.f.linesize[2] <<= 1;
5699  s->linesize <<= 1;
5700  s->uvlinesize <<= 1;
5701  tmp[0] = v->mv_f_last[0];
5702  tmp[1] = v->mv_f_last[1];
5703  v->mv_f_last[0] = v->mv_f_next[0];
5704  v->mv_f_last[1] = v->mv_f_next[1];
5705  v->mv_f_next[0] = v->mv_f[0];
5706  v->mv_f_next[1] = v->mv_f[1];
5707  v->mv_f[0] = tmp[0];
5708  v->mv_f[1] = tmp[1];
5709  }
5710  mb_height = s->mb_height >> v->field_mode;
5711  for (i = 0; i <= n_slices; i++) {
5712  if (i > 0 && slices[i - 1].mby_start >= mb_height) {
5713  v->second_field = 1;
5714  v->blocks_off = s->mb_width * s->mb_height << 1;
5715  v->mb_off = s->mb_stride * s->mb_height >> 1;
5716  } else {
5717  v->second_field = 0;
5718  v->blocks_off = 0;
5719  v->mb_off = 0;
5720  }
5721  if (i) {
5722  v->pic_header_flag = 0;
5723  if (v->field_mode && i == n_slices1 + 2)
5724  vc1_parse_frame_header_adv(v, &s->gb);
5725  else if (get_bits1(&s->gb)) {
5726  v->pic_header_flag = 1;
5727  vc1_parse_frame_header_adv(v, &s->gb);
5728  }
5729  }
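/* Derive the macroblock-row range this slice covers; the % mb_height folds
 * slice start rows into the current field when decoding field pictures. */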
5730  s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
5731  if (!v->field_mode || v->second_field)
5732  s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5733  else
5734  s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5735  vc1_decode_blocks(v);
5736  if (i != n_slices)
5737  s->gb = slices[i].gb;
5738  }
5739  if (v->field_mode) {
5740  v->second_field = 0;
5741  if (s->pict_type == AV_PICTURE_TYPE_B) {
5742  memcpy(v->mv_f_base, v->mv_f_next_base,
5743  2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5744  }
5745  s->current_picture.f.linesize[0] >>= 1;
5746  s->current_picture.f.linesize[1] >>= 1;
5747  s->current_picture.f.linesize[2] >>= 1;
5748  s->linesize >>= 1;
5749  s->uvlinesize >>= 1;
5750  }
5751 //av_log(s->avctx, AV_LOG_INFO, "Consumed %i/%i bits\n", get_bits_count(&s->gb), s->gb.size_in_bits);
5752 // if (get_bits_count(&s->gb) > buf_size * 8)
5753 // return -1;
5754  ff_er_frame_end(s);
5755  }
5756 
5757  MPV_frame_end(s);
5758 
5759  if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5760 image:
5761  avctx->width = avctx->coded_width = v->output_width;
5762  avctx->height = avctx->coded_height = v->output_height;
5763  if (avctx->skip_frame >= AVDISCARD_NONREF)
5764  goto end;
5765 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5766  if (vc1_decode_sprites(v, &s->gb))
5767  goto err;
5768 #endif
5769  *pict = v->sprite_output_frame;
5770  *data_size = sizeof(AVFrame);
5771  } else {
5772  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
5773  *pict = *(AVFrame*)s->current_picture_ptr;
5774  } else if (s->last_picture_ptr != NULL) {
5775  *pict = *(AVFrame*)s->last_picture_ptr;
5776  }
5777  if (s->last_picture_ptr || s->low_delay) {
5778  *data_size = sizeof(AVFrame);
5779  ff_print_debug_info(s, pict);
5780  }
5781  }
5782 
5783 end:
5784  av_free(buf2);
5785  for (i = 0; i < n_slices; i++)
5786  av_free(slices[i].buf);
5787  av_free(slices);
5788  return buf_size;
5789 
5790 err:
5791  av_free(buf2);
5792  for (i = 0; i < n_slices; i++)
5793  av_free(slices[i].buf);
5794  av_free(slices);
5795  return -1;
5796 }
5797 
5798 
5799 static const AVProfile profiles[] = {
5800  { FF_PROFILE_VC1_SIMPLE, "Simple" },
5801  { FF_PROFILE_VC1_MAIN, "Main" },
5802  { FF_PROFILE_VC1_COMPLEX, "Complex" },
5803  { FF_PROFILE_VC1_ADVANCED, "Advanced" },
5804  { FF_PROFILE_UNKNOWN },
5805 };
5806 
5807 AVCodec ff_vc1_decoder = {
5808  .name = "vc1",
5809  .type = AVMEDIA_TYPE_VIDEO,
5810  .id = CODEC_ID_VC1,
5811  .priv_data_size = sizeof(VC1Context),
5812  .init = vc1_decode_init,
5813  .close = vc1_decode_end,
5814  .decode = vc1_decode_frame,
5815  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
5816  .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
5817  .pix_fmts = ff_hwaccel_pixfmt_list_420,
5818  .profiles = NULL_IF_CONFIG_SMALL(profiles)
5819 };
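/* Illustrative usage sketch (application side, not part of this file; assumes
 * the lavc API of this era):
 *
 *   AVCodec        *codec = avcodec_find_decoder(CODEC_ID_VC1);
 *   AVCodecContext *ctx   = avcodec_alloc_context3(codec);
 *   AVFrame        *frame = avcodec_alloc_frame();
 *   int got_frame;
 *   // ctx->extradata / extradata_size must carry the sequence header
 *   // (and an entry point for advanced profile), see vc1_decode_init()
 *   avcodec_open2(ctx, codec, NULL);
 *   avcodec_decode_video2(ctx, frame, &got_frame, &pkt);
 */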
5820 
5821 #if CONFIG_WMV3_DECODER
5822 AVCodec ff_wmv3_decoder = {
5823  .name = "wmv3",
5824  .type = AVMEDIA_TYPE_VIDEO,
5825  .id = CODEC_ID_WMV3,
5826  .priv_data_size = sizeof(VC1Context),
5827  .init = vc1_decode_init,
5828  .close = vc1_decode_end,
5829  .decode = vc1_decode_frame,
5830  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
5831  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
5832  .pix_fmts = ff_hwaccel_pixfmt_list_420,
5833  .profiles = NULL_IF_CONFIG_SMALL(profiles)
5834 };
5835 #endif
5836 
5837 #if CONFIG_WMV3_VDPAU_DECODER
5838 AVCodec ff_wmv3_vdpau_decoder = {
5839  .name = "wmv3_vdpau",
5840  .type = AVMEDIA_TYPE_VIDEO,
5841  .id = CODEC_ID_WMV3,
5842  .priv_data_size = sizeof(VC1Context),
5843  .init = vc1_decode_init,
5844  .close = vc1_decode_end,
5845  .decode = vc1_decode_frame,
5846  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
5847  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"),
5848  .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_WMV3, PIX_FMT_NONE},
5849  .profiles = NULL_IF_CONFIG_SMALL(profiles)
5850 };
5851 #endif
5852 
5853 #if CONFIG_VC1_VDPAU_DECODER
5854 AVCodec ff_vc1_vdpau_decoder = {
5855  .name = "vc1_vdpau",
5856  .type = AVMEDIA_TYPE_VIDEO,
5857  .id = CODEC_ID_VC1,
5858  .priv_data_size = sizeof(VC1Context),
5859  .init = vc1_decode_init,
5860  .close = vc1_decode_end,
5861  .decode = vc1_decode_frame,
5862  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
5863  .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"),
5864  .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_VC1, PIX_FMT_NONE},
5865  .profiles = NULL_IF_CONFIG_SMALL(profiles)
5866 };
5867 #endif
5868 
5869 #if CONFIG_WMV3IMAGE_DECODER
5870 AVCodec ff_wmv3image_decoder = {
5871  .name = "wmv3image",
5872  .type = AVMEDIA_TYPE_VIDEO,
5873  .id = CODEC_ID_WMV3IMAGE,
5874  .priv_data_size = sizeof(VC1Context),
5875  .init = vc1_decode_init,
5876  .close = vc1_decode_end,
5877  .decode = vc1_decode_frame,
5878  .capabilities = CODEC_CAP_DR1,
5879  .flush = vc1_sprite_flush,
5880  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
5881  .pix_fmts = ff_pixfmt_list_420
5882 };
5883 #endif
5884 
5885 #if CONFIG_VC1IMAGE_DECODER
5886 AVCodec ff_vc1image_decoder = {
5887  .name = "vc1image",
5888  .type = AVMEDIA_TYPE_VIDEO,
5889  .id = CODEC_ID_VC1IMAGE,
5890  .priv_data_size = sizeof(VC1Context),
5891  .init = vc1_decode_init,
5892  .close = vc1_decode_end,
5893  .decode = vc1_decode_frame,
5894  .capabilities = CODEC_CAP_DR1,
5895  .flush = vc1_sprite_flush,
5896  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
5897  .pix_fmts = ff_pixfmt_list_420
5898 };
5899 #endif